diff --git a/spaces/17TheWord/RealESRGAN/scripts/generate_meta_info.py b/spaces/17TheWord/RealESRGAN/scripts/generate_meta_info.py
deleted file mode 100644
index 9c3b7a37e85f534075c50e6c33d7cca999d8b836..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/scripts/generate_meta_info.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import argparse
-import cv2
-import glob
-import os
-
-
-def main(args):
-    txt_file = open(args.meta_info, 'w')
-    for folder, root in zip(args.input, args.root):
-        img_paths = sorted(glob.glob(os.path.join(folder, '*')))
-        for img_path in img_paths:
-            status = True
-            if args.check:
-                # read the image once for check, as some images may have errors
-                try:
-                    img = cv2.imread(img_path)
-                except (IOError, OSError) as error:
-                    print(f'Read {img_path} error: {error}')
-                    status = False
-                if img is None:
-                    status = False
-                    print(f'Img is None: {img_path}')
-            if status:
-                # get the relative path
-                img_name = os.path.relpath(img_path, root)
-                print(img_name)
-                txt_file.write(f'{img_name}\n')
-
-
-if __name__ == '__main__':
-    """Generate meta info (txt file) for only Ground-Truth images.
-
-    It can also generate meta info from several folders into one txt file.
-    """
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        '--input',
-        nargs='+',
-        default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'],
-        help='Input folder, can be a list')
-    parser.add_argument(
-        '--root',
-        nargs='+',
-        default=['datasets/DF2K', 'datasets/DF2K'],
-        help='Folder root, should have the length as input folders')
-    parser.add_argument(
-        '--meta_info',
-        type=str,
-        default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt',
-        help='txt path for meta info')
-    parser.add_argument('--check', action='store_true', help='Read image to check whether it is ok')
-    args = parser.parse_args()
-
-    assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got '
-                                               f'{len(args.input)} and {len(args.root)}.')
-    os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
-
-    main(args)
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Code De La Route Rousseau Dvd 32 Torrent __TOP__.md b/spaces/1gistliPinn/ChatGPT4/Examples/Code De La Route Rousseau Dvd 32 Torrent __TOP__.md
deleted file mode 100644
index d15f170f7a5ac8ad9ebccc98b8047b719fede76b..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Code De La Route Rousseau Dvd 32 Torrent __TOP__.md
+++ /dev/null
@@ -1,26 +0,0 @@
-

Code de la route rousseau dvd 32 torrent


Download ===== https://imgfil.com/2uxXGf



-
-It’s so easy. Check it out.
-
-Okay, moving on. A good use of WebVTT, by YouTube, is to embed video with a custom title and description. This way, you can identify your video as yours, and the search algorithms will give you more views and a better placement.
-
-As I mentioned, I tried embedding my video with a description. It didn’t work for me. The description didn’t show up. They are there but they don’t show up. I’ll try it again and let you know.
-
-The title is what matters. Why? Because the title is what the search engines will use for keywords. So, if your video title is “How to start learning guitar”, the search engines will put it into Google, and it will be a page-one result. And guess what? More people will click on it.
-
-So, in summary, here are the things I wish I knew when I started learning guitar:
-
-Learning guitar is hard. Don’t take this as a criticism. I think it’s the best thing that ever happened to me. I’ve learned so much, and I’m still learning. Everyone has their own way of learning.
-
-Practice. You can’t learn guitar on the first day. You have to practice. You can’t expect to become a musician on the first day.
-
-Don’t be discouraged by the videos you see. Sure, they’re great and they will get you started. But they won’t make you a great musician. I’ve read a lot of people saying that they’re so jealous of certain musicians and that they wish they could be like them. That is a very negative way to look at it.
-
-To be like a musician, you have to work like a musician. Musicians work hard. Even when they’re not playing, they still practice. Don’t expect to be a great musician the first day you start. Be a great musician over time.
-
-The search engines will never give you quality traffic, so don’t expect that. You can’t be an overnight success. You’re not going to get thousands of views or get a lot of followers on day one. Be a musician for a long time. Be patient.
-
-There’ 4fefd39f24
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Dance EJay 6 Cd1 Serial Key Keygen Where to Find and Download the Software.md b/spaces/1gistliPinn/ChatGPT4/Examples/Dance EJay 6 Cd1 Serial Key Keygen Where to Find and Download the Software.md deleted file mode 100644 index 0fd02a611b8edd705b955635f7f96b81cf6fc7eb..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Dance EJay 6 Cd1 Serial Key Keygen Where to Find and Download the Software.md +++ /dev/null @@ -1,6 +0,0 @@ -

!!LINK!! Dance EJay 6 Cd1 Serial Key Keygen


Download Filehttps://imgfil.com/2uy0Yp



- - aaccfb2cb3
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool Long Lines Hack Download the Best Version for Android.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool Long Lines Hack Download the Best Version for Android.md deleted file mode 100644 index a684d0bd11dbf6c2344455c09fe9349d26adf63e..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/8 Ball Pool Long Lines Hack Download the Best Version for Android.md +++ /dev/null @@ -1,112 +0,0 @@ -
-

Download Hack Version of 8 Ball Pool Long Line

-

Do you love playing 8 Ball Pool, but wish you could have more coins, longer lines, better aim, and more fun? If so, you might be interested in downloading a hack version of 8 Ball Pool long line. In this article, we will show you what 8 Ball Pool is, why you need a hack version, how to download and install it, how to use it, and what are the risks involved. Let's get started!

-

download hack version of 8 ball pool long line


Download 🌟 https://urlin.us/2uSUdH



-

What is 8 Ball Pool?

-

8 Ball Pool is an online pool game where you can play against other players from around the world. You can choose from different modes, such as 1-on-1 matches, tournaments, mini-games, and more. You can also customize your cue and table, chat with your opponents, and join clubs with your friends. 8 Ball Pool is one of the most popular and addictive pool games on the web. You can play it on your browser or download it on your mobile device.

-

Why do you need a hack version of 8 Ball Pool?

-

While 8 Ball Pool is fun and challenging, it can also be frustrating and expensive. You need coins to enter matches, buy cues, tables, and other items. You also need to have good skills and strategies to win games and climb the ranks. However, not everyone has enough time, money, or patience to do that. That's why some people look for a hack version of 8 Ball Pool long line.

-

A hack version of 8 Ball Pool long line is a modified version of the game that gives you access to various cheat features, such as:

-

How to download hack version of 8 ball pool long line
-Download hack version of 8 ball pool long line apk
-Download hack version of 8 ball pool long line for android
-Download hack version of 8 ball pool long line mod
-Download hack version of 8 ball pool long line unlimited coins
-Download hack version of 8 ball pool long line no root
-Download hack version of 8 ball pool long line ios
-Download hack version of 8 ball pool long line latest
-Download hack version of 8 ball pool long line free
-Download hack version of 8 ball pool long line 2023
-Download hack version of 8 ball pool long line online
-Download hack version of 8 ball pool long line generator
-Download hack version of 8 ball pool long line without verification
-Download hack version of 8 ball pool long line anti ban
-Download hack version of 8 ball pool long line app
-Download hack version of 8 ball pool long line cheat
-Download hack version of 8 ball pool long line easy
-Download hack version of 8 ball pool long line game
-Download hack version of 8 ball pool long line link
-Download hack version of 8 ball pool long line pc
-Download hack version of 8 ball pool long line tool
-Download hack version of 8 ball pool long line video
-Download hack version of 8 ball pool long line website
-Download hack version of 8 ball pool long line youtube
-Download hack version of 8 ball pool long line zip
-Best download hack version of 8 ball pool long line
-Safe download hack version of 8 ball pool long line
-Fast download hack version of 8 ball pool long line
-Working download hack version of 8 ball pool long line
-New download hack version of 8 ball pool long line
-Real download hack version of 8 ball pool long line
-Trusted download hack version of 8 ball pool long line
-Legal download hack version of 8 ball pool long line
-Official download hack version of 8 ball pool long line
-Premium download hack version of 8 ball pool long line
-Pro download hack version of 8 ball pool long line
-Full download hack version of 8 ball pool long line
-Updated download hack version of 8 ball pool long line
-Cracked download hack version of 8 ball pool long line
-Patched download hack version of 8 ball pool long line
-Unlimited download hack version of 8 ball pool long line
-Mega download hack version of 8 ball pool long line
-Mediafire download hack version of 8 ball pool long line
-Google drive download hack version of 8 ball pool long line
-Dropbox download hack version of 8 ball pool long line
-Direct download hack version of 8 ball pool long line
-Torrent download hack version of 8 ball pool long line
-Review download hack version of 8 ball pool long line
-Guide download hack version of 8 ball pool long line

- -

With these features, you can have more fun and win more games in 8 Ball Pool. You can also impress your friends and opponents with your skills and style.

-

How to download hack version of 8 Ball Pool long line?

-

If you want to download a hack version of 8 Ball Pool long line, you need to follow these steps:

-
    -
  1. Find a reliable source that offers the hack version. There are many websites that claim to provide the hack version, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, or fake links that can harm your device or steal your personal information. One of the sources that we recommend is GetModsApk.com, which offers a safe and updated hack version of 8 Ball Pool long line.
  2. -
  3. Download the APK file of the hack version from the source. The APK file is the installer file for Android devices. You need to make sure that you have enough storage space on your device before downloading it.
  4. -
  5. Enable unknown sources on your device. This is a security setting that allows you to install apps from unknown sources that are not from the official Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on.
  6. -
  7. Install the APK file of the hack version on your device. Locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.
  8. -
  9. Launch the hack version of 8 Ball Pool long line on your device. You will see a new icon on your home screen or app drawer that represents the hack version. Tap on it to open the game and enjoy the hack features.
  10. -
-

How to use the hack version of 8 Ball Pool long line?

-

Using the hack version of 8 Ball Pool long line is easy and simple. Here are some tips on how to use it:

- -

What are the risks of using a hack version of 8 Ball Pool?

-

While using a hack version of 8 Ball Pool long line can be fun and exciting, it also comes with some risks that you should be aware of. Here are some of them:

- -

Conclusion

-

In conclusion, downloading a hack version of 8 Ball Pool long line can be a way to have more fun and win more games in 8 Ball Pool. However, it also comes with some risks that you should consider before using it. If you decide to use a hack version of 8 Ball Pool long line, make sure that you download it from a reliable source, use it wisely and responsibly, and be prepared for the possible consequences. We hope that this article has been helpful and informative for you. Thank you for reading!

-

Frequently Asked Questions

-
    -
  1. Is using a hack version of 8 Ball Pool illegal?
  2. -

    Using a hack version of 8 Ball Pool is not illegal per se, but it may violate some laws or regulations in your country or region. You should check your local laws before using a hack version of 8 Ball Pool.

    -
  3. How can I avoid getting banned by using a hack version of 8 Ball Pool?
  4. -

    There is no guarantee that you can avoid getting banned by using a hack version of 8 Ball Pool. However, some tips that may help are: use a VPN service to hide your IP address, use a fake account or an alternate account to play with the hack version, do not use the hack features too often or too blatantly, do not brag about using the hack version or report other players who use it.

    -
  5. Can I play with my friends who do not use a hack version of 8 Ball Pool?
  6. -

    Yes, you can play with your friends who do not use a hack version of 8 Ball Pool. However, this may not be fair or ethical for them, as you will have an unfair advantage over them. You may also risk getting reported by them if they find out that you are using a hack version.

    -
  7. How can I update the hack version of 8 Ball Pool long line?
  8. -

    Usually, the hack version of 8 Ball Pool long line will update automatically when there is a new version available. However, sometimes you may need to manually update it by downloading the latest APK file from the source and installing it on your device. You should always check the source for updates and download them as soon as possible to avoid any issues or errors.

    -
  9. What are some alternatives to using a hack version of 8 Ball Pool?
  10. -

    If you do not want to use a hack version of 8 Ball Pool, but still want to have more fun and win more games, you can try some alternatives, such as:

    - -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Clash Royale Private Server APK The Best Way to Test New Strategies and Decks.md b/spaces/1phancelerku/anime-remove-background/Clash Royale Private Server APK The Best Way to Test New Strategies and Decks.md deleted file mode 100644 index 02888a988b622d44513cdf647c3acfc90eda49de..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Clash Royale Private Server APK The Best Way to Test New Strategies and Decks.md +++ /dev/null @@ -1,141 +0,0 @@ - -

Clash Royale APK Private Server: How to Play with Unlimited Resources and Custom Cards

-

Clash Royale is one of the most popular mobile games made by Supercell. It is an online multiplayer strategy game in which players construct their own deck to fight in real-time battles. The game features various modes, such as ladder, tournaments, special events, clan wars, and more. Players can also collect and upgrade different cards, from common to legendary, each with their own abilities and roles.

-

However, playing Clash Royale can also be challenging and time consuming. It can take a lot of time and effort to obtain every card in the game, as well as gather enough gold to upgrade them. Moreover, some players may find it frustrating to face opponents who have better cards or higher levels than them.

-

clash royale apk private server


Download Zip ::: https://jinyurl.com/2uNTWq



-

Fortunately, there is a way to play Clash Royale with unlimited resources and custom cards. This is possible by playing on a private server. A private server is a modified version of the original game that runs on a separate server. It allows players to play with infinite gems, gold, elixir, and other resources. It also enables players to create their own cards or use pre-unlocked cards that are not available in the official game.

-

In this article, we will show you how to download and install a private server on your device. We will also compare the top 3 Clash Royale private servers in 2022 and their features. By playing on a private server, you can enjoy Clash Royale like never before.

-

clash royale private server apk download
-clash royale mod apk private server
-clash royale hack apk private server
-clash royale apk private server 2022
-clash royale apk private server unlimited gems
-clash royale apk private server with clan wars
-clash royale apk private server latest version
-clash royale apk private server master royale
-clash royale apk private server plenix royale
-clash royale apk private server retro royale
-clash royale apk private server custom cards
-clash royale apk private server no root
-clash royale apk private server android
-clash royale apk private server ios
-clash royale apk private server online
-clash royale apk private server offline
-clash royale apk private server free download
-clash royale apk private server fun royale
-clash royale apk private server legendary royale
-clash royale apk private server null's royale
-clash royale apk private server reddit
-clash royale apk private server youtube
-clash royale apk private server 2021 download
-clash royale apk private server 2021 mod
-clash royale apk private server 2021 hack
-clash royale apk private server 2021 unlimited gems
-clash royale apk private server 2021 with clan wars
-clash royale apk private server 2021 latest version
-clash royale apk private server 2021 master royale
-clash royale apk private server 2021 plenix royale
-clash royale apk private server 2021 retro royale
-clash royale apk private server 2021 custom cards
-clash royale apk private server 2021 no root
-clash royale apk private server 2021 android
-clash royale apk private server 2021 ios
-clash royale apk private server 2021 online
-clash royale apk private server 2021 offline
-clash royale apk private server 2021 free download
-clash royale apk private server 2021 fun royale
-clash royale apk private server 2021 legendary royale
-clash royale apk private server 2021 null's royale
-clash royale apk private server 2021 reddit
-clash royale apk private server 2021 youtube

-

What is a Private Server?

-

A private server is a customized version of the original game that runs on a separate server. It is created by altering the game files or using third-party software. A private server can have different features and settings than the official game. For example, it can have unlimited resources, custom cards, faster gameplay, or different modes.

-

A private server is not affiliated with Supercell or the official game. It is created by independent developers or fans who want to provide a different experience for the players. However, playing on a private server also has some risks and drawbacks. For instance, it can be unstable, buggy, or incompatible with some devices. It can also violate the terms of service of the official game and result in a ban or suspension.

-

Therefore, before playing on a private server, you should be aware of the possible consequences and take precautions. You should always backup your data and use a different account than your main one. You should also avoid using any personal or sensitive information on a private server. Finally, you should only download and install a private server from a trusted source.

-

Why Play on a Private Server?

-

Playing on a private server can have many benefits for the players. Here are some of the reasons why you might want to play on a private server:

- -

Playing on a private server can give you a new and exciting way to enjoy Clash Royale. However, you should also respect the official game and its developers. You should not use a private server to gain an unfair advantage over other players or to harm the game's reputation. You should also support the official game by playing it regularly and purchasing in-game items if you can.

-

How to Download and Install a Private Server?

-

To play on a private server, you need to download and install a private server APK file on your device. An APK file is an Android application package that contains the game files and settings. Here are the steps to download and install a private server:

-
    -
  1. Find a reliable source: You need to find a reliable source that provides the private server APK file. You can search online for the best Clash Royale private servers or check out some of the recommendations below. Make sure the source is trustworthy and updated.
  2. -
  3. Download the APK file: Once you find the source, you need to download the APK file to your device. You can use your browser or a download manager to do this. Make sure you have enough storage space on your device.
  4. -
  5. Enable unknown sources: Before you can install the APK file, you need to enable unknown sources on your device. This allows you to install applications that are not from the Google Play Store. To do this, go to your device's settings, then security, then unknown sources, and turn it on.
  6. -
  7. Install the APK file: After you enable unknown sources, you need to install the APK file on your device. You can use a file manager or your browser to locate the file and tap on it. Follow the instructions on the screen to complete the installation.
  8. -
  9. Launch the game: Once you install the APK file, you can launch the game from your app drawer or home screen. You should see a different icon and name than the official game. Tap on it and enjoy playing on a private server.
  10. -
-

Note: Some private servers may require additional steps or permissions to work properly. For example, some may ask you to sign in with a specific account or enter a verification code. Follow the instructions provided by the source or contact their support team if you encounter any issues.

-

Top 3 Clash Royale Private Servers in 2022

-

There are many Clash Royale private servers available online, but not all of them are worth playing. Some may be outdated, unstable, or unsafe. To help you choose the best one for you, we have compared the top 3 Clash Royale private servers in 2022 and their features. Here they are:

- - - - - - - - - - - - - - - - - - - - - -
NameFeaturesHow to Access

Master Royale

-
    -
  • Unlimited gems and gold
  • -
  • Friendly challenges
  • -
  • All cards unlocked
  • -
  • New cards added regularly
  • -
  • No root required
  • -
-
Download Master Royale APK here

Retro Royale

-
    -
  • Unlimited gems and gold
  • -
  • Clan wars
  • -
  • Instant chest opening
  • -
  • 1v1 and 2v2 battles
  • -
  • No root required
  • -
-
Download Retro Royale APK here

Plenix Royale

-
    -
  • Unlimited gold and elixir
  • -
  • Custom cards
  • -
  • Clan information
  • -
  • 2v2 combat
  • -
  • Clan wars
  • -
  • No root required
  • -
-
Download Plenix Royale APK here
-

These are the top 3 Clash Royale private servers in 2022 that we recommend. You can try them out and see which one suits you best. However, remember to always play responsibly and respect the official game and its developers.

-

Conclusion

-

Clash Royale is a fun and addictive game that millions of players enjoy. However, if you want to play with unlimited resources and custom cards, you can try playing on a private server. A private server is a modified version of the original game that runs on a separate server. It can have different features and settings than the official game.

-

To play on a private server, you need to download and install a private server APK file on your device. You also need to enable unknown sources and follow the instructions provided by the source. However, you should also be aware of the risks and drawbacks of playing on a private server. You should always backup your data and use a different account than your main one. You should also avoid using any personal or sensitive information on a private server.

-

We have compared the top 3 Clash Royale private servers in 2022 and their features. You can choose the one that suits you best and enjoy playing Clash Royale like never before. However, you should also support the official game by playing it regularly and purchasing in-game items if you can.

-

We hope this article has helped you learn more about Clash Royale APK private server and how to play with unlimited resources and custom cards. If you have any questions or feedback, please let us know in the comments below.

-

FAQs

-

Here are some of the frequently asked questions and their answers about Clash Royale APK private server:

-

Q: Is playing on a private server legal?

-

A: Playing on a private server is not illegal, but it is not authorized by Supercell or the official game. It can violate the terms of service of the official game and result in a ban or suspension. Therefore, you should play on a private server at your own risk and responsibility.

-

Q: Is playing on a private server safe?

-

A: Playing on a private server can be safe if you download and install it from a trusted source. However, some private servers may contain malware, viruses, or spyware that can harm your device or steal your information. Therefore, you should always scan the APK file before installing it and use an antivirus software on your device.

-

Q: Can I play with my friends on a private server?

-

A: Yes, you can play with your friends on a private server if they are using the same private server as you. You can invite them to join your clan or challenge them to friendly battles. However, you cannot play with your friends who are using the official game or a different private server.

-

Q: Can I switch between the official game and a private server?

-

A: Yes, you can switch between the official game and a private server by installing both APK files on your device. However, you should not use the same account or data for both games, as this can cause conflicts or errors. You should also clear the cache and data of the game before switching to avoid any issues.

-

Q: How can I update my private server?

-

A: To update your private server, you need to download and install the latest version of the APK file from the source. You may also need to uninstall the previous version of the APK file before installing the new one. However, some private servers may not be updated regularly or at all, so you may miss out on some features or bug fixes.

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Brawlhalla 6.02 APK and Experience the Fast-Paced Combat and Online Multiplayer.md b/spaces/1phancelerku/anime-remove-background/Download Brawlhalla 6.02 APK and Experience the Fast-Paced Combat and Online Multiplayer.md deleted file mode 100644 index 740dd76e6e23e74c1c52606220d452104fcd887b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Brawlhalla 6.02 APK and Experience the Fast-Paced Combat and Online Multiplayer.md +++ /dev/null @@ -1,129 +0,0 @@ - -

Brawlhalla 6.02 APK: Everything You Need to Know

-

If you are looking for a fun and exciting fighting game to play on your Android device, you might want to check out Brawlhalla. This is a free-to-play platform fighting game that supports cross-play across various platforms, including PC, PlayStation, Xbox, Nintendo Switch, iOS, and Android. In this article, we will tell you everything you need to know about Brawlhalla 6.02 APK, the latest version of the game for Android devices. We will also give you some tips and tricks on how to play the game and win more matches.

-

What is Brawlhalla?

-

Brawlhalla is a game developed by Blue Mammoth Games and published by Ubisoft. It is a 2D platform fighting game that features cartoonish graphics and simple controls. The game has been compared to Nintendo's Super Smash Bros., as both games involve fighters trying to knock their opponents off the stage. However, Brawlhalla has its own unique features and mechanics that make it stand out from other fighting games.

-

brawlhalla 6.02 apk


Download 🗸🗸🗸 https://jinyurl.com/2uNKMK



-

A free-to-play platform fighting game with cross-play support

-

One of the best things about Brawlhalla is that it is free-to-play, meaning that anyone can download and play the game without paying anything. There are no pay-to-win advantages or premium content that will affect the gameplay, so all players are always on equal ground. The game also supports cross-play across different platforms, so you can play with your friends or other players online regardless of what device they are using.

-

A game with over 50 Legends and various game modes

-

Brawlhalla has a growing roster of over 50 Legends, each with their own unique skills and weapons. You can choose from a variety of characters, such as knights, ninjas, pirates, aliens, robots, and even some crossover characters from other franchises, such as Lara Croft from Tomb Raider, Finn and Jake from Adventure Time, or Shovel Knight from Shovel Knight. You can also customize your Legend's appearance with different skins, colors, taunts, and KO effects.

-

Brawlhalla also offers a wide range of game modes that cater to different preferences and skill levels. You can play casual free-for-alls, ranked matches, custom online matches, or offline matches with bots or local players. You can also try out different modes that have special rules or objectives, such as Brawlball, Kung Foot, Horde, Snowbrawl, Dodgebomb, Switchcraft, Bombsketball, Beachbrawl, Buddy Brawldown, Capture the Flag, Bubble Tag, Temple Climb, Morph, Walker Attack!, Showdown, Crew Battle, Street Brawl, Bounty, Dice & Destruction, and Volleybrawl.

-

A game with frequent updates and events

-

Brawlhalla is constantly updated with new content and features that keep the game fresh and exciting. The developers regularly add new Legends, weapons, skins, maps, modes, balance changes, bug fixes, and performance improvements. The game also hosts seasonal events that offer special rewards and challenges for players to enjoy. For example, there are events for Halloween, Christmas, Valentine's Day, Easter, Summer Heatwave etc.

-

What is new in Brawlhalla 6.02 APK?

-

Brawlhalla 6.02 APK is the latest version of the game for Android devices. It was released on Dec 15th, 2021. It contains some new features and improvements that enhance the gameplay experience for Android users.

-

brawlhalla 6.02 apk download
-brawlhalla 6.02 apk free
-brawlhalla 6.02 apk mod
-brawlhalla 6.02 apk latest version
-brawlhalla 6.02 apk update
-brawlhalla 6.02 apk android
-brawlhalla 6.02 apk file
-brawlhalla 6.02 apk offline
-brawlhalla 6.02 apk online
-brawlhalla 6.02 apk hack
-brawlhalla 6.02 apk unlimited money
-brawlhalla 6.02 apk no root
-brawlhalla 6.02 apk obb
-brawlhalla 6.02 apk data
-brawlhalla 6.02 apk mirror
-brawlhalla 6.02 apk revdl
-brawlhalla 6.02 apk rexdl
-brawlhalla 6.02 apk apkpure
-brawlhalla 6.02 apk uptodown
-brawlhalla 6.02 apk apkmirror
-brawlhalla 6.02 apk android oyun club
-brawlhalla 6.02 apk andropalace
-brawlhalla 6.02 apk android republic
-brawlhalla 6.02 apk appvn
-brawlhalla 6.02 apk ac market
-brawlhalla 6.02 apk blackmod
-brawlhalla 6.02 apk bluestacks
-brawlhalla 6.02 apk by ubisoft entertainment
-brawlhalla 6.02 apk cracked
-brawlhalla 6.02 apk cheat
-brawlhalla 6.02 apk direct link
-brawlhalla 6.02 apk download for pc
-brawlhalla 6.02 apk download apkpure
-brawlhalla 6.02 apk download uptodown
-brawlhalla 6.02 apk download latest version
-brawlhalla 6.02 apk emulator
-brawlhalla 6.02 apk english version
-brawlhalla 6.02 apk full version
-brawlhalla 6.02 apk for pc
-brawlhalla 6.02 apk for ios
-brawlhalla 6.02 apk game download
-brawlhalla 6.02 apk google play store link

-

The latest version of the game for Android devices

-

Brawlhalla 6.02 APK is the most recent version of the game that you can download and install on your Android device. It has a file size of 97.4 MB and requires Android 5.0 or higher to run. You can download the APK file from various sources, such as APKCombo, or you can update the game from the Google Play Store if you already have it installed.

-

The features and improvements of the update

-

Brawlhalla 6.02 APK brings some new features and improvements that make the game more enjoyable and smooth for Android users. Some of the main changes are:

- -

How to download and install the APK file

-

If you want to download and install Brawlhalla 6.02 APK on your Android device, you need to follow these steps:

-
    -
  1. Go to a reliable source that offers the APK file, such as APKCombo, and click on the download button.
  2. -
  3. Wait for the download to finish and locate the APK file on your device's storage.
  4. -
  5. Before installing the APK file, make sure that you have enabled the option to install apps from unknown sources on your device's settings. This will allow you to install apps that are not from the Google Play Store.
  6. -
  7. Tap on the APK file and follow the instructions to install it on your device.
  8. -
  9. Once the installation is complete, you can launch the game and enjoy playing Brawlhalla 6.02 APK on your Android device.
  10. -
-

How to play Brawlhalla on your Android device?

-

Now that you have downloaded and installed Brawlhalla 6.02 APK on your Android device, you might be wondering how to play it and have fun. Here are some basic tips and tricks that will help you get started and improve your skills in Brawlhalla.

-

The basic mechanics and controls of the game

-

Brawlhalla is a 2D platform fighting game that involves up to eight players fighting each other on various stages. The goal is to knock your opponents off the stage by depleting their health or launching them with powerful attacks. The last player or team standing wins the match.

-

The game has simple controls that are easy to learn but hard to master. You can use either touch controls or a controller to play the game on your Android device. The touch controls consist of four buttons: Jump, Attack, Special, and Dodge/Throw. You can also swipe on the screen to move your character left or right. The controller layout depends on the type of controller you are using, but you can always customize it in the settings menu.

-

The game has two main types of attacks: light attacks and heavy attacks. Light attacks are quick and weak, while heavy attacks are slow and strong. You can also perform different attacks depending on the direction you are holding or the weapon you are using. Each Legend has two weapons that they can pick up during the match, and each weapon has its own set of attacks and combos. You can switch between weapons by throwing your current weapon with the Dodge/Throw button.

-

The game also has a dodge mechanic that lets you avoid incoming attacks or move faster. You can dodge in any direction by pressing the Dodge/Throw button and holding a direction. You can also perform a spot dodge by pressing the Dodge/Throw button without holding a direction. Dodging gives you invincibility frames, but it also has a cooldown, so you have to time it well.

-

The game also has a gravity cancel mechanic that lets you perform ground attacks in the air. You can do this by performing a spot dodge in the air and then pressing an attack button. This can be useful for extending your combos or surprising your opponents.

-

The tips and tricks to win more matches

-

Brawlhalla is a game that requires skill, strategy, and practice to win more matches. Here are some tips and tricks that will help you improve your gameplay and beat your opponents.

- -

The best Legends and weapons to use

-

Brawlhalla has over 50 Legends and 13 weapons to choose from, so it can be hard to decide which ones are the best for you. There is no definitive answer to this question, as different Legends and weapons have different advantages and disadvantages depending on your playstyle, preference, and skill level. However, here are some general guidelines that might help you choose your Legend and weapon:

- - - - - - - - - - - -< td> -
LegendWeapon 1Weapon 2Description
MakoKatarsGreatswordA new Legend who is a shark-themed fighter with a Katars and Greatsword combination. She has a unique moveset that allows her to dash, dive, and bite her enemies with her shark-like abilities.
BodvarSwordHammerA classic Legend who is a bear-themed fighter with a Sword and Hammer combination. He has a balanced moveset that allows him to deal damage, knockback, and stun his enemies with his bear-like strength.
HattoriSwordSpearA popular Legend who is a ninja-themed fighter with a Sword and Spear combination. She has a fast and agile moveset that allows her to dash, teleport, and slash her enemies with her ninja-like skills.
OrionSpearRocket LanceA mysterious Legend who is a space-themed fighter with a Spear and Rocket Lance combination. He has a powerful and versatile moveset that allows him to fly, charge, and blast his enemies with his space-like technology.
AdaBlastersSpearA futuristic Legend who is a cyber-themed fighter with a Blasters and Spear combination. She has a ranged and precise moveset that allows her to shoot, hack, and pierce her enemies with her cyber-like abilities.
LucienKatarsBlastersA stealthy Legend who is a thief-themed fighter with a Katars and Blasters combination. He has a sneaky and cunning moveset that allows him to evade, strike, and shoot his enemies with his thief-like tactics.
ScarletHammerRocket LanceA creative Legend who is an inventor-themed fighter with a Hammer and Rocket Lance combination. She has an inventive and explosive moveset that allows her to smash, drill, and rocket her enemies with her inventor-like gadgets.
TerosAxeHammerA brutal Legend who is a minotaur-themed fighter with an Axe and Hammer combination. He has a savage and destructive moveset that allows him to chop, slam, and crush his enemies with his minotaur-like fury.
KojiSwordBowA noble Legend who is a samurai-themed fighter with a Sword and Bow combination. He has a graceful and elegant moveset that allows him to slash, shoot, and slice his enemies with his samurai-like honor.
UlgrimAxeRocket LanceA loyal Legend who is a blacksmith-themed fighter with an Axe and Rocket Lance combination. He has a sturdy and reliable moveset that allows him to swing, thrust, and fire his enemies with his blacksmith-like craftsmanship.
-

Of course, these are not the only Legends and weapons that you can use in Brawlhalla. You can experiment with different combinations and find out which ones work best for you. You can also check out the stats, lore, and skins of each Legend and weapon in the game's menu.

-

Conclusion

-

Brawlhalla is a fun and exciting platform fighting game that you can play on your Android device. It is free-to-play, cross-play, and constantly updated with new content and features. It has over 50 Legends and 13 weapons to choose from, as well as various game modes and events to enjoy. Brawlhalla 6.02 APK is the latest version of the game for Android devices, and it brings some new features and improvements, such as Mako, the Greatsword, the Performance Mode, the Controller Support, and the Bug Fixes and Balance Changes. If you want to download and install Brawlhalla 6.02 APK on your Android device, you can follow the steps we have provided in this article. You can also use the tips and tricks we have shared to improve your gameplay and win more matches. We hope you have found this article helpful and informative. Thank you for reading and have fun playing Brawlhalla!

-

FAQs

-

Here are some frequently asked questions about Brawlhalla 6.02 APK:


197e85843d
-
-
\ No newline at end of file diff --git a/spaces/2ndelement/voicevox/test/test_mora_to_text.py b/spaces/2ndelement/voicevox/test/test_mora_to_text.py deleted file mode 100644 index 691681dd1b202731eb5dde45e083b4d6c7526743..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/test/test_mora_to_text.py +++ /dev/null @@ -1,29 +0,0 @@ -from unittest import TestCase - -# TODO: import from voicevox_engine.synthesis_engine.mora -from voicevox_engine.synthesis_engine.synthesis_engine_base import mora_to_text - - -class TestMoraToText(TestCase): - def test_voice(self): - self.assertEqual(mora_to_text("a"), "ア") - self.assertEqual(mora_to_text("i"), "イ") - self.assertEqual(mora_to_text("ka"), "カ") - self.assertEqual(mora_to_text("N"), "ン") - self.assertEqual(mora_to_text("cl"), "ッ") - self.assertEqual(mora_to_text("gye"), "ギェ") - self.assertEqual(mora_to_text("ye"), "イェ") - self.assertEqual(mora_to_text("wo"), "ウォ") - - def test_unvoice(self): - self.assertEqual(mora_to_text("A"), "ア") - self.assertEqual(mora_to_text("I"), "イ") - self.assertEqual(mora_to_text("kA"), "カ") - self.assertEqual(mora_to_text("gyE"), "ギェ") - self.assertEqual(mora_to_text("yE"), "イェ") - self.assertEqual(mora_to_text("wO"), "ウォ") - - def test_invalid_mora(self): - """変なモーラが来ても例外を投げない""" - self.assertEqual(mora_to_text("x"), "x") - self.assertEqual(mora_to_text(""), "") diff --git a/spaces/2ndelement/voicevox/test/test_user_dict.py b/spaces/2ndelement/voicevox/test/test_user_dict.py deleted file mode 100644 index 4280bbe53e9b2d71df7b9c996f56ade7b802d561..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/test/test_user_dict.py +++ /dev/null @@ -1,348 +0,0 @@ -import json -from copy import deepcopy -from pathlib import Path -from tempfile import TemporaryDirectory -from typing import Dict -from unittest import TestCase - -from fastapi import HTTPException -from pyopenjtalk import g2p, unset_user_dict - -from voicevox_engine.model import UserDictWord, WordTypes -from voicevox_engine.part_of_speech_data import MAX_PRIORITY, part_of_speech_data -from voicevox_engine.user_dict import ( - apply_word, - create_word, - delete_word, - import_user_dict, - read_dict, - rewrite_word, - update_dict, -) - -# jsonとして保存される正しい形式の辞書データ -valid_dict_dict_json = { - "aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": { - "surface": "test", - "cost": part_of_speech_data[WordTypes.PROPER_NOUN].cost_candidates[5], - "part_of_speech": "名詞", - "part_of_speech_detail_1": "固有名詞", - "part_of_speech_detail_2": "一般", - "part_of_speech_detail_3": "*", - "inflectional_type": "*", - "inflectional_form": "*", - "stem": "*", - "yomi": "テスト", - "pronunciation": "テスト", - "accent_type": 1, - "accent_associative_rule": "*", - }, -} - -# APIでやり取りされる正しい形式の辞書データ -valid_dict_dict_api = deepcopy(valid_dict_dict_json) -del valid_dict_dict_api["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"]["cost"] -valid_dict_dict_api["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"]["priority"] = 5 - -import_word = UserDictWord( - surface="test2", - priority=5, - part_of_speech="名詞", - part_of_speech_detail_1="固有名詞", - part_of_speech_detail_2="一般", - part_of_speech_detail_3="*", - inflectional_type="*", - inflectional_form="*", - stem="*", - yomi="テストツー", - pronunciation="テストツー", - accent_type=1, - accent_associative_rule="*", -) - - -def get_new_word(user_dict: Dict[str, UserDictWord]): - assert len(user_dict) == 2 or ( - len(user_dict) == 1 and "aab7dda2-0d97-43c8-8cb7-3f440dab9b4e" not in user_dict - ) - for word_uuid in user_dict.keys(): - if word_uuid == 
"aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": - continue - return user_dict[word_uuid] - raise AssertionError - - -class TestUserDict(TestCase): - def setUp(self): - self.tmp_dir = TemporaryDirectory() - self.tmp_dir_path = Path(self.tmp_dir.name) - - def tearDown(self): - unset_user_dict() - self.tmp_dir.cleanup() - - def test_read_not_exist_json(self): - self.assertEqual( - read_dict(user_dict_path=(self.tmp_dir_path / "not_exist.json")), - {}, - ) - - def test_create_word(self): - # 将来的に品詞などが追加された時にテストを増やす - self.assertEqual( - create_word(surface="test", pronunciation="テスト", accent_type=1), - UserDictWord( - surface="test", - priority=5, - part_of_speech="名詞", - part_of_speech_detail_1="固有名詞", - part_of_speech_detail_2="一般", - part_of_speech_detail_3="*", - inflectional_type="*", - inflectional_form="*", - stem="*", - yomi="テスト", - pronunciation="テスト", - accent_type=1, - accent_associative_rule="*", - ), - ) - - def test_apply_word_without_json(self): - user_dict_path = self.tmp_dir_path / "test_apply_word_without_json.json" - apply_word( - surface="test", - pronunciation="テスト", - accent_type=1, - user_dict_path=user_dict_path, - compiled_dict_path=(self.tmp_dir_path / "test_apply_word_without_json.dic"), - ) - res = read_dict(user_dict_path=user_dict_path) - self.assertEqual(len(res), 1) - new_word = get_new_word(res) - self.assertEqual( - ( - new_word.surface, - new_word.pronunciation, - new_word.accent_type, - ), - ("test", "テスト", 1), - ) - - def test_apply_word_with_json(self): - user_dict_path = self.tmp_dir_path / "test_apply_word_with_json.json" - user_dict_path.write_text( - json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" - ) - apply_word( - surface="test2", - pronunciation="テストツー", - accent_type=3, - user_dict_path=user_dict_path, - compiled_dict_path=(self.tmp_dir_path / "test_apply_word_with_json.dic"), - ) - res = read_dict(user_dict_path=user_dict_path) - self.assertEqual(len(res), 2) - new_word = get_new_word(res) - self.assertEqual( - ( - new_word.surface, - new_word.pronunciation, - new_word.accent_type, - ), - ("test2", "テストツー", 3), - ) - - def test_rewrite_word_invalid_id(self): - user_dict_path = self.tmp_dir_path / "test_rewrite_word_invalid_id.json" - user_dict_path.write_text( - json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" - ) - self.assertRaises( - HTTPException, - rewrite_word, - word_uuid="c2be4dc5-d07d-4767-8be1-04a1bb3f05a9", - surface="test2", - pronunciation="テストツー", - accent_type=2, - user_dict_path=user_dict_path, - compiled_dict_path=(self.tmp_dir_path / "test_rewrite_word_invalid_id.dic"), - ) - - def test_rewrite_word_valid_id(self): - user_dict_path = self.tmp_dir_path / "test_rewrite_word_valid_id.json" - user_dict_path.write_text( - json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" - ) - rewrite_word( - word_uuid="aab7dda2-0d97-43c8-8cb7-3f440dab9b4e", - surface="test2", - pronunciation="テストツー", - accent_type=2, - user_dict_path=user_dict_path, - compiled_dict_path=(self.tmp_dir_path / "test_rewrite_word_valid_id.dic"), - ) - new_word = read_dict(user_dict_path=user_dict_path)[ - "aab7dda2-0d97-43c8-8cb7-3f440dab9b4e" - ] - self.assertEqual( - (new_word.surface, new_word.pronunciation, new_word.accent_type), - ("test2", "テストツー", 2), - ) - - def test_delete_word_invalid_id(self): - user_dict_path = self.tmp_dir_path / "test_delete_word_invalid_id.json" - user_dict_path.write_text( - json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" - ) - self.assertRaises( - 
HTTPException, - delete_word, - word_uuid="c2be4dc5-d07d-4767-8be1-04a1bb3f05a9", - user_dict_path=user_dict_path, - compiled_dict_path=(self.tmp_dir_path / "test_delete_word_invalid_id.dic"), - ) - - def test_delete_word_valid_id(self): - user_dict_path = self.tmp_dir_path / "test_delete_word_valid_id.json" - user_dict_path.write_text( - json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" - ) - delete_word( - word_uuid="aab7dda2-0d97-43c8-8cb7-3f440dab9b4e", - user_dict_path=user_dict_path, - compiled_dict_path=(self.tmp_dir_path / "test_delete_word_valid_id.dic"), - ) - self.assertEqual(len(read_dict(user_dict_path=user_dict_path)), 0) - - def test_priority(self): - for pos in part_of_speech_data: - for i in range(MAX_PRIORITY + 1): - self.assertEqual( - create_word( - surface="test", - pronunciation="テスト", - accent_type=1, - word_type=pos, - priority=i, - ).priority, - i, - ) - - def test_import_dict(self): - user_dict_path = self.tmp_dir_path / "test_import_dict.json" - compiled_dict_path = self.tmp_dir_path / "test_import_dict.dic" - user_dict_path.write_text( - json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" - ) - import_user_dict( - {"b1affe2a-d5f0-4050-926c-f28e0c1d9a98": import_word}, - override=False, - user_dict_path=user_dict_path, - compiled_dict_path=compiled_dict_path, - ) - self.assertEqual( - read_dict(user_dict_path)["b1affe2a-d5f0-4050-926c-f28e0c1d9a98"], - import_word, - ) - self.assertEqual( - read_dict(user_dict_path)["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"], - UserDictWord(**valid_dict_dict_api["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"]), - ) - - def test_import_dict_no_override(self): - user_dict_path = self.tmp_dir_path / "test_import_dict_no_override.json" - compiled_dict_path = self.tmp_dir_path / "test_import_dict_no_override.dic" - user_dict_path.write_text( - json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" - ) - import_user_dict( - {"aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": import_word}, - override=False, - user_dict_path=user_dict_path, - compiled_dict_path=compiled_dict_path, - ) - self.assertEqual( - read_dict(user_dict_path)["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"], - UserDictWord(**valid_dict_dict_api["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"]), - ) - - def test_import_dict_override(self): - user_dict_path = self.tmp_dir_path / "test_import_dict_override.json" - compiled_dict_path = self.tmp_dir_path / "test_import_dict_override.dic" - user_dict_path.write_text( - json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" - ) - import_user_dict( - {"aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": import_word}, - override=True, - user_dict_path=user_dict_path, - compiled_dict_path=compiled_dict_path, - ) - self.assertEqual( - read_dict(user_dict_path)["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"], - import_word, - ) - - def test_import_invalid_word(self): - user_dict_path = self.tmp_dir_path / "test_import_invalid_dict.json" - compiled_dict_path = self.tmp_dir_path / "test_import_invalid_dict.dic" - invalid_accent_associative_rule_word = deepcopy(import_word) - invalid_accent_associative_rule_word.accent_associative_rule = "invalid" - user_dict_path.write_text( - json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" - ) - self.assertRaises( - AssertionError, - import_user_dict, - { - "aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": invalid_accent_associative_rule_word - }, - override=True, - user_dict_path=user_dict_path, - compiled_dict_path=compiled_dict_path, - ) - invalid_pos_word = 
deepcopy(import_word) - invalid_pos_word.context_id = 2 - invalid_pos_word.part_of_speech = "フィラー" - invalid_pos_word.part_of_speech_detail_1 = "*" - invalid_pos_word.part_of_speech_detail_2 = "*" - invalid_pos_word.part_of_speech_detail_3 = "*" - self.assertRaises( - ValueError, - import_user_dict, - {"aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": invalid_pos_word}, - override=True, - user_dict_path=user_dict_path, - compiled_dict_path=compiled_dict_path, - ) - - def test_update_dict(self): - user_dict_path = self.tmp_dir_path / "test_update_dict.json" - compiled_dict_path = self.tmp_dir_path / "test_update_dict.dic" - update_dict( - user_dict_path=user_dict_path, compiled_dict_path=compiled_dict_path - ) - test_text = "テスト用の文字列" - success_pronunciation = "デフォルトノジショデハゼッタイニセイセイサレナイヨミ" - - # 既に辞書に登録されていないか確認する - self.assertNotEqual(g2p(text=test_text, kana=True), success_pronunciation) - - apply_word( - surface=test_text, - pronunciation=success_pronunciation, - accent_type=1, - priority=10, - user_dict_path=user_dict_path, - compiled_dict_path=compiled_dict_path, - ) - self.assertEqual(g2p(text=test_text, kana=True), success_pronunciation) - - # 疑似的にエンジンを再起動する - unset_user_dict() - update_dict( - user_dict_path=user_dict_path, compiled_dict_path=compiled_dict_path - ) - - self.assertEqual(g2p(text=test_text, kana=True), success_pronunciation) diff --git a/spaces/2ndelement/voicevox/voicevox_engine/utility/mutex_utility.py b/spaces/2ndelement/voicevox/voicevox_engine/utility/mutex_utility.py deleted file mode 100644 index 09d8cb9680f71758018bffe82838a763ca46fe31..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/voicevox_engine/utility/mutex_utility.py +++ /dev/null @@ -1,15 +0,0 @@ -import threading - - -def mutex_wrapper(lock: threading.Lock): - def wrap(f): - def func(*args, **kw): - lock.acquire() - try: - return f(*args, **kw) - finally: - lock.release() - - return func - - return wrap diff --git a/spaces/3B-Group/ConvRe-Leaderboard/src/css_html.py b/spaces/3B-Group/ConvRe-Leaderboard/src/css_html.py deleted file mode 100644 index 7d55f75c9596fa6a91843f196078768033583b2c..0000000000000000000000000000000000000000 --- a/spaces/3B-Group/ConvRe-Leaderboard/src/css_html.py +++ /dev/null @@ -1,83 +0,0 @@ -# source: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/blob/main/src/assets/css_html_js.py -custom_css = """ -#changelog-text { - font-size: 16px !important; -} - -#changelog-text h2 { - font-size: 18px !important; -} - -.markdown-text { - font-size: 16px !important; -} - -#answer-text { - font-size: 28px !important; -} - -#models-to-add-text { - font-size: 18px !important; -} - -#citation-button span { - font-size: 16px !important; -} - -#citation-button textarea { - font-size: 16px !important; -} - -#citation-button > label > button { - margin: 6px; - transform: scale(1.3); -} - -#leaderboard-table { - margin-top: 15px -} - -#leaderboard-table-lite { - margin-top: 15px -} - -#search-bar-table-box > div:first-child { - background: none; - border: none; -} - -#search-bar { - padding: 0px; -} - -/* Hides the final AutoEvalColumn */ -#llm-benchmark-tab-table table td:last-child, -#llm-benchmark-tab-table table th:last-child { - display: none; -} - -/* Limit the width of the first AutoEvalColumn so that names don't expand too much */ -table td:first-child, -table th:first-child { - max-width: 400px; - overflow: auto; - white-space: nowrap; -} - -.tab-buttons button { - font-size: 20px; -} - -#scale-logo { - border-style: none !important; - box-shadow: none; 
- display: block; - margin-left: auto; - margin-right: auto; - max-width: 600px; -} - -#scale-logo .download { - display: none; -} -""" \ No newline at end of file diff --git a/spaces/4Taps/SadTalker/src/face3d/util/html.py b/spaces/4Taps/SadTalker/src/face3d/util/html.py deleted file mode 100644 index cc3262a1eafda34842e4dbad47bb6ba72f0c5a68..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/face3d/util/html.py +++ /dev/null @@ -1,86 +0,0 @@ -import dominate -from dominate.tags import meta, h3, table, tr, td, p, a, img, br -import os - - -class HTML: - """This HTML class allows us to save images and write texts into a single HTML file. - - It consists of functions such as (add a text header to the HTML file), - (add a row of images to the HTML file), and (save the HTML to the disk). - It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API. - """ - - def __init__(self, web_dir, title, refresh=0): - """Initialize the HTML classes - - Parameters: - web_dir (str) -- a directory that stores the webpage. HTML file will be created at /index.html; images will be saved at 0: - with self.doc.head: - meta(http_equiv="refresh", content=str(refresh)) - - def get_image_dir(self): - """Return the directory that stores images""" - return self.img_dir - - def add_header(self, text): - """Insert a header to the HTML file - - Parameters: - text (str) -- the header text - """ - with self.doc: - h3(text) - - def add_images(self, ims, txts, links, width=400): - """add images to the HTML file - - Parameters: - ims (str list) -- a list of image paths - txts (str list) -- a list of image names shown on the website - links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page - """ - self.t = table(border=1, style="table-layout: fixed;") # Insert a table - self.doc.add(self.t) - with self.t: - with tr(): - for im, txt, link in zip(ims, txts, links): - with td(style="word-wrap: break-word;", halign="center", valign="top"): - with p(): - with a(href=os.path.join('images', link)): - img(style="width:%dpx" % width, src=os.path.join('images', im)) - br() - p(txt) - - def save(self): - """save the current content to the HMTL file""" - html_file = '%s/index.html' % self.web_dir - f = open(html_file, 'wt') - f.write(self.doc.render()) - f.close() - - -if __name__ == '__main__': # we show an example usage here. - html = HTML('web/', 'test_html') - html.add_header('hello world') - - ims, txts, links = [], [], [] - for n in range(4): - ims.append('image_%d.png' % n) - txts.append('text_%d' % n) - links.append('image_%d.png' % n) - html.add_images(ims, txts, links) - html.save() diff --git a/spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py b/spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py deleted file mode 100644 index 73a5b836177b706c306e27875f8391c1aed4b948..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import layers_33966KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16, 32)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 16) - self.stg1_high_band_net = BaseASPPNet(2, 16) - - self.stg2_bridge = layers.Conv2DBNActiv(18, 8, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(8, 16) - - self.stg3_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(16, 32) - - self.out = nn.Conv2d(32, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(16, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(16, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/AIFILMS/generate_human_motion/VQ-Trans/visualize/render_mesh.py b/spaces/AIFILMS/generate_human_motion/VQ-Trans/visualize/render_mesh.py deleted file mode 100644 index d44d04f551ccb4f1ffc9efb4cb1a44c407ede836..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/generate_human_motion/VQ-Trans/visualize/render_mesh.py +++ /dev/null @@ -1,33 +0,0 @@ -import argparse -import os -from visualize import vis_utils -import shutil -from tqdm import tqdm - -if __name__ == 
'__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--input_path", type=str, required=True, help='stick figure mp4 file to be rendered.') - parser.add_argument("--cuda", type=bool, default=True, help='') - parser.add_argument("--device", type=int, default=0, help='') - params = parser.parse_args() - - assert params.input_path.endswith('.mp4') - parsed_name = os.path.basename(params.input_path).replace('.mp4', '').replace('sample', '').replace('rep', '') - sample_i, rep_i = [int(e) for e in parsed_name.split('_')] - npy_path = os.path.join(os.path.dirname(params.input_path), 'results.npy') - out_npy_path = params.input_path.replace('.mp4', '_smpl_params.npy') - assert os.path.exists(npy_path) - results_dir = params.input_path.replace('.mp4', '_obj') - if os.path.exists(results_dir): - shutil.rmtree(results_dir) - os.makedirs(results_dir) - - npy2obj = vis_utils.npy2obj(npy_path, sample_i, rep_i, - device=params.device, cuda=params.cuda) - - print('Saving obj files to [{}]'.format(os.path.abspath(results_dir))) - for frame_i in tqdm(range(npy2obj.real_num_frames)): - npy2obj.save_obj(os.path.join(results_dir, 'frame{:03d}.obj'.format(frame_i)), frame_i) - - print('Saving SMPL params to [{}]'.format(os.path.abspath(out_npy_path))) - npy2obj.save_npy(out_npy_path) diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/models/autoencoder_multi.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/models/autoencoder_multi.py deleted file mode 100644 index cc4f830e24e99950f5ff412e8c5776e6a3489bf2..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio/ldm/models/autoencoder_multi.py +++ /dev/null @@ -1,201 +0,0 @@ -""" -与autoencoder.py的区别在于,autoencoder.py计算loss时只有一个discriminator,而此处又多了个multiwindowDiscriminator,所以优化器 -优化的参数改为: -opt_disc = torch.optim.Adam(list(self.loss.discriminator.parameters()) + list(self.loss.discriminator_multi.parameters()), - lr=lr, betas=(0.5, 0.9)) -""" - -import os -import torch -import pytorch_lightning as pl -import torch.nn.functional as F -from contextlib import contextmanager - -from packaging import version -import numpy as np -from ldm.modules.diffusionmodules.model import Encoder, Decoder -from ldm.modules.distributions.distributions import DiagonalGaussianDistribution -from torch.optim.lr_scheduler import LambdaLR -from ldm.util import instantiate_from_config - - - -class AutoencoderKL(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - ): - super().__init__() - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - assert ddconfig["double_z"] - self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - self.embed_dim = embed_dim - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - self.load_state_dict(sd, strict=False) - 
print(f"Restored from {path}") - - def encode(self, x): - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z): - z = self.post_quant_conv(z) - dec = self.decoder(z) - return dec - - def forward(self, input, sample_posterior=True): - posterior = self.encode(input) - if sample_posterior: - z = posterior.sample() - else: - z = posterior.mode() - dec = self.decode(z) - return dec, posterior - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - - if optimizer_idx == 0: - # train encoder+decoder+logvar - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return aeloss - - if optimizer_idx == 1: - # train the discriminator - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - - self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return discloss - - def validation_step(self, batch, batch_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, - last_layer=self.get_last_layer(), split="val") - - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, - last_layer=self.get_last_layer(), split="val") - - self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def test_step(self, batch, batch_idx): - inputs = self.get_input(batch, self.image_key)# inputs shape:(b,c,mel_len,T) or (b,c,h,w) - reconstructions, posterior = self(inputs)# reconstructions:(b,c,mel_len,T) or (b,c,h,w) - reconstructions = (reconstructions + 1)/2 # to mel scale - test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path) - savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class') - if not os.path.exists(savedir): - os.makedirs(savedir) - - file_names = batch['f_name'] - # print(f"reconstructions.shape:{reconstructions.shape}",file_names) - reconstructions = reconstructions.cpu().numpy().squeeze(1) # squuze channel dim - for b in range(reconstructions.shape[0]): - vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num - v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:] - save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}.npy') - np.save(save_img_path,reconstructions[b]) - - return None - - def configure_optimizers(self): - lr = self.learning_rate - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr, betas=(0.5, 0.9)) - opt_disc = 
torch.optim.Adam(list(self.loss.discriminator.parameters()) + list(self.loss.discriminator_multi.parameters()), - lr=lr, betas=(0.5, 0.9)) - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - @torch.no_grad() - def log_images(self, batch, only_inputs=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if not only_inputs: - xrec, posterior = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["samples"] = self.decode(torch.randn_like(posterior.sample())) - log["reconstructions"] = xrec - log["inputs"] = x - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. - return x - - -class IdentityFirstStage(torch.nn.Module): - def __init__(self, *args, vq_interface=False, **kwargs): - self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff - super().__init__() - - def encode(self, x, *args, **kwargs): - return x - - def decode(self, x, *args, **kwargs): - return x - - def quantize(self, x, *args, **kwargs): - if self.vq_interface: - return x, None, [None, None, None] - return x - - def forward(self, x, *args, **kwargs): - return x \ No newline at end of file diff --git a/spaces/AIWaves/Debate/src/agents/SOP.py b/spaces/AIWaves/Debate/src/agents/SOP.py deleted file mode 100644 index 7fc3e2f5e0c496774d9967fb88593fa4c88347e2..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/Debate/src/agents/SOP.py +++ /dev/null @@ -1,296 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The AIWaves Inc. team. - -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
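Stepping back to `autoencoder_multi.py` above: its Chinese module docstring says that, unlike `autoencoder.py`, the loss here carries both a `discriminator` and a multi-window `discriminator_multi`, so the discriminator optimizer has to collect the parameters of both. A condensed sketch of that two-optimizer pairing, with placeholder modules standing in for the real encoder/decoder/loss:

```python
import itertools

import torch
from torch import nn

# Placeholders standing in for the real submodules of AutoencoderKL and its loss;
# only the optimizer wiring is the point here.
encoder, decoder = nn.Linear(8, 4), nn.Linear(4, 8)
quant_conv, post_quant_conv = nn.Linear(4, 4), nn.Linear(4, 4)
discriminator, discriminator_multi = nn.Linear(8, 1), nn.Linear(8, 1)

lr = 4.5e-6  # illustrative learning rate

# Optimizer 0: everything on the autoencoder side (as in configure_optimizers above).
opt_ae = torch.optim.Adam(
    itertools.chain(encoder.parameters(), decoder.parameters(),
                    quant_conv.parameters(), post_quant_conv.parameters()),
    lr=lr, betas=(0.5, 0.9),
)

# Optimizer 1: both discriminators share a single optimizer -- the difference from
# the single-discriminator autoencoder.py that the module docstring highlights.
opt_disc = torch.optim.Adam(
    itertools.chain(discriminator.parameters(), discriminator_multi.parameters()),
    lr=lr, betas=(0.5, 0.9),
)
```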
-"""standard operation procedure of an LLM Autonomous agent""" -import random -from LLM.base_LLM import * -from State import State -from utils import extract, get_relevant_history -from Memory import Memory -from Prompt import * -import json -import os - -class SOP: - """ - Responsible for managing the operational processes of all agents - """ - - # SOP should have args : "states" "relations" "root" - - def __init__(self, **kwargs): - self.controller_dict = {} - self.LLM = init_LLM("logs/god",**kwargs) - - self.states = {} - self.init_states(kwargs["states"]) - self.init_relation(kwargs["relations"]) - for state_name, states_dict in kwargs["states"].items(): - if state_name != "end_state" and "controller" in states_dict: - self.controller_dict[state_name] = states_dict["controller"] - - self.user_names = kwargs["user_names"] if "user_names" in kwargs else [] - self.root = self.states[kwargs["root"]] - self.current_state = self.root - self.finish_state_name = ( - kwargs["finish_state_name"] - if "finish_state_name" in kwargs - else "end_state" - ) - self.roles_to_names = None - self.names_to_roles = None - self.finished = False - - @classmethod - def from_config(cls, config_path): - with open(config_path) as f: - config = json.load(f) - os.environ.clear() - for key,value in config["config"].items(): - if key == "API_BASE": - if value == "": - pass - else: - os.environ[key] = value - # assert "API_KEY" in os.environ and os.environ["API_KEY"] != "API_KEY","Please go to config.json to set API_KEY" - - sop = SOP(**config) - return sop - - def init_states(self, states_dict): - for state_name, state_dict in states_dict.items(): - state_dict["name"] = state_name - self.states[state_name] = State(**state_dict) - - def init_relation(self, relations): - for state_name, state_relation in relations.items(): - for idx, next_state_name in state_relation.items(): - self.states[state_name].next_states[idx] = self.states[next_state_name] - - def transit(self, chat_history, **kwargs): - """ - Determine the next state based on the current situation - Return : - next_state(State) : the next state - """ - # 如果是单一循环节点,则一直循环即可 - # If it is a single loop node, just keep looping - if len(self.current_state.next_states) == 1: - next_state = "0" - - # 否则则需要controller去判断进入哪一节点 - # Otherwise, the controller needs to determine which node to enter. 
- else: - current_state = self.current_state - controller_dict = self.controller_dict[current_state.name] - relevant_history = kwargs["relevant_history"] - - max_chat_nums = controller_dict["max_chat_nums"] if "max_chat_nums" in controller_dict else 1000 - if current_state.chat_nums>=max_chat_nums: - return self.current_state.next_states["1"] - - - # 否则则让controller判断是否结束 - # Otherwise, let the controller judge whether to end - judge_system_prompt = controller_dict["judge_system_prompt"] - environment_prompt = eval(Get_environment_prompt) if current_state.environment_prompt else "" - transit_system_prompt = eval(Transit_system_prompt) - - judge_last_prompt = controller_dict["judge_last_prompt"] - transit_last_prompt = eval(Transit_last_prompt) - - - - environment = kwargs["environment"] - environment_summary = environment.shared_memory["short_term_memory"] - chat_history_message = Memory.get_chat_history(chat_history) - query = chat_history[-1].get_query() - - chat_messages = [ - { - "role": "user", - "content": eval(Transit_message) - } - ] - - extract_words = controller_dict["judge_extract_words"] if "judge_extract_words" in controller_dict else "end" - - - response = self.LLM.get_response( - chat_messages, transit_system_prompt, transit_last_prompt, stream=False, **kwargs - ) - next_state = ( - response if response.isdigit() else extract(response, extract_words) - ) - - # 如果没有parse出来则继续循环 - # If no parse comes out, continue looping - if not next_state.isdigit(): - next_state = "0" - - next_state = self.current_state.next_states[next_state] - return next_state - - - def route(self, chat_history, **kwargs): - """ - Determine the role that needs action based on the current situation - Return : - current_agent(Agent) : the next act agent - """ - - agents = kwargs["agents"] - - # 知道进入哪一状态后开始分配角色,如果该状态下只有一个角色则直接分配给他 - # Start assigning roles after knowing which state you have entered. If there is only one role in that state, assign it directly to him. - if len(self.current_state.roles) == 1: - next_role = self.current_state.roles[0] - - - - # 否则controller进行分配 - # Otherwise the controller determines - else: - relevant_history = kwargs["relevant_history"] - controller_type = ( - self.controller_dict[self.current_state.name]["controller_type"] - if "controller_type" in self.controller_dict[self.current_state.name] - else "order" - ) - - - # 如果是rule 控制器,则交由LLM进行分配角色 - # If controller type is rule, it is left to LLM to assign roles. 
- if controller_type == "rule": - controller_dict = self.controller_dict[self.current_state.name] - - call_last_prompt = controller_dict["call_last_prompt"] if "call_last_prompt" in controller_dict else "" - - allocate_prompt = "" - roles = list(set(self.current_state.roles)) - for role in roles: - allocate_prompt += eval(Allocate_component) - - call_system_prompt = controller_dict["call_system_prompt"] if "call_system_prompt" in controller_dict else "" - environment_prompt = eval(Get_environment_prompt) if self.current_state.environment_prompt else "" - # call_system_prompt + environment + allocate_prompt - call_system_prompt = eval(Call_system_prompt) - - query = chat_history[-1].get_query() - last_name = chat_history[-1].send_name - # last_prompt: note + last_prompt + query - call_last_prompt =eval(Call_last_prompt) - - - chat_history_message = Memory.get_chat_history(chat_history) - # Intermediate historical conversation records - chat_messages = [ - { - "role": "user", - "content": eval(Call_message), - } - ] - - extract_words = controller_dict["call_extract_words"] if "call_extract_words" in controller_dict else "end" - - response = self.LLM.get_response( - chat_messages, call_system_prompt, call_last_prompt, stream=False, **kwargs - ) - - # get next role - next_role = extract(response, extract_words) - - # Speak in order - elif controller_type == "order": - # If there is no begin role, it will be given directly to the first person. - if not self.current_state.current_role: - next_role = self.current_state.roles[0] - # otherwise first - else: - self.current_state.index += 1 - self.current_state.index = (self.current_state.index) % len(self.current_state.roles) - next_role = self.current_state.roles[self.current_state.index] - # random speak - elif controller_type == "random": - next_role = random.choice(self.current_state.roles) - - # 如果下一角色不在,则随机挑选一个 - # If the next character is not available, pick one at random - if next_role not in self.current_state.roles: - next_role = random.choice(self.current_state.roles) - - self.current_state.current_role = next_role - - next_agent = agents[self.roles_to_names[self.current_state.name][next_role]] - - return next_agent - - def next(self, environment, agents): - """ - Determine the next state and the agent that needs action based on the current situation - """ - - # 如果是第一次进入该状态 - # If it is the first time to enter this state - - if self.current_state.is_begin: - agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role] - agent = agents[agent_name] - return self.current_state,agent - - - # get relevant history - query = environment.shared_memory["long_term_memory"][-1].content - relevant_history = get_relevant_history( - query, - environment.shared_memory["long_term_memory"][:-1], - environment.shared_memory["chat_embeddings"][:-1], - ) - relevant_history = Memory.get_chat_history(relevant_history) - - - - next_state = self.transit( - chat_history=environment.shared_memory["long_term_memory"][ - environment.current_chat_history_idx : - ], - relevant_history=relevant_history, - environment=environment, - ) - # 如果进入终止节点,则直接终止 - # If you enter the termination node, terminate directly - if next_state.name == self.finish_state_name: - self.finished = True - return None, None - - self.current_state = next_state - - # 如果是首次进入该节点且有开场白,则直接分配给开场角色 - # If it is the first time to enter the state and there is a begin query, it will be directly assigned to the begin role. 
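For orientation, `SOP.from_config` above loads a JSON whose top level feeds straight into `SOP(**config)`: a `states` dict (each state optionally carrying a `controller`), a `relations` dict wiring states to their numbered next states, a `root` state, and a `config` block for API credentials. A minimal, hypothetical configuration is sketched below; the controller keys follow the names read by `transit` and `route`, while the per-state fields (`roles`, `begin_role`, `begin_query`) and the `<end>...</end>` answer format are assumptions, since the `State` class and the `extract` helper are defined elsewhere.

```python
import json

# Minimal, illustrative SOP configuration. Key names mirror what SOP.__init__,
# init_states, init_relation, transit and route read; the State-level fields are assumed.
minimal_config = {
    "config": {"API_KEY": "sk-...", "API_BASE": ""},  # read by SOP.from_config for env vars
    "root": "debate_state",
    "finish_state_name": "end_state",
    "states": {
        "debate_state": {
            "roles": ["Affirmative", "Negative"],
            "begin_role": "Affirmative",
            "begin_query": "Please give your opening statement.",
            "controller": {
                "controller_type": "order",  # "rule" (LLM picks the speaker), "order", or "random"
                "max_chat_nums": 6,          # force the "1" transition after 6 turns
                "judge_system_prompt": "Decide whether the debate has reached a conclusion.",
                # The <end>...</end> wrapping is an assumption about what utils.extract parses.
                "judge_last_prompt": "Reply <end>0</end> to keep debating or <end>1</end> to finish.",
                "judge_extract_words": "end",
            },
        },
        "end_state": {"roles": ["Judge"]},
    },
    # relations: state -> {branch index -> next state}; "0" loops, "1" advances.
    "relations": {
        "debate_state": {"0": "debate_state", "1": "end_state"},
        "end_state": {"0": "end_state"},
    },
}

# Dumping this to config.json gives the file that SOP.from_config expects to read back.
print(json.dumps(minimal_config, ensure_ascii=False, indent=2))
```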
- if self.current_state.is_begin and self.current_state.begin_role: - agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role] - agent = agents[agent_name] - return self.current_state,agent - - - next_agent = self.route( - chat_history=environment.shared_memory["long_term_memory"][ - environment.current_chat_history_idx : - ], - agents = agents, - relevant_history=relevant_history, - ) - - return self.current_state, next_agent diff --git a/spaces/AIZero2Hero4Health/2-BiomedEntityRecognition-GR/app.py b/spaces/AIZero2Hero4Health/2-BiomedEntityRecognition-GR/app.py deleted file mode 100644 index 02c37db97a9bdb34d6da850963d156ef27384571..0000000000000000000000000000000000000000 --- a/spaces/AIZero2Hero4Health/2-BiomedEntityRecognition-GR/app.py +++ /dev/null @@ -1,81 +0,0 @@ -import gradio as gr -import pandas as pd -import json -from collections import defaultdict - -# Create tokenizer for biomed model -from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification -tokenizer = AutoTokenizer.from_pretrained("d4data/biomedical-ner-all") -model = AutoModelForTokenClassification.from_pretrained("d4data/biomedical-ner-all") -pipe = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") - -# Matplotlib for entity graph -import matplotlib.pyplot as plt -plt.switch_backend("Agg") - -# Load examples from JSON -EXAMPLES = {} -with open("examples.json", "r") as f: - example_json = json.load(f) - EXAMPLES = {x["text"]: x["label"] for x in example_json} - -def group_by_entity(raw): - out = defaultdict(int) - for ent in raw: - out[ent["entity_group"]] += 1 - # out["total"] = sum(out.values()) - return out - - -def plot_to_figure(grouped): - fig = plt.figure() - plt.bar(x=list(grouped.keys()), height=list(grouped.values())) - plt.margins(0.2) - plt.subplots_adjust(bottom=0.4) - plt.xticks(rotation=90) - return fig - - -def ner(text): - raw = pipe(text) - ner_content = { - "text": text, - "entities": [ - { - "entity": x["entity_group"], - "word": x["word"], - "score": x["score"], - "start": x["start"], - "end": x["end"], - } - for x in raw - ], - } - - grouped = group_by_entity(raw) - figure = plot_to_figure(grouped) - label = EXAMPLES.get(text, "Unknown") - - meta = { - "entity_counts": grouped, - "entities": len(set(grouped.keys())), - "counts": sum(grouped.values()), - } - - return (ner_content, meta, label, figure) - - -interface = gr.Interface( - ner, - inputs=gr.Textbox(label="Note text", value=""), - outputs=[ - gr.HighlightedText(label="NER", combine_adjacent=True), - gr.JSON(label="Entity Counts"), - gr.Label(label="Rating"), - gr.Plot(label="Bar"), - ], - examples=list(EXAMPLES.keys()), - allow_flagging="never", -) - -interface.launch() \ No newline at end of file diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/analytics.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/analytics.ts deleted file mode 100644 index 72fd5d70df54c0436a8aa3f5fca4dfbcb5f64ff5..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/utils/analytics.ts +++ /dev/null @@ -1,39 +0,0 @@ -export interface GAEvent { - hitType: "event"; - eventCategory: string; - eventAction: string; - eventLabel?: string; - eventValue?: number; -} - -// Send a Google Analytics event -export function sendAnalyticsEvent({ - eventCategory, - eventAction, - eventLabel, - eventValue, -}: Omit): void { - // Mandatory fields - const event: GAEvent = { - hitType: "event", - eventCategory, - eventAction, - }; - // Optional 
fields - if (eventLabel) { - event.eventLabel = eventLabel; - } - if (eventValue) { - event.eventValue = eventValue; - } - - // @ts-expect-error typescript doesn't know gtag is on the window object - if (!!window?.gtag && typeof window?.gtag === "function") { - // @ts-expect-error typescript doesn't know gtag is on the window object - window?.gtag("event", eventAction, { - event_category: event.eventCategory, - event_label: event.eventLabel, - value: event.eventValue, - }); - } -} diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/training/projectors/w_projector.py b/spaces/Amrrs/DragGan-Inversion/PTI/training/projectors/w_projector.py deleted file mode 100644 index a4caffc368f87e06b41eaac2807a273079708840..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/PTI/training/projectors/w_projector.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Project given image to the latent space of pretrained network pickle.""" - -import copy -import wandb -import numpy as np -import torch -import torch.nn.functional as F -from tqdm import tqdm -from PTI.configs import global_config, hyperparameters -from PTI.utils import log_utils -import dnnlib - - -def project( - G, - target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution - *, - num_steps=1000, - w_avg_samples=10000, - initial_learning_rate=0.01, - initial_noise_factor=0.05, - lr_rampdown_length=0.25, - lr_rampup_length=0.05, - noise_ramp_length=0.75, - regularize_noise_weight=1e5, - verbose=False, - device: torch.device, - use_wandb=False, - initial_w=None, - image_log_step=global_config.image_rec_result_log_snapshot, - w_name: str -): - assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution),print(target.shape,G.img_resolution) - - def logprint(*args): - if verbose: - print(*args) - - G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float() # type: ignore - - # Compute w stats. - logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...') - z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim) - w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C] - w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C] - w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C] - w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device) - w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5 - - start_w = initial_w if initial_w is not None else w_avg - - # Setup noise inputs. - noise_bufs = {name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name} - - # Load VGG16 feature detector. - url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt' - with dnnlib.util.open_url(url) as f: - vgg16 = torch.jit.load(f).eval().to(device) - - # Features for target image. 
- target_images = target.unsqueeze(0).to(device).to(torch.float32) - if target_images.shape[2] > 256: - target_images = F.interpolate(target_images, size=(256, 256), mode='area') - target_features = vgg16(target_images, resize_images=False, return_lpips=True) - - w_opt = torch.tensor(start_w, dtype=torch.float32, device=device, - requires_grad=True) # pylint: disable=not-callable - optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), - lr=hyperparameters.first_inv_lr) - - # Init noise. - for buf in noise_bufs.values(): - buf[:] = torch.randn_like(buf) - buf.requires_grad = True - - for step in tqdm(range(num_steps)): - - # Learning rate schedule. - t = step / num_steps - w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2 - lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length) - lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi) - lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length) - lr = initial_learning_rate * lr_ramp - for param_group in optimizer.param_groups: - param_group['lr'] = lr - - # Synth images from opt_w. - w_noise = torch.randn_like(w_opt) * w_noise_scale - ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1]) - synth_images = G.synthesis(ws, noise_mode='const', force_fp32=True) - - # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images. - synth_images = (synth_images + 1) * (255 / 2) - if synth_images.shape[2] > 256: - synth_images = F.interpolate(synth_images, size=(256, 256), mode='area') - - # Features for synth images. - synth_features = vgg16(synth_images, resize_images=False, return_lpips=True) - dist = (target_features - synth_features).square().sum() - - # Noise regularization. - reg_loss = 0.0 - for v in noise_bufs.values(): - noise = v[None, None, :, :] # must be [1,1,H,W] for F.avg_pool2d() - while True: - reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2 - reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2 - if noise.shape[2] <= 8: - break - noise = F.avg_pool2d(noise, kernel_size=2) - loss = dist + reg_loss * regularize_noise_weight - - if step % image_log_step == 0: - with torch.no_grad(): - if use_wandb: - global_config.training_step += 1 - wandb.log({f'first projection _{w_name}': loss.detach().cpu()}, step=global_config.training_step) - log_utils.log_image_from_w(w_opt.repeat([1, G.mapping.num_ws, 1]), G, w_name) - - # Step - optimizer.zero_grad(set_to_none=True) - loss.backward() - optimizer.step() - logprint(f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}') - - # Normalize noise. 
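The optimization loop above adjusts the learning rate every step: a short linear warm-up, a flat middle, and a cosine ramp-down over roughly the last quarter of the run. Pulled out of the loop, the same schedule (with the default values from `project`) looks like this:

```python
import numpy as np


def projection_lr(step, num_steps, initial_lr=0.01,
                  lr_rampup_length=0.05, lr_rampdown_length=0.25):
    """The per-step learning-rate schedule used in w_projector.project."""
    t = step / num_steps
    lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)  # linear ramp-down near the end
    lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)        # smoothed into a cosine
    lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)   # linear warm-up at the start
    return initial_lr * lr_ramp


# Over a 1000-step run: ~0 at step 0, the full 0.01 through the middle,
# and a smooth decay back toward 0 over the final 25% of steps.
for s in (0, 25, 50, 500, 800, 900, 999):
    print(s, round(projection_lr(s, 1000), 6))
```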
- with torch.no_grad(): - for buf in noise_bufs.values(): - buf -= buf.mean() - buf *= buf.square().mean().rsqrt() - - del G - return w_opt.repeat([1, 18, 1]) diff --git a/spaces/Amrrs/DragGan-Inversion/gradio_utils/utils.py b/spaces/Amrrs/DragGan-Inversion/gradio_utils/utils.py deleted file mode 100644 index d4e760e1515f3f69b11d11426ac3e8fa51f1a99c..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/gradio_utils/utils.py +++ /dev/null @@ -1,154 +0,0 @@ -import gradio as gr -import numpy as np -from PIL import Image, ImageDraw - - -class ImageMask(gr.components.Image): - """ - Sets: source="canvas", tool="sketch" - """ - - is_template = True - - def __init__(self, **kwargs): - super().__init__(source="upload", - tool="sketch", - interactive=False, - **kwargs) - - def preprocess(self, x): - if x is None: - return x - if self.tool == "sketch" and self.source in ["upload", "webcam" - ] and type(x) != dict: - decode_image = gr.processing_utils.decode_base64_to_image(x) - width, height = decode_image.size - mask = np.ones((height, width, 4), dtype=np.uint8) - mask[..., -1] = 255 - mask = self.postprocess(mask) - x = {'image': x, 'mask': mask} - return super().preprocess(x) - - -def get_valid_mask(mask: np.ndarray): - """Convert mask from gr.Image(0 to 255, RGBA) to binary mask. - """ - if mask.ndim == 3: - mask_pil = Image.fromarray(mask).convert('L') - mask = np.array(mask_pil) - if mask.max() == 255: - mask = mask / 255 - return mask - - -def draw_points_on_image(image, - points, - curr_point=None, - highlight_all=True, - radius_scale=0.01): - overlay_rgba = Image.new("RGBA", image.size, 0) - overlay_draw = ImageDraw.Draw(overlay_rgba) - for point_key, point in points.items(): - if ((curr_point is not None and curr_point == point_key) - or highlight_all): - p_color = (255, 0, 0) - t_color = (0, 0, 255) - - else: - p_color = (255, 0, 0, 35) - t_color = (0, 0, 255, 35) - - rad_draw = int(image.size[0] * radius_scale) - - p_start = point.get("start_temp", point["start"]) - p_target = point["target"] - - if p_start is not None and p_target is not None: - p_draw = int(p_start[0]), int(p_start[1]) - t_draw = int(p_target[0]), int(p_target[1]) - - overlay_draw.line( - (p_draw[0], p_draw[1], t_draw[0], t_draw[1]), - fill=(255, 255, 0), - width=2, - ) - - if p_start is not None: - p_draw = int(p_start[0]), int(p_start[1]) - overlay_draw.ellipse( - ( - p_draw[0] - rad_draw, - p_draw[1] - rad_draw, - p_draw[0] + rad_draw, - p_draw[1] + rad_draw, - ), - fill=p_color, - ) - - if curr_point is not None and curr_point == point_key: - # overlay_draw.text(p_draw, "p", font=font, align="center", fill=(0, 0, 0)) - overlay_draw.text(p_draw, "p", align="center", fill=(0, 0, 0)) - - if p_target is not None: - t_draw = int(p_target[0]), int(p_target[1]) - overlay_draw.ellipse( - ( - t_draw[0] - rad_draw, - t_draw[1] - rad_draw, - t_draw[0] + rad_draw, - t_draw[1] + rad_draw, - ), - fill=t_color, - ) - - if curr_point is not None and curr_point == point_key: - # overlay_draw.text(t_draw, "t", font=font, align="center", fill=(0, 0, 0)) - overlay_draw.text(t_draw, "t", align="center", fill=(0, 0, 0)) - - return Image.alpha_composite(image.convert("RGBA"), - overlay_rgba).convert("RGB") - - -def draw_mask_on_image(image, mask): - im_mask = np.uint8(mask * 255) - im_mask_rgba = np.concatenate( - ( - np.tile(im_mask[..., None], [1, 1, 3]), - 45 * np.ones( - (im_mask.shape[0], im_mask.shape[1], 1), dtype=np.uint8), - ), - axis=-1, - ) - im_mask_rgba = 
Image.fromarray(im_mask_rgba).convert("RGBA") - - return Image.alpha_composite(image.convert("RGBA"), - im_mask_rgba).convert("RGB") - - -def on_change_single_global_state(keys, - value, - global_state, - map_transform=None): - if map_transform is not None: - value = map_transform(value) - - curr_state = global_state - if isinstance(keys, str): - last_key = keys - - else: - for k in keys[:-1]: - curr_state = curr_state[k] - - last_key = keys[-1] - - curr_state[last_key] = value - return global_state - - -def get_latest_points_pair(points_dict): - if not points_dict: - return None - point_idx = list(points_dict.keys()) - latest_point_idx = max(point_idx) - return latest_point_idx diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/training_scripts/sg2/training/dataset.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/training_scripts/sg2/training/dataset.py deleted file mode 100644 index cfb6ff76e18cb42a9493e2ddae1d843895acdadc..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/training_scripts/sg2/training/dataset.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import os -import numpy as np -import zipfile -import PIL.Image -import json -import torch -import dnnlib -import cv2 -from collections import Counter - - -try: - import pyspng -except ImportError: - pyspng = None - -# ---------------------------------------------------------------------------- - - -class Dataset(torch.utils.data.Dataset): - def __init__(self, - name, # Name of the dataset. - raw_shape, # Shape of the raw image data (NCHW). - # Artificially limit the size of the dataset. None = no limit. Applied before xflip. - max_size=None, - # Enable conditioning labels? False = label dimension is zero. - use_labels=False, - # Artificially double the size of the dataset via x-flips. Applied after max_size. - xflip=False, - # Random seed to use when applying max_size. - random_seed=0, - square=False, - ): - # print(' Inside Dataset ') - self._name = name - self._raw_shape = list(raw_shape) - self._use_labels = use_labels - self._raw_labels = None - self._label_shape = None - self._square = square - - # Apply max_size. - self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64) - if (max_size is not None) and (self._raw_idx.size > max_size): - np.random.RandomState(random_seed).shuffle(self._raw_idx) - self._raw_idx = np.sort(self._raw_idx[:max_size]) - - # Apply xflip. 
- self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8) - if xflip: - self._raw_idx = np.tile(self._raw_idx, 2) - self._xflip = np.concatenate( - [self._xflip, np.ones_like(self._xflip)]) - - def _get_raw_labels(self): - if self._raw_labels is None: - self._raw_labels = self._load_raw_labels() if self._use_labels else None - if self._raw_labels is None: - self._raw_labels = np.zeros( - [self._raw_shape[0], 0], dtype=np.float32) - assert isinstance(self._raw_labels, np.ndarray) - assert self._raw_labels.shape[0] == self._raw_shape[0] - assert self._raw_labels.dtype in [np.float32, np.int64] - if self._raw_labels.dtype == np.int64: - assert self._raw_labels.ndim == 1 - assert np.all(self._raw_labels >= 0) - return self._raw_labels - - def close(self): # to be overridden by subclass - pass - - def _load_raw_image(self, raw_idx): # to be overridden by subclass - raise NotImplementedError - - def _load_raw_labels(self): # to be overridden by subclass - raise NotImplementedError - - def __getstate__(self): - return dict(self.__dict__, _raw_labels=None) - - def __del__(self): - try: - self.close() - except: - pass - - def __len__(self): - return self._raw_idx.size - - def __getitem__(self, idx): - image = self._load_raw_image(self._raw_idx[idx]) - assert isinstance(image, np.ndarray) - assert list(image.shape) == self.image_shape - assert image.dtype == np.uint8 - if self._xflip[idx]: - assert image.ndim == 3 # CHW - image = image[:, :, ::-1] - return image.copy(), self.get_label(idx) - - def get_label(self, idx): - label = self._get_raw_labels()[self._raw_idx[idx]] - if label.dtype == np.int64: - onehot = np.zeros(self.label_shape, dtype=np.float32) - onehot[label] = 1 - label = onehot - return label.copy() - - def get_details(self, idx): - d = dnnlib.EasyDict() - d.raw_idx = int(self._raw_idx[idx]) - d.xflip = (int(self._xflip[idx]) != 0) - d.raw_label = self._get_raw_labels()[d.raw_idx].copy() - return d - - @property - def name(self): - return self._name - - @property - def image_shape(self): - return list(self._raw_shape[1:]) - - @property - def num_channels(self): - assert len(self.image_shape) == 3 # CHW - return self.image_shape[0] - - @property - def resolution(self): - assert len(self.image_shape) == 3 # CHW - if self._square: - assert self.image_shape[1] == self.image_shape[2] - else: - assert self.image_shape[1] == self.image_shape[2] * 2 - return self.image_shape[1] - - @property - def label_shape(self): - if self._label_shape is None: - raw_labels = self._get_raw_labels() - if raw_labels.dtype == np.int64: - self._label_shape = [int(np.max(raw_labels)) + 1] - else: - self._label_shape = raw_labels.shape[1:] - return list(self._label_shape) - - @property - def label_dim(self): - assert len(self.label_shape) == 1 - return self.label_shape[0] - - @property - def has_labels(self): - return any(x != 0 for x in self.label_shape) - - @property - def has_onehot_labels(self): - return self._get_raw_labels().dtype == np.int64 - -# ---------------------------------------------------------------------------- - - -class ImageFolderDataset(Dataset): - def __init__(self, - path, # Path to directory or zip. - # Ensure specific resolution, None = highest available. - resolution=None, - square=False, - # Additional arguments for the Dataset base class. 
- **super_kwargs, - ): - self._path = path - self._zipfile = None - self._square = square - - if os.path.isdir(self._path): - self._type = 'dir' - self._all_fnames = {os.path.relpath(os.path.join( - root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files} - elif self._file_ext(self._path) == '.zip': - self._type = 'zip' - self._all_fnames = set(self._get_zipfile().namelist()) - else: - raise IOError('Path must point to a directory or zip') - - PIL.Image.init() - self._image_fnames = sorted( - fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION) - if len(self._image_fnames) == 0: - raise IOError('No image files found in the specified path') - - name = os.path.splitext(os.path.basename(self._path))[0] - raw_shape = [len(self._image_fnames)] + \ - list(self._load_raw_image(0).shape) - # if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution): - # raise IOError('Image files do not match the specified resolution') - if resolution is not None: - if self._square: - raw_shape[2] = raw_shape[3] = resolution - else: - raw_shape[2] = resolution - raw_shape[3] = resolution // 2 - # print(raw_shape) - super().__init__(name=name, raw_shape=raw_shape, square=square, **super_kwargs) - - @staticmethod - def _file_ext(fname): - return os.path.splitext(fname)[1].lower() - - def _get_zipfile(self): - assert self._type == 'zip' - if self._zipfile is None: - self._zipfile = zipfile.ZipFile(self._path) - return self._zipfile - - def _open_file(self, fname): - if self._type == 'dir': - return open(os.path.join(self._path, fname), 'rb') - if self._type == 'zip': - return self._get_zipfile().open(fname, 'r') - return None - - def close(self): - try: - if self._zipfile is not None: - self._zipfile.close() - finally: - self._zipfile = None - - def __getstate__(self): - return dict(super().__getstate__(), _zipfile=None) - - def _load_raw_image(self, raw_idx): # load single image - fname = self._image_fnames[raw_idx] - with self._open_file(fname) as f: - if pyspng is not None and self._file_ext(fname) == '.png': - image = pyspng.load(f.read()) - else: - image = np.array(PIL.Image.open(f)) - if image.ndim == 2: - image = image[:, :, np.newaxis] # HW => HWC - image = image.transpose(2, 0, 1) # HWC => CHW - return image - - def _load_raw_labels(self): - fname = 'dataset.json' - if fname not in self._all_fnames: - return None - with self._open_file(fname) as f: - labels = json.load(f)['labels'] - if labels is None: - return None - labels = dict(labels) - labels = [labels[fname.replace('\\', '/')] - for fname in self._image_fnames] - labels = np.array(labels) - labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim]) - return labels - - -# ---------------------------------------------------------------------------- diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/unet.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/unet.md deleted file mode 100644 index 9a488a3231a658ddc81b5c31636f208d768038a8..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/unet.md +++ /dev/null @@ -1,13 +0,0 @@ -# UNet1DModel - -The [UNet](https://huggingface.co/papers/1505.04597) model was originally introduced by Ronneberger et al for biomedical image segmentation, but it is also commonly used in 🤗 Diffusers because it outputs images that are the same size as the input. 
It is one of the most important components of a diffusion system because it facilitates the actual diffusion process. There are several variants of the UNet model in 🤗 Diffusers, depending on it's number of dimensions and whether it is a conditional model or not. This is a 1D UNet model. - -The abstract from the paper is: - -*There is large consent that successful training of deep networks requires many thousand annotated training samples. In this paper, we present a network and training strategy that relies on the strong use of data augmentation to use the available annotated samples more efficiently. The architecture consists of a contracting path to capture context and a symmetric expanding path that enables precise localization. We show that such a network can be trained end-to-end from very few images and outperforms the prior best method (a sliding-window convolutional network) on the ISBI challenge for segmentation of neuronal structures in electron microscopic stacks. Using the same network trained on transmitted light microscopy images (phase contrast and DIC) we won the ISBI cell tracking challenge 2015 in these categories by a large margin. Moreover, the network is fast. Segmentation of a 512x512 image takes less than a second on a recent GPU. The full implementation (based on Caffe) and the trained networks are available at http://lmb.informatik.uni-freiburg.de/people/ronneber/u-net.* - -## UNet1DModel -[[autodoc]] UNet1DModel - -## UNet1DOutput -[[autodoc]] models.unet_1d.UNet1DOutput \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py deleted file mode 100644 index 781cbcbd69a1a7d038bb217f7f789cbf9e43a920..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py +++ /dev/null @@ -1,227 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
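To make the `UNet1DModel` description above concrete, here is a minimal forward pass. The library's default configuration (2 input/output channels, `sample_size=65536`) is assumed for illustration and is not tied to any pretrained checkpoint.

```python
import torch
from diffusers import UNet1DModel

# Default 1D UNet configuration: 2 channels in and out, sample_size 65536.
model = UNet1DModel()

sample = torch.randn(1, 2, 65536)         # (batch, channels, length)
timestep = torch.tensor([10])

with torch.no_grad():
    out = model(sample, timestep).sample  # UNet1DOutput.sample

print(out.shape)  # expected: torch.Size([1, 2, 65536]) -- same length as the input
```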
- -import gc -import unittest - -import numpy as np -import torch -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer - -from diffusers import ( - AutoencoderKL, - DDIMParallelScheduler, - DDPMParallelScheduler, - StableDiffusionParadigmsPipeline, - UNet2DConditionModel, -) -from diffusers.utils import slow, torch_device -from diffusers.utils.testing_utils import ( - enable_full_determinism, - require_torch_gpu, -) - -from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS -from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin - - -enable_full_determinism() - - -class StableDiffusionParadigmsPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): - pipeline_class = StableDiffusionParadigmsPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - image_params = TEXT_TO_IMAGE_IMAGE_PARAMS - image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - # SD2-specific config below - attention_head_dim=(2, 4), - use_linear_projection=True, - ) - scheduler = DDIMParallelScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - sample_size=128, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - # SD2-specific config below - hidden_act="gelu", - projection_dim=512, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - components = { - "unet": unet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "prompt": "a photograph of an astronaut riding a horse", - "generator": generator, - "num_inference_steps": 10, - "guidance_scale": 6.0, - "output_type": "numpy", - "parallel": 3, - "debug": True, - } - return inputs - - def test_stable_diffusion_paradigms_default_case(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = StableDiffusionParadigmsPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.4773, 0.5417, 0.4723, 0.4925, 0.5631, 0.4752, 0.5240, 0.4935, 
0.5023]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_paradigms_default_case_ddpm(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - torch.manual_seed(0) - components["scheduler"] = DDPMParallelScheduler() - torch.manual_seed(0) - sd_pipe = StableDiffusionParadigmsPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - image = sd_pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1] - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.3573, 0.4420, 0.4960, 0.4799, 0.3796, 0.3879, 0.4819, 0.4365, 0.4468]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - # override to speed the overall test timing up. - def test_inference_batch_consistent(self): - super().test_inference_batch_consistent(batch_sizes=[1, 2]) - - # override to speed the overall test timing up. - def test_inference_batch_single_identical(self): - super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3e-3) - - def test_stable_diffusion_paradigms_negative_prompt(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - sd_pipe = StableDiffusionParadigmsPipeline(**components) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - inputs = self.get_dummy_inputs(device) - negative_prompt = "french fries" - output = sd_pipe(**inputs, negative_prompt=negative_prompt) - image = output.images - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([0.4771, 0.5420, 0.4683, 0.4918, 0.5636, 0.4725, 0.5230, 0.4923, 0.5015]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - - -@slow -@require_torch_gpu -class StableDiffusionParadigmsPipelineSlowTests(unittest.TestCase): - def tearDown(self): - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def get_inputs(self, seed=0): - generator = torch.Generator(device=torch_device).manual_seed(seed) - inputs = { - "prompt": "a photograph of an astronaut riding a horse", - "generator": generator, - "num_inference_steps": 10, - "guidance_scale": 7.5, - "output_type": "numpy", - "parallel": 3, - "debug": True, - } - return inputs - - def test_stable_diffusion_paradigms_default(self): - model_ckpt = "stabilityai/stable-diffusion-2-base" - scheduler = DDIMParallelScheduler.from_pretrained(model_ckpt, subfolder="scheduler") - pipe = StableDiffusionParadigmsPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - inputs = self.get_inputs() - image = pipe(**inputs).images - image_slice = image[0, -3:, -3:, -1].flatten() - - assert image.shape == (1, 512, 512, 3) - - expected_slice = np.array([0.9622, 0.9602, 0.9748, 0.9591, 0.9630, 0.9691, 0.9661, 0.9631, 0.9741]) - - assert np.abs(expected_slice - image_slice).max() < 1e-2 diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/unidiffuser/test_unidiffuser.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/unidiffuser/test_unidiffuser.py deleted file mode 100644 index 06cb451281c9deec14f38d7740cc182b261efae8..0000000000000000000000000000000000000000 --- 
a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/unidiffuser/test_unidiffuser.py +++ /dev/null @@ -1,673 +0,0 @@ -import gc -import random -import unittest - -import numpy as np -import torch -from PIL import Image -from transformers import ( - CLIPImageProcessor, - CLIPTextModel, - CLIPTokenizer, - CLIPVisionModelWithProjection, - GPT2Tokenizer, -) - -from diffusers import ( - AutoencoderKL, - DPMSolverMultistepScheduler, - UniDiffuserModel, - UniDiffuserPipeline, - UniDiffuserTextDecoder, -) -from diffusers.utils import floats_tensor, load_image, randn_tensor, slow, torch_device -from diffusers.utils.testing_utils import require_torch_gpu - -from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS -from ..test_pipelines_common import PipelineTesterMixin - - -class UniDiffuserPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = UniDiffuserPipeline - params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS - - def get_dummy_components(self): - unet = UniDiffuserModel.from_pretrained( - "hf-internal-testing/unidiffuser-diffusers-test", - subfolder="unet", - ) - - scheduler = DPMSolverMultistepScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - solver_order=3, - ) - - vae = AutoencoderKL.from_pretrained( - "hf-internal-testing/unidiffuser-diffusers-test", - subfolder="vae", - ) - - text_encoder = CLIPTextModel.from_pretrained( - "hf-internal-testing/unidiffuser-diffusers-test", - subfolder="text_encoder", - ) - clip_tokenizer = CLIPTokenizer.from_pretrained( - "hf-internal-testing/unidiffuser-diffusers-test", - subfolder="clip_tokenizer", - ) - - image_encoder = CLIPVisionModelWithProjection.from_pretrained( - "hf-internal-testing/unidiffuser-diffusers-test", - subfolder="image_encoder", - ) - # From the Stable Diffusion Image Variation pipeline tests - image_processor = CLIPImageProcessor(crop_size=32, size=32) - # image_processor = CLIPImageProcessor.from_pretrained("hf-internal-testing/tiny-random-clip") - - text_tokenizer = GPT2Tokenizer.from_pretrained( - "hf-internal-testing/unidiffuser-diffusers-test", - subfolder="text_tokenizer", - ) - text_decoder = UniDiffuserTextDecoder.from_pretrained( - "hf-internal-testing/unidiffuser-diffusers-test", - subfolder="text_decoder", - ) - - components = { - "vae": vae, - "text_encoder": text_encoder, - "image_encoder": image_encoder, - "image_processor": image_processor, - "clip_tokenizer": clip_tokenizer, - "text_decoder": text_decoder, - "text_tokenizer": text_tokenizer, - "unet": unet, - "scheduler": scheduler, - } - - return components - - def get_dummy_inputs(self, device, seed=0): - image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) - image = image.cpu().permute(0, 2, 3, 1)[0] - image = Image.fromarray(np.uint8(image)).convert("RGB") - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "prompt": "an elephant under the sea", - "image": image, - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 6.0, - "output_type": "numpy", - } - return inputs - - def get_fixed_latents(self, device, seed=0): - if type(device) == str: - device = torch.device(device) - generator = torch.Generator(device=device).manual_seed(seed) - # Hardcode the shapes for now. 
- prompt_latents = randn_tensor((1, 77, 32), generator=generator, device=device, dtype=torch.float32) - vae_latents = randn_tensor((1, 4, 16, 16), generator=generator, device=device, dtype=torch.float32) - clip_latents = randn_tensor((1, 1, 32), generator=generator, device=device, dtype=torch.float32) - - latents = { - "prompt_latents": prompt_latents, - "vae_latents": vae_latents, - "clip_latents": clip_latents, - } - return latents - - def get_dummy_inputs_with_latents(self, device, seed=0): - # image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) - # image = image.cpu().permute(0, 2, 3, 1)[0] - # image = Image.fromarray(np.uint8(image)).convert("RGB") - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg", - ) - image = image.resize((32, 32)) - latents = self.get_fixed_latents(device, seed=seed) - - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - - inputs = { - "prompt": "an elephant under the sea", - "image": image, - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 6.0, - "output_type": "numpy", - "prompt_latents": latents.get("prompt_latents"), - "vae_latents": latents.get("vae_latents"), - "clip_latents": latents.get("clip_latents"), - } - return inputs - - def test_unidiffuser_default_joint_v0(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'joint' - unidiffuser_pipe.set_joint_mode() - assert unidiffuser_pipe.mode == "joint" - - # inputs = self.get_dummy_inputs(device) - inputs = self.get_dummy_inputs_with_latents(device) - # Delete prompt and image for joint inference. - del inputs["prompt"] - del inputs["image"] - sample = unidiffuser_pipe(**inputs) - image = sample.images - text = sample.text - assert image.shape == (1, 32, 32, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716]) - assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 - - expected_text_prefix = " no no no " - assert text[0][:10] == expected_text_prefix - - def test_unidiffuser_default_joint_no_cfg_v0(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'joint' - unidiffuser_pipe.set_joint_mode() - assert unidiffuser_pipe.mode == "joint" - - # inputs = self.get_dummy_inputs(device) - inputs = self.get_dummy_inputs_with_latents(device) - # Delete prompt and image for joint inference. 
- del inputs["prompt"] - del inputs["image"] - # Set guidance scale to 1.0 to turn off CFG - inputs["guidance_scale"] = 1.0 - sample = unidiffuser_pipe(**inputs) - image = sample.images - text = sample.text - assert image.shape == (1, 32, 32, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716]) - assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 - - expected_text_prefix = " no no no " - assert text[0][:10] == expected_text_prefix - - def test_unidiffuser_default_text2img_v0(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'text2img' - unidiffuser_pipe.set_text_to_image_mode() - assert unidiffuser_pipe.mode == "text2img" - - inputs = self.get_dummy_inputs_with_latents(device) - # Delete image for text-conditioned image generation - del inputs["image"] - image = unidiffuser_pipe(**inputs).images - assert image.shape == (1, 32, 32, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - - def test_unidiffuser_default_image_0(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'img' - unidiffuser_pipe.set_image_mode() - assert unidiffuser_pipe.mode == "img" - - inputs = self.get_dummy_inputs(device) - # Delete prompt and image for unconditional ("marginal") text generation. - del inputs["prompt"] - del inputs["image"] - image = unidiffuser_pipe(**inputs).images - assert image.shape == (1, 32, 32, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_slice = np.array([0.5760, 0.6270, 0.6571, 0.4966, 0.4638, 0.5663, 0.5254, 0.5068, 0.5715]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - - def test_unidiffuser_default_text_v0(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'img' - unidiffuser_pipe.set_text_mode() - assert unidiffuser_pipe.mode == "text" - - inputs = self.get_dummy_inputs(device) - # Delete prompt and image for unconditional ("marginal") text generation. 
- del inputs["prompt"] - del inputs["image"] - text = unidiffuser_pipe(**inputs).text - - expected_text_prefix = " no no no " - assert text[0][:10] == expected_text_prefix - - def test_unidiffuser_default_img2text_v0(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'img2text' - unidiffuser_pipe.set_image_to_text_mode() - assert unidiffuser_pipe.mode == "img2text" - - inputs = self.get_dummy_inputs_with_latents(device) - # Delete text for image-conditioned text generation - del inputs["prompt"] - text = unidiffuser_pipe(**inputs).text - - expected_text_prefix = " no no no " - assert text[0][:10] == expected_text_prefix - - def test_unidiffuser_default_joint_v1(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1") - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'joint' - unidiffuser_pipe.set_joint_mode() - assert unidiffuser_pipe.mode == "joint" - - # inputs = self.get_dummy_inputs(device) - inputs = self.get_dummy_inputs_with_latents(device) - # Delete prompt and image for joint inference. - del inputs["prompt"] - del inputs["image"] - inputs["data_type"] = 1 - sample = unidiffuser_pipe(**inputs) - image = sample.images - text = sample.text - assert image.shape == (1, 32, 32, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716]) - assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 - - expected_text_prefix = " no no no " - assert text[0][:10] == expected_text_prefix - - def test_unidiffuser_default_text2img_v1(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1") - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'text2img' - unidiffuser_pipe.set_text_to_image_mode() - assert unidiffuser_pipe.mode == "text2img" - - inputs = self.get_dummy_inputs_with_latents(device) - # Delete image for text-conditioned image generation - del inputs["image"] - image = unidiffuser_pipe(**inputs).images - assert image.shape == (1, 32, 32, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 - - def test_unidiffuser_default_img2text_v1(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1") - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'img2text' - unidiffuser_pipe.set_image_to_text_mode() - assert unidiffuser_pipe.mode == "img2text" - - inputs = self.get_dummy_inputs_with_latents(device) - # Delete text for image-conditioned text generation - del inputs["prompt"] - text = unidiffuser_pipe(**inputs).text - - expected_text_prefix = " no no no " - assert text[0][:10] == 
expected_text_prefix - - def test_unidiffuser_text2img_multiple_images(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'text2img' - unidiffuser_pipe.set_text_to_image_mode() - assert unidiffuser_pipe.mode == "text2img" - - inputs = self.get_dummy_inputs(device) - # Delete image for text-conditioned image generation - del inputs["image"] - inputs["num_images_per_prompt"] = 2 - inputs["num_prompts_per_image"] = 3 - image = unidiffuser_pipe(**inputs).images - assert image.shape == (2, 32, 32, 3) - - def test_unidiffuser_img2text_multiple_prompts(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'img2text' - unidiffuser_pipe.set_image_to_text_mode() - assert unidiffuser_pipe.mode == "img2text" - - inputs = self.get_dummy_inputs(device) - # Delete text for image-conditioned text generation - del inputs["prompt"] - inputs["num_images_per_prompt"] = 2 - inputs["num_prompts_per_image"] = 3 - text = unidiffuser_pipe(**inputs).text - - assert len(text) == 3 - - def test_unidiffuser_text2img_multiple_images_with_latents(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'text2img' - unidiffuser_pipe.set_text_to_image_mode() - assert unidiffuser_pipe.mode == "text2img" - - inputs = self.get_dummy_inputs_with_latents(device) - # Delete image for text-conditioned image generation - del inputs["image"] - inputs["num_images_per_prompt"] = 2 - inputs["num_prompts_per_image"] = 3 - image = unidiffuser_pipe(**inputs).images - assert image.shape == (2, 32, 32, 3) - - def test_unidiffuser_img2text_multiple_prompts_with_latents(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - components = self.get_dummy_components() - unidiffuser_pipe = UniDiffuserPipeline(**components) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'img2text' - unidiffuser_pipe.set_image_to_text_mode() - assert unidiffuser_pipe.mode == "img2text" - - inputs = self.get_dummy_inputs_with_latents(device) - # Delete text for image-conditioned text generation - del inputs["prompt"] - inputs["num_images_per_prompt"] = 2 - inputs["num_prompts_per_image"] = 3 - text = unidiffuser_pipe(**inputs).text - - assert len(text) == 3 - - def test_inference_batch_single_identical(self): - super().test_inference_batch_single_identical(expected_max_diff=2e-4) - - @require_torch_gpu - def test_unidiffuser_default_joint_v1_cuda_fp16(self): - device = "cuda" - unidiffuser_pipe = UniDiffuserPipeline.from_pretrained( - "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16 - ) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'joint' - unidiffuser_pipe.set_joint_mode() - assert unidiffuser_pipe.mode == 
"joint" - - inputs = self.get_dummy_inputs_with_latents(device) - # Delete prompt and image for joint inference. - del inputs["prompt"] - del inputs["image"] - inputs["data_type"] = 1 - sample = unidiffuser_pipe(**inputs) - image = sample.images - text = sample.text - assert image.shape == (1, 32, 32, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_img_slice = np.array([0.5049, 0.5498, 0.5854, 0.3052, 0.4460, 0.6489, 0.5122, 0.4810, 0.6138]) - assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 - - expected_text_prefix = '" This This' - assert text[0][: len(expected_text_prefix)] == expected_text_prefix - - @require_torch_gpu - def test_unidiffuser_default_text2img_v1_cuda_fp16(self): - device = "cuda" - unidiffuser_pipe = UniDiffuserPipeline.from_pretrained( - "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16 - ) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'text2img' - unidiffuser_pipe.set_text_to_image_mode() - assert unidiffuser_pipe.mode == "text2img" - - inputs = self.get_dummy_inputs_with_latents(device) - # Delete prompt and image for joint inference. - del inputs["image"] - inputs["data_type"] = 1 - sample = unidiffuser_pipe(**inputs) - image = sample.images - assert image.shape == (1, 32, 32, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_img_slice = np.array([0.5054, 0.5498, 0.5854, 0.3052, 0.4458, 0.6489, 0.5122, 0.4810, 0.6138]) - assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 - - @require_torch_gpu - def test_unidiffuser_default_img2text_v1_cuda_fp16(self): - device = "cuda" - unidiffuser_pipe = UniDiffuserPipeline.from_pretrained( - "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16 - ) - unidiffuser_pipe = unidiffuser_pipe.to(device) - unidiffuser_pipe.set_progress_bar_config(disable=None) - - # Set mode to 'img2text' - unidiffuser_pipe.set_image_to_text_mode() - assert unidiffuser_pipe.mode == "img2text" - - inputs = self.get_dummy_inputs_with_latents(device) - # Delete prompt and image for joint inference. - del inputs["prompt"] - inputs["data_type"] = 1 - text = unidiffuser_pipe(**inputs).text - - expected_text_prefix = '" This This' - assert text[0][: len(expected_text_prefix)] == expected_text_prefix - - -@slow -@require_torch_gpu -class UniDiffuserPipelineSlowTests(unittest.TestCase): - def tearDown(self): - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def get_inputs(self, device, seed=0, generate_latents=False): - generator = torch.manual_seed(seed) - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg" - ) - inputs = { - "prompt": "an elephant under the sea", - "image": image, - "generator": generator, - "num_inference_steps": 3, - "guidance_scale": 8.0, - "output_type": "numpy", - } - if generate_latents: - latents = self.get_fixed_latents(device, seed=seed) - for latent_name, latent_tensor in latents.items(): - inputs[latent_name] = latent_tensor - return inputs - - def get_fixed_latents(self, device, seed=0): - if type(device) == str: - device = torch.device(device) - latent_device = torch.device("cpu") - generator = torch.Generator(device=latent_device).manual_seed(seed) - # Hardcode the shapes for now. 
- prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32) - vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32) - clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32) - - # Move latents onto desired device. - prompt_latents = prompt_latents.to(device) - vae_latents = vae_latents.to(device) - clip_latents = clip_latents.to(device) - - latents = { - "prompt_latents": prompt_latents, - "vae_latents": vae_latents, - "clip_latents": clip_latents, - } - return latents - - def test_unidiffuser_default_joint_v1(self): - pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - # inputs = self.get_dummy_inputs(device) - inputs = self.get_inputs(device=torch_device, generate_latents=True) - # Delete prompt and image for joint inference. - del inputs["prompt"] - del inputs["image"] - sample = pipe(**inputs) - image = sample.images - text = sample.text - assert image.shape == (1, 512, 512, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520]) - assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-1 - - expected_text_prefix = "a living room" - assert text[0][: len(expected_text_prefix)] == expected_text_prefix - - def test_unidiffuser_default_text2img_v1(self): - pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - inputs = self.get_inputs(device=torch_device, generate_latents=True) - del inputs["image"] - sample = pipe(**inputs) - image = sample.images - assert image.shape == (1, 512, 512, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - def test_unidiffuser_default_img2text_v1(self): - pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - inputs = self.get_inputs(device=torch_device, generate_latents=True) - del inputs["prompt"] - sample = pipe(**inputs) - text = sample.text - - expected_text_prefix = "An astronaut" - assert text[0][: len(expected_text_prefix)] == expected_text_prefix - - def test_unidiffuser_default_joint_v1_fp16(self): - pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - # inputs = self.get_dummy_inputs(device) - inputs = self.get_inputs(device=torch_device, generate_latents=True) - # Delete prompt and image for joint inference. 
- del inputs["prompt"] - del inputs["image"] - sample = pipe(**inputs) - image = sample.images - text = sample.text - assert image.shape == (1, 512, 512, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520]) - assert np.abs(image_slice.flatten() - expected_img_slice).max() < 2e-1 - - expected_text_prefix = "a living room" - assert text[0][: len(expected_text_prefix)] == expected_text_prefix - - def test_unidiffuser_default_text2img_v1_fp16(self): - pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - inputs = self.get_inputs(device=torch_device, generate_latents=True) - del inputs["image"] - sample = pipe(**inputs) - image = sample.images - assert image.shape == (1, 512, 512, 3) - - image_slice = image[0, -3:, -3:, -1] - expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - def test_unidiffuser_default_img2text_v1_fp16(self): - pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - inputs = self.get_inputs(device=torch_device, generate_latents=True) - del inputs["prompt"] - sample = pipe(**inputs) - text = sample.text - - expected_text_prefix = "An astronaut" - assert text[0][: len(expected_text_prefix)] == expected_text_prefix diff --git a/spaces/Andy1621/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py deleted file mode 100644 index 723ab0295f8457c03114ca535dede951e7d5b169..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' - -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/point_assigner.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/point_assigner.py deleted file mode 100644 index fb8f5e4edc63f4851e2067034c5e67a3558f31bc..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/point_assigner.py +++ /dev/null @@ -1,133 +0,0 @@ -import torch - -from ..builder import BBOX_ASSIGNERS -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class PointAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each point. - - Each proposals will be assigned with `0`, or a positive integer - indicating the ground truth index. - - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - """ - - def __init__(self, scale=4, pos_num=3): - self.scale = scale - self.pos_num = pos_num - - def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): - """Assign gt to points. - - This method assign a gt bbox to every points set, each points set - will be assigned with the background_label (-1), or a label number. 
- -1 is background, and semi-positive number is the index (0-based) of - assigned gt. - The assignment is done in following steps, the order matters. - - 1. assign every points to the background_label (-1) - 2. A point is assigned to some gt bbox if - (i) the point is within the k closest points to the gt bbox - (ii) the distance between this point and the gt is smaller than - other gt bboxes - - Args: - points (Tensor): points to be assigned, shape(n, 3) while last - dimension stands for (x, y, stride). - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - NOTE: currently unused. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. - """ - num_points = points.shape[0] - num_gts = gt_bboxes.shape[0] - - if num_gts == 0 or num_points == 0: - # If no truth assign everything to the background - assigned_gt_inds = points.new_full((num_points, ), - 0, - dtype=torch.long) - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = points.new_full((num_points, ), - -1, - dtype=torch.long) - return AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) - - points_xy = points[:, :2] - points_stride = points[:, 2] - points_lvl = torch.log2( - points_stride).int() # [3...,4...,5...,6...,7...] - lvl_min, lvl_max = points_lvl.min(), points_lvl.max() - - # assign gt box - gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2 - gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6) - scale = self.scale - gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) + - torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int() - gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max) - - # stores the assigned gt index of each point - assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long) - # stores the assigned gt dist (to this point) of each point - assigned_gt_dist = points.new_full((num_points, ), float('inf')) - points_range = torch.arange(points.shape[0]) - - for idx in range(num_gts): - gt_lvl = gt_bboxes_lvl[idx] - # get the index of points in this level - lvl_idx = gt_lvl == points_lvl - points_index = points_range[lvl_idx] - # get the points in this level - lvl_points = points_xy[lvl_idx, :] - # get the center point of gt - gt_point = gt_bboxes_xy[[idx], :] - # get width and height of gt - gt_wh = gt_bboxes_wh[[idx], :] - # compute the distance between gt center and - # all points in this level - points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1) - # find the nearest k points to gt center in this level - min_dist, min_dist_index = torch.topk( - points_gt_dist, self.pos_num, largest=False) - # the index of nearest k points to gt center in this level - min_dist_points_index = points_index[min_dist_index] - # The less_than_recorded_index stores the index - # of min_dist that is less then the assigned_gt_dist. Where - # assigned_gt_dist stores the dist from previous assigned gt - # (if exist) to each point. - less_than_recorded_index = min_dist < assigned_gt_dist[ - min_dist_points_index] - # The min_dist_points_index stores the index of points satisfy: - # (1) it is k nearest to current gt center in this level. - # (2) it is closer to current gt center than other gt center. 
- min_dist_points_index = min_dist_points_index[ - less_than_recorded_index] - # assign the result - assigned_gt_inds[min_dist_points_index] = idx + 1 - assigned_gt_dist[min_dist_points_index] = min_dist[ - less_than_recorded_index] - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_points, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - - return AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 6a622eae963401e143004a62ff53071ddbf61c01..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = '../pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' -model = dict( - pretrained='open-mmlab://resnest101', - backbone=dict( - type='ResNeSt', - stem_channels=128, - radix=2, - reduction_factor=4, - avg_down_stride=True)) diff --git a/spaces/Anilegna/Colour-Personallity/info.md b/spaces/Anilegna/Colour-Personallity/info.md deleted file mode 100644 index cd1826dfc9ecd25a8cc4edaf32e461a54fa0f7fc..0000000000000000000000000000000000000000 --- a/spaces/Anilegna/Colour-Personallity/info.md +++ /dev/null @@ -1,16 +0,0 @@ -# 😌 Colour-Personallity - -### 🧐 Problem Statement and Research Summary -[add info about your problem statement and your research here!] - -### 🎣 Data Collection Plan -[Edit info.md - add info about what data you collected and why here!] - -### 💥 Ethical Considerations (Data Privacy and Bias) -* Data privacy: [Edit info.md - add info about you considered users' privacy here!] -* Bias: [Edit info.md - add info about you considered bias here!] - -### 👻 Our Team -[Edit info.md - add info about your team members here!] - -![aiEDU logo](https://images.squarespace-cdn.com/content/v1/5e4efdef6d10420691f02bc1/5db5a8a3-1761-4fce-a096-bd5f2515162f/aiEDU+_black+logo+stacked.png?format=100w) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/config.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/config.py deleted file mode 100644 index 17149353aefac6d737c67bb2f35a3a6cd2147b0a..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/config.py +++ /dev/null @@ -1,688 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import ast -import copy -import os -import os.path as osp -import platform -import shutil -import sys -import tempfile -import uuid -import warnings -from argparse import Action, ArgumentParser -from collections import abc -from importlib import import_module - -from addict import Dict -from yapf.yapflib.yapf_api import FormatCode - -from .misc import import_modules_from_strings -from .path import check_file_exist - -if platform.system() == 'Windows': - import regex as re -else: - import re - -BASE_KEY = '_base_' -DELETE_KEY = '_delete_' -DEPRECATION_KEY = '_deprecation_' -RESERVED_KEYS = ['filename', 'text', 'pretty_text'] - - -class ConfigDict(Dict): - - def __missing__(self, name): - raise KeyError(name) - - def __getattr__(self, name): - try: - value = super(ConfigDict, self).__getattr__(name) - except KeyError: - ex = AttributeError(f"'{self.__class__.__name__}' object has no " - f"attribute '{name}'") - except Exception as e: - ex = e - else: - return value - raise ex - - -def add_args(parser, cfg, prefix=''): - for k, v in cfg.items(): - if isinstance(v, str): - parser.add_argument('--' + prefix + k) - elif isinstance(v, int): - parser.add_argument('--' + prefix + k, type=int) - elif isinstance(v, float): - parser.add_argument('--' + prefix + k, type=float) - elif isinstance(v, bool): - parser.add_argument('--' + prefix + k, action='store_true') - elif isinstance(v, dict): - add_args(parser, v, prefix + k + '.') - elif isinstance(v, abc.Iterable): - parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+') - else: - print(f'cannot parse key {prefix + k} of type {type(v)}') - return parser - - -class Config: - """A facility for config and config files. - - It supports common file formats as configs: python/json/yaml. The interface - is the same as a dict object and also allows access config values as - attributes. 
- - Example: - >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1]))) - >>> cfg.a - 1 - >>> cfg.b - {'b1': [0, 1]} - >>> cfg.b.b1 - [0, 1] - >>> cfg = Config.fromfile('tests/data/config/a.py') - >>> cfg.filename - "/home/kchen/projects/mmcv/tests/data/config/a.py" - >>> cfg.item4 - 'test' - >>> cfg - "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: " - "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}" - """ - - @staticmethod - def _validate_py_syntax(filename): - with open(filename, 'r', encoding='utf-8') as f: - # Setting encoding explicitly to resolve coding issue on windows - content = f.read() - try: - ast.parse(content) - except SyntaxError as e: - raise SyntaxError('There are syntax errors in config ' - f'file {filename}: {e}') - - @staticmethod - def _substitute_predefined_vars(filename, temp_config_name): - file_dirname = osp.dirname(filename) - file_basename = osp.basename(filename) - file_basename_no_extension = osp.splitext(file_basename)[0] - file_extname = osp.splitext(filename)[1] - support_templates = dict( - fileDirname=file_dirname, - fileBasename=file_basename, - fileBasenameNoExtension=file_basename_no_extension, - fileExtname=file_extname) - with open(filename, 'r', encoding='utf-8') as f: - # Setting encoding explicitly to resolve coding issue on windows - config_file = f.read() - for key, value in support_templates.items(): - regexp = r'\{\{\s*' + str(key) + r'\s*\}\}' - value = value.replace('\\', '/') - config_file = re.sub(regexp, value, config_file) - with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file: - tmp_config_file.write(config_file) - - @staticmethod - def _pre_substitute_base_vars(filename, temp_config_name): - """Substitute base variable placehoders to string, so that parsing - would work.""" - with open(filename, 'r', encoding='utf-8') as f: - # Setting encoding explicitly to resolve coding issue on windows - config_file = f.read() - base_var_dict = {} - regexp = r'\{\{\s*' + BASE_KEY + r'\.([\w\.]+)\s*\}\}' - base_vars = set(re.findall(regexp, config_file)) - for base_var in base_vars: - randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}' - base_var_dict[randstr] = base_var - regexp = r'\{\{\s*' + BASE_KEY + r'\.' 
+ base_var + r'\s*\}\}' - config_file = re.sub(regexp, f'"{randstr}"', config_file) - with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file: - tmp_config_file.write(config_file) - return base_var_dict - - @staticmethod - def _substitute_base_vars(cfg, base_var_dict, base_cfg): - """Substitute variable strings to their actual values.""" - cfg = copy.deepcopy(cfg) - - if isinstance(cfg, dict): - for k, v in cfg.items(): - if isinstance(v, str) and v in base_var_dict: - new_v = base_cfg - for new_k in base_var_dict[v].split('.'): - new_v = new_v[new_k] - cfg[k] = new_v - elif isinstance(v, (list, tuple, dict)): - cfg[k] = Config._substitute_base_vars( - v, base_var_dict, base_cfg) - elif isinstance(cfg, tuple): - cfg = tuple( - Config._substitute_base_vars(c, base_var_dict, base_cfg) - for c in cfg) - elif isinstance(cfg, list): - cfg = [ - Config._substitute_base_vars(c, base_var_dict, base_cfg) - for c in cfg - ] - elif isinstance(cfg, str) and cfg in base_var_dict: - new_v = base_cfg - for new_k in base_var_dict[cfg].split('.'): - new_v = new_v[new_k] - cfg = new_v - - return cfg - - @staticmethod - def _file2dict(filename, use_predefined_variables=True): - filename = osp.abspath(osp.expanduser(filename)) - check_file_exist(filename) - fileExtname = osp.splitext(filename)[1] - if fileExtname not in ['.py', '.json', '.yaml', '.yml']: - raise IOError('Only py/yml/yaml/json type are supported now!') - - with tempfile.TemporaryDirectory() as temp_config_dir: - temp_config_file = tempfile.NamedTemporaryFile( - dir=temp_config_dir, suffix=fileExtname) - if platform.system() == 'Windows': - temp_config_file.close() - temp_config_name = osp.basename(temp_config_file.name) - # Substitute predefined variables - if use_predefined_variables: - Config._substitute_predefined_vars(filename, - temp_config_file.name) - else: - shutil.copyfile(filename, temp_config_file.name) - # Substitute base variables from placeholders to strings - base_var_dict = Config._pre_substitute_base_vars( - temp_config_file.name, temp_config_file.name) - - if filename.endswith('.py'): - temp_module_name = osp.splitext(temp_config_name)[0] - sys.path.insert(0, temp_config_dir) - Config._validate_py_syntax(filename) - mod = import_module(temp_module_name) - sys.path.pop(0) - cfg_dict = { - name: value - for name, value in mod.__dict__.items() - if not name.startswith('__') - } - # delete imported module - del sys.modules[temp_module_name] - elif filename.endswith(('.yml', '.yaml', '.json')): - import annotator.uniformer.mmcv as mmcv - cfg_dict = mmcv.load(temp_config_file.name) - # close temp file - temp_config_file.close() - - # check deprecation information - if DEPRECATION_KEY in cfg_dict: - deprecation_info = cfg_dict.pop(DEPRECATION_KEY) - warning_msg = f'The config file {filename} will be deprecated ' \ - 'in the future.' - if 'expected' in deprecation_info: - warning_msg += f' Please use {deprecation_info["expected"]} ' \ - 'instead.' 
- if 'reference' in deprecation_info: - warning_msg += ' More information can be found at ' \ - f'{deprecation_info["reference"]}' - warnings.warn(warning_msg) - - cfg_text = filename + '\n' - with open(filename, 'r', encoding='utf-8') as f: - # Setting encoding explicitly to resolve coding issue on windows - cfg_text += f.read() - - if BASE_KEY in cfg_dict: - cfg_dir = osp.dirname(filename) - base_filename = cfg_dict.pop(BASE_KEY) - base_filename = base_filename if isinstance( - base_filename, list) else [base_filename] - - cfg_dict_list = list() - cfg_text_list = list() - for f in base_filename: - _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f)) - cfg_dict_list.append(_cfg_dict) - cfg_text_list.append(_cfg_text) - - base_cfg_dict = dict() - for c in cfg_dict_list: - duplicate_keys = base_cfg_dict.keys() & c.keys() - if len(duplicate_keys) > 0: - raise KeyError('Duplicate key is not allowed among bases. ' - f'Duplicate keys: {duplicate_keys}') - base_cfg_dict.update(c) - - # Substitute base variables from strings to their actual values - cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, - base_cfg_dict) - - base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict) - cfg_dict = base_cfg_dict - - # merge cfg_text - cfg_text_list.append(cfg_text) - cfg_text = '\n'.join(cfg_text_list) - - return cfg_dict, cfg_text - - @staticmethod - def _merge_a_into_b(a, b, allow_list_keys=False): - """merge dict ``a`` into dict ``b`` (non-inplace). - - Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid - in-place modifications. - - Args: - a (dict): The source dict to be merged into ``b``. - b (dict): The origin dict to be fetch keys from ``a``. - allow_list_keys (bool): If True, int string keys (e.g. '0', '1') - are allowed in source ``a`` and will replace the element of the - corresponding index in b if b is a list. Default: False. - - Returns: - dict: The modified dict of ``b`` using ``a``. - - Examples: - # Normally merge a into b. - >>> Config._merge_a_into_b( - ... dict(obj=dict(a=2)), dict(obj=dict(a=1))) - {'obj': {'a': 2}} - - # Delete b first and merge a into b. - >>> Config._merge_a_into_b( - ... dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1))) - {'obj': {'a': 2}} - - # b is a list - >>> Config._merge_a_into_b( - ... {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True) - [{'a': 2}, {'b': 2}] - """ - b = b.copy() - for k, v in a.items(): - if allow_list_keys and k.isdigit() and isinstance(b, list): - k = int(k) - if len(b) <= k: - raise KeyError(f'Index {k} exceeds the length of list {b}') - b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys) - elif isinstance(v, - dict) and k in b and not v.pop(DELETE_KEY, False): - allowed_types = (dict, list) if allow_list_keys else dict - if not isinstance(b[k], allowed_types): - raise TypeError( - f'{k}={v} in child config cannot inherit from base ' - f'because {k} is a dict in the child config but is of ' - f'type {type(b[k])} in base config. 
You may set ' - f'`{DELETE_KEY}=True` to ignore the base config') - b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys) - else: - b[k] = v - return b - - @staticmethod - def fromfile(filename, - use_predefined_variables=True, - import_custom_modules=True): - cfg_dict, cfg_text = Config._file2dict(filename, - use_predefined_variables) - if import_custom_modules and cfg_dict.get('custom_imports', None): - import_modules_from_strings(**cfg_dict['custom_imports']) - return Config(cfg_dict, cfg_text=cfg_text, filename=filename) - - @staticmethod - def fromstring(cfg_str, file_format): - """Generate config from config str. - - Args: - cfg_str (str): Config str. - file_format (str): Config file format corresponding to the - config str. Only py/yml/yaml/json type are supported now! - - Returns: - obj:`Config`: Config obj. - """ - if file_format not in ['.py', '.json', '.yaml', '.yml']: - raise IOError('Only py/yml/yaml/json type are supported now!') - if file_format != '.py' and 'dict(' in cfg_str: - # check if users specify a wrong suffix for python - warnings.warn( - 'Please check "file_format", the file format may be .py') - with tempfile.NamedTemporaryFile( - 'w', encoding='utf-8', suffix=file_format, - delete=False) as temp_file: - temp_file.write(cfg_str) - # on windows, previous implementation cause error - # see PR 1077 for details - cfg = Config.fromfile(temp_file.name) - os.remove(temp_file.name) - return cfg - - @staticmethod - def auto_argparser(description=None): - """Generate argparser from config file automatically (experimental)""" - partial_parser = ArgumentParser(description=description) - partial_parser.add_argument('config', help='config file path') - cfg_file = partial_parser.parse_known_args()[0].config - cfg = Config.fromfile(cfg_file) - parser = ArgumentParser(description=description) - parser.add_argument('config', help='config file path') - add_args(parser, cfg) - return parser, cfg - - def __init__(self, cfg_dict=None, cfg_text=None, filename=None): - if cfg_dict is None: - cfg_dict = dict() - elif not isinstance(cfg_dict, dict): - raise TypeError('cfg_dict must be a dict, but ' - f'got {type(cfg_dict)}') - for key in cfg_dict: - if key in RESERVED_KEYS: - raise KeyError(f'{key} is reserved for config file') - - super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict)) - super(Config, self).__setattr__('_filename', filename) - if cfg_text: - text = cfg_text - elif filename: - with open(filename, 'r') as f: - text = f.read() - else: - text = '' - super(Config, self).__setattr__('_text', text) - - @property - def filename(self): - return self._filename - - @property - def text(self): - return self._text - - @property - def pretty_text(self): - - indent = 4 - - def _indent(s_, num_spaces): - s = s_.split('\n') - if len(s) == 1: - return s_ - first = s.pop(0) - s = [(num_spaces * ' ') + line for line in s] - s = '\n'.join(s) - s = first + '\n' + s - return s - - def _format_basic_types(k, v, use_mapping=False): - if isinstance(v, str): - v_str = f"'{v}'" - else: - v_str = str(v) - - if use_mapping: - k_str = f"'{k}'" if isinstance(k, str) else str(k) - attr_str = f'{k_str}: {v_str}' - else: - attr_str = f'{str(k)}={v_str}' - attr_str = _indent(attr_str, indent) - - return attr_str - - def _format_list(k, v, use_mapping=False): - # check if all items in the list are dict - if all(isinstance(_, dict) for _ in v): - v_str = '[\n' - v_str += '\n'.join( - f'dict({_indent(_format_dict(v_), indent)}),' - for v_ in v).rstrip(',') - if use_mapping: - k_str = f"'{k}'" if 
isinstance(k, str) else str(k) - attr_str = f'{k_str}: {v_str}' - else: - attr_str = f'{str(k)}={v_str}' - attr_str = _indent(attr_str, indent) + ']' - else: - attr_str = _format_basic_types(k, v, use_mapping) - return attr_str - - def _contain_invalid_identifier(dict_str): - contain_invalid_identifier = False - for key_name in dict_str: - contain_invalid_identifier |= \ - (not str(key_name).isidentifier()) - return contain_invalid_identifier - - def _format_dict(input_dict, outest_level=False): - r = '' - s = [] - - use_mapping = _contain_invalid_identifier(input_dict) - if use_mapping: - r += '{' - for idx, (k, v) in enumerate(input_dict.items()): - is_last = idx >= len(input_dict) - 1 - end = '' if outest_level or is_last else ',' - if isinstance(v, dict): - v_str = '\n' + _format_dict(v) - if use_mapping: - k_str = f"'{k}'" if isinstance(k, str) else str(k) - attr_str = f'{k_str}: dict({v_str}' - else: - attr_str = f'{str(k)}=dict({v_str}' - attr_str = _indent(attr_str, indent) + ')' + end - elif isinstance(v, list): - attr_str = _format_list(k, v, use_mapping) + end - else: - attr_str = _format_basic_types(k, v, use_mapping) + end - - s.append(attr_str) - r += '\n'.join(s) - if use_mapping: - r += '}' - return r - - cfg_dict = self._cfg_dict.to_dict() - text = _format_dict(cfg_dict, outest_level=True) - # copied from setup.cfg - yapf_style = dict( - based_on_style='pep8', - blank_line_before_nested_class_or_def=True, - split_before_expression_after_opening_paren=True) - text, _ = FormatCode(text, style_config=yapf_style, verify=True) - - return text - - def __repr__(self): - return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}' - - def __len__(self): - return len(self._cfg_dict) - - def __getattr__(self, name): - return getattr(self._cfg_dict, name) - - def __getitem__(self, name): - return self._cfg_dict.__getitem__(name) - - def __setattr__(self, name, value): - if isinstance(value, dict): - value = ConfigDict(value) - self._cfg_dict.__setattr__(name, value) - - def __setitem__(self, name, value): - if isinstance(value, dict): - value = ConfigDict(value) - self._cfg_dict.__setitem__(name, value) - - def __iter__(self): - return iter(self._cfg_dict) - - def __getstate__(self): - return (self._cfg_dict, self._filename, self._text) - - def __setstate__(self, state): - _cfg_dict, _filename, _text = state - super(Config, self).__setattr__('_cfg_dict', _cfg_dict) - super(Config, self).__setattr__('_filename', _filename) - super(Config, self).__setattr__('_text', _text) - - def dump(self, file=None): - cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict() - if self.filename.endswith('.py'): - if file is None: - return self.pretty_text - else: - with open(file, 'w', encoding='utf-8') as f: - f.write(self.pretty_text) - else: - import annotator.uniformer.mmcv as mmcv - if file is None: - file_format = self.filename.split('.')[-1] - return mmcv.dump(cfg_dict, file_format=file_format) - else: - mmcv.dump(cfg_dict, file) - - def merge_from_dict(self, options, allow_list_keys=True): - """Merge list into cfg_dict. - - Merge the dict parsed by MultipleKVAction into this cfg. - - Examples: - >>> options = {'model.backbone.depth': 50, - ... 'model.backbone.with_cp':True} - >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet')))) - >>> cfg.merge_from_dict(options) - >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict') - >>> assert cfg_dict == dict( - ... 
model=dict(backbone=dict(depth=50, with_cp=True))) - - # Merge list element - >>> cfg = Config(dict(pipeline=[ - ... dict(type='LoadImage'), dict(type='LoadAnnotations')])) - >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')}) - >>> cfg.merge_from_dict(options, allow_list_keys=True) - >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict') - >>> assert cfg_dict == dict(pipeline=[ - ... dict(type='SelfLoadImage'), dict(type='LoadAnnotations')]) - - Args: - options (dict): dict of configs to merge from. - allow_list_keys (bool): If True, int string keys (e.g. '0', '1') - are allowed in ``options`` and will replace the element of the - corresponding index in the config if the config is a list. - Default: True. - """ - option_cfg_dict = {} - for full_key, v in options.items(): - d = option_cfg_dict - key_list = full_key.split('.') - for subkey in key_list[:-1]: - d.setdefault(subkey, ConfigDict()) - d = d[subkey] - subkey = key_list[-1] - d[subkey] = v - - cfg_dict = super(Config, self).__getattribute__('_cfg_dict') - super(Config, self).__setattr__( - '_cfg_dict', - Config._merge_a_into_b( - option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys)) - - -class DictAction(Action): - """ - argparse action to split an argument into KEY=VALUE form - on the first = and append to a dictionary. List options can - be passed as comma separated values, i.e 'KEY=V1,V2,V3', or with explicit - brackets, i.e. 'KEY=[V1,V2,V3]'. It also support nested brackets to build - list/tuple values. e.g. 'KEY=[(V1,V2),(V3,V4)]' - """ - - @staticmethod - def _parse_int_float_bool(val): - try: - return int(val) - except ValueError: - pass - try: - return float(val) - except ValueError: - pass - if val.lower() in ['true', 'false']: - return True if val.lower() == 'true' else False - return val - - @staticmethod - def _parse_iterable(val): - """Parse iterable values in the string. - - All elements inside '()' or '[]' are treated as iterable values. - - Args: - val (str): Value string. - - Returns: - list | tuple: The expanded list or tuple from the string. - - Examples: - >>> DictAction._parse_iterable('1,2,3') - [1, 2, 3] - >>> DictAction._parse_iterable('[a, b, c]') - ['a', 'b', 'c'] - >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]') - [(1, 2, 3), ['a', 'b'], 'c'] - """ - - def find_next_comma(string): - """Find the position of next comma in the string. - - If no ',' is found in the string, return the string length. All - chars inside '()' and '[]' are treated as one element and thus ',' - inside these brackets are ignored. - """ - assert (string.count('(') == string.count(')')) and ( - string.count('[') == string.count(']')), \ - f'Imbalanced brackets exist in {string}' - end = len(string) - for idx, char in enumerate(string): - pre = string[:idx] - # The string before this ',' is balanced - if ((char == ',') and (pre.count('(') == pre.count(')')) - and (pre.count('[') == pre.count(']'))): - end = idx - break - return end - - # Strip ' and " characters and replace whitespace. 
- val = val.strip('\'\"').replace(' ', '') - is_tuple = False - if val.startswith('(') and val.endswith(')'): - is_tuple = True - val = val[1:-1] - elif val.startswith('[') and val.endswith(']'): - val = val[1:-1] - elif ',' not in val: - # val is a single value - return DictAction._parse_int_float_bool(val) - - values = [] - while len(val) > 0: - comma_idx = find_next_comma(val) - element = DictAction._parse_iterable(val[:comma_idx]) - values.append(element) - val = val[comma_idx + 1:] - if is_tuple: - values = tuple(values) - return values - - def __call__(self, parser, namespace, values, option_string=None): - options = {} - for kv in values: - key, val = kv.split('=', maxsplit=1) - options[key] = self._parse_iterable(val) - setattr(namespace, self.dest, options) diff --git a/spaces/Arnasltlt/KlauskKnygos/README.md b/spaces/Arnasltlt/KlauskKnygos/README.md deleted file mode 100644 index a6cd587b312997849ae96f3e0feb9f6b3449fc90..0000000000000000000000000000000000000000 --- a/spaces/Arnasltlt/KlauskKnygos/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: QandA -emoji: 🏃 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -duplicated_from: Arnasltlt/KlauskD ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/johabfreq.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/johabfreq.py deleted file mode 100644 index c12969990d73388f61a6ab98fb4ee8f0f5cbc44f..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/johabfreq.py +++ /dev/null @@ -1,2382 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -# The frequency data itself is the same as euc-kr. -# This is just a mapping table to euc-kr. 
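For reference, a table like the one below is consulted during frequency analysis: a two-byte JOHAB code point is remapped to the same frequency-order index used by the EUC-KR statistics, which is why no separate JOHAB frequency table is needed (the file's own comment says the frequency data is shared with euc-kr). A minimal sketch of that lookup follows, assuming the `JOHAB_TO_EUCKR_ORDER_TABLE` dict defined below and a hypothetical `johab_frequency_order` helper; this is an illustration, not chardet's actual distribution-analysis code:

```python
# Illustrative sketch only -- not chardet's actual prober implementation.
# Assumes JOHAB_TO_EUCKR_ORDER_TABLE maps a 16-bit JOHAB code point
# (lead byte * 256 + trail byte) to an EUC-KR frequency-order index.

def johab_frequency_order(lead: int, trail: int) -> int:
    """Return the EUC-KR frequency-order index for a JOHAB byte pair,
    or -1 if the pair is not covered by the mapping table."""
    return JOHAB_TO_EUCKR_ORDER_TABLE.get(lead * 256 + trail, -1)

# Example: 0x8861 maps to order 0 (the most frequent syllable), so the
# existing EUC-KR frequency statistics can be reused for JOHAB text.
print(johab_frequency_order(0x88, 0x61))  # expected: 0
```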
- -JOHAB_TO_EUCKR_ORDER_TABLE = { - 0x8861: 0, - 0x8862: 1, - 0x8865: 2, - 0x8868: 3, - 0x8869: 4, - 0x886A: 5, - 0x886B: 6, - 0x8871: 7, - 0x8873: 8, - 0x8874: 9, - 0x8875: 10, - 0x8876: 11, - 0x8877: 12, - 0x8878: 13, - 0x8879: 14, - 0x887B: 15, - 0x887C: 16, - 0x887D: 17, - 0x8881: 18, - 0x8882: 19, - 0x8885: 20, - 0x8889: 21, - 0x8891: 22, - 0x8893: 23, - 0x8895: 24, - 0x8896: 25, - 0x8897: 26, - 0x88A1: 27, - 0x88A2: 28, - 0x88A5: 29, - 0x88A9: 30, - 0x88B5: 31, - 0x88B7: 32, - 0x88C1: 33, - 0x88C5: 34, - 0x88C9: 35, - 0x88E1: 36, - 0x88E2: 37, - 0x88E5: 38, - 0x88E8: 39, - 0x88E9: 40, - 0x88EB: 41, - 0x88F1: 42, - 0x88F3: 43, - 0x88F5: 44, - 0x88F6: 45, - 0x88F7: 46, - 0x88F8: 47, - 0x88FB: 48, - 0x88FC: 49, - 0x88FD: 50, - 0x8941: 51, - 0x8945: 52, - 0x8949: 53, - 0x8951: 54, - 0x8953: 55, - 0x8955: 56, - 0x8956: 57, - 0x8957: 58, - 0x8961: 59, - 0x8962: 60, - 0x8963: 61, - 0x8965: 62, - 0x8968: 63, - 0x8969: 64, - 0x8971: 65, - 0x8973: 66, - 0x8975: 67, - 0x8976: 68, - 0x8977: 69, - 0x897B: 70, - 0x8981: 71, - 0x8985: 72, - 0x8989: 73, - 0x8993: 74, - 0x8995: 75, - 0x89A1: 76, - 0x89A2: 77, - 0x89A5: 78, - 0x89A8: 79, - 0x89A9: 80, - 0x89AB: 81, - 0x89AD: 82, - 0x89B0: 83, - 0x89B1: 84, - 0x89B3: 85, - 0x89B5: 86, - 0x89B7: 87, - 0x89B8: 88, - 0x89C1: 89, - 0x89C2: 90, - 0x89C5: 91, - 0x89C9: 92, - 0x89CB: 93, - 0x89D1: 94, - 0x89D3: 95, - 0x89D5: 96, - 0x89D7: 97, - 0x89E1: 98, - 0x89E5: 99, - 0x89E9: 100, - 0x89F3: 101, - 0x89F6: 102, - 0x89F7: 103, - 0x8A41: 104, - 0x8A42: 105, - 0x8A45: 106, - 0x8A49: 107, - 0x8A51: 108, - 0x8A53: 109, - 0x8A55: 110, - 0x8A57: 111, - 0x8A61: 112, - 0x8A65: 113, - 0x8A69: 114, - 0x8A73: 115, - 0x8A75: 116, - 0x8A81: 117, - 0x8A82: 118, - 0x8A85: 119, - 0x8A88: 120, - 0x8A89: 121, - 0x8A8A: 122, - 0x8A8B: 123, - 0x8A90: 124, - 0x8A91: 125, - 0x8A93: 126, - 0x8A95: 127, - 0x8A97: 128, - 0x8A98: 129, - 0x8AA1: 130, - 0x8AA2: 131, - 0x8AA5: 132, - 0x8AA9: 133, - 0x8AB6: 134, - 0x8AB7: 135, - 0x8AC1: 136, - 0x8AD5: 137, - 0x8AE1: 138, - 0x8AE2: 139, - 0x8AE5: 140, - 0x8AE9: 141, - 0x8AF1: 142, - 0x8AF3: 143, - 0x8AF5: 144, - 0x8B41: 145, - 0x8B45: 146, - 0x8B49: 147, - 0x8B61: 148, - 0x8B62: 149, - 0x8B65: 150, - 0x8B68: 151, - 0x8B69: 152, - 0x8B6A: 153, - 0x8B71: 154, - 0x8B73: 155, - 0x8B75: 156, - 0x8B77: 157, - 0x8B81: 158, - 0x8BA1: 159, - 0x8BA2: 160, - 0x8BA5: 161, - 0x8BA8: 162, - 0x8BA9: 163, - 0x8BAB: 164, - 0x8BB1: 165, - 0x8BB3: 166, - 0x8BB5: 167, - 0x8BB7: 168, - 0x8BB8: 169, - 0x8BBC: 170, - 0x8C61: 171, - 0x8C62: 172, - 0x8C63: 173, - 0x8C65: 174, - 0x8C69: 175, - 0x8C6B: 176, - 0x8C71: 177, - 0x8C73: 178, - 0x8C75: 179, - 0x8C76: 180, - 0x8C77: 181, - 0x8C7B: 182, - 0x8C81: 183, - 0x8C82: 184, - 0x8C85: 185, - 0x8C89: 186, - 0x8C91: 187, - 0x8C93: 188, - 0x8C95: 189, - 0x8C96: 190, - 0x8C97: 191, - 0x8CA1: 192, - 0x8CA2: 193, - 0x8CA9: 194, - 0x8CE1: 195, - 0x8CE2: 196, - 0x8CE3: 197, - 0x8CE5: 198, - 0x8CE9: 199, - 0x8CF1: 200, - 0x8CF3: 201, - 0x8CF5: 202, - 0x8CF6: 203, - 0x8CF7: 204, - 0x8D41: 205, - 0x8D42: 206, - 0x8D45: 207, - 0x8D51: 208, - 0x8D55: 209, - 0x8D57: 210, - 0x8D61: 211, - 0x8D65: 212, - 0x8D69: 213, - 0x8D75: 214, - 0x8D76: 215, - 0x8D7B: 216, - 0x8D81: 217, - 0x8DA1: 218, - 0x8DA2: 219, - 0x8DA5: 220, - 0x8DA7: 221, - 0x8DA9: 222, - 0x8DB1: 223, - 0x8DB3: 224, - 0x8DB5: 225, - 0x8DB7: 226, - 0x8DB8: 227, - 0x8DB9: 228, - 0x8DC1: 229, - 0x8DC2: 230, - 0x8DC9: 231, - 0x8DD6: 232, - 0x8DD7: 233, - 0x8DE1: 234, - 0x8DE2: 235, - 0x8DF7: 236, - 0x8E41: 237, - 0x8E45: 238, - 0x8E49: 239, - 0x8E51: 240, - 0x8E53: 241, 
- 0x8E57: 242, - 0x8E61: 243, - 0x8E81: 244, - 0x8E82: 245, - 0x8E85: 246, - 0x8E89: 247, - 0x8E90: 248, - 0x8E91: 249, - 0x8E93: 250, - 0x8E95: 251, - 0x8E97: 252, - 0x8E98: 253, - 0x8EA1: 254, - 0x8EA9: 255, - 0x8EB6: 256, - 0x8EB7: 257, - 0x8EC1: 258, - 0x8EC2: 259, - 0x8EC5: 260, - 0x8EC9: 261, - 0x8ED1: 262, - 0x8ED3: 263, - 0x8ED6: 264, - 0x8EE1: 265, - 0x8EE5: 266, - 0x8EE9: 267, - 0x8EF1: 268, - 0x8EF3: 269, - 0x8F41: 270, - 0x8F61: 271, - 0x8F62: 272, - 0x8F65: 273, - 0x8F67: 274, - 0x8F69: 275, - 0x8F6B: 276, - 0x8F70: 277, - 0x8F71: 278, - 0x8F73: 279, - 0x8F75: 280, - 0x8F77: 281, - 0x8F7B: 282, - 0x8FA1: 283, - 0x8FA2: 284, - 0x8FA5: 285, - 0x8FA9: 286, - 0x8FB1: 287, - 0x8FB3: 288, - 0x8FB5: 289, - 0x8FB7: 290, - 0x9061: 291, - 0x9062: 292, - 0x9063: 293, - 0x9065: 294, - 0x9068: 295, - 0x9069: 296, - 0x906A: 297, - 0x906B: 298, - 0x9071: 299, - 0x9073: 300, - 0x9075: 301, - 0x9076: 302, - 0x9077: 303, - 0x9078: 304, - 0x9079: 305, - 0x907B: 306, - 0x907D: 307, - 0x9081: 308, - 0x9082: 309, - 0x9085: 310, - 0x9089: 311, - 0x9091: 312, - 0x9093: 313, - 0x9095: 314, - 0x9096: 315, - 0x9097: 316, - 0x90A1: 317, - 0x90A2: 318, - 0x90A5: 319, - 0x90A9: 320, - 0x90B1: 321, - 0x90B7: 322, - 0x90E1: 323, - 0x90E2: 324, - 0x90E4: 325, - 0x90E5: 326, - 0x90E9: 327, - 0x90EB: 328, - 0x90EC: 329, - 0x90F1: 330, - 0x90F3: 331, - 0x90F5: 332, - 0x90F6: 333, - 0x90F7: 334, - 0x90FD: 335, - 0x9141: 336, - 0x9142: 337, - 0x9145: 338, - 0x9149: 339, - 0x9151: 340, - 0x9153: 341, - 0x9155: 342, - 0x9156: 343, - 0x9157: 344, - 0x9161: 345, - 0x9162: 346, - 0x9165: 347, - 0x9169: 348, - 0x9171: 349, - 0x9173: 350, - 0x9176: 351, - 0x9177: 352, - 0x917A: 353, - 0x9181: 354, - 0x9185: 355, - 0x91A1: 356, - 0x91A2: 357, - 0x91A5: 358, - 0x91A9: 359, - 0x91AB: 360, - 0x91B1: 361, - 0x91B3: 362, - 0x91B5: 363, - 0x91B7: 364, - 0x91BC: 365, - 0x91BD: 366, - 0x91C1: 367, - 0x91C5: 368, - 0x91C9: 369, - 0x91D6: 370, - 0x9241: 371, - 0x9245: 372, - 0x9249: 373, - 0x9251: 374, - 0x9253: 375, - 0x9255: 376, - 0x9261: 377, - 0x9262: 378, - 0x9265: 379, - 0x9269: 380, - 0x9273: 381, - 0x9275: 382, - 0x9277: 383, - 0x9281: 384, - 0x9282: 385, - 0x9285: 386, - 0x9288: 387, - 0x9289: 388, - 0x9291: 389, - 0x9293: 390, - 0x9295: 391, - 0x9297: 392, - 0x92A1: 393, - 0x92B6: 394, - 0x92C1: 395, - 0x92E1: 396, - 0x92E5: 397, - 0x92E9: 398, - 0x92F1: 399, - 0x92F3: 400, - 0x9341: 401, - 0x9342: 402, - 0x9349: 403, - 0x9351: 404, - 0x9353: 405, - 0x9357: 406, - 0x9361: 407, - 0x9362: 408, - 0x9365: 409, - 0x9369: 410, - 0x936A: 411, - 0x936B: 412, - 0x9371: 413, - 0x9373: 414, - 0x9375: 415, - 0x9377: 416, - 0x9378: 417, - 0x937C: 418, - 0x9381: 419, - 0x9385: 420, - 0x9389: 421, - 0x93A1: 422, - 0x93A2: 423, - 0x93A5: 424, - 0x93A9: 425, - 0x93AB: 426, - 0x93B1: 427, - 0x93B3: 428, - 0x93B5: 429, - 0x93B7: 430, - 0x93BC: 431, - 0x9461: 432, - 0x9462: 433, - 0x9463: 434, - 0x9465: 435, - 0x9468: 436, - 0x9469: 437, - 0x946A: 438, - 0x946B: 439, - 0x946C: 440, - 0x9470: 441, - 0x9471: 442, - 0x9473: 443, - 0x9475: 444, - 0x9476: 445, - 0x9477: 446, - 0x9478: 447, - 0x9479: 448, - 0x947D: 449, - 0x9481: 450, - 0x9482: 451, - 0x9485: 452, - 0x9489: 453, - 0x9491: 454, - 0x9493: 455, - 0x9495: 456, - 0x9496: 457, - 0x9497: 458, - 0x94A1: 459, - 0x94E1: 460, - 0x94E2: 461, - 0x94E3: 462, - 0x94E5: 463, - 0x94E8: 464, - 0x94E9: 465, - 0x94EB: 466, - 0x94EC: 467, - 0x94F1: 468, - 0x94F3: 469, - 0x94F5: 470, - 0x94F7: 471, - 0x94F9: 472, - 0x94FC: 473, - 0x9541: 474, - 0x9542: 475, - 0x9545: 476, - 0x9549: 477, - 0x9551: 478, 
- 0x9553: 479, - 0x9555: 480, - 0x9556: 481, - 0x9557: 482, - 0x9561: 483, - 0x9565: 484, - 0x9569: 485, - 0x9576: 486, - 0x9577: 487, - 0x9581: 488, - 0x9585: 489, - 0x95A1: 490, - 0x95A2: 491, - 0x95A5: 492, - 0x95A8: 493, - 0x95A9: 494, - 0x95AB: 495, - 0x95AD: 496, - 0x95B1: 497, - 0x95B3: 498, - 0x95B5: 499, - 0x95B7: 500, - 0x95B9: 501, - 0x95BB: 502, - 0x95C1: 503, - 0x95C5: 504, - 0x95C9: 505, - 0x95E1: 506, - 0x95F6: 507, - 0x9641: 508, - 0x9645: 509, - 0x9649: 510, - 0x9651: 511, - 0x9653: 512, - 0x9655: 513, - 0x9661: 514, - 0x9681: 515, - 0x9682: 516, - 0x9685: 517, - 0x9689: 518, - 0x9691: 519, - 0x9693: 520, - 0x9695: 521, - 0x9697: 522, - 0x96A1: 523, - 0x96B6: 524, - 0x96C1: 525, - 0x96D7: 526, - 0x96E1: 527, - 0x96E5: 528, - 0x96E9: 529, - 0x96F3: 530, - 0x96F5: 531, - 0x96F7: 532, - 0x9741: 533, - 0x9745: 534, - 0x9749: 535, - 0x9751: 536, - 0x9757: 537, - 0x9761: 538, - 0x9762: 539, - 0x9765: 540, - 0x9768: 541, - 0x9769: 542, - 0x976B: 543, - 0x9771: 544, - 0x9773: 545, - 0x9775: 546, - 0x9777: 547, - 0x9781: 548, - 0x97A1: 549, - 0x97A2: 550, - 0x97A5: 551, - 0x97A8: 552, - 0x97A9: 553, - 0x97B1: 554, - 0x97B3: 555, - 0x97B5: 556, - 0x97B6: 557, - 0x97B7: 558, - 0x97B8: 559, - 0x9861: 560, - 0x9862: 561, - 0x9865: 562, - 0x9869: 563, - 0x9871: 564, - 0x9873: 565, - 0x9875: 566, - 0x9876: 567, - 0x9877: 568, - 0x987D: 569, - 0x9881: 570, - 0x9882: 571, - 0x9885: 572, - 0x9889: 573, - 0x9891: 574, - 0x9893: 575, - 0x9895: 576, - 0x9896: 577, - 0x9897: 578, - 0x98E1: 579, - 0x98E2: 580, - 0x98E5: 581, - 0x98E9: 582, - 0x98EB: 583, - 0x98EC: 584, - 0x98F1: 585, - 0x98F3: 586, - 0x98F5: 587, - 0x98F6: 588, - 0x98F7: 589, - 0x98FD: 590, - 0x9941: 591, - 0x9942: 592, - 0x9945: 593, - 0x9949: 594, - 0x9951: 595, - 0x9953: 596, - 0x9955: 597, - 0x9956: 598, - 0x9957: 599, - 0x9961: 600, - 0x9976: 601, - 0x99A1: 602, - 0x99A2: 603, - 0x99A5: 604, - 0x99A9: 605, - 0x99B7: 606, - 0x99C1: 607, - 0x99C9: 608, - 0x99E1: 609, - 0x9A41: 610, - 0x9A45: 611, - 0x9A81: 612, - 0x9A82: 613, - 0x9A85: 614, - 0x9A89: 615, - 0x9A90: 616, - 0x9A91: 617, - 0x9A97: 618, - 0x9AC1: 619, - 0x9AE1: 620, - 0x9AE5: 621, - 0x9AE9: 622, - 0x9AF1: 623, - 0x9AF3: 624, - 0x9AF7: 625, - 0x9B61: 626, - 0x9B62: 627, - 0x9B65: 628, - 0x9B68: 629, - 0x9B69: 630, - 0x9B71: 631, - 0x9B73: 632, - 0x9B75: 633, - 0x9B81: 634, - 0x9B85: 635, - 0x9B89: 636, - 0x9B91: 637, - 0x9B93: 638, - 0x9BA1: 639, - 0x9BA5: 640, - 0x9BA9: 641, - 0x9BB1: 642, - 0x9BB3: 643, - 0x9BB5: 644, - 0x9BB7: 645, - 0x9C61: 646, - 0x9C62: 647, - 0x9C65: 648, - 0x9C69: 649, - 0x9C71: 650, - 0x9C73: 651, - 0x9C75: 652, - 0x9C76: 653, - 0x9C77: 654, - 0x9C78: 655, - 0x9C7C: 656, - 0x9C7D: 657, - 0x9C81: 658, - 0x9C82: 659, - 0x9C85: 660, - 0x9C89: 661, - 0x9C91: 662, - 0x9C93: 663, - 0x9C95: 664, - 0x9C96: 665, - 0x9C97: 666, - 0x9CA1: 667, - 0x9CA2: 668, - 0x9CA5: 669, - 0x9CB5: 670, - 0x9CB7: 671, - 0x9CE1: 672, - 0x9CE2: 673, - 0x9CE5: 674, - 0x9CE9: 675, - 0x9CF1: 676, - 0x9CF3: 677, - 0x9CF5: 678, - 0x9CF6: 679, - 0x9CF7: 680, - 0x9CFD: 681, - 0x9D41: 682, - 0x9D42: 683, - 0x9D45: 684, - 0x9D49: 685, - 0x9D51: 686, - 0x9D53: 687, - 0x9D55: 688, - 0x9D57: 689, - 0x9D61: 690, - 0x9D62: 691, - 0x9D65: 692, - 0x9D69: 693, - 0x9D71: 694, - 0x9D73: 695, - 0x9D75: 696, - 0x9D76: 697, - 0x9D77: 698, - 0x9D81: 699, - 0x9D85: 700, - 0x9D93: 701, - 0x9D95: 702, - 0x9DA1: 703, - 0x9DA2: 704, - 0x9DA5: 705, - 0x9DA9: 706, - 0x9DB1: 707, - 0x9DB3: 708, - 0x9DB5: 709, - 0x9DB7: 710, - 0x9DC1: 711, - 0x9DC5: 712, - 0x9DD7: 713, - 0x9DF6: 714, - 0x9E41: 715, 
- 0x9E45: 716, - 0x9E49: 717, - 0x9E51: 718, - 0x9E53: 719, - 0x9E55: 720, - 0x9E57: 721, - 0x9E61: 722, - 0x9E65: 723, - 0x9E69: 724, - 0x9E73: 725, - 0x9E75: 726, - 0x9E77: 727, - 0x9E81: 728, - 0x9E82: 729, - 0x9E85: 730, - 0x9E89: 731, - 0x9E91: 732, - 0x9E93: 733, - 0x9E95: 734, - 0x9E97: 735, - 0x9EA1: 736, - 0x9EB6: 737, - 0x9EC1: 738, - 0x9EE1: 739, - 0x9EE2: 740, - 0x9EE5: 741, - 0x9EE9: 742, - 0x9EF1: 743, - 0x9EF5: 744, - 0x9EF7: 745, - 0x9F41: 746, - 0x9F42: 747, - 0x9F45: 748, - 0x9F49: 749, - 0x9F51: 750, - 0x9F53: 751, - 0x9F55: 752, - 0x9F57: 753, - 0x9F61: 754, - 0x9F62: 755, - 0x9F65: 756, - 0x9F69: 757, - 0x9F71: 758, - 0x9F73: 759, - 0x9F75: 760, - 0x9F77: 761, - 0x9F78: 762, - 0x9F7B: 763, - 0x9F7C: 764, - 0x9FA1: 765, - 0x9FA2: 766, - 0x9FA5: 767, - 0x9FA9: 768, - 0x9FB1: 769, - 0x9FB3: 770, - 0x9FB5: 771, - 0x9FB7: 772, - 0xA061: 773, - 0xA062: 774, - 0xA065: 775, - 0xA067: 776, - 0xA068: 777, - 0xA069: 778, - 0xA06A: 779, - 0xA06B: 780, - 0xA071: 781, - 0xA073: 782, - 0xA075: 783, - 0xA077: 784, - 0xA078: 785, - 0xA07B: 786, - 0xA07D: 787, - 0xA081: 788, - 0xA082: 789, - 0xA085: 790, - 0xA089: 791, - 0xA091: 792, - 0xA093: 793, - 0xA095: 794, - 0xA096: 795, - 0xA097: 796, - 0xA098: 797, - 0xA0A1: 798, - 0xA0A2: 799, - 0xA0A9: 800, - 0xA0B7: 801, - 0xA0E1: 802, - 0xA0E2: 803, - 0xA0E5: 804, - 0xA0E9: 805, - 0xA0EB: 806, - 0xA0F1: 807, - 0xA0F3: 808, - 0xA0F5: 809, - 0xA0F7: 810, - 0xA0F8: 811, - 0xA0FD: 812, - 0xA141: 813, - 0xA142: 814, - 0xA145: 815, - 0xA149: 816, - 0xA151: 817, - 0xA153: 818, - 0xA155: 819, - 0xA156: 820, - 0xA157: 821, - 0xA161: 822, - 0xA162: 823, - 0xA165: 824, - 0xA169: 825, - 0xA175: 826, - 0xA176: 827, - 0xA177: 828, - 0xA179: 829, - 0xA181: 830, - 0xA1A1: 831, - 0xA1A2: 832, - 0xA1A4: 833, - 0xA1A5: 834, - 0xA1A9: 835, - 0xA1AB: 836, - 0xA1B1: 837, - 0xA1B3: 838, - 0xA1B5: 839, - 0xA1B7: 840, - 0xA1C1: 841, - 0xA1C5: 842, - 0xA1D6: 843, - 0xA1D7: 844, - 0xA241: 845, - 0xA245: 846, - 0xA249: 847, - 0xA253: 848, - 0xA255: 849, - 0xA257: 850, - 0xA261: 851, - 0xA265: 852, - 0xA269: 853, - 0xA273: 854, - 0xA275: 855, - 0xA281: 856, - 0xA282: 857, - 0xA283: 858, - 0xA285: 859, - 0xA288: 860, - 0xA289: 861, - 0xA28A: 862, - 0xA28B: 863, - 0xA291: 864, - 0xA293: 865, - 0xA295: 866, - 0xA297: 867, - 0xA29B: 868, - 0xA29D: 869, - 0xA2A1: 870, - 0xA2A5: 871, - 0xA2A9: 872, - 0xA2B3: 873, - 0xA2B5: 874, - 0xA2C1: 875, - 0xA2E1: 876, - 0xA2E5: 877, - 0xA2E9: 878, - 0xA341: 879, - 0xA345: 880, - 0xA349: 881, - 0xA351: 882, - 0xA355: 883, - 0xA361: 884, - 0xA365: 885, - 0xA369: 886, - 0xA371: 887, - 0xA375: 888, - 0xA3A1: 889, - 0xA3A2: 890, - 0xA3A5: 891, - 0xA3A8: 892, - 0xA3A9: 893, - 0xA3AB: 894, - 0xA3B1: 895, - 0xA3B3: 896, - 0xA3B5: 897, - 0xA3B6: 898, - 0xA3B7: 899, - 0xA3B9: 900, - 0xA3BB: 901, - 0xA461: 902, - 0xA462: 903, - 0xA463: 904, - 0xA464: 905, - 0xA465: 906, - 0xA468: 907, - 0xA469: 908, - 0xA46A: 909, - 0xA46B: 910, - 0xA46C: 911, - 0xA471: 912, - 0xA473: 913, - 0xA475: 914, - 0xA477: 915, - 0xA47B: 916, - 0xA481: 917, - 0xA482: 918, - 0xA485: 919, - 0xA489: 920, - 0xA491: 921, - 0xA493: 922, - 0xA495: 923, - 0xA496: 924, - 0xA497: 925, - 0xA49B: 926, - 0xA4A1: 927, - 0xA4A2: 928, - 0xA4A5: 929, - 0xA4B3: 930, - 0xA4E1: 931, - 0xA4E2: 932, - 0xA4E5: 933, - 0xA4E8: 934, - 0xA4E9: 935, - 0xA4EB: 936, - 0xA4F1: 937, - 0xA4F3: 938, - 0xA4F5: 939, - 0xA4F7: 940, - 0xA4F8: 941, - 0xA541: 942, - 0xA542: 943, - 0xA545: 944, - 0xA548: 945, - 0xA549: 946, - 0xA551: 947, - 0xA553: 948, - 0xA555: 949, - 0xA556: 950, - 0xA557: 951, - 0xA561: 952, 
- 0xA562: 953, - 0xA565: 954, - 0xA569: 955, - 0xA573: 956, - 0xA575: 957, - 0xA576: 958, - 0xA577: 959, - 0xA57B: 960, - 0xA581: 961, - 0xA585: 962, - 0xA5A1: 963, - 0xA5A2: 964, - 0xA5A3: 965, - 0xA5A5: 966, - 0xA5A9: 967, - 0xA5B1: 968, - 0xA5B3: 969, - 0xA5B5: 970, - 0xA5B7: 971, - 0xA5C1: 972, - 0xA5C5: 973, - 0xA5D6: 974, - 0xA5E1: 975, - 0xA5F6: 976, - 0xA641: 977, - 0xA642: 978, - 0xA645: 979, - 0xA649: 980, - 0xA651: 981, - 0xA653: 982, - 0xA661: 983, - 0xA665: 984, - 0xA681: 985, - 0xA682: 986, - 0xA685: 987, - 0xA688: 988, - 0xA689: 989, - 0xA68A: 990, - 0xA68B: 991, - 0xA691: 992, - 0xA693: 993, - 0xA695: 994, - 0xA697: 995, - 0xA69B: 996, - 0xA69C: 997, - 0xA6A1: 998, - 0xA6A9: 999, - 0xA6B6: 1000, - 0xA6C1: 1001, - 0xA6E1: 1002, - 0xA6E2: 1003, - 0xA6E5: 1004, - 0xA6E9: 1005, - 0xA6F7: 1006, - 0xA741: 1007, - 0xA745: 1008, - 0xA749: 1009, - 0xA751: 1010, - 0xA755: 1011, - 0xA757: 1012, - 0xA761: 1013, - 0xA762: 1014, - 0xA765: 1015, - 0xA769: 1016, - 0xA771: 1017, - 0xA773: 1018, - 0xA775: 1019, - 0xA7A1: 1020, - 0xA7A2: 1021, - 0xA7A5: 1022, - 0xA7A9: 1023, - 0xA7AB: 1024, - 0xA7B1: 1025, - 0xA7B3: 1026, - 0xA7B5: 1027, - 0xA7B7: 1028, - 0xA7B8: 1029, - 0xA7B9: 1030, - 0xA861: 1031, - 0xA862: 1032, - 0xA865: 1033, - 0xA869: 1034, - 0xA86B: 1035, - 0xA871: 1036, - 0xA873: 1037, - 0xA875: 1038, - 0xA876: 1039, - 0xA877: 1040, - 0xA87D: 1041, - 0xA881: 1042, - 0xA882: 1043, - 0xA885: 1044, - 0xA889: 1045, - 0xA891: 1046, - 0xA893: 1047, - 0xA895: 1048, - 0xA896: 1049, - 0xA897: 1050, - 0xA8A1: 1051, - 0xA8A2: 1052, - 0xA8B1: 1053, - 0xA8E1: 1054, - 0xA8E2: 1055, - 0xA8E5: 1056, - 0xA8E8: 1057, - 0xA8E9: 1058, - 0xA8F1: 1059, - 0xA8F5: 1060, - 0xA8F6: 1061, - 0xA8F7: 1062, - 0xA941: 1063, - 0xA957: 1064, - 0xA961: 1065, - 0xA962: 1066, - 0xA971: 1067, - 0xA973: 1068, - 0xA975: 1069, - 0xA976: 1070, - 0xA977: 1071, - 0xA9A1: 1072, - 0xA9A2: 1073, - 0xA9A5: 1074, - 0xA9A9: 1075, - 0xA9B1: 1076, - 0xA9B3: 1077, - 0xA9B7: 1078, - 0xAA41: 1079, - 0xAA61: 1080, - 0xAA77: 1081, - 0xAA81: 1082, - 0xAA82: 1083, - 0xAA85: 1084, - 0xAA89: 1085, - 0xAA91: 1086, - 0xAA95: 1087, - 0xAA97: 1088, - 0xAB41: 1089, - 0xAB57: 1090, - 0xAB61: 1091, - 0xAB65: 1092, - 0xAB69: 1093, - 0xAB71: 1094, - 0xAB73: 1095, - 0xABA1: 1096, - 0xABA2: 1097, - 0xABA5: 1098, - 0xABA9: 1099, - 0xABB1: 1100, - 0xABB3: 1101, - 0xABB5: 1102, - 0xABB7: 1103, - 0xAC61: 1104, - 0xAC62: 1105, - 0xAC64: 1106, - 0xAC65: 1107, - 0xAC68: 1108, - 0xAC69: 1109, - 0xAC6A: 1110, - 0xAC6B: 1111, - 0xAC71: 1112, - 0xAC73: 1113, - 0xAC75: 1114, - 0xAC76: 1115, - 0xAC77: 1116, - 0xAC7B: 1117, - 0xAC81: 1118, - 0xAC82: 1119, - 0xAC85: 1120, - 0xAC89: 1121, - 0xAC91: 1122, - 0xAC93: 1123, - 0xAC95: 1124, - 0xAC96: 1125, - 0xAC97: 1126, - 0xACA1: 1127, - 0xACA2: 1128, - 0xACA5: 1129, - 0xACA9: 1130, - 0xACB1: 1131, - 0xACB3: 1132, - 0xACB5: 1133, - 0xACB7: 1134, - 0xACC1: 1135, - 0xACC5: 1136, - 0xACC9: 1137, - 0xACD1: 1138, - 0xACD7: 1139, - 0xACE1: 1140, - 0xACE2: 1141, - 0xACE3: 1142, - 0xACE4: 1143, - 0xACE5: 1144, - 0xACE8: 1145, - 0xACE9: 1146, - 0xACEB: 1147, - 0xACEC: 1148, - 0xACF1: 1149, - 0xACF3: 1150, - 0xACF5: 1151, - 0xACF6: 1152, - 0xACF7: 1153, - 0xACFC: 1154, - 0xAD41: 1155, - 0xAD42: 1156, - 0xAD45: 1157, - 0xAD49: 1158, - 0xAD51: 1159, - 0xAD53: 1160, - 0xAD55: 1161, - 0xAD56: 1162, - 0xAD57: 1163, - 0xAD61: 1164, - 0xAD62: 1165, - 0xAD65: 1166, - 0xAD69: 1167, - 0xAD71: 1168, - 0xAD73: 1169, - 0xAD75: 1170, - 0xAD76: 1171, - 0xAD77: 1172, - 0xAD81: 1173, - 0xAD85: 1174, - 0xAD89: 1175, - 0xAD97: 1176, - 0xADA1: 1177, - 
0xADA2: 1178, - 0xADA3: 1179, - 0xADA5: 1180, - 0xADA9: 1181, - 0xADAB: 1182, - 0xADB1: 1183, - 0xADB3: 1184, - 0xADB5: 1185, - 0xADB7: 1186, - 0xADBB: 1187, - 0xADC1: 1188, - 0xADC2: 1189, - 0xADC5: 1190, - 0xADC9: 1191, - 0xADD7: 1192, - 0xADE1: 1193, - 0xADE5: 1194, - 0xADE9: 1195, - 0xADF1: 1196, - 0xADF5: 1197, - 0xADF6: 1198, - 0xAE41: 1199, - 0xAE45: 1200, - 0xAE49: 1201, - 0xAE51: 1202, - 0xAE53: 1203, - 0xAE55: 1204, - 0xAE61: 1205, - 0xAE62: 1206, - 0xAE65: 1207, - 0xAE69: 1208, - 0xAE71: 1209, - 0xAE73: 1210, - 0xAE75: 1211, - 0xAE77: 1212, - 0xAE81: 1213, - 0xAE82: 1214, - 0xAE85: 1215, - 0xAE88: 1216, - 0xAE89: 1217, - 0xAE91: 1218, - 0xAE93: 1219, - 0xAE95: 1220, - 0xAE97: 1221, - 0xAE99: 1222, - 0xAE9B: 1223, - 0xAE9C: 1224, - 0xAEA1: 1225, - 0xAEB6: 1226, - 0xAEC1: 1227, - 0xAEC2: 1228, - 0xAEC5: 1229, - 0xAEC9: 1230, - 0xAED1: 1231, - 0xAED7: 1232, - 0xAEE1: 1233, - 0xAEE2: 1234, - 0xAEE5: 1235, - 0xAEE9: 1236, - 0xAEF1: 1237, - 0xAEF3: 1238, - 0xAEF5: 1239, - 0xAEF7: 1240, - 0xAF41: 1241, - 0xAF42: 1242, - 0xAF49: 1243, - 0xAF51: 1244, - 0xAF55: 1245, - 0xAF57: 1246, - 0xAF61: 1247, - 0xAF62: 1248, - 0xAF65: 1249, - 0xAF69: 1250, - 0xAF6A: 1251, - 0xAF71: 1252, - 0xAF73: 1253, - 0xAF75: 1254, - 0xAF77: 1255, - 0xAFA1: 1256, - 0xAFA2: 1257, - 0xAFA5: 1258, - 0xAFA8: 1259, - 0xAFA9: 1260, - 0xAFB0: 1261, - 0xAFB1: 1262, - 0xAFB3: 1263, - 0xAFB5: 1264, - 0xAFB7: 1265, - 0xAFBC: 1266, - 0xB061: 1267, - 0xB062: 1268, - 0xB064: 1269, - 0xB065: 1270, - 0xB069: 1271, - 0xB071: 1272, - 0xB073: 1273, - 0xB076: 1274, - 0xB077: 1275, - 0xB07D: 1276, - 0xB081: 1277, - 0xB082: 1278, - 0xB085: 1279, - 0xB089: 1280, - 0xB091: 1281, - 0xB093: 1282, - 0xB096: 1283, - 0xB097: 1284, - 0xB0B7: 1285, - 0xB0E1: 1286, - 0xB0E2: 1287, - 0xB0E5: 1288, - 0xB0E9: 1289, - 0xB0EB: 1290, - 0xB0F1: 1291, - 0xB0F3: 1292, - 0xB0F6: 1293, - 0xB0F7: 1294, - 0xB141: 1295, - 0xB145: 1296, - 0xB149: 1297, - 0xB185: 1298, - 0xB1A1: 1299, - 0xB1A2: 1300, - 0xB1A5: 1301, - 0xB1A8: 1302, - 0xB1A9: 1303, - 0xB1AB: 1304, - 0xB1B1: 1305, - 0xB1B3: 1306, - 0xB1B7: 1307, - 0xB1C1: 1308, - 0xB1C2: 1309, - 0xB1C5: 1310, - 0xB1D6: 1311, - 0xB1E1: 1312, - 0xB1F6: 1313, - 0xB241: 1314, - 0xB245: 1315, - 0xB249: 1316, - 0xB251: 1317, - 0xB253: 1318, - 0xB261: 1319, - 0xB281: 1320, - 0xB282: 1321, - 0xB285: 1322, - 0xB289: 1323, - 0xB291: 1324, - 0xB293: 1325, - 0xB297: 1326, - 0xB2A1: 1327, - 0xB2B6: 1328, - 0xB2C1: 1329, - 0xB2E1: 1330, - 0xB2E5: 1331, - 0xB357: 1332, - 0xB361: 1333, - 0xB362: 1334, - 0xB365: 1335, - 0xB369: 1336, - 0xB36B: 1337, - 0xB370: 1338, - 0xB371: 1339, - 0xB373: 1340, - 0xB381: 1341, - 0xB385: 1342, - 0xB389: 1343, - 0xB391: 1344, - 0xB3A1: 1345, - 0xB3A2: 1346, - 0xB3A5: 1347, - 0xB3A9: 1348, - 0xB3B1: 1349, - 0xB3B3: 1350, - 0xB3B5: 1351, - 0xB3B7: 1352, - 0xB461: 1353, - 0xB462: 1354, - 0xB465: 1355, - 0xB466: 1356, - 0xB467: 1357, - 0xB469: 1358, - 0xB46A: 1359, - 0xB46B: 1360, - 0xB470: 1361, - 0xB471: 1362, - 0xB473: 1363, - 0xB475: 1364, - 0xB476: 1365, - 0xB477: 1366, - 0xB47B: 1367, - 0xB47C: 1368, - 0xB481: 1369, - 0xB482: 1370, - 0xB485: 1371, - 0xB489: 1372, - 0xB491: 1373, - 0xB493: 1374, - 0xB495: 1375, - 0xB496: 1376, - 0xB497: 1377, - 0xB4A1: 1378, - 0xB4A2: 1379, - 0xB4A5: 1380, - 0xB4A9: 1381, - 0xB4AC: 1382, - 0xB4B1: 1383, - 0xB4B3: 1384, - 0xB4B5: 1385, - 0xB4B7: 1386, - 0xB4BB: 1387, - 0xB4BD: 1388, - 0xB4C1: 1389, - 0xB4C5: 1390, - 0xB4C9: 1391, - 0xB4D3: 1392, - 0xB4E1: 1393, - 0xB4E2: 1394, - 0xB4E5: 1395, - 0xB4E6: 1396, - 0xB4E8: 1397, - 0xB4E9: 1398, - 0xB4EA: 1399, - 
0xB4EB: 1400, - 0xB4F1: 1401, - 0xB4F3: 1402, - 0xB4F4: 1403, - 0xB4F5: 1404, - 0xB4F6: 1405, - 0xB4F7: 1406, - 0xB4F8: 1407, - 0xB4FA: 1408, - 0xB4FC: 1409, - 0xB541: 1410, - 0xB542: 1411, - 0xB545: 1412, - 0xB549: 1413, - 0xB551: 1414, - 0xB553: 1415, - 0xB555: 1416, - 0xB557: 1417, - 0xB561: 1418, - 0xB562: 1419, - 0xB563: 1420, - 0xB565: 1421, - 0xB569: 1422, - 0xB56B: 1423, - 0xB56C: 1424, - 0xB571: 1425, - 0xB573: 1426, - 0xB574: 1427, - 0xB575: 1428, - 0xB576: 1429, - 0xB577: 1430, - 0xB57B: 1431, - 0xB57C: 1432, - 0xB57D: 1433, - 0xB581: 1434, - 0xB585: 1435, - 0xB589: 1436, - 0xB591: 1437, - 0xB593: 1438, - 0xB595: 1439, - 0xB596: 1440, - 0xB5A1: 1441, - 0xB5A2: 1442, - 0xB5A5: 1443, - 0xB5A9: 1444, - 0xB5AA: 1445, - 0xB5AB: 1446, - 0xB5AD: 1447, - 0xB5B0: 1448, - 0xB5B1: 1449, - 0xB5B3: 1450, - 0xB5B5: 1451, - 0xB5B7: 1452, - 0xB5B9: 1453, - 0xB5C1: 1454, - 0xB5C2: 1455, - 0xB5C5: 1456, - 0xB5C9: 1457, - 0xB5D1: 1458, - 0xB5D3: 1459, - 0xB5D5: 1460, - 0xB5D6: 1461, - 0xB5D7: 1462, - 0xB5E1: 1463, - 0xB5E2: 1464, - 0xB5E5: 1465, - 0xB5F1: 1466, - 0xB5F5: 1467, - 0xB5F7: 1468, - 0xB641: 1469, - 0xB642: 1470, - 0xB645: 1471, - 0xB649: 1472, - 0xB651: 1473, - 0xB653: 1474, - 0xB655: 1475, - 0xB657: 1476, - 0xB661: 1477, - 0xB662: 1478, - 0xB665: 1479, - 0xB669: 1480, - 0xB671: 1481, - 0xB673: 1482, - 0xB675: 1483, - 0xB677: 1484, - 0xB681: 1485, - 0xB682: 1486, - 0xB685: 1487, - 0xB689: 1488, - 0xB68A: 1489, - 0xB68B: 1490, - 0xB691: 1491, - 0xB693: 1492, - 0xB695: 1493, - 0xB697: 1494, - 0xB6A1: 1495, - 0xB6A2: 1496, - 0xB6A5: 1497, - 0xB6A9: 1498, - 0xB6B1: 1499, - 0xB6B3: 1500, - 0xB6B6: 1501, - 0xB6B7: 1502, - 0xB6C1: 1503, - 0xB6C2: 1504, - 0xB6C5: 1505, - 0xB6C9: 1506, - 0xB6D1: 1507, - 0xB6D3: 1508, - 0xB6D7: 1509, - 0xB6E1: 1510, - 0xB6E2: 1511, - 0xB6E5: 1512, - 0xB6E9: 1513, - 0xB6F1: 1514, - 0xB6F3: 1515, - 0xB6F5: 1516, - 0xB6F7: 1517, - 0xB741: 1518, - 0xB742: 1519, - 0xB745: 1520, - 0xB749: 1521, - 0xB751: 1522, - 0xB753: 1523, - 0xB755: 1524, - 0xB757: 1525, - 0xB759: 1526, - 0xB761: 1527, - 0xB762: 1528, - 0xB765: 1529, - 0xB769: 1530, - 0xB76F: 1531, - 0xB771: 1532, - 0xB773: 1533, - 0xB775: 1534, - 0xB777: 1535, - 0xB778: 1536, - 0xB779: 1537, - 0xB77A: 1538, - 0xB77B: 1539, - 0xB77C: 1540, - 0xB77D: 1541, - 0xB781: 1542, - 0xB785: 1543, - 0xB789: 1544, - 0xB791: 1545, - 0xB795: 1546, - 0xB7A1: 1547, - 0xB7A2: 1548, - 0xB7A5: 1549, - 0xB7A9: 1550, - 0xB7AA: 1551, - 0xB7AB: 1552, - 0xB7B0: 1553, - 0xB7B1: 1554, - 0xB7B3: 1555, - 0xB7B5: 1556, - 0xB7B6: 1557, - 0xB7B7: 1558, - 0xB7B8: 1559, - 0xB7BC: 1560, - 0xB861: 1561, - 0xB862: 1562, - 0xB865: 1563, - 0xB867: 1564, - 0xB868: 1565, - 0xB869: 1566, - 0xB86B: 1567, - 0xB871: 1568, - 0xB873: 1569, - 0xB875: 1570, - 0xB876: 1571, - 0xB877: 1572, - 0xB878: 1573, - 0xB881: 1574, - 0xB882: 1575, - 0xB885: 1576, - 0xB889: 1577, - 0xB891: 1578, - 0xB893: 1579, - 0xB895: 1580, - 0xB896: 1581, - 0xB897: 1582, - 0xB8A1: 1583, - 0xB8A2: 1584, - 0xB8A5: 1585, - 0xB8A7: 1586, - 0xB8A9: 1587, - 0xB8B1: 1588, - 0xB8B7: 1589, - 0xB8C1: 1590, - 0xB8C5: 1591, - 0xB8C9: 1592, - 0xB8E1: 1593, - 0xB8E2: 1594, - 0xB8E5: 1595, - 0xB8E9: 1596, - 0xB8EB: 1597, - 0xB8F1: 1598, - 0xB8F3: 1599, - 0xB8F5: 1600, - 0xB8F7: 1601, - 0xB8F8: 1602, - 0xB941: 1603, - 0xB942: 1604, - 0xB945: 1605, - 0xB949: 1606, - 0xB951: 1607, - 0xB953: 1608, - 0xB955: 1609, - 0xB957: 1610, - 0xB961: 1611, - 0xB965: 1612, - 0xB969: 1613, - 0xB971: 1614, - 0xB973: 1615, - 0xB976: 1616, - 0xB977: 1617, - 0xB981: 1618, - 0xB9A1: 1619, - 0xB9A2: 1620, - 0xB9A5: 1621, - 
0xB9A9: 1622, - 0xB9AB: 1623, - 0xB9B1: 1624, - 0xB9B3: 1625, - 0xB9B5: 1626, - 0xB9B7: 1627, - 0xB9B8: 1628, - 0xB9B9: 1629, - 0xB9BD: 1630, - 0xB9C1: 1631, - 0xB9C2: 1632, - 0xB9C9: 1633, - 0xB9D3: 1634, - 0xB9D5: 1635, - 0xB9D7: 1636, - 0xB9E1: 1637, - 0xB9F6: 1638, - 0xB9F7: 1639, - 0xBA41: 1640, - 0xBA45: 1641, - 0xBA49: 1642, - 0xBA51: 1643, - 0xBA53: 1644, - 0xBA55: 1645, - 0xBA57: 1646, - 0xBA61: 1647, - 0xBA62: 1648, - 0xBA65: 1649, - 0xBA77: 1650, - 0xBA81: 1651, - 0xBA82: 1652, - 0xBA85: 1653, - 0xBA89: 1654, - 0xBA8A: 1655, - 0xBA8B: 1656, - 0xBA91: 1657, - 0xBA93: 1658, - 0xBA95: 1659, - 0xBA97: 1660, - 0xBAA1: 1661, - 0xBAB6: 1662, - 0xBAC1: 1663, - 0xBAE1: 1664, - 0xBAE2: 1665, - 0xBAE5: 1666, - 0xBAE9: 1667, - 0xBAF1: 1668, - 0xBAF3: 1669, - 0xBAF5: 1670, - 0xBB41: 1671, - 0xBB45: 1672, - 0xBB49: 1673, - 0xBB51: 1674, - 0xBB61: 1675, - 0xBB62: 1676, - 0xBB65: 1677, - 0xBB69: 1678, - 0xBB71: 1679, - 0xBB73: 1680, - 0xBB75: 1681, - 0xBB77: 1682, - 0xBBA1: 1683, - 0xBBA2: 1684, - 0xBBA5: 1685, - 0xBBA8: 1686, - 0xBBA9: 1687, - 0xBBAB: 1688, - 0xBBB1: 1689, - 0xBBB3: 1690, - 0xBBB5: 1691, - 0xBBB7: 1692, - 0xBBB8: 1693, - 0xBBBB: 1694, - 0xBBBC: 1695, - 0xBC61: 1696, - 0xBC62: 1697, - 0xBC65: 1698, - 0xBC67: 1699, - 0xBC69: 1700, - 0xBC6C: 1701, - 0xBC71: 1702, - 0xBC73: 1703, - 0xBC75: 1704, - 0xBC76: 1705, - 0xBC77: 1706, - 0xBC81: 1707, - 0xBC82: 1708, - 0xBC85: 1709, - 0xBC89: 1710, - 0xBC91: 1711, - 0xBC93: 1712, - 0xBC95: 1713, - 0xBC96: 1714, - 0xBC97: 1715, - 0xBCA1: 1716, - 0xBCA5: 1717, - 0xBCB7: 1718, - 0xBCE1: 1719, - 0xBCE2: 1720, - 0xBCE5: 1721, - 0xBCE9: 1722, - 0xBCF1: 1723, - 0xBCF3: 1724, - 0xBCF5: 1725, - 0xBCF6: 1726, - 0xBCF7: 1727, - 0xBD41: 1728, - 0xBD57: 1729, - 0xBD61: 1730, - 0xBD76: 1731, - 0xBDA1: 1732, - 0xBDA2: 1733, - 0xBDA5: 1734, - 0xBDA9: 1735, - 0xBDB1: 1736, - 0xBDB3: 1737, - 0xBDB5: 1738, - 0xBDB7: 1739, - 0xBDB9: 1740, - 0xBDC1: 1741, - 0xBDC2: 1742, - 0xBDC9: 1743, - 0xBDD6: 1744, - 0xBDE1: 1745, - 0xBDF6: 1746, - 0xBE41: 1747, - 0xBE45: 1748, - 0xBE49: 1749, - 0xBE51: 1750, - 0xBE53: 1751, - 0xBE77: 1752, - 0xBE81: 1753, - 0xBE82: 1754, - 0xBE85: 1755, - 0xBE89: 1756, - 0xBE91: 1757, - 0xBE93: 1758, - 0xBE97: 1759, - 0xBEA1: 1760, - 0xBEB6: 1761, - 0xBEB7: 1762, - 0xBEE1: 1763, - 0xBF41: 1764, - 0xBF61: 1765, - 0xBF71: 1766, - 0xBF75: 1767, - 0xBF77: 1768, - 0xBFA1: 1769, - 0xBFA2: 1770, - 0xBFA5: 1771, - 0xBFA9: 1772, - 0xBFB1: 1773, - 0xBFB3: 1774, - 0xBFB7: 1775, - 0xBFB8: 1776, - 0xBFBD: 1777, - 0xC061: 1778, - 0xC062: 1779, - 0xC065: 1780, - 0xC067: 1781, - 0xC069: 1782, - 0xC071: 1783, - 0xC073: 1784, - 0xC075: 1785, - 0xC076: 1786, - 0xC077: 1787, - 0xC078: 1788, - 0xC081: 1789, - 0xC082: 1790, - 0xC085: 1791, - 0xC089: 1792, - 0xC091: 1793, - 0xC093: 1794, - 0xC095: 1795, - 0xC096: 1796, - 0xC097: 1797, - 0xC0A1: 1798, - 0xC0A5: 1799, - 0xC0A7: 1800, - 0xC0A9: 1801, - 0xC0B1: 1802, - 0xC0B7: 1803, - 0xC0E1: 1804, - 0xC0E2: 1805, - 0xC0E5: 1806, - 0xC0E9: 1807, - 0xC0F1: 1808, - 0xC0F3: 1809, - 0xC0F5: 1810, - 0xC0F6: 1811, - 0xC0F7: 1812, - 0xC141: 1813, - 0xC142: 1814, - 0xC145: 1815, - 0xC149: 1816, - 0xC151: 1817, - 0xC153: 1818, - 0xC155: 1819, - 0xC157: 1820, - 0xC161: 1821, - 0xC165: 1822, - 0xC176: 1823, - 0xC181: 1824, - 0xC185: 1825, - 0xC197: 1826, - 0xC1A1: 1827, - 0xC1A2: 1828, - 0xC1A5: 1829, - 0xC1A9: 1830, - 0xC1B1: 1831, - 0xC1B3: 1832, - 0xC1B5: 1833, - 0xC1B7: 1834, - 0xC1C1: 1835, - 0xC1C5: 1836, - 0xC1C9: 1837, - 0xC1D7: 1838, - 0xC241: 1839, - 0xC245: 1840, - 0xC249: 1841, - 0xC251: 1842, - 0xC253: 1843, - 
0xC255: 1844, - 0xC257: 1845, - 0xC261: 1846, - 0xC271: 1847, - 0xC281: 1848, - 0xC282: 1849, - 0xC285: 1850, - 0xC289: 1851, - 0xC291: 1852, - 0xC293: 1853, - 0xC295: 1854, - 0xC297: 1855, - 0xC2A1: 1856, - 0xC2B6: 1857, - 0xC2C1: 1858, - 0xC2C5: 1859, - 0xC2E1: 1860, - 0xC2E5: 1861, - 0xC2E9: 1862, - 0xC2F1: 1863, - 0xC2F3: 1864, - 0xC2F5: 1865, - 0xC2F7: 1866, - 0xC341: 1867, - 0xC345: 1868, - 0xC349: 1869, - 0xC351: 1870, - 0xC357: 1871, - 0xC361: 1872, - 0xC362: 1873, - 0xC365: 1874, - 0xC369: 1875, - 0xC371: 1876, - 0xC373: 1877, - 0xC375: 1878, - 0xC377: 1879, - 0xC3A1: 1880, - 0xC3A2: 1881, - 0xC3A5: 1882, - 0xC3A8: 1883, - 0xC3A9: 1884, - 0xC3AA: 1885, - 0xC3B1: 1886, - 0xC3B3: 1887, - 0xC3B5: 1888, - 0xC3B7: 1889, - 0xC461: 1890, - 0xC462: 1891, - 0xC465: 1892, - 0xC469: 1893, - 0xC471: 1894, - 0xC473: 1895, - 0xC475: 1896, - 0xC477: 1897, - 0xC481: 1898, - 0xC482: 1899, - 0xC485: 1900, - 0xC489: 1901, - 0xC491: 1902, - 0xC493: 1903, - 0xC495: 1904, - 0xC496: 1905, - 0xC497: 1906, - 0xC4A1: 1907, - 0xC4A2: 1908, - 0xC4B7: 1909, - 0xC4E1: 1910, - 0xC4E2: 1911, - 0xC4E5: 1912, - 0xC4E8: 1913, - 0xC4E9: 1914, - 0xC4F1: 1915, - 0xC4F3: 1916, - 0xC4F5: 1917, - 0xC4F6: 1918, - 0xC4F7: 1919, - 0xC541: 1920, - 0xC542: 1921, - 0xC545: 1922, - 0xC549: 1923, - 0xC551: 1924, - 0xC553: 1925, - 0xC555: 1926, - 0xC557: 1927, - 0xC561: 1928, - 0xC565: 1929, - 0xC569: 1930, - 0xC571: 1931, - 0xC573: 1932, - 0xC575: 1933, - 0xC576: 1934, - 0xC577: 1935, - 0xC581: 1936, - 0xC5A1: 1937, - 0xC5A2: 1938, - 0xC5A5: 1939, - 0xC5A9: 1940, - 0xC5B1: 1941, - 0xC5B3: 1942, - 0xC5B5: 1943, - 0xC5B7: 1944, - 0xC5C1: 1945, - 0xC5C2: 1946, - 0xC5C5: 1947, - 0xC5C9: 1948, - 0xC5D1: 1949, - 0xC5D7: 1950, - 0xC5E1: 1951, - 0xC5F7: 1952, - 0xC641: 1953, - 0xC649: 1954, - 0xC661: 1955, - 0xC681: 1956, - 0xC682: 1957, - 0xC685: 1958, - 0xC689: 1959, - 0xC691: 1960, - 0xC693: 1961, - 0xC695: 1962, - 0xC697: 1963, - 0xC6A1: 1964, - 0xC6A5: 1965, - 0xC6A9: 1966, - 0xC6B7: 1967, - 0xC6C1: 1968, - 0xC6D7: 1969, - 0xC6E1: 1970, - 0xC6E2: 1971, - 0xC6E5: 1972, - 0xC6E9: 1973, - 0xC6F1: 1974, - 0xC6F3: 1975, - 0xC6F5: 1976, - 0xC6F7: 1977, - 0xC741: 1978, - 0xC745: 1979, - 0xC749: 1980, - 0xC751: 1981, - 0xC761: 1982, - 0xC762: 1983, - 0xC765: 1984, - 0xC769: 1985, - 0xC771: 1986, - 0xC773: 1987, - 0xC777: 1988, - 0xC7A1: 1989, - 0xC7A2: 1990, - 0xC7A5: 1991, - 0xC7A9: 1992, - 0xC7B1: 1993, - 0xC7B3: 1994, - 0xC7B5: 1995, - 0xC7B7: 1996, - 0xC861: 1997, - 0xC862: 1998, - 0xC865: 1999, - 0xC869: 2000, - 0xC86A: 2001, - 0xC871: 2002, - 0xC873: 2003, - 0xC875: 2004, - 0xC876: 2005, - 0xC877: 2006, - 0xC881: 2007, - 0xC882: 2008, - 0xC885: 2009, - 0xC889: 2010, - 0xC891: 2011, - 0xC893: 2012, - 0xC895: 2013, - 0xC896: 2014, - 0xC897: 2015, - 0xC8A1: 2016, - 0xC8B7: 2017, - 0xC8E1: 2018, - 0xC8E2: 2019, - 0xC8E5: 2020, - 0xC8E9: 2021, - 0xC8EB: 2022, - 0xC8F1: 2023, - 0xC8F3: 2024, - 0xC8F5: 2025, - 0xC8F6: 2026, - 0xC8F7: 2027, - 0xC941: 2028, - 0xC942: 2029, - 0xC945: 2030, - 0xC949: 2031, - 0xC951: 2032, - 0xC953: 2033, - 0xC955: 2034, - 0xC957: 2035, - 0xC961: 2036, - 0xC965: 2037, - 0xC976: 2038, - 0xC981: 2039, - 0xC985: 2040, - 0xC9A1: 2041, - 0xC9A2: 2042, - 0xC9A5: 2043, - 0xC9A9: 2044, - 0xC9B1: 2045, - 0xC9B3: 2046, - 0xC9B5: 2047, - 0xC9B7: 2048, - 0xC9BC: 2049, - 0xC9C1: 2050, - 0xC9C5: 2051, - 0xC9E1: 2052, - 0xCA41: 2053, - 0xCA45: 2054, - 0xCA55: 2055, - 0xCA57: 2056, - 0xCA61: 2057, - 0xCA81: 2058, - 0xCA82: 2059, - 0xCA85: 2060, - 0xCA89: 2061, - 0xCA91: 2062, - 0xCA93: 2063, - 0xCA95: 2064, - 0xCA97: 2065, - 
0xCAA1: 2066, - 0xCAB6: 2067, - 0xCAC1: 2068, - 0xCAE1: 2069, - 0xCAE2: 2070, - 0xCAE5: 2071, - 0xCAE9: 2072, - 0xCAF1: 2073, - 0xCAF3: 2074, - 0xCAF7: 2075, - 0xCB41: 2076, - 0xCB45: 2077, - 0xCB49: 2078, - 0xCB51: 2079, - 0xCB57: 2080, - 0xCB61: 2081, - 0xCB62: 2082, - 0xCB65: 2083, - 0xCB68: 2084, - 0xCB69: 2085, - 0xCB6B: 2086, - 0xCB71: 2087, - 0xCB73: 2088, - 0xCB75: 2089, - 0xCB81: 2090, - 0xCB85: 2091, - 0xCB89: 2092, - 0xCB91: 2093, - 0xCB93: 2094, - 0xCBA1: 2095, - 0xCBA2: 2096, - 0xCBA5: 2097, - 0xCBA9: 2098, - 0xCBB1: 2099, - 0xCBB3: 2100, - 0xCBB5: 2101, - 0xCBB7: 2102, - 0xCC61: 2103, - 0xCC62: 2104, - 0xCC63: 2105, - 0xCC65: 2106, - 0xCC69: 2107, - 0xCC6B: 2108, - 0xCC71: 2109, - 0xCC73: 2110, - 0xCC75: 2111, - 0xCC76: 2112, - 0xCC77: 2113, - 0xCC7B: 2114, - 0xCC81: 2115, - 0xCC82: 2116, - 0xCC85: 2117, - 0xCC89: 2118, - 0xCC91: 2119, - 0xCC93: 2120, - 0xCC95: 2121, - 0xCC96: 2122, - 0xCC97: 2123, - 0xCCA1: 2124, - 0xCCA2: 2125, - 0xCCE1: 2126, - 0xCCE2: 2127, - 0xCCE5: 2128, - 0xCCE9: 2129, - 0xCCF1: 2130, - 0xCCF3: 2131, - 0xCCF5: 2132, - 0xCCF6: 2133, - 0xCCF7: 2134, - 0xCD41: 2135, - 0xCD42: 2136, - 0xCD45: 2137, - 0xCD49: 2138, - 0xCD51: 2139, - 0xCD53: 2140, - 0xCD55: 2141, - 0xCD57: 2142, - 0xCD61: 2143, - 0xCD65: 2144, - 0xCD69: 2145, - 0xCD71: 2146, - 0xCD73: 2147, - 0xCD76: 2148, - 0xCD77: 2149, - 0xCD81: 2150, - 0xCD89: 2151, - 0xCD93: 2152, - 0xCD95: 2153, - 0xCDA1: 2154, - 0xCDA2: 2155, - 0xCDA5: 2156, - 0xCDA9: 2157, - 0xCDB1: 2158, - 0xCDB3: 2159, - 0xCDB5: 2160, - 0xCDB7: 2161, - 0xCDC1: 2162, - 0xCDD7: 2163, - 0xCE41: 2164, - 0xCE45: 2165, - 0xCE61: 2166, - 0xCE65: 2167, - 0xCE69: 2168, - 0xCE73: 2169, - 0xCE75: 2170, - 0xCE81: 2171, - 0xCE82: 2172, - 0xCE85: 2173, - 0xCE88: 2174, - 0xCE89: 2175, - 0xCE8B: 2176, - 0xCE91: 2177, - 0xCE93: 2178, - 0xCE95: 2179, - 0xCE97: 2180, - 0xCEA1: 2181, - 0xCEB7: 2182, - 0xCEE1: 2183, - 0xCEE5: 2184, - 0xCEE9: 2185, - 0xCEF1: 2186, - 0xCEF5: 2187, - 0xCF41: 2188, - 0xCF45: 2189, - 0xCF49: 2190, - 0xCF51: 2191, - 0xCF55: 2192, - 0xCF57: 2193, - 0xCF61: 2194, - 0xCF65: 2195, - 0xCF69: 2196, - 0xCF71: 2197, - 0xCF73: 2198, - 0xCF75: 2199, - 0xCFA1: 2200, - 0xCFA2: 2201, - 0xCFA5: 2202, - 0xCFA9: 2203, - 0xCFB1: 2204, - 0xCFB3: 2205, - 0xCFB5: 2206, - 0xCFB7: 2207, - 0xD061: 2208, - 0xD062: 2209, - 0xD065: 2210, - 0xD069: 2211, - 0xD06E: 2212, - 0xD071: 2213, - 0xD073: 2214, - 0xD075: 2215, - 0xD077: 2216, - 0xD081: 2217, - 0xD082: 2218, - 0xD085: 2219, - 0xD089: 2220, - 0xD091: 2221, - 0xD093: 2222, - 0xD095: 2223, - 0xD096: 2224, - 0xD097: 2225, - 0xD0A1: 2226, - 0xD0B7: 2227, - 0xD0E1: 2228, - 0xD0E2: 2229, - 0xD0E5: 2230, - 0xD0E9: 2231, - 0xD0EB: 2232, - 0xD0F1: 2233, - 0xD0F3: 2234, - 0xD0F5: 2235, - 0xD0F7: 2236, - 0xD141: 2237, - 0xD142: 2238, - 0xD145: 2239, - 0xD149: 2240, - 0xD151: 2241, - 0xD153: 2242, - 0xD155: 2243, - 0xD157: 2244, - 0xD161: 2245, - 0xD162: 2246, - 0xD165: 2247, - 0xD169: 2248, - 0xD171: 2249, - 0xD173: 2250, - 0xD175: 2251, - 0xD176: 2252, - 0xD177: 2253, - 0xD181: 2254, - 0xD185: 2255, - 0xD189: 2256, - 0xD193: 2257, - 0xD1A1: 2258, - 0xD1A2: 2259, - 0xD1A5: 2260, - 0xD1A9: 2261, - 0xD1AE: 2262, - 0xD1B1: 2263, - 0xD1B3: 2264, - 0xD1B5: 2265, - 0xD1B7: 2266, - 0xD1BB: 2267, - 0xD1C1: 2268, - 0xD1C2: 2269, - 0xD1C5: 2270, - 0xD1C9: 2271, - 0xD1D5: 2272, - 0xD1D7: 2273, - 0xD1E1: 2274, - 0xD1E2: 2275, - 0xD1E5: 2276, - 0xD1F5: 2277, - 0xD1F7: 2278, - 0xD241: 2279, - 0xD242: 2280, - 0xD245: 2281, - 0xD249: 2282, - 0xD253: 2283, - 0xD255: 2284, - 0xD257: 2285, - 0xD261: 2286, - 0xD265: 2287, - 
0xD269: 2288, - 0xD273: 2289, - 0xD275: 2290, - 0xD281: 2291, - 0xD282: 2292, - 0xD285: 2293, - 0xD289: 2294, - 0xD28E: 2295, - 0xD291: 2296, - 0xD295: 2297, - 0xD297: 2298, - 0xD2A1: 2299, - 0xD2A5: 2300, - 0xD2A9: 2301, - 0xD2B1: 2302, - 0xD2B7: 2303, - 0xD2C1: 2304, - 0xD2C2: 2305, - 0xD2C5: 2306, - 0xD2C9: 2307, - 0xD2D7: 2308, - 0xD2E1: 2309, - 0xD2E2: 2310, - 0xD2E5: 2311, - 0xD2E9: 2312, - 0xD2F1: 2313, - 0xD2F3: 2314, - 0xD2F5: 2315, - 0xD2F7: 2316, - 0xD341: 2317, - 0xD342: 2318, - 0xD345: 2319, - 0xD349: 2320, - 0xD351: 2321, - 0xD355: 2322, - 0xD357: 2323, - 0xD361: 2324, - 0xD362: 2325, - 0xD365: 2326, - 0xD367: 2327, - 0xD368: 2328, - 0xD369: 2329, - 0xD36A: 2330, - 0xD371: 2331, - 0xD373: 2332, - 0xD375: 2333, - 0xD377: 2334, - 0xD37B: 2335, - 0xD381: 2336, - 0xD385: 2337, - 0xD389: 2338, - 0xD391: 2339, - 0xD393: 2340, - 0xD397: 2341, - 0xD3A1: 2342, - 0xD3A2: 2343, - 0xD3A5: 2344, - 0xD3A9: 2345, - 0xD3B1: 2346, - 0xD3B3: 2347, - 0xD3B5: 2348, - 0xD3B7: 2349, -} diff --git a/spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolox_s.py b/spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolox_s.py deleted file mode 100644 index abb6a8bbbe4fd1c6aff71596621aaeec2a6a15d8..0000000000000000000000000000000000000000 --- a/spaces/Atualli/yoloxTeste/yoloxdetect2/configs/yolox_s.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii, Inc. and its affiliates. - -import os - -from yolox.exp import Exp as MyExp - - -class Exp(MyExp): - def __init__(self): - super(Exp, self).__init__() - self.depth = 0.33 - self.width = 0.50 - self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] diff --git a/spaces/Banbri/zcvzcv/src/components/ui/popover.tsx b/spaces/Banbri/zcvzcv/src/components/ui/popover.tsx deleted file mode 100644 index 8b35ce6d7b0dd78003308b09354e9f7197eb161a..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/components/ui/popover.tsx +++ /dev/null @@ -1,31 +0,0 @@ -"use client" - -import * as React from "react" -import * as PopoverPrimitive from "@radix-ui/react-popover" - -import { cn } from "@/lib/utils" - -const Popover = PopoverPrimitive.Root - -const PopoverTrigger = PopoverPrimitive.Trigger - -const PopoverContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, align = "center", sideOffset = 4, ...props }, ref) => ( - - - -)) -PopoverContent.displayName = PopoverPrimitive.Content.displayName - -export { Popover, PopoverTrigger, PopoverContent } diff --git a/spaces/Benson/text-generation/Examples/30 Segundos Tamil Whatsapp Estado Vdeo Descarga 2018 Hdvd9.md b/spaces/Benson/text-generation/Examples/30 Segundos Tamil Whatsapp Estado Vdeo Descarga 2018 Hdvd9.md deleted file mode 100644 index d95be03b2940548363795e9d00052844bc8e49cc..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/30 Segundos Tamil Whatsapp Estado Vdeo Descarga 2018 Hdvd9.md +++ /dev/null @@ -1,151 +0,0 @@ -
-

Hindi Mobile Movies MP4 Download 2018

-

If you are a fan of Bollywood films, you may be interested in downloading some of the latest Hindi movies of 2018. But how can you do that without compromising quality or the safety of your device? And which Hindi films of 2018 should you not miss? This article answers those questions and offers practical guidance on downloading Hindi mobile movies in MP4 format in 2018.

-

30 segundos tamil whatsapp estado vídeo descarga 2018 hdvd9


Download File ✏ ✏ ✏ https://bltlly.com/2v6KQF



-

Why Download Hindi Mobile Movies in MP4 in 2018?

-

The popularity of and demand for Hindi films in 2018

-

Hindi films, also known as Bollywood films, are among the most popular and influential forms of entertainment in India and abroad. According to a Deloitte report, the Indian film industry generated revenue of about $2.6 billion in 2017 and was expected to grow at a compound annual growth rate (CAGR) of 11% through 2020. In 2018 Bollywood produced more than 200 films across genres such as comedy, drama, romance, action, thriller, horror and biopic. Several were critically acclaimed and commercially successful, breaking records at the domestic and international box office. Sanju, a biopic about the controversial actor Sanjay Dutt, became the highest-grossing Indian film of 2018 with more than $90 million worldwide, while Padmaavat, a historical drama based on a 16th-century poem, became the first Indian film to cross $50 million in overseas markets. These films entertained audiences and showcased the talent and diversity of Indian cinema.

-

The benefits and advantages of the MP4 format for mobile devices

-

  • It is compatible with most mobile devices, including smartphones, tablets and laptops.
  • It has a high compression ratio, so it can reduce file size without losing much quality.
  • It streams quickly, so playback stays smooth without buffering or lag.
  • It delivers high quality, with clear, sharp picture and sound.

Therefore, downloading Hindi mobile movies in MP4 in 2018 is a sensible choice for anyone who wants to enjoy Bollywood films on a mobile device.

-

¿Cómo descargar películas móviles hindi MP4 en 2018?

-

Los mejores y más seguros sitios web para descargar películas en hindi en formato MP4

-

Hay muchos sitios web que ofrecen descargas gratuitas de películas en hindi en formato MP4. Sin embargo, no todos son confiables y seguros. Algunos pueden contener virus o malware que pueden dañar su dispositivo o robar su información personal. Algunos pueden tener enlaces rotos o descargas de baja calidad que pueden arruinar su experiencia de visualización. Algunos incluso pueden tener contenido ilegal o pirata que puede meterte en problemas con la ley. Por lo tanto, debe ser cuidadoso y selectivo al elegir un sitio web para descargar películas en hindi en formato MP4. Estos son algunos de los mejores y más seguros sitios web que recomendamos para descargar películas móviles Hindi MP4 en 2018:

- - -Sitio web -Características - - -MP4Moviez - - - -Andhadhun -8.2/10 -$56.9 millones - - -2 -Padmaavat -7/10 -$88.5 millones - - -4 -Raazi -7.8/10 -$38.4 millones - - -6 -Hichki -7.5/10 -$28.4 millones - - -8 -Sonu Ke Titu Ki Sweety -7.1/10 -$24.8 millones - - -9 -102 Not Out -7.4/10$17.3 millones - - -

A brief review and summary of each film, with trailer links

-

Here are short reviews and summaries of each film, with links to their trailers:

-

  1. Andhadhun: A black-comedy thriller about a blind pianist who gets entangled in a series of murders after witnessing a crime. The film is packed with twists that keep viewers on the edge of their seats, and it features standout performances from its leads, especially Ayushmann Khurrana, who won the National Film Award for Best Actor for the role. It was praised for its originality, script, direction and music. Watch the trailer here.
  2. Badhaai Ho: A comedy-drama about the awkwardness and hilarity that follow when a middle-aged couple unexpectedly announce their pregnancy to their adult children and to society. It is a refreshing, relatable take on the taboo subjects of late pregnancy and family planning, with excellent performances from the cast, especially Neena Gupta and Gajraj Rao as the expectant couple. The film was praised for its script, dialogue, direction and message. Watch the trailer here.
  3. Hichki: A drama that follows Naina Mathur, a woman with Tourette syndrome who becomes a teacher to a group of underprivileged students. It is an inspiring, moving story about overcoming challenges and prejudice and making a difference, anchored by a remarkable performance from Rani Mukerji, who plays Naina with authenticity and sensitivity. The film was applauded for its plot, direction, acting and message. Watch the trailer here.
  4. Parmanu: The Story of Pokhran: A thriller depicting the true story of the nuclear tests India conducted at Pokhran in 1998, which made India a nuclear power. It is a gripping, patriotic account of a covert operation with high stakes, led by a strong performance from John Abraham as Ashwat Raina, the leader of the operation team. The film was appreciated for its script, direction, acting and action. Watch the trailer: https://bltlly.com/2v6LdT



    -

An APK file is an Android application package

-

An APK file is a compressed archive that contains all the files and data an Android app needs to run on your device. It is similar to an executable (.exe) file on Windows or a package (.pkg) file on Mac. An APK can be obtained from various sources, such as official app stores, third-party websites or direct links.
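Because an APK is essentially a ZIP-style archive, you can look inside one to see what it contains. The short Python sketch below is purely illustrative and assumes a hypothetical file named example.apk in the current directory; it is not tied to any particular app.

import zipfile

# An APK is a ZIP archive; list the first few files packed inside it.
with zipfile.ZipFile("example.apk") as apk:
    for name in apk.namelist()[:20]:
        print(name)

Typical entries include AndroidManifest.xml, one or more classes.dex files, and resource folders such as res/ and assets/.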

    -

A 3D driving game is a simulation game that lets you drive different vehicles in realistic environments

-

A 3D driving game is a type of simulation game that uses 3D graphics and physics to create realistic, immersive driving experiences. You can choose from different vehicles, such as cars, buses, trucks or tanks, and drive them in various environments such as cities, highways, deserts or mountains. You can also follow different rules and regulations, such as traffic lights, speed limits and parking signs, depending on the country you are driving in. A 3D driving game usually offers different missions and modes, such as driving school, free roam, racing or multiplayer, which test your driving skills and provide fun and challenge.

    -

Why should you download a 3D driving game APK?

-

You can learn driving rules and regulations in different countries

-

If you want to learn how to drive in different countries, such as Korea, Japan, the USA or Germany, you can download a 3D driving game APK that offers this feature. You can learn the differences in traffic laws, road signs, lane markings and driving etiquette in each country. This can help you prepare for a driving test or licence, or simply broaden your knowledge and awareness of global driving culture.

-

You can improve your driving skills and confidence

-

If you want to improve your driving skills and confidence, you can download a 3D driving game APK that offers this feature. You can practise in various scenarios, such as parking, reversing, turning, overtaking or emergency braking, and adjust the difficulty level, weather conditions, traffic density or time of day to suit your needs. You can also get feedback and tips from the game to help you improve and avoid mistakes.

    -

You can have fun and challenge yourself with different missions and modes

-

If you want to have fun and challenge yourself, you can download a 3D driving game APK that offers different missions, such as driving school, free roam, racing or multiplayer, each with its own objectives and rewards. You can compete with other players online or offline, or cooperate with them in team-based modes, and earn coins and points that unlock new vehicles, upgrades or achievements.

-

You can customise your vehicles and settings according to your preferences

    - -

How do you download and install a 3D driving game APK?

-

If you are interested in downloading and installing a 3D driving game APK on your Android device, follow these simple steps:

    -

You need to find a reliable and safe source for the APK

-

There are many places to download a 3D driving game APK on the internet, but not all of them are reliable or safe. Some may contain viruses, malware or spyware that can damage your device or steal your personal information; others may have broken links, low-quality downloads, or illegal or pirated content that can get you into trouble. You therefore need to be careful and selective when choosing a download source.
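One practical precaution, when the download page publishes a checksum, is to verify that the file you received matches it before installing. The following is a generic sketch rather than a feature of any specific site; the file name and expected hash are placeholders.

import hashlib

# Compare a downloaded file's SHA-256 hash against the value published by the source.
EXPECTED_SHA256 = "replace-with-the-hash-from-the-download-page"

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("downloaded_game.apk") == EXPECTED_SHA256)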

    -

You can use the link provided in this article to download 3D Driving Class, one of the best 3D driving games available for Android devices

-

One source for a 3D driving game APK is the link provided in this article, which points to the official website of 3D Driving Class, one of the most popular and realistic 3D driving games for Android. The game has more than 10 million downloads and a 4.4-star rating on the Google Play Store, and it offers all the features described above: learning driving rules in different countries, improving your skills and confidence, a variety of missions and modes, and customising your vehicles and settings.

    -

    -

You need to enable installation from unknown sources on your device

-

Before you can install a 3D driving game APK, you need to allow installation from unknown sources, because an APK file does not come from an official app store such as the Google Play Store or the Amazon Appstore. You therefore have to permit your device to install apps from sources other than those stores.

-

You can do this by going to your device settings, then Security, and allowing unknown sources

    - -

Once you have downloaded the 3D driving game APK and enabled installation from unknown sources, you need to locate the downloaded APK file and tap it to install it. You can do this as follows:

-

You can use a file manager app or your device's Downloads folder to find the APK file

-

To locate the downloaded APK, use a file manager app or your device's Downloads folder. A file manager lets you browse, organise and manage the files and folders on your device; you can get one from the Google Play Store or the Amazon Appstore, such as ES File Explorer, File Manager or Files by Google. The Downloads folder is the default location where files downloaded from the internet are stored; you can open it from your app drawer or home screen by tapping Downloads.

-

Once you have found the APK file, tap it to start the installation. You may see a pop-up asking for permission to install the app; tap Install or Next to continue, wait for the installation to finish, and then tap Open or Done to launch or exit the game.
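If you have a computer with the Android SDK platform tools installed, sideloading can also be done over USB with adb instead of tapping the file on the phone. This is an optional, generic alternative, not a step required by any particular game; the file name below is a placeholder and USB debugging must already be enabled on the device.

import subprocess

# Install (or reinstall, with -r) an APK over USB using adb from the Android SDK platform tools.
apk_path = "3d_driving_game.apk"  # placeholder file name
subprocess.run(["adb", "install", "-r", apk_path], check=True)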

    -

Conclusion

-

Frequently asked questions

-

Here are some of the most frequently asked questions about 3D driving game APKs:

    - -QuestionAnswer -¿Cuáles son los requisitos para descargar e instalar un juego de conducción 3D APK? Necesitas un dispositivo Android que tenga suficiente espacio de almacenamiento, memoria y batería para ejecutar el juego sin problemas. También necesitas una conexión a Internet para descargar el archivo APK y acceder a algunas de las funciones en línea del juego. -¿Es la descarga e instalación de un juego de conducción 3D APK legal y seguro? Descargar e instalar un juego de conducción 3D APK es legal y seguro, siempre y cuando utilice una fuente confiable, como el enlace proporcionado en este artículo. Sin embargo, debes tener cuidado con otras fuentes que puedan contener virus, malware o spyware que puedan dañar tu dispositivo o robar tu información personal. -¿Cómo puedo actualizar un juego de conducción 3D APK? Puede actualizar un juego de conducción 3D APK mediante la descarga e instalación de la última versión del archivo APK de la misma fuente que utilizó antes. También puedes buscar actualizaciones en la configuración del juego o notificaciones. -¿Cómo puedo desinstalar un juego de conducción 3D APK? Puede desinstalar un juego de conducción 3D APK yendo a la configuración del dispositivo, aplicaciones, y encontrar el juego que desea desinstalar. Toque en él y luego toque en Desinstalar o Quitar. Confirme su elección tocando en OK o Yes. -¿Cuáles son algunos de los mejores juegos de conducción 3D para dispositivos Android? Algunos de los mejores juegos de conducción 3D para dispositivos Android son 3D Driving Class, Driving School Sim, Car Parking Multijugador, Simulador de autobús: Ultimate, Truck Simulator: Europe 2, Tank Stars, and Extreme Car Driving Simulator. -

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Mod Apk Datos 4.7 4.md b/spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Mod Apk Datos 4.7 4.md deleted file mode 100644 index ec49f6555bf232924299fd0727038aeac622de12..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Aparcamiento De Coches Multijugador Mod Apk Datos 4.7 4.md +++ /dev/null @@ -1,83 +0,0 @@ - -

Car Parking Multiplayer Mod APK Data 4.7 4: Everything You Need to Know

-

If you are a fan of car games, you may have heard of Car Parking Multiplayer, a popular open-world parking simulator for Android devices. Developed by olzhass, the game lets you drive, park, race or even evade the police in a realistic environment, with more than 100 cars to choose from and customise.

    -

    aparcamiento de coches multijugador mod apk datos 4.7 4


    Download Zip ★★★ https://bltlly.com/2v6Jp4



    -

But what if you want to enjoy the game without limitations or restrictions? What if you want unlimited money and resources, every car and feature unlocked, and no ads? There is a way to do that, and it is called Car Parking Multiplayer Mod APK Data 4.7 4.

-

In this article we cover everything you need to know about this modified version of the game, including its features, how to download and install it, its pros and cons, and some alternatives you can try. So, without further ado, let's get started.

    -

Features of Car Parking Multiplayer Mod APK Data 4.7 4

-

Car Parking Multiplayer Mod APK Data 4.7 4 is a modified version of the original game that gives you access to all premium features and content for free. These are some of the features you can enjoy with this mod:

    -
      -
  • Open-world parking simulator with realistic graphics and physics: The game offers a realistic driving experience with high-quality graphics and physics. You can explore different locations, such as cities, airports, deserts and mountains, and park your car in a variety of scenarios.
  • More than 100 cars to choose from and customise: The game has a large collection of cars across categories such as classics, sports cars, trucks and SUVs, and you can customise your car with different paint colours, vinyls, decals, parts and more.
  • Online multiplayer mode to compete or cooperate with other players: You can join or create a room and play with people from around the world, compete in racing or parking modes, cooperate in police or free mode, chat, trade cars and make friends.
  • Police mode for thrilling chases and escapes: You can play as a police officer who has to chase and catch lawbreakers, or as a criminal who has to evade the police and escape, using different weapons and gadgets such as guns, grenades and spike strips.
  • Real car interiors, petrol stations, car washes and more: The game models realistic interiors where you can see and interact with the dashboard, steering wheel, pedals, lights, horn and doors, and you can visit petrol stations to refuel, car washes to clean your car, and other places that enrich the gameplay.
    -

How to download and install Car Parking Multiplayer Mod APK Data 4.7 4

-

If you want to download and install Car Parking Multiplayer Mod APK Data 4.7 4 on your Android device, you can follow these simple steps:

    -
      -
  1. Download the APK file from a trusted source: Download the mod's APK file from a reliable source; the article points to a link on its own website. The file is about 26 MB and is described as virus-free and safe to use.
  2. Install the APK file and wait for the installation to complete: Locate the file in your file manager and tap it, then follow the on-screen instructions and wait for the installation to finish.
  3. Download the data file from the same source as the APK: The data file is about 300 MB and contains all of the game's data and resources.
  4. Extract the data file and copy the folder to the Android/obb directory on your device: You will need a file extractor app such as ZArchiver or RAR. Open the downloaded data file in the extractor and select Extract here; you will get a folder named com.olzhas.carparking.multyplayer. Copy this folder and paste it into the Android/obb directory on your device.
  5. Launch the game and enjoy: Go to your app drawer, tap the game icon, wait for it to load and start playing with all the mod features.
    -

Pros and cons of Car Parking Multiplayer Mod APK Data 4.7 4

-

Car Parking Multiplayer Mod APK Data 4.7 4 has advantages and disadvantages that you should keep in mind before downloading and installing it. Here are some of them:

    -

    - - -Pros -Contras - - -- Descargar y jugar gratis -- Puede que no sea compatible con algunos dispositivos - - -- Dinero y recursos ilimitados -- Puede causar retraso o fallos - - -- Desbloqueado todos los coches y características -- Puede ser detectado por sistemas anti-cheat - - -- No hay anuncios - - - -- No se requiere raíz - - - -

Alternatives to Car Parking Multiplayer Mod APK Data 4.7 4

If you are looking for alternatives to Car Parking Multiplayer Mod APK Data 4.7 4, you can try these other parking games, which are also fun and challenging:

    -
      -
  • Real Car Parking 2: Driving School 2020: Another realistic parking simulator that teaches you to drive and park different cars in various situations. You can also customise your car, enjoy the 3D graphics and play online with other players. You can download it from the Google Play Store or from [this link].
  • Dr. Parking 4: A simple but addictive parking game that tests your speed and precision. You have to park your car in the given spot within the time limit and without hitting any obstacles, and you can also challenge other players online. You can download it from the Google Play Store or from [this link].
  • Parking Jam 3D: A casual, relaxing puzzle game in which you clear a traffic jam by moving the cars out in the right order, using logic and strategy to solve the puzzles and free the cars, with colourful graphics and sounds. You can download it from the Google Play Store or from [this link].
  • Manual Car Driving: A realistic driving game that teaches you to drive a manual car with a clutch and gear stick. You have to follow traffic rules and signs and park your car in the right spot, and you can explore different maps and scenarios.
    • - -
    -

Conclusion

-

Car Parking Multiplayer Mod APK Data 4.7 4 is an appealing option for car enthusiasts who want a realistic, entertaining parking simulator with unlimited money and resources, every car and feature unlocked, no ads, no root required, and more. However, it also has drawbacks, such as compatibility issues, lag or crashes, anti-cheat detection and violation of the game's terms of service.

-

If you want to download and install this modified version of the game, you can follow the steps provided in this article. Alternatively, you can try some of the other parking games suggested above.

-

We hope this article has been useful and informative. If you have any questions or comments, feel free to leave a comment below. Thanks for reading.

    -

Frequently asked questions

-

Here are some of the most frequently asked questions about Car Parking Multiplayer Mod APK Data 4.7 4:

    -
      -
  1. Is Car Parking Multiplayer Mod APK Data 4.7 4 safe to use?

  According to its distributors it is safe to use as long as you download it from a trusted source and follow the installation instructions carefully. However, there is no guarantee that it will not harm your device or account, so use it at your own risk.

  2. Do I need to root my device to use Car Parking Multiplayer Mod APK Data 4.7 4?

  No, you do not need to root your device; it works on both rooted and non-rooted devices.

  3. Will I get banned for using Car Parking Multiplayer Mod APK Data 4.7 4?

  Possibly, yes. Use of the mod can be detected by the original game's anti-cheat systems and can result in a ban or suspension of your account, so use it with caution and discretion.

  4. Can I play online with other players using Car Parking Multiplayer Mod APK Data 4.7 4?

  Yes, you can play online with other players, but you may run into problems such as mismatched versions or connection issues.

  5. Can I update Car Parking Multiplayer Mod APK Data 4.7 4?

  No. If you try to update it from the Google Play Store or the game's official website, you will lose all the mod features and data, so stick with the current version of the mod and check for new updates from the source you downloaded it from.

      -

    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Apk Kinemaster Mod Digitbin 2021.md b/spaces/Benson/text-generation/Examples/Descargar Apk Kinemaster Mod Digitbin 2021.md deleted file mode 100644 index 5f6d334c2f95172f0efed29076576562dbf96a53..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Apk Kinemaster Mod Digitbin 2021.md +++ /dev/null @@ -1,82 +0,0 @@ -
    Kinemaster Mod Digitbin APK Download 2021: A Complete Guide

    If you are looking for a powerful and easy-to-use video editor app for your Android device, you may have heard of Kinemaster. It is one of the most popular video editing apps, offering many features and tools for creating stunning videos. However, the free version of Kinemaster has some limitations, such as a watermark, ads, and restricted access to some premium features.

    That is why many users look for a modded version of Kinemaster that removes these limitations and unlocks all the features. One of the best modded versions of Kinemaster is the Kinemaster Mod Digitbin APK, developed by DigitBin.com. In this article, we will tell you everything you need to know about this modded version, how to download and install it on your Android device, and how to use it to edit videos. Let's get started!

    download apk kinemaster mod digitbin 2021


    Download File ✒ ✒ ✒ https://bltlly.com/2v6KjH



    What is Kinemaster Mod Digitbin APK?

    Kinemaster Mod Digitbin APK is a modified version of the original Kinemaster app that removes the watermark and ads and unlocks all premium features. It also adds some extra features, such as chroma key, which lets you replace the background of your videos with any image or video. With this modded version, you can enjoy all the benefits of Kinemaster without paying anything.

    Features of Kinemaster Mod Digitbin APK

    Here are some of the main features of Kinemaster Mod Digitbin APK that set it apart from the original app:

    No watermark

    One of the most annoying things about the free version of Kinemaster is that it adds a watermark to your videos when you export them. This can ruin their professional look and make them less appealing. With Kinemaster Mod Digitbin APK, you can remove the watermark from your videos and make them look more polished and professional.

    No ads

    Chroma key

    Chroma key is a feature that lets you replace the background of your videos with any image or video. This can be very useful for creating special effects, such as a green screen, or for changing the mood or atmosphere of your videos. The original version of Kinemaster only supports chroma key on some devices, but with Kinemaster Mod Digitbin APK you can use chroma key on any device.

    Premium features unlocked

    Kinemaster has some premium features that are only available to users who pay a monthly or yearly subscription. These include more themes, effects, transitions, stickers, fonts, music, and sound effects. With Kinemaster Mod Digitbin APK, you can access all of these premium features for free and enhance your videos with more creativity and variety.

    How to download and install Kinemaster Mod Digitbin APK on Android

    If you want to download and install Kinemaster Mod Digitbin APK on your Android device, follow these simple steps:

    Step 1: Enable unknown sources

    Since Kinemaster Mod Digitbin APK is not available on the Google Play Store, you need to enable unknown sources in your device settings. This allows you to install apps from third-party sources. To do this, go to Settings > Security > Unknown Sources and turn it on.

    Step 2: Download the APK file from the link below

    Next, download the APK file of Kinemaster Mod Digitbin APK from the link below. This is a safe, verified link that will not harm your device or data. Click the link and wait for the download to finish.

    Kinemaster Mod Digitbin APK Download Link

    Step 3: Install the APK file on your device

    Step 4: Launch the app and enjoy editing videos

    Finally, launch the app from the app drawer or home screen and start editing videos with Kinemaster Mod Digitbin APK. You will see that there is no watermark, there are no ads, and all premium features are unlocked. You can also use chroma key on any device and create amazing videos with different backgrounds.
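    If you would rather sideload the downloaded file from a computer instead of tapping through the steps above, the same install can be done with adb. Below is a minimal Python sketch of that route; it assumes adb (from the Android platform tools) is installed and on your PATH, that USB debugging is enabled on the phone, and it uses a made-up file name for the downloaded APK.

    ```python
    import shutil
    import subprocess
    import sys

    APK_PATH = "kinemaster_mod_digitbin.apk"  # hypothetical name of the downloaded file

    def sideload(apk_path: str) -> None:
        """Install an APK on a USB-connected Android device via adb."""
        if shutil.which("adb") is None:
            sys.exit("adb not found; install the Android platform tools first")
        # `adb install -r` installs the package, replacing an existing version if present.
        result = subprocess.run(["adb", "install", "-r", apk_path],
                                capture_output=True, text=True)
        print(result.stdout or result.stderr)

    if __name__ == "__main__":
        sideload(APK_PATH)
    ```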

    How to use Kinemaster Mod Digitbin APK for video editing

    Kinemaster Mod Digitbin APK is very easy to use for video editing. It has a user-friendly interface that gives you access to all the features and tools with a few taps. Here are some tips on how to use Kinemaster Mod Digitbin APK for video editing:

    Explore the user interface

    When you launch the app, you will see a welcome screen with three options: New Project, Browse Projects, and Settings. Tap New Project to start a new video project. You will then see a screen showing the media browser, the timeline, the preview window, and the toolbar.

    The media browser lets you import media files from your device storage or cloud services. You can also record videos or take photos directly from the app. The timeline shows your video clips and audio tracks in a linear sequence. You can drag and drop them to rearrange them, or trim them using the handles on their edges. The preview window shows what your video looks like as you edit it. You can also use gestures to zoom in or out, rotate, or crop the video. The toolbar has buttons for undo, redo, add media, add layers, voiceover, audio mixer, and export.

    Basic editing techniques

    To edit your video clips in Kinemaster Mod Digitbin APK, select them in the timeline and then tap the scissors icon in the toolbar. This opens a menu with options such as split, cut, copy, delete, mute, adjust volume, speed control, color filter, color adjustment, crop, rotate, mirror, and blend; the split and trim options are sketched in code below.
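    To make the split and trim options above concrete, here is a small Python sketch that models a clip as a start/end range on the timeline, splits it at the playhead, and trims its edges. This only illustrates the arithmetic behind those menu items, not Kinemaster's actual code; the Clip class and the example times are invented for the sketch.

    ```python
    from dataclasses import dataclass

    @dataclass
    class Clip:
        """A clip occupying [start, end) seconds on the timeline (illustrative only)."""
        name: str
        start: float
        end: float

    def split(clip: Clip, playhead: float) -> tuple[Clip, Clip]:
        """Split a clip at the playhead into two back-to-back clips."""
        if not clip.start < playhead < clip.end:
            raise ValueError("playhead must fall inside the clip")
        return (Clip(clip.name + "_a", clip.start, playhead),
                Clip(clip.name + "_b", playhead, clip.end))

    def trim(clip: Clip, new_start: float, new_end: float) -> Clip:
        """Trim a clip by dragging its edge handles inward."""
        if not clip.start <= new_start < new_end <= clip.end:
            raise ValueError("trim range must lie inside the original clip")
        return Clip(clip.name, new_start, new_end)

    left, right = split(Clip("intro", 0.0, 12.0), playhead=4.5)
    print(left, right)             # intro_a: 0.0-4.5, intro_b: 4.5-12.0
    print(trim(right, 5.0, 10.0))  # shorten the second half from both ends
    ```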

    Advanced editing features

    Kinemaster Mod Digitbin APK also offers some advanced editing features that let you add more effects and elements to your videos. One of these features is layers. Layers are additional tracks that let you overlay images, videos, text, stickers, handwriting, or effects on top of the main video track. To add a layer, tap the Add Layer button in the toolbar and choose the type of layer you want to add. You can then adjust the layer's position, size, opacity, and animation in the preview window.

    Another advanced editing feature is chroma key. Chroma key lets you replace the background of your videos with any image or video, which is very useful for creating special effects, such as a green screen, or for changing the mood or atmosphere of your videos. To use chroma key, you need a video clip with a solid-colored background, such as green or blue. Add it as a layer on top of the clip you want to use as the new background, then tap the layer and tap the chroma key button in the toolbar. You will see a slider that adjusts the intensity of the chroma key effect, and you can use the eyedropper tool to pick the color to remove from the layer.
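    The idea behind chroma keying can be sketched in a few lines of Python with NumPy: build a mask of pixels whose color is close to the key color, then copy the background image into those pixels. This is a simplified illustration of the technique described above, not the app's implementation; the array sizes, key color, and threshold are assumptions made for the example.

    ```python
    import numpy as np

    def chroma_key(foreground: np.ndarray, background: np.ndarray,
                   key_color=(0, 255, 0), threshold: float = 80.0) -> np.ndarray:
        """Replace pixels close to key_color in `foreground` with `background`.

        Both images are H x W x 3 uint8 arrays of the same shape.
        """
        fg = foreground.astype(np.float32)
        # Euclidean distance of every pixel from the key color.
        distance = np.linalg.norm(fg - np.array(key_color, dtype=np.float32), axis=-1)
        mask = distance < threshold          # True where the green screen shows
        composite = foreground.copy()
        composite[mask] = background[mask]   # pull background pixels through the mask
        return composite

    # Tiny synthetic example: a green frame with a red square in the middle.
    fg = np.zeros((64, 64, 3), dtype=np.uint8); fg[:, :] = (0, 255, 0)
    fg[24:40, 24:40] = (255, 0, 0)
    bg = np.full((64, 64, 3), 30, dtype=np.uint8)
    out = chroma_key(fg, bg)
    print(out[0, 0], out[32, 32])  # background pixel vs. preserved red pixel
    ```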

    Adding effects and transitions

    Kinemaster Mod Digitbin APK also lets you add effects and transitions to your videos to make them more dynamic and interesting. Effects are visual enhancements you can apply to your video clips or layers, such as blur, mosaic, vignette, distortion, and more. Transitions are animations you can use to connect your video clips or layers smoothly, such as fade, slide, wipe, zoom, and more.

    To add a transition to a video clip or layer, tap it and then tap the transition button in the toolbar. You will see a list of categories containing different transitions. Tap a category and then choose a transition you like. You can also adjust the duration and direction of the transition using the sliders.

    Audio enhancement in Kinemaster Mod Digitbin APK

    Kinemaster Mod Digitbin APK also lets you improve the audio quality of your videos by adding music, sound effects, or voiceovers, or by adjusting volume and pitch. You can add audio tracks to your videos by tapping the Add Media button in the toolbar and choosing an audio file from your device storage or cloud services. You can also record your own voice by tapping the voiceover button in the toolbar and speaking into your device's microphone.

    To edit your audio tracks in Kinemaster Mod Digitbin APK, select them in the timeline and then tap the scissors icon in the toolbar. This opens a menu with options such as split, cut, copy, delete, mute, adjust volume, speed control, pitch control, reverb, equalizer, and fade in/out.

    You can use these options to modify your audio tracks as needed. For example, you can split an audio track into two parts by dragging the playhead to where you want the split and then tapping split. You can also cut a section out of a track by selecting it and tapping cut, or copy or delete a track by tapping copy or delete.
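    As an illustration of what the fade in/out option does to an audio track, the short NumPy sketch below builds a gain envelope that ramps from 0 to 1 at the start and back down at the end, then multiplies it into the samples. It is a conceptual example with made-up durations and a synthetic test tone, not Kinemaster's audio pipeline.

    ```python
    import numpy as np

    def apply_fades(samples: np.ndarray, sample_rate: int,
                    fade_in_s: float = 1.0, fade_out_s: float = 1.5) -> np.ndarray:
        """Return `samples` (a mono float array) with linear fade in/out applied."""
        n = len(samples)
        gain = np.ones(n, dtype=np.float32)
        n_in = min(int(fade_in_s * sample_rate), n)
        n_out = min(int(fade_out_s * sample_rate), n)
        gain[:n_in] = np.linspace(0.0, 1.0, n_in)          # ramp up at the start
        gain[n - n_out:] *= np.linspace(1.0, 0.0, n_out)   # ramp down at the end
        return samples * gain

    sample_rate = 44100
    t = np.arange(0, 5 * sample_rate) / sample_rate                 # 5 seconds of audio
    tone = 0.5 * np.sin(2 * np.pi * 440 * t).astype(np.float32)     # a 440 Hz test tone
    faded = apply_fades(tone, sample_rate)
    print(np.abs(faded[:100]).max())                                       # near 0: still fading in
    print(np.abs(faded[2 * sample_rate:2 * sample_rate + 100]).max())      # ~0.5: full level mid-clip
    ```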

    Tips for creating engaging videos with Kinemaster Mod Digitbin APK

    Now that you know how to use Kinemaster Mod Digitbin APK for video editing, here are some tips that can help you create engaging videos with it:

    Use high-quality images and footage

    Choose a suitable theme and color scheme

    The theme and color scheme of your video affect the mood and tone of your message. Try to choose a theme and color scheme that matches your video's purpose and audience. For example, if you are making a video for a business presentation, you might want a professional, formal theme and color scheme. If you are making a video for a birthday party, you might want a fun, festive one.

    Kinemaster Mod Digitbin APK offers a variety of themes and color schemes you can apply to your videos. You can also customize them by changing the background, text, stickers, effects, and transitions. To access the themes and color schemes, tap the theme button in the toolbar and choose a category that suits your video.

    Add text and stickers to convey your message

    Text and stickers are useful elements that can help you convey your message more clearly and creatively. You can use them to add titles, captions, subtitles, quotes, labels, logos, emojis, or any other information you want to include in your video. You can also use text and stickers to emphasize certain points or add some humor or personality to your video.

    Kinemaster Mod Digitbin APK offers plenty of text and sticker options you can add to your videos. You can also customize them by changing the font, size, color, alignment, animation, and blending. To add text or stickers to your video, tap the Add Layer button in the toolbar and choose text or sticker. You can then type or select the text or sticker you want and adjust it in the preview window.

    Adjust the speed and duration of your clips

    To adjust the speed or duration of your clips in Kinemaster Mod Digitbin APK, tap them and then tap the scissors icon in the toolbar. Then tap speed control or duration adjustment. You will see a slider that lets you change the speed or duration of your clips by percentage or by seconds.
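    The relationship between the speed percentage and the resulting clip length is simple arithmetic, sketched below; the numbers are only an example chosen for illustration, not values taken from the app.

    ```python
    def duration_after_speed_change(duration_s: float, speed_percent: float) -> float:
        """Length of a clip after its playback speed is changed (200% = twice as fast)."""
        return duration_s / (speed_percent / 100.0)

    def speed_for_target_duration(duration_s: float, target_s: float) -> float:
        """Speed (in percent) needed to make a clip fill a target duration."""
        return duration_s / target_s * 100.0

    print(duration_after_speed_change(10.0, 200.0))  # 5.0  -> a 10 s clip at 2x lasts 5 s
    print(duration_after_speed_change(10.0, 50.0))   # 20.0 -> slowing to 50% doubles it
    print(speed_for_target_duration(10.0, 4.0))      # 250.0 -> play at 250% to fit 4 s
    ```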

    Export your video in the best resolution and format

    The final step in creating a video with Kinemaster Mod Digitbin APK is exporting it in the best resolution and format for your purpose. Resolution is the quality or clarity of your video, measured in pixels. Format is the type or extension of your video file, such as MP4, MOV, AVI, etc. The resolution and format of your video affect its size, compatibility, and performance.

    Kinemaster Mod Digitbin APK lets you export your video in different resolutions and formats depending on your device's capabilities and your preferences. You can also choose the frame rate (FPS) and bitrate (Mbps) of your video to balance smoothness and sharpness. To export your video in Kinemaster Mod Digitbin APK, tap the export button in the toolbar and choose the resolution, format, frame rate, bitrate, and file name you want. Then tap export again and wait for the process to finish.
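    Bitrate is what mostly determines how large the exported file ends up: size is roughly bitrate times duration. The small sketch below estimates this, ignoring audio and container overhead; the example numbers are assumptions for illustration, not Kinemaster defaults.

    ```python
    def estimate_size_mb(video_bitrate_mbps: float, duration_s: float) -> float:
        """Rough output size in MB: bits per second * seconds / 8 bits per byte."""
        total_bits = video_bitrate_mbps * 1_000_000 * duration_s
        return total_bits / 8 / 1_000_000  # bytes -> decimal megabytes

    # A 60-second export at 12 Mbps comes out around 90 MB of video data.
    print(estimate_size_mb(12, 60))   # 90.0
    # Halving the bitrate roughly halves the file size at the cost of sharpness.
    print(estimate_size_mb(6, 60))    # 45.0
    ```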

    Conclusion

    Kinemaster Mod Digitbin APK is a great app for video editing on Android devices. It offers plenty of features and tools that can help you create stunning videos without a watermark, ads, or restrictions. You can also use premium features and chroma key for free and enhance your videos with more creativity and variety. To use Kinemaster Mod Digitbin APK, download and install it from the link below and then follow the tips and instructions in this article. We hope you found this article useful and informative. Happy video editing!

    Frequently Asked Questions

    Here are some frequently asked questions about Kinemaster Mod Digitbin APK:

    1. Is Kinemaster Mod Digitbin APK safe to use?

      Yes, Kinemaster Mod Digitbin APK is safe to use as long as you download it from the link below. This is a verified, trusted source that will not harm your device or data. However, you should always be careful when installing apps from unknown sources and scan them with an antivirus app before using them.

    2. Is Kinemaster Mod Digitbin APK legal to use?

      Kinemaster Mod Digitbin APK is not an official app from Kinemaster Corporation but a version modified by DigitBin.com. It is therefore not legal to use, as it violates the original app's terms and conditions. That said, there have been no reports of legal action taken against users of Kinemaster Mod Digitbin APK so far. Still, you should use it at your own risk and responsibility.

    3. Does Kinemaster Mod Digitbin APK require root access?

      No, Kinemaster Mod Digitbin APK does not require root access to work on your device. You can install and use it without rooting your device.

    4. Can I use Kinemaster Mod Digitbin APK on PC or iOS devices?

      No, Kinemaster Mod Digitbin APK is only compatible with Android devices. It cannot be used on PC or iOS devices.

    5. How do I update Kinemaster Mod Digitbin APK?

      Kinemaster Mod Digitbin APK does not have an automatic update feature, so you need to check for updates manually. You can visit the link below to see whether a new version is available and download it if there is. You can also follow DigitBin.com on its social media platforms to get notified of any updates.

    \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel_legacy.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel_legacy.py deleted file mode 100644 index c5f0492ccbe9c727c835c12c84a1d8340366fa1e..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/build/wheel_legacy.py +++ /dev/null @@ -1,102 +0,0 @@ -import logging -import os.path -from typing import List, Optional - -from pip._internal.cli.spinners import open_spinner -from pip._internal.utils.setuptools_build import make_setuptools_bdist_wheel_args -from pip._internal.utils.subprocess import call_subprocess, format_command_args - -logger = logging.getLogger(__name__) - - -def format_command_result( - command_args: List[str], - command_output: str, -) -> str: - """Format command information for logging.""" - command_desc = format_command_args(command_args) - text = f"Command arguments: {command_desc}\n" - - if not command_output: - text += "Command output: None" - elif logger.getEffectiveLevel() > logging.DEBUG: - text += "Command output: [use --verbose to show]" - else: - if not command_output.endswith("\n"): - command_output += "\n" - text += f"Command output:\n{command_output}" - - return text - - -def get_legacy_build_wheel_path( - names: List[str], - temp_dir: str, - name: str, - command_args: List[str], - command_output: str, -) -> Optional[str]: - """Return the path to the wheel in the temporary build directory.""" - # Sort for determinism. - names = sorted(names) - if not names: - msg = ("Legacy build of wheel for {!r} created no files.\n").format(name) - msg += format_command_result(command_args, command_output) - logger.warning(msg) - return None - - if len(names) > 1: - msg = ( - "Legacy build of wheel for {!r} created more than one file.\n" - "Filenames (choosing first): {}\n" - ).format(name, names) - msg += format_command_result(command_args, command_output) - logger.warning(msg) - - return os.path.join(temp_dir, names[0]) - - -def build_wheel_legacy( - name: str, - setup_py_path: str, - source_dir: str, - global_options: List[str], - build_options: List[str], - tempd: str, -) -> Optional[str]: - """Build one unpacked package using the "legacy" build process. - - Returns path to wheel if successfully built. Otherwise, returns None. 
- """ - wheel_args = make_setuptools_bdist_wheel_args( - setup_py_path, - global_options=global_options, - build_options=build_options, - destination_dir=tempd, - ) - - spin_message = f"Building wheel for {name} (setup.py)" - with open_spinner(spin_message) as spinner: - logger.debug("Destination directory: %s", tempd) - - try: - output = call_subprocess( - wheel_args, - command_desc="python setup.py bdist_wheel", - cwd=source_dir, - spinner=spinner, - ) - except Exception: - spinner.finish("error") - logger.error("Failed building wheel for %s", name) - return None - - names = os.listdir(tempd) - wheel_path = get_legacy_build_wheel_path( - names=names, - temp_dir=tempd, - name=name, - command_args=wheel_args, - command_output=output, - ) - return wheel_path diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/live_render.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/live_render.py deleted file mode 100644 index b90fbf7f35097694f727e201b0b378942d70a443..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/live_render.py +++ /dev/null @@ -1,113 +0,0 @@ -import sys -from typing import Optional, Tuple - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from pip._vendor.typing_extensions import Literal # pragma: no cover - - -from ._loop import loop_last -from .console import Console, ConsoleOptions, RenderableType, RenderResult -from .control import Control -from .segment import ControlType, Segment -from .style import StyleType -from .text import Text - -VerticalOverflowMethod = Literal["crop", "ellipsis", "visible"] - - -class LiveRender: - """Creates a renderable that may be updated. - - Args: - renderable (RenderableType): Any renderable object. - style (StyleType, optional): An optional style to apply to the renderable. Defaults to "". - """ - - def __init__( - self, - renderable: RenderableType, - style: StyleType = "", - vertical_overflow: VerticalOverflowMethod = "ellipsis", - ) -> None: - self.renderable = renderable - self.style = style - self.vertical_overflow = vertical_overflow - self._shape: Optional[Tuple[int, int]] = None - - def set_renderable(self, renderable: RenderableType) -> None: - """Set a new renderable. - - Args: - renderable (RenderableType): Any renderable object, including str. - """ - self.renderable = renderable - - def position_cursor(self) -> Control: - """Get control codes to move cursor to beginning of live render. - - Returns: - Control: A control instance that may be printed. - """ - if self._shape is not None: - _, height = self._shape - return Control( - ControlType.CARRIAGE_RETURN, - (ControlType.ERASE_IN_LINE, 2), - *( - ( - (ControlType.CURSOR_UP, 1), - (ControlType.ERASE_IN_LINE, 2), - ) - * (height - 1) - ) - ) - return Control() - - def restore_cursor(self) -> Control: - """Get control codes to clear the render and restore the cursor to its previous position. - - Returns: - Control: A Control instance that may be printed. 
- """ - if self._shape is not None: - _, height = self._shape - return Control( - ControlType.CARRIAGE_RETURN, - *((ControlType.CURSOR_UP, 1), (ControlType.ERASE_IN_LINE, 2)) * height - ) - return Control() - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - - renderable = self.renderable - style = console.get_style(self.style) - lines = console.render_lines(renderable, options, style=style, pad=False) - shape = Segment.get_shape(lines) - - _, height = shape - if height > options.size.height: - if self.vertical_overflow == "crop": - lines = lines[: options.size.height] - shape = Segment.get_shape(lines) - elif self.vertical_overflow == "ellipsis": - lines = lines[: (options.size.height - 1)] - overflow_text = Text( - "...", - overflow="crop", - justify="center", - end="", - style="live.ellipsis", - ) - lines.append(list(console.render(overflow_text))) - shape = Segment.get_shape(lines) - self._shape = shape - - new_line = Segment.line() - for last, line in loop_last(lines): - yield from line - if not last: - yield new_line diff --git a/spaces/BilalQ/Stable_Difussion/README.md b/spaces/BilalQ/Stable_Difussion/README.md deleted file mode 100644 index 5d51e7cb7574f781bb63fb81ab6bd1374dd6486d..0000000000000000000000000000000000000000 --- a/spaces/BilalQ/Stable_Difussion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stable Difussion -emoji: ⚡ -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/BrAD/README.md b/spaces/CVPR/BrAD/README.md deleted file mode 100644 index 1ff61a495e7ec3623afe83d889a95f80da4ab8e0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/BrAD/README.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: BrAD -emoji: 🏢 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.0.20 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - - -This is a demo for the paper: "Unsupervised Domain Generalization by Learning a Bridge Across Domains" - -https://openaccess.thecvf.com/content/CVPR2022/papers/Harary_Unsupervised_Domain_Generalization_by_Learning_a_Bridge_Across_Domains_CVPR_2022_paper.pdf \ No newline at end of file diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/algorithm_wrapper.h b/spaces/CVPR/LIVE/thrust/thrust/detail/algorithm_wrapper.h deleted file mode 100644 index c09b9a0a0b4dcc52924425d1549093668e5d2952..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/detail/algorithm_wrapper.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2020 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -// When a compiler uses Thrust as part of its implementation of Standard C++ -// algorithms, a cycle of included files may result when Thrust code tries to -// use a standard algorithm. Having a macro that is defined only when Thrust -// is including an algorithms-related header gives the compiler a chance to -// detect and break the cycle of includes. - -#define THRUST_INCLUDING_ALGORITHMS_HEADER -#include -#undef THRUST_INCLUDING_ALGORITHMS_HEADER diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/core/agent_launcher.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/core/agent_launcher.h deleted file mode 100644 index 7788481c7b85124d0873be11b8563372e457e724..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/core/agent_launcher.h +++ /dev/null @@ -1,1184 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ -#pragma once - -#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC -#include -#include -#include -#include -#include - -#if 0 -#define __THRUST__TEMPLATE_DEBUG -#endif - -#if __THRUST__TEMPLATE_DEBUG -template class ID_impl; -template class Foo { ID_impl t;}; -#endif - -namespace thrust -{ -namespace cuda_cub { -namespace core { - - -#if defined(__CUDA_ARCH__) || defined(__NVCOMPILER_CUDA__) -#if 0 - template - void __global__ - __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(Args... 
args) - { - extern __shared__ char shmem[]; - Agent::entry(args..., shmem); - } -#else - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0) - { - extern __shared__ char shmem[]; - Agent::entry(x0, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, shmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, shmem); - } - template - void __global__ 
__launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) - { - extern __shared__ char shmem[]; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE, shmem); - } -#endif - - //////////////////////////////////////////////////////////// - - -#if 0 - template - void __global__ - __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, Args... args) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(args..., vshmem); - } -#else - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? 
shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, vshmem); - } - template - void __global__ __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) - _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) - { - extern __shared__ char shmem[]; - vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; - Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE, vshmem); - } -#endif -#else -#if 0 - template - void __global__ _kernel_agent(Args... args) {} - template - void __global__ _kernel_agent_vshmem(char*, Args... 
args) {} -#else - template - void __global__ _kernel_agent(_0) {} - template - void __global__ _kernel_agent(_0,_1) {} - template - void __global__ _kernel_agent(_0,_1,_2) {} - template - void __global__ _kernel_agent(_0,_1,_2,_3) {} - template - void __global__ _kernel_agent(_0,_1,_2,_3, _4) {} - template - void __global__ _kernel_agent(_0,_1,_2,_3, _4, _5) {} - template - void __global__ _kernel_agent(_0,_1,_2,_3, _4, _5, _6) {} - template - void __global__ _kernel_agent(_0,_1,_2,_3, _4, _5, _6, _7) {} - template - void __global__ _kernel_agent(_0,_1,_2,_3, _4, _5, _6, _7, _8) {} - template - void __global__ _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9) {} - template - void __global__ _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA) {} - template - void __global__ _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB) {} - template - void __global__ _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB,_xC) {} - template - void __global__ _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB,_xC, _xD) {} - template - void __global__ _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB,_xC, _xD, _xE) {} - //////////////////////////////////////////////////////////// - template - void __global__ _kernel_agent_vshmem(char*,_0) {} - template - void __global__ _kernel_agent_vshmem(char*,_0,_1) {} - template - void __global__ _kernel_agent_vshmem(char*,_0,_1,_2) {} - template - void __global__ _kernel_agent_vshmem(char*,_0,_1,_2,_3) {} - template - void __global__ _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4) {} - template - void __global__ _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5) {} - template - void __global__ _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5, _6) {} - template - void __global__ _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5, _6, _7) {} - template - void __global__ _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5, _6, _7, _8) {} - template - void __global__ _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9) {} - template - void __global__ _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA) {} - template - void __global__ _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB) {} - template - void __global__ _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC) {} - template - void __global__ _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD) {} - template - void __global__ _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD, _xE) {} -#endif -#endif - - - template - struct AgentLauncher : Agent - { - core::AgentPlan plan; - size_t count; - cudaStream_t stream; - char const* name; - bool debug_sync; - unsigned int grid; - char* vshmem; - bool has_shmem; - size_t shmem_size; - - enum - { - MAX_SHMEM_PER_BLOCK = 48 * 1024, - }; - typedef - typename has_enough_shmem::type has_enough_shmem_t; - typedef - has_enough_shmem shm1; - - template - THRUST_RUNTIME_FUNCTION - AgentLauncher(AgentPlan plan_, - Size count_, - cudaStream_t stream_, - char const* name_, - bool debug_sync_) - : plan(plan_), - count((size_t)count_), - stream(stream_), - name(name_), - debug_sync(debug_sync_), - grid(static_cast((count + plan.items_per_tile - 1) / plan.items_per_tile)), - vshmem(NULL), - has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size), - shmem_size(has_shmem ? 
plan.shared_memory_size : 0) - { - assert(count > 0); - } - - template - THRUST_RUNTIME_FUNCTION - AgentLauncher(AgentPlan plan_, - Size count_, - cudaStream_t stream_, - char* vshmem, - char const* name_, - bool debug_sync_) - : plan(plan_), - count((size_t)count_), - stream(stream_), - name(name_), - debug_sync(debug_sync_), - grid(static_cast((count + plan.items_per_tile - 1) / plan.items_per_tile)), - vshmem(vshmem), - has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size), - shmem_size(has_shmem ? plan.shared_memory_size : 0) - { - assert(count > 0); - } - - THRUST_RUNTIME_FUNCTION - AgentLauncher(AgentPlan plan_, - cudaStream_t stream_, - char const* name_, - bool debug_sync_) - : plan(plan_), - count(0), - stream(stream_), - name(name_), - debug_sync(debug_sync_), - grid(plan.grid_size), - vshmem(NULL), - has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size), - shmem_size(has_shmem ? plan.shared_memory_size : 0) - { - assert(plan.grid_size > 0); - } - - THRUST_RUNTIME_FUNCTION - AgentLauncher(AgentPlan plan_, - cudaStream_t stream_, - char* vshmem, - char const* name_, - bool debug_sync_) - : plan(plan_), - count(0), - stream(stream_), - name(name_), - debug_sync(debug_sync_), - grid(plan.grid_size), - vshmem(vshmem), - has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size), - shmem_size(has_shmem ? plan.shared_memory_size : 0) - { - assert(plan.grid_size > 0); - } - -#if 0 - THRUST_RUNTIME_FUNCTION - AgentPlan static get_plan(cudaStream_t s, void* d_ptr = 0) - { - // in separable compilation mode, we have no choice - // but to call kernel to get agent_plan - // otherwise the risk is something may fail - // if user mix & match ptx versions in a separably compiled function - // http://nvbugs/1772071 - // XXX may be it is too string of a requirements, consider relaxing it in - // the future -#ifdef __CUDACC_RDC__ - return core::get_agent_plan(s, d_ptr); -#else - core::cuda_optional ptx_version = core::get_ptx_version(); - //CUDA_CUB_RET_IF_FAIL(ptx_version.status()); - return get_agent_plan(ptx_version); -#endif - } - THRUST_RUNTIME_FUNCTION - AgentPlan static get_plan_default() - { - return get_agent_plan(sm_arch<0>::type::ver); - } -#endif - - THRUST_RUNTIME_FUNCTION - typename core::get_plan::type static get_plan(cudaStream_t , void* d_ptr = 0) - { - THRUST_UNUSED_VAR(d_ptr); - core::cuda_optional ptx_version = core::get_ptx_version(); - return get_agent_plan(ptx_version); - } - - THRUST_RUNTIME_FUNCTION - typename core::get_plan::type static get_plan() - { - return get_agent_plan(lowest_supported_sm_arch::ver); - } - - THRUST_RUNTIME_FUNCTION void sync() const - { - if (debug_sync) - { - if (THRUST_IS_DEVICE_CODE) { - #if THRUST_INCLUDE_DEVICE_CODE - cudaDeviceSynchronize(); - #endif - } else { - #if THRUST_INCLUDE_HOST_CODE - cudaStreamSynchronize(stream); - #endif - } - } - } - - template - static cuda_optional THRUST_RUNTIME_FUNCTION - max_blocks_per_sm_impl(K k, int block_threads) - { - int occ; - cudaError_t status = cub::MaxSmOccupancy(occ, k, block_threads); - return cuda_optional(status == cudaSuccess ? 
occ : -1, status); - } - - template - cuda_optional THRUST_RUNTIME_FUNCTION - max_sm_occupancy(K k) const - { - return max_blocks_per_sm_impl(k, plan.block_threads); - } - - - - template - THRUST_RUNTIME_FUNCTION - void print_info(K k) const - { - if (debug_sync) - { - cuda_optional occ = max_sm_occupancy(k); - core::cuda_optional ptx_version = core::get_ptx_version(); - if (count > 0) - { - _CubLog("Invoking %s<<<%u, %d, %d, %lld>>>(), %llu items total, %d items per thread, %d SM occupancy, %d vshmem size, %d ptx_version \n", - name, - grid, - plan.block_threads, - (has_shmem ? (int)plan.shared_memory_size : 0), - (long long)stream, - (long long)count, - plan.items_per_thread, - (int)occ, - (!has_shmem ? (int)plan.shared_memory_size : 0), - (int)ptx_version); - } - else - { - _CubLog("Invoking %s<<<%u, %d, %d, %lld>>>(), %d items per thread, %d SM occupancy, %d vshmem size, %d ptx_version\n", - name, - grid, - plan.block_threads, - (has_shmem ? (int)plan.shared_memory_size : 0), - (long long)stream, - plan.items_per_thread, - (int)occ, - (!has_shmem ? (int)plan.shared_memory_size : 0), - (int)ptx_version); - } - } - } - - //////////////////// - // Variadic code - //////////////////// - -#if 0 - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - return max_blocks_per_sm_impl(_kernel_agent, plan.block_threads); - } -#else - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0, _1) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5,_6) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional 
THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } - template - static cuda_optional THRUST_RUNTIME_FUNCTION - get_max_blocks_per_sm(AgentPlan plan) - { - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD,_xE) = _kernel_agent; - return max_blocks_per_sm_impl(ptr, plan.block_threads); - } -#endif - - - -#if 0 - - // If we are guaranteed to have enough shared memory - // don't compile other kernel which accepts pointer - // and save on compilations - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, Args... args) const - { - assert(has_shmem && vshmem == NULL); - print_info(_kernel_agent); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(_kernel_agent, args...); - } - - // If there is a risk of not having enough shared memory - // we compile generic kernel instead. - // This kernel is likely to be somewhat slower, but it can accomodate - // both shared and virtualized shared memories. - // Alternative option is to compile two kernels, one using shared and one - // using virtualized shared memory. While this can be slightly faster if we - // do actually have enough shared memory, the compilation time will double. - // - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, Args... args) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - print_info(_kernel_agent_vshmem); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(_kernel_agent_vshmem, vshmem, args...); - } - - template - void THRUST_RUNTIME_FUNCTION - launch(Args... 
args) const - { -#if __THRUST__TEMPLATE_DEBUG -#ifdef __CUDA_ARCH__ - typedef typename Foo< - shm1::v1, - shm1::v2, - shm1::v3, - shm1::v4, - shm1::v5>::t tt; -#endif -#endif - launch_impl(has_enough_shmem_t(),args...); - sync(); - } -#else - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, 
x0, x1, x2, x3, x4, x5, x6, x7); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8) = _kernel_agent_vshmem; - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB,_xC xC) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB,_xC xC,_xD xD) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD) = _kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB,_xC xC,_xD xD,_xE xE) const - { - assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); - void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD, _xE) = 
_kernel_agent_vshmem; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) - .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE); - } - - //////////////////////////////////////////////////////// - //////////////////////////////////////////////////////// - //////////////////////////////////////////////////////// - - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0, _1) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3, x4); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3, x4, x5); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5,_6) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3, x4, x5, x6); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, 
x0, x1, x2, x3, x4, x5, x6, x7, x8); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD); - } - template - void THRUST_RUNTIME_FUNCTION - launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) const - { - assert(has_shmem && vshmem == NULL); - void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD,_xE) = _kernel_agent; - print_info(ptr); - launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) - .doit(ptr,x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE); - } - - //////////////////////////////////////////////////////// - //////////////////////////////////////////////////////// - //////////////////////////////////////////////////////// - - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0) const - { - launch_impl(has_enough_shmem_t(), x0); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1) const - { - launch_impl(has_enough_shmem_t(), x0, x1); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3) const - { - launch_impl(has_enough_shmem_t(), x0, 
x1, x2, x3); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD); - sync(); - } - template - void THRUST_RUNTIME_FUNCTION - launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) const - { - launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE); - sync(); - } -#endif - - - }; - -} // namespace core -} -} // end namespace thrust -#endif diff --git a/spaces/CVPR/lama-example/saicinpainting/evaluation/utils.py b/spaces/CVPR/lama-example/saicinpainting/evaluation/utils.py deleted file mode 100644 index 6d7c15c9242ed8a9bc59fbb3b450cca394720bb8..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/saicinpainting/evaluation/utils.py +++ /dev/null @@ -1,28 +0,0 @@ -from enum import Enum - -import yaml -from easydict import EasyDict as edict -import torch.nn as nn -import torch - - -def load_yaml(path): - with open(path, 'r') as f: - return edict(yaml.safe_load(f)) - - -def move_to_device(obj, device): - if isinstance(obj, nn.Module): - return obj.to(device) - if torch.is_tensor(obj): - return obj.to(device) - if isinstance(obj, (tuple, list)): - return [move_to_device(el, device) for el in obj] - if isinstance(obj, dict): - return {name: move_to_device(val, device) for name, val in obj.items()} - raise ValueError(f'Unexpected type {type(obj)}') - - -class SmallMode(Enum): - DROP = "drop" - UPSCALE 
= "upscale" diff --git a/spaces/CVPR/lama-example/saicinpainting/training/trainers/default.py b/spaces/CVPR/lama-example/saicinpainting/training/trainers/default.py deleted file mode 100644 index 86c7f0fab42924bfc93a031e851117634c70f593..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/saicinpainting/training/trainers/default.py +++ /dev/null @@ -1,175 +0,0 @@ -import logging - -import torch -import torch.nn.functional as F -from omegaconf import OmegaConf - -from saicinpainting.training.data.datasets import make_constant_area_crop_params -from saicinpainting.training.losses.distance_weighting import make_mask_distance_weighter -from saicinpainting.training.losses.feature_matching import feature_matching_loss, masked_l1_loss -from saicinpainting.training.modules.fake_fakes import FakeFakesGenerator -from saicinpainting.training.trainers.base import BaseInpaintingTrainingModule, make_multiscale_noise -from saicinpainting.utils import add_prefix_to_keys, get_ramp - -LOGGER = logging.getLogger(__name__) - - -def make_constant_area_crop_batch(batch, **kwargs): - crop_y, crop_x, crop_height, crop_width = make_constant_area_crop_params(img_height=batch['image'].shape[2], - img_width=batch['image'].shape[3], - **kwargs) - batch['image'] = batch['image'][:, :, crop_y : crop_y + crop_height, crop_x : crop_x + crop_width] - batch['mask'] = batch['mask'][:, :, crop_y: crop_y + crop_height, crop_x: crop_x + crop_width] - return batch - - -class DefaultInpaintingTrainingModule(BaseInpaintingTrainingModule): - def __init__(self, *args, concat_mask=True, rescale_scheduler_kwargs=None, image_to_discriminator='predicted_image', - add_noise_kwargs=None, noise_fill_hole=False, const_area_crop_kwargs=None, - distance_weighter_kwargs=None, distance_weighted_mask_for_discr=False, - fake_fakes_proba=0, fake_fakes_generator_kwargs=None, - **kwargs): - super().__init__(*args, **kwargs) - self.concat_mask = concat_mask - self.rescale_size_getter = get_ramp(**rescale_scheduler_kwargs) if rescale_scheduler_kwargs is not None else None - self.image_to_discriminator = image_to_discriminator - self.add_noise_kwargs = add_noise_kwargs - self.noise_fill_hole = noise_fill_hole - self.const_area_crop_kwargs = const_area_crop_kwargs - self.refine_mask_for_losses = make_mask_distance_weighter(**distance_weighter_kwargs) \ - if distance_weighter_kwargs is not None else None - self.distance_weighted_mask_for_discr = distance_weighted_mask_for_discr - - self.fake_fakes_proba = fake_fakes_proba - if self.fake_fakes_proba > 1e-3: - self.fake_fakes_gen = FakeFakesGenerator(**(fake_fakes_generator_kwargs or {})) - - def forward(self, batch): - if self.training and self.rescale_size_getter is not None: - cur_size = self.rescale_size_getter(self.global_step) - batch['image'] = F.interpolate(batch['image'], size=cur_size, mode='bilinear', align_corners=False) - batch['mask'] = F.interpolate(batch['mask'], size=cur_size, mode='nearest') - - if self.training and self.const_area_crop_kwargs is not None: - batch = make_constant_area_crop_batch(batch, **self.const_area_crop_kwargs) - - img = batch['image'] - mask = batch['mask'] - - masked_img = img * (1 - mask) - - if self.add_noise_kwargs is not None: - noise = make_multiscale_noise(masked_img, **self.add_noise_kwargs) - if self.noise_fill_hole: - masked_img = masked_img + mask * noise[:, :masked_img.shape[1]] - masked_img = torch.cat([masked_img, noise], dim=1) - - if self.concat_mask: - masked_img = torch.cat([masked_img, mask], dim=1) - - batch['predicted_image'] 
= self.generator(masked_img) - batch['inpainted'] = mask * batch['predicted_image'] + (1 - mask) * batch['image'] - - if self.fake_fakes_proba > 1e-3: - if self.training and torch.rand(1).item() < self.fake_fakes_proba: - batch['fake_fakes'], batch['fake_fakes_masks'] = self.fake_fakes_gen(img, mask) - batch['use_fake_fakes'] = True - else: - batch['fake_fakes'] = torch.zeros_like(img) - batch['fake_fakes_masks'] = torch.zeros_like(mask) - batch['use_fake_fakes'] = False - - batch['mask_for_losses'] = self.refine_mask_for_losses(img, batch['predicted_image'], mask) \ - if self.refine_mask_for_losses is not None and self.training \ - else mask - - return batch - - def generator_loss(self, batch): - img = batch['image'] - predicted_img = batch[self.image_to_discriminator] - original_mask = batch['mask'] - supervised_mask = batch['mask_for_losses'] - - # L1 - l1_value = masked_l1_loss(predicted_img, img, supervised_mask, - self.config.losses.l1.weight_known, - self.config.losses.l1.weight_missing) - - total_loss = l1_value - metrics = dict(gen_l1=l1_value) - - # vgg-based perceptual loss - if self.config.losses.perceptual.weight > 0: - pl_value = self.loss_pl(predicted_img, img, mask=supervised_mask).sum() * self.config.losses.perceptual.weight - total_loss = total_loss + pl_value - metrics['gen_pl'] = pl_value - - # discriminator - # adversarial_loss calls backward by itself - mask_for_discr = supervised_mask if self.distance_weighted_mask_for_discr else original_mask - self.adversarial_loss.pre_generator_step(real_batch=img, fake_batch=predicted_img, - generator=self.generator, discriminator=self.discriminator) - discr_real_pred, discr_real_features = self.discriminator(img) - discr_fake_pred, discr_fake_features = self.discriminator(predicted_img) - adv_gen_loss, adv_metrics = self.adversarial_loss.generator_loss(real_batch=img, - fake_batch=predicted_img, - discr_real_pred=discr_real_pred, - discr_fake_pred=discr_fake_pred, - mask=mask_for_discr) - total_loss = total_loss + adv_gen_loss - metrics['gen_adv'] = adv_gen_loss - metrics.update(add_prefix_to_keys(adv_metrics, 'adv_')) - - # feature matching - if self.config.losses.feature_matching.weight > 0: - need_mask_in_fm = OmegaConf.to_container(self.config.losses.feature_matching).get('pass_mask', False) - mask_for_fm = supervised_mask if need_mask_in_fm else None - fm_value = feature_matching_loss(discr_fake_features, discr_real_features, - mask=mask_for_fm) * self.config.losses.feature_matching.weight - total_loss = total_loss + fm_value - metrics['gen_fm'] = fm_value - - if self.loss_resnet_pl is not None: - resnet_pl_value = self.loss_resnet_pl(predicted_img, img) - total_loss = total_loss + resnet_pl_value - metrics['gen_resnet_pl'] = resnet_pl_value - - return total_loss, metrics - - def discriminator_loss(self, batch): - total_loss = 0 - metrics = {} - - predicted_img = batch[self.image_to_discriminator].detach() - self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=predicted_img, - generator=self.generator, discriminator=self.discriminator) - discr_real_pred, discr_real_features = self.discriminator(batch['image']) - discr_fake_pred, discr_fake_features = self.discriminator(predicted_img) - adv_discr_loss, adv_metrics = self.adversarial_loss.discriminator_loss(real_batch=batch['image'], - fake_batch=predicted_img, - discr_real_pred=discr_real_pred, - discr_fake_pred=discr_fake_pred, - mask=batch['mask']) - total_loss = total_loss + adv_discr_loss - metrics['discr_adv'] = adv_discr_loss - 
metrics.update(add_prefix_to_keys(adv_metrics, 'adv_')) - - - if batch.get('use_fake_fakes', False): - fake_fakes = batch['fake_fakes'] - self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=fake_fakes, - generator=self.generator, discriminator=self.discriminator) - discr_fake_fakes_pred, _ = self.discriminator(fake_fakes) - fake_fakes_adv_discr_loss, fake_fakes_adv_metrics = self.adversarial_loss.discriminator_loss( - real_batch=batch['image'], - fake_batch=fake_fakes, - discr_real_pred=discr_real_pred, - discr_fake_pred=discr_fake_fakes_pred, - mask=batch['mask'] - ) - total_loss = total_loss + fake_fakes_adv_discr_loss - metrics['discr_adv_fake_fakes'] = fake_fakes_adv_discr_loss - metrics.update(add_prefix_to_keys(fake_fakes_adv_metrics, 'adv_')) - - return total_loss, metrics diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/datasets/pascal_voc.py b/spaces/CVPR/regionclip-demo/detectron2/data/datasets/pascal_voc.py deleted file mode 100644 index dbbf82cb96442bfa0cf05ed0f4dddf3645434b7e..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/data/datasets/pascal_voc.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import numpy as np -import os -import xml.etree.ElementTree as ET -from typing import List, Tuple, Union - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.structures import BoxMode -from detectron2.utils.file_io import PathManager - -__all__ = ["load_voc_instances", "register_pascal_voc"] - - -# fmt: off -CLASS_NAMES = ( - "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", - "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", - "pottedplant", "sheep", "sofa", "train", "tvmonitor" -) -# fmt: on - - -def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]): - """ - Load Pascal VOC detection annotations to Detectron2 format. - - Args: - dirname: Contain "Annotations", "ImageSets", "JPEGImages" - split (str): one of "train", "test", "val", "trainval" - class_names: list or tuple of class names - """ - with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f: - fileids = np.loadtxt(f, dtype=np.str) - - # Needs to read many small annotation files. Makes sense at local - annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/")) - dicts = [] - for fileid in fileids: - anno_file = os.path.join(annotation_dirname, fileid + ".xml") - jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg") - - with PathManager.open(anno_file) as f: - tree = ET.parse(f) - - r = { - "file_name": jpeg_file, - "image_id": fileid, - "height": int(tree.findall("./size/height")[0].text), - "width": int(tree.findall("./size/width")[0].text), - } - instances = [] - - for obj in tree.findall("object"): - cls = obj.find("name").text - # We include "difficult" samples in training. - # Based on limited experiments, they don't hurt accuracy. - # difficult = int(obj.find("difficult").text) - # if difficult == 1: - # continue - bbox = obj.find("bndbox") - bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]] - # Original annotations are integers in the range [1, W or H] - # Assuming they mean 1-based pixel indices (inclusive), - # a box with annotation (xmin=1, xmax=W) covers the whole image. 
- # In coordinate space this is represented by (xmin=0, xmax=W) - bbox[0] -= 1.0 - bbox[1] -= 1.0 - instances.append( - {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS} - ) - r["annotations"] = instances - dicts.append(r) - return dicts - - -def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES): - DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names)) - MetadataCatalog.get(name).set( - thing_classes=list(class_names), dirname=dirname, year=year, split=split - ) diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/json_utils/json_fix_llm.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/json_utils/json_fix_llm.py deleted file mode 100644 index 869aed125cfb8cd7a69ed02eeb389cc72a3e296b..0000000000000000000000000000000000000000 --- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/json_utils/json_fix_llm.py +++ /dev/null @@ -1,220 +0,0 @@ -"""This module contains functions to fix JSON strings generated by LLM models, such as ChatGPT, using the assistance -of the ChatGPT API or LLM models.""" -from __future__ import annotations - -import contextlib -import json -from typing import Any, Dict - -from colorama import Fore -from regex import regex - -from autogpt.config import Config -from autogpt.json_utils.json_fix_general import correct_json -from autogpt.llm_utils import call_ai_function -from autogpt.logs import logger -from autogpt.speech import say_text - -JSON_SCHEMA = """ -{ - "command": { - "name": "command name", - "args": { - "arg name": "value" - } - }, - "thoughts": - { - "text": "thought", - "reasoning": "reasoning", - "plan": "- short bulleted\n- list that conveys\n- long-term plan", - "criticism": "constructive self-criticism", - "speak": "thoughts summary to say to user" - } -} -""" - -CFG = Config() - - -def auto_fix_json(json_string: str, schema: str) -> str: - """Fix the given JSON string to make it parseable and fully compliant with - the provided schema using GPT-3. - - Args: - json_string (str): The JSON string to fix. - schema (str): The schema to use to fix the JSON. - Returns: - str: The fixed JSON string. - """ - # Try to fix the JSON using GPT: - function_string = "def fix_json(json_string: str, schema:str=None) -> str:" - args = [f"'''{json_string}'''", f"'''{schema}'''"] - description_string = ( - "This function takes a JSON string and ensures that it" - " is parseable and fully compliant with the provided schema. If an object" - " or field specified in the schema isn't contained within the correct JSON," - " it is omitted. The function also escapes any double quotes within JSON" - " string values to ensure that they are valid. If the JSON string contains" - " any None or NaN values, they are replaced with null before being parsed." 
- ) - - # If it doesn't already start with a "`", add one: - if not json_string.startswith("`"): - json_string = "```json\n" + json_string + "\n```" - result_string = call_ai_function( - function_string, args, description_string, model=CFG.fast_llm_model - ) - logger.debug("------------ JSON FIX ATTEMPT ---------------") - logger.debug(f"Original JSON: {json_string}") - logger.debug("-----------") - logger.debug(f"Fixed JSON: {result_string}") - logger.debug("----------- END OF FIX ATTEMPT ----------------") - - try: - json.loads(result_string) # just check the validity - return result_string - except json.JSONDecodeError: # noqa: E722 - # Get the call stack: - # import traceback - # call_stack = traceback.format_exc() - # print(f"Failed to fix JSON: '{json_string}' "+call_stack) - return "failed" - - -def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]: - """Fix the given JSON string to make it parseable and fully compliant with two techniques. - - Args: - json_string (str): The JSON string to fix. - - Returns: - str: The fixed JSON string. - """ - - # Parse and print Assistant response - assistant_reply_json = fix_and_parse_json(assistant_reply) - if assistant_reply_json == {}: - assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets( - assistant_reply - ) - - if assistant_reply_json != {}: - return assistant_reply_json - - logger.error( - "Error: The following AI output couldn't be converted to a JSON:\n", - assistant_reply, - ) - if CFG.speak_mode: - say_text("I have received an invalid JSON response from the OpenAI API.") - - return {} - - -def fix_and_parse_json( - json_to_load: str, try_to_fix_with_gpt: bool = True -) -> Dict[Any, Any]: - """Fix and parse JSON string - - Args: - json_to_load (str): The JSON string. - try_to_fix_with_gpt (bool, optional): Try to fix the JSON with GPT. - Defaults to True. - - Returns: - str or dict[Any, Any]: The parsed JSON. - """ - - with contextlib.suppress(json.JSONDecodeError): - json_to_load = json_to_load.replace("\t", "") - return json.loads(json_to_load) - - with contextlib.suppress(json.JSONDecodeError): - json_to_load = correct_json(json_to_load) - return json.loads(json_to_load) - # Let's do something manually: - # sometimes GPT responds with something BEFORE the braces: - # "I'm sorry, I don't understand. Please try again." - # {"text": "I'm sorry, I don't understand. Please try again.", - # "confidence": 0.0} - # So let's try to find the first brace and then parse the rest - # of the string - try: - brace_index = json_to_load.index("{") - maybe_fixed_json = json_to_load[brace_index:] - last_brace_index = maybe_fixed_json.rindex("}") - maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1] - return json.loads(maybe_fixed_json) - except (json.JSONDecodeError, ValueError) as e: - return try_ai_fix(try_to_fix_with_gpt, e, json_to_load) - - -def try_ai_fix( - try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str -) -> Dict[Any, Any]: - """Try to fix the JSON with the AI - - Args: - try_to_fix_with_gpt (bool): Whether to try to fix the JSON with the AI. - exception (Exception): The exception that was raised. - json_to_load (str): The JSON string to load. - - Raises: - exception: If try_to_fix_with_gpt is False. - - Returns: - str or dict[Any, Any]: The JSON string or dictionary. - """ - if not try_to_fix_with_gpt: - raise exception - if CFG.debug_mode: - logger.warn( - "Warning: Failed to parse AI output, attempting to fix." 
- "\n If you see this warning frequently, it's likely that" - " your prompt is confusing the AI. Try changing it up" - " slightly." - ) - # Now try to fix this up using the ai_functions - ai_fixed_json = auto_fix_json(json_to_load, JSON_SCHEMA) - - if ai_fixed_json != "failed": - return json.loads(ai_fixed_json) - # This allows the AI to react to the error message, - # which usually results in it correcting its ways. - # logger.error("Failed to fix AI output, telling the AI.") - return {} - - -def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str): - if CFG.speak_mode and CFG.debug_mode: - say_text( - "I have received an invalid JSON response from the OpenAI API. " - "Trying to fix it now." - ) - logger.error("Attempting to fix JSON by finding outermost brackets\n") - - try: - json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}") - json_match = json_pattern.search(json_string) - - if json_match: - # Extract the valid JSON object from the string - json_string = json_match.group(0) - logger.typewriter_log( - title="Apparently json was fixed.", title_color=Fore.GREEN - ) - if CFG.speak_mode and CFG.debug_mode: - say_text("Apparently json was fixed.") - else: - return {} - - except (json.JSONDecodeError, ValueError): - if CFG.debug_mode: - logger.error(f"Error: Invalid JSON: {json_string}\n") - if CFG.speak_mode: - say_text("Didn't work. I will have to ignore this response then.") - logger.error("Error: Invalid JSON, setting it to empty JSON now.\n") - json_string = {} - - return fix_and_parse_json(json_string) diff --git a/spaces/CognitiveLabs/GPT-4-Vision-Chat/chainlit.md b/spaces/CognitiveLabs/GPT-4-Vision-Chat/chainlit.md deleted file mode 100644 index fbde825234e7221d3490b0f312bacde0cedc3e97..0000000000000000000000000000000000000000 --- a/spaces/CognitiveLabs/GPT-4-Vision-Chat/chainlit.md +++ /dev/null @@ -1,19 +0,0 @@ -# Welcome to GPT 4 turbo vision! 
🚀🤖 - -## Upload an image 🔗 -- option 1) Drag & Drop -- option 2) Click in the "UPLOAD FILES" button, on the left of the chat input 💬 -- option 3) Copy a image and paste it in the chat input (ctrl + v) - -### ~~GPT-4-1106-preview~~ for messages that ARE NOT images 📝 -* change log: - - Changed GPT-4-1106-preview for gpt-3.5-turbo-1106, due high cost of GPT-4-1106-preview -### gpt-4-vision-preview for messages that ARE images 📷 -If you upload more than 1 image, it will take the first image, this is just for demo purposes -* change log: - - Change max_tokens from the output to 300 - - Clear image history after the response - - image size limit set to 1mb - - -For suggestions you can use the community tab or open an issue in the github repository: [gpt-4-vision-chat](https://github.com/GianfrancoCorrea/gpt-4-vision-chat) \ No newline at end of file diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/builders/video_caption_builder.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/builders/video_caption_builder.py deleted file mode 100644 index e73ef9db75c4699e1a763b459a7c96c1147e192b..0000000000000000000000000000000000000000 --- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/builders/video_caption_builder.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import logging -import warnings - -from video_llama.common.registry import registry -from video_llama.datasets.builders.base_dataset_builder import BaseDatasetBuilder -from video_llama.datasets.datasets.webvid_datasets import WebvidDataset - -@registry.register_builder("webvid") -class WebvidBuilder(BaseDatasetBuilder): - train_dataset_cls = WebvidDataset - DATASET_CONFIG_DICT = {"default": "configs/datasets/webvid/defaults.yaml"} - - def _download_ann(self): - pass - - def _download_vis(self): - pass - - def build(self): - self.build_processors() - datasets = dict() - split = "train" - - build_info = self.config.build_info - dataset_cls = self.train_dataset_cls - datasets[split] = dataset_cls( - vis_processor=self.vis_processors[split], - text_processor=self.text_processors[split], - vis_root=build_info.videos_dir, - ann_root=build_info.anno_dir - ) - - return datasets \ No newline at end of file diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/video_llama.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/video_llama.py deleted file mode 100644 index c287887992ab48fdb7306cba2f6703e6b081712c..0000000000000000000000000000000000000000 --- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/video_llama.py +++ /dev/null @@ -1,424 +0,0 @@ -import logging -import random - -import torch -from torch.cuda.amp import autocast as autocast -import torch.nn as nn - -from video_llama.common.registry import registry -from video_llama.models.blip2 import Blip2Base, disabled_train -from video_llama.models.modeling_llama import LlamaForCausalLM -# from video_llama.models.Qformer import BertEncoder -from transformers import LlamaTokenizer,BertConfig -# from transformers.models.bert.modeling_bert import BertEncoder -import einops -import copy -import os -from video_llama.models.Qformer import BertConfig, BertLMHeadModel -# from flamingo_pytorch import PerceiverResampler -@registry.register_model("video_llama") -class VideoLLAMA(Blip2Base): - """ - BLIP2 GPT-LLAMA model. 
- """ - - PRETRAINED_MODEL_CONFIG_DICT = { - "pretrain_vicuna": "configs/models/video_llama.yaml", - } - - @classmethod - def init_video_Qformer(cls, num_query_token, vision_width,num_hidden_layers =2): - encoder_config = BertConfig.from_pretrained("bert-base-uncased") - encoder_config.num_hidden_layers = num_hidden_layers - encoder_config.encoder_width = vision_width - # insert cross-attention layer every other block - encoder_config.add_cross_attention = True - encoder_config.cross_attention_freq = 1 - encoder_config.query_length = num_query_token - Qformer = BertLMHeadModel(config=encoder_config) - query_tokens = nn.Parameter( - torch.zeros(1, num_query_token, encoder_config.hidden_size) - ) - query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range) - return Qformer, query_tokens - - def __init__( - self, - vit_model="eva_clip_g", - q_former_model="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth", - img_size=224, - drop_path_rate=0, - use_grad_checkpoint=False, - vit_precision="fp16", - freeze_vit=True, - freeze_qformer=True, - num_query_token=32, - llama_model="", - prompt_path="", - prompt_template="", - max_txt_len=32, - end_sym='\n', - low_resource=False, # use 8 bit and put vit in cpu - device_8bit=0, # the device of 8bit model should be set when loading and cannot be changed anymore. - - frozen_llama_proj=True, - llama_proj_model='', - fusion_header_type= "seqTransf", - max_frame_pos= 32, - fusion_head_layers = 2, - num_video_query_token = 32, - ): - super().__init__() - - self.tokenizer = self.init_tokenizer() - self.low_resource = low_resource - - print('Loading VIT') - self.visual_encoder, self.ln_vision = self.init_vision_encoder( - vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision - ) - if freeze_vit: - for name, param in self.visual_encoder.named_parameters(): - param.requires_grad = False - self.visual_encoder = self.visual_encoder.eval() - self.visual_encoder.train = disabled_train - for name, param in self.ln_vision.named_parameters(): - param.requires_grad = False - self.ln_vision = self.ln_vision.eval() - self.ln_vision.train = disabled_train - logging.info("freeze vision encoder") - print('Loading VIT Done') - - print('Loading Q-Former') - self.Qformer, self.query_tokens = self.init_Qformer( - num_query_token, self.visual_encoder.num_features - ) - self.Qformer.cls = None - self.Qformer.bert.embeddings.word_embeddings = None - self.Qformer.bert.embeddings.position_embeddings = None - for layer in self.Qformer.bert.encoder.layer: - layer.output = None - layer.intermediate = None - self.load_from_pretrained(url_or_filename=q_former_model) - - if freeze_qformer: - for name, param in self.Qformer.named_parameters(): - param.requires_grad = False - self.Qformer = self.Qformer.eval() - self.Qformer.train = disabled_train - self.query_tokens.requires_grad = False - logging.info("freeze Qformer") - logging.info('Loading Q-Former Done') - - logging.info('Loading LLAMA Tokenizer') - self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model, use_fast=False, use_auth_token=os.environ["API_TOKEN"]) - if self.llama_tokenizer.pad_token is None: - self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token - DEFAULT_IMAGE_PATCH_TOKEN = '' - self.llama_tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) - self.IMAGE_PATCH_TOKEN_ID = self.llama_tokenizer.get_vocab()[DEFAULT_IMAGE_PATCH_TOKEN] - - logging.info('Loading LLAMA Model') - if self.low_resource: - 
self.llama_model = LlamaForCausalLM.from_pretrained( - llama_model, - torch_dtype=torch.float16, - load_in_8bit=True, - device_map={'': device_8bit}, - use_auth_token=os.environ["API_TOKEN"] - ) - else: - self.llama_model = LlamaForCausalLM.from_pretrained( - llama_model, - torch_dtype=torch.float16,use_auth_token=os.environ["API_TOKEN"] - ) - - for name, param in self.llama_model.named_parameters(): - param.requires_grad = False - logging.info('Loading LLAMA Done') - - - logging.info('Loading LLAMA proj') - self.llama_proj = nn.Linear( - self.Qformer.config.hidden_size, self.llama_model.config.hidden_size - ) - if llama_proj_model: - print("load llama proj weight: {}".format(llama_proj_model)) - llama_proj_weight = torch.load(llama_proj_model, map_location="cpu") - msg = model.load_state_dict(llama_proj_weight['model'], strict=False) - - if frozen_llama_proj: - # todo frozen llama_proj - for name, param in self.llama_proj.named_parameters(): - param.requires_grad = False - logging.info('LLAMA proj is frozen') - else: - for name, param in self.llama_proj.named_parameters(): - param.requires_grad = True - logging.info('LLAMA proj is not frozen') - - logging.info('Loading llama_proj Done') - - self.max_txt_len = max_txt_len - self.end_sym = end_sym - - if prompt_path: - with open(prompt_path, 'r') as f: - raw_prompts = f.read().splitlines() - filted_prompts = [raw_prompt for raw_prompt in raw_prompts if "" in raw_prompt] - self.prompt_list = [prompt_template.format(p) for p in filted_prompts] - print('Load {} training prompts'.format(len(self.prompt_list))) - print('Prompt Example \n{}'.format(random.choice(self.prompt_list))) - else: - self.prompt_list = [] - - self.video_frame_position_embedding = nn.Embedding(max_frame_pos, self.Qformer.config.hidden_size) - self.num_video_query_token = num_video_query_token - self.video_Qformer,self.video_query_tokens = self.init_video_Qformer(num_query_token = num_video_query_token,\ - vision_width=self.Qformer.config.hidden_size, num_hidden_layers =2) - - self.video_Qformer.cls = None - self.video_Qformer.bert.embeddings.word_embeddings = None - self.video_Qformer.bert.embeddings.position_embeddings = None - for layer in self.video_Qformer.bert.encoder.layer: - layer.output = None - layer.intermediate = None - - - def vit_to_cpu(self): - self.ln_vision.to("cpu") - self.ln_vision.float() - self.visual_encoder.to("cpu") - self.visual_encoder.float() - - def encode_img(self, image): - device = image.device - # if self.low_resource: - # self.vit_to_cpu() - # image = image.to("cpu") - - # input shape b,c,t,h,w - batch_size,_,time_length,_,_ = image.size() - image = einops.rearrange(image, 'b c t h w -> (b t) c h w') - with self.maybe_autocast(): - # embed image features with blip2, out: (b t) q h - image_embeds = self.ln_vision(self.visual_encoder(image)).to(device) - image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device) - - query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) - query_output = self.Qformer.bert( - query_embeds=query_tokens, - encoder_hidden_states=image_embeds, - encoder_attention_mask=image_atts, - return_dict=True, - ) - - # add frame_pos embedding - position_ids = torch.arange(time_length, dtype=torch.long, device=query_tokens.device) - position_ids = position_ids.unsqueeze(0).expand(batch_size, -1) - frame_position_embeddings = self.video_frame_position_embedding(position_ids) - q_hidden_state = query_output.last_hidden_state - - frame_position_embeddings = 
frame_position_embeddings.unsqueeze(-2) - frame_hidden_state = einops.rearrange(q_hidden_state, '(b t) q h -> b t q h',b=batch_size,t=time_length) - frame_hidden_state = frame_position_embeddings + frame_hidden_state - - # frame attention - frame_hidden_state = einops.rearrange(frame_hidden_state, 'b t q h -> b (t q) h',b=batch_size,t=time_length) - frame_atts = torch.ones(frame_hidden_state.size()[:-1], dtype=torch.long).to(device) - video_query_tokens = self.video_query_tokens.expand(frame_hidden_state.shape[0], -1, -1) - - # print('attention') - # print(video_query_tokens.size()) - # print(frame_hidden_state.size()) - video_query_output = self.video_Qformer.bert( - query_embeds=video_query_tokens, - encoder_hidden_states=frame_hidden_state, - encoder_attention_mask=frame_atts, - return_dict=True, - ) - video_hidden = video_query_output.last_hidden_state - - inputs_llama = self.llama_proj(video_hidden) - atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image_embeds.device) - return inputs_llama, atts_llama - - def prompt_wrap(self, img_embeds, atts_img, prompt): - if prompt: - batch_size = img_embeds.shape[0] - # print(prompt) - p_before, p_after = prompt.split('') - p_before_tokens = self.llama_tokenizer( - p_before, return_tensors="pt", add_special_tokens=False).to(img_embeds.device) - p_after_tokens = self.llama_tokenizer( - p_after, return_tensors="pt", add_special_tokens=False).to(img_embeds.device) - p_before_embeds = self.llama_model.model.embed_tokens(p_before_tokens.input_ids).expand(batch_size, -1, -1) - p_after_embeds = self.llama_model.model.embed_tokens(p_after_tokens.input_ids).expand(batch_size, -1, -1) - wrapped_img_embeds = torch.cat([p_before_embeds, img_embeds, p_after_embeds], dim=1) - wrapped_atts_img = atts_img[:, :1].expand(-1, wrapped_img_embeds.shape[1]) - - return wrapped_img_embeds, wrapped_atts_img - else: - return img_embeds, atts_img - - def forward(self, samples): - if 'conv_type' in samples.keys() and samples['conv_type']=='multi': - num_patch_tokens = self.num_video_query_token - im_patch_token_id = self.IMAGE_PATCH_TOKEN_ID - image = samples["images"] - input_ids = samples['input_ids'] - if len(image.size())==4: - time = 1 - image = einops.repeat(image, 'b c h w -> b c t h w',t = time) - img_embeds, atts_img = self.encode_img(image) - - temp_input_ids = copy.deepcopy(input_ids) - temp_input_ids[temp_input_ids == im_patch_token_id] = 0 - temp_input_embedding = self.llama_model.model.embed_tokens(temp_input_ids) - - new_input_embeds=[] - cur_image_idx = 0 - for cur_input_ids, cur_input_embeds in zip(input_ids, temp_input_embedding): - cur_image_features = img_embeds[cur_image_idx] - - if (cur_input_ids == im_patch_token_id).sum() != num_patch_tokens: - raise ValueError("The number of image patch tokens should be the same as the number of image patches.") - masked_indices = torch.where(cur_input_ids == im_patch_token_id)[0] - mask_index_start = masked_indices[0] - if (masked_indices != torch.arange(mask_index_start, mask_index_start+num_patch_tokens, device=masked_indices.device, dtype=masked_indices.dtype)).any(): - raise ValueError("The image patch tokens should be consecutive.") - - cur_new_input_embeds = torch.cat((cur_input_embeds[:mask_index_start], cur_image_features, cur_input_embeds[mask_index_start+num_patch_tokens:]), dim=0) - new_input_embeds.append(cur_new_input_embeds) - - cur_image_idx+=1 - inputs_embeds = torch.stack(new_input_embeds, dim=0) - targets = samples['labels'] - attention_mask = samples['attention_mask'] - 
with self.maybe_autocast(): - outputs = self.llama_model( - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - return_dict=True, - labels=targets, - ) - loss = outputs.loss - return {"loss": loss} - else: - image = samples["image"] - - if len(image.size()) != 5: - time = 1 - image = einops.repeat(image, 'b c h w -> b c t h w',t = time) - - img_embeds, atts_img = self.encode_img(image) - - if self.prompt_list: - prompt = random.choice(self.prompt_list) - img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, prompt) - - - self.llama_tokenizer.padding_side = "right" - - text = [t + self.end_sym for t in samples["text_input"]] - - to_regress_tokens = self.llama_tokenizer( - text, - return_tensors="pt", - padding="longest", - truncation=True, - max_length=self.max_txt_len, - add_special_tokens=False - ).to(image.device) - - targets = to_regress_tokens.input_ids.masked_fill( - to_regress_tokens.input_ids == self.llama_tokenizer.pad_token_id, -100 - ) - - empty_targets = ( - torch.ones([atts_img.shape[0], atts_img.shape[1]+1], - dtype=torch.long).to(image.device).fill_(-100) # plus one for bos - ) - targets = torch.cat([empty_targets, targets], dim=1) - - batch_size = img_embeds.shape[0] - bos = torch.ones([batch_size, 1], - dtype=to_regress_tokens.input_ids.dtype, - device=to_regress_tokens.input_ids.device) * self.llama_tokenizer.bos_token_id - bos_embeds = self.llama_model.model.embed_tokens(bos) - atts_bos = atts_img[:, :1] - - to_regress_embeds = self.llama_model.model.embed_tokens(to_regress_tokens.input_ids) - inputs_embeds = torch.cat([bos_embeds, img_embeds, to_regress_embeds], dim=1) - attention_mask = torch.cat([atts_bos, atts_img, to_regress_tokens.attention_mask], dim=1) - - with self.maybe_autocast(): - outputs = self.llama_model( - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - return_dict=True, - labels=targets, - ) - loss = outputs.loss - - return {"loss": loss} - - @classmethod - def from_config(cls, cfg): - vit_model = cfg.get("vit_model", "eva_clip_g") - q_former_model = cfg.get("q_former_model", "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth") - img_size = cfg.get("image_size") - num_query_token = cfg.get("num_query_token") - llama_model = cfg.get("llama_model") - - drop_path_rate = cfg.get("drop_path_rate", 0) - use_grad_checkpoint = cfg.get("use_grad_checkpoint", False) - vit_precision = cfg.get("vit_precision", "fp16") - freeze_vit = cfg.get("freeze_vit", True) - freeze_qformer = cfg.get("freeze_qformer", True) - low_resource = cfg.get("low_resource", False) - device_8bit = cfg.get("device_8bit", 0) - - prompt_path = cfg.get("prompt_path", "") - prompt_template = cfg.get("prompt_template", "") - max_txt_len = cfg.get("max_txt_len", 32) - end_sym = cfg.get("end_sym", '\n') - - frozen_llama_proj = cfg.get("frozen_llama_proj", True) - llama_proj_model = cfg.get("llama_proj_model", '') - - fusion_header_type = cfg.get("fusion_header_type", 'seqTransf') - max_frame_pos = cfg.get("max_frame_pos", 32) - fusion_head_layers = cfg.get("fusion_head_layers", 2) - num_video_query_token = cfg.get("num_video_query_token", 32) - - model = cls( - vit_model=vit_model, - q_former_model=q_former_model, - img_size=img_size, - drop_path_rate=drop_path_rate, - use_grad_checkpoint=use_grad_checkpoint, - vit_precision=vit_precision, - freeze_vit=freeze_vit, - freeze_qformer=freeze_qformer, - num_query_token=num_query_token, - llama_model=llama_model, - prompt_path=prompt_path, - 
prompt_template=prompt_template, - max_txt_len=max_txt_len, - end_sym=end_sym, - low_resource=low_resource, - device_8bit=device_8bit, - fusion_header_type=fusion_header_type, - max_frame_pos=max_frame_pos, - fusion_head_layers=fusion_head_layers, - frozen_llama_proj=frozen_llama_proj, - num_video_query_token=num_video_query_token - ) - - ckpt_path = cfg.get("ckpt", "") # load weights of MiniGPT-4 - if ckpt_path: - print("Load BLIP2-LLM Checkpoint: {}".format(ckpt_path)) - ckpt = torch.load(ckpt_path, map_location="cpu") - msg = model.load_state_dict(ckpt['model'], strict=False) - return model diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/JpegImagePlugin.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/JpegImagePlugin.py deleted file mode 100644 index dfc7e6e9f569e05e3a1f9e3fd1407b5f202a6d56..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/JpegImagePlugin.py +++ /dev/null @@ -1,849 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# JPEG (JFIF) file handling -# -# See "Digital Compression and Coding of Continuous-Tone Still Images, -# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1) -# -# History: -# 1995-09-09 fl Created -# 1995-09-13 fl Added full parser -# 1996-03-25 fl Added hack to use the IJG command line utilities -# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug -# 1996-05-28 fl Added draft support, JFIF version (0.1) -# 1996-12-30 fl Added encoder options, added progression property (0.2) -# 1997-08-27 fl Save mode 1 images as BW (0.3) -# 1998-07-12 fl Added YCbCr to draft and save methods (0.4) -# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1) -# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2) -# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3) -# 2003-04-25 fl Added experimental EXIF decoder (0.5) -# 2003-06-06 fl Added experimental EXIF GPSinfo decoder -# 2003-09-13 fl Extract COM markers -# 2009-09-06 fl Added icc_profile support (from Florian Hoech) -# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6) -# 2009-03-08 fl Added subsampling support (from Justin Huff). -# -# Copyright (c) 1997-2003 by Secret Labs AB. -# Copyright (c) 1995-1996 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# -import array -import io -import math -import os -import struct -import subprocess -import sys -import tempfile -import warnings - -from . import Image, ImageFile -from ._binary import i16be as i16 -from ._binary import i32be as i32 -from ._binary import o8 -from ._binary import o16be as o16 -from .JpegPresets import presets - -# -# Parser - - -def Skip(self, marker): - n = i16(self.fp.read(2)) - 2 - ImageFile._safe_read(self.fp, n) - - -def APP(self, marker): - # - # Application marker. Store these in the APP dictionary. - # Also look for well-known application markers. 
- - n = i16(self.fp.read(2)) - 2 - s = ImageFile._safe_read(self.fp, n) - - app = "APP%d" % (marker & 15) - - self.app[app] = s # compatibility - self.applist.append((app, s)) - - if marker == 0xFFE0 and s[:4] == b"JFIF": - # extract JFIF information - self.info["jfif"] = version = i16(s, 5) # version - self.info["jfif_version"] = divmod(version, 256) - # extract JFIF properties - try: - jfif_unit = s[7] - jfif_density = i16(s, 8), i16(s, 10) - except Exception: - pass - else: - if jfif_unit == 1: - self.info["dpi"] = jfif_density - self.info["jfif_unit"] = jfif_unit - self.info["jfif_density"] = jfif_density - elif marker == 0xFFE1 and s[:5] == b"Exif\0": - if "exif" not in self.info: - # extract EXIF information (incomplete) - self.info["exif"] = s # FIXME: value will change - self._exif_offset = self.fp.tell() - n + 6 - elif marker == 0xFFE2 and s[:5] == b"FPXR\0": - # extract FlashPix information (incomplete) - self.info["flashpix"] = s # FIXME: value will change - elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0": - # Since an ICC profile can be larger than the maximum size of - # a JPEG marker (64K), we need provisions to split it into - # multiple markers. The format defined by the ICC specifies - # one or more APP2 markers containing the following data: - # Identifying string ASCII "ICC_PROFILE\0" (12 bytes) - # Marker sequence number 1, 2, etc (1 byte) - # Number of markers Total of APP2's used (1 byte) - # Profile data (remainder of APP2 data) - # Decoders should use the marker sequence numbers to - # reassemble the profile, rather than assuming that the APP2 - # markers appear in the correct sequence. - self.icclist.append(s) - elif marker == 0xFFED and s[:14] == b"Photoshop 3.0\x00": - # parse the image resource block - offset = 14 - photoshop = self.info.setdefault("photoshop", {}) - while s[offset : offset + 4] == b"8BIM": - try: - offset += 4 - # resource code - code = i16(s, offset) - offset += 2 - # resource name (usually empty) - name_len = s[offset] - # name = s[offset+1:offset+1+name_len] - offset += 1 + name_len - offset += offset & 1 # align - # resource data block - size = i32(s, offset) - offset += 4 - data = s[offset : offset + size] - if code == 0x03ED: # ResolutionInfo - data = { - "XResolution": i32(data, 0) / 65536, - "DisplayedUnitsX": i16(data, 4), - "YResolution": i32(data, 8) / 65536, - "DisplayedUnitsY": i16(data, 12), - } - photoshop[code] = data - offset += size - offset += offset & 1 # align - except struct.error: - break # insufficient data - - elif marker == 0xFFEE and s[:5] == b"Adobe": - self.info["adobe"] = i16(s, 5) - # extract Adobe custom properties - try: - adobe_transform = s[11] - except IndexError: - pass - else: - self.info["adobe_transform"] = adobe_transform - elif marker == 0xFFE2 and s[:4] == b"MPF\0": - # extract MPO information - self.info["mp"] = s[4:] - # offset is current location minus buffer size - # plus constant header size - self.info["mpoffset"] = self.fp.tell() - n + 4 - - # If DPI isn't in JPEG header, fetch from EXIF - if "dpi" not in self.info and "exif" in self.info: - try: - exif = self.getexif() - resolution_unit = exif[0x0128] - x_resolution = exif[0x011A] - try: - dpi = float(x_resolution[0]) / x_resolution[1] - except TypeError: - dpi = x_resolution - if math.isnan(dpi): - raise ValueError - if resolution_unit == 3: # cm - # 1 dpcm = 2.54 dpi - dpi *= 2.54 - self.info["dpi"] = dpi, dpi - except (TypeError, KeyError, SyntaxError, ValueError, ZeroDivisionError): - # SyntaxError for invalid/unreadable EXIF - # 
KeyError for dpi not included - # ZeroDivisionError for invalid dpi rational value - # ValueError or TypeError for dpi being an invalid float - self.info["dpi"] = 72, 72 - - -def COM(self, marker): - # - # Comment marker. Store these in the APP dictionary. - n = i16(self.fp.read(2)) - 2 - s = ImageFile._safe_read(self.fp, n) - - self.info["comment"] = s - self.app["COM"] = s # compatibility - self.applist.append(("COM", s)) - - -def SOF(self, marker): - # - # Start of frame marker. Defines the size and mode of the - # image. JPEG is colour blind, so we use some simple - # heuristics to map the number of layers to an appropriate - # mode. Note that this could be made a bit brighter, by - # looking for JFIF and Adobe APP markers. - - n = i16(self.fp.read(2)) - 2 - s = ImageFile._safe_read(self.fp, n) - self._size = i16(s, 3), i16(s, 1) - - self.bits = s[0] - if self.bits != 8: - msg = f"cannot handle {self.bits}-bit layers" - raise SyntaxError(msg) - - self.layers = s[5] - if self.layers == 1: - self.mode = "L" - elif self.layers == 3: - self.mode = "RGB" - elif self.layers == 4: - self.mode = "CMYK" - else: - msg = f"cannot handle {self.layers}-layer images" - raise SyntaxError(msg) - - if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]: - self.info["progressive"] = self.info["progression"] = 1 - - if self.icclist: - # fixup icc profile - self.icclist.sort() # sort by sequence number - if self.icclist[0][13] == len(self.icclist): - profile = [] - for p in self.icclist: - profile.append(p[14:]) - icc_profile = b"".join(profile) - else: - icc_profile = None # wrong number of fragments - self.info["icc_profile"] = icc_profile - self.icclist = [] - - for i in range(6, len(s), 3): - t = s[i : i + 3] - # 4-tuples: id, vsamp, hsamp, qtable - self.layer.append((t[0], t[1] // 16, t[1] & 15, t[2])) - - -def DQT(self, marker): - # - # Define quantization table. Note that there might be more - # than one table in each marker. - - # FIXME: The quantization tables can be used to estimate the - # compression quality. 
- - n = i16(self.fp.read(2)) - 2 - s = ImageFile._safe_read(self.fp, n) - while len(s): - v = s[0] - precision = 1 if (v // 16 == 0) else 2 # in bytes - qt_length = 1 + precision * 64 - if len(s) < qt_length: - msg = "bad quantization table marker" - raise SyntaxError(msg) - data = array.array("B" if precision == 1 else "H", s[1:qt_length]) - if sys.byteorder == "little" and precision > 1: - data.byteswap() # the values are always big-endian - self.quantization[v & 15] = [data[i] for i in zigzag_index] - s = s[qt_length:] - - -# -# JPEG marker table - -MARKER = { - 0xFFC0: ("SOF0", "Baseline DCT", SOF), - 0xFFC1: ("SOF1", "Extended Sequential DCT", SOF), - 0xFFC2: ("SOF2", "Progressive DCT", SOF), - 0xFFC3: ("SOF3", "Spatial lossless", SOF), - 0xFFC4: ("DHT", "Define Huffman table", Skip), - 0xFFC5: ("SOF5", "Differential sequential DCT", SOF), - 0xFFC6: ("SOF6", "Differential progressive DCT", SOF), - 0xFFC7: ("SOF7", "Differential spatial", SOF), - 0xFFC8: ("JPG", "Extension", None), - 0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF), - 0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF), - 0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF), - 0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip), - 0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF), - 0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF), - 0xFFCF: ("SOF15", "Differential spatial (AC)", SOF), - 0xFFD0: ("RST0", "Restart 0", None), - 0xFFD1: ("RST1", "Restart 1", None), - 0xFFD2: ("RST2", "Restart 2", None), - 0xFFD3: ("RST3", "Restart 3", None), - 0xFFD4: ("RST4", "Restart 4", None), - 0xFFD5: ("RST5", "Restart 5", None), - 0xFFD6: ("RST6", "Restart 6", None), - 0xFFD7: ("RST7", "Restart 7", None), - 0xFFD8: ("SOI", "Start of image", None), - 0xFFD9: ("EOI", "End of image", None), - 0xFFDA: ("SOS", "Start of scan", Skip), - 0xFFDB: ("DQT", "Define quantization table", DQT), - 0xFFDC: ("DNL", "Define number of lines", Skip), - 0xFFDD: ("DRI", "Define restart interval", Skip), - 0xFFDE: ("DHP", "Define hierarchical progression", SOF), - 0xFFDF: ("EXP", "Expand reference component", Skip), - 0xFFE0: ("APP0", "Application segment 0", APP), - 0xFFE1: ("APP1", "Application segment 1", APP), - 0xFFE2: ("APP2", "Application segment 2", APP), - 0xFFE3: ("APP3", "Application segment 3", APP), - 0xFFE4: ("APP4", "Application segment 4", APP), - 0xFFE5: ("APP5", "Application segment 5", APP), - 0xFFE6: ("APP6", "Application segment 6", APP), - 0xFFE7: ("APP7", "Application segment 7", APP), - 0xFFE8: ("APP8", "Application segment 8", APP), - 0xFFE9: ("APP9", "Application segment 9", APP), - 0xFFEA: ("APP10", "Application segment 10", APP), - 0xFFEB: ("APP11", "Application segment 11", APP), - 0xFFEC: ("APP12", "Application segment 12", APP), - 0xFFED: ("APP13", "Application segment 13", APP), - 0xFFEE: ("APP14", "Application segment 14", APP), - 0xFFEF: ("APP15", "Application segment 15", APP), - 0xFFF0: ("JPG0", "Extension 0", None), - 0xFFF1: ("JPG1", "Extension 1", None), - 0xFFF2: ("JPG2", "Extension 2", None), - 0xFFF3: ("JPG3", "Extension 3", None), - 0xFFF4: ("JPG4", "Extension 4", None), - 0xFFF5: ("JPG5", "Extension 5", None), - 0xFFF6: ("JPG6", "Extension 6", None), - 0xFFF7: ("JPG7", "Extension 7", None), - 0xFFF8: ("JPG8", "Extension 8", None), - 0xFFF9: ("JPG9", "Extension 9", None), - 0xFFFA: ("JPG10", "Extension 10", None), - 0xFFFB: ("JPG11", "Extension 11", None), - 0xFFFC: ("JPG12", "Extension 12", None), - 0xFFFD: ("JPG13", "Extension 13", None), - 0xFFFE: ("COM", "Comment", COM), -} - - 
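The APP/COM/SOF/DQT handlers and the MARKER table above together describe the JPEG segment layout this plugin walks: every segment starts with a 0xFF byte plus a marker code, standalone markers (SOI, EOI, RST0-7) carry no payload, and all other segments carry a big-endian two-byte length that counts the length field itself. A minimal standalone sketch of that scanning loop, independent of Pillow; the placeholder path, the subset of printed names, and the choice to stop at SOS are illustrative assumptions, not part of the library:

```python
# Minimal JPEG segment walker mirroring the MARKER table above (illustrative only).
import struct

STANDALONE = {0xFFD8, 0xFFD9} | {0xFFD0 + i for i in range(8)}  # SOI, EOI, RST0-7: no length field
NAMES = {0xFFE0: "APP0", 0xFFE1: "APP1", 0xFFDB: "DQT", 0xFFC0: "SOF0",
         0xFFC4: "DHT", 0xFFDA: "SOS", 0xFFD9: "EOI"}           # small subset, for printing only

def walk_segments(path):
    with open(path, "rb") as fp:
        if fp.read(2) != b"\xFF\xD8":                      # SOI must come first
            raise ValueError("not a JPEG file")
        while True:
            byte = fp.read(1)
            if not byte:
                break                                      # ran off the end of the file
            if byte != b"\xFF":
                continue                                   # skip junk/padding between segments
            code = fp.read(1)
            while code == b"\xFF":                         # optional fill bytes before the marker code
                code = fp.read(1)
            if not code:
                break
            marker = 0xFF00 | code[0]
            if marker in STANDALONE:
                print(NAMES.get(marker, hex(marker)), "(standalone)")
                if marker == 0xFFD9:                       # EOI: done
                    break
                continue
            (length,) = struct.unpack(">H", fp.read(2))    # length includes these two bytes
            print(NAMES.get(marker, hex(marker)), "length =", length)
            if marker == 0xFFDA:                           # SOS: entropy-coded data follows
                break
            fp.seek(length - 2, 1)                         # skip the segment payload

# walk_segments("example.jpg")                             # "example.jpg" is a placeholder path
```

Run against a typical baseline JPEG, this should list APP0/DQT/SOF0/DHT segments and stop at SOS, which matches the order the `_open` parser below dispatches through the MARKER table.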
-def _accept(prefix): - # Magic number was taken from https://en.wikipedia.org/wiki/JPEG - return prefix[:3] == b"\xFF\xD8\xFF" - - -## -# Image plugin for JPEG and JFIF images. - - -class JpegImageFile(ImageFile.ImageFile): - format = "JPEG" - format_description = "JPEG (ISO 10918)" - - def _open(self): - s = self.fp.read(3) - - if not _accept(s): - msg = "not a JPEG file" - raise SyntaxError(msg) - s = b"\xFF" - - # Create attributes - self.bits = self.layers = 0 - - # JPEG specifics (internal) - self.layer = [] - self.huffman_dc = {} - self.huffman_ac = {} - self.quantization = {} - self.app = {} # compatibility - self.applist = [] - self.icclist = [] - - while True: - i = s[0] - if i == 0xFF: - s = s + self.fp.read(1) - i = i16(s) - else: - # Skip non-0xFF junk - s = self.fp.read(1) - continue - - if i in MARKER: - name, description, handler = MARKER[i] - if handler is not None: - handler(self, i) - if i == 0xFFDA: # start of scan - rawmode = self.mode - if self.mode == "CMYK": - rawmode = "CMYK;I" # assume adobe conventions - self.tile = [("jpeg", (0, 0) + self.size, 0, (rawmode, ""))] - # self.__offset = self.fp.tell() - break - s = self.fp.read(1) - elif i == 0 or i == 0xFFFF: - # padded marker or junk; move on - s = b"\xff" - elif i == 0xFF00: # Skip extraneous data (escaped 0xFF) - s = self.fp.read(1) - else: - msg = "no marker found" - raise SyntaxError(msg) - - def load_read(self, read_bytes): - """ - internal: read more image data - For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker - so libjpeg can finish decoding - """ - s = self.fp.read(read_bytes) - - if not s and ImageFile.LOAD_TRUNCATED_IMAGES and not hasattr(self, "_ended"): - # Premature EOF. - # Pretend file is finished adding EOI marker - self._ended = True - return b"\xFF\xD9" - - return s - - def draft(self, mode, size): - if len(self.tile) != 1: - return - - # Protect from second call - if self.decoderconfig: - return - - d, e, o, a = self.tile[0] - scale = 1 - original_size = self.size - - if a[0] == "RGB" and mode in ["L", "YCbCr"]: - self.mode = mode - a = mode, "" - - if size: - scale = min(self.size[0] // size[0], self.size[1] // size[1]) - for s in [8, 4, 2, 1]: - if scale >= s: - break - e = ( - e[0], - e[1], - (e[2] - e[0] + s - 1) // s + e[0], - (e[3] - e[1] + s - 1) // s + e[1], - ) - self._size = ((self.size[0] + s - 1) // s, (self.size[1] + s - 1) // s) - scale = s - - self.tile = [(d, e, o, a)] - self.decoderconfig = (scale, 0) - - box = (0, 0, original_size[0] / scale, original_size[1] / scale) - return self.mode, box - - def load_djpeg(self): - # ALTERNATIVE: handle JPEGs via the IJG command line utilities - - f, path = tempfile.mkstemp() - os.close(f) - if os.path.exists(self.filename): - subprocess.check_call(["djpeg", "-outfile", path, self.filename]) - else: - try: - os.unlink(path) - except OSError: - pass - - msg = "Invalid Filename" - raise ValueError(msg) - - try: - with Image.open(path) as _im: - _im.load() - self.im = _im.im - finally: - try: - os.unlink(path) - except OSError: - pass - - self.mode = self.im.mode - self._size = self.im.size - - self.tile = [] - - def _getexif(self): - return _getexif(self) - - def _getmp(self): - return _getmp(self) - - def getxmp(self): - """ - Returns a dictionary containing the XMP tags. - Requires defusedxml to be installed. - - :returns: XMP tags in a dictionary. 
- """ - - for segment, content in self.applist: - if segment == "APP1": - marker, xmp_tags = content.rsplit(b"\x00", 1) - if marker == b"http://ns.adobe.com/xap/1.0/": - return self._getxmp(xmp_tags) - return {} - - -def _getexif(self): - if "exif" not in self.info: - return None - return self.getexif()._get_merged_dict() - - -def _getmp(self): - # Extract MP information. This method was inspired by the "highly - # experimental" _getexif version that's been in use for years now, - # itself based on the ImageFileDirectory class in the TIFF plugin. - - # The MP record essentially consists of a TIFF file embedded in a JPEG - # application marker. - try: - data = self.info["mp"] - except KeyError: - return None - file_contents = io.BytesIO(data) - head = file_contents.read(8) - endianness = ">" if head[:4] == b"\x4d\x4d\x00\x2a" else "<" - # process dictionary - from . import TiffImagePlugin - - try: - info = TiffImagePlugin.ImageFileDirectory_v2(head) - file_contents.seek(info.next) - info.load(file_contents) - mp = dict(info) - except Exception as e: - msg = "malformed MP Index (unreadable directory)" - raise SyntaxError(msg) from e - # it's an error not to have a number of images - try: - quant = mp[0xB001] - except KeyError as e: - msg = "malformed MP Index (no number of images)" - raise SyntaxError(msg) from e - # get MP entries - mpentries = [] - try: - rawmpentries = mp[0xB002] - for entrynum in range(0, quant): - unpackedentry = struct.unpack_from( - f"{endianness}LLLHH", rawmpentries, entrynum * 16 - ) - labels = ("Attribute", "Size", "DataOffset", "EntryNo1", "EntryNo2") - mpentry = dict(zip(labels, unpackedentry)) - mpentryattr = { - "DependentParentImageFlag": bool(mpentry["Attribute"] & (1 << 31)), - "DependentChildImageFlag": bool(mpentry["Attribute"] & (1 << 30)), - "RepresentativeImageFlag": bool(mpentry["Attribute"] & (1 << 29)), - "Reserved": (mpentry["Attribute"] & (3 << 27)) >> 27, - "ImageDataFormat": (mpentry["Attribute"] & (7 << 24)) >> 24, - "MPType": mpentry["Attribute"] & 0x00FFFFFF, - } - if mpentryattr["ImageDataFormat"] == 0: - mpentryattr["ImageDataFormat"] = "JPEG" - else: - msg = "unsupported picture format in MPO" - raise SyntaxError(msg) - mptypemap = { - 0x000000: "Undefined", - 0x010001: "Large Thumbnail (VGA Equivalent)", - 0x010002: "Large Thumbnail (Full HD Equivalent)", - 0x020001: "Multi-Frame Image (Panorama)", - 0x020002: "Multi-Frame Image: (Disparity)", - 0x020003: "Multi-Frame Image: (Multi-Angle)", - 0x030000: "Baseline MP Primary Image", - } - mpentryattr["MPType"] = mptypemap.get(mpentryattr["MPType"], "Unknown") - mpentry["Attribute"] = mpentryattr - mpentries.append(mpentry) - mp[0xB002] = mpentries - except KeyError as e: - msg = "malformed MP Index (bad MP Entry)" - raise SyntaxError(msg) from e - # Next we should try and parse the individual image unique ID list; - # we don't because I've never seen this actually used in a real MPO - # file and so can't test it. 
- return mp - - -# -------------------------------------------------------------------- -# stuff to save JPEG files - -RAWMODE = { - "1": "L", - "L": "L", - "RGB": "RGB", - "RGBX": "RGB", - "CMYK": "CMYK;I", # assume adobe conventions - "YCbCr": "YCbCr", -} - -# fmt: off -zigzag_index = ( - 0, 1, 5, 6, 14, 15, 27, 28, - 2, 4, 7, 13, 16, 26, 29, 42, - 3, 8, 12, 17, 25, 30, 41, 43, - 9, 11, 18, 24, 31, 40, 44, 53, - 10, 19, 23, 32, 39, 45, 52, 54, - 20, 22, 33, 38, 46, 51, 55, 60, - 21, 34, 37, 47, 50, 56, 59, 61, - 35, 36, 48, 49, 57, 58, 62, 63, -) - -samplings = { - (1, 1, 1, 1, 1, 1): 0, - (2, 1, 1, 1, 1, 1): 1, - (2, 2, 1, 1, 1, 1): 2, -} -# fmt: on - - -def get_sampling(im): - # There's no subsampling when images have only 1 layer - # (grayscale images) or when they are CMYK (4 layers), - # so set subsampling to the default value. - # - # NOTE: currently Pillow can't encode JPEG to YCCK format. - # If YCCK support is added in the future, subsampling code will have - # to be updated (here and in JpegEncode.c) to deal with 4 layers. - if not hasattr(im, "layers") or im.layers in (1, 4): - return -1 - sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3] - return samplings.get(sampling, -1) - - -def _save(im, fp, filename): - if im.width == 0 or im.height == 0: - msg = "cannot write empty image as JPEG" - raise ValueError(msg) - - try: - rawmode = RAWMODE[im.mode] - except KeyError as e: - msg = f"cannot write mode {im.mode} as JPEG" - raise OSError(msg) from e - - info = im.encoderinfo - - dpi = [round(x) for x in info.get("dpi", (0, 0))] - - quality = info.get("quality", -1) - subsampling = info.get("subsampling", -1) - qtables = info.get("qtables") - - if quality == "keep": - quality = -1 - subsampling = "keep" - qtables = "keep" - elif quality in presets: - preset = presets[quality] - quality = -1 - subsampling = preset.get("subsampling", -1) - qtables = preset.get("quantization") - elif not isinstance(quality, int): - msg = "Invalid quality setting" - raise ValueError(msg) - else: - if subsampling in presets: - subsampling = presets[subsampling].get("subsampling", -1) - if isinstance(qtables, str) and qtables in presets: - qtables = presets[qtables].get("quantization") - - if subsampling == "4:4:4": - subsampling = 0 - elif subsampling == "4:2:2": - subsampling = 1 - elif subsampling == "4:2:0": - subsampling = 2 - elif subsampling == "4:1:1": - # For compatibility. Before Pillow 4.3, 4:1:1 actually meant 4:2:0. - # Set 4:2:0 if someone is still using that value. 
- subsampling = 2 - elif subsampling == "keep": - if im.format != "JPEG": - msg = "Cannot use 'keep' when original image is not a JPEG" - raise ValueError(msg) - subsampling = get_sampling(im) - - def validate_qtables(qtables): - if qtables is None: - return qtables - if isinstance(qtables, str): - try: - lines = [ - int(num) - for line in qtables.splitlines() - for num in line.split("#", 1)[0].split() - ] - except ValueError as e: - msg = "Invalid quantization table" - raise ValueError(msg) from e - else: - qtables = [lines[s : s + 64] for s in range(0, len(lines), 64)] - if isinstance(qtables, (tuple, list, dict)): - if isinstance(qtables, dict): - qtables = [ - qtables[key] for key in range(len(qtables)) if key in qtables - ] - elif isinstance(qtables, tuple): - qtables = list(qtables) - if not (0 < len(qtables) < 5): - msg = "None or too many quantization tables" - raise ValueError(msg) - for idx, table in enumerate(qtables): - try: - if len(table) != 64: - raise TypeError - table = array.array("H", table) - except TypeError as e: - msg = "Invalid quantization table" - raise ValueError(msg) from e - else: - qtables[idx] = list(table) - return qtables - - if qtables == "keep": - if im.format != "JPEG": - msg = "Cannot use 'keep' when original image is not a JPEG" - raise ValueError(msg) - qtables = getattr(im, "quantization", None) - qtables = validate_qtables(qtables) - - extra = info.get("extra", b"") - - MAX_BYTES_IN_MARKER = 65533 - icc_profile = info.get("icc_profile") - if icc_profile: - ICC_OVERHEAD_LEN = 14 - MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN - markers = [] - while icc_profile: - markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER]) - icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:] - i = 1 - for marker in markers: - size = o16(2 + ICC_OVERHEAD_LEN + len(marker)) - extra += ( - b"\xFF\xE2" - + size - + b"ICC_PROFILE\0" - + o8(i) - + o8(len(markers)) - + marker - ) - i += 1 - - comment = info.get("comment", im.info.get("comment")) - - # "progressive" is the official name, but older documentation - # says "progression" - # FIXME: issue a warning if the wrong form is used (post-1.1.7) - progressive = info.get("progressive", False) or info.get("progression", False) - - optimize = info.get("optimize", False) - - exif = info.get("exif", b"") - if isinstance(exif, Image.Exif): - exif = exif.tobytes() - if len(exif) > MAX_BYTES_IN_MARKER: - msg = "EXIF data is too long" - raise ValueError(msg) - - # get keyword arguments - im.encoderconfig = ( - quality, - progressive, - info.get("smooth", 0), - optimize, - info.get("streamtype", 0), - dpi[0], - dpi[1], - subsampling, - qtables, - comment, - extra, - exif, - ) - - # if we optimize, libjpeg needs a buffer big enough to hold the whole image - # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is - # channels*size, this is a value that's been used in a django patch. - # https://github.com/matthewwithanm/django-imagekit/issues/50 - bufsize = 0 - if optimize or progressive: - # CMYK can be bigger - if im.mode == "CMYK": - bufsize = 4 * im.size[0] * im.size[1] - # keep sets quality to -1, but the actual value may be high. - elif quality >= 95 or quality == -1: - bufsize = 2 * im.size[0] * im.size[1] - else: - bufsize = im.size[0] * im.size[1] - - # The EXIF info needs to be written as one block, + APP1, + one spare byte. - # Ensure that our buffer is big enough. Same with the icc_profile block. 
- bufsize = max(ImageFile.MAXBLOCK, bufsize, len(exif) + 5, len(extra) + 1) - - ImageFile._save(im, fp, [("jpeg", (0, 0) + im.size, 0, rawmode)], bufsize) - - -def _save_cjpeg(im, fp, filename): - # ALTERNATIVE: handle JPEGs via the IJG command line utilities. - tempfile = im._dump() - subprocess.check_call(["cjpeg", "-outfile", filename, tempfile]) - try: - os.unlink(tempfile) - except OSError: - pass - - -## -# Factory for making JPEG and MPO instances -def jpeg_factory(fp=None, filename=None): - im = JpegImageFile(fp, filename) - try: - mpheader = im._getmp() - if mpheader[45057] > 1: - # It's actually an MPO - from .MpoImagePlugin import MpoImageFile - - # Don't reload everything, just convert it. - im = MpoImageFile.adopt(im, mpheader) - except (TypeError, IndexError): - # It is really a JPEG - pass - except SyntaxError: - warnings.warn( - "Image appears to be a malformed MPO file, it will be " - "interpreted as a base JPEG file" - ) - return im - - -# --------------------------------------------------------------------- -# Registry stuff - -Image.register_open(JpegImageFile.format, jpeg_factory, _accept) -Image.register_save(JpegImageFile.format, _save) - -Image.register_extensions(JpegImageFile.format, [".jfif", ".jpe", ".jpg", ".jpeg"]) - -Image.register_mime(JpegImageFile.format, "image/jpeg") diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/__init__.py deleted file mode 100644 index 72c34e544e1634e4f42c005506bac9b61ab095f5..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/__init__.py +++ /dev/null @@ -1,90 +0,0 @@ -from __future__ import annotations - -__all__ = ( - "AsyncResource", - "IPAddressType", - "IPSockAddrType", - "SocketAttribute", - "SocketStream", - "SocketListener", - "UDPSocket", - "UNIXSocketStream", - "UDPPacketType", - "ConnectedUDPSocket", - "UnreliableObjectReceiveStream", - "UnreliableObjectSendStream", - "UnreliableObjectStream", - "ObjectReceiveStream", - "ObjectSendStream", - "ObjectStream", - "ByteReceiveStream", - "ByteSendStream", - "ByteStream", - "AnyUnreliableByteReceiveStream", - "AnyUnreliableByteSendStream", - "AnyUnreliableByteStream", - "AnyByteReceiveStream", - "AnyByteSendStream", - "AnyByteStream", - "Listener", - "Process", - "Event", - "Condition", - "Lock", - "Semaphore", - "CapacityLimiter", - "CancelScope", - "TaskGroup", - "TaskStatus", - "TestRunner", - "BlockingPortal", -) - -from typing import Any - -from ._resources import AsyncResource -from ._sockets import ( - ConnectedUDPSocket, - IPAddressType, - IPSockAddrType, - SocketAttribute, - SocketListener, - SocketStream, - UDPPacketType, - UDPSocket, - UNIXSocketStream, -) -from ._streams import ( - AnyByteReceiveStream, - AnyByteSendStream, - AnyByteStream, - AnyUnreliableByteReceiveStream, - AnyUnreliableByteSendStream, - AnyUnreliableByteStream, - ByteReceiveStream, - ByteSendStream, - ByteStream, - Listener, - ObjectReceiveStream, - ObjectSendStream, - ObjectStream, - UnreliableObjectReceiveStream, - UnreliableObjectSendStream, - UnreliableObjectStream, -) -from ._subprocesses import Process -from ._tasks import TaskGroup, TaskStatus -from ._testing import TestRunner - -# Re-exported here, for backwards compatibility -# isort: off -from .._core._synchronization import CapacityLimiter, Condition, Event, Lock, Semaphore -from .._core._tasks import CancelScope -from ..from_thread import BlockingPortal - -# 
Re-export imports so they look like they live directly in this package -key: str -value: Any -for key, value in list(locals().items()): - if getattr(value, "__module__", "").startswith("anyio.abc."): - value.__module__ = __name__ diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/filelock/_unix.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/filelock/_unix.py deleted file mode 100644 index 40cec0ab189762ac9b4a0a950e65daf53bc5be16..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/filelock/_unix.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import annotations - -import os -import sys -from contextlib import suppress -from errno import ENOSYS -from typing import cast - -from ._api import BaseFileLock - -#: a flag to indicate if the fcntl API is available -has_fcntl = False -if sys.platform == "win32": # pragma: win32 cover - - class UnixFileLock(BaseFileLock): - """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" - - def _acquire(self) -> None: - raise NotImplementedError - - def _release(self) -> None: - raise NotImplementedError - -else: # pragma: win32 no cover - try: - import fcntl - except ImportError: - pass - else: - has_fcntl = True - - class UnixFileLock(BaseFileLock): - """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" - - def _acquire(self) -> None: - open_flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC - fd = os.open(self.lock_file, open_flags, self._context.mode) - with suppress(PermissionError): # This locked is not owned by this UID - os.fchmod(fd, self._context.mode) - try: - fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - except OSError as exception: - os.close(fd) - if exception.errno == ENOSYS: # NotImplemented error - msg = "FileSystem does not appear to support flock; user SoftFileLock instead" - raise NotImplementedError(msg) from exception - else: - self._context.lock_file_fd = fd - - def _release(self) -> None: - # Do not remove the lockfile: - # https://github.com/tox-dev/py-filelock/issues/31 - # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition - fd = cast(int, self._context.lock_file_fd) - self._context.lock_file_fd = None - fcntl.flock(fd, fcntl.LOCK_UN) - os.close(fd) - - -__all__ = [ - "has_fcntl", - "UnixFileLock", -] diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_transports/base.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_transports/base.py deleted file mode 100644 index f6fdfe694340ab00e0759c2cfb1a2ea53ed65736..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_transports/base.py +++ /dev/null @@ -1,82 +0,0 @@ -import typing -from types import TracebackType - -from .._models import Request, Response - -T = typing.TypeVar("T", bound="BaseTransport") -A = typing.TypeVar("A", bound="AsyncBaseTransport") - - -class BaseTransport: - def __enter__(self: T) -> T: - return self - - def __exit__( - self, - exc_type: typing.Optional[typing.Type[BaseException]] = None, - exc_value: typing.Optional[BaseException] = None, - traceback: typing.Optional[TracebackType] = None, - ) -> None: - self.close() - - def handle_request(self, request: Request) -> Response: - """ - Send a single HTTP request and return a response. 
- - Developers shouldn't typically ever need to call into this API directly, - since the Client class provides all the higher level user-facing API - niceties. - - In order to properly release any network resources, the response - stream should *either* be consumed immediately, with a call to - `response.stream.read()`, or else the `handle_request` call should - be followed with a try/finally block to ensuring the stream is - always closed. - - Example usage: - - with httpx.HTTPTransport() as transport: - req = httpx.Request( - method=b"GET", - url=(b"https", b"www.example.com", 443, b"/"), - headers=[(b"Host", b"www.example.com")], - ) - resp = transport.handle_request(req) - body = resp.stream.read() - print(resp.status_code, resp.headers, body) - - - Takes a `Request` instance as the only argument. - - Returns a `Response` instance. - """ - raise NotImplementedError( - "The 'handle_request' method must be implemented." - ) # pragma: no cover - - def close(self) -> None: - pass - - -class AsyncBaseTransport: - async def __aenter__(self: A) -> A: - return self - - async def __aexit__( - self, - exc_type: typing.Optional[typing.Type[BaseException]] = None, - exc_value: typing.Optional[BaseException] = None, - traceback: typing.Optional[TracebackType] = None, - ) -> None: - await self.aclose() - - async def handle_async_request( - self, - request: Request, - ) -> Response: - raise NotImplementedError( - "The 'handle_async_request' method must be implemented." - ) # pragma: no cover - - async def aclose(self) -> None: - pass diff --git a/spaces/Derni/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md b/spaces/Derni/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md deleted file mode 100644 index f7628c97181708a0258462128d4d79f7527e3748..0000000000000000000000000000000000000000 --- a/spaces/Derni/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Onodofthenorth-SD PixelArt SpriteSheet Generator -emoji: 🌍 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.28.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ECCV2022/bytetrack/tutorials/motr/transforms.py b/spaces/ECCV2022/bytetrack/tutorials/motr/transforms.py deleted file mode 100644 index 064d1f057a7084153db597ba9b723a8f2c14f243..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tutorials/motr/transforms.py +++ /dev/null @@ -1,650 +0,0 @@ -# ------------------------------------------------------------------------ -# Copyright (c) 2021 megvii-model. All Rights Reserved. -# ------------------------------------------------------------------------ -# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# ------------------------------------------------------------------------ - -""" -Transforms and data augmentation for both image + bbox. 
-""" -import copy -import random -import PIL -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as F -from PIL import Image, ImageDraw -from util.box_ops import box_xyxy_to_cxcywh -from util.misc import interpolate -import numpy as np -import os - - - -def crop_mot(image, target, region): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? - target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area", "iscrowd"] - if 'obj_ids' in target: - fields.append('obj_ids') - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - - for i, box in enumerate(cropped_boxes): - l, t, r, b = box -# if l < 0: -# l = 0 -# if r < 0: -# r = 0 -# if l > w: -# l = w -# if r > w: -# r = w -# if t < 0: -# t = 0 -# if b < 0: -# b = 0 -# if t > h: -# t = h -# if b > h: -# b = h - if l < 0 and r < 0: - l = r = 0 - if l > w and r > w: - l = r = w - if t < 0 and b < 0: - t = b = 0 - if t > h and b > h: - t = b = h - cropped_boxes[i] = torch.tensor([l, t, r, b], dtype=box.dtype) - - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? - target['masks'] = target['masks'][:, i:i + h, j:j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target['boxes'].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target['masks'].flatten(1).any(1) - - for field in fields: - target[field] = target[field][keep] - - return cropped_image, target - - -def random_shift(image, target, region, sizes): - oh, ow = sizes - # step 1, shift crop and re-scale image firstly - cropped_image = F.crop(image, *region) - cropped_image = F.resize(cropped_image, sizes) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? - target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area", "iscrowd"] - if 'obj_ids' in target: - fields.append('obj_ids') - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - - for i, box in enumerate(cropped_boxes): - l, t, r, b = box - if l < 0: - l = 0 - if r < 0: - r = 0 - if l > w: - l = w - if r > w: - r = w - if t < 0: - t = 0 - if b < 0: - b = 0 - if t > h: - t = h - if b > h: - b = h - # step 2, re-scale coords secondly - ratio_h = 1.0 * oh / h - ratio_w = 1.0 * ow / w - cropped_boxes[i] = torch.tensor([ratio_w * l, ratio_h * t, ratio_w * r, ratio_h * b], dtype=box.dtype) - - cropped_boxes = cropped_boxes.reshape(-1, 2, 2) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? 
- target['masks'] = target['masks'][:, i:i + h, j:j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target['boxes'].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target['masks'].flatten(1).any(1) - - for field in fields: - target[field] = target[field][keep] - - return cropped_image, target - - -def crop(image, target, region): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? - target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area", "iscrowd"] - if 'obj_ids' in target: - fields.append('obj_ids') - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? - target['masks'] = target['masks'][:, i:i + h, j:j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target['boxes'].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target['masks'].flatten(1).any(1) - - for field in fields: - target[field] = target[field][keep] - - return cropped_image, target - - -def hflip(image, target): - flipped_image = F.hflip(image) - - w, h = image.size - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) - target["boxes"] = boxes - - if "masks" in target: - target['masks'] = target['masks'].flip(-1) - - return flipped_image, target - - -def resize(image, target, size, max_size=None): - # size can be min_size (scalar) or (w, h) tuple - - def get_size_with_aspect_ratio(image_size, size, max_size=None): - w, h = image_size - if max_size is not None: - min_original_size = float(min((w, h))) - max_original_size = float(max((w, h))) - if max_original_size / min_original_size * size > max_size: - size = int(round(max_size * min_original_size / max_original_size)) - - if (w <= h and w == size) or (h <= w and h == size): - return (h, w) - - if w < h: - ow = size - oh = int(size * h / w) - else: - oh = size - ow = int(size * w / h) - - return (oh, ow) - - def get_size(image_size, size, max_size=None): - if isinstance(size, (list, tuple)): - return size[::-1] - else: - return get_size_with_aspect_ratio(image_size, size, max_size) - - size = get_size(image.size, size, max_size) - rescaled_image = F.resize(image, size) - - if target is None: - return rescaled_image, None - - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) - ratio_width, ratio_height = ratios 
- - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) - target["boxes"] = scaled_boxes - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - h, w = size - target["size"] = torch.tensor([h, w]) - - if "masks" in target: - target['masks'] = interpolate( - target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - - return rescaled_image, target - - -def pad(image, target, padding): - # assumes that we only pad on the bottom right corners - padded_image = F.pad(image, (0, 0, padding[0], padding[1])) - if target is None: - return padded_image, None - target = target.copy() - # should we do something wrt the original size? - target["size"] = torch.tensor(padded_image[::-1]) - if "masks" in target: - target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1])) - return padded_image, target - - -class RandomCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - region = T.RandomCrop.get_params(img, self.size) - return crop(img, target, region) - - -class MotRandomCrop(RandomCrop): - def __call__(self, imgs: list, targets: list): - ret_imgs = [] - ret_targets = [] - region = T.RandomCrop.get_params(imgs[0], self.size) - for img_i, targets_i in zip(imgs, targets): - img_i, targets_i = crop(img_i, targets_i, region) - ret_imgs.append(img_i) - ret_targets.append(targets_i) - return ret_imgs, ret_targets - -class FixedMotRandomCrop(object): - def __init__(self, min_size: int, max_size: int): - self.min_size = min_size - self.max_size = max_size - - def __call__(self, imgs: list, targets: list): - ret_imgs = [] - ret_targets = [] - w = random.randint(self.min_size, min(imgs[0].width, self.max_size)) - h = random.randint(self.min_size, min(imgs[0].height, self.max_size)) - region = T.RandomCrop.get_params(imgs[0], [h, w]) - for img_i, targets_i in zip(imgs, targets): - img_i, targets_i = crop_mot(img_i, targets_i, region) - ret_imgs.append(img_i) - ret_targets.append(targets_i) - return ret_imgs, ret_targets - -class MotRandomShift(object): - def __init__(self, bs=1): - self.bs = bs - - def __call__(self, imgs: list, targets: list): - ret_imgs = copy.deepcopy(imgs) - ret_targets = copy.deepcopy(targets) - - n_frames = len(imgs) - select_i = random.choice(list(range(n_frames))) - w, h = imgs[select_i].size - - xshift = (100 * torch.rand(self.bs)).int() - xshift *= (torch.randn(self.bs) > 0.0).int() * 2 - 1 - yshift = (100 * torch.rand(self.bs)).int() - yshift *= (torch.randn(self.bs) > 0.0).int() * 2 - 1 - ymin = max(0, -yshift[0]) - ymax = min(h, h - yshift[0]) - xmin = max(0, -xshift[0]) - xmax = min(w, w - xshift[0]) - - region = (int(ymin), int(xmin), int(ymax-ymin), int(xmax-xmin)) - ret_imgs[select_i], ret_targets[select_i] = random_shift(imgs[select_i], targets[select_i], region, (h,w)) - - return ret_imgs, ret_targets - - -class FixedMotRandomShift(object): - def __init__(self, bs=1, padding=50): - self.bs = bs - self.padding = padding - - def __call__(self, imgs: list, targets: list): - ret_imgs = [] - ret_targets = [] - - n_frames = len(imgs) - w, h = imgs[0].size - xshift = (self.padding * torch.rand(self.bs)).int() + 1 - xshift *= (torch.randn(self.bs) > 0.0).int() * 2 - 1 - yshift = (self.padding * torch.rand(self.bs)).int() + 1 - yshift *= (torch.randn(self.bs) > 0.0).int() * 2 - 1 - ret_imgs.append(imgs[0]) 
- ret_targets.append(targets[0]) - for i in range(1, n_frames): - ymin = max(0, -yshift[0]) - ymax = min(h, h - yshift[0]) - xmin = max(0, -xshift[0]) - xmax = min(w, w - xshift[0]) - prev_img = ret_imgs[i-1].copy() - prev_target = copy.deepcopy(ret_targets[i-1]) - region = (int(ymin), int(xmin), int(ymax - ymin), int(xmax - xmin)) - img_i, target_i = random_shift(prev_img, prev_target, region, (h, w)) - ret_imgs.append(img_i) - ret_targets.append(target_i) - - return ret_imgs, ret_targets - - -class RandomSizeCrop(object): - def __init__(self, min_size: int, max_size: int): - self.min_size = min_size - self.max_size = max_size - - def __call__(self, img: PIL.Image.Image, target: dict): - w = random.randint(self.min_size, min(img.width, self.max_size)) - h = random.randint(self.min_size, min(img.height, self.max_size)) - region = T.RandomCrop.get_params(img, [h, w]) - return crop(img, target, region) - - -class MotRandomSizeCrop(RandomSizeCrop): - def __call__(self, imgs, targets): - w = random.randint(self.min_size, min(imgs[0].width, self.max_size)) - h = random.randint(self.min_size, min(imgs[0].height, self.max_size)) - region = T.RandomCrop.get_params(imgs[0], [h, w]) - ret_imgs = [] - ret_targets = [] - for img_i, targets_i in zip(imgs, targets): - img_i, targets_i = crop(img_i, targets_i, region) - ret_imgs.append(img_i) - ret_targets.append(targets_i) - return ret_imgs, ret_targets - - -class CenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.)) - crop_left = int(round((image_width - crop_width) / 2.)) - return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) - - -class MotCenterCrop(CenterCrop): - def __call__(self, imgs, targets): - image_width, image_height = imgs[0].size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.)) - crop_left = int(round((image_width - crop_width) / 2.)) - ret_imgs = [] - ret_targets = [] - for img_i, targets_i in zip(imgs, targets): - img_i, targets_i = crop(img_i, targets_i, (crop_top, crop_left, crop_height, crop_width)) - ret_imgs.append(img_i) - ret_targets.append(targets_i) - return ret_imgs, ret_targets - - -class RandomHorizontalFlip(object): - def __init__(self, p=0.5): - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return hflip(img, target) - return img, target - - -class MotRandomHorizontalFlip(RandomHorizontalFlip): - def __call__(self, imgs, targets): - if random.random() < self.p: - ret_imgs = [] - ret_targets = [] - for img_i, targets_i in zip(imgs, targets): - img_i, targets_i = hflip(img_i, targets_i) - ret_imgs.append(img_i) - ret_targets.append(targets_i) - return ret_imgs, ret_targets - return imgs, targets - - -class RandomResize(object): - def __init__(self, sizes, max_size=None): - assert isinstance(sizes, (list, tuple)) - self.sizes = sizes - self.max_size = max_size - - def __call__(self, img, target=None): - size = random.choice(self.sizes) - return resize(img, target, size, self.max_size) - - -class MotRandomResize(RandomResize): - def __call__(self, imgs, targets): - size = random.choice(self.sizes) - ret_imgs = [] - ret_targets = [] - for img_i, targets_i in zip(imgs, targets): - img_i, targets_i = resize(img_i, targets_i, size, self.max_size) - ret_imgs.append(img_i) - ret_targets.append(targets_i) - return ret_imgs, ret_targets - - -class 
RandomPad(object): - def __init__(self, max_pad): - self.max_pad = max_pad - - def __call__(self, img, target): - pad_x = random.randint(0, self.max_pad) - pad_y = random.randint(0, self.max_pad) - return pad(img, target, (pad_x, pad_y)) - - -class MotRandomPad(RandomPad): - def __call__(self, imgs, targets): - pad_x = random.randint(0, self.max_pad) - pad_y = random.randint(0, self.max_pad) - ret_imgs = [] - ret_targets = [] - for img_i, targets_i in zip(imgs, targets): - img_i, target_i = pad(img_i, targets_i, (pad_x, pad_y)) - ret_imgs.append(img_i) - ret_targets.append(targets_i) - return ret_imgs, ret_targets - - -class RandomSelect(object): - """ - Randomly selects between transforms1 and transforms2, - with probability p for transforms1 and (1 - p) for transforms2 - """ - def __init__(self, transforms1, transforms2, p=0.5): - self.transforms1 = transforms1 - self.transforms2 = transforms2 - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return self.transforms1(img, target) - return self.transforms2(img, target) - - -class MotRandomSelect(RandomSelect): - """ - Randomly selects between transforms1 and transforms2, - with probability p for transforms1 and (1 - p) for transforms2 - """ - def __call__(self, imgs, targets): - if random.random() < self.p: - return self.transforms1(imgs, targets) - return self.transforms2(imgs, targets) - - -class ToTensor(object): - def __call__(self, img, target): - return F.to_tensor(img), target - - -class MotToTensor(ToTensor): - def __call__(self, imgs, targets): - ret_imgs = [] - for img in imgs: - ret_imgs.append(F.to_tensor(img)) - return ret_imgs, targets - - -class RandomErasing(object): - - def __init__(self, *args, **kwargs): - self.eraser = T.RandomErasing(*args, **kwargs) - - def __call__(self, img, target): - return self.eraser(img), target - - -class MotRandomErasing(RandomErasing): - def __call__(self, imgs, targets): - # TODO: Rewrite this part to ensure the data augmentation is same to each image. 
- ret_imgs = [] - for img_i, targets_i in zip(imgs, targets): - ret_imgs.append(self.eraser(img_i)) - return ret_imgs, targets - - -class MoTColorJitter(T.ColorJitter): - def __call__(self, imgs, targets): - transform = self.get_params(self.brightness, self.contrast, - self.saturation, self.hue) - ret_imgs = [] - for img_i, targets_i in zip(imgs, targets): - ret_imgs.append(transform(img_i)) - return ret_imgs, targets - - -class Normalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, image, target=None): - if target is not None: - target['ori_img'] = image.clone() - image = F.normalize(image, mean=self.mean, std=self.std) - if target is None: - return image, None - target = target.copy() - h, w = image.shape[-2:] - if "boxes" in target: - boxes = target["boxes"] - boxes = box_xyxy_to_cxcywh(boxes) - boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) - target["boxes"] = boxes - return image, target - - -class MotNormalize(Normalize): - def __call__(self, imgs, targets=None): - ret_imgs = [] - ret_targets = [] - for i in range(len(imgs)): - img_i = imgs[i] - targets_i = targets[i] if targets is not None else None - img_i, targets_i = super().__call__(img_i, targets_i) - ret_imgs.append(img_i) - ret_targets.append(targets_i) - return ret_imgs, ret_targets - - -class Compose(object): - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - def __repr__(self): - format_string = self.__class__.__name__ + "(" - for t in self.transforms: - format_string += "\n" - format_string += " {0}".format(t) - format_string += "\n)" - return format_string - - -class MotCompose(Compose): - def __call__(self, imgs, targets): - for t in self.transforms: - imgs, targets = t(imgs, targets) - return imgs, targets diff --git a/spaces/EDGAhab/Aatrox-Talking/text/__init__.py b/spaces/EDGAhab/Aatrox-Talking/text/__init__.py deleted file mode 100644 index 227cdc4a81d7bcdd3dbae299947278998d12276b..0000000000000000000000000000000000000000 --- a/spaces/EDGAhab/Aatrox-Talking/text/__init__.py +++ /dev/null @@ -1,66 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols,symbols_zh - - -# Mappings from symbol to numeric ID and vice versa: -# _symbol_to_id = {s: i for i, s in enumerate(symbols)} -# _id_to_symbol = {i: s for i, s in enumerate(symbols)} - -chinese_mode = True -if chinese_mode: - _symbol_to_id = {s: i for i, s in enumerate(symbols_zh)} - _id_to_symbol = {i: s for i, s in enumerate(symbols_zh)} -else: - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - _id_to_symbol = {i: s for i, s in enumerate(symbols)} - -def text_to_sequence(text, cleaner_names, ): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - coutinue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def cleaned_text_to_sequence(cleaned_text, chinese_mode=True): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - # if chinese_mode: - # sequence = [_symbol_to_id_zh[symbol] for symbol in cleaned_text] - # else: - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/EuroPython2022/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/utils/google_utils.py b/spaces/EuroPython2022/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/utils/google_utils.py deleted file mode 100644 index 08cae912ee36d3e989f0a8d18f4aba7f950e7a88..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/utils/google_utils.py +++ /dev/null @@ -1,122 +0,0 @@ -# This file contains google utils: https://cloud.google.com/storage/docs/reference/libraries -# pip install --upgrade google-cloud-storage -# from google.cloud import storage - -import os -import platform -import subprocess -import time -from pathlib import Path - -import torch - - -def gsutil_getsize(url=''): - # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output('gsutil du %s' % url, shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes - - -def attempt_download(weights): - # Attempt to download pretrained weights if not found locally - weights = weights.strip().replace("'", '') - file = Path(weights).name - - msg = weights + ' missing, try downloading from https://github.com/ultralytics/yolov5/releases/' - models = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt'] # available models - - if file in models and not os.path.isfile(weights): - # Google Drive - # d = {'yolov5s.pt': '1R5T6rIyy3lLwgFXNms8whc-387H0tMQO', - # 'yolov5m.pt': '1vobuEExpWQVpXExsJ2w-Mbf3HJjWkQJr', - # 'yolov5l.pt': '1hrlqD1Wdei7UT4OgT785BEk1JwnSvNEV', - # 'yolov5x.pt': '1mM8aZJlWTxOg7BZJvNUMrTnA2AbeCVzS'} - # r = gdrive_download(id=d[file], name=weights) if file in d else 1 - # if r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6: # check - # return - - try: # GitHub - url = 'https://github.com/ultralytics/yolov5/releases/download/v3.1/' + file - print('Downloading %s to %s...' % (url, weights)) - torch.hub.download_url_to_file(url, weights) - assert os.path.exists(weights) and os.path.getsize(weights) > 1E6 # check - except Exception as e: # GCP - print('Download error: %s' % e) - url = 'https://storage.googleapis.com/ultralytics/yolov5/ckpt/' + file - print('Downloading %s to %s...' % (url, weights)) - r = os.system('curl -L %s -o %s' % (url, weights)) # torch.hub.download_url_to_file(url, weights) - finally: - if not (os.path.exists(weights) and os.path.getsize(weights) > 1E6): # check - os.remove(weights) if os.path.exists(weights) else None # remove partial downloads - print('ERROR: Download failure: %s' % msg) - print('') - return - - -def gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco128.zip'): - # Downloads a file from Google Drive. 
from utils.google_utils import *; gdrive_download() - t = time.time() - - print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='') - os.remove(name) if os.path.exists(name) else None # remove existing - os.remove('cookie') if os.path.exists('cookie') else None - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out)) - if os.path.exists('cookie'): # large file - s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name) - else: # small file - s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id) - r = os.system(s) # execute, capture return - os.remove('cookie') if os.path.exists('cookie') else None - - # Error check - if r != 0: - os.remove(name) if os.path.exists(name) else None # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if name.endswith('.zip'): - print('unzipping... ', end='') - os.system('unzip -q %s' % name) # unzip - os.remove(name) # remove zip to free space - - print('Done (%.1fs)' % (time.time() - t)) - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textrecog/robust_scanner/README.md b/spaces/EuroPython2022/mmocr-demo/configs/textrecog/robust_scanner/README.md deleted file mode 100644 index 165ef248c56640f55772ac5c1d2aae29e69d42e8..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/textrecog/robust_scanner/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# RobustScanner - -> [RobustScanner: Dynamically Enhancing Positional Clues for Robust Text Recognition](https://arxiv.org/abs/2007.07542) - - - -## Abstract - -The attention-based encoder-decoder framework has recently achieved impressive results for scene text recognition, and many variants have emerged with improvements in recognition quality. However, it performs poorly on contextless texts (e.g., random character sequences) which is unacceptable in most of real application scenarios. In this paper, we first deeply investigate the decoding process of the decoder. We empirically find that a representative character-level sequence decoder utilizes not only context information but also positional information. Contextual information, which the existing approaches heavily rely on, causes the problem of attention drift. 
To suppress such side-effect, we propose a novel position enhancement branch, and dynamically fuse its outputs with those of the decoder attention module for scene text recognition. Specifically, it contains a position aware module to enable the encoder to output feature vectors encoding their own spatial positions, and an attention module to estimate glimpses using the positional clue (i.e., the current decoding time step) only. The dynamic fusion is conducted for more robust feature via an element-wise gate mechanism. Theoretically, our proposed method, dubbed \\emph{RobustScanner}, decodes individual characters with dynamic ratio between context and positional clues, and utilizes more positional ones when the decoding sequences with scarce context, and thus is robust and practical. Empirically, it has achieved new state-of-the-art results on popular regular and irregular text recognition benchmarks while without much performance drop on contextless benchmarks, validating its robustness in both contextual and contextless application scenarios. - -
    - -
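The abstract above describes fusing the output of the conventional context-driven attention module with the output of the position-aware branch through an element-wise gate. As a rough sketch of that fusion idea only (the tensor shapes, layer sizes, and exact gating formula are assumptions, not the paper's or this config's implementation), it could be written along these lines:

```python
import torch
import torch.nn as nn

class DynamicGatedFusion(nn.Module):
    """Element-wise gated fusion of two glimpse vectors (sketch only)."""

    def __init__(self, dim):
        super().__init__()
        # A linear layer over the concatenated glimpses predicts a per-channel gate.
        self.gate = nn.Linear(2 * dim, dim)

    def forward(self, context_glimpse, position_glimpse):
        # Both inputs: (batch, seq_len, dim)
        g = torch.sigmoid(self.gate(torch.cat([context_glimpse, position_glimpse], dim=-1)))
        # g near 1 leans on contextual clues, g near 0 leans on positional clues.
        return g * context_glimpse + (1.0 - g) * position_glimpse

fusion = DynamicGatedFusion(dim=512)
out = fusion(torch.randn(2, 25, 512), torch.randn(2, 25, 512))
print(out.shape)  # torch.Size([2, 25, 512])
```

The per-channel gate is what lets the decoder rely more on positional clues when the surrounding context is scarce or random, which is the robustness argument the abstract makes for contextless text.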
    - -## Dataset - -### Train Dataset - -| trainset | instance_num | repeat_num | source | -| :--------: | :----------: | :--------: | :------------------------: | -| icdar_2011 | 3567 | 20 | real | -| icdar_2013 | 848 | 20 | real | -| icdar2015 | 4468 | 20 | real | -| coco_text | 42142 | 20 | real | -| IIIT5K | 2000 | 20 | real | -| SynthText | 2400000 | 1 | synth | -| SynthAdd | 1216889 | 1 | synth, 1.6m in [\[1\]](#1) | -| Syn90k | 2400000 | 1 | synth | - -### Test Dataset - -| testset | instance_num | type | -| :-----: | :----------: | :---------------------------: | -| IIIT5K | 3000 | regular | -| SVT | 647 | regular | -| IC13 | 1015 | regular | -| IC15 | 2077 | irregular | -| SVTP | 645 | irregular, 639 in [\[1\]](#1) | -| CT80 | 288 | irregular | - -## Results and Models - -| Methods | GPUs | | Regular Text | | | | Irregular Text | | download | -| :------------------------------------------------------------------------: | :--: | :----: | :----------: | :--: | :-: | :--: | :------------: | :--: | :-------------------------------------------------------------------------: | -| | | IIIT5K | SVT | IC13 | | IC15 | SVTP | CT80 | | -| [RobustScanner](configs/textrecog/robust_scanner/robustscanner_r31_academic.py) | 16 | 95.1 | 89.2 | 93.1 | | 77.8 | 80.3 | 90.3 | [model](https://download.openmmlab.com/mmocr/textrecog/robustscanner/robustscanner_r31_academic-5f05874f.pth) \| [log](https://download.openmmlab.com/mmocr/textrecog/robustscanner/20210401_170932.log.json) | - -## References - -\[1\] Li, Hui and Wang, Peng and Shen, Chunhua and Zhang, Guyu. Show, attend and read: A simple and strong baseline for irregular text recognition. In AAAI 2019. - -## Citation - -```bibtex -@inproceedings{yue2020robustscanner, - title={RobustScanner: Dynamically Enhancing Positional Clues for Robust Text Recognition}, - author={Yue, Xiaoyu and Kuang, Zhanghui and Lin, Chenhao and Sun, Hongbin and Zhang, Wayne}, - booktitle={European Conference on Computer Vision}, - year={2020} -} -``` diff --git a/spaces/Fadil369/docker/app.py b/spaces/Fadil369/docker/app.py deleted file mode 100644 index 2c4dc6eea9e9317c85af90240e5d697ad8efaf17..0000000000000000000000000000000000000000 --- a/spaces/Fadil369/docker/app.py +++ /dev/null @@ -1,151 +0,0 @@ -from pathlib import Path -from typing import List, Dict, Tuple -import matplotlib.colors as mpl_colors - -import pandas as pd -import seaborn as sns -import shinyswatch - -from shiny import App, Inputs, Outputs, Session, reactive, render, req, ui - -sns.set_theme() - -www_dir = Path(__file__).parent.resolve() / "www" - -df = pd.read_csv(Path(__file__).parent / "penguins.csv", na_values="NA") -numeric_cols: List[str] = df.select_dtypes(include=["float64"]).columns.tolist() -species: List[str] = df["Species"].unique().tolist() -species.sort() - -app_ui = ui.page_fillable( - shinyswatch.theme.minty(), - ui.layout_sidebar( - ui.sidebar( - # Artwork by @allison_horst - ui.input_selectize( - "xvar", - "X variable", - numeric_cols, - selected="Bill Length (mm)", - ), - ui.input_selectize( - "yvar", - "Y variable", - numeric_cols, - selected="Bill Depth (mm)", - ), - ui.input_checkbox_group( - "species", "Filter by species", species, selected=species - ), - ui.hr(), - ui.input_switch("by_species", "Show species", value=True), - ui.input_switch("show_margins", "Show marginal plots", value=True), - ), - ui.output_ui("value_boxes"), - ui.output_plot("scatter", fill=True), - ui.help_text( - "Artwork by ", - ui.a("@allison_horst", href="https://twitter.com/allison_horst"), - 
class_="text-end", - ), - ), -) - - -def server(input: Inputs, output: Outputs, session: Session): - @reactive.Calc - def filtered_df() -> pd.DataFrame: - """Returns a Pandas data frame that includes only the desired rows""" - - # This calculation "req"uires that at least one species is selected - req(len(input.species()) > 0) - - # Filter the rows so we only include the desired species - return df[df["Species"].isin(input.species())] - - @output - @render.plot - def scatter(): - """Generates a plot for Shiny to display to the user""" - - # The plotting function to use depends on whether margins are desired - plotfunc = sns.jointplot if input.show_margins() else sns.scatterplot - - plotfunc( - data=filtered_df(), - x=input.xvar(), - y=input.yvar(), - palette=palette, - hue="Species" if input.by_species() else None, - hue_order=species, - legend=False, - ) - - @output - @render.ui - def value_boxes(): - df = filtered_df() - - def penguin_value_box(title: str, count: int, bgcol: str, showcase_img: str): - return ui.value_box( - title, - count, - {"class_": "pt-1 pb-0"}, - showcase=ui.fill.as_fill_item( - ui.tags.img( - {"style": "object-fit:contain;"}, - src=showcase_img, - ) - ), - theme_color=None, - style=f"background-color: {bgcol};", - ) - - if not input.by_species(): - return penguin_value_box( - "Penguins", - len(df.index), - bg_palette["default"], - # Artwork by @allison_horst - showcase_img="penguins.png", - ) - - value_boxes = [ - penguin_value_box( - name, - len(df[df["Species"] == name]), - bg_palette[name], - # Artwork by @allison_horst - showcase_img=f"{name}.png", - ) - for name in species - # Only include boxes for _selected_ species - if name in input.species() - ] - - return ui.layout_column_wrap(*value_boxes, width = 1 / len(value_boxes)) - - -# "darkorange", "purple", "cyan4" -colors = [[255, 140, 0], [160, 32, 240], [0, 139, 139]] -colors = [(r / 255.0, g / 255.0, b / 255.0) for r, g, b in colors] - -palette: Dict[str, Tuple[float, float, float]] = { - "Adelie": colors[0], - "Chinstrap": colors[1], - "Gentoo": colors[2], - "default": sns.color_palette()[0], # type: ignore -} - -bg_palette = {} -# Use `sns.set_style("whitegrid")` to help find approx alpha value -for name, col in palette.items(): - # Adjusted n_colors until `axe` accessibility did not complain about color contrast - bg_palette[name] = mpl_colors.to_hex(sns.light_palette(col, n_colors=7)[1]) # type: ignore - - -app = App( - app_ui, - server, - static_assets=str(www_dir), -) diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/matlab_functions.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/matlab_functions.py deleted file mode 100644 index c6ce1004a2c9f8521505c4b5889d3c24a909c70d..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/matlab_functions.py +++ /dev/null @@ -1,347 +0,0 @@ -import math -import numpy as np -import torch - - -def cubic(x): - """cubic function used for calculate_weights_indices.""" - absx = torch.abs(x) - absx2 = absx**2 - absx3 = absx**3 - return (1.5 * absx3 - 2.5 * absx2 + 1) * ( - (absx <= 1).type_as(absx)) + (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (((absx > 1) * - (absx <= 2)).type_as(absx)) - - -def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing): - """Calculate weights and indices, used for imresize function. - - Args: - in_length (int): Input length. - out_length (int): Output length. - scale (float): Scale factor. - kernel_width (int): Kernel width. 
- antialisaing (bool): Whether to apply anti-aliasing when downsampling. - """ - - if (scale < 1) and antialiasing: - # Use a modified kernel (larger kernel width) to simultaneously - # interpolate and antialias - kernel_width = kernel_width / scale - - # Output-space coordinates - x = torch.linspace(1, out_length, out_length) - - # Input-space coordinates. Calculate the inverse mapping such that 0.5 - # in output space maps to 0.5 in input space, and 0.5 + scale in output - # space maps to 1.5 in input space. - u = x / scale + 0.5 * (1 - 1 / scale) - - # What is the left-most pixel that can be involved in the computation? - left = torch.floor(u - kernel_width / 2) - - # What is the maximum number of pixels that can be involved in the - # computation? Note: it's OK to use an extra pixel here; if the - # corresponding weights are all zero, it will be eliminated at the end - # of this function. - p = math.ceil(kernel_width) + 2 - - # The indices of the input pixels involved in computing the k-th output - # pixel are in row k of the indices matrix. - indices = left.view(out_length, 1).expand(out_length, p) + torch.linspace(0, p - 1, p).view(1, p).expand( - out_length, p) - - # The weights used to compute the k-th output pixel are in row k of the - # weights matrix. - distance_to_center = u.view(out_length, 1).expand(out_length, p) - indices - - # apply cubic kernel - if (scale < 1) and antialiasing: - weights = scale * cubic(distance_to_center * scale) - else: - weights = cubic(distance_to_center) - - # Normalize the weights matrix so that each row sums to 1. - weights_sum = torch.sum(weights, 1).view(out_length, 1) - weights = weights / weights_sum.expand(out_length, p) - - # If a column in weights is all zero, get rid of it. only consider the - # first and last column. - weights_zero_tmp = torch.sum((weights == 0), 0) - if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): - indices = indices.narrow(1, 1, p - 2) - weights = weights.narrow(1, 1, p - 2) - if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): - indices = indices.narrow(1, 0, p - 2) - weights = weights.narrow(1, 0, p - 2) - weights = weights.contiguous() - indices = indices.contiguous() - sym_len_s = -indices.min() + 1 - sym_len_e = indices.max() - in_length - indices = indices + sym_len_s - 1 - return weights, indices, int(sym_len_s), int(sym_len_e) - - -@torch.no_grad() -def imresize(img, scale, antialiasing=True): - """imresize function same as MATLAB. - - It now only supports bicubic. - The same scale applies for both height and width. - - Args: - img (Tensor | Numpy array): - Tensor: Input image with shape (c, h, w), [0, 1] range. - Numpy: Input image with shape (h, w, c), [0, 1] range. - scale (float): Scale factor. The same scale applies for both height - and width. - antialisaing (bool): Whether to apply anti-aliasing when downsampling. - Default: True. - - Returns: - Tensor: Output image with shape (c, h, w), [0, 1] range, w/o round. 
- """ - if type(img).__module__ == np.__name__: # numpy type - numpy_type = True - img = torch.from_numpy(img.transpose(2, 0, 1)).float() - else: - numpy_type = False - - in_c, in_h, in_w = img.size() - out_h, out_w = math.ceil(in_h * scale), math.ceil(in_w * scale) - kernel_width = 4 - kernel = 'cubic' - - # get weights and indices - weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(in_h, out_h, scale, kernel, kernel_width, - antialiasing) - weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(in_w, out_w, scale, kernel, kernel_width, - antialiasing) - # process H dimension - # symmetric copying - img_aug = torch.FloatTensor(in_c, in_h + sym_len_hs + sym_len_he, in_w) - img_aug.narrow(1, sym_len_hs, in_h).copy_(img) - - sym_patch = img[:, :sym_len_hs, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - img_aug.narrow(1, 0, sym_len_hs).copy_(sym_patch_inv) - - sym_patch = img[:, -sym_len_he:, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - img_aug.narrow(1, sym_len_hs + in_h, sym_len_he).copy_(sym_patch_inv) - - out_1 = torch.FloatTensor(in_c, out_h, in_w) - kernel_width = weights_h.size(1) - for i in range(out_h): - idx = int(indices_h[i][0]) - for j in range(in_c): - out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_h[i]) - - # process W dimension - # symmetric copying - out_1_aug = torch.FloatTensor(in_c, out_h, in_w + sym_len_ws + sym_len_we) - out_1_aug.narrow(2, sym_len_ws, in_w).copy_(out_1) - - sym_patch = out_1[:, :, :sym_len_ws] - inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(2, inv_idx) - out_1_aug.narrow(2, 0, sym_len_ws).copy_(sym_patch_inv) - - sym_patch = out_1[:, :, -sym_len_we:] - inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(2, inv_idx) - out_1_aug.narrow(2, sym_len_ws + in_w, sym_len_we).copy_(sym_patch_inv) - - out_2 = torch.FloatTensor(in_c, out_h, out_w) - kernel_width = weights_w.size(1) - for i in range(out_w): - idx = int(indices_w[i][0]) - for j in range(in_c): - out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_w[i]) - - if numpy_type: - out_2 = out_2.numpy().transpose(1, 2, 0) - return out_2 - - -def rgb2ycbcr(img, y_only=False): - """Convert a RGB image to YCbCr image. - - This function produces the same results as Matlab's `rgb2ycbcr` function. - It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - - It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - y_only (bool): Whether to only return Y channel. Default: False. - - Returns: - ndarray: The converted YCbCr image. The output image has the same type - and range as input image. 
- """ - img_type = img.dtype - img = _convert_input_type_range(img) - if y_only: - out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0 - else: - out_img = np.matmul( - img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], [24.966, 112.0, -18.214]]) + [16, 128, 128] - out_img = _convert_output_type_range(out_img, img_type) - return out_img - - -def bgr2ycbcr(img, y_only=False): - """Convert a BGR image to YCbCr image. - - The bgr version of rgb2ycbcr. - It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - - It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - y_only (bool): Whether to only return Y channel. Default: False. - - Returns: - ndarray: The converted YCbCr image. The output image has the same type - and range as input image. - """ - img_type = img.dtype - img = _convert_input_type_range(img) - if y_only: - out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0 - else: - out_img = np.matmul( - img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128] - out_img = _convert_output_type_range(out_img, img_type) - return out_img - - -def ycbcr2rgb(img): - """Convert a YCbCr image to RGB image. - - This function produces the same results as Matlab's ycbcr2rgb function. - It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - - It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - - Returns: - ndarray: The converted RGB image. The output image has the same type - and range as input image. - """ - img_type = img.dtype - img = _convert_input_type_range(img) * 255 - out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071], - [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] # noqa: E126 - out_img = _convert_output_type_range(out_img, img_type) - return out_img - - -def ycbcr2bgr(img): - """Convert a YCbCr image to BGR image. - - The bgr version of ycbcr2rgb. - It implements the ITU-R BT.601 conversion for standard-definition - television. See more details in - https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. - - It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`. - In OpenCV, it implements a JPEG conversion. See more details in - https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - - Returns: - ndarray: The converted BGR image. The output image has the same type - and range as input image. 
- """ - img_type = img.dtype - img = _convert_input_type_range(img) * 255 - out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0.00791071, -0.00153632, 0], - [0, -0.00318811, 0.00625893]]) * 255.0 + [-276.836, 135.576, -222.921] # noqa: E126 - out_img = _convert_output_type_range(out_img, img_type) - return out_img - - -def _convert_input_type_range(img): - """Convert the type and range of the input image. - - It converts the input image to np.float32 type and range of [0, 1]. - It is mainly used for pre-processing the input image in colorspace - convertion functions such as rgb2ycbcr and ycbcr2rgb. - - Args: - img (ndarray): The input image. It accepts: - 1. np.uint8 type with range [0, 255]; - 2. np.float32 type with range [0, 1]. - - Returns: - (ndarray): The converted image with type of np.float32 and range of - [0, 1]. - """ - img_type = img.dtype - img = img.astype(np.float32) - if img_type == np.float32: - pass - elif img_type == np.uint8: - img /= 255. - else: - raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}') - return img - - -def _convert_output_type_range(img, dst_type): - """Convert the type and range of the image according to dst_type. - - It converts the image to desired type and range. If `dst_type` is np.uint8, - images will be converted to np.uint8 type with range [0, 255]. If - `dst_type` is np.float32, it converts the image to np.float32 type with - range [0, 1]. - It is mainly used for post-processing images in colorspace convertion - functions such as rgb2ycbcr and ycbcr2rgb. - - Args: - img (ndarray): The image to be converted with np.float32 type and - range [0, 255]. - dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it - converts the image to np.uint8 type with range [0, 255]. If - dst_type is np.float32, it converts the image to np.float32 type - with range [0, 1]. - - Returns: - (ndarray): The converted image with desired type and range. - """ - if dst_type not in (np.uint8, np.float32): - raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}') - if dst_type == np.uint8: - img = img.round() - else: - img /= 255. - return img.astype(dst_type) diff --git a/spaces/FrankZxShen/vits-fast-finetuning-pcr/monotonic_align/core.py b/spaces/FrankZxShen/vits-fast-finetuning-pcr/monotonic_align/core.py deleted file mode 100644 index 1f940605fe4fd0738fa0006149fcba14ef88223a..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/vits-fast-finetuning-pcr/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. 
- else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 diff --git a/spaces/GT4SD/paccmann_gp/utils.py b/spaces/GT4SD/paccmann_gp/utils.py deleted file mode 100644 index 9f8d421154665c3b990b425b581911e36ebc31cb..0000000000000000000000000000000000000000 --- a/spaces/GT4SD/paccmann_gp/utils.py +++ /dev/null @@ -1,76 +0,0 @@ -import logging -from collections import defaultdict -from typing import List, Callable -from gt4sd.properties import PropertyPredictorRegistry -from gt4sd.algorithms.prediction.paccmann.core import PaccMann, AffinityPredictor -import torch - -import mols2grid -import pandas as pd - -logger = logging.getLogger(__name__) -logger.addHandler(logging.NullHandler()) - - -def get_affinity_function(target: str) -> Callable: - return lambda mols: torch.stack( - list( - PaccMann( - AffinityPredictor(protein_targets=[target] * len(mols), ligands=mols) - ).sample(len(mols)) - ) - ).tolist() - - -EVAL_DICT = { - "qed": PropertyPredictorRegistry.get_property_predictor("qed"), - "sa": PropertyPredictorRegistry.get_property_predictor("sas"), -} - - -def draw_grid_generate( - samples: List[str], - properties: List[str], - protein_target: str, - n_cols: int = 3, - size=(140, 200), -) -> str: - """ - Uses mols2grid to draw a HTML grid for the generated molecules - - Args: - samples: The generated samples. - n_cols: Number of columns in grid. Defaults to 5. - size: Size of molecule in grid. Defaults to (140, 200). - - Returns: - HTML to display - """ - - if protein_target != "": - EVAL_DICT.update({"affinity": get_affinity_function(protein_target)}) - - result = defaultdict(list) - result.update( - {"SMILES": samples, "Name": [f"Generated_{i}" for i in range(len(samples))]}, - ) - if "affinity" in properties: - properties.remove("affinity") - vals = EVAL_DICT["affinity"](samples) - result["affinity"] = vals - # Fill properties - for sample in samples: - for prop in properties: - value = EVAL_DICT[prop](sample) - result[prop].append(f"{prop} = {value}") - - result_df = pd.DataFrame(result) - obj = mols2grid.display( - result_df, - tooltip=list(result.keys()), - height=1100, - n_cols=n_cols, - name="Results", - size=size, - ) - return obj.data diff --git a/spaces/GaenKoki/voicevox/voicevox_engine/utility/connect_base64_waves.py b/spaces/GaenKoki/voicevox/voicevox_engine/utility/connect_base64_waves.py deleted file mode 100644 index 37f95240966f9bfed1cfe6e9090f871cea331ef7..0000000000000000000000000000000000000000 --- a/spaces/GaenKoki/voicevox/voicevox_engine/utility/connect_base64_waves.py +++ /dev/null @@ -1,60 +0,0 @@ -import base64 -import io -from typing import List, Tuple - -import numpy as np -import soundfile -from scipy.signal import resample - - -class ConnectBase64WavesException(Exception): - def __init__(self, message: str): - self.message = message - - -def decode_base64_waves(waves: List[str]) -> List[Tuple[np.ndarray, int]]: - """ - base64エンコードされた複数のwavデータをデコードする - Parameters - ---------- - waves: list[str] - base64エンコードされたwavデータのリスト - Returns - ------- - waves_nparray_sr: List[Tuple[np.ndarray, int]] - (NumPy配列の音声波形データ, サンプリングレート) 形式のタプルのリスト - """ - if len(waves) == 0: - raise ConnectBase64WavesException("wavファイルが含まれていません") - - waves_nparray_sr = [] - for wave in waves: - try: - wav_bin = base64.standard_b64decode(wave) - except ValueError: - raise 
ConnectBase64WavesException("base64デコードに失敗しました") - try: - _data = soundfile.read(io.BytesIO(wav_bin)) - except Exception: - raise ConnectBase64WavesException("wavファイルを読み込めませんでした") - waves_nparray_sr.append(_data) - - return waves_nparray_sr - - -def connect_base64_waves(waves: List[str]) -> Tuple[np.ndarray, int]: - waves_nparray_sr = decode_base64_waves(waves) - - max_sampling_rate = max([sr for _, sr in waves_nparray_sr]) - max_channels = max([x.ndim for x, _ in waves_nparray_sr]) - assert 0 < max_channels <= 2 - - waves_nparray_list = [] - for nparray, sr in waves_nparray_sr: - if sr != max_sampling_rate: - nparray = resample(nparray, max_sampling_rate * len(nparray) // sr) - if nparray.ndim < max_channels: - nparray = np.array([nparray, nparray]).T - waves_nparray_list.append(nparray) - - return np.concatenate(waves_nparray_list), max_sampling_rate diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/app.py b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/app.py deleted file mode 100644 index ee8daa994c00306fa145393fd4020da58722a1fb..0000000000000000000000000000000000000000 --- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/app.py +++ /dev/null @@ -1,601 +0,0 @@ -import os -import random -import zipfile -import findfile -import PIL.Image -import autocuda -from pyabsa.utils.pyabsa_utils import fprint - -try: - for z_file in findfile.find_cwd_files(and_key=['.zip'], - exclude_key=['.ignore', 'git', 'SuperResolutionAnimeDiffusion'], - recursive=10): - fprint(f"Extracting {z_file}...") - with zipfile.ZipFile(z_file, 'r') as zip_ref: - zip_ref.extractall(os.path.dirname(z_file)) -except Exception as e: - os.system('unzip random_examples.zip') - -from diffusers import ( - AutoencoderKL, - UNet2DConditionModel, - StableDiffusionPipeline, - StableDiffusionImg2ImgPipeline, - DPMSolverMultistepScheduler, -) -import gradio as gr -import torch -from PIL import Image -import utils -import datetime -import time -import psutil -from Waifu2x.magnify import ImageMagnifier -from RealESRGANv030.interface import realEsrgan - -magnifier = ImageMagnifier() - -start_time = time.time() -is_colab = utils.is_google_colab() - -CUDA_VISIBLE_DEVICES = "" -device = autocuda.auto_cuda() - -dtype = torch.float16 if device != "cpu" else torch.float32 - - - -class Model: - def __init__(self, name, path="", prefix=""): - self.name = name - self.path = path - self.prefix = prefix - self.pipe_t2i = None - self.pipe_i2i = None - - -models = [ - # Model("anything v3", "Linaqruf/anything-v3.0", "anything v3 style"), - Model("anything v4.5", "andite/anything-v4.0", "anything v4.5 style"), -] -# Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "), -# Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "), -# Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "), -# Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy ") -# Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""), -# Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""), -# Model("Robo Diffusion", "nousr/robo-diffusion", ""), - -scheduler = DPMSolverMultistepScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - num_train_timesteps=1000, - trained_betas=None, - predict_epsilon=True, - thresholding=False, - algorithm_type="dpmsolver++", - solver_type="midpoint", - solver_order=2, - # lower_order_final=True, -) - -custom_model = None -if is_colab: - models.insert(0, Model("Custom model")) - custom_model = models[0] 
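A minimal sketch of the text-to-image setup this app wraps, kept separate from the app code: it assumes a recent `diffusers` release and a CUDA device, reuses the `andite/anything-v4.0` checkpoint already listed in `models`, and derives the DPM-Solver++ scheduler from the checkpoint's own config instead of hard-coding the betas as the app does. This is an illustration, not the app's exact code path.

```python
# Sketch only (not the app's code): anime txt2img with DPM-Solver++.
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

model_id = "andite/anything-v4.0"  # one of the checkpoints listed above
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
# Reuse the scheduler config shipped with the checkpoint rather than
# hard-coding beta_start / beta_end by hand.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, algorithm_type="dpmsolver++", solver_order=2
)
pipe = pipe.to("cuda")

image = pipe(
    "1girl, flower meadow, blue eyes, cumulonimbus clouds",
    negative_prompt="worst quality, low quality, blurry",
    num_inference_steps=15,
    guidance_scale=7.5,
).images[0]
image.save("result.png")
```

Building the scheduler via `from_config` keeps the noise schedule in sync with whatever the checkpoint ships, which is the main argument for it over a hand-written constructor.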
- -last_mode = "txt2img" -current_model = models[1] if is_colab else models[0] -current_model_path = current_model.path - -if is_colab: - pipe = StableDiffusionPipeline.from_pretrained( - current_model.path, - torch_dtype=dtype, - scheduler=scheduler, - safety_checker=lambda images, clip_input: (images, False), - ) - -else: # download all models - print(f"{datetime.datetime.now()} Downloading vae...") - vae = AutoencoderKL.from_pretrained( - current_model.path, subfolder="vae", torch_dtype=dtype - ) - for model in models: - try: - print(f"{datetime.datetime.now()} Downloading {model.name} model...") - unet = UNet2DConditionModel.from_pretrained( - model.path, subfolder="unet", torch_dtype=dtype - ) - model.pipe_t2i = StableDiffusionPipeline.from_pretrained( - model.path, - unet=unet, - vae=vae, - torch_dtype=dtype, - scheduler=scheduler, - safety_checker=None, - ) - model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model.path, - unet=unet, - vae=vae, - torch_dtype=dtype, - scheduler=scheduler, - safety_checker=None, - ) - except Exception as e: - print( - f"{datetime.datetime.now()} Failed to load model " - + model.name - + ": " - + str(e) - ) - models.remove(model) - pipe = models[0].pipe_t2i - -# model.pipe_i2i = torch.compile(model.pipe_i2i) -# model.pipe_t2i = torch.compile(model.pipe_t2i) -if torch.cuda.is_available(): - pipe = pipe.to(device) - - -# device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶" - - -def error_str(error, title="Error"): - return ( - f"""#### {title} - {error}""" - if error - else "" - ) - - -def custom_model_changed(path): - models[0].path = path - global current_model - current_model = models[0] - - -def on_model_change(model_name): - prefix = ( - 'Enter prompt. "' - + next((m.prefix for m in models if m.name == model_name), None) - + '" is prefixed automatically' - if model_name != models[0].name - else "Don't forget to use the custom model prefix in the prompt!" 
- ) - - return ( - gr.update(visible=model_name == models[0].name), - gr.update(placeholder=prefix), - ) - - -def inference( - model_name, - prompt, - guidance, - steps, - width=512, - height=512, - seed=0, - img=None, - strength=0.5, - neg_prompt="", - scale="ESRGAN4x", - scale_factor=2, -): - fprint(psutil.virtual_memory()) # print memory usage - - fprint(f"Prompt: {prompt}") - global current_model - for model in models: - if model.name == model_name: - current_model = model - model_path = current_model.path - - generator = torch.Generator(device).manual_seed(seed) if seed != 0 else None - - try: - if img is not None: - return ( - img_to_img( - model_path, - prompt, - neg_prompt, - img, - strength, - guidance, - steps, - width, - height, - generator, - scale, - scale_factor, - ), - None, - ) - else: - return ( - txt_to_img( - model_path, - prompt, - neg_prompt, - guidance, - steps, - width, - height, - generator, - scale, - scale_factor, - ), - None, - ) - except Exception as e: - return None, error_str(e) - # if img is not None: - # return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, - # generator, scale, scale_factor), None - # else: - # return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator, scale, scale_factor), None - - -def txt_to_img( - model_path, - prompt, - neg_prompt, - guidance, - steps, - width, - height, - generator, - scale, - scale_factor, -): - print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}") - - global last_mode - global pipe - global current_model_path - if model_path != current_model_path or last_mode != "txt2img": - current_model_path = model_path - - if is_colab or current_model == custom_model: - pipe = StableDiffusionPipeline.from_pretrained( - current_model_path, - torch_dtype=dtype, - scheduler=scheduler, - safety_checker=lambda images, clip_input: (images, False), - ) - else: - # pipe = pipe.to("cpu") - pipe = current_model.pipe_t2i - - if torch.cuda.is_available(): - pipe = pipe.to(device) - last_mode = "txt2img" - - prompt = current_model.prefix + prompt - result = pipe( - prompt, - negative_prompt=neg_prompt, - # num_images_per_prompt=n_images, - num_inference_steps=int(steps), - guidance_scale=guidance, - width=width, - height=height, - generator=generator, - ) - - # result.images[0] = magnifier.magnify(result.images[0], scale_factor=scale_factor) - # enhance resolution - if scale_factor > 1: - if scale == "ESRGAN4x": - fp32 = True if device == "cpu" else False - result.images[0] = realEsrgan( - input_dir=result.images[0], - suffix="", - output_dir="imgs", - fp32=fp32, - outscale=scale_factor, - )[0] - else: - result.images[0] = magnifier.magnify( - result.images[0], scale_factor=scale_factor - ) - # save image - result.images[0].save( - "imgs/result-{}.png".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S")) - ) - return replace_nsfw_images(result) - - -def img_to_img( - model_path, - prompt, - neg_prompt, - img, - strength, - guidance, - steps, - width, - height, - generator, - scale, - scale_factor, -): - fprint(f"{datetime.datetime.now()} img_to_img, model: {model_path}") - - global last_mode - global pipe - global current_model_path - if model_path != current_model_path or last_mode != "img2img": - current_model_path = model_path - - if is_colab or current_model == custom_model: - pipe = StableDiffusionImg2ImgPipeline.from_pretrained( - current_model_path, - torch_dtype=dtype, - scheduler=scheduler, - safety_checker=lambda images, 
clip_input: (images, False), - ) - else: - # pipe = pipe.to("cpu") - pipe = current_model.pipe_i2i - - if torch.cuda.is_available(): - pipe = pipe.to(device) - last_mode = "img2img" - - prompt = current_model.prefix + prompt - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe( - prompt, - negative_prompt=neg_prompt, - # num_images_per_prompt=n_images, - image=img, - num_inference_steps=int(steps), - strength=strength, - guidance_scale=guidance, - # width=width, - # height=height, - generator=generator, - ) - if scale_factor > 1: - if scale == "ESRGAN4x": - fp32 = True if device == "cpu" else False - result.images[0] = realEsrgan( - input_dir=result.images[0], - suffix="", - output_dir="imgs", - fp32=fp32, - outscale=scale_factor, - )[0] - else: - result.images[0] = magnifier.magnify( - result.images[0], scale_factor=scale_factor - ) - # save image - result.images[0].save( - "imgs/result-{}.png".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S")) - ) - return replace_nsfw_images(result) - - -def replace_nsfw_images(results): - if is_colab: - return results.images[0] - if hasattr(results, "nsfw_content_detected") and results.nsfw_content_detected: - for i in range(len(results.images)): - if results.nsfw_content_detected[i]: - results.images[i] = Image.open("nsfw.png") - return results.images[0] - - -css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - if not os.path.exists("imgs"): - os.mkdir("imgs") - - gr.Markdown("# Super Resolution Anime Diffusion") - gr.Markdown( - "## Author: [yangheng95](https://github.com/yangheng95) Github:[Github](https://github.com/yangheng95/stable-diffusion-webui)" - ) - gr.Markdown( - "### This demo is running on a CPU, so it will take at least 20 minutes. " - "If you have a GPU, you can clone from [Github](https://github.com/yangheng95/SuperResolutionAnimeDiffusion) and run it locally." - ) - gr.Markdown( - "### FYI: to generate a 512*512 image and magnify 4x, it only takes 5~8 seconds on a RTX 2080 GPU" - ) - gr.Markdown( - "### You can duplicate this demo on HuggingFace Spaces, click [here](https://huggingface.co/spaces/yangheng/Super-Resolution-Anime-Diffusion?duplicate=true)" - ) - - with gr.Row(): - with gr.Column(scale=55): - with gr.Group(): - gr.Markdown("Text to image") - - model_name = gr.Dropdown( - label="Model", - choices=[m.name for m in models], - value=current_model.name, - ) - - with gr.Box(visible=False) as custom_model_group: - custom_model_path = gr.Textbox( - label="Custom model path", - placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", - interactive=True, - ) - gr.HTML( - "
    Custom models have to be downloaded first, so give it some time.
    " - ) - - with gr.Row(): - prompt = gr.Textbox( - label="Prompt", - show_label=False, - max_lines=2, - placeholder="Enter prompt. Style applied automatically", - ).style(container=False) - with gr.Row(): - generate = gr.Button(value="Generate") - - with gr.Row(): - with gr.Group(): - neg_prompt = gr.Textbox( - label="Negative prompt", - value="bad result, worst, random, invalid, inaccurate, imperfect, blurry, deformed," - " disfigured, mutation, mutated, ugly, out of focus, bad anatomy, text, error," - " extra digit, fewer digits, worst quality, low quality, normal quality, noise, " - "jpeg artifact, compression artifact, signature, watermark, username, logo, " - "low resolution, worst resolution, bad resolution, normal resolution, bad detail," - " bad details, bad lighting, bad shadow, bad shading, bad background," - " worst background.", - ) - - image_out = gr.Image(height="auto", width="auto") - error_output = gr.Markdown() - - with gr.Row(): - gr.Markdown( - "# Random Image Generation Preview (512*768)x4 magnified" - ) - for f_img in findfile.find_cwd_files(".png", recursive=2): - with gr.Row(): - image = gr.Image(height=512, value=PIL.Image.open(f_img)) - # gallery = gr.Gallery( - # label="Generated images", show_label=False, elem_id="gallery" - # ).style(grid=[1], height="auto") - - with gr.Column(scale=45): - with gr.Group(): - gr.Markdown("Image to Image") - - with gr.Row(): - with gr.Group(): - image = gr.Image( - label="Image", height=256, tool="editor", type="pil" - ) - strength = gr.Slider( - label="Transformation strength", - minimum=0, - maximum=1, - step=0.01, - value=0.5, - ) - - with gr.Row(): - with gr.Group(): - # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1) - - with gr.Row(): - guidance = gr.Slider( - label="Guidance scale", value=7.5, maximum=15 - ) - steps = gr.Slider( - label="Steps", value=15, minimum=2, maximum=75, step=1 - ) - - with gr.Row(): - width = gr.Slider( - label="Width", - value=512, - minimum=64, - maximum=1024, - step=8, - ) - height = gr.Slider( - label="Height", - value=768, - minimum=64, - maximum=1024, - step=8, - ) - with gr.Row(): - scale = gr.Radio( - label="Scale", - choices=["Waifu2x", "ESRGAN4x"], - value="Waifu2x", - ) - with gr.Row(): - scale_factor = gr.Slider( - 1, - 8, - label="Scale factor (to magnify image) (1, 2, 4, 8)", - value=1, - step=1, - ) - - seed = gr.Slider( - 0, 2147483647, label="Seed (0 = random)", value=0, step=1 - ) - - if is_colab: - model_name.change( - on_model_change, - inputs=model_name, - outputs=[custom_model_group, prompt], - queue=False, - ) - custom_model_path.change( - custom_model_changed, inputs=custom_model_path, outputs=None - ) - # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery) - - gr.Markdown( - "### based on [Anything V4.5] and [Anything V3](https://huggingface.co/Linaqruf/anything-v3.0)" - ) - - inputs = [ - model_name, - prompt, - guidance, - steps, - width, - height, - seed, - image, - strength, - neg_prompt, - scale, - scale_factor, - ] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs, api_name="generate") - - prompt_keys = [ - "girl", - "lovely", - "cute", - "beautiful eyes", - "cumulonimbus clouds", - random.choice(["dress"]), - random.choice(["white hair"]), - random.choice(["blue eyes"]), - random.choice(["flower meadow"]), - random.choice(["Elif", "Angel"]), - ] - prompt.value = 
",".join(prompt_keys) - ex = gr.Examples( - [ - [models[0].name, prompt.value, 7.5, 15], - ], - inputs=[model_name, prompt, guidance, steps, seed], - outputs=outputs, - fn=inference, - cache_examples=False, - ) - -print(f"Space built in {time.time() - start_time:.2f} seconds") - -if not is_colab: - demo.queue(concurrency_count=1) -demo.launch(debug=is_colab, enable_queue=True, share=is_colab) \ No newline at end of file diff --git a/spaces/Gradio-Blocks/CBNetV2/app.py b/spaces/Gradio-Blocks/CBNetV2/app.py deleted file mode 100644 index e7efdc5a3cd58fc23f83ea44f287f39b1966df70..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/CBNetV2/app.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import pathlib - -import gradio as gr - -from model import Model - -DESCRIPTION = '# [CBNetV2](https://github.com/VDIGPKU/CBNetV2)' - -model = Model() - -with gr.Blocks(css='style.css') as demo: - gr.Markdown(DESCRIPTION) - - with gr.Row(): - with gr.Column(): - with gr.Row(): - input_image = gr.Image(label='Input Image', type='numpy') - with gr.Row(): - detector_name = gr.Dropdown(label='Detector', - choices=list(model.models.keys()), - value=model.model_name) - with gr.Row(): - detect_button = gr.Button('Detect') - detection_results = gr.Variable() - with gr.Column(): - with gr.Row(): - detection_visualization = gr.Image(label='Detection Result', - type='numpy') - with gr.Row(): - visualization_score_threshold = gr.Slider( - label='Visualization Score Threshold', - minimum=0, - maximum=1, - step=0.05, - value=0.3) - with gr.Row(): - redraw_button = gr.Button('Redraw') - - with gr.Row(): - paths = sorted(pathlib.Path('images').rglob('*.jpg')) - gr.Examples(examples=[[path.as_posix()] for path in paths], - inputs=input_image) - - detector_name.change(fn=model.set_model_name, - inputs=[detector_name], - outputs=None) - detect_button.click(fn=model.detect_and_visualize, - inputs=[ - input_image, - visualization_score_threshold, - ], - outputs=[ - detection_results, - detection_visualization, - ]) - redraw_button.click(fn=model.visualize_detection_results, - inputs=[ - input_image, - detection_results, - visualization_score_threshold, - ], - outputs=[detection_visualization]) -demo.queue(max_size=10).launch() diff --git a/spaces/Gradio-Blocks/anime-colorization/pixel_guide_diffusion/losses.py b/spaces/Gradio-Blocks/anime-colorization/pixel_guide_diffusion/losses.py deleted file mode 100644 index 251e42e4f36a31bb5e1aeda874b3a45d722000a2..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/anime-colorization/pixel_guide_diffusion/losses.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -Helpers for various likelihood-based losses. These are ported from the original -Ho et al. diffusion models codebase: -https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py -""" - -import numpy as np - -import torch as th - - -def normal_kl(mean1, logvar1, mean2, logvar2): - """ - Compute the KL divergence between two gaussians. - - Shapes are automatically broadcasted, so batches can be compared to - scalars, among other use cases. - """ - tensor = None - for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, th.Tensor): - tensor = obj - break - assert tensor is not None, "at least one argument must be a Tensor" - - # Force variances to be Tensors. Broadcasting helps convert scalars to - # Tensors, but it does not work for th.exp(). 
- logvar1, logvar2 = [ - x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor) - for x in (logvar1, logvar2) - ] - - return 0.5 * ( - -1.0 - + logvar2 - - logvar1 - + th.exp(logvar1 - logvar2) - + ((mean1 - mean2) ** 2) * th.exp(-logvar2) - ) - - -def approx_standard_normal_cdf(x): - """ - A fast approximation of the cumulative distribution function of the - standard normal. - """ - return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3)))) - - -def discretized_gaussian_log_likelihood(x, *, means, log_scales): - """ - Compute the log-likelihood of a Gaussian distribution discretizing to a - given image. - - :param x: the target images. It is assumed that this was uint8 values, - rescaled to the range [-1, 1]. - :param means: the Gaussian mean Tensor. - :param log_scales: the Gaussian log stddev Tensor. - :return: a tensor like x of log probabilities (in nats). - """ - assert x.shape == means.shape == log_scales.shape - centered_x = x - means - inv_stdv = th.exp(-log_scales) - plus_in = inv_stdv * (centered_x + 1.0 / 255.0) - cdf_plus = approx_standard_normal_cdf(plus_in) - min_in = inv_stdv * (centered_x - 1.0 / 255.0) - cdf_min = approx_standard_normal_cdf(min_in) - log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12)) - log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12)) - cdf_delta = cdf_plus - cdf_min - log_probs = th.where( - x < -0.999, - log_cdf_plus, - th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))), - ) - assert log_probs.shape == x.shape - return log_probs diff --git a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/tf/__init__.py b/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/tf/__init__.py deleted file mode 100644 index 6c520687f67754b0488690287f941854c8cf6133..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/tf/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2021 DeepMind Technologies Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
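Stepping back to the diffusion loss helpers above: as a quick sanity check (my own sketch, not part of either repository), the closed-form expression in `normal_kl` can be verified against `torch.distributions`, which parameterises a Gaussian by mean and standard deviation rather than log-variance.

```python
# Verify normal_kl's closed form against torch.distributions (sketch only).
import torch
import torch.distributions as D

mean1, logvar1 = torch.zeros(3), torch.zeros(3)
mean2, logvar2 = torch.ones(3), torch.full((3,), 0.5)

kl_manual = 0.5 * (
    -1.0 + logvar2 - logvar1
    + torch.exp(logvar1 - logvar2)
    + (mean1 - mean2) ** 2 * torch.exp(-logvar2)
)
# std = exp(0.5 * logvar) converts log-variance to the scale Normal expects.
kl_ref = D.kl_divergence(
    D.Normal(mean1, torch.exp(0.5 * logvar1)),
    D.Normal(mean2, torch.exp(0.5 * logvar2)),
)
assert torch.allclose(kl_manual, kl_ref, atol=1e-6)
print(kl_manual)
```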
-"""Alphafold model TensorFlow code.""" diff --git a/spaces/Gradio-Blocks/speech-to-text-app/app.py b/spaces/Gradio-Blocks/speech-to-text-app/app.py deleted file mode 100644 index 62e4fc0eb0cc37e8f712d1882675c7855a2019cb..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/speech-to-text-app/app.py +++ /dev/null @@ -1,97 +0,0 @@ -import json -import requests -import os - -api_key = os.environ.get("api_key") - -# list_of_transcriptiions = [['ol5bfcn3yv-2e65-44df-91c6-5c198b517ced', 'completed']] - -headers = { - 'authorization': api_key, - 'content-type': 'application/json' - } - -def sumbit_transcription(body): - endpoint = "https://api.assemblyai.com/v2/transcript" - try: - res = requests.post(endpoint, json=body, headers=headers) - return {'status': res.json().get('status'), - 'id': res.json().get('id') - } - except Exception as e: - return {'status': 'error'} - - -def hello(audio_url, speaker_labeling): - body = { - 'audio_url': audio_url - } - - res = sumbit_transcription(body) - - list_of_transcriptions['entries'][res.get('id')] = res.get('status') - with open('transcriptions.json', 'w') as f: - json.dump(list_of_transcriptions, f) - - return res.get('id') - -def get_status(transcription_id): - print('--->', transcription_id) - endpoint_status = f"https://api.assemblyai.com/v2/transcript/{transcription_id}" - print('--->', endpoint_status) - res = requests.get(endpoint_status, headers=headers) - list_of_transcriptions['entries'][transcription_id] = res.json().get('status') - with open('transcriptions.json', 'w') as f: - json.dump(list_of_transcriptions, f) - - return res.json().get('status') - -def get_results(transcription_id): - endpoint_result = f"https://api.assemblyai.com/v2/transcript/{transcription_id}" - res = requests.get(endpoint_result, headers=headers) - return res.json().get('text') - -def refresh_list(): - results = '''Transaction ID \t\t\t\t Status\n''' - for entry, status in list_of_transcriptions['entries'].items(): - results += f'{entry} {status}' - - return list_of_transcriptions['entries'].items() - -import gradio as gr - -demo = gr.Blocks() - -with demo: - gr.Markdown('![#c5f015](https://i.ytimg.com/vi/-NRGVCHI4WM/maxresdefault.jpg)') - with gr.Tabs(): - with gr.TabItem('Submit Audio for Transcription'): - text_input = gr.Textbox(label='Audio URL') - speaker_choice = gr.Checkbox(label='Speaker Labeling') - output = gr.Textbox(label='Transcrption ID') - text_button = gr.Button('Submit') - with gr.TabItem('Get Transcription Status'): - transcription_id_status = gr.Textbox(label='Transcription ID') - status = gr.Textbox(label='Status') - status_button = gr.Button('Get Status') - with gr.TabItem('Submittted Transcriptions'): - list_of_transcriptions = {} - with open('transcriptions.json', 'r') as f: - list_of_transcriptions = json.load(f) - print(list_of_transcriptions['entries']) - data_frame = gr.DataFrame(list(list_of_transcriptions['entries'].items()), headers=['Transaction ID', 'status']) - # refresh_button = gr.Button('Refresh') - with gr.TabItem('Get Transcription Result'): - transcription_id = gr.Textbox(label='Transcription ID') - results = gr.Textbox(label='Full Transcript') - results_button = gr.Button('Get Status') - - text_button.click(hello, inputs=[text_input, speaker_choice], outputs=output) - status_button.click(get_status, inputs=transcription_id_status, outputs=status) - results_button.click(get_results, inputs=[transcription_id], outputs=results) - # refresh_button.click(refresh_list, inputs = None, outputs=None) -# iface = 
gr.Interface(fn=hello, inputs=['text', 'checkbox'], outputs=['text']) - -if __name__ == "__main__": - # iface.launch(server_port=7860, debug=True) - demo.launch(server_port=7860, debug=True) \ No newline at end of file diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py deleted file mode 100644 index f506ea815fedd6faefad9a06d7f466b86e8d2622..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -# fp16 settings -fp16 = dict(loss_scale=512.) diff --git a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/lpips/dist_model.py b/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/lpips/dist_model.py deleted file mode 100644 index 4ff0aa4ca6e4b217954c167787eaac1ca1f8e304..0000000000000000000000000000000000000000 --- a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/lpips/dist_model.py +++ /dev/null @@ -1,284 +0,0 @@ - -from __future__ import absolute_import - -import sys -import numpy as np -import torch -from torch import nn -import os -from collections import OrderedDict -from torch.autograd import Variable -import itertools -from .base_model import BaseModel -from scipy.ndimage import zoom -import fractions -import functools -import skimage.transform -from tqdm import tqdm - -from IPython import embed - -from . import networks_basic as networks -import lpips as util - -class DistModel(BaseModel): - def name(self): - return self.model_name - - def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None, - use_gpu=True, printNet=False, spatial=False, - is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]): - ''' - INPUTS - model - ['net-lin'] for linearly calibrated network - ['net'] for off-the-shelf network - ['L2'] for L2 distance in Lab colorspace - ['SSIM'] for ssim in RGB colorspace - net - ['squeeze','alex','vgg'] - model_path - if None, will look in weights/[NET_NAME].pth - colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM - use_gpu - bool - whether or not to use a GPU - printNet - bool - whether or not to print network architecture out - spatial - bool - whether to output an array containing varying distances across spatial dimensions - spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below). - spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images. - spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear). 
- is_train - bool - [True] for training mode - lr - float - initial learning rate - beta1 - float - initial momentum term for adam - version - 0.1 for latest, 0.0 was original (with a bug) - gpu_ids - int array - [0] by default, gpus to use - ''' - BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids) - - self.model = model - self.net = net - self.is_train = is_train - self.spatial = spatial - self.gpu_ids = gpu_ids - self.model_name = '%s [%s]'%(model,net) - - if(self.model == 'net-lin'): # pretrained net + linear layer - self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net, - use_dropout=True, spatial=spatial, version=version, lpips=True) - kw = {} - if not use_gpu: - kw['map_location'] = 'cpu' - if(model_path is None): - import inspect - model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth'%(version,net))) - - if(not is_train): - print('Loading model from: %s'%model_path) - self.net.load_state_dict(torch.load(model_path, **kw), strict=False) - - elif(self.model=='net'): # pretrained network - self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False) - elif(self.model in ['L2','l2']): - self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing - self.model_name = 'L2' - elif(self.model in ['DSSIM','dssim','SSIM','ssim']): - self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace) - self.model_name = 'SSIM' - else: - raise ValueError("Model [%s] not recognized." % self.model) - - self.parameters = list(self.net.parameters()) - - if self.is_train: # training mode - # extra network on top to go from distances (d0,d1) => predicted human judgment (h*) - self.rankLoss = networks.BCERankingLoss() - self.parameters += list(self.rankLoss.net.parameters()) - self.lr = lr - self.old_lr = lr - self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999)) - else: # test mode - self.net.eval() - - if(use_gpu): - self.net.to(gpu_ids[0]) - self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids) - if(self.is_train): - self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0 - - if(printNet): - print('---------- Networks initialized -------------') - networks.print_network(self.net) - print('-----------------------------------------------') - - def forward(self, in0, in1, retPerLayer=False): - ''' Function computes the distance between image patches in0 and in1 - INPUTS - in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1] - OUTPUT - computed distances between in0 and in1 - ''' - - return self.net.forward(in0, in1, retPerLayer=retPerLayer) - - # ***** TRAINING FUNCTIONS ***** - def optimize_parameters(self): - self.forward_train() - self.optimizer_net.zero_grad() - self.backward_train() - self.optimizer_net.step() - self.clamp_weights() - - def clamp_weights(self): - for module in self.net.modules(): - if(hasattr(module, 'weight') and module.kernel_size==(1,1)): - module.weight.data = torch.clamp(module.weight.data,min=0) - - def set_input(self, data): - self.input_ref = data['ref'] - self.input_p0 = data['p0'] - self.input_p1 = data['p1'] - self.input_judge = data['judge'] - - if(self.use_gpu): - self.input_ref = self.input_ref.to(device=self.gpu_ids[0]) - self.input_p0 = self.input_p0.to(device=self.gpu_ids[0]) - self.input_p1 = self.input_p1.to(device=self.gpu_ids[0]) - self.input_judge = self.input_judge.to(device=self.gpu_ids[0]) - - self.var_ref = 
Variable(self.input_ref,requires_grad=True) - self.var_p0 = Variable(self.input_p0,requires_grad=True) - self.var_p1 = Variable(self.input_p1,requires_grad=True) - - def forward_train(self): # run forward pass - # print(self.net.module.scaling_layer.shift) - # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item()) - - self.d0 = self.forward(self.var_ref, self.var_p0) - self.d1 = self.forward(self.var_ref, self.var_p1) - self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge) - - self.var_judge = Variable(1.*self.input_judge).view(self.d0.size()) - - self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.) - - return self.loss_total - - def backward_train(self): - torch.mean(self.loss_total).backward() - - def compute_accuracy(self,d0,d1,judge): - ''' d0, d1 are Variables, judge is a Tensor ''' - d1_lt_d0 = (d1 %f' % (type,self.old_lr, lr)) - self.old_lr = lr - -def score_2afc_dataset(data_loader, func, name=''): - ''' Function computes Two Alternative Forced Choice (2AFC) score using - distance function 'func' in dataset 'data_loader' - INPUTS - data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside - func - callable distance function - calling d=func(in0,in1) should take 2 - pytorch tensors with shape Nx3xXxY, and return numpy array of length N - OUTPUTS - [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators - [1] - dictionary with following elements - d0s,d1s - N arrays containing distances between reference patch to perturbed patches - gts - N array in [0,1], preferred patch selected by human evaluators - (closer to "0" for left patch p0, "1" for right patch p1, - "0.6" means 60pct people preferred right patch, 40pct preferred left) - scores - N array in [0,1], corresponding to what percentage function agreed with humans - CONSTS - N - number of test triplets in data_loader - ''' - - d0s = [] - d1s = [] - gts = [] - - for data in tqdm(data_loader.load_data(), desc=name): - d0s+=func(data['ref'],data['p0']).data.cpu().numpy().flatten().tolist() - d1s+=func(data['ref'],data['p1']).data.cpu().numpy().flatten().tolist() - gts+=data['judge'].cpu().numpy().flatten().tolist() - - d0s = np.array(d0s) - d1s = np.array(d1s) - gts = np.array(gts) - scores = (d0s MAX_DIMENSION or img.width > MAX_DIMENSION: - logger.info(f"Dimensions too large. Resizing to {MAX_DIMENSION}px.") - img.thumbnail((MAX_DIMENSION, MAX_DIMENSION), Image.ANTIALIAS) - - return img - - -def inference(img, style): - img = adjust_image_for_model(img) - - # load image - input_image = img.convert(COLOUR_MODEL) - input_image = np.asarray(input_image) - # RGB -> BGR - input_image = input_image[:, :, [2, 1, 0]] - input_image = transforms.ToTensor()(input_image).unsqueeze(0) - # preprocess, (-1, 1) - input_image = -1 + 2 * input_image - - if enable_gpu: - logger.info(f"CUDA found. Using GPU.") - # Allows to specify a card for calculation - input_image = Variable(input_image).to(device) - else: - logger.info(f"CUDA not found. Using CPU.") - input_image = Variable(input_image).float() - - # forward - model = get_model(style) - output_image = model(input_image) - output_image = output_image[0] - # BGR -> RGB - output_image = output_image[[2, 1, 0], :, :] - output_image = output_image.data.cpu().float() * 0.5 + 0.5 - - return transforms.ToPILImage()(output_image) - - -# Gradio setup - -title = "Anime Background GAN" -description = "Gradio Demo for CartoonGAN by Chen Et. Al. 
Models are Shinkai Makoto, Hosoda Mamoru, Kon Satoshi, and Miyazaki Hayao." -article = "

    CartoonGAN Whitepaper from Chen et.al

    Github Repo

    Original Implementation from Yijunmaverick

    visitor badge

    " - -examples = [ - ["examples/garden_in.jpg", STYLE_SHINKAI], - ["examples/library_in.jpg", STYLE_KON], -] - - -gr.Interface( - fn=inference, - inputs=[ - gr.inputs.Image( - type="pil", - label="Input Photo (less than 1280px on both width and height)", - ), - gr.inputs.Dropdown( - STYLE_CHOICE_LIST, - type="value", - default=DEFAULT_STYLE, - label="Style", - ), - ], - outputs=gr.outputs.Image( - type="pil", - label="Output Image", - ), - title=title, - description=description, - article=article, - examples=examples, - allow_flagging="never", - allow_screenshot=False, -).launch(enable_queue=True) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh deleted file mode 100644 index 0428d8bef9d426ac3e664cd281ce0b688f5f580f..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# -source_lang=kk_KZ -target_lang=en_XX -MODEL=criss_checkpoints/criss.3rd.pt -SPM=criss_checkpoints/sentence.bpe.model -SPLIT=test -LANG_DICT=criss_checkpoints/lang_dict.txt -ENCODER_ANALYSIS=sentence_retrieval/encoder_analysis.py -SAVE_ENCODER=save_encoder.py -ENCODER_SAVE_ROOT=sentence_embeddings/$MODEL - - - -DATA_DIR=data_tmp -INPUT_DIR=$DATA_DIR/${source_lang}-${target_lang}-tatoeba -ENCODER_SAVE_DIR=${ENCODER_SAVE_ROOT}/${source_lang}-${target_lang} -mkdir -p $ENCODER_SAVE_DIR/${target_lang} -mkdir -p $ENCODER_SAVE_DIR/${source_lang} - -# Save encoder outputs for source sentences -python $SAVE_ENCODER \ - ${INPUT_DIR} \ - --path ${MODEL} \ - --task translation_multi_simple_epoch \ - --lang-dict ${LANG_DICT} \ - --gen-subset ${SPLIT} \ - --bpe 'sentencepiece' \ - --lang-pairs ${source_lang}-${target_lang} \ - -s ${source_lang} -t ${target_lang} \ - --sentencepiece-model ${SPM} \ - --remove-bpe 'sentencepiece' \ - --beam 1 \ - --lang-tok-style mbart \ - --encoder-save-dir ${ENCODER_SAVE_DIR}/${source_lang} - -# Save encoder outputs for target sentences -python $SAVE_ENCODER \ - ${INPUT_DIR} \ - --path ${MODEL} \ - --lang-dict ${LANG_DICT} \ - --task translation_multi_simple_epoch \ - --gen-subset ${SPLIT} \ - --bpe 'sentencepiece' \ - --lang-pairs ${target_lang}-${source_lang} \ - -t ${source_lang} -s ${target_lang} \ - --sentencepiece-model ${SPM} \ - --remove-bpe 'sentencepiece' \ - --beam 1 \ - --lang-tok-style mbart \ - --encoder-save-dir ${ENCODER_SAVE_DIR}/${target_lang} - -# Analyze sentence retrieval accuracy -python $ENCODER_ANALYSIS --langs "${source_lang},${target_lang}" ${ENCODER_SAVE_DIR} diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/latent_depth/README.md b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/latent_depth/README.md deleted file mode 100644 index 7774c333053b95d15b180fdfc3ee3cd817790520..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/latent_depth/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Deep Transformers with Latent Depth (Li et al., 2020) - -[https://arxiv.org/abs/2009.13102](https://arxiv.org/abs/2009.13102). 
- -## Introduction - -We present a probabilistic framework to automatically learn which layer(s) to use by learning the posterior distributions of layer selection. As an extension of this framework, we propose a novel method to train one shared Transformer network for multilingual machine translation with different layer selection posteriors for each language pair. - -## Training a multilingual model with latent depth - -Below is an example of training with latent depth in decoder for one-to-many (O2M) related languages. We use the same preprocessed (numberized and binarized) TED8 dataset as in [Balancing Training for Multilingual Neural Machine Translation (Wang et al., 2020)](https://github.com/cindyxinyiwang/multiDDS), which could be generated by [the script](https://github.com/cindyxinyiwang/multiDDS/blob/multiDDS/util_scripts/prepare_multilingual_data.sh) the author provided. -```bash -lang_pairs_str="eng-aze,eng-bel,eng-ces,eng-glg,eng-por,eng-rus,eng-slk,eng-tur" -databin_dir= - -fairseq-train ${databin_dir} \ - --user-dir examples/latent_depth/latent_depth_src \ - --lang-pairs "${lang_pairs_str}" \ - --arch multilingual_transformer_iwslt_de_en \ - --task multilingual_translation_latent_depth \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --share-encoders \ - --share-decoders \ - --decoder-langtok \ - --share-decoder-input-output-embed \ - --dropout 0.3 --attention-dropout 0.3 \ - --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \ - --lr-scheduler inverse_sqrt --stop-min-lr 1e-9 --warmup-init-lr 1e-7 --warmup-updates 8000 \ - --max-tokens 4096 --update-freq 1 \ - --lr 0.0015 \ - --clip-norm 1.0 \ - --seed 2 \ - --ddp-backend=legacy_ddp \ - --encoder-layers 12 \ - --decoder-layers 24 \ - --decoder-latent-layer \ - --sparsity-weight 0.1 \ - --anneal-updates 5000 \ - --soft-update 500 \ - --target-layers 12 \ - --share-weight 0.1 -``` -## Inference command - -```bash -lang_pairs_str="eng-aze,eng-bel,eng-ces,eng-glg,eng-por,eng-rus,eng-slk,eng-tur" -databin_dir= -model_path= -src_lang= -tgt_lang= -gen_data= - -fairseq-generate ${databin_dir} \ - --path ${model_path} \ - --task multilingual_translation_latent_depth \ - --decoder-latent-layer \ - --lang-pairs "${lang_pairs_str}" \ - -s ${src_lang} -t ${tgt_lang} \ - --gen-subset $gen_data \ - --scoring sacrebleu \ - --remove-bpe 'sentencepiece' \ - --lenpen 1.0 \ - --beam 5 \ - --decoder-langtok \ - --max-tokens 4096 -``` - - -## Citation -```bibtex -@article{li2020deep, - title={Deep Transformers with Latent Depth}, - author={Li, Xian and Stickland, Asa Cooper and Tang, Yuqing and Kong, Xiang}, - journal={arXiv preprint arXiv:2009.13102}, - year={2020} -} -``` diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py deleted file mode 100644 index 7e2caa03400129ac0bb34ae35274cdf46f27a055..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
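The `LinformerTransformerEncoderLayer` defined in the file below swaps standard self-attention for `MultiheadLinearAttention`, which pushes keys and values through a shared compression projection. The following toy sketch (my own single-head simplification, not the fairseq implementation) shows the core idea: project K and V along the sequence dimension so the attention map is n×k instead of n×n.

```python
# Toy single-head sketch of Linformer-style attention (not fairseq's
# MultiheadLinearAttention): K and V are projected from sequence length
# n down to n // compressed before the softmax.
import torch
from torch import nn


class ToyLinearAttention(nn.Module):
    def __init__(self, embed_dim: int, max_seq_len: int, compressed: int = 4):
        super().__init__()
        self.q = nn.Linear(embed_dim, embed_dim)
        self.k = nn.Linear(embed_dim, embed_dim)
        self.v = nn.Linear(embed_dim, embed_dim)
        # One projection shared by keys and values, echoing shared_kv_compressed.
        self.compress = nn.Linear(max_seq_len, max_seq_len // compressed)
        self.scale = embed_dim ** -0.5

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, n, d); assumes inputs are padded to max_seq_len.
        q, k, v = self.q(x), self.k(x), self.v(x)
        k = self.compress(k.transpose(1, 2)).transpose(1, 2)  # (batch, k, d)
        v = self.compress(v.transpose(1, 2)).transpose(1, 2)  # (batch, k, d)
        attn = torch.softmax(q @ k.transpose(1, 2) * self.scale, dim=-1)
        return attn @ v  # (batch, n, d)


x = torch.randn(2, 128, 64)  # (batch, seq_len, embed_dim)
out = ToyLinearAttention(embed_dim=64, max_seq_len=128)(x)
print(out.shape)  # torch.Size([2, 128, 64])
```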
- -import torch -from fairseq import utils -from fairseq.modules import TransformerEncoderLayer - -from .multihead_linear_attention import MultiheadLinearAttention - - -class LinformerTransformerEncoderLayer(TransformerEncoderLayer): - """ - Implements a Linformer Encoder Layer used in BERT/XLM style pre-trained - models. - """ - - def __init__(self, args, shared_compress_layer): - # wrap in a list so it's not automatically registered by PyTorch - self.shared_compress_layer = [shared_compress_layer] - - super().__init__(args) - - self.register_buffer("version", torch.tensor(2)) - - def build_self_attention(self, embed_dim, args): - return MultiheadLinearAttention( - embed_dim, - args.encoder_attention_heads, - dropout=args.dropout, - self_attention=True, - q_noise=args.quant_noise_pq, - qn_block_size=args.quant_noise_pq_block_size, - compressed=args.compressed, - max_seq_len=args.max_positions, - shared_kv_compressed=args.shared_kv_compressed, - shared_compress_layer=self.shared_compress_layer[0], - freeze_compress=args.freeze_compress, - ) - - def upgrade_state_dict_named(self, state_dict, name): - super().upgrade_state_dict_named(state_dict, name) - prefix = name + "." if name != "" else "" - - # some old checkpoints had weight sharing implemented incorrectly - # (note: this was correct in the original paper code) - if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2: - state_dict[f"{prefix}version"] = torch.tensor(1) - # check compression layer sharing - if f"{prefix}shared_compress_layer.weight" in state_dict: - # reinitialize block without sharing compression layer to match - # old behavior - self.shared_compress_layer = [ - torch.nn.Linear( - self.shared_compress_layer[0].weight.size(1), - self.shared_compress_layer[0].weight.size(0), - ) - ] - self.self_attn = self.build_self_attention(self.embed_dim, self.args) - # delete shared_compress_layer, since it's already copied to - # self_attn.compress_k.weight - del state_dict[f"{prefix}shared_compress_layer.weight"] - if f"{prefix}shared_compress_layer.bias" in state_dict: - del state_dict[f"{prefix}shared_compress_layer.bias"] diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/__init__.py deleted file mode 100644 index 337c77ac7bfb7e11a0662b86b98c4c0a02da26df..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/__init__.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-"""isort:skip_file""" - -import argparse -import importlib -import os -from contextlib import ExitStack - -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.utils import merge_with_parent -from hydra.core.config_store import ConfigStore -from omegaconf import open_dict, OmegaConf - -from .composite_encoder import CompositeEncoder -from .distributed_fairseq_model import DistributedFairseqModel -from .fairseq_decoder import FairseqDecoder -from .fairseq_encoder import FairseqEncoder -from .fairseq_incremental_decoder import FairseqIncrementalDecoder -from .fairseq_model import ( - BaseFairseqModel, - FairseqEncoderDecoderModel, - FairseqEncoderModel, - FairseqLanguageModel, - FairseqModel, - FairseqMultiModel, -) - - -MODEL_REGISTRY = {} -MODEL_DATACLASS_REGISTRY = {} -ARCH_MODEL_REGISTRY = {} -ARCH_MODEL_NAME_REGISTRY = {} -ARCH_MODEL_INV_REGISTRY = {} -ARCH_CONFIG_REGISTRY = {} - - -__all__ = [ - "BaseFairseqModel", - "CompositeEncoder", - "DistributedFairseqModel", - "FairseqDecoder", - "FairseqEncoder", - "FairseqEncoderDecoderModel", - "FairseqEncoderModel", - "FairseqIncrementalDecoder", - "FairseqLanguageModel", - "FairseqModel", - "FairseqMultiModel", -] - - -def build_model(cfg: FairseqDataclass, task): - - model = None - model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None) - - if not model_type and len(cfg) == 1: - # this is hit if config object is nested in directory that is named after model type - - model_type = next(iter(cfg)) - if model_type in MODEL_DATACLASS_REGISTRY: - cfg = cfg[model_type] - else: - raise Exception( - "Could not infer model type from directory. Please add _name field to indicate model type. " - "Available models: " - + str(MODEL_DATACLASS_REGISTRY.keys()) - + " Requested model type: " - + model_type - ) - - if model_type in ARCH_MODEL_REGISTRY: - # case 1: legacy models - model = ARCH_MODEL_REGISTRY[model_type] - elif model_type in MODEL_DATACLASS_REGISTRY: - # case 2: config-driven models - model = MODEL_REGISTRY[model_type] - - if model_type in MODEL_DATACLASS_REGISTRY: - # set defaults from dataclass. note that arch name and model name can be the same - dc = MODEL_DATACLASS_REGISTRY[model_type] - - if isinstance(cfg, argparse.Namespace): - cfg = dc.from_namespace(cfg) - else: - cfg = merge_with_parent(dc(), cfg) - else: - if model_type in ARCH_CONFIG_REGISTRY: - with open_dict(cfg) if OmegaConf.is_config(cfg) else ExitStack(): - # this calls the different "arch" functions (like base_architecture()) that you indicate - # if you specify --arch on the command line. this is only applicable to the old argparse based models - # hydra models should expose different architectures via different config files - # it will modify the cfg object and default parameters according to the arch - ARCH_CONFIG_REGISTRY[model_type](cfg) - - assert model is not None, ( - f"Could not infer model type from {cfg}. " - "Available models: {}".format( - MODEL_DATACLASS_REGISTRY.keys() - ) - + f" Requested model type: {model_type}" - ) - - return model.build_model(cfg, task) - - -def register_model(name, dataclass=None): - """ - New model types can be added to fairseq with the :func:`register_model` - function decorator. - - For example:: - - @register_model('lstm') - class LSTM(FairseqEncoderDecoderModel): - (...) - - .. note:: All models must implement the :class:`BaseFairseqModel` interface. 
- Typically you will extend :class:`FairseqEncoderDecoderModel` for - sequence-to-sequence tasks or :class:`FairseqLanguageModel` for - language modeling tasks. - - Args: - name (str): the name of the model - """ - - def register_model_cls(cls): - if name in MODEL_REGISTRY: - raise ValueError("Cannot register duplicate model ({})".format(name)) - if not issubclass(cls, BaseFairseqModel): - raise ValueError( - "Model ({}: {}) must extend BaseFairseqModel".format(name, cls.__name__) - ) - MODEL_REGISTRY[name] = cls - if dataclass is not None and not issubclass(dataclass, FairseqDataclass): - raise ValueError( - "Dataclass {} must extend FairseqDataclass".format(dataclass) - ) - - cls.__dataclass = dataclass - if dataclass is not None: - MODEL_DATACLASS_REGISTRY[name] = dataclass - - cs = ConfigStore.instance() - node = dataclass() - node._name = name - cs.store(name=name, group="model", node=node, provider="fairseq") - - @register_model_architecture(name, name) - def noop(_): - pass - - return cls - - return register_model_cls - - -def register_model_architecture(model_name, arch_name): - """ - New model architectures can be added to fairseq with the - :func:`register_model_architecture` function decorator. After registration, - model architectures can be selected with the ``--arch`` command-line - argument. - - For example:: - - @register_model_architecture('lstm', 'lstm_luong_wmt_en_de') - def lstm_luong_wmt_en_de(cfg): - args.encoder_embed_dim = getattr(cfg.model, 'encoder_embed_dim', 1000) - (...) - - The decorated function should take a single argument *cfg*, which is a - :class:`omegaconf.DictConfig`. The decorated function should modify these - arguments in-place to match the desired architecture. - - Args: - model_name (str): the name of the Model (Model must already be - registered) - arch_name (str): the name of the model architecture (``--arch``) - """ - - def register_model_arch_fn(fn): - if model_name not in MODEL_REGISTRY: - raise ValueError( - "Cannot register model architecture for unknown model type ({})".format( - model_name - ) - ) - if arch_name in ARCH_MODEL_REGISTRY: - raise ValueError( - "Cannot register duplicate model architecture ({})".format(arch_name) - ) - if not callable(fn): - raise ValueError( - "Model architecture must be callable ({})".format(arch_name) - ) - ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name] - ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name - ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name) - ARCH_CONFIG_REGISTRY[arch_name] = fn - return fn - - return register_model_arch_fn - - -def import_models(models_dir, namespace): - for file in os.listdir(models_dir): - path = os.path.join(models_dir, file) - if ( - not file.startswith("_") - and not file.startswith(".") - and (file.endswith(".py") or os.path.isdir(path)) - ): - model_name = file[: file.find(".py")] if file.endswith(".py") else file - importlib.import_module(namespace + "." 
+ model_name) - - # extra `model_parser` for sphinx - if model_name in MODEL_REGISTRY: - parser = argparse.ArgumentParser(add_help=False) - group_archs = parser.add_argument_group("Named architectures") - group_archs.add_argument( - "--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name] - ) - group_args = parser.add_argument_group( - "Additional command-line arguments" - ) - MODEL_REGISTRY[model_name].add_args(group_args) - globals()[model_name + "_parser"] = parser - - -# automatically import any Python files in the models/ directory -models_dir = os.path.dirname(__file__) -import_models(models_dir, "fairseq.models") diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/base_layer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/base_layer.py deleted file mode 100644 index e7ef155b25fc73e74780879f665288c9bc95fd80..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/base_layer.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch.nn as nn -import torch -import sys -from fairseq import utils -from fairseq.distributed import utils as distributed_utils -from fairseq.modules.layer_norm import LayerNorm - - -class BaseLayer(nn.Module): - - def __init__(self, args): - super().__init__() - self.num_workers = distributed_utils.get_data_parallel_world_size() - expert_centroids = torch.empty(self.num_workers, args.decoder_embed_dim) - torch.nn.init.orthogonal_(expert_centroids, gain=0.1) - self.register_parameter("expert_centroids", torch.nn.Parameter(expert_centroids)) - self.expert_network = nn.Sequential(*([BaseSublayer(args) for _ in range(args.base_sublayers)])) - self.expert_id = distributed_utils.get_data_parallel_rank() - self.shuffle = args.base_shuffle - self.cpp = self.load_assignment() - - # Add a special attribute to the expert parameters, so we know not to sync their gradients - for param in self.expert_network.parameters(): - param.expert = True - - def forward(self, input_features, *args, **kwargs): - features = input_features.reshape(-1, input_features.size(-1)) - is_training = input_features.requires_grad - - if self.shuffle and is_training: - # Send each token to a random worker, to break correlations within the batch - shuffle_sort = torch.randperm(features.size(0), device=features.device) - features = All2All.apply(features[shuffle_sort]) - - with torch.no_grad(): - # Compute similarity of each token to each expert, for routing - token_expert_affinities = features.matmul(self.expert_centroids.transpose(0, 1)) - - # Compute which token goes to which expert - sort_by_expert, input_splits, output_splits = self.balanced_assignment(token_expert_affinities) \ - if is_training else self.greedy_assignment(token_expert_affinities) - # Swap these tokens for the right ones for our expert - routed_features = All2All.apply(features[sort_by_expert], output_splits, input_splits) - - if routed_features.size(0) > 0: - # Mix in the expert network based on how appropriate it is for these tokens - alpha = torch.sigmoid(routed_features.mv(self.expert_centroids[self.expert_id])).unsqueeze(1) - routed_features = alpha * self.expert_network(routed_features) + (1 - alpha) * routed_features - # Return to original worker and ordering - result = All2All.apply(routed_features, input_splits, 
output_splits)[self.inverse_sort(sort_by_expert)] - - if self.shuffle and is_training: - # Undo shuffling - result = All2All.apply(result)[self.inverse_sort(shuffle_sort)] - - # Return additional Nones for compatibility with TransformerDecoderLayer - return result.view(input_features.size()), None, None - - def inverse_sort(self, order): - # Creates an index that undoes a sort: xs==xs[order][inverse_sort(order)] - return torch.empty_like(order).scatter_(0, order, torch.arange(0, order.size(0), device=order.device)) - - def balanced_assignment(self, scores): - ok = scores.isfinite() - if not ok.all(): - # NaNs here can break the assignment algorithm - scores[~ok] = scores[ok].min() - return self.cpp.balanced_assignment(scores), None, None - - # Assigns each token to the top k experts - def greedy_assignment(self, scores, k=1): - token_to_workers = torch.topk(scores, dim=1, k=k, largest=True).indices.view(-1) - token_to_workers, sort_ordering = torch.sort(token_to_workers) - worker2token = sort_ordering // k - - # Find how many tokens we're sending to each other worker (being careful for sending 0 tokens to some workers) - output_splits = torch.zeros((self.num_workers,), dtype=torch.long, device=scores.device) - workers, counts = torch.unique_consecutive(token_to_workers, return_counts=True) - output_splits[workers] = counts - # Tell other workers how many tokens to expect from us - input_splits = All2All.apply(output_splits) - return worker2token, input_splits.tolist(), output_splits.tolist() - - def load_assignment(self): - try: - from fairseq import libbase - - return libbase - - except ImportError as e: - sys.stderr.write( - "ERROR: missing libbase. run `python setup.py build_ext --inplace`\n" - ) - raise e - - -class BaseSublayer(nn.Module): - def __init__(self, args): - super().__init__() - self.activation_fn = utils.get_activation_fn( - activation=getattr(args, 'activation_fn', 'relu') or "relu" - ) - self.norm = LayerNorm(args.decoder_embed_dim, export=False) - self.ff1 = torch.nn.Linear(args.decoder_embed_dim, args.decoder_ffn_embed_dim) - self.ff2 = torch.nn.Linear(args.decoder_ffn_embed_dim, args.decoder_embed_dim) - self.ff2.weight.data.zero_() - - def forward(self, xs): - return xs + self.ff2(self.activation_fn(self.ff1(self.norm(xs)))) - - -# Wraps torch.distributed.all_to_all_single as a function that supports autograd -class All2All(torch.autograd.Function): - @staticmethod - def forward(ctx, xs, input_splits=None, output_splits=None): - ctx.input_splits = input_splits - ctx.output_splits = output_splits - - ys = torch.empty_like(xs) if output_splits is None else \ - xs.new_empty(size=[sum(output_splits)] + list(xs.size()[1:])) - torch.distributed.all_to_all_single(ys, xs, output_split_sizes=output_splits, input_split_sizes=input_splits) - return ys - - @staticmethod - def backward(ctx, grad_output): - result = torch.empty_like(grad_output) if ctx.input_splits is None else \ - grad_output.new_empty(size=[sum(ctx.input_splits)] + list(grad_output.size()[1:])) - torch.distributed.all_to_all_single(result, grad_output, - output_split_sizes=ctx.input_splits, input_split_sizes=ctx.output_splits) - return result, None, None diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_constraints.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_constraints.py deleted file mode 100644 index 1c37f7e1fb26d8ea5349fedd3a60f566d09cf598..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_constraints.py +++ 
/dev/null @@ -1,269 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import sys -import unittest - -import torch -from fairseq.token_generation_constraints import * - - -def tensorize(constraints: List[List[int]]) -> torch.Tensor: - return [torch.tensor(x) for x in constraints] - - -class TestHelperRoutines(unittest.TestCase): - def setUp(self): - self.examples = [ - ([[]], torch.tensor([[0]])), - ([[], []], torch.tensor([[0], [0]])), - ([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])), - ( - [ - [ - torch.tensor([3, 1, 2]), - torch.tensor([3]), - torch.tensor([4, 5, 6, 7]), - ], - [], - [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])], - ], - torch.tensor( - [ - [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0], - ] - ), - ), - ] - - def test_packing(self): - """Ensures the list of lists of tensors gets packed correctly.""" - for batch_constraints, expected_tensor in self.examples: - packed = pack_constraints(batch_constraints) - assert torch.equal(packed, expected_tensor) - - -class TestUnorderedConstraintState(unittest.TestCase): - def setUp(self): - # Tuples of (contraint set, expected printed graph, token counts per node) - self.examples = [ - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - "([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))", - {1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1}, - ), - ([], "[None].False#0", {}), - (tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}), - ( - tensorize([[100000, 1, 2, 3, 4, 5]]), - "([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))", - {100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, - ), - ( - tensorize([[1, 2], [1, 2]]), - "([None].False#2 ([1].False#2 [2].True#2))", - {1: 2, 2: 2}, - ), - ( - tensorize([[1, 2], [3, 4]]), - "([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))", - {1: 1, 2: 1, 3: 1, 4: 1}, - ), - ] - - self.sequences = [ - ( - self.examples[0][0], - [], - {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, - ), - ( - self.examples[0][0], - [1, 2], - {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, - ), - ( - self.examples[0][0], - [1, 2, 94], - {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, - ), - ( - self.examples[0][0], - [1, 3, 999, 1, 4], - {"bank": 4, "num_completed": 2, "finished": False, "is_root": False}, - ), - ( - self.examples[0][0], - [1, 3, 999, 1, 4, 999], - {"bank": 4, "num_completed": 2, "finished": False, "is_root": True}, - ), - ( - self.examples[0][0], - [4, 5, 6, 8], - {"bank": 2, "num_completed": 1, "finished": False, "is_root": True}, - ), - ( - self.examples[0][0], - # Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5] - # [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]], - [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], - {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, - ), - ( - self.examples[0][0], - [1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], - {"bank": 14, "num_completed": 6, "finished": True, "is_root": True}, - ), - ( - tensorize([[1], [2, 3]]), - # Should not be able to get credit for entering 1 a second time - [1, 1], - {"bank": 1, "num_completed": 1, "finished": False, 
"is_root": True}, - ), - ( - self.examples[4][0], - [1, 2, 1, 2], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, - ), - ( - self.examples[4][0], - [1, 2, 1, 2, 1], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, - ), - ( - self.examples[5][0], - [1, 2, 3, 4, 5], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, - ), - ] - - def test_graphs(self): - """ - Test whether unordered graph systems are created correctly. - """ - for example in self.examples: - constraints, expected, gold_counts = example - c = ConstraintNode.create(constraints) - assert ( - ConstraintNode.print_graph(c) == expected - ), f"got {ConstraintNode.print_graph(c)}, expected {expected}" - assert ( - c.token_counts() == gold_counts - ), f"{c} got {c.token_counts()} wanted {gold_counts}" - - def test_next_tokens(self): - """ - Tests that the set of next tokens is correct. - """ - for example in self.examples: - constraints, expected, gold_counts = example - root = ConstraintNode.create(constraints) - - root_tokens = set(root.children.keys()) - for sequence in constraints: - state = UnorderedConstraintState(root) - for token in sequence: - all_tokens = root_tokens.union(state.node.children.keys()) - assert ( - all_tokens == state.next_tokens() - ), f"ALL {all_tokens} NEXT {state.next_tokens()}" - state = state.advance(token) - - def test_sequences(self): - for constraints, tokens, expected in self.sequences: - state = UnorderedConstraintState.create(pack_constraints([constraints])[0]) - for token in tokens: - state = state.advance(token) - result = {} - for attr in expected.keys(): - result[attr] = getattr(state, attr) - - assert ( - result == expected - ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" - - -class TestOrderedConstraintState(unittest.TestCase): - def setUp(self): - self.sequences = [ - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [], - {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2], - {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 94], - {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 3, 999, 1, 4], - {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 3, 999, 999], - {"bank": 3, "num_completed": 1, "finished": False, "is_root": False}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 3, 77, 1, 3, 1], - {"bank": 6, "num_completed": 2, "finished": False, "is_root": False}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], - {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, - ), - ( - tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), - [1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], - {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, - ), - ( - tensorize([[1], [2, 3]]), - [1, 1], - {"bank": 1, "num_completed": 1, "finished": False, "is_root": False}, - ), - ( - tensorize([[1, 2], [1, 2]]), - [1, 2, 1, 2], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, - ), - ( - tensorize([[1, 2], [1, 
2]]), - [1, 2, 1, 2, 1], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, - ), - ( - tensorize([[1, 2], [3, 4]]), - [1, 2, 3, 4, 5], - {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, - ), - ] - - def test_sequences(self): - for i, (constraints, tokens, expected) in enumerate(self.sequences): - state = OrderedConstraintState.create(pack_constraints([constraints])[0]) - for token in tokens: - state = state.advance(token) - result = {} - for attr in expected.keys(): - result[attr] = getattr(state, attr) - assert ( - result == expected - ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/attentions.py b/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/attentions.py deleted file mode 100644 index 62b8c83acbd3150b6d6686f21f3627781107c1ba..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/attentions.py +++ /dev/null @@ -1,378 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=None, - block_length=None, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - self.block_length = block_length - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - window_size=window_size, - p_dropout=p_dropout, - block_length=block_length, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - for i in range(self.n_layers): - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class CouplingBlock(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - sigmoid_scale=False, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - self.sigmoid_scale = sigmoid_scale - - start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1) - start = torch.nn.utils.weight_norm(start) - self.start = start - # Initializing last layer to 0 makes the affine coupling layers - # do nothing at first. It helps to stabilze training. 
- end = torch.nn.Conv1d(hidden_channels, in_channels, 1) - end.weight.data.zero_() - end.bias.data.zero_() - self.end = end - - self.wn = modules.WN( - in_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels, - p_dropout, - ) - - def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs): - b, c, t = x.size() - if x_mask is None: - x_mask = 1 - x_0, x_1 = x[:, : self.in_channels // 2], x[:, self.in_channels // 2 :] - - x = self.start(x_0) * x_mask - x = self.wn(x, x_mask, g) - out = self.end(x) - - z_0 = x_0 - m = out[:, : self.in_channels // 2, :] - logs = out[:, self.in_channels // 2 :, :] - if self.sigmoid_scale: - logs = torch.log(1e-6 + torch.sigmoid(logs + 2)) - - if reverse: - z_1 = (x_1 - m) * torch.exp(-logs) * x_mask - logdet = None - else: - z_1 = (m + torch.exp(logs) * x_1) * x_mask - logdet = torch.sum(logs * x_mask, [1, 2]) - - z = torch.cat([z_0, z_1], 1) - return z, logdet - - def store_inverse(self): - self.wn.remove_weight_norm() - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - window_size=None, - heads_share=True, - p_dropout=0.0, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.p_dropout = p_dropout - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels ** -0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - if proximal_init: - self.conv_k.weight.data.copy_(self.conv_q.weight.data) - self.conv_k.bias.data.copy_(self.conv_q.bias.data) - nn.init.xavier_uniform_(self.conv_v.weight) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." 
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) - rel_logits = self._relative_position_to_absolute_position(rel_logits) - scores_local = rel_logits / math.sqrt(self.k_channels) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores * block_mask + -1e4 * (1 - block_mask) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. 
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - - self.conv_1 = nn.Conv1d( - in_channels, filter_channels, kernel_size, padding=kernel_size // 2 - ) - self.conv_2 = nn.Conv1d( - filter_channels, out_channels, kernel_size, padding=kernel_size // 2 - ) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(x * x_mask) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - return x * x_mask diff --git a/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/tokenize/indic_detokenize.py b/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/tokenize/indic_detokenize.py deleted file mode 100644 index 71fa2ace3c9cd851021e66c01a34e1c99338d294..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/tokenize/indic_detokenize.py +++ /dev/null @@ -1,134 +0,0 @@ -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -# - -#Program for detokenizing Indian language input -# -# @author Anoop Kunchukuttan -# -""" -De-tokenizer for Indian languages. -""" - -import string, re, sys -from indicnlp.common import IndicNlpException - -## detokenizer patterns -left_attach=r'!%)\]},.:;>?\u0964\u0965' -pat_la=re.compile(r'[ ](['+left_attach+r'])') - -right_attach=r'#$(\[{<@' -pat_ra=re.compile(r'(['+right_attach+r'])[ ]') - -lr_attach=r'-/\\' -pat_lra=re.compile(r'[ ](['+lr_attach+r'])[ ]') - -#donknow=u'&*+=^_|~' - -## date, numbers, section/article numbering -## TODO: handle indic numbers -pat_num_seq=re.compile(r'([0-9]+ [,.:/] )+[0-9]+') - -### e-mail address -#pat_num=re.compile(ur'[a-zA-Z]+[ ]? 
- -def trivial_detokenize_indic(text): - """detokenize string for Indian language scripts using Brahmi-derived scripts - - A trivial detokenizer which: - - - decides whether punctuation attaches to left/right or both - - handles number sequences - - handles quotes smartly (deciding left or right attachment) - - Args: - text (str): tokenized text to process - - Returns: - str: detokenized string - """ - - s=text - ### some normalizations - - #numbers and dates - new_s='' - prev=0 - for m in pat_num_seq.finditer(s): - start=m.start() - end=m.end() - if start>prev: - new_s=new_s+s[prev:start] - new_s=new_s+s[start:end].replace(' ','') - prev=end - - new_s=new_s+s[prev:] - s=new_s - - ### consective single quotes or backslashes become double quotes - #s=s.replace("' '", "''") - #s=s.replace("` `", '``') - - s=pat_lra.sub('\\1',s) - s=pat_la.sub('\\1',s) - s=pat_ra.sub('\\1',s) - - # assumes well formedness of quotes and alternates between right and left attach - - alt_attach='\'"`' - for punc in alt_attach: - cnt=0 - out_str=[] - for c in s: - if c == punc: - if cnt%2==0: - out_str.append('@RA') - else: - out_str.append('@LA') - cnt+=1 - else: - out_str.append(c) - - s=''.join(out_str).replace('@RA ',punc).replace(' @LA',punc - ).replace('@RA',punc).replace('@LA',punc) - - return s - -def trivial_detokenize(text,lang='hi'): - """detokenize string for languages of the Indian subcontinent - - A trivial detokenizer which: - - - decides whether punctuation attaches to left/right or both - - handles number sequences - - handles quotes smartly (deciding left or right attachment) - - Args: - text (str): tokenized text to process - - Returns: - str: detokenized string - - Raises: - IndicNlpException: If language is not supported - """ - if lang=='ur': - raise IndicNlpException('No detokenizer available for Urdu') - else: - return trivial_detokenize_indic(text) - -# if __name__ == '__main__': - -# if len(sys.argv)<4: -# print("Usage: python indic_detokenize.py ") -# sys.exit(1) - -# with open(sys.argv[1],'r', encoding='utf-8') as ifile: -# with open(sys.argv[2],'w', encoding='utf-8') as ofile: -# for line in ifile: -# detokenized_line=trivial_detokenize(line,sys.argv[3]) -# ofile.write(detokenized_line) diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/benchmark/dummy_lm.py b/spaces/ICML2022/OFA/fairseq/fairseq/benchmark/dummy_lm.py deleted file mode 100644 index c6246a0c0e338fa36244b3aa4fb57f189fbffcb6..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/benchmark/dummy_lm.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -from dataclasses import dataclass, field -from typing import Optional - -import torch -from .dummy_dataset import DummyDataset -from fairseq.data import Dictionary -from fairseq.dataclass import FairseqDataclass -from fairseq.tasks import FairseqTask, register_task -from omegaconf import II - - -logger = logging.getLogger(__name__) - - -@dataclass -class DummyLMConfig(FairseqDataclass): - dict_size: int = 49996 - dataset_size: int = 100000 - tokens_per_sample: int = field( - default=512, metadata={"help": "max sequence length"} - ) - add_bos_token: bool = False - batch_size: Optional[int] = II("dataset.batch_size") - max_tokens: Optional[int] = II("dataset.max_tokens") - max_target_positions: int = II("task.tokens_per_sample") - - -@register_task("dummy_lm", dataclass=DummyLMConfig) -class DummyLMTask(FairseqTask): - def __init__(self, cfg: DummyLMConfig): - super().__init__(cfg) - - # load dictionary - self.dictionary = Dictionary() - for i in range(cfg.dict_size): - self.dictionary.add_symbol("word{}".format(i)) - self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8 - logger.info("dictionary: {} types".format(len(self.dictionary))) - - seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1 - - self.dummy_src = seq[:-1] - self.dummy_tgt = seq[1:] - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. - Args: - split (str): name of the split (e.g., train, valid, test) - """ - if self.cfg.batch_size is not None: - bsz = self.cfg.batch_size - else: - bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample) - self.datasets[split] = DummyDataset( - { - "id": 1, - "net_input": { - "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), - "src_lengths": torch.full( - (bsz,), self.cfg.tokens_per_sample, dtype=torch.long - ), - }, - "target": torch.stack([self.dummy_tgt for _ in range(bsz)]), - "nsentences": bsz, - "ntokens": bsz * self.cfg.tokens_per_sample, - }, - num_items=self.cfg.dataset_size, - item_size=self.cfg.tokens_per_sample, - ) - - @property - def source_dictionary(self): - return self.dictionary - - @property - def target_dictionary(self): - return self.dictionary diff --git a/spaces/Iceclear/StableSR/StableSR/taming/data/base.py b/spaces/Iceclear/StableSR/StableSR/taming/data/base.py deleted file mode 100644 index e21667df4ce4baa6bb6aad9f8679bd756e2ffdb7..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/taming/data/base.py +++ /dev/null @@ -1,70 +0,0 @@ -import bisect -import numpy as np -import albumentations -from PIL import Image -from torch.utils.data import Dataset, ConcatDataset - - -class ConcatDatasetWithIndex(ConcatDataset): - """Modified from original pytorch code to return dataset idx""" - def __getitem__(self, idx): - if idx < 0: - if -idx > len(self): - raise ValueError("absolute value of index should not exceed dataset length") - idx = len(self) + idx - dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) - if dataset_idx == 0: - sample_idx = idx - else: - sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] - return self.datasets[dataset_idx][sample_idx], dataset_idx - - -class ImagePaths(Dataset): - def __init__(self, paths, size=None, random_crop=False, labels=None): - self.size = size - self.random_crop = random_crop - - self.labels = dict() if labels is None else labels - self.labels["file_path_"] = paths - self._length = len(paths) - - if self.size is not None and self.size > 0: - self.rescaler = 
albumentations.SmallestMaxSize(max_size = self.size) - if not self.random_crop: - self.cropper = albumentations.CenterCrop(height=self.size,width=self.size) - else: - self.cropper = albumentations.RandomCrop(height=self.size,width=self.size) - self.preprocessor = albumentations.Compose([self.rescaler, self.cropper]) - else: - self.preprocessor = lambda **kwargs: kwargs - - def __len__(self): - return self._length - - def preprocess_image(self, image_path): - image = Image.open(image_path) - if not image.mode == "RGB": - image = image.convert("RGB") - image = np.array(image).astype(np.uint8) - image = self.preprocessor(image=image)["image"] - image = (image/127.5 - 1.0).astype(np.float32) - return image - - def __getitem__(self, i): - example = dict() - example["image"] = self.preprocess_image(self.labels["file_path_"][i]) - for k in self.labels: - example[k] = self.labels[k][i] - return example - - -class NumpyPaths(ImagePaths): - def preprocess_image(self, image_path): - image = np.load(image_path).squeeze(0) # 3 x 1024 x 1024 - image = np.transpose(image, (1,2,0)) - image = Image.fromarray(image, mode="RGB") - image = np.array(image).astype(np.uint8) - image = self.preprocessor(image=image)["image"] - image = (image/127.5 - 1.0).astype(np.float32) - return image diff --git a/spaces/Jackflack09/diffuse-custom/Waifu2x/magnify.py b/spaces/Jackflack09/diffuse-custom/Waifu2x/magnify.py deleted file mode 100644 index 0cac3a8d1a562d15c84937889d19fbb63612cbbd..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/Waifu2x/magnify.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# file: test.py -# time: 05/12/2022 -# author: yangheng -# github: https://github.com/yangheng95 -# huggingface: https://huggingface.co/yangheng -# google scholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en -# Copyright (C) 2021. All Rights Reserved. -from pathlib import Path -from typing import Union - -import autocuda -import findfile -from pyabsa.utils.pyabsa_utils import fprint -from torchvision import transforms -from .utils.prepare_images import * -from .Models import * - - -class ImageMagnifier: - - def __init__(self): - self.device = autocuda.auto_cuda() - self.model_cran_v2 = CARN_V2(color_channels=3, mid_channels=64, conv=nn.Conv2d, - single_conv_size=3, single_conv_group=1, - scale=2, activation=nn.LeakyReLU(0.1), - SEBlock=True, repeat_blocks=3, atrous=(1, 1, 1)) - - self.model_cran_v2 = network_to_half(self.model_cran_v2) - self.checkpoint = findfile.find_cwd_file("CARN_model_checkpoint.pt") - self.model_cran_v2.load_state_dict(torch.load(self.checkpoint, map_location='cpu')) - # if use GPU, then comment out the next line so it can use fp16. 
- self.model_cran_v2 = self.model_cran_v2.float().to(self.device) - self.model_cran_v2.to(self.device) - - def __image_scale(self, img, scale_factor: int = 2): - img_splitter = ImageSplitter(seg_size=64, scale_factor=scale_factor, boarder_pad_size=3) - img_patches = img_splitter.split_img_tensor(img, scale_method=None, img_pad=0) - with torch.no_grad(): - if self.device != 'cpu': - with torch.cuda.amp.autocast(): - out = [self.model_cran_v2(i.to(self.device)) for i in img_patches] - else: - with torch.cpu.amp.autocast(): - out = [self.model_cran_v2(i) for i in img_patches] - img_upscale = img_splitter.merge_img_tensor(out) - - final = torch.cat([img_upscale]) - - return transforms.ToPILImage()(final[0]) - - def magnify(self, img, scale_factor: int = 2): - fprint("scale factor reset to:", scale_factor//2*2) - _scale_factor = scale_factor - while _scale_factor // 2 > 0: - img = self.__image_scale(img, scale_factor=2) - _scale_factor = _scale_factor // 2 - return img - - def magnify_from_file(self, img_path: Union[str, Path], scale_factor: int = 2, save_img: bool = True): - - if not os.path.exists(img_path): - raise FileNotFoundError("Path is not found.") - if os.path.isfile(img_path): - try: - img = Image.open(img_path) - img = self.magnify(img, scale_factor) - if save_img: - img.save(os.path.join(img_path)) - except Exception as e: - fprint(img_path, e) - fprint(img_path, "Done.") - - elif os.path.isdir(img_path): - for path in os.listdir(img_path): - try: - img = Image.open(os.path.join(img_path, path)) - img = self.magnify(img, scale_factor) - if save_img: - img.save(os.path.join(img_path, path)) - except Exception as e: - fprint(path, e) - continue - fprint(path, "Done.") - else: - raise TypeError("Path is not a file or directory.") diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py b/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py deleted file mode 100644 index 739de8ebe620b5c99168720340a2485fa61d5a06..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Optional, Tuple, Union - -import torch - -from ...models import UNet2DModel -from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput -from ...schedulers import KarrasVeScheduler - - -class KarrasVePipeline(DiffusionPipeline): - r""" - Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and - the VE column of Table 1 from [1] for reference. - - [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." - https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic - differential equations." 
https://arxiv.org/abs/2011.13456 - - Parameters: - unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. - scheduler ([`KarrasVeScheduler`]): - Scheduler for the diffusion process to be used in combination with `unet` to denoise the encoded image. - """ - - # add type hints for linting - unet: UNet2DModel - scheduler: KarrasVeScheduler - - def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler): - super().__init__() - self.register_modules(unet=unet, scheduler=scheduler) - - @torch.no_grad() - def __call__( - self, - batch_size: int = 1, - num_inference_steps: int = 50, - generator: Optional[torch.Generator] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - **kwargs, - ) -> Union[Tuple, ImagePipelineOutput]: - r""" - Args: - batch_size (`int`, *optional*, defaults to 1): - The number of images to generate. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if - `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the - generated images. - """ - - img_size = self.unet.config.sample_size - shape = (batch_size, 3, img_size, img_size) - - model = self.unet - - # sample x_0 ~ N(0, sigma_0^2 * I) - sample = torch.randn(*shape) * self.scheduler.init_noise_sigma - sample = sample.to(self.device) - - self.scheduler.set_timesteps(num_inference_steps) - - for t in self.progress_bar(self.scheduler.timesteps): - # here sigma_t == t_i from the paper - sigma = self.scheduler.schedule[t] - sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0 - - # 1. Select temporarily increased noise level sigma_hat - # 2. Add new noise to move from sample_i to sample_hat - sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator) - - # 3. Predict the noise residual given the noise magnitude `sigma_hat` - # The model inputs and output are adjusted by following eq. (213) in [1]. - model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample - - # 4. Evaluate dx/dt at sigma_hat - # 5. Take Euler step from sigma to sigma_prev - step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat) - - if sigma_prev != 0: - # 6. Apply 2nd order correction - # The model inputs and output are adjusted by following eq. (213) in [1]. 
- model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample - step_output = self.scheduler.step_correct( - model_output, - sigma_hat, - sigma_prev, - sample_hat, - step_output.prev_sample, - step_output["derivative"], - ) - sample = step_output.prev_sample - - sample = (sample / 2 + 0.5).clamp(0, 1) - image = sample.cpu().permute(0, 2, 3, 1).numpy() - if output_type == "pil": - image = self.numpy_to_pil(sample) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/Jamkonams/AutoGPT/autogpt/__main__.py b/spaces/Jamkonams/AutoGPT/autogpt/__main__.py deleted file mode 100644 index 128f9eea4900429e88276abdde3419b806001ac7..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/autogpt/__main__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Auto-GPT: A GPT powered AI Assistant""" -import autogpt.cli - -if __name__ == "__main__": - autogpt.cli.main() diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/locale/extract_locale.py b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/locale/extract_locale.py deleted file mode 100644 index 32b0924bd6dffe150cb3e481ddadef836b91b83c..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/locale/extract_locale.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import json -import re - -# Define regular expression patterns -pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)' - -# Load the .py file -with open('ChuanhuChatbot.py', 'r', encoding='utf-8') as f: - contents = f.read() - -# Load the .py files in the modules folder -for filename in os.listdir("modules"): - if filename.endswith(".py"): - with open(os.path.join("modules", filename), "r", encoding="utf-8") as f: - contents += f.read() - -# Matching with regular expressions -matches = re.findall(pattern, contents, re.DOTALL) - -# Convert to key/value pairs -data = {match.strip('()"'): '' for match in matches} - -# Save as a JSON file -with open('labels.json', 'w', encoding='utf-8') as f: - json.dump(data, f, ensure_ascii=False, indent=4) \ No newline at end of file diff --git a/spaces/Jonni/04-Gradio_SOTA/README.md b/spaces/Jonni/04-Gradio_SOTA/README.md deleted file mode 100644 index 61c62e18a7f447fb7b6df4ad1c5e83803ce624ea..0000000000000000000000000000000000000000 --- a/spaces/Jonni/04-Gradio_SOTA/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 04-Gradio SOTA -emoji: 🚀 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/JunchuanYu/SydneyAI-plus/app.py b/spaces/JunchuanYu/SydneyAI-plus/app.py deleted file mode 100644 index f86a3fd8ced55ae5b81bedbcdf4bb423129d56a4..0000000000000000000000000000000000000000 --- a/spaces/JunchuanYu/SydneyAI-plus/app.py +++ /dev/null @@ -1,115 +0,0 @@ -import gradio as gr -import os -import sys -import argparse - -from huggingface_hub import hf_hub_download - -my_api_key = os.environ.get('my_api_key') - -if my_api_key == "empty": - print("Please give a api key!") - sys.exit(1) - -token = os.environ['HUB_TOKEN'] -loc1 =hf_hub_download(repo_id="JunchuanYu/files_sydney", filename="utils.py",repo_type="dataset",local_dir='.',token=token) -loc2 =hf_hub_download(repo_id="JunchuanYu/files_sydney", filename="chat_func.py",repo_type="dataset",local_dir='.',token=token) - -sys.path.append(loc1) -sys.path.append(loc2) - -from utils import * -from chat_func import * - 
-gr.Chatbot.postprocess = postprocess - - -with open("css_new.css", "r", encoding="utf-8") as f: - css = f.read() - -with gr.Blocks(css=css,theme='gradio/soft') as demo: - history = gr.State([]) - token_count = gr.State([]) - promptTemplates = gr.State(load_template('myprompts.json', mode=2)) - user_api_key = gr.State(my_api_key) - TRUECOMSTANT = gr.State(True) - FALSECONSTANT = gr.State(False) - gr.Markdown(title) - - with gr.Row().style(equal_height=True): - with gr.Column(scale=12): - # gr.Markdown(""" - # ##### openai方面算力紧张导致Sydney网站运行很不稳定时常掉线,据报道OpenAI 的Plus版API一度停售,且限制3小时内只能问100个问题,预计这种情况要持续一段时间,请大家耐心等待,目前没有好的解决方法 - # """) - with gr.Accordion("Build by [45度科研人](WeChat Public Accounts)", open=False): - gr.Markdown(description) - with gr.Column(scale=1): - with gr.Box(): - toggle_dark = gr.Button(value="Toggle Dark").style(full_width=True) - - with gr.Row(scale=1).style(equal_height=True): - with gr.Column(scale=5): - with gr.Column(): - chatbot = gr.Chatbot() - user_input = gr.Textbox(show_label=False, placeholder="Enter text and press submit", visible=True).style(container=False) - submitBtn = gr.Button("Submit",variant="primary").style(container=False) - emptyBtn = gr.Button("Restart Conversation",variant="secondary") - status_display = gr.Markdown("") - - with gr.Column(): - with gr.Column(min_width=50): - with gr.Tab(label="ChatGPT"): - with gr.Column(): - with gr.Row(): - keyTxt = gr.Textbox(show_label=False, placeholder=f"You can input your own openAI API-key",value=hide_middle_chars(my_api_key),visible=True, type="password", label="API-Key") - systemPromptTxt = gr.Textbox(show_label=True,placeholder=f"Set a custom insruction for the chatbot: You are a helpful assistant.",label="Custom prompt",value=initial_prompt,lines=10,) - - with gr.Row(): - templateSelectDropdown = gr.Dropdown(label="load from template",choices=load_template('myprompts.json', mode=1), - multiselect=False,value=load_template('myprompts.json', mode=1)[0],).style(container=False) - - with gr.Tab(label="Settings"): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=3): - saveFileName = gr.Textbox(show_label=True, placeholder=f"output file name...",label='Save conversation history', value="") - with gr.Column(scale=1): - exportMarkdownBtn = gr.Button("Save") - with gr.Row(): - with gr.Column(scale=1): - downloadFile = gr.File(interactive=False) - gr.Markdown(""" - ###
    You can follow the WeChat public account [45度科研人] and leave me a message!
    - """) - - toggle_dark.click(None,_js=""" - () => { - document.body.classList.toggle('dark'); - document.querySelector('gradio-app').style.backgroundColor = 'var(--color-background-primary)' - }""",) - - keyTxt.submit(submit_key, keyTxt, [user_api_key, status_display]) - keyTxt.change(submit_key, keyTxt, [user_api_key, status_display]) - # Chatbot - user_input.submit(predict,[user_api_key,systemPromptTxt,history,user_input,chatbot,token_count,],[chatbot, history, status_display, token_count],show_progress=True) - user_input.submit(reset_textbox, [], [user_input]) - - submitBtn.click(predict,[user_api_key,systemPromptTxt,history,user_input,chatbot,token_count,],[chatbot, history, status_display, token_count],show_progress=True) - submitBtn.click(reset_textbox, [], [user_input]) - - emptyBtn.click(reset_state,outputs=[chatbot, history, token_count, status_display],show_progress=True,) - - templateSelectDropdown.change(get_template_content,[promptTemplates, templateSelectDropdown, systemPromptTxt],[systemPromptTxt],show_progress=True,) - exportMarkdownBtn.click(export_markdown,[saveFileName, systemPromptTxt, history, chatbot],downloadFile,show_progress=True,) - downloadFile.change(load_chat_history,[downloadFile, systemPromptTxt, history, chatbot],[saveFileName, systemPromptTxt, history, chatbot],) - - -if __name__ == "__main__": - demo.queue().launch(debug=False,show_api=False) diff --git a/spaces/Justin-12138/FSALA/app.py b/spaces/Justin-12138/FSALA/app.py deleted file mode 100644 index 8372b108bf1038c003660f24ad48159608e784f6..0000000000000000000000000000000000000000 --- a/spaces/Justin-12138/FSALA/app.py +++ /dev/null @@ -1,21 +0,0 @@ -import gradio as gr -from src import des, fs - -if __name__ == '__main__': - iface = gr.Interface( - fn=fs, - title=des("title"), - description=des("description"), - article=des("article"), - inputs=des("inputs"), - outputs=des("outputs"), - examples=[ - ["example_data.csv", 'MRMR_FCQ', 20, 'RF', "test.csv"], - ["example_data.csv", 'MRMR_FCD', 10, 'SVM', "test.csv"], - ["example_data.csv", 'MRMR_FCD', 30, 'KNN', "test.csv"], - ["example_data.csv", 'Lasso', 30, 'DT', "test.csv"], - ["example_data.csv", 'Lasso', 20, 'Naive Bayes', "test.csv"], - ], - allow_flagging="never" - ) - iface.launch() diff --git a/spaces/Justin-Choo/Dreamlikeart-Anime-ZH/app.py b/spaces/Justin-Choo/Dreamlikeart-Anime-ZH/app.py deleted file mode 100644 index b62e6ab056e2d4e2d096624797f2481716583bc9..0000000000000000000000000000000000000000 --- a/spaces/Justin-Choo/Dreamlikeart-Anime-ZH/app.py +++ /dev/null @@ -1,151 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path -import random -import string -import time -from queue import Queue -from threading import Thread -import emoji - -text_gen=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion") -def get_prompts(prompt_text): - if prompt_text: - return text_gen("photo anime, masterpiece, high quality, absurdres, " + prompt_text) - else: - return text_gen("") -proc1=gr.Interface.load("models/dreamlike-art/dreamlike-anime-1.0") - -def restart_script_periodically(): - while True: - random_time = random.randint(540, 600) - time.sleep(random_time) - os.execl(sys.executable, sys.executable, *sys.argv) - - -restart_thread = Thread(target=restart_script_periodically, daemon=True) -restart_thread.start() - - -queue = Queue() -queue_threshold = 100 - -def add_random_noise(prompt, noise_level=0.00): - if noise_level == 0: - noise_level = 0.00 - percentage_noise = noise_level * 5 - num_noise_chars = 
int(len(prompt) * (percentage_noise/100)) - noise_indices = random.sample(range(len(prompt)), num_noise_chars) - prompt_list = list(prompt) - noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits) - noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈']) - for index in noise_indices: - prompt_list[index] = random.choice(noise_chars) - return "".join(prompt_list) - - - -def send_it1(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output1 = proc1(prompt_with_noise) - return output1 - -def send_it2(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output2 = proc1(prompt_with_noise) - return output2 - -#def send_it3(inputs, noise_level, proc1=proc1): - #prompt_with_noise = add_random_noise(inputs, noise_level) - #while queue.qsize() >= queue_threshold: - #time.sleep(2) - #queue.put(prompt_with_noise) - #output3 = proc1(prompt_with_noise) - #return output3 - -#def send_it4(inputs, noise_level, proc1=proc1): - #prompt_with_noise = add_random_noise(inputs, noise_level) - #while queue.qsize() >= queue_threshold: - #time.sleep(2) - #queue.put(prompt_with_noise) - #output4 = proc1(prompt_with_noise) - #return output4 - - -with gr.Blocks(css='style.css') as demo: - gr.HTML( - """ -
    - Dreamlike Anime 1.0
    - Noise Level: controls how much randomness is added to the input before it is sent to the model. A higher Noise Level produces more varied outputs, while a lower Noise Level produces more similar outputs.
    - """ - ) - with gr.Column(elem_id="col-container"): - with gr.Row(variant="compact"): - input_text = gr.Textbox( - label="Short Prompt", - show_label=False, - max_lines=2, - placeholder="没有想法?没问题,只需输入基本的点子(用英文谢谢),点击“Magic Prompt”按钮即可", - ).style( - container=False, - ) - see_prompts = gr.Button("✨ Magic Prompt ✨").style(full_width=False) - - - with gr.Row(variant="compact"): - prompt = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=2, - placeholder="完整的提示词(一样,用英文蟹蟹):", - ).style( - container=False, - ) - run = gr.Button("生成图片").style(full_width=False) - - with gr.Row(): - with gr.Row(): - noise_level = gr.Slider(minimum=0.0, maximum=3, step=0.1, label="Noise Level") - with gr.Row(): - with gr.Row(): - output1=gr.Image(label="Dreamlike Anime 1.0",show_label=False) - output2=gr.Image(label="Dreamlike Anime 1.0",show_label=False) - - - see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False) - run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1]) - run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2]) - - - with gr.Row(): - gr.HTML( - """ - -
    - """ -) - - demo.launch(enable_queue=True, inline=True) - block.queue(concurrency_count=100) \ No newline at end of file diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/tokenize/sentence_tokenize.py b/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/tokenize/sentence_tokenize.py deleted file mode 100644 index 9b451355f3f1dc15e68cf9bd7660615c153339e1..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/tokenize/sentence_tokenize.py +++ /dev/null @@ -1,273 +0,0 @@ -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -# - -#Program for sentence splitting of Indian language input -# -# @author Anoop Kunchukuttan -# -""" -Sentence splitter for Indian languages. Contains a rule-based -sentence splitter that can understand common non-breaking phrases -in many Indian languages. -""" - -import re - -from indicnlp.transliterate import unicode_transliterate -from indicnlp import langinfo - - -## for language which have danda as delimiter -## period is not part of the sentence delimiters -DELIM_PAT_DANDA=re.compile(r'[\?!\u0964\u0965]') - -## for languages which don't have danda as delimiter -DELIM_PAT_NO_DANDA=re.compile(r'[\.\?!\u0964\u0965\uAAF1\uAAF0\uABEB\uABEC\uABED\uABEE\uABEF\u1C7E\u1C7F]') - -## pattern to check for presence of danda in text -CONTAINS_DANDA=re.compile(r'[\u0964\u0965]') - -def is_acronym_abbvr(text,lang): - """Is the text a non-breaking phrase - - Args: - text (str): text to check for non-breaking phrase - lang (str): ISO 639-2 language code - - Returns: - boolean: true if `text` is a non-breaking phrase - """ - - ack_chars = { - ## acronym for latin characters - 'ए', 'ऎ', - 'बी', 'बि', - 'सी', 'सि', - 'डी', 'डि', - 'ई', 'इ', - 'एफ', 'ऎफ', - 'जी', 'जि', - 'एच','ऎच', - 'आई', 'आइ','ऐ', - 'जे', 'जॆ', - 'के', 'कॆ', - 'एल', 'ऎल', - 'एम','ऎम', - 'एन','ऎन', - 'ओ', 'ऒ', - 'पी', 'पि', - 'क्यू', 'क्यु', - 'आर', - 'एस','ऎस', - 'टी', 'टि', - 'यू', 'यु', - 'वी', 'वि', 'व्ही', 'व्हि', - 'डब्ल्यू', 'डब्ल्यु', - 'एक्स','ऎक्स', - 'वाय', - 'जेड', 'ज़ेड', - ## add halant to the previous English character mappings. - 'एफ्', - 'ऎफ्', - 'एच्', - 'ऎच्', - 'एल्', - 'ऎल्', - 'एम्', - 'ऎम्', - 'एन्', - 'ऎन्', - 'आर्', - 'एस्', - 'ऎस्', - 'एक्स्', - 'ऎक्स्', - 'वाय्', - 'जेड्', 'ज़ेड्', - - #Indic vowels - 'ऄ', - 'अ', - 'आ', - 'इ', - 'ई', - 'उ', - 'ऊ', - 'ऋ', - 'ऌ', - 'ऍ', - 'ऎ', - 'ए', - 'ऐ', - 'ऑ', - 'ऒ', - 'ओ', - 'औ', - 'ॠ', - 'ॡ', - - #Indic consonants - 'क', - 'ख', - 'ग', - 'घ', - 'ङ', - 'च', - 'छ', - 'ज', - 'झ', - 'ञ', - 'ट', - 'ठ', - 'ड', - 'ढ', - 'ण', - 'त', - 'थ', - 'द', - 'ध', - 'न', - 'ऩ', - 'प', - 'फ', - 'ब', - 'भ', - 'म', - 'य', - 'र', - 'ऱ', - 'ल', - 'ळ', - 'ऴ', - 'व', - 'श', - 'ष', - 'स', - 'ह', - - ## abbreviation - 'श्री', - 'डॉ', - 'कु', - 'चि', - 'सौ', - } - - return unicode_transliterate.UnicodeIndicTransliterator.transliterate(text,lang,'hi') in ack_chars - -def sentence_split(text,lang,delim_pat='auto'): ## New signature - """split the text into sentences - - A rule-based sentence splitter for Indian languages written in - Brahmi-derived scripts. The text is split at sentence delimiter - boundaries. The delimiters can be configured by passing appropriate - parameters. - - The sentence splitter can identify non-breaking phrases like - single letter, common abbreviations/honorofics for some Indian - languages. 
- - Args: - text (str): text to split into sentence - lang (str): ISO 639-2 language code - delim_pat (str): regular expression to identify sentence delimiter characters. If set to 'auto', the delimiter pattern is chosen automatically based on the language and text. - - - Returns: - list: list of sentences identified from the input text - """ - - if lang == "ur": - from urduhack.tokenization import sentence_tokenizer - sentences = sentence_tokenizer(text) - return sentences - - #print('Input: {}'.format(delim_pat)) - if delim_pat=='auto': - if langinfo.is_danda_delim(lang): - # in modern texts it is possible that period is used as delimeter - # instead of DANDA. Hence, a check. Use danda delimiter pattern - # only if text contains at least one danda - if CONTAINS_DANDA.search(text) is None: - delim_pat=DELIM_PAT_NO_DANDA - #print('LANG has danda delim. TEXT_CONTAINS_DANDA: FALSE --> DELIM_PAT_NO_DANDA') - else: - delim_pat=DELIM_PAT_DANDA - #print('LANG has danda delim. TEXT_CONTAINS_DANDA: TRUE --> DELIM_PAT_DANDA') - else: - delim_pat=DELIM_PAT_NO_DANDA - #print('LANG has no danda delim --> DELIM_PAT_NO_DANDA') - - ## otherwise, assume the caller set the delimiter pattern - - ### Phase 1: break on sentence delimiters. - cand_sentences=[] - begin=0 - text = text.strip() - for mo in delim_pat.finditer(text): - p1=mo.start() - p2=mo.end() - - ## NEW - if p1>0 and text[p1-1].isnumeric(): - continue - - end=p1+1 - s= text[begin:end].strip() - if len(s)>0: - cand_sentences.append(s) - begin=p1+1 - - s= text[begin:].strip() - if len(s)>0: - cand_sentences.append(s) - - if not delim_pat.search('.'): - ## run phase 2 only if delimiter pattern contains period - #print('No need to run phase2') - return cand_sentences -# print(cand_sentences) -# print('====') - -# return cand_sentences - - ### Phase 2: Address the fact that '.' may not always be a sentence delimiter - ### Method: If there is a run of lines containing only a word (optionally) and '.', - ### merge these lines as well one sentence preceding and succeeding this run of lines. - final_sentences=[] - sen_buffer='' - bad_state=False - - for i, sentence in enumerate(cand_sentences): - words=sentence.split(' ') - #if len(words)<=2 and words[-1]=='.': - if len(words)==1 and sentence[-1]=='.': - bad_state=True - sen_buffer = sen_buffer + ' ' + sentence - ## NEW condition - elif sentence[-1]=='.' 
and is_acronym_abbvr(words[-1][:-1],lang): - if len(sen_buffer)>0 and not bad_state: - final_sentences.append(sen_buffer) - bad_state=True - sen_buffer = sentence - elif bad_state: - sen_buffer = sen_buffer + ' ' + sentence - if len(sen_buffer)>0: - final_sentences.append(sen_buffer) - sen_buffer='' - bad_state=False - else: ## good state - if len(sen_buffer)>0: - final_sentences.append(sen_buffer) - sen_buffer=sentence - bad_state=False - - if len(sen_buffer)>0: - final_sentences.append(sen_buffer) - - return final_sentences diff --git a/spaces/Kunal7/Gradio-Squats/utils.py b/spaces/Kunal7/Gradio-Squats/utils.py deleted file mode 100644 index d3efcc808774ddaa11a60aee59e2151c87e9080a..0000000000000000000000000000000000000000 --- a/spaces/Kunal7/Gradio-Squats/utils.py +++ /dev/null @@ -1,160 +0,0 @@ -import cv2 -import mediapipe as mp -import numpy as np - -correct = cv2.imread('right.png') -correct = cv2.cvtColor(correct, cv2.COLOR_BGR2RGB) -incorrect = cv2.imread('wrong.png') -incorrect = cv2.cvtColor(incorrect, cv2.COLOR_BGR2RGB) - -def draw_rounded_rect(img, rect_start, rect_end, corner_width, box_color): - - x1, y1 = rect_start - x2, y2 = rect_end - w = corner_width - - # draw filled rectangles - cv2.rectangle(img, (x1 + w, y1), (x2 - w, y1 + w), box_color, -1) - cv2.rectangle(img, (x1 + w, y2 - w), (x2 - w, y2), box_color, -1) - cv2.rectangle(img, (x1, y1 + w), (x1 + w, y2 - w), box_color, -1) - cv2.rectangle(img, (x2 - w, y1 + w), (x2, y2 - w), box_color, -1) - cv2.rectangle(img, (x1 + w, y1 + w), (x2 - w, y2 - w), box_color, -1) - - - # draw filled ellipses - cv2.ellipse(img, (x1 + w, y1 + w), (w, w), - angle = 0, startAngle = -90, endAngle = -180, color = box_color, thickness = -1) - - cv2.ellipse(img, (x2 - w, y1 + w), (w, w), - angle = 0, startAngle = 0, endAngle = -90, color = box_color, thickness = -1) - - cv2.ellipse(img, (x1 + w, y2 - w), (w, w), - angle = 0, startAngle = 90, endAngle = 180, color = box_color, thickness = -1) - - cv2.ellipse(img, (x2 - w, y2 - w), (w, w), - angle = 0, startAngle = 0, endAngle = 90, color = box_color, thickness = -1) - - return img - - -def draw_dotted_line(frame, lm_coord, start, end, line_color): - pix_step = 0 - - for i in range(start, end+1, 8): - cv2.circle(frame, (lm_coord[0], i+pix_step), 2, line_color, -1, lineType=cv2.LINE_AA) - - return frame - - -def draw_text( - img, - msg, - width = 7, - font=cv2.FONT_HERSHEY_SIMPLEX, - pos=(0, 0), - font_scale=1, - font_thickness=2, - text_color=(0, 255, 0), - text_color_bg=(0, 0, 0), - box_offset=(20, 10), - overlay_image = False, - overlay_type = None -): - - offset = box_offset - x, y = pos - text_size, _ = cv2.getTextSize(msg, font, font_scale, font_thickness) - text_w, text_h = text_size - - rec_start = tuple(p - o for p, o in zip(pos, offset)) - rec_end = tuple(m + n - o for m, n, o in zip((x + text_w, y + text_h), offset, (25, 0))) - - resize_height = 0 - - if overlay_image: - resize_height = rec_end[1] - rec_start[1] - - img = draw_rounded_rect(img, rec_start, (rec_end[0]+resize_height, rec_end[1]), width, text_color_bg) - if overlay_type == "correct": - overlay_res = cv2.resize(correct, (resize_height, resize_height), interpolation = cv2.INTER_AREA) - elif overlay_type == "incorrect": - overlay_res = cv2.resize(incorrect, (resize_height, resize_height), interpolation = cv2.INTER_AREA) - - img[rec_start[1]:rec_start[1]+resize_height, rec_start[0]+width:rec_start[0]+width+resize_height] = overlay_res - - else: - img = draw_rounded_rect(img, rec_start, rec_end, width, text_color_bg) - - - 
cv2.putText( - img, - msg, - (int(rec_start[0]+resize_height + 8), int(y + text_h + font_scale - 1)), - font, - font_scale, - text_color, - font_thickness, - cv2.LINE_AA, - ) - - return text_size - - -def find_angle(p1, p2, ref_pt = np.array([0,0])): - p1_ref = p1 - ref_pt - p2_ref = p2 - ref_pt - - cos_theta = (np.dot(p1_ref,p2_ref)) / (1.0 * np.linalg.norm(p1_ref) * np.linalg.norm(p2_ref)) - theta = np.arccos(np.clip(cos_theta, -1.0, 1.0)) - - degree = int(180 / np.pi) * theta - - return int(degree) - - - -def get_landmark_array(pose_landmark, key, frame_width, frame_height): - - denorm_x = int(pose_landmark[key].x * frame_width) - denorm_y = int(pose_landmark[key].y * frame_height) - - return np.array([denorm_x, denorm_y]) - - - -def get_landmark_features(kp_results, dict_features, feature, frame_width, frame_height): - - if feature == 'nose': - return get_landmark_array(kp_results, dict_features[feature], frame_width, frame_height) - - elif feature == 'left' or 'right': - shldr_coord = get_landmark_array(kp_results, dict_features[feature]['shoulder'], frame_width, frame_height) - elbow_coord = get_landmark_array(kp_results, dict_features[feature]['elbow'], frame_width, frame_height) - wrist_coord = get_landmark_array(kp_results, dict_features[feature]['wrist'], frame_width, frame_height) - hip_coord = get_landmark_array(kp_results, dict_features[feature]['hip'], frame_width, frame_height) - knee_coord = get_landmark_array(kp_results, dict_features[feature]['knee'], frame_width, frame_height) - ankle_coord = get_landmark_array(kp_results, dict_features[feature]['ankle'], frame_width, frame_height) - foot_coord = get_landmark_array(kp_results, dict_features[feature]['foot'], frame_width, frame_height) - - return shldr_coord, elbow_coord, wrist_coord, hip_coord, knee_coord, ankle_coord, foot_coord - - else: - raise ValueError("feature needs to be either 'nose', 'left' or 'right") - - -def get_mediapipe_pose( - static_image_mode = False, - model_complexity = 1, - smooth_landmarks = True, - min_detection_confidence = 0.5, - min_tracking_confidence = 0.5 - - ): - pose = mp.solutions.pose.Pose( - static_image_mode = static_image_mode, - model_complexity = model_complexity, - smooth_landmarks = smooth_landmarks, - min_detection_confidence = min_detection_confidence, - min_tracking_confidence = min_tracking_confidence - ) - return pose \ No newline at end of file diff --git a/spaces/KyanChen/RSPrompter/mmpl/models/necks/sam_prompt_generator.py b/spaces/KyanChen/RSPrompter/mmpl/models/necks/sam_prompt_generator.py deleted file mode 100644 index 1a78f0c9926b3a0c60cb9285b9c27bf4c6346962..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/models/necks/sam_prompt_generator.py +++ /dev/null @@ -1,971 +0,0 @@ -import copy -import math -from typing import Type, Tuple - -import einops -import torch -import torch.nn as nn -from einops import rearrange -from mmcv.cnn import ConvModule -from mmcv.cnn.bricks.transformer import build_transformer_layer -from torch import Tensor - -from mmdet.models import SinePositionalEncoding -from mmpl.registry import MODELS -import torch.nn.functional as F - - -@MODELS.register_module() -class SAMTransformerPromptGenNeck(nn.Module): - def __init__( - self, - prompt_shape=(100, 6), - in_channels=[1280]*16, - out_channels=256, - positional_encoding=dict(num_feats=128, normalize=True), - n_classes=2, - kernel_size=3, - stride=1, - norm_cfg=None, - act_cfg=dict(type='ReLU') - ): - super(SAMTransformerPromptGenNeck, self).__init__() - 
self.in_channels = in_channels - self.kernel_size = kernel_size - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.out_put_channels = out_channels - self.n_classes = n_classes - self.stride = stride - - self.prompt_shape = prompt_shape - self.num_queries = prompt_shape[0] - self.per_query_point = prompt_shape[1] - - if isinstance(in_channels, list): - self.pre_layers = nn.ModuleList() - inner_channel = 32 - for idx, channel in enumerate(in_channels): - self.pre_layers.append( - nn.Sequential( - ConvModule( - channel, - inner_channel, - kernel_size=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ConvModule( - inner_channel, - inner_channel*2, - kernel_size=kernel_size, - padding=kernel_size // 2, - stride=self.stride, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ConvModule( - inner_channel*2, - inner_channel, - kernel_size=kernel_size, - padding=kernel_size // 2, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ) - ) - self.pre_layers.append( - nn.Sequential( - ConvModule( - inner_channel * len(in_channels), - out_channels, - kernel_size=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ConvModule( - out_channels, - out_channels, - kernel_size=kernel_size, - padding=kernel_size // 2, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ) - ) - - self.generator_pe = SinePositionalEncoding(**positional_encoding) - self.transformer = self.build_transformer() - self.query_feat = nn.Embedding(self.num_queries, out_channels) - self.query_emb = nn.Embedding(self.num_queries, out_channels) - - self.output_upscaling = nn.Sequential( - nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), - nn.BatchNorm2d(out_channels), - nn.GELU(), - nn.UpsamplingBilinear2d(scale_factor=2), - nn.Conv2d(out_channels, out_channels // 4, kernel_size=3, padding=1), - nn.BatchNorm2d(out_channels // 4), - nn.GELU(), - nn.UpsamplingBilinear2d(scale_factor=2), - nn.Conv2d(out_channels // 4, out_channels // 8, kernel_size=3, padding=1), - nn.BatchNorm2d(out_channels // 8), - nn.GELU(), - nn.UpsamplingBilinear2d(scale_factor=2), - nn.Conv2d(out_channels // 8, out_channels // 8, kernel_size=3, padding=1), - ) - - self.cls_head = nn.Sequential( - nn.Linear(out_channels, out_channels//2), - nn.ReLU(), - nn.Linear(out_channels//2, n_classes) - ) - - # self.point_emb = nn.Sequential( - # nn.Linear(out_channels, out_channels), - # nn.ReLU(), - # nn.Linear(out_channels, out_channels), - # nn.ReLU(), - # nn.Linear(out_channels, self.per_query_point * out_channels) - # ) - self.output_hypernetworks_mlps = MLP(out_channels, out_channels, out_channels // 8, 3) - - - def build_transformer( - self, num_encoder_layers=2, num_decoder_layers=3, embed_dims=256, num_heads=8, - mlp_ratio=2, dropout_rate=0.0, act_cfg=dict(type="gelu")): - """Build transformer decoder.""" - # transformer = nn.Transformer( - # d_model=embed_dims, nhead=num_heads, num_encoder_layers=num_encoder_layers, - # num_decoder_layers=num_decoder_layers, dim_feedforward=mlp_ratio * embed_dims, - # dropout=dropout_rate, activation=act_cfg['type'], batch_first=True, norm_first=True, - # ) - transformer = Transformer(depth=2) - return transformer - - def init_weights(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, inputs, prompt_encoder, mask_decoder): - - img_embs, inner_states = inputs - if hasattr(self, 'pre_layers'): - inner_states = inner_states[-len(self.in_channels):] - inner_states = [einops.rearrange(x, 'b h w c -> b c h w') for x in inner_states] - 
inner_states = [layer(x) for layer, x in zip(self.pre_layers[:-1], inner_states)] - img_feats = self.pre_layers[-1](torch.cat(inner_states, dim=1)) - bs, c, h, w = img_feats.shape - mask_pe = torch.zeros((bs, h, w), device=img_feats.device) - img_feats_pe = self.generator_pe(mask_pe) - query_feat = self.query_feat.weight.unsqueeze(0).expand(bs, -1, -1) # Bx256x256 - query_emb = self.query_emb.weight.unsqueeze(0).expand(bs, -1, -1) - img_feats, query_feats = self.transformer( - image_embedding=img_feats, - image_pe=img_feats_pe, - point_embedding=query_feat, - point_pe=query_emb) - cls_logits = self.cls_head(query_feats) - # point_embs = self.point_emb(query_feats) - # point_embs = rearrange(point_embs, 'b n (t c) -> b n t c', t=self.per_query_point) # Bx100x6x256 - - src = img_feats.transpose(1, 2).view(bs, c, h, w) - upscaled_embedding = self.output_upscaling(src) - hyper_in = self.output_hypernetworks_mlps(query_feats) - b, c, h, w = upscaled_embedding.shape - l1_masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) - - # dense_masks = einops.rearrange(l1_masks, 'b (n t) h w -> (b n) t h w', t=1) - # sparse, dense = prompt_encoder(points=None, boxes=None, masks=dense_masks) - # dense = einops.rearrange(dense, '(b n) t h w -> b n t h w', n=self.num_queries) - - # l2_masks = [] - # iou_preds = [] - # for curr_embedding, sparse_embeddings, dense_embeddings in zip(img_embs, point_embs, dense): - # low_res_masks, iou_predictions = mask_decoder( - # image_embeddings=curr_embedding.unsqueeze(0), - # image_pe=prompt_encoder.get_dense_pe(), - # sparse_prompt_embeddings=sparse_embeddings, - # dense_prompt_embeddings=dense_embeddings - # ) - # l2_masks.append(low_res_masks[:, 0]) - # iou_preds.append(iou_predictions[:, 0]) - # l2_masks = torch.stack(l2_masks, dim=0) - # iou_preds = torch.stack(iou_preds, dim=0) - - l2_masks = None - iou_preds = None - - return cls_logits, l1_masks, l2_masks, iou_preds - - -@MODELS.register_module() -class SAMPromptConvNeck(nn.Module): - def __init__( - self, - prompt_shape=(100, 5), - img_feat_channels=1280, - out_put_channels=256, - num_img_feat_level=16, - n_cls=2, - ): - super(SAMPromptConvNeck, self).__init__() - self.prompt_shape = prompt_shape - self.num_queries = prompt_shape[0] - self.per_query_point = prompt_shape[1] - self.point_size = int(math.sqrt(prompt_shape[0])) - - self.img_feat_channels = img_feat_channels - self.out_put_channels = out_put_channels - self.num_img_feat_level = num_img_feat_level - self.n_cls = n_cls - - # decoder_embed_dims = img_feat_channels // 32 - decoder_embed_dims = 32 - self.decoder_input_projs = nn.ModuleList() - # from low resolution to high resolution - for _ in range(num_img_feat_level): - self.decoder_input_projs.append( - nn.Sequential( - nn.Conv2d(img_feat_channels, decoder_embed_dims, kernel_size=1), - # nn.BatchNorm2d(decoder_embed_dims), - nn.ReLU(), - nn.Conv2d(decoder_embed_dims, decoder_embed_dims, kernel_size=3, padding=1), - # nn.BatchNorm2d(decoder_embed_dims), - nn.ReLU(), - )) - self.level_embed = nn.Embedding(self.num_img_feat_level, decoder_embed_dims) - self.gather_img_feats = nn.Sequential( - nn.Conv2d(num_img_feat_level * decoder_embed_dims, out_put_channels, kernel_size=1), - # nn.BatchNorm2d(out_put_channels), - nn.ReLU(), - nn.Conv2d(out_put_channels, out_put_channels, 3, stride=2, padding=1), - nn.ReLU(), - nn.Conv2d(out_put_channels, out_put_channels*2, 3, stride=2, padding=1), - nn.ReLU(), - nn.Conv2d(out_put_channels * 2, out_put_channels * 2, 3, padding=1), - ) - - 
self.img_feats_pe = nn.Parameter(torch.zeros(1, out_put_channels*2, self.point_size, self.point_size)) - - self.cls_head = nn.Sequential( - nn.Conv2d(out_put_channels * 2, out_put_channels, 3, padding=1), - nn.ReLU(), - nn.Conv2d(out_put_channels, n_cls, 1) - ) - - self.point_emb = nn.Sequential( - nn.Conv2d(out_put_channels * 2, out_put_channels, 3, padding=1), - nn.ReLU(), - nn.Conv2d(out_put_channels, out_put_channels, 3, padding=1), - nn.ReLU(), - nn.Conv2d(out_put_channels, self.per_query_point * out_put_channels, 1) - ) - - def forward(self, inputs): - inner_states = [x.permute(0, 3, 1, 2) for x in inputs] # from low2high, all 4 layers - bs = inner_states[0].shape[0] - # inputs: list([B, C, H, W]) - num_layers = len(inputs) - # import ipdb; ipdb.set_trace() - # select the feature maps from the selected layers - layer_start_id = num_layers - self.num_img_feat_level - decoder_inputs = [] - for i in range(self.num_img_feat_level): - decoder_input = self.decoder_input_projs[i](inner_states[i + layer_start_id]) # Bx256x64x64 - level_embed = self.level_embed.weight[i].unsqueeze(0).unsqueeze(-1).unsqueeze(-1).expand(bs, -1, -1, -1) - decoder_input = decoder_input + level_embed - decoder_inputs.append(decoder_input) - decoder_inputs = torch.cat(decoder_inputs, dim=1) # Bx256x64x64 - decoder_inputs = self.gather_img_feats(decoder_inputs) - # import pdb; - # pdb.set_trace() - decoder_inputs = torch.nn.functional.interpolate(decoder_inputs, size=(self.point_size, self.point_size), mode='bilinear', align_corners=True) - img_pe = self.img_feats_pe.expand(bs, -1, -1, -1) # Bx256x64x64 - decoder_inputs = decoder_inputs + img_pe - - cls_logits = self.cls_head(decoder_inputs) # b c h w - cls_logits = rearrange(cls_logits, 'b c h w -> b (h w) c') - point_embs = self.point_emb(decoder_inputs) # b c h w - point_embs = rearrange(point_embs, 'b (t c) h w -> b (h w) t c', t=self.per_query_point) # Bx100x6x256 - - return point_embs, cls_logits - - - - -class MLPBlock(nn.Module): - def __init__( - self, - embedding_dim: int, - mlp_dim: int, - act: Type[nn.Module] = nn.GELU, - ) -> None: - super().__init__() - self.lin1 = nn.Linear(embedding_dim, mlp_dim) - self.lin2 = nn.Linear(mlp_dim, embedding_dim) - self.act = act() - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return self.lin2(self.act(self.lin1(x))) - - -class Transformer(nn.Module): - def __init__( - self, - depth: int = 2, - embedding_dim: int = 256, - num_heads: int = 8, - mlp_dim: int = 1024, - activation: Type[nn.Module] = nn.GELU, - attention_downsample_rate: int = 2, - ) -> None: - super().__init__() - self.depth = depth - self.embedding_dim = embedding_dim - self.num_heads = num_heads - self.mlp_dim = mlp_dim - self.layers = nn.ModuleList() - - for i in range(depth): - self.layers.append( - AttentionBlock( - embedding_dim=embedding_dim, - num_heads=num_heads, - mlp_dim=mlp_dim, - activation=activation, - attention_downsample_rate=attention_downsample_rate - ) - ) - - self.final_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm_final_attn = nn.LayerNorm(embedding_dim) - - def forward( - self, - image_embedding: Tensor, - image_pe: Tensor, - point_embedding: Tensor, - point_pe: Tensor, - ) -> Tuple[Tensor, Tensor]: - """ - Args: - image_embedding (torch.Tensor): image to attend to. Should be shape - B x embedding_dim x h x w for any h and w. - image_pe (torch.Tensor): the positional encoding to add to the image. Must - have the same shape as image_embedding. 
- point_embedding (torch.Tensor): the embedding to add to the query points. - Must have shape B x N_points x embedding_dim for any N_points. - - Returns: - torch.Tensor: the processed point_embedding - torch.Tensor: the processed image_embedding - """ - # BxCxHxW -> BxHWxC == B x N_image_tokens x C - bs, c, h, w = image_embedding.shape - image_embedding = image_embedding.flatten(2).permute(0, 2, 1) - image_pe = image_pe.flatten(2).permute(0, 2, 1) - - # Apply transformer blocks and final layernorm - for layer in self.layers: - queries, keys = layer( - queries=image_embedding, - query_pe=image_pe, - keys=point_embedding, - key_pe=point_pe, - ) - - # Apply the final attention layer from the points to the image - q = queries + image_pe - k = keys + point_embedding - - attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) - queries = queries + attn_out - queries = self.norm_final_attn(queries) - - return queries, keys - - -class AttentionBlock(nn.Module): - def __init__( - self, - embedding_dim: int, - num_heads: int, - mlp_dim: int = 2048, - activation: Type[nn.Module] = nn.ReLU, - attention_downsample_rate: int = 2, - skip_first_layer_pe: bool = False, - ) -> None: - """ - A transformer block with four layers: (1) self-attention of sparse - inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp - block on sparse inputs, and (4) cross attention of dense inputs to sparse - inputs. - - Arguments: - embedding_dim (int): the channel dimension of the embeddings - num_heads (int): the number of heads in the attention layers - mlp_dim (int): the hidden dimension of the mlp block - activation (nn.Module): the activation of the mlp block - skip_first_layer_pe (bool): skip the PE on the first layer - """ - super().__init__() - self.self_attn = Attention(embedding_dim, num_heads) - self.norm1 = nn.LayerNorm(embedding_dim) - - self.cross_attn_token_to_image = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - self.norm2 = nn.LayerNorm(embedding_dim) - - self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) - self.norm3 = nn.LayerNorm(embedding_dim) - - self.norm4 = nn.LayerNorm(embedding_dim) - self.cross_attn_image_to_token = Attention( - embedding_dim, num_heads, downsample_rate=attention_downsample_rate - ) - - self.skip_first_layer_pe = skip_first_layer_pe - - def forward( - self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor - ) -> Tuple[Tensor, Tensor]: - # Self attention block - if self.skip_first_layer_pe: - queries = self.self_attn(q=queries, k=queries, v=queries) - else: - q = queries + query_pe - attn_out = self.self_attn(q=q, k=q, v=queries) - queries = queries + attn_out - queries = self.norm1(queries) - - # Cross attention block, tokens attending to image embedding - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) - queries = queries + attn_out - queries = self.norm2(queries) - - # MLP block - mlp_out = self.mlp(queries) - queries = queries + mlp_out - queries = self.norm3(queries) - - # Cross attention block, image embedding attending to tokens - q = queries + query_pe - k = keys + key_pe - attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) - keys = keys + attn_out - keys = self.norm4(keys) - - return queries, keys - - -class Attention(nn.Module): - """ - An attention layer that allows for downscaling the size of the embedding - after projection to queries, keys, and values. 
- """ - - def __init__( - self, - embedding_dim: int, - num_heads: int, - downsample_rate: int = 1, - ) -> None: - super().__init__() - self.embedding_dim = embedding_dim - self.internal_dim = embedding_dim // downsample_rate - self.num_heads = num_heads - assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." - - self.q_proj = nn.Linear(embedding_dim, self.internal_dim) - self.k_proj = nn.Linear(embedding_dim, self.internal_dim) - self.v_proj = nn.Linear(embedding_dim, self.internal_dim) - self.out_proj = nn.Linear(self.internal_dim, embedding_dim) - - def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: - b, n, c = x.shape - x = x.reshape(b, n, num_heads, c // num_heads) - return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head - - def _recombine_heads(self, x: Tensor) -> Tensor: - b, n_heads, n_tokens, c_per_head = x.shape - x = x.transpose(1, 2) - return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C - - def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: - # Input projections - q = self.q_proj(q) - k = self.k_proj(k) - v = self.v_proj(v) - - # Separate into heads - q = self._separate_heads(q, self.num_heads) - k = self._separate_heads(k, self.num_heads) - v = self._separate_heads(v, self.num_heads) - - # Attention - _, _, _, c_per_head = q.shape - attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens - attn = attn / math.sqrt(c_per_head) - attn = torch.softmax(attn, dim=-1) - - # Get output - out = attn @ v - out = self._recombine_heads(out) - out = self.out_proj(out) - - return out - - - -class LayerNorm2d(nn.Module): - def __init__(self, num_channels: int, eps: float = 1e-6) -> None: - super().__init__() - self.weight = nn.Parameter(torch.ones(num_channels)) - self.bias = nn.Parameter(torch.zeros(num_channels)) - self.eps = eps - - def forward(self, x: torch.Tensor) -> torch.Tensor: - u = x.mean(1, keepdim=True) - s = (x - u).pow(2).mean(1, keepdim=True) - x = (x - u) / torch.sqrt(s + self.eps) - x = self.weight[:, None, None] * x + self.bias[:, None, None] - return x - - -class MLP(nn.Module): - def __init__( - self, - input_dim: int, - hidden_dim: int, - output_dim: int, - num_layers: int, - sigmoid_output: bool = False, - ) -> None: - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - self.sigmoid_output = sigmoid_output - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - if self.sigmoid_output: - x = F.sigmoid(x) - return x - - -@MODELS.register_module() -class SAMTransformerEDPromptGenNeck(nn.Module): - def __init__( - self, - prompt_shape=(100, 5), - in_channels=[1280]*16, - inner_channels=128, - selected_channels: list=None, - num_encoders=2, - num_decoders=2, - out_channels=256, - positional_encoding=dict(num_feats=128, normalize=True), - kernel_size=3, - stride=1, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='ReLU', inplace=True), - init_cfg=None, - **kwargs - ): - super().__init__() - self.in_channels = in_channels - self.kernel_size = kernel_size - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.out_channels = out_channels - self.stride = stride - self.selected_channels = selected_channels - - self.prompt_shape = prompt_shape - self.num_queries = prompt_shape[0] - self.per_query_point = prompt_shape[1] - - self.down_sample_layers = 
nn.ModuleList() - for idx in self.selected_channels: - self.down_sample_layers.append( - nn.Sequential( - ConvModule( - in_channels[idx], - inner_channels, - kernel_size=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ConvModule( - inner_channels, - inner_channels, - kernel_size=3, - padding=1, - stride=2, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ) - ) - self.fusion_layers = nn.ModuleList() - for idx in self.selected_channels: - self.fusion_layers.append( - ConvModule( - inner_channels, - inner_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ) - ) - self.up_layers = nn.ModuleList() - self.up_layers.append( - nn.Sequential( - ConvModule( - inner_channels, - inner_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ConvModule( - inner_channels, - inner_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ) - ) - ) - self.up_layers.append( - ConvModule( - inner_channels, - out_channels, - kernel_size=1, - norm_cfg=self.norm_cfg, - act_cfg=None - ) - ) - - self.generator_pe = SinePositionalEncoding(**positional_encoding) - - self.en_layers = nn.ModuleList() - self.de_layers = nn.ModuleList() - self.build_transformer(num_encoders=num_encoders, num_decoders=num_decoders) - - self.embed_dims = self.en_layers[0].embed_dims - self.pre_norm = self.en_layers[0].pre_norm - - self.query_feat = nn.Embedding(self.num_queries, out_channels) - self.query_embed = nn.Embedding(self.num_queries, out_channels) - - # self.output_upscaling = nn.Sequential( - # nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), - # nn.BatchNorm2d(out_channels), - # nn.GELU(), - # nn.UpsamplingBilinear2d(scale_factor=2), - # nn.Conv2d(out_channels, out_channels // 4, kernel_size=3, padding=1), - # nn.BatchNorm2d(out_channels // 4), - # nn.GELU(), - # nn.UpsamplingBilinear2d(scale_factor=2), - # nn.Conv2d(out_channels // 4, out_channels // 8, kernel_size=3, padding=1), - # nn.BatchNorm2d(out_channels // 8), - # nn.GELU(), - # nn.UpsamplingBilinear2d(scale_factor=2), - # nn.Conv2d(out_channels // 8, out_channels // 8, kernel_size=3, padding=1), - # ) - # self.output_hypernetworks_mlps = MLP(out_channels, out_channels, out_channels // 8, 3) - - self.init_weights() - - def build_transformer(self, num_encoders=2, num_decoders=2, embed_dims=256, num_heads=8, mlp_ratio=4): - transformer_encoder_layer = dict( - type='BaseTransformerLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=embed_dims, - num_heads=num_heads, - attn_drop=0.1, - proj_drop=0.1, - dropout_layer=dict(type='Dropout', drop_prob=0.1) - ), - ], - ffn_cfgs=dict( - type='FFN', - embed_dims=embed_dims, - feedforward_channels=embed_dims * mlp_ratio, - num_fcs=2, - act_cfg=dict(type='GELU'), - ffn_drop=0.1, - add_identity=True), - operation_order=('norm', 'self_attn', 'norm', 'ffn'), - norm_cfg=dict(type='LN'), - batch_first=True - ) - transformer_decoder_layer = dict( - type='BaseTransformerLayer', - attn_cfgs=dict( - type='MultiheadAttention', - embed_dims=embed_dims, - num_heads=num_heads, - attn_drop=0.1, - proj_drop=0.1, - dropout_layer=dict(type='Dropout', drop_prob=0.1) - ), - ffn_cfgs=dict( - type='FFN', - embed_dims=embed_dims, - feedforward_channels=embed_dims * mlp_ratio, - num_fcs=2, - act_cfg=dict(type='GELU'), - ffn_drop=0.1, - add_identity=True), - operation_order=('norm', 'self_attn', 'norm', 'cross_attn', 'norm', 'ffn'), - norm_cfg=dict(type='LN'), - batch_first=True - ) - - 
transformer_en_layers = [ - copy.deepcopy(transformer_encoder_layer) for _ in range(num_encoders) - ] - transformer_de_layers = [ - copy.deepcopy(transformer_decoder_layer) for _ in range(num_decoders) - ] - for i in range(num_encoders): - self.en_layers.append(build_transformer_layer(transformer_en_layers[i])) - for i in range(num_decoders): - self.de_layers.append(build_transformer_layer(transformer_de_layers[i])) - - def init_weights(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, inputs): - _, inner_states = inputs - inner_states = [einops.rearrange(inner_states[idx], 'b h w c -> b c h w') for idx in self.selected_channels] - inner_states = [layer(x) for layer, x in zip(self.down_sample_layers, inner_states)] - - x = None - for inner_state, layer in zip(inner_states, self.fusion_layers): - if x is not None: - inner_state = x + inner_state - x = inner_state + layer(inner_state) - x = self.up_layers[0](x) + x - img_feats = self.up_layers[1](x) - - bs, c, h, w = img_feats.shape - - mask_pe = torch.zeros((bs, h, w), device=img_feats.device, dtype=torch.bool) - img_feats_pe = self.generator_pe(mask_pe) - - query_feat = self.query_feat.weight.unsqueeze(0).repeat( - (bs, 1, 1)) - query_embed = self.query_embed.weight.unsqueeze(0).repeat( - (bs, 1, 1)) - - encoder_inputs = rearrange(img_feats, 'b c h w -> b (h w) c') - img_feats_pe = img_feats_pe.flatten(2).permute(0, 2, 1) - - # shape (batch_size, num_total_queries, c) - memory = encoder_inputs - for layer in self.en_layers: - memory = layer( - query=memory, - query_pos=img_feats_pe - ) - # (batch_size, num_total_queries, c) - - query_feat_list = [] - for layer in self.de_layers: - query_feat = layer( - query=query_feat, - key=memory, - value=memory, - query_pos=query_embed, - key_pos=img_feats_pe - ) - query_feat_list.append(query_feat) - - img_feat = rearrange(memory, 'b (h w) c -> b c h w', h=h, w=w) - return query_feat, query_feat_list, img_feat - - -@MODELS.register_module() -class SAMAggregatorNeck(nn.Module): - def __init__( - self, - in_channels=[1280]*16, - inner_channels=128, - selected_channels: list=None, - out_channels=256, - kernel_size=3, - stride=1, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='ReLU', inplace=True), - up_sample_scale=4, - init_cfg=None, - **kwargs - ): - super().__init__() - self.in_channels = in_channels - self.kernel_size = kernel_size - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.out_channels = out_channels - self.stride = stride - self.selected_channels = selected_channels - self.up_sample_scale = up_sample_scale - - self.down_sample_layers = nn.ModuleList() - for idx in self.selected_channels: - self.down_sample_layers.append( - nn.Sequential( - ConvModule( - in_channels[idx], - inner_channels, - kernel_size=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ConvModule( - inner_channels, - inner_channels, - kernel_size=3, - padding=1, - stride=2, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ) - ) - self.fusion_layers = nn.ModuleList() - for idx in self.selected_channels: - self.fusion_layers.append( - ConvModule( - inner_channels, - inner_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ) - ) - self.up_layers = nn.ModuleList() - self.up_layers.append( - nn.Sequential( - ConvModule( - inner_channels, - inner_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ConvModule( - inner_channels, - inner_channels, - 
kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ) - ) - ) - self.up_layers.append( - ConvModule( - inner_channels, - out_channels, - kernel_size=1, - norm_cfg=self.norm_cfg, - act_cfg=None - ) - ) - - self.up_sample_layers = nn.ModuleList() - assert up_sample_scale == 4 - self.up_sample_layers.append( - nn.Sequential( - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - ConvModule( - out_channels, - out_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ConvModule( - out_channels, - out_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ) - ) - ) - - self.up_sample_layers.append( - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) - ) - - self.up_sample_layers.append( - nn.Sequential( - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - ConvModule( - out_channels, - out_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ), - ConvModule( - out_channels, - out_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg - ) - ) - ) - - self.up_sample_layers.append( - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) - ) - - def forward(self, inputs): - _, inner_states = inputs - inner_states = [einops.rearrange(inner_states[idx], 'b h w c -> b c h w') for idx in self.selected_channels] - inner_states = [layer(x) for layer, x in zip(self.down_sample_layers, inner_states)] - - x = None - for inner_state, layer in zip(inner_states, self.fusion_layers): - if x is not None: - inner_state = x + inner_state - x = inner_state + layer(inner_state) - x = self.up_layers[0](x) + x - img_feats_0 = self.up_layers[1](x) - - img_feats_1 = self.up_sample_layers[0](img_feats_0) + self.up_sample_layers[1](img_feats_0) - - img_feats_2 = self.up_sample_layers[2](img_feats_1) + self.up_sample_layers[3](img_feats_1) - - return img_feats_2, img_feats_1, img_feats_0 \ No newline at end of file diff --git a/spaces/LLaMaWhisperer/LegalLLaMa/legal_llama/bill_retrieval.py b/spaces/LLaMaWhisperer/LegalLLaMa/legal_llama/bill_retrieval.py deleted file mode 100644 index e406e7824bbb5dccd274822a07a1a5deeaff0e2e..0000000000000000000000000000000000000000 --- a/spaces/LLaMaWhisperer/LegalLLaMa/legal_llama/bill_retrieval.py +++ /dev/null @@ -1,145 +0,0 @@ -import requests -import streamlit as st -import xml.etree.ElementTree as ET - - -class BillRetriever: - """ - A class used to retrieve bills using the ProPublica Congress API & United States Congress API. - """ - PROPUBLICA_URL = "https://api.propublica.org/congress/v1/bills/search.json" - CONGRESS_URL_BASE = "https://api.congress.gov/v3/bill/{congress}/{billType}/{billNumber}/text" - - def __init__(self, api_key=None): - """ - Initialize the BillRetriever with API keys. - - Parameters: - api_key (str, optional): The API key to be used for authentication. Default is None. - """ - self.pro_publica_api_key = st.secrets["PRO_PUBLICA_API_KEY"] - self.congress_api_key = st.secrets["CONGRESS_API_KEY"] - - def make_api_call(self, api_url, api_key, params=None): - """ - Make an API call to the specified URL with optional parameters and API key. - - Parameters: - api_url (str): The URL of the API endpoint. - api_key (str): The API Key for the API - params (dict, optional): Optional parameters to pass with the API call. Default is None. - - Returns: - dict: JSON response data if the request is successful, None otherwise. 
- """ - headers = {"X-API-Key": api_key} if api_key else {} - - try: - response = requests.get(api_url, params=params, headers=headers) - response.raise_for_status() # Raise an exception for non-2xx status codes - return response.json() - except requests.exceptions.RequestException as e: - print(f"Error occurred: {e}") - return None - except ValueError as e: - print(f"Invalid response received: {e}") - return None - - def search_bill_propublica(self, query): - """ - Search for a bill using the ProPublica Congress API. - - Parameters: - query (str): The query string to search for. - - Returns: - dict: JSON response data if the request is successful, None otherwise. - """ - params = {"query": query, "sort": "date", "dir": "desc"} - return self.make_api_call(self.PROPUBLICA_URL, params=params, api_key=self.pro_publica_api_key) - - def get_bill_text_congress(self, congress, bill_type, bill_number): - """ - Retrieve the text of a bill using the Congress API. - - Parameters: - congress (str): The number of the congress. - bill_type (str): The type of the bill. - bill_number (str): The number of the bill. - - Returns: - dict: JSON response data if the request is successful, None otherwise. - """ - url = self.CONGRESS_URL_BASE.format(congress=congress, billType=bill_type, billNumber=bill_number) - return self.make_api_call(url, api_key=self.congress_api_key) - - def get_bill_by_query(self, query): - """ - Search for a bill by query and retrieve its text. - - Parameters: - query (str): The query string to search for. - - Returns: - str: The text of the bill if the request is successful, None otherwise. - """ - # First search for the bill using the ProPublica API - propublica_data = self.search_bill_propublica(query) - if propublica_data and 'results' in propublica_data: - # Iterate over the list of bills, till we find the bill which has text available on Congress Website - for bill_data in propublica_data['results'][0]['bills']: - congress = bill_data['bill_id'].split('-')[1] - bill_type = bill_data['bill_type'] - bill_number = bill_data['number'].split('.')[-1] - - # Then get the text of the bill using the Congress API - congress_data = self.get_bill_text_congress(congress, bill_type, bill_number) - if congress_data and 'textVersions' in congress_data and congress_data['textVersions']: - # Check if textVersions list is not empty - xml_url = congress_data['textVersions'][0]['formats'][2]['url'] - return self.extract_bill_text(xml_url) - return None - - def extract_bill_text(self, url): - """ - Extract the text content from a bill's XML data. - - Parameters: - url (str): The URL of the bill's XML data. - - Returns: - str: The text content of the bill. - """ - # Get the XML data from the URL - try: - xml_data = requests.get(url).content - except requests.exceptions.RequestException as e: - print(f"Error occurred: {e}") - return None - - # Decode bytes to string and parse XML - try: - root = ET.fromstring(xml_data.decode('utf-8')) - except ET.ParseError as e: - print(f"Error parsing XML: {e}") - return None - - return self.get_all_text(root) - - @staticmethod - def get_all_text(element): - """ - Recursively extract text from an XML element and its children. - - Parameters: - element (xml.etree.ElementTree.Element): An XML element. - - Returns: - str: The concatenated text from the element and its children. 
- """ - text = element.text or '' # Get the text of the current element, if it exists - for child in element: - text += BillRetriever.get_all_text(child) # Recursively get the text of all child elements - if child.tail: - text += child.tail # Add any trailing text of the child element - return text diff --git a/spaces/LanguageBind/LanguageBind/open_clip/pretrained.py b/spaces/LanguageBind/LanguageBind/open_clip/pretrained.py deleted file mode 100644 index 1465a2325652be7e7a1d7563698e38b9ec408cc6..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/open_clip/pretrained.py +++ /dev/null @@ -1,427 +0,0 @@ -import hashlib -import os -import urllib -import warnings -from functools import partial -from typing import Dict, Union - -from tqdm import tqdm - -from .version import __version__ - -try: - from huggingface_hub import hf_hub_download - hf_hub_download = partial(hf_hub_download, library_name="open_clip", library_version=__version__) - _has_hf_hub = True -except ImportError: - hf_hub_download = None - _has_hf_hub = False - - -def _pcfg(url='', hf_hub='', mean=None, std=None): - return dict( - url=url, - hf_hub=hf_hub, - mean=mean, - std=std, - ) - - -_RN50 = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt"), - yfcc15m=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt"), - cc12m=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"), -) - -_RN50_quickgelu = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt"), - yfcc15m=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt"), - cc12m=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"), -) - -_RN101 = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt"), - yfcc15m=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"), -) - -_RN101_quickgelu = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt"), - yfcc15m=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"), -) - -_RN50x4 = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt"), -) - -_RN50x16 = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt"), -) - -_RN50x64 = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt"), -) - -_VITB32 = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"), - laion400m_e31=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"), - laion400m_e32=_pcfg( - 
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"), - laion2b_e16=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth"), - laion2b_s34b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-laion2B-s34B-b79K/'), - # DataComp-M models - datacomp_m_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-DataComp.M-s128M-b4K/'), - commonpool_m_clip_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.clip-s128M-b4K/'), - commonpool_m_laion_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.laion-s128M-b4K/'), - commonpool_m_image_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.image-s128M-b4K/'), - commonpool_m_text_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.text-s128M-b4K/'), - commonpool_m_basic_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M.basic-s128M-b4K/'), - commonpool_m_s128m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.M-s128M-b4K/'), - # DataComp-S models - datacomp_s_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-DataComp.S-s13M-b4K/'), - commonpool_s_clip_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.clip-s13M-b4K/'), - commonpool_s_laion_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.laion-s13M-b4K/'), - commonpool_s_image_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.image-s13M-b4K/'), - commonpool_s_text_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.text-s13M-b4K/'), - commonpool_s_basic_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S.basic-s13M-b4K/'), - commonpool_s_s13m_b4k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-CommonPool.S-s13M-b4K/'), -) - -_VITB32_quickgelu = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"), - laion400m_e31=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"), - laion400m_e32=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"), -) - -_VITB16 = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt"), - laion400m_e31=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt"), - laion400m_e32=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt"), - laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-laion2B-s34B-b88K/'), - # DataComp-L models - datacomp_l_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-DataComp.L-s1B-b8K/'), - commonpool_l_clip_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.clip-s1B-b8K/'), - commonpool_l_laion_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.laion-s1B-b8K/'), - commonpool_l_image_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.image-s1B-b8K/'), - commonpool_l_text_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.text-s1B-b8K/'), - commonpool_l_basic_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L.basic-s1B-b8K/'), - commonpool_l_s1b_b8k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-CommonPool.L-s1B-b8K/'), -) - -_VITB16_PLUS_240 = dict( - laion400m_e31=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt"), - laion400m_e32=_pcfg( - 
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt"), -) - -_VITL14 = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"), - laion400m_e31=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt"), - laion400m_e32=_pcfg( - "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt"), - laion2b_s32b_b82k=_pcfg( - hf_hub='laion/CLIP-ViT-L-14-laion2B-s32B-b82K/', - mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)), - # DataComp-XL models - datacomp_xl_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K/'), - commonpool_xl_clip_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-L-14-CommonPool.XL.clip-s13B-b90K/'), - commonpool_xl_laion_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-L-14-CommonPool.XL.laion-s13B-b90K/'), - commonpool_xl_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-L-14-CommonPool.XL-s13B-b90K/'), -) - -_VITL14_336 = dict( - openai=_pcfg( - "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"), -) - -_VITH14 = dict( - laion2b_s32b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-laion2B-s32B-b79K/'), -) - -_VITg14 = dict( - laion2b_s12b_b42k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s12B-b42K/'), - laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s34B-b88K/'), -) - -_VITbigG14 = dict( - laion2b_s39b_b160k=_pcfg(hf_hub='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/'), -) - -_robertaViTB32 = dict( - laion2b_s12b_b32k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-roberta-base-laion2B-s12B-b32k/'), -) - -_xlmRobertaBaseViTB32 = dict( - laion5b_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-xlm-roberta-base-laion5B-s13B-b90k/'), -) - -_xlmRobertaLargeFrozenViTH14 = dict( - frozen_laion5b_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-frozen-xlm-roberta-large-laion5B-s13B-b90k/'), -) - -_convnext_base = dict( - laion400m_s13b_b51k=_pcfg(hf_hub='laion/CLIP-convnext_base-laion400M-s13B-b51K/'), -) - -_convnext_base_w = dict( - laion2b_s13b_b82k=_pcfg(hf_hub='laion/CLIP-convnext_base_w-laion2B-s13B-b82K/'), - laion2b_s13b_b82k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg/'), - laion_aesthetic_s13b_b82k=_pcfg(hf_hub='laion/CLIP-convnext_base_w-laion_aesthetic-s13B-b82K/'), -) - -_convnext_base_w_320 = dict( - laion_aesthetic_s13b_b82k=_pcfg(hf_hub='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K/'), - laion_aesthetic_s13b_b82k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K-augreg/'), -) - -_convnext_large_d = dict( - laion2b_s26b_b102k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg/'), -) - -_convnext_large_d_320 = dict( - laion2b_s29b_b131k_ft=_pcfg(hf_hub='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft/'), - laion2b_s29b_b131k_ft_soup=_pcfg(hf_hub='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup/'), -) - -_convnext_xxlarge = dict( - laion2b_s34b_b82k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg/'), - laion2b_s34b_b82k_augreg_rewind=_pcfg(hf_hub='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-rewind/'), - laion2b_s34b_b82k_augreg_soup=_pcfg(hf_hub='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup/'), -) - -_coca_VITB32 = dict( - laion2b_s13b_b90k=_pcfg(hf_hub='laion/CoCa-ViT-B-32-laion2B-s13B-b90k/'), - 
mscoco_finetuned_laion2b_s13b_b90k=_pcfg(hf_hub='laion/mscoco_finetuned_CoCa-ViT-B-32-laion2B-s13B-b90k/') -) - -_coca_VITL14 = dict( - laion2b_s13b_b90k=_pcfg(hf_hub='laion/CoCa-ViT-L-14-laion2B-s13B-b90k/'), - mscoco_finetuned_laion2b_s13b_b90k=_pcfg(hf_hub='laion/mscoco_finetuned_CoCa-ViT-L-14-laion2B-s13B-b90k/') -) - - -_PRETRAINED = { - "RN50": _RN50, - "RN50-quickgelu": _RN50_quickgelu, - "RN101": _RN101, - "RN101-quickgelu": _RN101_quickgelu, - "RN50x4": _RN50x4, - "RN50x16": _RN50x16, - "RN50x64": _RN50x64, - "ViT-B-32": _VITB32, - "ViT-B-32-quickgelu": _VITB32_quickgelu, - "ViT-B-16": _VITB16, - "ViT-B-16-plus-240": _VITB16_PLUS_240, - "ViT-L-14": _VITL14, - "ViT-L-14-336": _VITL14_336, - "ViT-H-14": _VITH14, - "ViT-g-14": _VITg14, - "ViT-bigG-14": _VITbigG14, - "roberta-ViT-B-32": _robertaViTB32, - "xlm-roberta-base-ViT-B-32": _xlmRobertaBaseViTB32, - "xlm-roberta-large-ViT-H-14": _xlmRobertaLargeFrozenViTH14, - "convnext_base": _convnext_base, - "convnext_base_w": _convnext_base_w, - "convnext_base_w_320": _convnext_base_w_320, - "convnext_large_d": _convnext_large_d, - "convnext_large_d_320": _convnext_large_d_320, - "convnext_xxlarge": _convnext_xxlarge, - "coca_ViT-B-32": _coca_VITB32, - "coca_ViT-L-14": _coca_VITL14, - "EVA01-g-14": dict( - # from QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt - laion400m_s11b_b41k=_pcfg(hf_hub='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k/'), - ), - "EVA01-g-14-plus": dict( - # from QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt - merged2b_s11b_b114k=_pcfg(hf_hub='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k/'), - ), - "EVA02-B-16": dict( - # from QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt - merged2b_s8b_b131k=_pcfg(hf_hub='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k/'), - ), - "EVA02-L-14": dict( - # from QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt - merged2b_s4b_b131k=_pcfg(hf_hub='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k/'), - ), - "EVA02-L-14-336": dict( - # from QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt - merged2b_s6b_b61k=_pcfg(hf_hub='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k/'), - ), - "EVA02-E-14": dict( - # from QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt - laion2b_s4b_b115k=_pcfg(hf_hub='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k/'), - ), - "EVA02-E-14-plus": dict( - # from QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt - laion2b_s9b_b144k=_pcfg(hf_hub='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k/'), - ) -} - - -def _clean_tag(tag: str): - # normalize pretrained tags - return tag.lower().replace('-', '_') - - -def list_pretrained(as_str: bool = False): - """ returns list of pretrained models - Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True - """ - return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()] - - -def list_pretrained_models_by_tag(tag: str): - """ return all models having the specified pretrain tag """ - models = [] - tag = _clean_tag(tag) - for k in _PRETRAINED.keys(): - if tag in _PRETRAINED[k]: - models.append(k) - return models - - -def list_pretrained_tags_by_model(model: str): - """ return all pretrain tags for the specified model architecture """ - tags = [] - if model in _PRETRAINED: - tags.extend(_PRETRAINED[model].keys()) - return tags - - -def is_pretrained_cfg(model: str, tag: str): - if model not in _PRETRAINED: - return False - return _clean_tag(tag) in _PRETRAINED[model] - - -def get_pretrained_cfg(model: str, tag: str): - if model 
not in _PRETRAINED: - return {} - model_pretrained = _PRETRAINED[model] - return model_pretrained.get(_clean_tag(tag), {}) - - -def get_pretrained_url(model: str, tag: str): - cfg = get_pretrained_cfg(model, _clean_tag(tag)) - return cfg.get('url', '') - - -def download_pretrained_from_url( - url: str, - cache_dir: Union[str, None] = None, -): - if not cache_dir: - cache_dir = os.path.expanduser("~/.cache/clip") - os.makedirs(cache_dir, exist_ok=True) - filename = os.path.basename(url) - - if 'openaipublic' in url: - expected_sha256 = url.split("/")[-2] - elif 'mlfoundations' in url: - expected_sha256 = os.path.splitext(filename)[0].split("-")[-1] - else: - expected_sha256 = '' - - download_target = os.path.join(cache_dir, filename) - - if os.path.exists(download_target) and not os.path.isfile(download_target): - raise RuntimeError(f"{download_target} exists and is not a regular file") - - if os.path.isfile(download_target): - if expected_sha256: - if hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256): - return download_target - else: - warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") - else: - return download_target - - with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: - with tqdm(total=int(source.headers.get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) - - if expected_sha256 and not hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256): - raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") - - return download_target - - -def has_hf_hub(necessary=False): - if not _has_hf_hub and necessary: - # if no HF Hub module installed, and it is necessary to continue, raise error - raise RuntimeError( - 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') - return _has_hf_hub - - -def download_pretrained_from_hf( - model_id: str, - filename: str = 'open_clip_pytorch_model.bin', - revision=None, - cache_dir: Union[str, None] = None, -): - has_hf_hub(True) - cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir) - return cached_file - - -def download_pretrained( - cfg: Dict, - force_hf_hub: bool = False, - cache_dir: Union[str, None] = None, -): - target = '' - if not cfg: - return target - - download_url = cfg.get('url', '') - download_hf_hub = cfg.get('hf_hub', '') - if download_hf_hub and force_hf_hub: - # use HF hub even if url exists - download_url = '' - - if download_url: - target = download_pretrained_from_url(download_url, cache_dir=cache_dir) - elif download_hf_hub: - has_hf_hub(True) - # we assume the hf_hub entries in pretrained config combine model_id + filename in - # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and - # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'. 
- model_id, filename = os.path.split(download_hf_hub) - if filename: - target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir) - else: - target = download_pretrained_from_hf(model_id, cache_dir=cache_dir) - - return target diff --git a/spaces/LanguageBind/LanguageBind/v_cls/zeroshot_cls.py b/spaces/LanguageBind/LanguageBind/v_cls/zeroshot_cls.py deleted file mode 100644 index 9242903df12d154a263d057d725913687c6684d5..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/v_cls/zeroshot_cls.py +++ /dev/null @@ -1,97 +0,0 @@ -import json -import logging -import os - -import numpy as np -import torch -from scipy.special import softmax -from training.distributed import is_master -from .zero_shot import zero_shot_eval - - -def compute_video(lst): - i, video_id, data, label = lst - feat = [x for x in data] - feat = np.mean(feat, axis=0) - pred = np.argmax(feat) - top1 = (int(pred) == int(label)) * 1.0 - top5 = (int(label) in np.argsort(-feat)[:5]) * 1.0 - return [pred, top1, top5, int(label)] - -def merge(eval_path, num_tasks, method='prob'): - assert method in ['prob', 'score'] - dict_feats = {} - dict_label = {} - dict_pos = {} - # logging.info("Reading individual output files") - - for x in range(num_tasks): - file = os.path.join(eval_path, str(x) + '.txt') - lines = open(file, 'r').readlines()[1:] - for line in lines: - line = line.strip() - name = line.split('[')[0] - label = line.split(']')[1].split(' ')[1] - chunk_nb = line.split(']')[1].split(' ')[2] - split_nb = line.split(']')[1].split(' ')[3] - data = np.fromstring( - line.split('[')[1].split(']')[0], dtype=np.float, sep=',') - if name not in dict_feats: - dict_feats[name] = [] - dict_label[name] = 0 - dict_pos[name] = [] - if chunk_nb + split_nb in dict_pos[name]: - continue - if method == 'prob': - dict_feats[name].append(softmax(data)) - else: - dict_feats[name].append(data) - dict_pos[name].append(chunk_nb + split_nb) - dict_label[name] = label - # logging.info("Computing final results") - - input_lst = [] - # logging.info(f"{len(dict_feats)}") - for i, item in enumerate(dict_feats): - input_lst.append([i, item, dict_feats[item], dict_label[item]]) - from multiprocessing import Pool - p = Pool(64) - ans = p.map(compute_video, input_lst) - top1 = [x[1] for x in ans] - top5 = [x[2] for x in ans] - # pred = [x[0] for x in ans] - label = [x[3] for x in ans] - final_top1, final_top5 = np.mean(top1), np.mean(top5) - - return final_top1 * 100, final_top5 * 100 - - -def evaluate_v_cls(model, data, epoch, args, tb_writer=None): - model.eval() - dataloader = data['v_cls'] - args.output_dir = os.path.join(args.log_base_path, 'video_cls') - os.makedirs(args.output_dir, exist_ok=True) - if args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs): - if is_master(args): - logging.info(f"Eval Epoch: {epoch}, accuracy of zero-shot classification under Kinetics-400 test videos") - zero_shot_eval(model, dataloader, epoch, args) - - torch.distributed.barrier() - - if is_master(args): - # logging.info("Start merging results...") - final_top1, final_top5 = merge(args.output_dir, args.world_size) - logging.info(f"\t>>> Acc@1: {final_top1:.2f}%, Acc@5: {final_top5:.2f}%") - metrics = {'top-1': final_top1, 'top-5': final_top5} - - if args.save_logs: - for name, val in metrics.items(): - if tb_writer is not None: - tb_writer.add_scalar(f"val/v_cls/{name}", val, epoch) - - with open(os.path.join(args.output_dir, "results.jsonl"), "a+") as f: - f.write(json.dumps(metrics)) - 
f.write("\n") - - return metrics - diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/Makefile b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/Makefile deleted file mode 100644 index e1ce27677fe21c85ac4f81799a739a19050e47af..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/Makefile +++ /dev/null @@ -1,63 +0,0 @@ -.PHONY: -.ONESHELL: - -help: ## Show this help and exit - @grep -hE '^[A-Za-z0-9_ \-]*?:.*##.*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -install: ## Install dependencies (Do everytime you start up a paperspace machine) - apt-get -y install build-essential python3-dev ffmpeg - pip install --upgrade setuptools wheel - pip install --upgrade pip - pip install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.1 - pip install -r assets/requirements/requirements.txt - pip install --upgrade lxml - apt-get update - apt -y install -qq aria2 - -basev1: ## Download version 1 pre-trained models (Do only once after cloning the fork) - mkdir -p pretrained uvr5_weights - git pull - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d pretrained -o D32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d pretrained -o D40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d pretrained -o D48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d pretrained -o G32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d pretrained -o G40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d pretrained -o G48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d pretrained -o f0D32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d pretrained -o f0D40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d pretrained -o f0D48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d pretrained -o f0G32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d pretrained -o f0G40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d pretrained -o f0G48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M 
https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d ./ -o hubert_base.pt - -basev2: ## Download version 2 pre-trained models (Do only once after cloning the fork) - mkdir -p pretrained_v2 uvr5_weights - git pull - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D32k.pth -d pretrained_v2 -o D32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d pretrained_v2 -o D40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D48k.pth -d pretrained_v2 -o D48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G32k.pth -d pretrained_v2 -o G32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d pretrained_v2 -o G40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G48k.pth -d pretrained_v2 -o G48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D32k.pth -d pretrained_v2 -o f0D32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d pretrained_v2 -o f0D40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D48k.pth -d pretrained_v2 -o f0D48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G32k.pth -d pretrained_v2 -o f0G32k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d pretrained_v2 -o f0G40k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G48k.pth -d pretrained_v2 -o f0G48k.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth - aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d ./ -o hubert_base.pt - -run-ui: ## Run the python GUI - python infer-web.py --paperspace --pycmd python - -run-cli: ## Run the python CLI - python infer-web.py --pycmd python --is_cli - -tensorboard: ## Start the tensorboard (Run on separate terminal) - echo https://tensorboard-$$(hostname).clg07azjl.paperspacegradient.com - tensorboard --logdir logs --bind_all \ No newline at end of file diff --git 
a/spaces/Lianjd/stock_dashboard/backtrader/stores/vcstore.py b/spaces/Lianjd/stock_dashboard/backtrader/stores/vcstore.py deleted file mode 100644 index 7de96ca971303d883f3d14bba98f71c31a01cddb..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/stores/vcstore.py +++ /dev/null @@ -1,545 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - - -import collections -from datetime import date, datetime, time, timedelta -import os.path -import threading -import time as _timemod - -import ctypes - -from backtrader import TimeFrame, Position -from backtrader.feed import DataBase -from backtrader.metabase import MetaParams -from backtrader.utils.py3 import (MAXINT, range, queue, string_types, - with_metaclass) -from backtrader.utils import AutoDict - - -class _SymInfo(object): - # Replica of the SymbolInfo COM object to pass it over thread boundaries - _fields = ['Type', 'Description', 'Decimals', 'TimeOffset', - 'PointValue', 'MinMovement'] - - def __init__(self, syminfo): - for f in self._fields: - setattr(self, f, getattr(syminfo, f)) - -# This type is used inside 'PumpEvents', but if we create the type -# afresh each time 'PumpEvents' is called we end up creating cyclic -# garbage for each call. So we define it here instead. -_handles_type = ctypes.c_void_p * 1 - - -def PumpEvents(timeout=-1, hevt=None, cb=None): - """This following code waits for 'timeout' seconds in the way - required for COM, internally doing the correct things depending - on the COM appartment of the current thread. It is possible to - terminate the message loop by pressing CTRL+C, which will raise - a KeyboardInterrupt. - """ - # XXX Should there be a way to pass additional event handles which - # can terminate this function? - - # XXX XXX XXX - # - # It may be that I misunderstood the CoWaitForMultipleHandles - # function. Is a message loop required in a STA? Seems so... - # - # MSDN says: - # - # If the caller resides in a single-thread apartment, - # CoWaitForMultipleHandles enters the COM modal loop, and the - # thread's message loop will continue to dispatch messages using - # the thread's message filter. If no message filter is registered - # for the thread, the default COM message processing is used. - # - # If the calling thread resides in a multithread apartment (MTA), - # CoWaitForMultipleHandles calls the Win32 function - # MsgWaitForMultipleObjects. 
- - # Timeout expected as float in seconds - *1000 to miliseconds - # timeout = -1 -> INFINITE 0xFFFFFFFF; - # It can also be a callable which should return an amount in seconds - - if hevt is None: - hevt = ctypes.windll.kernel32.CreateEventA(None, True, False, None) - - handles = _handles_type(hevt) - RPC_S_CALLPENDING = -2147417835 - - # @ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint) - def HandlerRoutine(dwCtrlType): - if dwCtrlType == 0: # CTRL+C - ctypes.windll.kernel32.SetEvent(hevt) - return 1 - return 0 - - HandlerRoutine = ( - ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint)(HandlerRoutine) - ) - - ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 1) - while True: - try: - tmout = timeout() # check if it's a callable - except TypeError: - tmout = timeout # it seems to be a number - - if tmout > 0: - tmout *= 1000 - tmout = int(tmout) - - try: - res = ctypes.oledll.ole32.CoWaitForMultipleHandles( - 0, # COWAIT_FLAGS - int(tmout), # dwtimeout - len(handles), # number of handles in handles - handles, # handles array - # pointer to indicate which handle was signaled - ctypes.byref(ctypes.c_ulong()) - ) - - except WindowsError as details: - if details.args[0] == RPC_S_CALLPENDING: # timeout expired - if cb is not None: - cb() - - continue - - else: - ctypes.windll.kernel32.CloseHandle(hevt) - ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0) - raise # something else happened - else: - ctypes.windll.kernel32.CloseHandle(hevt) - ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0) - raise KeyboardInterrupt - - # finally: - # if False: - # ctypes.windll.kernel32.CloseHandle(hevt) - # ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0) - # break - - -class RTEventSink(object): - def __init__(self, store): - self.store = store - self.vcrtmod = store.vcrtmod - self.lastconn = None - - def OnNewTicks(self, ArrayTicks): - pass - - def OnServerShutDown(self): - self.store._vcrt_connection(self.store._RT_SHUTDOWN) - - def OnInternalEvent(self, p1, p2, p3): - if p1 != 1: # Apparently "Connection Event" - return - - if p2 == self.lastconn: - return # do not notify twice - - self.lastconn = p2 # keep new notification code - - # p2 should be 0 (disconn), 1 (conn) - self.store._vcrt_connection(self.store._RT_BASEMSG - p2) - - -class MetaSingleton(MetaParams): - '''Metaclass to make a metaclassed class a singleton''' - def __init__(cls, name, bases, dct): - super(MetaSingleton, cls).__init__(name, bases, dct) - cls._singleton = None - - def __call__(cls, *args, **kwargs): - if cls._singleton is None: - cls._singleton = ( - super(MetaSingleton, cls).__call__(*args, **kwargs)) - - return cls._singleton - - -class VCStore(with_metaclass(MetaSingleton, object)): - '''Singleton class wrapping an ibpy ibConnection instance. 
- - The parameters can also be specified in the classes which use this store, - like ``VCData`` and ``VCBroker`` - - ''' - BrokerCls = None # broker class will autoregister - DataCls = None # data class will auto register - - # 32 bit max unsigned int for openinterest correction - MAXUINT = 0xffffffff // 2 - - # to remove at least 1 sec or else there seem to be internal conv problems - MAXDATE1 = datetime.max - timedelta(days=1, seconds=1) - MAXDATE2 = datetime.max - timedelta(seconds=1) - - _RT_SHUTDOWN = -0xffff - _RT_BASEMSG = -0xfff0 - _RT_DISCONNECTED = -0xfff0 - _RT_CONNECTED = -0xfff1 - _RT_LIVE = -0xfff2 - _RT_DELAYED = -0xfff3 - _RT_TYPELIB = -0xffe0 - _RT_TYPEOBJ = -0xffe1 - _RT_COMTYPES = -0xffe2 - - @classmethod - def getdata(cls, *args, **kwargs): - '''Returns ``DataCls`` with args, kwargs''' - return cls.DataCls(*args, **kwargs) - - @classmethod - def getbroker(cls, *args, **kwargs): - '''Returns broker with *args, **kwargs from registered ``BrokerCls``''' - return cls.BrokerCls(*args, **kwargs) - - # DLLs to parse if found for TypeLibs - VC64_DLLS = ('VCDataSource64.dll', 'VCRealTimeLib64.dll', - 'COMTraderInterfaces64.dll',) - - VC_DLLS = ('VCDataSource.dll', 'VCRealTimeLib.dll', - 'COMTraderInterfaces.dll',) - - # Well known CLSDI - VC_TLIBS = ( - ['{EB2A77DC-A317-4160-8833-DECF16275A05}', 1, 0], # vcdatasource64 - ['{86F1DB04-2591-4866-A361-BB053D77FA18}', 1, 0], # vcrealtime64 - ['{20F8873C-35BE-4DB4-8C2A-0A8D40F8AEC3}', 1, 0], # raderinterface64 - ) - - VC_KEYNAME = r'SOFTWARE\VCG\Visual Chart 6\Config' - VC_KEYVAL = 'Directory' - VC_BINPATH = 'bin' - - def find_vchart(self): - # Tries to locate VisualChart in the registry to get the installation - # directory - # If not found returns well-known typelibs clsid - # Else it will scan the directory to locate the 64/32 bit dlls and - # return the paths - import _winreg # keep import local to avoid breaking test cases - - vcdir = None - - # Search for Directory in the usual root keys - for rkey in (_winreg.HKEY_CURRENT_USER, _winreg.HKEY_LOCAL_MACHINE,): - try: - vckey = _winreg.OpenKey(rkey, self.VC_KEYNAME) - except WindowsError as e: - continue - - # Try to get the key value - try: - vcdir, _ = _winreg.QueryValueEx(vckey, self.VC_KEYVAL) - except WindowsError as e: - continue - else: - break # found vcdir - - if vcdir is None: - return self.VC_TLIBS # no dir found, last resort - - # DLLs are in the bin directory - vcbin = os.path.join(vcdir, self.VC_BINPATH) - - # Search for the 3 libraries (64/32 bits) in the found dir - for dlls in (self.VC64_DLLS, self.VC_DLLS,): - dfound = [] - for dll in dlls: - fpath = os.path.join(vcbin, dll) - if not os.path.isfile(fpath): - break - dfound.append(fpath) - - if len(dfound) == len(dlls): - return dfound - - # not all dlls were found, last resort - return self.VC_TLIBS - - def _load_comtypes(self): - # Keep comtypes imports local to avoid breaking testcases - try: - import comtypes - self.comtypes = comtypes - - from comtypes.client import CreateObject, GetEvents, GetModule - self.CreateObject = CreateObject - self.GetEvents = GetEvents - self.GetModule = GetModule - except ImportError: - return False - - return True # notifiy comtypes was loaded - - def __init__(self): - self._connected = False # modules/objects created - - self.notifs = collections.deque() # hold notifications to deliver - - self.t_vcconn = None # control connection status - - # hold deques to market data symbols - self._dqs = collections.deque() - self._qdatas = dict() - self._tftable = dict() - - if not 
self._load_comtypes(): - txt = 'Failed to import comtypes' - msg = self._RT_COMTYPES, txt - self.put_notification(msg, *msg) - return - - vctypelibs = self.find_vchart() - # Try to load the modules - try: - self.vcdsmod = self.GetModule(vctypelibs[0]) - self.vcrtmod = self.GetModule(vctypelibs[1]) - self.vcctmod = self.GetModule(vctypelibs[2]) - except WindowsError as e: - self.vcdsmod = None - self.vcrtmod = None - self.vcctmod = None - txt = 'Failed to Load COM TypeLib Modules {}'.format(e) - msg = self._RT_TYPELIB, txt - self.put_notification(msg, *msg) - return - - # Try to load the main objects - try: - self.vcds = self.CreateObject(self.vcdsmod.DataSourceManager) - # self.vcrt = self.CreateObject(self.vcrtmod.RealTime) - self.vcct = self.CreateObject(self.vcctmod.Trader) - except WindowsError as e: - txt = ('Failed to Load COM TypeLib Objects but the COM TypeLibs ' - 'have been loaded. If VisualChart has been recently ' - 'installed/updated, restarting Windows may be necessary ' - 'to register the Objects: {}'.format(e)) - msg = self._RT_TYPELIB, txt - self.put_notification(msg, *msg) - self.vcds = None - self.vcrt = None - self.vcct = None - return - - self._connected = True - - # Build a table of VCRT Field_XX mappings for debugging purposes - self.vcrtfields = dict() - for name in dir(self.vcrtmod): - if name.startswith('Field'): - self.vcrtfields[getattr(self.vcrtmod, name)] = name - - # Modules and objects can be created - self._tftable = { - TimeFrame.Ticks: (self.vcdsmod.CT_Ticks, 1), - TimeFrame.MicroSeconds: (self.vcdsmod.CT_Ticks, 1), # To Resample - TimeFrame.Seconds: (self.vcdsmod.CT_Ticks, 1), # To Resample - TimeFrame.Minutes: (self.vcdsmod.CT_Minutes, 1), - TimeFrame.Days: (self.vcdsmod.CT_Days, 1), - TimeFrame.Weeks: (self.vcdsmod.CT_Weeks, 1), - TimeFrame.Months: (self.vcdsmod.CT_Months, 1), - TimeFrame.Years: (self.vcdsmod.CT_Months, 12), - } - - def put_notification(self, msg, *args, **kwargs): - self.notifs.append((msg, args, kwargs)) - - def get_notifications(self): - '''Return the pending "store" notifications''' - self.notifs.append(None) # Mark current end of notifs - return [x for x in iter(self.notifs.popleft, None)] # popleft til None - - def start(self, data=None, broker=None): - if not self._connected: - return - - if self.t_vcconn is None: - # Kickstart connection thread check - self.t_vcconn = t = threading.Thread(target=self._start_vcrt) - t.daemon = True # Do not stop a general exit - t.start() - - if broker is not None: - t = threading.Thread(target=self._t_broker, args=(broker,)) - t.daemon = True - t.start() - - def stop(self): - pass # nothing to do - - def connected(self): - return self._connected - - def _start_vcrt(self): - # Use VCRealTime to monitor the connection status - self.comtypes.CoInitialize() # running in another thread - vcrt = self.CreateObject(self.vcrtmod.RealTime) - sink = RTEventSink(self) - conn = self.GetEvents(vcrt, sink) - PumpEvents() - self.comtypes.CoUninitialize() - - def _vcrt_connection(self, status): - if status == -0xffff: - txt = 'VisualChart shutting down', - # p2: 0 -> Disconnected / p2: 1 -> Reconnected - elif status == -0xfff0: - txt = 'VisualChart is Disconnected' - elif status == -0xfff1: - txt = 'VisualChart is Connected' - else: - txt = 'VisualChart unknown connection status ' - - msg = txt, status - self.put_notification(msg, *msg) - - for q in self._dqs: - q.put(status) - - def _tf2ct(self, timeframe, compression): - # Translates timeframes to known compression types in VisualChart - timeframe, extracomp 
= self._tftable[timeframe] - return timeframe, compression * extracomp - - def _ticking(self, timeframe): - # Translates timeframes to known compression types in VisualChart - vctimeframe, _ = self._tftable[timeframe] - return vctimeframe == self.vcdsmod.CT_Ticks - - def _getq(self, data): - q = queue.Queue() - self._dqs.append(q) - self._qdatas[q] = data - return q - - def _delq(self, q): - self._dqs.remove(q) - self._qdatas.pop(q) - - def _rtdata(self, data, symbol): - kwargs = dict(data=data, symbol=symbol) - t = threading.Thread(target=self._t_rtdata, kwargs=kwargs) - t.daemon = True - t.start() - - # Broker functions - def _t_rtdata(self, data, symbol): - self.comtypes.CoInitialize() # running in another thread - vcrt = self.CreateObject(self.vcrtmod.RealTime) - conn = self.GetEvents(vcrt, data) - data._vcrt = vcrt - vcrt.RequestSymbolFeed(symbol, False) # no limits - PumpEvents() - del conn # ensure events go away - self.comtypes.CoUninitialize() - - def _symboldata(self, symbol): - - # Assumption -> we are connected and the symbol has been found - self.vcds.ActiveEvents = 0 - # self.vcds.EventsType = self.vcdsmod.EF_Always - - serie = self.vcds.NewDataSerie(symbol, - self.vcdsmod.CT_Days, 1, - self.MAXDATE1, self.MAXDATE2) - - syminfo = _SymInfo(serie.GetSymbolInfo()) - self.vcds.DeleteDataSource(serie) - return syminfo - - def _canceldirectdata(self, q): - self._delq(q) - - def _directdata(self, data, - symbol, timeframe, compression, d1, d2=None, - historical=False): - - # Assume the data has checked the existence of the symbol - timeframe, compression = self._tf2ct(timeframe, compression) - kwargs = locals().copy() # make a copy of the args - kwargs.pop('self') - kwargs['q'] = q = self._getq(data) - - t = threading.Thread(target=self._t_directdata, kwargs=kwargs) - t.daemon = True - t.start() - - # use the queue to synchronize until symbolinfo has been gotten - return q # tell the caller where to expect the hist data - - def _t_directdata(self, data, - symbol, timeframe, compression, d1, d2, q, - historical): - - self.comtypes.CoInitialize() # start com threading - vcds = self.CreateObject(self.vcdsmod.DataSourceManager) - - historical = historical or d2 is not None - if not historical: - vcds.ActiveEvents = 1 - vcds.EventsType = self.vcdsmod.EF_Always - else: - vcds.ActiveEvents = 0 - - if d2 is not None: - serie = vcds.NewDataSerie(symbol, timeframe, compression, d1, d2) - else: - serie = vcds.NewDataSerie(symbol, timeframe, compression, d1) - - data._setserie(serie) - - # processing of bars can continue - data.OnNewDataSerieBar(serie, forcepush=historical) - if historical: # push the last bar - q.put(None) # Signal end of transmission - dsconn = None - else: - dsconn = self.GetEvents(vcds, data) # finally connect the events - pass - - # pump events in this thread - call ping - PumpEvents(timeout=data._getpingtmout, cb=data.ping) - if dsconn is not None: - del dsconn # Docs recommend deleting the connection - - # Delete the series before coming out of the thread - vcds.DeleteDataSource(serie) - self.comtypes.CoUninitialize() # Terminate com threading - - # Broker functions - def _t_broker(self, broker): - self.comtypes.CoInitialize() # running in another thread - trader = self.CreateObject(self.vcctmod.Trader) - conn = self.GetEvents(trader, broker(trader)) - PumpEvents() - del conn # ensure events go away - self.comtypes.CoUninitialize() diff --git a/spaces/LittleYuan/My-Real-Bot/app.py b/spaces/LittleYuan/My-Real-Bot/app.py deleted file mode 100644 index 
97c59221c429e335c3a2e3413c11cc155d5b6122..0000000000000000000000000000000000000000 --- a/spaces/LittleYuan/My-Real-Bot/app.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -os.system("pip install gradio==2.9b23") -import random -import gradio as gr -from PIL import Image -import torch -from random import randint -import sys -from subprocess import call -import psutil - - - - -torch.hub.download_url_to_file('http://people.csail.mit.edu/billf/project%20pages/sresCode/Markov%20Random%20Fields%20for%20Super-Resolution_files/100075_lowres.jpg', 'bear.jpg') - - -def run_cmd(command): - try: - print(command) - call(command, shell=True) - except KeyboardInterrupt: - print("Process interrupted") - sys.exit(1) -run_cmd("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P .") -run_cmd("pip install basicsr") -run_cmd("pip freeze") - -os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P .") - - -def inference(img,mode): - _id = randint(1, 10000) - INPUT_DIR = "/tmp/input_image" + str(_id) + "/" - OUTPUT_DIR = "/tmp/output_image" + str(_id) + "/" - run_cmd("rm -rf " + INPUT_DIR) - run_cmd("rm -rf " + OUTPUT_DIR) - run_cmd("mkdir " + INPUT_DIR) - run_cmd("mkdir " + OUTPUT_DIR) - basewidth = 256 - wpercent = (basewidth/float(img.size[0])) - hsize = int((float(img.size[1])*float(wpercent))) - img = img.resize((basewidth,hsize), Image.ANTIALIAS) - img.save(INPUT_DIR + "1.jpg", "JPEG") - if mode == "base": - run_cmd("python inference_realesrgan.py -n RealESRGAN_x4plus -i "+ INPUT_DIR + " -o " + OUTPUT_DIR) - else: - os.system("python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i "+ INPUT_DIR + " -o " + OUTPUT_DIR) - return os.path.join(OUTPUT_DIR, "1_out.jpg") - - - - -title = "Real-ESRGAN" -description = "Gradio demo for Real-ESRGAN. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once" -article = "
    Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data | Github Repo
    " - -gr.Interface( - inference, - [gr.inputs.Image(type="pil", label="Input"),gr.inputs.Radio(["base","anime"], type="value", default="base", label="model type")], - gr.outputs.Image(type="file", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['bear.jpg','base'], - ['anime.png','anime'] - ]).launch() \ No newline at end of file diff --git a/spaces/LittleYuan/My-Real-Bot/realesrgan/archs/__init__.py b/spaces/LittleYuan/My-Real-Bot/realesrgan/archs/__init__.py deleted file mode 100644 index f3fbbf3b78e33b61fd4c33a564a9a617010d90de..0000000000000000000000000000000000000000 --- a/spaces/LittleYuan/My-Real-Bot/realesrgan/archs/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -import importlib -from basicsr.utils import scandir -from os import path as osp - -# automatically scan and import arch modules for registry -# scan all the files that end with '_arch.py' under the archs folder -arch_folder = osp.dirname(osp.abspath(__file__)) -arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] -# import all the arch modules -_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames] diff --git a/spaces/Liu-LAB/GPT-academic/crazy_functions/crazy_functions_test.py b/spaces/Liu-LAB/GPT-academic/crazy_functions/crazy_functions_test.py deleted file mode 100644 index 0c623b8e027858b2579a021769bb304e34c4e373..0000000000000000000000000000000000000000 --- a/spaces/Liu-LAB/GPT-academic/crazy_functions/crazy_functions_test.py +++ /dev/null @@ -1,231 +0,0 @@ -""" -这是什么? - 这个文件用于函数插件的单元测试 - 运行方法 python crazy_functions/crazy_functions_test.py -""" - -# ============================================================================================================================== - -def validate_path(): - import os, sys - dir_name = os.path.dirname(__file__) - root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume) - sys.path.append(root_dir_assume) -validate_path() # validate path so you can run from base directory - -# ============================================================================================================================== - -from colorful import * -from toolbox import get_conf, ChatBotWithCookies -import contextlib -import os -import sys -from functools import wraps -proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \ - get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY') - -llm_kwargs = { - 'api_key': API_KEY, - 'llm_model': LLM_MODEL, - 'top_p':1.0, - 'max_length': None, - 'temperature':1.0, -} -plugin_kwargs = { } -chatbot = ChatBotWithCookies(llm_kwargs) -history = [] -system_prompt = "Serve me as a writing and programming assistant." 
-web_port = 1024 - -# ============================================================================================================================== - -def silence_stdout(func): - @wraps(func) - def wrapper(*args, **kwargs): - _original_stdout = sys.stdout - sys.stdout = open(os.devnull, 'w') - for q in func(*args, **kwargs): - sys.stdout = _original_stdout - yield q - sys.stdout = open(os.devnull, 'w') - sys.stdout.close() - sys.stdout = _original_stdout - return wrapper - -class CLI_Printer(): - def __init__(self) -> None: - self.pre_buf = "" - - def print(self, buf): - bufp = "" - for index, chat in enumerate(buf): - a, b = chat - bufp += sprint亮靛('[Me]:' + a) + '\n' - bufp += '[GPT]:' + b - if index < len(buf)-1: - bufp += '\n' - - if self.pre_buf!="" and bufp.startswith(self.pre_buf): - print(bufp[len(self.pre_buf):], end='') - else: - print('\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'+bufp, end='') - self.pre_buf = bufp - return - -cli_printer = CLI_Printer() -# ============================================================================================================================== -def test_解析一个Python项目(): - from crazy_functions.解析项目源代码 import 解析一个Python项目 - txt = "crazy_functions/test_project/python/dqn" - for cookies, cb, hist, msg in 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_解析一个Cpp项目(): - from crazy_functions.解析项目源代码 import 解析一个C项目 - txt = "crazy_functions/test_project/cpp/cppipc" - for cookies, cb, hist, msg in 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_Latex英文润色(): - from crazy_functions.Latex全文润色 import Latex英文润色 - txt = "crazy_functions/test_project/latex/attention" - for cookies, cb, hist, msg in Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_Markdown中译英(): - from crazy_functions.批量Markdown翻译 import Markdown中译英 - txt = "README.md" - for cookies, cb, hist, msg in Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_批量翻译PDF文档(): - from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档 - txt = "crazy_functions/test_project/pdf_and_word" - for cookies, cb, hist, msg in 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_谷歌检索小助手(): - from crazy_functions.谷歌检索小助手 import 谷歌检索小助手 - txt = "https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG=" - for cookies, cb, hist, msg in 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_总结word文档(): - from crazy_functions.总结word文档 import 总结word文档 - txt = "crazy_functions/test_project/pdf_and_word" - for cookies, cb, hist, msg in 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_下载arxiv论文并翻译摘要(): - from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要 - txt = "1812.10695" - for cookies, cb, hist, msg in 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_联网回答问题(): - from crazy_functions.联网的ChatGPT import 连接网络回答问题 - # txt = "谁是应急食品?" - # >> '根据以上搜索结果可以得知,应急食品是“原神”游戏中的角色派蒙的外号。' - # txt = "道路千万条,安全第一条。后面两句是?" - # >> '行车不规范,亲人两行泪。' - # txt = "You should have gone for the head. What does that mean?" - # >> The phrase "You should have gone for the head" is a quote from the Marvel movies, Avengers: Infinity War and Avengers: Endgame. 
It was spoken by the character Thanos in Infinity War and by Thor in Endgame. - txt = "AutoGPT是什么?" - for cookies, cb, hist, msg in 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print("当前问答:", cb[-1][-1].replace("\n"," ")) - for i, it in enumerate(cb): print亮蓝(it[0]); print亮黄(it[1]) - -def test_解析ipynb文件(): - from crazy_functions.解析JupyterNotebook import 解析ipynb文件 - txt = "crazy_functions/test_samples" - for cookies, cb, hist, msg in 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - - -def test_数学动画生成manim(): - from crazy_functions.数学动画生成manim import 动画生成 - txt = "A ball split into 2, and then split into 4, and finally split into 8." - for cookies, cb, hist, msg in 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - - - -def test_Markdown多语言(): - from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言 - txt = "README.md" - history = [] - for lang in ["English", "French", "Japanese", "Korean", "Russian", "Italian", "German", "Portuguese", "Arabic"]: - plugin_kwargs = {"advanced_arg": lang} - for cookies, cb, hist, msg in Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - print(cb) - -def test_Langchain知识库(): - from crazy_functions.Langchain知识库 import 知识库问答 - txt = "./" - chatbot = ChatBotWithCookies(llm_kwargs) - for cookies, cb, hist, msg in silence_stdout(知识库问答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - cli_printer.print(cb) # print(cb) - - chatbot = ChatBotWithCookies(cookies) - from crazy_functions.Langchain知识库 import 读取知识库作答 - txt = "What is the installation method?" - for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - cli_printer.print(cb) # print(cb) - -def test_Langchain知识库读取(): - from crazy_functions.Langchain知识库 import 读取知识库作答 - txt = "远程云服务器部署?" 
- for cookies, cb, hist, msg in silence_stdout(读取知识库作答)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - cli_printer.print(cb) # print(cb) - -def test_Latex(): - from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比, Latex翻译中文并重新编译PDF - - # txt = r"https://arxiv.org/abs/1706.03762" - # txt = r"https://arxiv.org/abs/1902.03185" - # txt = r"https://arxiv.org/abs/2305.18290" - # txt = r"https://arxiv.org/abs/2305.17608" - # txt = r"https://arxiv.org/abs/2211.16068" # ACE - # txt = r"C:\Users\x\arxiv_cache\2211.16068\workfolder" # ACE - # txt = r"https://arxiv.org/abs/2002.09253" - # txt = r"https://arxiv.org/abs/2306.07831" - # txt = r"https://arxiv.org/abs/2212.10156" - # txt = r"https://arxiv.org/abs/2211.11559" - # txt = r"https://arxiv.org/abs/2303.08774" - txt = r"https://arxiv.org/abs/2303.12712" - # txt = r"C:\Users\fuqingxu\arxiv_cache\2303.12712\workfolder" - - - for cookies, cb, hist, msg in (Latex翻译中文并重新编译PDF)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - cli_printer.print(cb) # print(cb) - - - - # txt = "2302.02948.tar" - # print(txt) - # main_tex, work_folder = Latex预处理(txt) - # print('main tex:', main_tex) - # res = 编译Latex(main_tex, work_folder) - # # for cookies, cb, hist, msg in silence_stdout(编译Latex)(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # cli_printer.print(cb) # print(cb) - - - -# test_解析一个Python项目() -# test_Latex英文润色() -# test_Markdown中译英() -# test_批量翻译PDF文档() -# test_谷歌检索小助手() -# test_总结word文档() -# test_下载arxiv论文并翻译摘要() -# test_解析一个Cpp项目() -# test_联网回答问题() -# test_解析ipynb文件() -# test_数学动画生成manim() -# test_Langchain知识库() -# test_Langchain知识库读取() -if __name__ == "__main__": - test_Latex() - input("程序完成,回车退出。") - print("退出。") \ No newline at end of file diff --git a/spaces/LuxOAI/ChatGpt-Web/app/config/build.ts b/spaces/LuxOAI/ChatGpt-Web/app/config/build.ts deleted file mode 100644 index 79ed5d3e89496bd52f807d7657a57c2357d484a7..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/ChatGpt-Web/app/config/build.ts +++ /dev/null @@ -1,24 +0,0 @@ -const COMMIT_ID: string = (() => { - try { - const childProcess = require("child_process"); - return childProcess - .execSync('git log -1 --format="%at000" --date=unix') - .toString() - .trim(); - } catch (e) { - console.error("[Build Config] No git or not from git repo."); - return "unknown"; - } -})(); - -export const getBuildConfig = () => { - if (typeof process === "undefined") { - throw Error( - "[Server Config] you are importing a nodejs-only module outside of nodejs", - ); - } - - return { - commitId: COMMIT_ID, - }; -}; diff --git a/spaces/MattyWhite/ChatGPT-ImageCaptioner2/detic/data/datasets/lvis_v1.py b/spaces/MattyWhite/ChatGPT-ImageCaptioner2/detic/data/datasets/lvis_v1.py deleted file mode 100644 index 4b9b279f17663def1c4913321efbb7490d591e90..0000000000000000000000000000000000000000 --- a/spaces/MattyWhite/ChatGPT-ImageCaptioner2/detic/data/datasets/lvis_v1.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import logging -import os - -from fvcore.common.timer import Timer -from detectron2.structures import BoxMode -from fvcore.common.file_io import PathManager -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets.lvis import get_lvis_instances_meta - -logger = logging.getLogger(__name__) - -__all__ = ["custom_load_lvis_json", "custom_register_lvis_instances"] - - -def custom_register_lvis_instances(name, metadata, json_file, image_root): - """ - """ - DatasetCatalog.register(name, lambda: custom_load_lvis_json( - json_file, image_root, name)) - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, - evaluator_type="lvis", **metadata - ) - - -def custom_load_lvis_json(json_file, image_root, dataset_name=None): - ''' - Modifications: - use `file_name` - convert neg_category_ids - add pos_category_ids - ''' - from lvis import LVIS - - json_file = PathManager.get_local_path(json_file) - - timer = Timer() - lvis_api = LVIS(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format( - json_file, timer.seconds())) - - catid2contid = {x['id']: i for i, x in enumerate( - sorted(lvis_api.dataset['categories'], key=lambda x: x['id']))} - if len(lvis_api.dataset['categories']) == 1203: - for x in lvis_api.dataset['categories']: - assert catid2contid[x['id']] == x['id'] - 1 - img_ids = sorted(lvis_api.imgs.keys()) - imgs = lvis_api.load_imgs(img_ids) - anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] - - ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), \ - "Annotation ids in '{}' are not unique".format(json_file) - - imgs_anns = list(zip(imgs, anns)) - logger.info("Loaded {} images in the LVIS v1 format from {}".format( - len(imgs_anns), json_file)) - - dataset_dicts = [] - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - if "file_name" in img_dict: - file_name = img_dict["file_name"] - if img_dict["file_name"].startswith("COCO"): - file_name = file_name[-16:] - record["file_name"] = os.path.join(image_root, file_name) - elif 'coco_url' in img_dict: - # e.g., http://images.cocodataset.org/train2017/000000391895.jpg - file_name = img_dict["coco_url"][30:] - record["file_name"] = os.path.join(image_root, file_name) - elif 'tar_index' in img_dict: - record['tar_index'] = img_dict['tar_index'] - - record["height"] = img_dict["height"] - record["width"] = img_dict["width"] - record["not_exhaustive_category_ids"] = img_dict.get( - "not_exhaustive_category_ids", []) - record["neg_category_ids"] = img_dict.get("neg_category_ids", []) - # NOTE: modified by Xingyi: convert to 0-based - record["neg_category_ids"] = [ - catid2contid[x] for x in record["neg_category_ids"]] - if 'pos_category_ids' in img_dict: - record['pos_category_ids'] = [ - catid2contid[x] for x in img_dict.get("pos_category_ids", [])] - if 'captions' in img_dict: - record['captions'] = img_dict['captions'] - if 'caption_features' in img_dict: - record['caption_features'] = img_dict['caption_features'] - image_id = record["image_id"] = img_dict["id"] - - objs = [] - for anno in anno_dict_list: - assert anno["image_id"] == image_id - if anno.get('iscrowd', 0) > 0: - continue - obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS} - obj["category_id"] = catid2contid[anno['category_id']] - if 'segmentation' in anno: - segm = anno["segmentation"] - valid_segm = [poly for poly in segm \ - if len(poly) % 2 == 0 and len(poly) >= 6] - # assert len(segm) == len( 
- # valid_segm - # ), "Annotation contains an invalid polygon with < 3 points" - if not len(segm) == len(valid_segm): - print('Annotation contains an invalid polygon with < 3 points') - assert len(segm) > 0 - obj["segmentation"] = segm - objs.append(obj) - record["annotations"] = objs - dataset_dicts.append(record) - - return dataset_dicts - -_CUSTOM_SPLITS_LVIS = { - "lvis_v1_train+coco": ("coco/", "lvis/lvis_v1_train+coco_mask.json"), - "lvis_v1_train_norare": ("coco/", "lvis/lvis_v1_train_norare.json"), -} - - -for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items(): - custom_register_lvis_instances( - key, - get_lvis_instances_meta(key), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) - - -def get_lvis_22k_meta(): - from .lvis_22k_categories import CATEGORIES - cat_ids = [k["id"] for k in CATEGORIES] - assert min(cat_ids) == 1 and max(cat_ids) == len( - cat_ids - ), "Category ids are not in [1, #categories], as expected" - # Ensure that the category list is sorted by id - lvis_categories = sorted(CATEGORIES, key=lambda x: x["id"]) - thing_classes = [k["name"] for k in lvis_categories] - meta = {"thing_classes": thing_classes} - return meta - -_CUSTOM_SPLITS_LVIS_22K = { - "lvis_v1_train_22k": ("coco/", "lvis/lvis_v1_train_lvis-22k.json"), -} - -for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS_22K.items(): - custom_register_lvis_instances( - key, - get_lvis_22k_meta(), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) \ No newline at end of file diff --git a/spaces/MoonQiu/LongerCrafter/README.md b/spaces/MoonQiu/LongerCrafter/README.md deleted file mode 100644 index f2cbb134762fdf945168464da1cb9d24632cdbc1..0000000000000000000000000000000000000000 --- a/spaces/MoonQiu/LongerCrafter/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: LongerCrafter -emoji: 🌖 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 4.1.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/robust_scanner/README.md b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/robust_scanner/README.md deleted file mode 100644 index bc7403e3a104375973cdbf0c4e6cbf09cf08f1e8..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/robust_scanner/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# RobustScanner - -> [RobustScanner: Dynamically Enhancing Positional Clues for Robust Text Recognition](https://arxiv.org/abs/2007.07542) - - - -## Abstract - -The attention-based encoder-decoder framework has recently achieved impressive results for scene text recognition, and many variants have emerged with improvements in recognition quality. However, it performs poorly on contextless texts (e.g., random character sequences) which is unacceptable in most of real application scenarios. In this paper, we first deeply investigate the decoding process of the decoder. We empirically find that a representative character-level sequence decoder utilizes not only context information but also positional information. Contextual information, which the existing approaches heavily rely on, causes the problem of attention drift. 
To suppress such side-effect, we propose a novel position enhancement branch, and dynamically fuse its outputs with those of the decoder attention module for scene text recognition. Specifically, it contains a position aware module to enable the encoder to output feature vectors encoding their own spatial positions, and an attention module to estimate glimpses using the positional clue (i.e., the current decoding time step) only. The dynamic fusion is conducted for more robust feature via an element-wise gate mechanism. Theoretically, our proposed method, dubbed \\emph{RobustScanner}, decodes individual characters with dynamic ratio between context and positional clues, and utilizes more positional ones when the decoding sequences with scarce context, and thus is robust and practical. Empirically, it has achieved new state-of-the-art results on popular regular and irregular text recognition benchmarks while without much performance drop on contextless benchmarks, validating its robustness in both contextual and contextless application scenarios. - -
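The dynamic fusion mentioned above boils down to an element-wise gate that mixes a context-driven glimpse with a position-driven glimpse at every decoding step. The snippet below is a minimal PyTorch sketch of that idea under assumed names and feature sizes; it is not the MMOCR implementation, whose modules and signatures differ.

```python
import torch
import torch.nn as nn


class GatedFusion(nn.Module):
    """Element-wise gated fusion of context and position glimpses (sketch only)."""

    def __init__(self, dim: int = 512):
        super().__init__()
        # One gate value per channel, computed from both glimpses concatenated.
        self.gate = nn.Sequential(nn.Linear(2 * dim, dim), nn.Sigmoid())

    def forward(self, context_glimpse: torch.Tensor,
                position_glimpse: torch.Tensor) -> torch.Tensor:
        # Both inputs: (batch, seq_len, dim), one glimpse per decoded character.
        g = self.gate(torch.cat([context_glimpse, position_glimpse], dim=-1))
        # A small gate shifts weight toward positional clues when context is weak.
        return g * context_glimpse + (1.0 - g) * position_glimpse


# Toy usage with random glimpses.
fusion = GatedFusion(dim=512)
ctx = torch.randn(2, 10, 512)
pos = torch.randn(2, 10, 512)
fused = fusion(ctx, pos)  # shape: (2, 10, 512)
```

Under this reading, contextless inputs such as random character strings simply push the gate toward the positional branch, which is the robustness behaviour the abstract claims.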
    - -## Dataset - -### Train Dataset - -| trainset | instance_num | repeat_num | source | -| :--------: | :----------: | :--------: | :------------------------: | -| icdar_2011 | 3567 | 20 | real | -| icdar_2013 | 848 | 20 | real | -| icdar2015 | 4468 | 20 | real | -| coco_text | 42142 | 20 | real | -| IIIT5K | 2000 | 20 | real | -| SynthText | 2400000 | 1 | synth | -| SynthAdd | 1216889 | 1 | synth, 1.6m in [\[1\]](#1) | -| Syn90k | 2400000 | 1 | synth | - -### Test Dataset - -| testset | instance_num | type | -| :-----: | :----------: | :---------------------------: | -| IIIT5K | 3000 | regular | -| SVT | 647 | regular | -| IC13 | 1015 | regular | -| IC15 | 2077 | irregular | -| SVTP | 645 | irregular, 639 in [\[1\]](#1) | -| CT80 | 288 | irregular | - -## Results and Models - -| Methods | GPUs | | Regular Text | | | | Irregular Text | | download | -| :------------------------------------------------------------------: | :--: | :----: | :----------: | :-------: | :-: | :-------: | :------------: | :----: | :-------------------------------------------------------------------: | -| | | IIIT5K | SVT | IC13-1015 | | IC15-2077 | SVTP | CT80 | | -| [RobustScanner](/configs/textrecog/robust_scanner/robustscanner_resnet31_5e_st-sub_mj-sub_sa_real.py) | 4 | 0.9510 | 0.9011 | 0.9320 | | 0.7578 | 0.8078 | 0.8750 | [model](https://download.openmmlab.com/mmocr/textrecog/robust_scanner/robustscanner_resnet31_5e_st-sub_mj-sub_sa_real/robustscanner_resnet31_5e_st-sub_mj-sub_sa_real_20220915_152447-7fc35929.pth) \| [log](https://download.openmmlab.com/mmocr/textrecog/robust_scanner/robustscanner_resnet31_5e_st-sub_mj-sub_sa_real/20220915_152447.log) | -| [RobustScanner-TTA](/configs/textrecog/robust_scanner/robustscanner_resnet31_5e_st-sub_mj-sub_sa_real.py) | 4 | 0.9487 | 0.9011 | 0.9261 | | 0.7805 | 0.8124 | 0.8819 | | - -## References - -\[1\] Li, Hui and Wang, Peng and Shen, Chunhua and Zhang, Guyu. Show, attend and read: A simple and strong baseline for irregular text recognition. In AAAI 2019. - -## Citation - -```bibtex -@inproceedings{yue2020robustscanner, - title={RobustScanner: Dynamically Enhancing Positional Clues for Robust Text Recognition}, - author={Yue, Xiaoyu and Kuang, Zhanghui and Lin, Chenhao and Sun, Hongbin and Zhang, Wayne}, - booktitle={European Conference on Computer Vision}, - year={2020} -} -``` diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/utils/collect_env.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/utils/collect_env.py deleted file mode 100644 index cf56ecc77902841220cb3e9040033de82fe81e2c..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/utils/collect_env.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from mmengine.utils import get_git_hash -from mmengine.utils.dl_utils import collect_env as collect_base_env - -import mmocr - - -def collect_env(): - """Collect the information of the running environments.""" - env_info = collect_base_env() - env_info['MMOCR'] = mmocr.__version__ + '+' + get_git_hash()[:7] - return env_info - - -if __name__ == '__main__': - for name, val in collect_env().items(): - print(f'{name}: {val}') diff --git a/spaces/Mrchuw/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md b/spaces/Mrchuw/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md deleted file mode 100644 index 9af54dca9f1956d33877bf7df09b34c2d6ddeeaf..0000000000000000000000000000000000000000 --- a/spaces/Mrchuw/Image-Animation-using-Thin-Plate-Spline-Motion-Model/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Animation Using Thin Plate Spline Motion Model -emoji: 👁 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.0.19 -app_file: app.py -pinned: false -duplicated_from: CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/__init__.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/attention.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/attention.py deleted file mode 100644 index 99692b281794385a97af341d03dea0ee6c46b95b..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/attention.py +++ /dev/null @@ -1,530 +0,0 @@ -# Lint as: python3 -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Keras-based attention layer.""" -# pylint: disable=g-classes-have-attributes -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function - -import collections -import math -import string - -import numpy as np -import tensorflow as tf - -from official.nlp.modeling.layers import masked_softmax - -EinsumDense = tf.keras.layers.experimental.EinsumDense -_CHR_IDX = string.ascii_lowercase - - -def _build_attention_equation(qkv_rank, attn_axes): - """Builds einsum equations for the attention computation. - - Query, key, value inputs after projection are expected to have the shape as: - (bs, , , num_heads, channels). - bs and are treated as . 
- The attention operations can be generalized: - (1) Query-key dot product: - (, , num_heads, channels), (, - , num_heads, channels) -> (, - num_heads, , ) - (2) Combination: - (, num_heads, , ), - (, , num_heads, channels) -> (, - , num_heads, channels) - - Args: - qkv_rank: the rank of query, key, value tensors. - attn_axes: a list/tuple of axes, [1, rank), that will do attention. - - Returns: - Einsum equations. - """ - target_notation = _CHR_IDX[:qkv_rank] - # `batch_dims` includes the head dim. - batch_dims = tuple(np.delete(range(qkv_rank), attn_axes + (qkv_rank - 1,))) - letter_offset = qkv_rank - source_notation = "" - for i in range(qkv_rank): - if i in batch_dims or i == qkv_rank - 1: - source_notation += target_notation[i] - else: - source_notation += _CHR_IDX[letter_offset] - letter_offset += 1 - - product_notation = "".join([target_notation[i] for i in batch_dims] + - [target_notation[i] for i in attn_axes] + - [source_notation[i] for i in attn_axes]) - dot_product_equation = "%s,%s->%s" % (source_notation, target_notation, - product_notation) - attn_scores_rank = len(product_notation) - combine_equation = "%s,%s->%s" % (product_notation, source_notation, - target_notation) - return dot_product_equation, combine_equation, attn_scores_rank - - -def _build_proj_equation(free_dims, bound_dims, output_dims): - """Builds an einsum equation for projections inside multi-head attention.""" - input_str = "" - kernel_str = "" - output_str = "" - bias_axes = "" - letter_offset = 0 - for i in range(free_dims): - char = _CHR_IDX[i + letter_offset] - input_str += char - output_str += char - - letter_offset += free_dims - for i in range(bound_dims): - char = _CHR_IDX[i + letter_offset] - input_str += char - kernel_str += char - - letter_offset += bound_dims - for i in range(output_dims): - char = _CHR_IDX[i + letter_offset] - kernel_str += char - output_str += char - bias_axes += char - equation = "%s,%s->%s" % (input_str, kernel_str, output_str) - - return equation, bias_axes, len(output_str) - - -def _get_output_shape(output_rank, known_last_dims): - return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims) - - -@tf.keras.utils.register_keras_serializable(package="Text") -class MultiHeadAttention(tf.keras.layers.Layer): - """MultiHeadAttention layer. - - This is an implementation of multi-headed attention based on "Attention - is all you Need". If `query`, `key,` `value` are the same, then - this is self-attention. Each timestep in `query` attends to the - corresponding sequence in `key`, and returns a fixed-width vector. - - This layer first projects `query`, `key` and `value`. These are - (effectively) a list of tensors of length `num_attention_heads`, where the - corresponding shapes are [batch_size, , key_size], - [batch_size, , key_size], - [batch_size, , value_size]. - - Then, the query and key tensors are dot-producted and scaled. These are - softmaxed to obtain attention probabilities. The value tensors are then - interpolated by these probabilities, then concatenated back to a single - tensor. - - Finally, the result tensor with the last dimension as value_size can take an - linear projection and return. - - Examples: - - Performs 1D cross-attention over two sequence inputs with an attention mask. - Returns the additional attention weights over heads. - - >>> layer = MultiHeadAttention(num_heads=2, key_size=2, - ... 
return_attention_scores=True) - >>> target = tf.keras.Input(shape=[8, 16]) - >>> source = tf.keras.Input(shape=[4, 16]) - >>> mask_tensor = tf.keras.Input(shape=[8, 4]) - >>> output_tensor, weights = layer([target, source]) - >>> print(output_tensor.shape), print(weights.shape) - (None, 8, 16) (None, 2, 8, 4) - - Performs 2D self-attention over a 5D input tensor on axes 2 and 3. - - >>> layer = MultiHeadAttention(num_heads=2, key_size=2, attention_axes=(2, 3)) - >>> input_tensor = tf.keras.Input(shape=[5, 3, 4, 16]) - >>> output_tensor = layer([input_tensor, input_tensor]) - >>> print(output_tensor.shape) - (None, 5, 3, 4, 16) - - Arguments: - num_heads: Number of attention heads. - key_size: Size of each attention head for query and key. - value_size: Size of each attention head for value. - dropout: Dropout probability. - use_bias: Boolean, whether the dense layers use bias vectors/matrices. - output_shape: The expected shape of an output tensor, besides the batch and - sequence dims. If not specified, projects back to the key feature dim. - attention_axes: axes over which the attention is applied. `None` means - attention over all axes, but batch, heads, and features. - return_attention_scores: bool, if `True`, returns the multi-head - attention scores as an additional output argument. - kernel_initializer: Initializer for dense layer kernels. - bias_initializer: Initializer for dense layer biases. - kernel_regularizer: Regularizer for dense layer kernels. - bias_regularizer: Regularizer for dense layer biases. - activity_regularizer: Regularizer for dense layer activity. - kernel_constraint: Constraint for dense layer kernels. - bias_constraint: Constraint for dense layer kernels. - """ - - def __init__(self, - num_heads, - key_size, - value_size=None, - dropout=0.0, - use_bias=True, - output_shape=None, - attention_axes=None, - return_attention_scores=False, - kernel_initializer="glorot_uniform", - bias_initializer="zeros", - kernel_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - kernel_constraint=None, - bias_constraint=None, - **kwargs): - super(MultiHeadAttention, self).__init__(**kwargs) - self._num_heads = num_heads - self._key_size = key_size - self._value_size = value_size if value_size else key_size - self._dropout = dropout - self._use_bias = use_bias - self._output_shape = output_shape - self._return_attention_scores = return_attention_scores - self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) - self._bias_initializer = tf.keras.initializers.get(bias_initializer) - self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) - self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) - self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) - self._bias_constraint = tf.keras.constraints.get(bias_constraint) - if attention_axes is not None and not isinstance(attention_axes, - collections.abc.Sized): - self._attention_axes = (attention_axes,) - else: - self._attention_axes = attention_axes - - def get_config(self): - config = { - "num_heads": - self._num_heads, - "key_size": - self._key_size, - "value_size": - self._value_size, - "dropout": - self._dropout, - "use_bias": - self._use_bias, - "output_shape": - self._output_shape, - "attention_axes": - self._attention_axes, - "return_attention_scores": - self._return_attention_scores, - "kernel_initializer": - tf.keras.initializers.serialize(self._kernel_initializer), - "bias_initializer": - 
tf.keras.initializers.serialize(self._bias_initializer), - "kernel_regularizer": - tf.keras.regularizers.serialize(self._kernel_regularizer), - "bias_regularizer": - tf.keras.regularizers.serialize(self._bias_regularizer), - "activity_regularizer": - tf.keras.regularizers.serialize(self._activity_regularizer), - "kernel_constraint": - tf.keras.constraints.serialize(self._kernel_constraint), - "bias_constraint": - tf.keras.constraints.serialize(self._bias_constraint) - } - base_config = super(MultiHeadAttention, self).get_config() - return dict(list(base_config.items()) + list(config.items())) - - def build(self, input_shape): - inputs_len = len(input_shape) - if inputs_len > 3 or inputs_len < 2: - raise ValueError( - "Expects inputs list of length 2 or 3, namely [query, value] or " - "[query, value, key]. " - "Given length: %d" % inputs_len) - tensor_shapes = tf.nest.map_structure(tf.TensorShape, input_shape) - query_shape = tensor_shapes[0] - value_shape = tensor_shapes[1] - key_shape = tensor_shapes[2] if inputs_len == 3 else value_shape - - common_kwargs = dict( - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer, - activity_regularizer=self._activity_regularizer, - kernel_constraint=self._kernel_constraint, - bias_constraint=self._bias_constraint) - - free_dims = query_shape.rank - 1 - einsum_equation, bias_axes, output_rank = _build_proj_equation( - free_dims, bound_dims=1, output_dims=2) - self._query_dense = EinsumDense( - einsum_equation, - output_shape=_get_output_shape(output_rank - 1, - [self._num_heads, self._key_size]), - bias_axes=bias_axes if self._use_bias else None, - name="query", - **common_kwargs) - einsum_equation, bias_axes, output_rank = _build_proj_equation( - key_shape.rank - 1, bound_dims=1, output_dims=2) - self._key_dense = EinsumDense( - einsum_equation, - output_shape=_get_output_shape(output_rank - 1, - [self._num_heads, self._key_size]), - bias_axes=bias_axes if self._use_bias else None, - name="key", - **common_kwargs) - einsum_equation, bias_axes, output_rank = _build_proj_equation( - value_shape.rank - 1, bound_dims=1, output_dims=2) - self._value_dense = EinsumDense( - einsum_equation, - output_shape=_get_output_shape(output_rank - 1, - [self._num_heads, self._value_size]), - bias_axes=bias_axes if self._use_bias else None, - name="value", - **common_kwargs) - - # Builds the attention computations for multi-head dot product attention. - # These computations could be wrapped into the keras attention layer once it - # support mult-head einsum computations. - self._build_attention(output_rank) - if self._output_shape: - if not isinstance(self._output_shape, collections.abc.Sized): - output_shape = [self._output_shape] - else: - output_shape = self._output_shape - else: - output_shape = [query_shape[-1]] - einsum_equation, bias_axes, output_rank = _build_proj_equation( - free_dims, bound_dims=2, output_dims=len(output_shape)) - self._output_dense = EinsumDense( - einsum_equation, - output_shape=_get_output_shape(output_rank - 1, output_shape), - bias_axes=bias_axes if self._use_bias else None, - name="attention_output", - **common_kwargs) - super(MultiHeadAttention, self).build(input_shape) - - def _build_attention(self, qkv_rank): - """Builds multi-head dot-product attention computations. 
- - This function builds attributes necessary for `_compute_attention` to - costomize attention computation to replace the default dot-product - attention. - - Args: - qkv_rank: the rank of query, key, value tensors. - """ - if self._attention_axes is None: - self._attention_axes = tuple(range(1, qkv_rank - 2)) - else: - self._attention_axes = tuple(self._attention_axes) - self._dot_product_equation, self._combine_equation, attn_scores_rank = ( - _build_attention_equation(qkv_rank, attn_axes=self._attention_axes)) - norm_axes = tuple( - range(attn_scores_rank - len(self._attention_axes), attn_scores_rank)) - self._masked_softmax = masked_softmax.MaskedSoftmax( - mask_expansion_axes=[1], normalization_axes=norm_axes) - self._dropout_layer = tf.keras.layers.Dropout(rate=self._dropout) - - def _compute_attention(self, - query_tensor, - key_tensor, - value_tensor, - attention_mask=None): - """Applies Dot-product attention with query, key, value tensors. - - This function defines the computation inside `call` with projected - multi-head Q, K, V inputs. Users can override this function for customized - attention implementation. - - Args: - query_tensor: Projected query `Tensor` of shape `[B, T, N, key_size]`. - key_tensor: Projected key `Tensor` of shape `[B, T, N, key_size]`. - value_tensor: Projected value `Tensor` of shape `[B, T, N, value_size]`. - attention_mask: a boolean mask of shape `[B, T, S]`, that prevents - attention to certain positions. - - Returns: - attention_output: Multi-headed outputs of attention computation. - attention_scores: Multi-headed attention weights. - """ - # Take the dot product between "query" and "key" to get the raw - # attention scores. - attention_scores = tf.einsum(self._dot_product_equation, key_tensor, - query_tensor) - attention_scores = tf.multiply(attention_scores, - 1.0 / math.sqrt(float(self._key_size))) - - # Normalize the attention scores to probabilities. - # `attention_scores` = [B, N, T, S] - attention_scores = self._masked_softmax(attention_scores, attention_mask) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. - attention_scores_dropout = self._dropout_layer(attention_scores) - - # `context_layer` = [B, T, N, H] - attention_output = tf.einsum(self._combine_equation, - attention_scores_dropout, value_tensor) - return attention_output, attention_scores - - def call(self, inputs, attention_mask=None): - """Implements the forward pass. - - Size glossary: - * Number of heads (H): the number of attention heads. - * Value size (V): the size of each value embedding per head. - * Key size (K): the size of each key embedding per head. Equally, the size - of each query embedding per head. Typically K <= V. - * Batch dimensions (B). - * Query (target) attention axes shape (T). - * Value (source) attention axes shape (S), the rank must match the target. - - Args: - inputs: List of the following tensors: - * query: Query `Tensor` of shape `[B, T, dim]`. - * value: Value `Tensor` of shape `[B, S, dim]`. - * key: Optional key `Tensor` of shape `[B, S, dim]`. If not given, will - use `value` for both `key` and `value`, which is the most common case. - attention_mask: a boolean mask of shape `[B, T, S]`, that prevents - attention to certain positions. - - Returns: - attention_output: The result of the computation, of shape [B, T, E], - where `T` is for target sequence shapes and `E` is the query input last - dimension if `output_shape` is `None`. 
Otherwise, the multi-head outputs - are project to the shape specified by `output_shape`. - attention_scores: [Optional] multi-head attention coeffients over - attention - axes. - """ - inputs_len = len(inputs) - if inputs_len > 3 or inputs_len < 2: - raise ValueError( - "Expects inputs list of length 2 or 3, namely [query, value] or " - "[query, value, key]. " - "Given length: %d" % inputs_len) - query = inputs[0] - value = inputs[1] - key = inputs[2] if inputs_len == 3 else value - - # N = `num_attention_heads` - # H = `size_per_head` - # `query_tensor` = [B, T, N ,H] - query_tensor = self._query_dense(query) - - # `key_tensor` = [B, S, N, H] - key_tensor = self._key_dense(key) - - # `value_tensor` = [B, S, N, H] - value_tensor = self._value_dense(value) - - attention_output, attention_scores = self._compute_attention( - query_tensor, key_tensor, value_tensor, attention_mask) - attention_output = self._output_dense(attention_output) - - if self._return_attention_scores: - return attention_output, attention_scores - return attention_output - - -@tf.keras.utils.register_keras_serializable(package="Text") -class CachedAttention(MultiHeadAttention): - """Attention layer with cache used for auto-agressive decoding. - - Arguments are the same as `MultiHeadAttention` layer. - """ - - def _update_cache(self, key_tensor, value_tensor, cache, decode_loop_step): - """Updates cache states and gets full-length key/value tensors.""" - # Combines cached keys and values with new keys and values. - if decode_loop_step is not None: - # TPU special case. - key_seq_dim = cache["key"].shape.as_list()[1] - indices = tf.reshape( - tf.one_hot(decode_loop_step, key_seq_dim, dtype=key_tensor.dtype), - [1, key_seq_dim, 1, 1]) - key_tensor = cache["key"] + key_tensor * indices - value_seq_dim = cache["value"].shape.as_list()[1] - indices = tf.reshape( - tf.one_hot(decode_loop_step, value_seq_dim, dtype=value_tensor.dtype), - [1, value_seq_dim, 1, 1]) - value_tensor = cache["value"] + value_tensor * indices - else: - key_tensor = tf.concat( - [tf.cast(cache["key"], key_tensor.dtype), key_tensor], axis=1) - value_tensor = tf.concat( - [tf.cast(cache["value"], value_tensor.dtype), value_tensor], axis=1) - - # Update cache - cache["key"] = key_tensor - cache["value"] = value_tensor - - return key_tensor, value_tensor - - def call(self, - inputs, - attention_mask=None, - cache=None, - decode_loop_step=None): - from_tensor = inputs[0] - to_tensor = inputs[1] - - # Scalar dimensions referenced here: - # B = batch size (number of sequences) - # F = `from_tensor` sequence length - # T = `to_tensor` sequence length - # N = `num_attention_heads` - # H = `size_per_head` - # `query_tensor` = [B, F, N ,H] - query_tensor = self._query_dense(from_tensor) - - # `key_tensor` = [B, T, N, H] - key_tensor = self._key_dense(to_tensor) - - # `value_tensor` = [B, T, N, H] - value_tensor = self._value_dense(to_tensor) - - if cache: - key_tensor, value_tensor = self._update_cache(key_tensor, value_tensor, - cache, decode_loop_step) - - # Take the dot product between "query" and "key" to get the raw - # attention scores. - attention_scores = tf.einsum(self._dot_product_equation, key_tensor, - query_tensor) - attention_scores = tf.multiply(attention_scores, - 1.0 / math.sqrt(float(self._key_size))) - - # Normalize the attention scores to probabilities. 
- # `attention_scores` = [B, N, F, T] - attention_scores = self._masked_softmax(attention_scores, attention_mask) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. - attention_scores = self._dropout_layer(attention_scores) - # `context_layer` = [B, F, N, H] - attention_output = tf.einsum(self._combine_equation, attention_scores, - value_tensor) - attention_output = self._output_dense(attention_output) - if self._return_attention_scores: - return attention_output, attention_scores, cache - return attention_output, cache diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/README.md b/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/README.md deleted file mode 100644 index 1215ed574b316030f69713de8dc3000ea64e3df6..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/README.md +++ /dev/null @@ -1,218 +0,0 @@ -# Transformer Translation Model -This is an implementation of the Transformer translation model as described in -the [Attention is All You Need](https://arxiv.org/abs/1706.03762) paper. The -implementation leverages tf.keras and makes sure it is compatible with TF 2.x. - -**Note: this transformer folder is subject to be integrated into official/nlp -folder. Due to its dependencies, we will finish the refactoring after the model -garden 2.1 release.** - -## Contents - * [Contents](#contents) - * [Walkthrough](#walkthrough) - * [Detailed instructions](#detailed-instructions) - * [Environment preparation](#environment-preparation) - * [Download and preprocess datasets](#download-and-preprocess-datasets) - * [Model training and evaluation](#model-training-and-evaluation) - * [Implementation overview](#implementation-overview) - * [Model Definition](#model-definition) - * [Model Trainer](#model-trainer) - * [Test dataset](#test-dataset) - -## Walkthrough - -Below are the commands for running the Transformer model. See the -[Detailed instructions](#detailed-instructions) for more details on running the -model. - -``` -# Ensure that PYTHONPATH is correctly defined as described in -# https://github.com/tensorflow/models/tree/master/official#requirements -export PYTHONPATH="$PYTHONPATH:/path/to/models" - -cd /path/to/models/official/nlp/transformer - -# Export variables -PARAM_SET=big -DATA_DIR=$HOME/transformer/data -MODEL_DIR=$HOME/transformer/model_$PARAM_SET -VOCAB_FILE=$DATA_DIR/vocab.ende.32768 - -# Download training/evaluation/test datasets -python3 data_download.py --data_dir=$DATA_DIR - -# Train the model for 100000 steps and evaluate every 5000 steps on a single GPU. -# Each train step, takes 4096 tokens as a batch budget with 64 as sequence -# maximal length. -python3 transformer_main.py --data_dir=$DATA_DIR --model_dir=$MODEL_DIR \ - --vocab_file=$VOCAB_FILE --param_set=$PARAM_SET \ - --train_steps=100000 --steps_between_evals=5000 \ - --batch_size=4096 --max_length=64 \ - --bleu_source=$DATA_DIR/newstest2014.en \ - --bleu_ref=$DATA_DIR/newstest2014.de \ - --num_gpus=1 \ - --enable_time_history=false - -# Run during training in a separate process to get continuous updates, -# or after training is complete. -tensorboard --logdir=$MODEL_DIR -``` - -## Detailed instructions - - -0. 
### Environment preparation - - #### Add models repo to PYTHONPATH - Follow the instructions described in the [Requirements](https://github.com/tensorflow/models/tree/master/official#requirements) section to add the models folder to the python path. - - #### Export variables (optional) - - Export the following variables, or modify the values in each of the snippets below: - - ```shell - PARAM_SET=big - DATA_DIR=$HOME/transformer/data - MODEL_DIR=$HOME/transformer/model_$PARAM_SET - VOCAB_FILE=$DATA_DIR/vocab.ende.32768 - ``` - -1. ### Download and preprocess datasets - - [data_download.py](data_download.py) downloads and preprocesses the training and evaluation WMT datasets. After the data is downloaded and extracted, the training data is used to generate a vocabulary of subtokens. The evaluation and training strings are tokenized, and the resulting data is sharded, shuffled, and saved as TFRecords. - - 1.75GB of compressed data will be downloaded. In total, the raw files (compressed, extracted, and combined files) take up 8.4GB of disk space. The resulting TFRecord and vocabulary files are 722MB. The script takes around 40 minutes to run, with the bulk of the time spent downloading and ~15 minutes spent on preprocessing. - - Command to run: - ``` - python3 data_download.py --data_dir=$DATA_DIR - ``` - - Arguments: - * `--data_dir`: Path where the preprocessed TFRecord data, and vocab file will be saved. - * Use the `--help` or `-h` flag to get a full list of possible arguments. - -2. ### Model training and evaluation - - [transformer_main.py](transformer_main.py) creates a Transformer keras model, - and trains it uses keras model.fit(). - - Users need to adjust `batch_size` and `num_gpus` to get good performance - running multiple GPUs. - - **Note that:** - when using multiple GPUs or TPUs, this is the global batch size for all - devices. For example, if the batch size is `4096*4` and there are 4 devices, - each device will take 4096 tokens as a batch budget. - - Command to run: - ``` - python3 transformer_main.py --data_dir=$DATA_DIR --model_dir=$MODEL_DIR \ - --vocab_file=$VOCAB_FILE --param_set=$PARAM_SET - ``` - - Arguments: - * `--data_dir`: This should be set to the same directory given to the `data_download`'s `data_dir` argument. - * `--model_dir`: Directory to save Transformer model training checkpoints. - * `--vocab_file`: Path to subtoken vocabulary file. If data_download was used, you may find the file in `data_dir`. - * `--param_set`: Parameter set to use when creating and training the model. Options are `base` and `big` (default). - * `--enable_time_history`: Whether add TimeHistory call. If so, --log_steps must be specified. - * `--batch_size`: The number of tokens to consider in a batch. Combining with - `--max_length`, they decide how many sequences are used per batch. - * Use the `--help` or `-h` flag to get a full list of possible arguments. - - #### Using multiple GPUs - You can train these models on multiple GPUs using `tf.distribute.Strategy` API. - You can read more about them in this - [guide](https://www.tensorflow.org/guide/distribute_strategy). - - In this example, we have made it easier to use is with just a command line flag - `--num_gpus`. By default this flag is 1 if TensorFlow is compiled with CUDA, - and 0 otherwise. - - - --num_gpus=0: Uses tf.distribute.OneDeviceStrategy with CPU as the device. - - --num_gpus=1: Uses tf.distribute.OneDeviceStrategy with GPU as the device. 
- - --num_gpus=2+: Uses tf.distribute.MirroredStrategy to run synchronous - distributed training across the GPUs. - - #### Using Cloud TPUs - - You can train the Transformer model on Cloud TPUs using - `tf.distribute.TPUStrategy`. If you are not familiar with Cloud TPUs, it is - strongly recommended that you go through the - [quickstart](https://cloud.google.com/tpu/docs/quickstart) to learn how to - create a TPU and GCE VM. - - To run the Transformer model on a TPU, you must set - `--distribution_strategy=tpu`, `--tpu=$TPU_NAME`, and `--use_ctl=True` where - `$TPU_NAME` the name of your TPU in the Cloud Console. - - An example command to run Transformer on a v2-8 or v3-8 TPU would be: - - ```bash - python transformer_main.py \ - --tpu=$TPU_NAME \ - --model_dir=$MODEL_DIR \ - --data_dir=$DATA_DIR \ - --vocab_file=$DATA_DIR/vocab.ende.32768 \ - --bleu_source=$DATA_DIR/newstest2014.en \ - --bleu_ref=$DATA_DIR/newstest2014.end \ - --batch_size=6144 \ - --train_steps=2000 \ - --static_batch=true \ - --use_ctl=true \ - --param_set=big \ - --max_length=64 \ - --decode_batch_size=32 \ - --decode_max_length=97 \ - --padded_decode=true \ - --distribution_strategy=tpu - ``` - Note: `$MODEL_DIR` and `$DATA_DIR` must be GCS paths. - - #### Customizing training schedule - - By default, the model will train for 10 epochs, and evaluate after every epoch. The training schedule may be defined through the flags: - - * Training with steps: - * `--train_steps`: sets the total number of training steps to run. - * `--steps_between_evals`: Number of training steps to run between evaluations. - - #### Compute BLEU score during model evaluation - - Use these flags to compute the BLEU when the model evaluates: - - * `--bleu_source`: Path to file containing text to translate. - * `--bleu_ref`: Path to file containing the reference translation. - - When running `transformer_main.py`, use the flags: `--bleu_source=$DATA_DIR/newstest2014.en --bleu_ref=$DATA_DIR/newstest2014.de` - - #### Tensorboard - Training and evaluation metrics (loss, accuracy, approximate BLEU score, etc.) are logged, and can be displayed in the browser using Tensorboard. - ``` - tensorboard --logdir=$MODEL_DIR - ``` - The values are displayed at [localhost:6006](localhost:6006). - -## Implementation overview - -A brief look at each component in the code: - -### Model Definition -* [transformer.py](transformer.py): Defines a tf.keras.Model: `Transformer`. -* [embedding_layer.py](embedding_layer.py): Contains the layer that calculates the embeddings. The embedding weights are also used to calculate the pre-softmax probabilities from the decoder output. -* [attention_layer.py](attention_layer.py): Defines the multi-headed and self attention layers that are used in the encoder/decoder stacks. -* [ffn_layer.py](ffn_layer.py): Defines the feedforward network that is used in the encoder/decoder stacks. The network is composed of 2 fully connected layers. - -Other files: -* [beam_search.py](beam_search.py) contains the beam search implementation, which is used during model inference to find high scoring translations. - -### Model Trainer -[transformer_main.py](transformer_main.py) creates an `TransformerTask` to train and evaluate the model using tf.keras. - -### Test dataset -The [newstest2014 files](https://storage.googleapis.com/tf-perf-public/official_transformer/test_data/newstest2014.tgz) -are extracted from the [NMT Seq2Seq tutorial](https://google.github.io/seq2seq/nmt/#download-data). 
-The raw text files are converted from the SGM format of the -[WMT 2016](http://www.statmt.org/wmt16/translation-task.html) test sets. The -newstest2014 files are put into the `$DATA_DIR` when executing `data_download.py` diff --git a/spaces/NSect/multitrack-midi-music-generator/main.py b/spaces/NSect/multitrack-midi-music-generator/main.py deleted file mode 100644 index 2d9188d55bbaebad3518f93ba926213ff923d881..0000000000000000000000000000000000000000 --- a/spaces/NSect/multitrack-midi-music-generator/main.py +++ /dev/null @@ -1,157 +0,0 @@ -import os - -import gradio as gr - -from utils import ( - generate_song, - remove_last_instrument, - regenerate_last_instrument, - change_tempo, -) - - -os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" - -DESCRIPTION = """ -

    🎵 Multitrack Midi Generator 🎶

    AI-driven Music Composer: Creating Music One Instrument at a Time!

    This interactive application uses an AI model to generate music sequences based on a chosen genre and various user inputs. The app constructs the piece instrument by instrument.

    Features:
    • 🎼 Select the genre for the music.
    • 🌡️ Use the "Temperature" slider to adjust the randomness of the music generated (higher values will produce more random outputs).
    • ⏱️ Adjust the "Tempo" slider to change the speed of the music.
    • 🎹 Use the buttons to generate a new song from scratch, continue generation with the current settings, remove the last added instrument, regenerate the last added instrument with a new one, or change the tempo of the current song.

    Outputs:
    The app outputs the following:
    • 🎧 The audio of the generated song.
    • 📁 A MIDI file of the song.
    • 📊 A plot of the song's sequence.
    • 🎸 A list of the generated instruments.
    • 📝 The text sequence of the song.

    This application is built upon the inspiring work of Dr. Tristan Behrens.

    Enjoy creating your own music!
    -""" - - -genres = ["ROCK", "POP", "OTHER", "R&B/SOUL", "JAZZ", "ELECTRONIC", "RANDOM"] - -demo = gr.Blocks() - - -def run(): - with demo: - gr.HTML(DESCRIPTION) - gr.DuplicateButton(value="Duplicate Space for private use") - with gr.Row(): - with gr.Column(): - temp = gr.Slider( - minimum=0, maximum=1, step=0.05, value=0.85, label="Temperature" - ) - genre = gr.Dropdown( - choices=genres, value="POP", label="Select the genre" - ) - with gr.Row(): - btn_from_scratch = gr.Button("🧹 Start from scratch") - btn_continue = gr.Button("➡️ Continue Generation") - btn_remove_last = gr.Button("↩️ Remove last instrument") - btn_regenerate_last = gr.Button("🔄 Regenerate last instrument") - with gr.Column(): - with gr.Box(): - audio_output = gr.Video(show_share_button=True) - midi_file = gr.File() - with gr.Row(): - qpm = gr.Slider( - minimum=60, maximum=140, step=10, value=120, label="Tempo" - ) - btn_qpm = gr.Button("Change Tempo") - with gr.Row(): - with gr.Column(): - plot_output = gr.Plot() - with gr.Column(): - instruments_output = gr.Markdown("# List of generated instruments") - with gr.Row(): - text_sequence = gr.Text() - empty_sequence = gr.Text(visible=False) - with gr.Row(): - num_tokens = gr.Text(visible=False) - btn_from_scratch.click( - fn=generate_song, - inputs=[genre, temp, empty_sequence, qpm], - outputs=[ - audio_output, - midi_file, - plot_output, - instruments_output, - text_sequence, - num_tokens, - ], - ) - btn_continue.click( - fn=generate_song, - inputs=[genre, temp, text_sequence, qpm], - outputs=[ - audio_output, - midi_file, - plot_output, - instruments_output, - text_sequence, - num_tokens, - ], - ) - btn_remove_last.click( - fn=remove_last_instrument, - inputs=[text_sequence, qpm], - outputs=[ - audio_output, - midi_file, - plot_output, - instruments_output, - text_sequence, - num_tokens, - ], - ) - btn_regenerate_last.click( - fn=regenerate_last_instrument, - inputs=[text_sequence, qpm], - outputs=[ - audio_output, - midi_file, - plot_output, - instruments_output, - text_sequence, - num_tokens, - ], - ) - btn_qpm.click( - fn=change_tempo, - inputs=[text_sequence, qpm], - outputs=[ - audio_output, - midi_file, - plot_output, - instruments_output, - text_sequence, - num_tokens, - ], - ) - - demo.launch(server_name="0.0.0.0", server_port=7860) - - -if __name__ == "__main__": - run() diff --git a/spaces/NillJan/NelsonBot/README.md b/spaces/NillJan/NelsonBot/README.md deleted file mode 100644 index ea387cf93b7cf80d4e344943673b07f33c19a188..0000000000000000000000000000000000000000 --- a/spaces/NillJan/NelsonBot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: NelsonBot -emoji: 💻 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Nyari/Super-Resolution-Anime-Diffusion/inference.py b/spaces/Nyari/Super-Resolution-Anime-Diffusion/inference.py deleted file mode 100644 index 25b887c92a1ac1af8d62a7a2d19d522dc75f48da..0000000000000000000000000000000000000000 --- a/spaces/Nyari/Super-Resolution-Anime-Diffusion/inference.py +++ /dev/null @@ -1,407 +0,0 @@ -import os -import random - -import autocuda -from pyabsa.utils.pyabsa_utils import fprint - -from diffusers import ( - AutoencoderKL, - UNet2DConditionModel, - StableDiffusionPipeline, - StableDiffusionImg2ImgPipeline, - DPMSolverMultistepScheduler, -) -import gradio as gr -import torch -from PIL import Image -import utils -import datetime -import time 
-import psutil - -from Waifu2x.magnify import ImageMagnifier - -start_time = time.time() -is_colab = utils.is_google_colab() - -device = autocuda.auto_cuda() - -magnifier = ImageMagnifier() - - -class Model: - def __init__(self, name, path="", prefix=""): - self.name = name - self.path = path - self.prefix = prefix - self.pipe_t2i = None - self.pipe_i2i = None - - -models = [ - # Model("anything v3", "anything-v3.0", "anything v3 style"), - Model("anything v3", "Linaqruf/anything-v3.0", "anything v3 style"), -] -# Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "), -# Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "), -# Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "), -# Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy ") -# Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""), -# Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""), -# Model("Robo Diffusion", "nousr/robo-diffusion", ""), - -scheduler = DPMSolverMultistepScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - num_train_timesteps=1000, - trained_betas=None, - predict_epsilon=True, - thresholding=False, - algorithm_type="dpmsolver++", - solver_type="midpoint", - lower_order_final=True, -) - -custom_model = None -if is_colab: - models.insert(0, Model("Custom model")) - custom_model = models[0] - -last_mode = "txt2img" -current_model = models[1] if is_colab else models[0] -current_model_path = current_model.path - -if is_colab: - pipe = StableDiffusionPipeline.from_pretrained( - current_model.path, - torch_dtype=torch.float16, - scheduler=scheduler, - safety_checker=lambda images, clip_input: (images, False), - ) - -else: # download all models - print(f"{datetime.datetime.now()} Downloading vae...") - vae = AutoencoderKL.from_pretrained( - current_model.path, subfolder="vae", torch_dtype=torch.float16 - ) - for model in models: - try: - print(f"{datetime.datetime.now()} Downloading {model.name} model...") - unet = UNet2DConditionModel.from_pretrained( - model.path, subfolder="unet", torch_dtype=torch.float16 - ) - model.pipe_t2i = StableDiffusionPipeline.from_pretrained( - model.path, - unet=unet, - vae=vae, - torch_dtype=torch.float16, - scheduler=scheduler, - ) - model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model.path, - unet=unet, - vae=vae, - torch_dtype=torch.float16, - scheduler=scheduler, - ) - except Exception as e: - print( - f"{datetime.datetime.now()} Failed to load model " - + model.name - + ": " - + str(e) - ) - models.remove(model) - pipe = models[0].pipe_t2i - -if torch.cuda.is_available(): - pipe = pipe.to(device) - -device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶" - - -def error_str(error, title="Error"): - return ( - f"""#### {title} - {error}""" - if error - else "" - ) - - -def custom_model_changed(path): - models[0].path = path - global current_model - current_model = models[0] - - -def on_model_change(model_name): - prefix = ( - 'Enter prompt. "' - + next((m.prefix for m in models if m.name == model_name), None) - + '" is prefixed automatically' - if model_name != models[0].name - else "Don't forget to use the custom model prefix in the prompt!" 
- ) - - return gr.update(visible=model_name == models[0].name), gr.update( - placeholder=prefix - ) - - -def inference( - model_name, - prompt, - guidance, - steps, - width=512, - height=512, - seed=0, - img=None, - strength=0.5, - neg_prompt="", -): - print(psutil.virtual_memory()) # print memory usage - - global current_model - for model in models: - if model.name == model_name: - current_model = model - model_path = current_model.path - - generator = torch.Generator("cuda").manual_seed(seed) if seed != 0 else None - - try: - if img is not None: - return ( - img_to_img( - model_path, - prompt, - neg_prompt, - img, - strength, - guidance, - steps, - width, - height, - generator, - ), - None, - ) - else: - return ( - txt_to_img( - model_path, - prompt, - neg_prompt, - guidance, - steps, - width, - height, - generator, - ), - None, - ) - except Exception as e: - fprint(e) - return None, error_str(e) - - -def txt_to_img( - model_path, prompt, neg_prompt, guidance, steps, width, height, generator -): - print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}") - - global last_mode - global pipe - global current_model_path - if model_path != current_model_path or last_mode != "txt2img": - current_model_path = model_path - - if is_colab or current_model == custom_model: - pipe = StableDiffusionPipeline.from_pretrained( - current_model_path, - torch_dtype=torch.float16, - scheduler=scheduler, - safety_checker=lambda images, clip_input: (images, False), - ) - else: - pipe = pipe.to("cpu") - pipe = current_model.pipe_t2i - - if torch.cuda.is_available(): - pipe = pipe.to(device) - last_mode = "txt2img" - - prompt = current_model.prefix + prompt - result = pipe( - prompt, - negative_prompt=neg_prompt, - # num_images_per_prompt=n_images, - num_inference_steps=int(steps), - guidance_scale=guidance, - width=width, - height=height, - generator=generator, - ) - result.images[0] = magnifier.magnify(result.images[0]) - result.images[0] = magnifier.magnify(result.images[0]) - - # save image - result.images[0].save( - "{}/{}.{}.{}.{}.{}.{}.{}.{}.png".format( - saved_path, - datetime.datetime.now().strftime("%Y%m%d-%H%M%S"), - model_name, - prompt, - guidance, - steps, - width, - height, - seed, - ) - ) - return replace_nsfw_images(result) - - -def img_to_img( - model_path, - prompt, - neg_prompt, - img, - strength, - guidance, - steps, - width, - height, - generator, -): - print(f"{datetime.datetime.now()} img_to_img, model: {model_path}") - - global last_mode - global pipe - global current_model_path - if model_path != current_model_path or last_mode != "img2img": - current_model_path = model_path - - if is_colab or current_model == custom_model: - pipe = StableDiffusionImg2ImgPipeline.from_pretrained( - current_model_path, - torch_dtype=torch.float16, - scheduler=scheduler, - safety_checker=lambda images, clip_input: (images, False), - ) - else: - pipe = pipe.to("cpu") - pipe = current_model.pipe_i2i - - if torch.cuda.is_available(): - pipe = pipe.to(device) - last_mode = "img2img" - - prompt = current_model.prefix + prompt - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe( - prompt, - negative_prompt=neg_prompt, - # num_images_per_prompt=n_images, - init_image=img, - num_inference_steps=int(steps), - strength=strength, - guidance_scale=guidance, - width=width, - height=height, - generator=generator, - ) - result.images[0] = magnifier.magnify(result.images[0]) - result.images[0] = 
magnifier.magnify(result.images[0]) - - # save image - result.images[0].save( - "{}/{}.{}.{}.{}.{}.{}.{}.{}.png".format( - saved_path, - datetime.datetime.now().strftime("%Y%m%d-%H%M%S"), - model_name, - prompt, - guidance, - steps, - width, - height, - seed, - ) - ) - return replace_nsfw_images(result) - - -def replace_nsfw_images(results): - if is_colab: - return results.images[0] - - for i in range(len(results.images)): - if results.nsfw_content_detected[i]: - results.images[i] = Image.open("nsfw.png") - return results.images[0] - - -if __name__ == "__main__": - # inference("DALL-E", "a dog", 0, 1000, 512, 512, 0, None, 0.5, "") - model_name = "anything v3" - saved_path = r"imgs" - if not os.path.exists(saved_path): - os.mkdir(saved_path) - n = 0 - while True: - prompt_keys = [ - "beautiful eyes", - "cumulonimbus clouds", - "sky", - "detailed fingers", - random.choice( - [ - "white hair", - "red hair", - "blonde hair", - "black hair", - "green hair", - ] - ), - random.choice( - [ - "blue eyes", - "green eyes", - "red eyes", - "black eyes", - "yellow eyes", - ] - ), - random.choice(["flower meadow", "garden", "city", "river", "beach"]), - random.choice(["Elif", "Angel"]), - ] - guidance = 7.5 - steps = 25 - # width = 1024 - # height = 1024 - # width = 768 - # height = 1024 - width = 512 - height = 888 - seed = 0 - img = None - strength = 0.5 - neg_prompt = "" - inference( - model_name, - ".".join(prompt_keys), - guidance, - steps, - width=width, - height=height, - seed=seed, - img=img, - strength=strength, - neg_prompt=neg_prompt, - ) - n += 1 - fprint(n) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/m2m_100/process_data/remove_too_much_punc.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/m2m_100/process_data/remove_too_much_punc.py deleted file mode 100644 index 6c280de2403daffab477ac88e2008a68b9e61ff0..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/m2m_100/process_data/remove_too_much_punc.py +++ /dev/null @@ -1,36 +0,0 @@ -import gzip -import argparse -from string import punctuation - -def len_no_punc(s, punc): - return len([ch for ch in s if ch in punc]) - -def filter_overpunc(len_npunc, len_sen): - return len_npunc < 0.5*len_sen - -def main(args): - punc = punctuation + "—|–" - print('Processing file {}'.format(args.input)) - with gzip.open(args.input, 'rt', encoding=args.encoding) as tsv: - with open(args.bitext + '.' + args.src_lang, 'wt', encoding=args.encoding) as fsrc: - with open(args.bitext + '.' 
+ args.tgt_lang, 'wt', encoding=args.encoding) as ftgt: - line = tsv.readline() - fields = line.split('\t') - - src, tgt = fields[1], fields[2] - - nchar_npunc_src = len_no_punc(src, punc) - nchar_npunc_tgt = len_no_punc(tgt, punc) - - if filter_overpunc(nchar_npunc_src, len(src)) and filter_overpunc(nchar_npunc_tgt, len(tgt)): - fsrc.write(src.strip() + '\n') - ftgt.write(tgt.strip() + '\n') - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("--input", required=True, type=str) - parser.add_argument('--encoding', default='utf-8', help='character encoding for input/output') - parser.add_argument('--bitext', type=str, required=True, help='language direction') - parser.add_argument('--src-lang', type=str, required=True, help='Source language') - parser.add_argument('--tgt-lang', type=str, required=True, help='Target language') - main(parser.parse_args()) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/download_iitb.sh b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/download_iitb.sh deleted file mode 100644 index a884e20839e2a41a57405cb6af362e37bd16ab6f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/data_scripts/download_iitb.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -if [ -z $WORKDIR_ROOT ] ; -then - echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." - exit -fi - -IITB=$WORKDIR_ROOT/IITB -mkdir -p $IITB -pushd $IITB - -wget http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download/parallel.tgz -tar -xvzf parallel.tgz - -wget http://www.cfilt.iitb.ac.in/~moses/iitb_en_hi_parallel/iitb_corpus_download/dev_test.tgz -tar -xvzf dev_test.tgz - -DESTDIR=${WORKDIR_ROOT}/ML50/raw/ - -cp parallel/IITB.en-hi.en $DESTDIR/train.hi_IN-en_XX.en_XX -cp parallel/IITB.en-hi.hi $DESTDIR/train.hi_IN-en_XX.hi_IN - -cp dev_test/dev.en $DESTDIR/valid.hi_IN-en_XX.en_XX -cp dev_test/dev.hi $DESTDIR/valid.hi_IN-en_XX.hi_IN - -cp dev_test/test.en $DESTDIR/test.hi_IN-en_XX.en_XX -cp dev_test/test.hi $DESTDIR/test.hi_IN-en_XX.hi_IN -popd \ No newline at end of file diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/constraints/extract.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/constraints/extract.py deleted file mode 100644 index f6155d0a0538aadb46bf612256b6b949728de69e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/constraints/extract.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -"""Extracts random constraints from reference files.""" - -import argparse -import random -import sys - -from sacrebleu import extract_ngrams - - -def get_phrase(words, index, length): - assert index < len(words) - length + 1 - phr = " ".join(words[index : index + length]) - for i in range(index, index + length): - words.pop(index) - return phr - - -def main(args): - - if args.seed: - random.seed(args.seed) - - for line in sys.stdin: - constraints = [] - - def add_constraint(constraint): - constraints.append(constraint) - - source = line.rstrip() - if "\t" in line: - source, target = line.split("\t") - if args.add_sos: - target = f" {target}" - if args.add_eos: - target = f"{target} " - - if len(target.split()) >= args.len: - words = [target] - - num = args.number - - choices = {} - for i in range(num): - if len(words) == 0: - break - segmentno = random.choice(range(len(words))) - segment = words.pop(segmentno) - tokens = segment.split() - phrase_index = random.choice(range(len(tokens))) - choice = " ".join( - tokens[phrase_index : min(len(tokens), phrase_index + args.len)] - ) - for j in range( - phrase_index, min(len(tokens), phrase_index + args.len) - ): - tokens.pop(phrase_index) - if phrase_index > 0: - words.append(" ".join(tokens[0:phrase_index])) - if phrase_index + 1 < len(tokens): - words.append(" ".join(tokens[phrase_index:])) - choices[target.find(choice)] = choice - - # mask out with spaces - target = target.replace(choice, " " * len(choice), 1) - - for key in sorted(choices.keys()): - add_constraint(choices[key]) - - print(source, *constraints, sep="\t") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases") - parser.add_argument("--len", "-l", type=int, default=1, help="phrase length") - parser.add_argument( - "--add-sos", default=False, action="store_true", help="add token" - ) - parser.add_argument( - "--add-eos", default=False, action="store_true", help="add token" - ) - parser.add_argument("--seed", "-s", default=0, type=int) - args = parser.parse_args() - - main(args) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/linformer/linformer_src/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/linformer/linformer_src/__init__.py deleted file mode 100644 index 1c52f135ea6f99d0effe8ce1f7d77cbd66be3745..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/linformer/linformer_src/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .models import linformer_roberta # noqa diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/new/decoders/flashlight_decoder.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/new/decoders/flashlight_decoder.py deleted file mode 100644 index 38c7ac492f390a367a64769d7a72fe228df097c7..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/new/decoders/flashlight_decoder.py +++ /dev/null @@ -1,431 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import gc -import os.path as osp -import warnings -from collections import deque, namedtuple -from typing import Any, Dict, Tuple - -import numpy as np -import torch -from fairseq import tasks -from fairseq.data.dictionary import Dictionary -from fairseq.dataclass.utils import convert_namespace_to_omegaconf -from fairseq.models.fairseq_model import FairseqModel -from fairseq.utils import apply_to_sample -from omegaconf import open_dict, OmegaConf - -from typing import List - -from .decoder_config import FlashlightDecoderConfig -from .base_decoder import BaseDecoder - -try: - from flashlight.lib.text.decoder import ( - LM, - CriterionType, - DecodeResult, - KenLM, - LexiconDecoder, - LexiconDecoderOptions, - LexiconFreeDecoder, - LexiconFreeDecoderOptions, - LMState, - SmearingMode, - Trie, - ) - from flashlight.lib.text.dictionary import create_word_dict, load_words -except ImportError: - warnings.warn( - "flashlight python bindings are required to use this functionality. " - "Please install from " - "https://github.com/facebookresearch/flashlight/tree/master/bindings/python" - ) - LM = object - LMState = object - - -class KenLMDecoder(BaseDecoder): - def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None: - super().__init__(tgt_dict) - - self.nbest = cfg.nbest - self.unitlm = cfg.unitlm - - if cfg.lexicon: - self.lexicon = load_words(cfg.lexicon) - self.word_dict = create_word_dict(self.lexicon) - self.unk_word = self.word_dict.get_index("") - - self.lm = KenLM(cfg.lmpath, self.word_dict) - self.trie = Trie(self.vocab_size, self.silence) - - start_state = self.lm.start(False) - for word, spellings in self.lexicon.items(): - word_idx = self.word_dict.get_index(word) - _, score = self.lm.score(start_state, word_idx) - for spelling in spellings: - spelling_idxs = [tgt_dict.index(token) for token in spelling] - assert ( - tgt_dict.unk() not in spelling_idxs - ), f"{word} {spelling} {spelling_idxs}" - self.trie.insert(spelling_idxs, word_idx, score) - self.trie.smear(SmearingMode.MAX) - - self.decoder_opts = LexiconDecoderOptions( - beam_size=cfg.beam, - beam_size_token=cfg.beamsizetoken or len(tgt_dict), - beam_threshold=cfg.beamthreshold, - lm_weight=cfg.lmweight, - word_score=cfg.wordscore, - unk_score=cfg.unkweight, - sil_score=cfg.silweight, - log_add=False, - criterion_type=CriterionType.CTC, - ) - - self.decoder = LexiconDecoder( - self.decoder_opts, - self.trie, - self.lm, - self.silence, - self.blank, - self.unk_word, - [], - self.unitlm, - ) - else: - assert self.unitlm, "Lexicon-free decoding requires unit LM" - - d = {w: [[w]] for w in tgt_dict.symbols} - self.word_dict = create_word_dict(d) - self.lm = KenLM(cfg.lmpath, self.word_dict) - self.decoder_opts = LexiconFreeDecoderOptions( - beam_size=cfg.beam, - beam_size_token=cfg.beamsizetoken or len(tgt_dict), - beam_threshold=cfg.beamthreshold, - lm_weight=cfg.lmweight, - sil_score=cfg.silweight, - log_add=False, - criterion_type=CriterionType.CTC, - ) - self.decoder = LexiconFreeDecoder( - self.decoder_opts, self.lm, self.silence, self.blank, [] - ) - - def get_timesteps(self, token_idxs: List[int]) -> List[int]: - """Returns frame numbers corresponding to every non-blank token. - - Parameters - ---------- - token_idxs : List[int] - IDs of decoded tokens. - - Returns - ------- - List[int] - Frame numbers corresponding to every non-blank token. 
- """ - timesteps = [] - for i, token_idx in enumerate(token_idxs): - if token_idx == self.blank: - continue - if i == 0 or token_idx != token_idxs[i-1]: - timesteps.append(i) - return timesteps - - def decode( - self, - emissions: torch.FloatTensor, - ) -> List[List[Dict[str, torch.LongTensor]]]: - B, T, N = emissions.size() - hypos = [] - for b in range(B): - emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) - results = self.decoder.decode(emissions_ptr, T, N) - - nbest_results = results[: self.nbest] - hypos.append( - [ - { - "tokens": self.get_tokens(result.tokens), - "score": result.score, - "timesteps": self.get_timesteps(result.tokens), - "words": [ - self.word_dict.get_entry(x) for x in result.words if x >= 0 - ], - } - for result in nbest_results - ] - ) - return hypos - - -FairseqLMState = namedtuple( - "FairseqLMState", - [ - "prefix", - "incremental_state", - "probs", - ], -) - - -class FairseqLM(LM): - def __init__(self, dictionary: Dictionary, model: FairseqModel) -> None: - super().__init__() - - self.dictionary = dictionary - self.model = model - self.unk = self.dictionary.unk() - - self.save_incremental = False # this currently does not work properly - self.max_cache = 20_000 - - if torch.cuda.is_available(): - model.cuda() - model.eval() - model.make_generation_fast_() - - self.states = {} - self.stateq = deque() - - def start(self, start_with_nothing: bool) -> LMState: - state = LMState() - prefix = torch.LongTensor([[self.dictionary.eos()]]) - incremental_state = {} if self.save_incremental else None - with torch.no_grad(): - res = self.model(prefix.cuda(), incremental_state=incremental_state) - probs = self.model.get_normalized_probs(res, log_probs=True, sample=None) - - if incremental_state is not None: - incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state) - self.states[state] = FairseqLMState( - prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy() - ) - self.stateq.append(state) - - return state - - def score( - self, - state: LMState, - token_index: int, - no_cache: bool = False, - ) -> Tuple[LMState, int]: - """ - Evaluate language model based on the current lm state and new word - Parameters: - ----------- - state: current lm state - token_index: index of the word - (can be lexicon index then you should store inside LM the - mapping between indices of lexicon and lm, or lm index of a word) - Returns: - -------- - (LMState, float): pair of (new state, score for the current word) - """ - curr_state = self.states[state] - - def trim_cache(targ_size: int) -> None: - while len(self.stateq) > targ_size: - rem_k = self.stateq.popleft() - rem_st = self.states[rem_k] - rem_st = FairseqLMState(rem_st.prefix, None, None) - self.states[rem_k] = rem_st - - if curr_state.probs is None: - new_incremental_state = ( - curr_state.incremental_state.copy() - if curr_state.incremental_state is not None - else None - ) - with torch.no_grad(): - if new_incremental_state is not None: - new_incremental_state = apply_to_sample( - lambda x: x.cuda(), new_incremental_state - ) - elif self.save_incremental: - new_incremental_state = {} - - res = self.model( - torch.from_numpy(curr_state.prefix).cuda(), - incremental_state=new_incremental_state, - ) - probs = self.model.get_normalized_probs( - res, log_probs=True, sample=None - ) - - if new_incremental_state is not None: - new_incremental_state = apply_to_sample( - lambda x: x.cpu(), new_incremental_state - ) - - curr_state = FairseqLMState( - curr_state.prefix, new_incremental_state, probs[0, 
-1].cpu().numpy() - ) - - if not no_cache: - self.states[state] = curr_state - self.stateq.append(state) - - score = curr_state.probs[token_index].item() - - trim_cache(self.max_cache) - - outstate = state.child(token_index) - if outstate not in self.states and not no_cache: - prefix = np.concatenate( - [curr_state.prefix, torch.LongTensor([[token_index]])], -1 - ) - incr_state = curr_state.incremental_state - - self.states[outstate] = FairseqLMState(prefix, incr_state, None) - - if token_index == self.unk: - score = float("-inf") - - return outstate, score - - def finish(self, state: LMState) -> Tuple[LMState, int]: - """ - Evaluate eos for language model based on the current lm state - Returns: - -------- - (LMState, float): pair of (new state, score for the current word) - """ - return self.score(state, self.dictionary.eos()) - - def empty_cache(self) -> None: - self.states = {} - self.stateq = deque() - gc.collect() - - -class FairseqLMDecoder(BaseDecoder): - def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None: - super().__init__(tgt_dict) - - self.nbest = cfg.nbest - self.unitlm = cfg.unitlm - - self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None - self.idx_to_wrd = {} - - checkpoint = torch.load(cfg.lmpath, map_location="cpu") - - if "cfg" in checkpoint and checkpoint["cfg"] is not None: - lm_args = checkpoint["cfg"] - else: - lm_args = convert_namespace_to_omegaconf(checkpoint["args"]) - - if not OmegaConf.is_dict(lm_args): - lm_args = OmegaConf.create(lm_args) - - with open_dict(lm_args.task): - lm_args.task.data = osp.dirname(cfg.lmpath) - - task = tasks.setup_task(lm_args.task) - model = task.build_model(lm_args.model) - model.load_state_dict(checkpoint["model"], strict=False) - - self.trie = Trie(self.vocab_size, self.silence) - - self.word_dict = task.dictionary - self.unk_word = self.word_dict.unk() - self.lm = FairseqLM(self.word_dict, model) - - if self.lexicon: - start_state = self.lm.start(False) - for i, (word, spellings) in enumerate(self.lexicon.items()): - if self.unitlm: - word_idx = i - self.idx_to_wrd[i] = word - score = 0 - else: - word_idx = self.word_dict.index(word) - _, score = self.lm.score(start_state, word_idx, no_cache=True) - - for spelling in spellings: - spelling_idxs = [tgt_dict.index(token) for token in spelling] - assert ( - tgt_dict.unk() not in spelling_idxs - ), f"{spelling} {spelling_idxs}" - self.trie.insert(spelling_idxs, word_idx, score) - self.trie.smear(SmearingMode.MAX) - - self.decoder_opts = LexiconDecoderOptions( - beam_size=cfg.beam, - beam_size_token=cfg.beamsizetoken or len(tgt_dict), - beam_threshold=cfg.beamthreshold, - lm_weight=cfg.lmweight, - word_score=cfg.wordscore, - unk_score=cfg.unkweight, - sil_score=cfg.silweight, - log_add=False, - criterion_type=CriterionType.CTC, - ) - - self.decoder = LexiconDecoder( - self.decoder_opts, - self.trie, - self.lm, - self.silence, - self.blank, - self.unk_word, - [], - self.unitlm, - ) - else: - assert self.unitlm, "Lexicon-free decoding requires unit LM" - - d = {w: [[w]] for w in tgt_dict.symbols} - self.word_dict = create_word_dict(d) - self.lm = KenLM(cfg.lmpath, self.word_dict) - self.decoder_opts = LexiconFreeDecoderOptions( - beam_size=cfg.beam, - beam_size_token=cfg.beamsizetoken or len(tgt_dict), - beam_threshold=cfg.beamthreshold, - lm_weight=cfg.lmweight, - sil_score=cfg.silweight, - log_add=False, - criterion_type=CriterionType.CTC, - ) - self.decoder = LexiconFreeDecoder( - self.decoder_opts, self.lm, self.silence, self.blank, [] - ) - - 
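    # Note on decode() below (and on KenLMDecoder.decode above): the flashlight
    # decoder is handed a raw pointer into the emissions tensor,
    #   emissions.data_ptr() + 4 * b * emissions.stride(0),
    # where the hard-coded 4 is the byte size of a float32 element. The
    # emissions are therefore expected to be a float32 CPU tensor of shape
    # (B, T, N); a half-precision or CUDA tensor would make this pointer
    # arithmetic read the wrong memory.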
def decode( - self, - emissions: torch.FloatTensor, - ) -> List[List[Dict[str, torch.LongTensor]]]: - B, T, N = emissions.size() - hypos = [] - - def make_hypo(result: DecodeResult) -> Dict[str, Any]: - hypo = { - "tokens": self.get_tokens(result.tokens), - "score": result.score, - } - if self.lexicon: - hypo["words"] = [ - self.idx_to_wrd[x] if self.unitlm else self.word_dict[x] - for x in result.words - if x >= 0 - ] - return hypo - - for b in range(B): - emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) - results = self.decoder.decode(emissions_ptr, T, N) - - nbest_results = results[: self.nbest] - hypos.append([make_hypo(result) for result in nbest_results]) - self.lm.empty_cache() - - return hypos diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/scripts/prepare_text.sh b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/scripts/prepare_text.sh deleted file mode 100644 index 1caf13cb6a2a0bd84e5322c92124b2fa37368f9a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/wav2vec/unsupervised/scripts/prepare_text.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env zsh -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -lg=$1 -text_path=$2 -target_dir=$3 -min_phones=$4 -phonemizer=$5 -lid_path=$6 - -if [ -z "$lid_path" ]; then - lid_path="lid.187.bin" -fi - -ph_lg=${lg:l} -if test "$lg" = 'fr'; then - ph_lg='fr-fr' -elif test "$lg" = 'en'; then - ph_lg='en-us' -elif test "$lg" = 'pt'; then - ph_lg='pt-br' -fi - -ESPEAK_PATH='' -if test "$phonemizer" = 'espeak'; then - ESPEAK_PATH=$(which espeak) -elif test "$phonemizer" = 'espeak-ng'; then - ESPEAK_PATH=$(which espeak-ng) -elif test "$phonemizer" = 'G2P'; then - ESPEAK_PATH='' -else - echo "Unknown phonemizer $phonemizer. Valid options are espeak, espean-ng and G2P" - exit 1 -fi - -echo $lg -echo $ph_lg -echo $text_path -echo $target_dir -echo "min phone seen threshold is $min_phones" - -mkdir -p $target_dir -python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py --lang $lg --fasttext-model $lid_path < $text_path | grep -v '\-\-\-' >! $target_dir/lm.upper.lid.txt -python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $target_dir/lm.upper.lid.txt --only-source --destdir $target_dir --thresholdsrc 2 --padding-factor 1 --dict-only -cut -f1 -d' ' $target_dir/dict.txt | grep -v -x '[[:punct:]]*' | grep -Pv '\d\d\d\d\d+' >! $target_dir/words.txt - - -if [ -z "$ESPEAK_PATH" ]; then - python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py --compact < $target_dir/words.txt > $target_dir/phones.txt -else - # echoing 1 into corpus will prevent the mismatch lines between lexicon and phones in case the phonemizer fails - one=$(echo "1" | PHONEMIZER_ESPEAK_PATH=$ESPEAK_PATH phonemize -p ' ' -w '' -l $ph_lg --language-switch remove-flags) - sed 's/$/ 1/' $target_dir/words.txt | PHONEMIZER_ESPEAK_PATH=$ESPEAK_PATH phonemize -o $target_dir/phones.txt -p ' ' -w '' -l $ph_lg -j 70 --language-switch remove-flags - echo "one is ${one}" - sed -i "s/${one}$//" $target_dir/phones.txt -fi - -paste $target_dir/words.txt $target_dir/phones.txt >! 
$target_dir/lexicon.lst - -python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $target_dir/phones.txt --only-source --destdir $target_dir/phones --thresholdsrc $min_phones --padding-factor 1 --dict-only - -python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/filter_lexicon.py -d $target_dir/phones/dict.txt < $target_dir/lexicon.lst >! $target_dir/lexicon_filtered.lst -python $FAIRSEQ_ROOT/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py -s 0.25 --surround --lexicon $target_dir/lexicon_filtered.lst < $target_dir/lm.upper.lid.txt >! $target_dir/phones/lm.phones.filtered.txt -cp $target_dir/phones/dict.txt $target_dir/phones/dict.phn.txt -echo " 0" >> $target_dir/phones/dict.phn.txt -python $FAIRSEQ_ROOT/fairseq_cli/preprocess.py --dataset-impl mmap --trainpref $target_dir/phones/lm.phones.filtered.txt --workers 70 --only-source --destdir $target_dir/phones --srcdict $target_dir/phones/dict.phn.txt - -$KENLM_ROOT/lmplz -o 4 < $target_dir/lm.upper.lid.txt --discount_fallback --prune 0 0 0 3 >! $target_dir/kenlm.wrd.o40003.arpa -$KENLM_ROOT/build_binary $target_dir/kenlm.wrd.o40003.arpa $target_dir/kenlm.wrd.o40003.bin - -lg=$lg python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$target_dir/fst/phn_to_words_sil lm_arpa=$target_dir/kenlm.wrd.o40003.arpa wav2letter_lexicon=$target_dir/lexicon_filtered.lst data_dir=$target_dir/phones in_labels=phn "blank_symbol=''" -lg=$lg python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$target_dir/fst/phn_to_words lm_arpa=$target_dir/kenlm.wrd.o40003.arpa wav2letter_lexicon=$target_dir/lexicon_filtered.lst data_dir=$target_dir/phones in_labels=phn - -$KENLM_ROOT/lmplz -o 4 < $target_dir/phones/lm.phones.filtered.txt --discount_fallback >! $target_dir/phones/lm.phones.filtered.04.arpa -$KENLM_ROOT/build_binary $target_dir/phones/lm.phones.filtered.04.arpa $target_dir/phones/lm.phones.filtered.04.bin -$KENLM_ROOT/lmplz -o 6 < $target_dir/phones/lm.phones.filtered.txt --discount_fallback >! $target_dir/phones/lm.phones.filtered.06.arpa -$KENLM_ROOT/build_binary $target_dir/phones/lm.phones.filtered.06.arpa $target_dir/phones/lm.phones.filtered.06.bin - -lg=$lg python $FAIRSEQ_ROOT/examples/speech_recognition/kaldi/kaldi_initializer.py kaldi_root=$KALDI_ROOT fst_dir=$target_dir/fst/phn_to_phn_sil lm_arpa=$target_dir/phones/lm.phones.filtered.06.arpa data_dir=$target_dir/phones in_labels=phn "blank_symbol=''" diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/composite_encoder.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/composite_encoder.py deleted file mode 100644 index 4e20fe3a833a2d87876cbec294ad2bebfba7f591..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/composite_encoder.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .fairseq_encoder import FairseqEncoder - - -class CompositeEncoder(FairseqEncoder): - """ - A wrapper around a dictionary of :class:`FairseqEncoder` objects. - - We run forward on each encoder and return a dictionary of outputs. The first - encoder's dictionary is used for initialization. - - Args: - encoders (dict): a dictionary of :class:`FairseqEncoder` objects. 
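    Example (hypothetical encoder classes, shown only for illustration)::

        encoders = {
            "audio": AudioEncoder(audio_dict),
            "text": TextEncoder(text_dict),
        }
        encoder = CompositeEncoder(encoders)
        out = encoder(src_tokens, src_lengths)  # {"audio": ..., "text": ...}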
- """ - - def __init__(self, encoders): - super().__init__(next(iter(encoders.values())).dictionary) - self.encoders = encoders - for key in self.encoders: - self.add_module(key, self.encoders[key]) - - def forward(self, src_tokens, src_lengths): - """ - Args: - src_tokens (LongTensor): tokens in the source language of shape - `(batch, src_len)` - src_lengths (LongTensor): lengths of each source sentence of shape - `(batch)` - - Returns: - dict: - the outputs from each Encoder - """ - encoder_out = {} - for key in self.encoders: - encoder_out[key] = self.encoders[key](src_tokens, src_lengths) - return encoder_out - - def reorder_encoder_out(self, encoder_out, new_order): - """Reorder encoder output according to new_order.""" - for key in self.encoders: - encoder_out[key] = self.encoders[key].reorder_encoder_out( - encoder_out[key], new_order - ) - return encoder_out - - def max_positions(self): - return min(self.encoders[key].max_positions() for key in self.encoders) - - def upgrade_state_dict(self, state_dict): - for key in self.encoders: - self.encoders[key].upgrade_state_dict(state_dict) - return state_dict diff --git a/spaces/OFA-Sys/OFA-Image_Caption/utils/cider/pyciderevalcap/cider/cider_scorer.py b/spaces/OFA-Sys/OFA-Image_Caption/utils/cider/pyciderevalcap/cider/cider_scorer.py deleted file mode 100644 index d7f9505916f2210617cc529bf3c05acfa06d5a62..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/utils/cider/pyciderevalcap/cider/cider_scorer.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/env python -# Tsung-Yi Lin -# Ramakrishna Vedantam -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import copy -import six -from six.moves import cPickle -from collections import defaultdict -import numpy as np -import math -import os - -def precook(s, n=4, out=False): - """ - Takes a string as input and returns an object that can be given to - either cook_refs or cook_test. This is optional: cook_refs and cook_test - can take string arguments as well. - :param s: string : sentence to be converted into ngrams - :param n: int : number of ngrams for which representation is calculated - :return: term frequency vector for occuring ngrams - """ - words = s.split() - counts = defaultdict(int) - for k in range(1,n+1): - for i in range(len(words)-k+1): - ngram = tuple(words[i:i+k]) - counts[ngram] += 1 - return counts - -def cook_refs(refs, n=4): ## lhuang: oracle will call with "average" - '''Takes a list of reference sentences for a single segment - and returns an object that encapsulates everything that BLEU - needs to know about them. - :param refs: list of string : reference sentences for some image - :param n: int : number of ngrams for which (ngram) representation is calculated - :return: result (list of dict) - ''' - return [precook(ref, n) for ref in refs] - -def cook_test(test, n=4): - '''Takes a test sentence and returns an object that - encapsulates everything that BLEU needs to know about it. - :param test: list of string : hypothesis sentence for some image - :param n: int : number of ngrams for which (ngram) representation is calculated - :return: result (dict) - ''' - return precook(test, n, True) - -class CiderScorer(object): - """CIDEr scorer. 
- """ - - def copy(self): - ''' copy the refs.''' - new = CiderScorer(n=self.n) - new.ctest = copy.copy(self.ctest) - new.crefs = copy.copy(self.crefs) - return new - - def __init__(self, df_mode="corpus", test=None, refs=None, n=4, sigma=6.0): - ''' singular instance ''' - self.n = n - self.sigma = sigma - self.crefs = [] - self.ctest = [] - self.df_mode = df_mode - self.ref_len = None - if self.df_mode != "corpus": - pkl_file = cPickle.load(open(os.path.join('data', df_mode + '.p'),'rb'), **(dict(encoding='latin1') if six.PY3 else {})) - self.ref_len = np.log(float(pkl_file['ref_len'])) - self.document_frequency = pkl_file['document_frequency'] - self.cook_append(test, refs) - - def clear(self): - self.crefs = [] - self.ctest = [] - - def cook_append(self, test, refs): - '''called by constructor and __iadd__ to avoid creating new instances.''' - - if refs is not None: - self.crefs.append(cook_refs(refs)) - if test is not None: - self.ctest.append(cook_test(test)) ## N.B.: -1 - else: - self.ctest.append(None) # lens of crefs and ctest have to match - - def size(self): - assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest)) - return len(self.crefs) - - def __iadd__(self, other): - '''add an instance (e.g., from another sentence).''' - - if type(other) is tuple: - ## avoid creating new CiderScorer instances - self.cook_append(other[0], other[1]) - else: - self.ctest.extend(other.ctest) - self.crefs.extend(other.crefs) - - return self - def compute_doc_freq(self): - ''' - Compute term frequency for reference data. - This will be used to compute idf (inverse document frequency later) - The term frequency is stored in the object - :return: None - ''' - for refs in self.crefs: - # refs, k ref captions of one image - for ngram in set([ngram for ref in refs for (ngram,count) in ref.items()]): - self.document_frequency[ngram] += 1 - # maxcounts[ngram] = max(maxcounts.get(ngram,0), count) - - def compute_cider(self): - def counts2vec(cnts): - """ - Function maps counts of ngram to vector of tfidf weights. - The function returns vec, an array of dictionary that store mapping of n-gram and tf-idf weights. - The n-th entry of array denotes length of n-grams. - :param cnts: - :return: vec (array of dict), norm (array of float), length (int) - """ - vec = [defaultdict(float) for _ in range(self.n)] - length = 0 - norm = [0.0 for _ in range(self.n)] - for (ngram,term_freq) in cnts.items(): - # give word count 1 if it doesn't appear in reference corpus - df = np.log(max(1.0, self.document_frequency[ngram])) - # ngram index - n = len(ngram)-1 - # tf (term_freq) * idf (precomputed idf) for n-grams - vec[n][ngram] = float(term_freq)*(self.ref_len - df) - # compute norm for the vector. the norm will be used for - # computing similarity - norm[n] += pow(vec[n][ngram], 2) - - if n == 1: - length += term_freq - norm = [np.sqrt(n) for n in norm] - return vec, norm, length - - def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref): - ''' - Compute the cosine similarity of two vectors. 
- :param vec_hyp: array of dictionary for vector corresponding to hypothesis - :param vec_ref: array of dictionary for vector corresponding to reference - :param norm_hyp: array of float for vector corresponding to hypothesis - :param norm_ref: array of float for vector corresponding to reference - :param length_hyp: int containing length of hypothesis - :param length_ref: int containing length of reference - :return: array of score for each n-grams cosine similarity - ''' - delta = float(length_hyp - length_ref) - # measure consine similarity - val = np.array([0.0 for _ in range(self.n)]) - for n in range(self.n): - # ngram - for (ngram,count) in vec_hyp[n].items(): - val[n] += vec_hyp[n][ngram] * vec_ref[n][ngram] - - if (norm_hyp[n] != 0) and (norm_ref[n] != 0): - val[n] /= (norm_hyp[n]*norm_ref[n]) - - assert(not math.isnan(val[n])) - return val - - # compute log reference length - if self.df_mode == "corpus": - self.ref_len = np.log(float(len(self.crefs))) - - scores = [] - for test, refs in zip(self.ctest, self.crefs): - # compute vector for test captions - vec, norm, length = counts2vec(test) - # compute vector for ref captions - score = np.array([0.0 for _ in range(self.n)]) - for ref in refs: - vec_ref, norm_ref, length_ref = counts2vec(ref) - score += sim(vec, vec_ref, norm, norm_ref, length, length_ref) - # change by vrama91 - mean of ngram scores, instead of sum - score_avg = np.mean(score) - # divide by number of references - score_avg /= len(refs) - # multiply score by 10 - score_avg *= 10.0 - # append score of an image to the score list - scores.append(score_avg) - return scores - - def compute_score(self, option=None, verbose=0): - # compute idf - if self.df_mode == "corpus": - self.document_frequency = defaultdict(float) - self.compute_doc_freq() - # assert to check document frequency - assert(len(self.ctest) >= max(self.document_frequency.values())) - # import json for now and write the corresponding files - # compute cider score - score = self.compute_cider() - # debug - # print score - return np.mean(np.array(score)), np.array(score) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/README.md deleted file mode 100644 index b155e855f2f94e30ad22262f260008fda8ac1804..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/README.md +++ /dev/null @@ -1,202 +0,0 @@ -# Discriminative Reranking for Neural Machine Translation -https://aclanthology.org/2021.acl-long.563/ - -This folder contains source code for training DrNMT, a discriminatively trained reranker for neural machine translation. - -## Data preparation -1. Follow the instructions under `examples/translation` to build a base MT model. Prepare three files, one with source sentences, one with ground truth target sentences, and one with hypotheses generated from the base MT model. Each line in the file contains one sentence in raw text (i.e. no sentencepiece, etc.). Below is an example of the files with _N_ hypotheses for each source sentence. - -``` -# Example of the source sentence file: (The file should contain L lines.) - -source_sentence_1 -source_sentence_2 -source_sentence_3 -... -source_sentence_L - -# Example of the target sentence file: (The file should contain L lines.) - -target_sentence_1 -target_sentence_2 -target_sentence_3 -... -target_sentence_L - -# Example of the hypotheses file: (The file should contain L*N lines.) 
- -source_sentence_1_hypo_1 -source_sentence_1_hypo_2 -... -source_sentence_1_hypo_N -source_sentence_2_hypo_1 -... -source_sentence_2_hypo_N -... -source_sentence_L_hypo_1 -... -source_sentence_L_hypo_N -``` - -2. Download the [XLMR model](https://github.com/fairinternal/fairseq-py/tree/main/examples/xlmr#pre-trained-models). -``` -wget https://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz -tar zxvf xlmr.base.tar.gz - -# The folder should contain dict.txt, model.pt and sentencepiece.bpe.model. -``` - -3. Prepare scores and BPE data. -* `N`: Number of hypotheses per each source sentence. We use 50 in the paper. -* `SPLIT`: Name of the data split, i.e. train, valid, test. Use split_name, split_name1, split_name2, ..., if there are multiple datasets for a split, e.g. train, train1, valid, valid1. -* `NUM_SHARDS`: Number of shards. Set this to 1 for non-train splits. -* `METRIC`: The metric for DrNMT to optimize for. We support either `bleu` or `ter`. -``` -# For each data split, e.g. train, valid, test, etc., run the following: - -SOURCE_FILE=/path/to/source_sentence_file -TARGET_FILE=/path/to/target_sentence_file -HYPO_FILE=/path/to/hypo_file -XLMR_DIR=/path/to/xlmr -OUTPUT_DIR=/path/to/output - -python scripts/prep_data.py \ - --input-source ${SOURCE_FILE} \ - --input-target ${TARGET_FILE} \ - --input-hypo ${HYPO_FILE} \ - --output-dir ${OUTPUT_DIR} \ - --split $SPLIT - --beam $N \ - --sentencepiece-model ${XLMR_DIR}/sentencepiece.bpe.model \ - --metric $METRIC \ - --num-shards ${NUM_SHARDS} - -# The script will create ${OUTPUT_DIR}/$METRIC with ${NUM_SHARDS} splits. -# Under split*/input_src, split*/input_tgt and split*/$METRIC, there will be $SPLIT.bpe and $SPLIT.$METRIC files, respectively. - -``` - -4. Pre-process the data into fairseq format. -``` -# use comma to separate if there are more than one train or valid set -for suffix in src tgt ; do - fairseq-preprocess --only-source \ - --trainpref ${OUTPUT_DIR}/$METRIC/split1/input_${suffix}/train.bpe \ - --validpref ${OUTPUT_DIR}/$METRIC/split1/input_${suffix}/valid.bpe \ - --destdir ${OUTPUT_DIR}/$METRIC/split1/input_${suffix} \ - --workers 60 \ - --srcdict ${XLMR_DIR}/dict.txt -done - -for i in `seq 2 ${NUM_SHARDS}`; do - for suffix in src tgt ; do - fairseq-preprocess --only-source \ - --trainpref ${OUTPUT_DIR}/$METRIC/split${i}/input_${suffix}/train.bpe \ - --destdir ${OUTPUT_DIR}/$METRIC/split${i}/input_${suffix} \ - --workers 60 \ - --srcdict ${XLMR_DIR}/dict.txt - - ln -s ${OUTPUT_DIR}/$METRIC/split1/input_${suffix}/valid* ${OUTPUT_DIR}/$METRIC/split${i}/input_${suffix}/. - done - - ln -s ${OUTPUT_DIR}/$METRIC/split1/$METRIC/valid* ${OUTPUT_DIR}/$METRIC/split${i}/$METRIC/. -done -``` - -## Training - -``` -EXP_DIR=/path/to/exp - -# An example of training the model with the config for De-En experiment in the paper. -# The config uses 16 GPUs and 50 hypotheses. -# For training with fewer number of GPUs, set -# distributed_training.distributed_world_size=k +optimization.update_freq='[x]' where x = 16/k -# For training with fewer number of hypotheses, set -# task.mt_beam=N dataset.batch_size=N dataset.required_batch_size_multiple=N - -fairseq-hydra-train -m \ - --config-dir config/ --config-name deen \ - task.data=${OUTPUT_DIR}/$METRIC/split1/ \ - task.num_data_splits=${NUM_SHARDS} \ - model.pretrained_model=${XLMR_DIR}/model.pt \ - common.user_dir=${FAIRSEQ_ROOT}/examples/discriminative_reranking_nmt \ - checkpoint.save_dir=${EXP_DIR} - -``` - -## Inference & scoring -Perform DrNMT reranking (fw + reranker score) -1. 
Tune weights on valid sets. -``` -# genrate N hypotheses with the base MT model (fw score) -VALID_SOURCE_FILE=/path/to/source_sentences # one sentence per line, converted to the sentencepiece used by the base MT model -VALID_TARGET_FILE=/path/to/target_sentences # one sentence per line in raw text, i.e. no sentencepiece and tokenization -MT_MODEL=/path/to/mt_model -MT_DATA_PATH=/path/to/mt_data - -cat ${VALID_SOURCE_FILE} | \ - fairseq-interactive ${MT_DATA_PATH} \ - --max-tokens 4000 --buffer-size 16 \ - --num-workers 32 --path ${MT_MODEL} \ - --beam $N --nbest $N \ - --post-process sentencepiece &> valid-hypo.out - -# replace "bleu" with "ter" to optimize for TER -python drnmt_rerank.py \ - ${OUTPUT_DIR}/$METRIC/split1/ \ - --path ${EXP_DIR}/checkpoint_best.pt \ - --in-text valid-hypo.out \ - --results-path ${EXP_DIR} \ - --gen-subset valid \ - --target-text ${VALID_TARGET_FILE} \ - --user-dir ${FAIRSEQ_ROOT}/examples/discriminative_reranking_nmt \ - --bpe sentencepiece \ - --sentencepiece-model ${XLMR_DIR}/sentencepiece.bpe.model \ - --beam $N \ - --batch-size $N \ - --metric bleu \ - --tune - -``` - -2. Apply best weights on test sets -``` -# genrate N hypotheses with the base MT model (fw score) -TEST_SOURCE_FILE=/path/to/source_sentences # one sentence per line, converted to the sentencepiece used by the base MT model - -cat ${TEST_SOURCE_FILE} | \ - fairseq-interactive ${MT_DATA_PATH} \ - --max-tokens 4000 --buffer-size 16 \ - --num-workers 32 --path ${MT_MODEL} \ - --beam $N --nbest $N \ - --post-process sentencepiece &> test-hypo.out - -# replace "bleu" with "ter" to evaluate TER -# Add --target-text for evaluating BLEU/TER, -# otherwise the script will only generate the hypotheses with the highest scores only. -python drnmt_rerank.py \ - ${OUTPUT_DIR}/$METRIC/split1/ \ - --path ${EXP_DIR}/checkpoint_best.pt \ - --in-text test-hypo.out \ - --results-path ${EXP_DIR} \ - --gen-subset test \ - --user-dir ${FAIRSEQ_ROOT}/examples/discriminative_reranking_nmt \ - --bpe sentencepiece \ - --sentencepiece-model ${XLMR_DIR}/sentencepiece.bpe.model \ - --beam $N \ - --batch-size $N \ - --metric bleu \ - --fw-weight ${BEST_FW_WEIGHT} \ - --lenpen ${BEST_LENPEN} -``` - -## Citation -```bibtex -@inproceedings{lee2021discriminative, - title={Discriminative Reranking for Neural Machine Translation}, - author={Lee, Ann and Auli, Michael and Ranzato, Marc'Aurelio}, - booktitle={ACL}, - year={2021} -} -``` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/noisychannel/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/noisychannel/README.md deleted file mode 100644 index 9d101aa874ec36ff3bb5c1166169a4c4f38ffe2b..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/noisychannel/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Simple and Effective Noisy Channel Modeling for Neural Machine Translation (Yee et al., 2019) -This page contains pointers to pre-trained models as well as instructions on how to run the reranking scripts. 
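At a high level, the scripts below tune a weighted log-linear combination of the direct model P(T|S), the channel model P(S|T) and the language model P(T), plus a length penalty (`lenpen`). The sketch below is only an illustration of that combination, using hypothetical helper and field names; the weight numbering follows the commands further down (`weight1` for the channel model passed as `--score-model1`, `weight2` for the direct model, `weight3` for the LM), and the example weights are the preconfigured values from the `rerank.py` command at the end of this README. The exact score normalization applied by `rerank.py` (including how `lenpen` enters) may differ.

```python
# Illustrative only: combine per-hypothesis log-probabilities the way the
# reranking scripts weight them (hypothetical data structure and helper).
def combined_score(direct_lp, channel_lp, lm_lp, w1, w2, w3):
    # direct_lp = log P(T|S), channel_lp = log P(S|T), lm_lp = log P(T)
    return w2 * direct_lp + w1 * channel_lp + w3 * lm_lp

hypotheses = [  # made-up scores for one source sentence
    {"direct": -4.1, "channel": -6.3, "lm": -12.0},
    {"direct": -4.4, "channel": -5.1, "lm": -10.2},
]
best = max(
    hypotheses,
    key=lambda h: combined_score(h["direct"], h["channel"], h["lm"],
                                 w1=1.0, w2=0.929, w3=0.831),
)
```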
- -## Citation: -```bibtex -@inproceedings{yee2019simple, - title = {Simple and Effective Noisy Channel Modeling for Neural Machine Translation}, - author = {Kyra Yee and Yann Dauphin and Michael Auli}, - booktitle = {Conference on Empirical Methods in Natural Language Processing}, - year = {2019}, -} -``` - -## Pre-trained Models: - -Model | Description | Download ----|---|--- -`transformer.noisychannel.de-en` | De->En Forward Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/forward_de2en.tar.bz2) -`transformer.noisychannel.en-de` | En->De Channel Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/backward_en2de.tar.bz2) -`transformer_lm.noisychannel.en` | En Language model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/reranking_en_lm.tar.bz2) - -Test Data: [newstest_wmt17](https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/wmt17test.tar.bz2) - -## Example usage - -``` -mkdir rerank_example -curl https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/forward_de2en.tar.bz2 | tar xvjf - -C rerank_example -curl https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/backward_en2de.tar.bz2 | tar xvjf - -C rerank_example -curl https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/reranking_en_lm.tar.bz2 | tar xvjf - -C rerank_example -curl https://dl.fbaipublicfiles.com/fairseq/models/noisychannel/wmt17test.tar.bz2 | tar xvjf - -C rerank_example - -beam=50 -num_trials=1000 -fw_name=fw_model_ex -bw_name=bw_model_ex -lm_name=lm_ex -data_dir=rerank_example/hyphen-splitting-mixed-case-wmt17test-wmt14bpe -data_dir_name=wmt17 -lm=rerank_example/lm/checkpoint_best.pt -lm_bpe_code=rerank_example/lm/bpe32k.code -lm_dict=rerank_example/lm/dict.txt -batch_size=32 -bw=rerank_example/backward_en2de.pt -fw=rerank_example/forward_de2en.pt - -# reranking with P(T|S) P(S|T) and P(T) -python examples/noisychannel/rerank_tune.py $data_dir --tune-param lenpen weight1 weight3 \ - --lower-bound 0 0 0 --upper-bound 3 3 3 --data-dir-name $data_dir_name \ - --num-trials $num_trials --source-lang de --target-lang en --gen-model $fw \ - -n $beam --batch-size $batch_size --score-model2 $fw --score-model1 $bw \ - --backwards1 --weight2 1 \ - -lm $lm --lm-dict $lm_dict --lm-name en_newscrawl --lm-bpe-code $lm_bpe_code \ - --model2-name $fw_name --model1-name $bw_name --gen-model-name $fw_name - -# reranking with P(T|S) and P(T) -python examples/noisychannel/rerank_tune.py $data_dir --tune-param lenpen weight3 \ - --lower-bound 0 0 --upper-bound 3 3 --data-dir-name $data_dir_name \ - --num-trials $num_trials --source-lang de --target-lang en --gen-model $fw \ - -n $beam --batch-size $batch_size --score-model1 $fw \ - -lm $lm --lm-dict $lm_dict --lm-name en_newscrawl --lm-bpe-code $lm_bpe_code \ - --model1-name $fw_name --gen-model-name $fw_name - -# to run with a preconfigured set of hyperparameters for the lenpen and model weights, using rerank.py instead. 
-python examples/noisychannel/rerank.py $data_dir \ - --lenpen 0.269 --weight1 1 --weight2 0.929 --weight3 0.831 \ - --data-dir-name $data_dir_name --source-lang de --target-lang en --gen-model $fw \ - -n $beam --batch-size $batch_size --score-model2 $fw --score-model1 $bw --backwards1 \ - -lm $lm --lm-dict $lm_dict --lm-name en_newscrawl --lm-bpe-code $lm_bpe_code \ - --model2-name $fw_name --model1-name $bw_name --gen-model-name $fw_name -``` - diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/camera.py b/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/camera.py deleted file mode 100644 index ee037c22ba76fb5cd501f7656ea55e4ed46d3edd..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/camera.py +++ /dev/null @@ -1,52 +0,0 @@ -import bpy - - -class Camera: - def __init__(self, *, first_root, mode, is_mesh): - camera = bpy.data.objects['Camera'] - - ## initial position - camera.location.x = 7.36 - camera.location.y = -6.93 - if is_mesh: - # camera.location.z = 5.45 - camera.location.z = 5.6 - else: - camera.location.z = 5.2 - - # wider point of view - if mode == "sequence": - if is_mesh: - camera.data.lens = 65 - else: - camera.data.lens = 85 - elif mode == "frame": - if is_mesh: - camera.data.lens = 130 - else: - camera.data.lens = 85 - elif mode == "video": - if is_mesh: - camera.data.lens = 110 - else: - # avoid cutting person - camera.data.lens = 85 - # camera.data.lens = 140 - - # camera.location.x += 0.75 - - self.mode = mode - self.camera = camera - - self.camera.location.x += first_root[0] - self.camera.location.y += first_root[1] - - self._root = first_root - - def update(self, newroot): - delta_root = newroot - self._root - - self.camera.location.x += delta_root[0] - self.camera.location.y += delta_root[1] - - self._root = newroot diff --git a/spaces/PAIR/PAIR-Diffusion/cldm/cldm.py b/spaces/PAIR/PAIR-Diffusion/cldm/cldm.py deleted file mode 100644 index 0a9e6ab954cdec7994e9bd8adffce15d99eb34e8..0000000000000000000000000000000000000000 --- a/spaces/PAIR/PAIR-Diffusion/cldm/cldm.py +++ /dev/null @@ -1,648 +0,0 @@ -import einops -import torch -import torch as th -import torch.nn as nn -import math - -from ldm.modules.diffusionmodules.util import ( - conv_nd, - linear, - zero_module, - timestep_embedding, -) -import torchvision -from einops import rearrange, repeat -from torchvision.utils import make_grid -from ldm.modules.attention import SpatialTransformer -from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock -from ldm.models.diffusion.ddpm import LatentDiffusion -from ldm.util import log_txt_as_img, exists, instantiate_from_config -from ldm.models.diffusion.ddim import DDIMSampler - - -class VGGPerceptualLoss(torch.nn.Module): - def __init__(self, resize=True): - super(VGGPerceptualLoss, self).__init__() - blocks = [] - vgg_model = torchvision.models.vgg16(pretrained=True) - print('Loaded VGG weights') - blocks.append(vgg_model.features[:4].eval()) - blocks.append(vgg_model.features[4:9].eval()) - blocks.append(vgg_model.features[9:16].eval()) - blocks.append(vgg_model.features[16:23].eval()) - - for bl in blocks: - for p in bl.parameters(): - p.requires_grad = False - self.blocks = torch.nn.ModuleList(blocks) - self.transform = torch.nn.functional.interpolate - self.resize = resize - self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)) - self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 
1)) - print('Initialized VGG model') - - def forward(self, input, feature_layers=[0, 1, 2, 3], style_layers=[1,]): - if input.shape[1] != 3: - input = input.repeat(1, 3, 1, 1) - target = target.repeat(1, 3, 1, 1) - input = (input-self.mean) / self.std - if self.resize: - input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False) - x = input - gram_matrices_all = [] - feats = [] - for i, block in enumerate(self.blocks): - x = block(x) - if i in style_layers: - feats.append(x) - - return feats - - - -class ControlledUnetModel(UNetModel): - def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs): - hs = [] - with torch.no_grad(): - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb, context) - hs.append(h) - h = self.middle_block(h, emb, context) - - if control is not None: - h += control.pop() - - for i, module in enumerate(self.output_blocks): - if only_mid_control or control is None: - h = torch.cat([h, hs.pop()], dim=1) - else: - h = torch.cat([h, hs.pop() + control.pop()], dim=1) - h = module(h, emb, context) - - h = h.type(x.dtype) - return self.out(h) - - -class ControlNet(nn.Module): - def __init__( - self, - image_size, - in_channels, - model_channels, - hint_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - use_checkpoint=False, - use_fp16=False, - num_heads=-1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - use_spatial_transformer=False, # custom transformer support - transformer_depth=1, # custom transformer support - context_dim=None, # custom transformer support - n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model - legacy=True, - disable_self_attentions=None, - num_attention_blocks=None, - disable_middle_self_attn=False, - use_linear_in_transformer=False, - ): - super().__init__() - if use_spatial_transformer: - assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' - - if context_dim is not None: - assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
- from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - if num_heads == -1: - assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' - - if num_head_channels == -1: - assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' - - self.dims = dims - self.image_size = image_size - self.in_channels = in_channels - self.model_channels = model_channels - if isinstance(num_res_blocks, int): - self.num_res_blocks = len(channel_mult) * [num_res_blocks] - else: - if len(num_res_blocks) != len(channel_mult): - raise ValueError("provide num_res_blocks either as an int (globally constant) or " - "as a list/tuple (per-level) with the same length as channel_mult") - self.num_res_blocks = num_res_blocks - if disable_self_attentions is not None: - # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not - assert len(disable_self_attentions) == len(channel_mult) - if num_attention_blocks is not None: - assert len(num_attention_blocks) == len(self.num_res_blocks) - assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) - print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " - f"This option has LESS priority than attention_resolutions {attention_resolutions}, " - f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " - f"attention will still not be set.") - - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - self.predict_codebook_ids = n_embed is not None - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)]) - - self.input_hint_block = TimestepEmbedSequential( - conv_nd(dims, hint_channels, 16, 3, padding=1), - nn.SiLU(), - conv_nd(dims, 16, 16, 3, padding=1), - nn.SiLU(), - conv_nd(dims, 16, 32, 3, padding=1, stride=2), - nn.SiLU(), - conv_nd(dims, 32, 32, 3, padding=1), - nn.SiLU(), - conv_nd(dims, 32, 96, 3, padding=1, stride=2), - nn.SiLU(), - conv_nd(dims, 96, 96, 3, padding=1), - nn.SiLU(), - conv_nd(dims, 96, 256, 3, padding=1, stride=2), - nn.SiLU(), - zero_module(conv_nd(dims, 256, model_channels, 3, padding=1)) - ) - - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for nr in range(self.num_res_blocks[level]): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if 
legacy: - # num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - if exists(disable_self_attentions): - disabled_sa = disable_self_attentions[level] - else: - disabled_sa = False - - if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, - disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self.zero_convs.append(self.make_zero_conv(ch)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - self.zero_convs.append(self.make_zero_conv(ch)) - ds *= 2 - self._feature_size += ch - - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - # num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, - disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self.middle_block_out = self.make_zero_conv(ch) - self._feature_size += ch - - def make_zero_conv(self, channels): - return TimestepEmbedSequential(zero_module(conv_nd(self.dims, channels, channels, 1, padding=0))) - - def forward(self, x, hint, timesteps, context, **kwargs): - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - guided_hint = self.input_hint_block(hint, emb, context, x.shape) - - outs = [] - - h = x.type(self.dtype) - for module, zero_conv in zip(self.input_blocks, self.zero_convs): - if guided_hint is not None: - h = module(h, emb, context) - h += guided_hint - guided_hint = None - else: - h = module(h, emb, context) - outs.append(zero_conv(h, emb, context)) - - h = self.middle_block(h, emb, context) - outs.append(self.middle_block_out(h, emb, context)) - - return outs - -class Interpolate(nn.Module): - def __init__(self, size, mode): - super(Interpolate, self).__init__() - self.interp = torch.nn.functional.interpolate - self.size = size - self.mode = mode - self.factor = 8 - - def forward(self, x): - h,w = 
x.shape[2]//self.factor, x.shape[3]//self.factor - x = self.interp(x, size=(h,w), mode=self.mode) - return x - -class ControlNetSAP(ControlNet): - def __init__( - self, - hint_channels, - model_channels, - input_hint_block='fixed', - size = 64, - mode='nearest', - *args, - **kwargs - ): - super().__init__( hint_channels=hint_channels, model_channels=model_channels, *args, **kwargs) - #hint channels are atleast 128 dims - - if input_hint_block == 'learnable': - ch = 2 ** (int(math.log2(hint_channels))) - self.input_hint_block = TimestepEmbedSequential( - conv_nd(self.dims, hint_channels, hint_channels, 3, padding=1), - nn.SiLU(), - conv_nd(self.dims, hint_channels, 2*ch, 3, padding=1, stride=2), - nn.SiLU(), - conv_nd(self.dims, 2*ch, 2*ch, 3, padding=1), - nn.SiLU(), - conv_nd(self.dims, 2*ch, 2*ch, 3, padding=1, stride=2), - nn.SiLU(), - conv_nd(self.dims, 2*ch, 2*ch, 3, padding=1), - nn.SiLU(), - conv_nd(self.dims, 2*ch, model_channels, 3, padding=1, stride=2), - nn.SiLU(), - zero_module(conv_nd(self.dims, model_channels, model_channels, 3, padding=1)) - ) - else: - print("Only interpolation") - self.input_hint_block = TimestepEmbedSequential( - Interpolate(size, mode), - zero_module(conv_nd(self.dims, hint_channels, model_channels, 3, padding=1))) - - -class ControlLDM(LatentDiffusion): - - def __init__(self, control_stage_config, control_key, only_mid_control, *args, **kwargs): - super().__init__(*args, **kwargs) - self.control_model = instantiate_from_config(control_stage_config) - self.control_key = control_key - self.only_mid_control = only_mid_control - self.control_scales = [1.0] * 13 - - @torch.no_grad() - def get_input(self, batch, k, bs=None, *args, **kwargs): - x, c = super().get_input(batch, self.first_stage_key, *args, **kwargs) - control = batch[self.control_key] - if bs is not None: - control = control[:bs] - control = control.to(self.device) - control = einops.rearrange(control, 'b h w c -> b c h w') - control = control.to(memory_format=torch.contiguous_format).float() - return x, dict(c_crossattn=[c], c_concat=[control]) - - def apply_model(self, x_noisy, t, cond, *args, **kwargs): - assert isinstance(cond, dict) - diffusion_model = self.model.diffusion_model - - cond_txt = torch.cat(cond['c_crossattn'], 1) - - if cond['c_concat'] is None: - eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=None, only_mid_control=self.only_mid_control) - else: - control = self.control_model(x=x_noisy, hint=torch.cat(cond['c_concat'], 1), timesteps=t, context=cond_txt) - control = [c * scale for c, scale in zip(control, self.control_scales)] - eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=control, only_mid_control=self.only_mid_control) - - return eps - - @torch.no_grad() - def get_unconditional_conditioning(self, N): - return self.get_learned_conditioning([""] * N) - - @torch.no_grad() - def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None, - use_ema_scope=True, - **kwargs): - use_ddim = ddim_steps is not None - - log = dict() - z, c = self.get_input(batch, self.first_stage_key, bs=N) - c_cat, c = c["c_concat"][0][:N], c["c_crossattn"][0][:N] - N = min(z.shape[0], N) - n_row = min(z.shape[0], n_row) - log["reconstruction"] = self.decode_first_stage(z) - log["control"] = c_cat * 2.0 - 1.0 - 
log["conditioning"] = log_txt_as_img((512, 512), batch[self.cond_stage_key], size=16) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if unconditional_guidance_scale > 1.0: - uc_cross = self.get_unconditional_conditioning(N) - uc_cat = c_cat # torch.zeros_like(c_cat) - uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} - samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc_full, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - - return log - - @torch.no_grad() - def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): - ddim_sampler = DDIMSampler(self) - b, c, h, w = cond["c_concat"][0].shape - shape = (self.channels, h // 8, w // 8) - samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) - return samples, intermediates - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.control_model.parameters()) - if not self.sd_locked: - params += list(self.model.diffusion_model.output_blocks.parameters()) - params += list(self.model.diffusion_model.out.parameters()) - opt = torch.optim.AdamW(params, lr=lr) - return opt - - def low_vram_shift(self, is_diffusing): - if is_diffusing: - self.model = self.model.cuda() - self.control_model = self.control_model.cuda() - self.first_stage_model = self.first_stage_model.cpu() - self.cond_stage_model = self.cond_stage_model.cpu() - else: - self.model = self.model.cpu() - self.control_model = self.control_model.cpu() - self.first_stage_model = self.first_stage_model.cuda() - self.cond_stage_model = self.cond_stage_model.cuda() - - -class SAP(ControlLDM): - @torch.no_grad() - def __init__(self,control_stage_config, control_key, only_mid_control, *args, **kwargs): - super().__init__(control_stage_config=control_stage_config, - control_key=control_key, - only_mid_control=only_mid_control, - *args, **kwargs) - self.appearance_net = VGGPerceptualLoss().to(self.device) - print("Loaded VGG model") - - def get_appearance(self, img, mask, return_all=False): - img = (img + 1) * 0.5 - feat = self.appearance_net(img)[0] - empty_mask_flag = torch.sum(mask, dim=(1,2,3)) == 0 - - - empty_appearance = 
torch.zeros(feat.shape).to(self.device) - mask = torch.nn.functional.interpolate(mask.float(), (feat.shape[2:])).long() - one_hot = torch.nn.functional.one_hot(mask[:,0]).permute(0,3,1,2).float() - - feat = torch.einsum('nchw, nmhw->nmchw', feat, one_hot) - feat = torch.sum(feat, dim=(3,4)) - norm = torch.sum(one_hot, dim=(2,3)) + 1e-6 #nm - mean_feat = feat/norm[:,:,None] #nmc - mean_feat[:, 0] = torch.zeros(mean_feat[:,0].shape).to(self.device) #set edges in panopitc mask to empty appearance feature - - splatted_feat = torch.einsum('nmc, nmhw->nchw', mean_feat, one_hot) - splatted_feat[empty_mask_flag] = empty_appearance[empty_mask_flag] - splatted_feat = torch.nn.functional.normalize(splatted_feat) #l2 normalize on c dim - - if return_all: - return splatted_feat, mean_feat, one_hot, empty_mask_flag - - return splatted_feat - - def get_input(self, batch, k, bs=None, *args, **kwargs): - z, c, x_orig, x_recon = super(ControlLDM, self).get_input(batch, self.first_stage_key, return_first_stage_outputs=True , *args, **kwargs) - structure = batch['seg'].unsqueeze(1) - mask = batch['mask'].unsqueeze(1).to(self.device) - appearance = self.get_appearance(x_orig, mask) - if bs is not None: - structure = structure[:bs] - appearance = appearance[:bs] - - structure = structure.to(self.device) - appearance = appearance.to(self.device) - structure = structure.to(memory_format=torch.contiguous_format).float() - appearance = appearance.to(memory_format=torch.contiguous_format).float() - structure = torch.nn.functional.interpolate(structure, x_orig.shape[2:]) - appearance = torch.nn.functional.interpolate(appearance, x_orig.shape[2:]) - control = torch.cat([structure, appearance], dim=1) - return z, dict(c_crossattn=[c], c_concat=[control]) - - @torch.no_grad() - def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=False, - plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None, - use_ema_scope=True, - **kwargs): - use_ddim = ddim_steps is not None - - log = dict() - z, c = self.get_input(batch, self.first_stage_key, bs=N) - c_cat, c = c["c_concat"][0][:N,], c["c_crossattn"][0][:N] - N = min(z.shape[0], N) - n_row = min(z.shape[0], n_row) - log["reconstruction"] = self.decode_first_stage(z) - log["control"] = c_cat[:, :1] - log["conditioning"] = log_txt_as_img((512, 512), batch[self.cond_stage_key], size=16) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if plot_progressive_rows: - with self.ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising({"c_concat": [c_cat], "c_crossattn": [c]}, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = 
self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - if sample: - # get denoise row - samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if unconditional_guidance_scale > 1.0: - uc_cross = self.get_unconditional_conditioning(N) - uc_cat = c_cat # torch.zeros_like(c_cat) - uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} - samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc_full, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - - return log diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/display-woodwind-diagrams.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/display-woodwind-diagrams.go deleted file mode 100644 index 460784ccfa04576d4d703eb6d0eba03eabafe661..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/display-woodwind-diagrams.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/candle-llama2/app-fc3fb0e73c846240.js b/spaces/PeepDaSlan9/candle-llama2/app-fc3fb0e73c846240.js deleted file mode 100644 index 3277564b3b05392d29c2926dea3eeba1e1cc5fe9..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/candle-llama2/app-fc3fb0e73c846240.js +++ /dev/null @@ -1,829 +0,0 @@ -let wasm; - -const heap = new Array(128).fill(undefined); - -heap.push(undefined, null, true, false); - -function getObject(idx) { return heap[idx]; } - -let heap_next = heap.length; - -function dropObject(idx) { - if (idx < 132) return; - heap[idx] = heap_next; - heap_next = idx; -} - -function takeObject(idx) { - const ret = getObject(idx); - dropObject(idx); - return ret; -} - -function addHeapObject(obj) { - if (heap_next === heap.length) heap.push(heap.length + 1); - const idx = heap_next; - heap_next = heap[idx]; - - heap[idx] = obj; - return idx; -} - -const cachedTextDecoder = (typeof TextDecoder !== 'undefined' ? 
new TextDecoder('utf-8', { ignoreBOM: true, fatal: true }) : { decode: () => { throw Error('TextDecoder not available') } } ); - -if (typeof TextDecoder !== 'undefined') { cachedTextDecoder.decode(); }; - -let cachedUint8Memory0 = null; - -function getUint8Memory0() { - if (cachedUint8Memory0 === null || cachedUint8Memory0.byteLength === 0) { - cachedUint8Memory0 = new Uint8Array(wasm.memory.buffer); - } - return cachedUint8Memory0; -} - -function getStringFromWasm0(ptr, len) { - ptr = ptr >>> 0; - return cachedTextDecoder.decode(getUint8Memory0().subarray(ptr, ptr + len)); -} - -function debugString(val) { - // primitive types - const type = typeof val; - if (type == 'number' || type == 'boolean' || val == null) { - return `${val}`; - } - if (type == 'string') { - return `"${val}"`; - } - if (type == 'symbol') { - const description = val.description; - if (description == null) { - return 'Symbol'; - } else { - return `Symbol(${description})`; - } - } - if (type == 'function') { - const name = val.name; - if (typeof name == 'string' && name.length > 0) { - return `Function(${name})`; - } else { - return 'Function'; - } - } - // objects - if (Array.isArray(val)) { - const length = val.length; - let debug = '['; - if (length > 0) { - debug += debugString(val[0]); - } - for(let i = 1; i < length; i++) { - debug += ', ' + debugString(val[i]); - } - debug += ']'; - return debug; - } - // Test for built-in - const builtInMatches = /\[object ([^\]]+)\]/.exec(toString.call(val)); - let className; - if (builtInMatches.length > 1) { - className = builtInMatches[1]; - } else { - // Failed to match the standard '[object ClassName]' - return toString.call(val); - } - if (className == 'Object') { - // we're a user defined class or Object - // JSON.stringify avoids problems with cycles, and is generally much - // easier than looping through ownProperties of `val`. - try { - return 'Object(' + JSON.stringify(val) + ')'; - } catch (_) { - return 'Object'; - } - } - // errors - if (val instanceof Error) { - return `${val.name}: ${val.message}\n${val.stack}`; - } - // TODO we could test for more things here, like `Set`s and `Map`s. - return className; -} - -let WASM_VECTOR_LEN = 0; - -const cachedTextEncoder = (typeof TextEncoder !== 'undefined' ? new TextEncoder('utf-8') : { encode: () => { throw Error('TextEncoder not available') } } ); - -const encodeString = (typeof cachedTextEncoder.encodeInto === 'function' - ? 
function (arg, view) { - return cachedTextEncoder.encodeInto(arg, view); -} - : function (arg, view) { - const buf = cachedTextEncoder.encode(arg); - view.set(buf); - return { - read: arg.length, - written: buf.length - }; -}); - -function passStringToWasm0(arg, malloc, realloc) { - - if (realloc === undefined) { - const buf = cachedTextEncoder.encode(arg); - const ptr = malloc(buf.length, 1) >>> 0; - getUint8Memory0().subarray(ptr, ptr + buf.length).set(buf); - WASM_VECTOR_LEN = buf.length; - return ptr; - } - - let len = arg.length; - let ptr = malloc(len, 1) >>> 0; - - const mem = getUint8Memory0(); - - let offset = 0; - - for (; offset < len; offset++) { - const code = arg.charCodeAt(offset); - if (code > 0x7F) break; - mem[ptr + offset] = code; - } - - if (offset !== len) { - if (offset !== 0) { - arg = arg.slice(offset); - } - ptr = realloc(ptr, len, len = offset + arg.length * 3, 1) >>> 0; - const view = getUint8Memory0().subarray(ptr + offset, ptr + len); - const ret = encodeString(arg, view); - - offset += ret.written; - } - - WASM_VECTOR_LEN = offset; - return ptr; -} - -let cachedInt32Memory0 = null; - -function getInt32Memory0() { - if (cachedInt32Memory0 === null || cachedInt32Memory0.byteLength === 0) { - cachedInt32Memory0 = new Int32Array(wasm.memory.buffer); - } - return cachedInt32Memory0; -} - -function makeClosure(arg0, arg1, dtor, f) { - const state = { a: arg0, b: arg1, cnt: 1, dtor }; - const real = (...args) => { - // First up with a closure we increment the internal reference - // count. This ensures that the Rust closure environment won't - // be deallocated while we're invoking it. - state.cnt++; - try { - return f(state.a, state.b, ...args); - } finally { - if (--state.cnt === 0) { - wasm.__wbindgen_export_2.get(state.dtor)(state.a, state.b); - state.a = 0; - - } - } - }; - real.original = state; - - return real; -} -function __wbg_adapter_18(arg0, arg1, arg2) { - wasm.wasm_bindgen__convert__closures__invoke1__hee69d633833ebd7c(arg0, arg1, addHeapObject(arg2)); -} - -function makeMutClosure(arg0, arg1, dtor, f) { - const state = { a: arg0, b: arg1, cnt: 1, dtor }; - const real = (...args) => { - // First up with a closure we increment the internal reference - // count. This ensures that the Rust closure environment won't - // be deallocated while we're invoking it. 
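- // For the FnMut variant the closure pointer is additionally stashed and cleared - // (state.a = 0) for the duration of the call, so a re-entrant invocation cannot alias - // the mutable environment; the finally block below either restores the pointer or, once - // the reference count reaches zero, runs the destructor entry from the wasm function table.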
- state.cnt++; - const a = state.a; - state.a = 0; - try { - return f(a, state.b, ...args); - } finally { - if (--state.cnt === 0) { - wasm.__wbindgen_export_2.get(state.dtor)(a, state.b); - - } else { - state.a = a; - } - } - }; - real.original = state; - - return real; -} - -let stack_pointer = 128; - -function addBorrowedObject(obj) { - if (stack_pointer == 1) throw new Error('out of js stack'); - heap[--stack_pointer] = obj; - return stack_pointer; -} -function __wbg_adapter_21(arg0, arg1, arg2) { - try { - wasm._dyn_core__ops__function__FnMut___A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hadab26222cba6f84(arg0, arg1, addBorrowedObject(arg2)); - } finally { - heap[stack_pointer++] = undefined; - } -} - -function __wbg_adapter_24(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hfc3f0e78cf729c36(arg0, arg1, addHeapObject(arg2)); -} - -function isLikeNone(x) { - return x === undefined || x === null; -} - -let cachedUint32Memory0 = null; - -function getUint32Memory0() { - if (cachedUint32Memory0 === null || cachedUint32Memory0.byteLength === 0) { - cachedUint32Memory0 = new Uint32Array(wasm.memory.buffer); - } - return cachedUint32Memory0; -} - -function getArrayJsValueFromWasm0(ptr, len) { - ptr = ptr >>> 0; - const mem = getUint32Memory0(); - const slice = mem.subarray(ptr / 4, ptr / 4 + len); - const result = []; - for (let i = 0; i < slice.length; i++) { - result.push(takeObject(slice[i])); - } - return result; -} - -function handleError(f, args) { - try { - return f.apply(this, args); - } catch (e) { - wasm.__wbindgen_exn_store(addHeapObject(e)); - } -} - -async function __wbg_load(module, imports) { - if (typeof Response === 'function' && module instanceof Response) { - if (typeof WebAssembly.instantiateStreaming === 'function') { - try { - return await WebAssembly.instantiateStreaming(module, imports); - - } catch (e) { - if (module.headers.get('Content-Type') != 'application/wasm') { - console.warn("`WebAssembly.instantiateStreaming` failed because your server does not serve wasm with `application/wasm` MIME type. Falling back to `WebAssembly.instantiate` which is slower. Original error:\n", e); - - } else { - throw e; - } - } - } - - const bytes = await module.arrayBuffer(); - return await WebAssembly.instantiate(bytes, imports); - - } else { - const instance = await WebAssembly.instantiate(module, imports); - - if (instance instanceof WebAssembly.Instance) { - return { instance, module }; - - } else { - return instance; - } - } -} - -function __wbg_get_imports() { - const imports = {}; - imports.wbg = {}; - imports.wbg.__wbindgen_object_drop_ref = function(arg0) { - takeObject(arg0); - }; - imports.wbg.__wbindgen_object_clone_ref = function(arg0) { - const ret = getObject(arg0); - return addHeapObject(ret); - }; - imports.wbg.__wbg_log_3af90b48c052f90b = function(arg0, arg1) { - console.log(getStringFromWasm0(arg0, arg1)); - }; - imports.wbg.__wbindgen_cb_drop = function(arg0) { - const obj = takeObject(arg0).original; - if (obj.cnt-- == 1) { - obj.a = 0; - return true; - } - const ret = false; - return ret; - }; - imports.wbg.__wbindgen_string_new = function(arg0, arg1) { - const ret = getStringFromWasm0(arg0, arg1); - return addHeapObject(ret); - }; - imports.wbg.__wbg_listenerid_12315eee21527820 = function(arg0, arg1) { - const ret = getObject(arg1).__yew_listener_id; - getInt32Memory0()[arg0 / 4 + 1] = isLikeNone(ret) ? 
0 : ret; - getInt32Memory0()[arg0 / 4 + 0] = !isLikeNone(ret); - }; - imports.wbg.__wbg_setlistenerid_3183aae8fa5840fb = function(arg0, arg1) { - getObject(arg0).__yew_listener_id = arg1 >>> 0; - }; - imports.wbg.__wbg_setsubtreeid_d32e6327eef1f7fc = function(arg0, arg1) { - getObject(arg0).__yew_subtree_id = arg1 >>> 0; - }; - imports.wbg.__wbg_subtreeid_e348577f7ef777e3 = function(arg0, arg1) { - const ret = getObject(arg1).__yew_subtree_id; - getInt32Memory0()[arg0 / 4 + 1] = isLikeNone(ret) ? 0 : ret; - getInt32Memory0()[arg0 / 4 + 0] = !isLikeNone(ret); - }; - imports.wbg.__wbg_cachekey_b61393159c57fd7b = function(arg0, arg1) { - const ret = getObject(arg1).__yew_subtree_cache_key; - getInt32Memory0()[arg0 / 4 + 1] = isLikeNone(ret) ? 0 : ret; - getInt32Memory0()[arg0 / 4 + 0] = !isLikeNone(ret); - }; - imports.wbg.__wbg_setcachekey_80183b7cfc421143 = function(arg0, arg1) { - getObject(arg0).__yew_subtree_cache_key = arg1 >>> 0; - }; - imports.wbg.__wbg_error_71d6845bf00a930f = function(arg0, arg1) { - var v0 = getArrayJsValueFromWasm0(arg0, arg1).slice(); - wasm.__wbindgen_free(arg0, arg1 * 4); - console.error(...v0); - }; - imports.wbg.__wbg_warn_0b90a269a514ae1d = function(arg0, arg1) { - var v0 = getArrayJsValueFromWasm0(arg0, arg1).slice(); - wasm.__wbindgen_free(arg0, arg1 * 4); - console.warn(...v0); - }; - imports.wbg.__wbg_new_abda76e883ba8a5f = function() { - const ret = new Error(); - return addHeapObject(ret); - }; - imports.wbg.__wbg_stack_658279fe44541cf6 = function(arg0, arg1) { - const ret = getObject(arg1).stack; - const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }; - imports.wbg.__wbg_error_f851667af71bcfc6 = function(arg0, arg1) { - let deferred0_0; - let deferred0_1; - try { - deferred0_0 = arg0; - deferred0_1 = arg1; - console.error(getStringFromWasm0(arg0, arg1)); - } finally { - wasm.__wbindgen_free(deferred0_0, deferred0_1, 1); - } - }; - imports.wbg.__wbg_location_7ac41949b772ef21 = function(arg0) { - const ret = getObject(arg0).location; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_body_674aec4c1c0910cd = function(arg0) { - const ret = getObject(arg0).body; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_createElement_4891554b28d3388b = function() { return handleError(function (arg0, arg1, arg2) { - const ret = getObject(arg0).createElement(getStringFromWasm0(arg1, arg2)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_createElementNS_119acf9e82482041 = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4) { - const ret = getObject(arg0).createElementNS(arg1 === 0 ? undefined : getStringFromWasm0(arg1, arg2), getStringFromWasm0(arg3, arg4)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_createTextNode_2fd22cd7e543f938 = function(arg0, arg1, arg2) { - const ret = getObject(arg0).createTextNode(getStringFromWasm0(arg1, arg2)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_instanceof_Window_9029196b662bc42a = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof Window; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_document_f7ace2b956f30a4f = function(arg0) { - const ret = getObject(arg0).document; - return isLikeNone(ret) ? 
0 : addHeapObject(ret); - }; - imports.wbg.__wbg_location_56243dba507f472d = function(arg0) { - const ret = getObject(arg0).location; - return addHeapObject(ret); - }; - imports.wbg.__wbg_performance_2c295061c8b01e0b = function(arg0) { - const ret = getObject(arg0).performance; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_fetch_336b6f0cb426b46e = function(arg0, arg1) { - const ret = getObject(arg0).fetch(getObject(arg1)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_setchecked_e5a50baea447b8a8 = function(arg0, arg1) { - getObject(arg0).checked = arg1 !== 0; - }; - imports.wbg.__wbg_value_9423da9d988ee8cf = function(arg0, arg1) { - const ret = getObject(arg1).value; - const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }; - imports.wbg.__wbg_setvalue_1f95e61cbc382f7f = function(arg0, arg1, arg2) { - getObject(arg0).value = getStringFromWasm0(arg1, arg2); - }; - imports.wbg.__wbg_newwithstrandinit_cad5cd6038c7ff5d = function() { return handleError(function (arg0, arg1, arg2) { - const ret = new Request(getStringFromWasm0(arg0, arg1), getObject(arg2)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_setonmessage_f0bd0280573b7084 = function(arg0, arg1) { - getObject(arg0).onmessage = getObject(arg1); - }; - imports.wbg.__wbg_new_8e7322f46d5d019c = function() { return handleError(function (arg0, arg1) { - const ret = new Worker(getStringFromWasm0(arg0, arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_newwithoptions_1bd20b45061ed935 = function() { return handleError(function (arg0, arg1, arg2) { - const ret = new Worker(getStringFromWasm0(arg0, arg1), getObject(arg2)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_postMessage_8c609e2bde333d9c = function() { return handleError(function (arg0, arg1) { - getObject(arg0).postMessage(getObject(arg1)); - }, arguments) }; - imports.wbg.__wbg_instanceof_Response_fc4327dbfcdf5ced = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof Response; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_blob_34990e4300d45f53 = function() { return handleError(function (arg0) { - const ret = getObject(arg0).blob(); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_now_0cfdc90c97d0c24b = function(arg0) { - const ret = getObject(arg0).now(); - return ret; - }; - imports.wbg.__wbg_debug_9b8701f894da9929 = function(arg0, arg1, arg2, arg3) { - console.debug(getObject(arg0), getObject(arg1), getObject(arg2), getObject(arg3)); - }; - imports.wbg.__wbg_error_788ae33f81d3b84b = function(arg0) { - console.error(getObject(arg0)); - }; - imports.wbg.__wbg_error_d9bce418caafb712 = function(arg0, arg1, arg2, arg3) { - console.error(getObject(arg0), getObject(arg1), getObject(arg2), getObject(arg3)); - }; - imports.wbg.__wbg_info_bb52f40b06f679de = function(arg0, arg1, arg2, arg3) { - console.info(getObject(arg0), getObject(arg1), getObject(arg2), getObject(arg3)); - }; - imports.wbg.__wbg_log_ea7093e35e3efd07 = function(arg0, arg1, arg2, arg3) { - console.log(getObject(arg0), getObject(arg1), getObject(arg2), getObject(arg3)); - }; - imports.wbg.__wbg_warn_dfc0e0cf544a13bd = function(arg0, arg1, arg2, arg3) { - console.warn(getObject(arg0), getObject(arg1), getObject(arg2), getObject(arg3)); - }; - 
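- // The surrounding bindings reuse two conventions: a nullable JS value goes back to wasm - // as 0 (absent) or a non-zero heap index from addHeapObject, and a returned string is - // first copied into wasm memory with passStringToWasm0, after which the (pointer, length) - // pair is written into the 32-bit return area whose address wasm passes as arg0.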
imports.wbg.__wbg_instanceof_Element_4622f5da1249a3eb = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof Element; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_namespaceURI_31718ed49b5343a3 = function(arg0, arg1) { - const ret = getObject(arg1).namespaceURI; - var ptr1 = isLikeNone(ret) ? 0 : passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - var len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }; - imports.wbg.__wbg_setinnerHTML_b089587252408b67 = function(arg0, arg1, arg2) { - getObject(arg0).innerHTML = getStringFromWasm0(arg1, arg2); - }; - imports.wbg.__wbg_outerHTML_f7749ceff37b5832 = function(arg0, arg1) { - const ret = getObject(arg1).outerHTML; - const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }; - imports.wbg.__wbg_children_27ed308801b57d3f = function(arg0) { - const ret = getObject(arg0).children; - return addHeapObject(ret); - }; - imports.wbg.__wbg_removeAttribute_d8404da431968808 = function() { return handleError(function (arg0, arg1, arg2) { - getObject(arg0).removeAttribute(getStringFromWasm0(arg1, arg2)); - }, arguments) }; - imports.wbg.__wbg_setAttribute_e7e80b478b7b8b2f = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4) { - getObject(arg0).setAttribute(getStringFromWasm0(arg1, arg2), getStringFromWasm0(arg3, arg4)); - }, arguments) }; - imports.wbg.__wbg_origin_50aa482fa6784a0a = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg1).origin; - const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }, arguments) }; - imports.wbg.__wbg_pathname_c8fd5c498079312d = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg1).pathname; - const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }, arguments) }; - imports.wbg.__wbg_data_ab99ae4a2e1e8bc9 = function(arg0) { - const ret = getObject(arg0).data; - return addHeapObject(ret); - }; - imports.wbg.__wbg_target_f171e89c61e2bccf = function(arg0) { - const ret = getObject(arg0).target; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_bubbles_63572b91f3885ef1 = function(arg0) { - const ret = getObject(arg0).bubbles; - return ret; - }; - imports.wbg.__wbg_cancelBubble_90d1c3aa2a76cbeb = function(arg0) { - const ret = getObject(arg0).cancelBubble; - return ret; - }; - imports.wbg.__wbg_composedPath_cf1bb5b8bcff496f = function(arg0) { - const ret = getObject(arg0).composedPath(); - return addHeapObject(ret); - }; - imports.wbg.__wbg_parentNode_9e53f8b17eb98c9d = function(arg0) { - const ret = getObject(arg0).parentNode; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_parentElement_c75962bc9997ea5f = function(arg0) { - const ret = getObject(arg0).parentElement; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_lastChild_0cee692010bac6c2 = function(arg0) { - const ret = getObject(arg0).lastChild; - return isLikeNone(ret) ? 
0 : addHeapObject(ret); - }; - imports.wbg.__wbg_nextSibling_304d9aac7c2774ae = function(arg0) { - const ret = getObject(arg0).nextSibling; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_setnodeValue_d1c8382910b45e04 = function(arg0, arg1, arg2) { - getObject(arg0).nodeValue = arg1 === 0 ? undefined : getStringFromWasm0(arg1, arg2); - }; - imports.wbg.__wbg_textContent_c5d9e21ee03c63d4 = function(arg0, arg1) { - const ret = getObject(arg1).textContent; - var ptr1 = isLikeNone(ret) ? 0 : passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - var len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }; - imports.wbg.__wbg_appendChild_51339d4cde00ee22 = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg0).appendChild(getObject(arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_insertBefore_ffa01d4b747c95fc = function() { return handleError(function (arg0, arg1, arg2) { - const ret = getObject(arg0).insertBefore(getObject(arg1), getObject(arg2)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_removeChild_973429f368206138 = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg0).removeChild(getObject(arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_createObjectURL_d82f2880bada6a1d = function() { return handleError(function (arg0, arg1) { - const ret = URL.createObjectURL(getObject(arg1)); - const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }, arguments) }; - imports.wbg.__wbg_newwithstrsequenceandoptions_fd88a547f6d15707 = function() { return handleError(function (arg0, arg1) { - const ret = new Blob(getObject(arg0), getObject(arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_arrayBuffer_27cefaea55cbf063 = function(arg0) { - const ret = getObject(arg0).arrayBuffer(); - return addHeapObject(ret); - }; - imports.wbg.__wbg_addEventListener_a5963e26cd7b176b = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4) { - getObject(arg0).addEventListener(getStringFromWasm0(arg1, arg2), getObject(arg3), getObject(arg4)); - }, arguments) }; - imports.wbg.__wbg_instanceof_ShadowRoot_b64337370f59fe2d = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof ShadowRoot; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_host_e1c47c33975060d3 = function(arg0) { - const ret = getObject(arg0).host; - return addHeapObject(ret); - }; - imports.wbg.__wbg_value_3c5f08ffc2b7d6f9 = function(arg0, arg1) { - const ret = getObject(arg1).value; - const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }; - imports.wbg.__wbg_setvalue_0dc100d4b9908028 = function(arg0, arg1, arg2) { - getObject(arg0).value = getStringFromWasm0(arg1, arg2); - }; - imports.wbg.__wbg_get_44be0491f933a435 = function(arg0, arg1) { - const ret = getObject(arg0)[arg1 >>> 0]; - return addHeapObject(ret); - }; - imports.wbg.__wbg_length_fff51ee6522a1a18 = function(arg0) { - const ret = getObject(arg0).length; - return ret; - }; - imports.wbg.__wbg_new_898a68150f225f2e = function() { - const ret = new Array(); - 
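- // As with every object-returning import, the fresh Array itself never crosses the - // boundary: it is parked in the heap table and only its integer index is returned.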
return addHeapObject(ret); - }; - imports.wbg.__wbg_newnoargs_581967eacc0e2604 = function(arg0, arg1) { - const ret = new Function(getStringFromWasm0(arg0, arg1)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_call_cb65541d95d71282 = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg0).call(getObject(arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_new_b51585de1b234aff = function() { - const ret = new Object(); - return addHeapObject(ret); - }; - imports.wbg.__wbg_self_1ff1d729e9aae938 = function() { return handleError(function () { - const ret = self.self; - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_window_5f4faef6c12b79ec = function() { return handleError(function () { - const ret = window.window; - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_globalThis_1d39714405582d3c = function() { return handleError(function () { - const ret = globalThis.globalThis; - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_global_651f05c6a0944d1c = function() { return handleError(function () { - const ret = global.global; - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbindgen_is_undefined = function(arg0) { - const ret = getObject(arg0) === undefined; - return ret; - }; - imports.wbg.__wbg_from_d7c216d4616bb368 = function(arg0) { - const ret = Array.from(getObject(arg0)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_push_ca1c26067ef907ac = function(arg0, arg1) { - const ret = getObject(arg0).push(getObject(arg1)); - return ret; - }; - imports.wbg.__wbg_is_205d914af04a8faa = function(arg0, arg1) { - const ret = Object.is(getObject(arg0), getObject(arg1)); - return ret; - }; - imports.wbg.__wbg_resolve_53698b95aaf7fcf8 = function(arg0) { - const ret = Promise.resolve(getObject(arg0)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_then_f7e06ee3c11698eb = function(arg0, arg1) { - const ret = getObject(arg0).then(getObject(arg1)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_then_b2267541e2a73865 = function(arg0, arg1, arg2) { - const ret = getObject(arg0).then(getObject(arg1), getObject(arg2)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_buffer_085ec1f694018c4f = function(arg0) { - const ret = getObject(arg0).buffer; - return addHeapObject(ret); - }; - imports.wbg.__wbg_newwithbyteoffsetandlength_6da8e527659b86aa = function(arg0, arg1, arg2) { - const ret = new Uint8Array(getObject(arg0), arg1 >>> 0, arg2 >>> 0); - return addHeapObject(ret); - }; - imports.wbg.__wbg_new_8125e318e6245eed = function(arg0) { - const ret = new Uint8Array(getObject(arg0)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_set_5cf90238115182c3 = function(arg0, arg1, arg2) { - getObject(arg0).set(getObject(arg1), arg2 >>> 0); - }; - imports.wbg.__wbg_length_72e2208bbc0efc61 = function(arg0) { - const ret = getObject(arg0).length; - return ret; - }; - imports.wbg.__wbg_set_092e06b0f9d71865 = function() { return handleError(function (arg0, arg1, arg2) { - const ret = Reflect.set(getObject(arg0), getObject(arg1), getObject(arg2)); - return ret; - }, arguments) }; - imports.wbg.__wbindgen_debug_string = function(arg0, arg1) { - const ret = debugString(getObject(arg1)); - const ptr1 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len1 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len1; - getInt32Memory0()[arg0 / 4 + 0] = ptr1; - }; - imports.wbg.__wbindgen_throw = function(arg0, arg1) { - throw new 
Error(getStringFromWasm0(arg0, arg1)); - }; - imports.wbg.__wbindgen_memory = function() { - const ret = wasm.memory; - return addHeapObject(ret); - }; - imports.wbg.__wbindgen_closure_wrapper179 = function(arg0, arg1, arg2) { - const ret = makeClosure(arg0, arg1, 38, __wbg_adapter_18); - return addHeapObject(ret); - }; - imports.wbg.__wbindgen_closure_wrapper391 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 127, __wbg_adapter_21); - return addHeapObject(ret); - }; - imports.wbg.__wbindgen_closure_wrapper670 = function(arg0, arg1, arg2) { - const ret = makeMutClosure(arg0, arg1, 235, __wbg_adapter_24); - return addHeapObject(ret); - }; - - return imports; -} - -function __wbg_init_memory(imports, maybe_memory) { - -} - -function __wbg_finalize_init(instance, module) { - wasm = instance.exports; - __wbg_init.__wbindgen_wasm_module = module; - cachedInt32Memory0 = null; - cachedUint32Memory0 = null; - cachedUint8Memory0 = null; - - wasm.__wbindgen_start(); - return wasm; -} - -function initSync(module) { - if (wasm !== undefined) return wasm; - - const imports = __wbg_get_imports(); - - __wbg_init_memory(imports); - - if (!(module instanceof WebAssembly.Module)) { - module = new WebAssembly.Module(module); - } - - const instance = new WebAssembly.Instance(module, imports); - - return __wbg_finalize_init(instance, module); -} - -async function __wbg_init(input) { - if (wasm !== undefined) return wasm; - - if (typeof input === 'undefined') { - input = new URL('app-fc3fb0e73c846240_bg.wasm', import.meta.url); - } - const imports = __wbg_get_imports(); - - if (typeof input === 'string' || (typeof Request === 'function' && input instanceof Request) || (typeof URL === 'function' && input instanceof URL)) { - input = fetch(input); - } - - __wbg_init_memory(imports); - - const { instance, module } = await __wbg_load(await input, imports); - - return __wbg_finalize_init(instance, module); -} - -export { initSync } -export default __wbg_init; diff --git a/spaces/Pennywise881/wiki-chat-v2/app.py b/spaces/Pennywise881/wiki-chat-v2/app.py deleted file mode 100644 index 083b764a0f55bf617d20b816dea3b16539eea620..0000000000000000000000000000000000000000 --- a/spaces/Pennywise881/wiki-chat-v2/app.py +++ /dev/null @@ -1,97 +0,0 @@ -import streamlit as st -import os - -from Article import Article -from VectorDB import VectorDB -from QuestionAnswer import QuestionAnswer - -from transformers import AutoTokenizer, AutoModelForQuestionAnswering -from sentence_transformers import models, SentenceTransformer - - -reader = AutoModelForQuestionAnswering.from_pretrained('Pennywise881/distilbert-base-uncased-finetuned-squad-v2') -tokenizer = AutoTokenizer.from_pretrained('Pennywise881/distilbert-base-uncased-finetuned-squad-v2') - -distilbert = models.Transformer("Pennywise881/distilbert-base-uncased-mnr-squadv2") -pooler = models.Pooling( - distilbert.get_word_embedding_dimension(), - pooling_mode_mean_tokens=True -) - -retreiver = SentenceTransformer(modules=[distilbert, pooler]) - -if 'found_article' not in st.session_state: - st.session_state.found_article = False - st.session_state.article_name = '' - st.session_state.db = None - st.session_state.qas = [] - -st.write(""" - # Wiki Chat V2 -""") -placeholder = st.empty() - -def get_article(retreiver): - article_name = placeholder.text_input("Enter the name of a Wikipedia article") - - if article_name: - article = Article() - article_data = article.get_article_data(article_name=article_name) - - if len(article_data) > 0: - API_KEY = 
os.environ['API_KEY'] - db = VectorDB(retreiver=retreiver, API_KEY=API_KEY) - db.upsert_data(article_data=article_data) - ask_questions(article_name=article_name, db=db) - - st.session_state.found_article = True - st.session_state.article_name = article_name - st.session_state.db = db - else: - st.write(f'Sorry, could not find Wikipedia article: {article_name}') - -def ask_questions(article_name, db : VectorDB): - question = placeholder.text_input(f"Ask questions about '{article_name}'", '') - st.header("Questions and Answers:") - - if question: - contexts = db.get_contexts(question.lower()) - # print(contexts) - - data = { - 'question': question.lower(), - 'context': contexts['matches'][0]['metadata']['text'] - } - qa = QuestionAnswer(data, reader, tokenizer, 'cpu') - results = qa.get_results() - - paragraph_index = contexts['matches'][0]['id'] - section = contexts['matches'][0]['metadata']['section'] - answer = '' - for r in results: - answer += r['text'] + ", " - - answer = answer[:len(answer) - 2] - st.session_state.qas.append( - { - 'question': question, - 'answer': answer, - 'section': section, - 'para': paragraph_index - } - ) - - if len(st.session_state.qas) > 0: - for data in st.session_state.qas: - st.text( - "Question: " + data['question'] + '\n' + - "Answer: " + data['answer'] + '\n' + - "Section: " + data['section'] + '\n' + - "Paragraph #: " + data['para'] - ) - -if st.session_state.found_article == False: - get_article(retreiver) - -else: - ask_questions(st.session_state.article_name, st.session_state.db) \ No newline at end of file diff --git a/spaces/PepijnvB/KappaNeuro-salomon-van-ruysdael-style/README.md b/spaces/PepijnvB/KappaNeuro-salomon-van-ruysdael-style/README.md deleted file mode 100644 index 9d872a0e5f895b1b8298d3207f2ce33b5d2f2e3d..0000000000000000000000000000000000000000 --- a/spaces/PepijnvB/KappaNeuro-salomon-van-ruysdael-style/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: KappaNeuro Salomon Van Ruysdael Style -emoji: 🔥 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.49.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/language_backbone/test_clip_tokenizer.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/language_backbone/test_clip_tokenizer.py deleted file mode 100644 index e9ffd6f242ba7cbb48b11afb77c26c931e232d21..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/language_backbone/test_clip_tokenizer.py +++ /dev/null @@ -1,8 +0,0 @@ -from maskrcnn_benchmark.modeling.language_backbone import build_tokenizer - -if __name__ == '__main__': - - tokenizer2 = build_tokenizer("clip") - tokenized2 = tokenizer2( - ["Detectest : fishid. jellyfishioasod. penguinasd. puffin.asd shark. starfish. round stingray"]) - print(tokenized2) diff --git a/spaces/Plachta/VALL-E-X/utils/symbol_table.py b/spaces/Plachta/VALL-E-X/utils/symbol_table.py deleted file mode 100644 index 7a86010a76280576f85490641623dbb27559aa99..0000000000000000000000000000000000000000 --- a/spaces/Plachta/VALL-E-X/utils/symbol_table.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright 2020 Mobvoi Inc. 
(authors: Fangjun Kuang) -# -# See ../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from dataclasses import dataclass -from dataclasses import field -from typing import Dict -from typing import Generic -from typing import List -from typing import Optional -from typing import TypeVar -from typing import Union - -Symbol = TypeVar('Symbol') - - -# Disable __repr__ otherwise it could freeze e.g. Jupyter. -@dataclass(repr=False) -class SymbolTable(Generic[Symbol]): - '''SymbolTable that maps symbol IDs, found on the FSA arcs to - actual objects. These objects can be arbitrary Python objects - that can serve as keys in a dictionary (i.e. they need to be - hashable and immutable). - - The SymbolTable can only be read to/written from disk if the - symbols are strings. - ''' - _id2sym: Dict[int, Symbol] = field(default_factory=dict) - '''Map an integer to a symbol. - ''' - - _sym2id: Dict[Symbol, int] = field(default_factory=dict) - '''Map a symbol to an integer. - ''' - - _next_available_id: int = 1 - '''A helper internal field that helps adding new symbols - to the table efficiently. - ''' - - eps: Symbol = '' - '''Null symbol, always mapped to index 0. - ''' - - def __post_init__(self): - for idx, sym in self._id2sym.items(): - assert self._sym2id[sym] == idx - assert idx >= 0 - - for sym, idx in self._sym2id.items(): - assert idx >= 0 - assert self._id2sym[idx] == sym - - if 0 not in self._id2sym: - self._id2sym[0] = self.eps - self._sym2id[self.eps] = 0 - else: - assert self._id2sym[0] == self.eps - assert self._sym2id[self.eps] == 0 - - self._next_available_id = max(self._id2sym) + 1 - - @staticmethod - def from_str(s: str) -> 'SymbolTable': - '''Build a symbol table from a string. - - The string consists of lines. Every line has two fields separated - by space(s), tab(s) or both. The first field is the symbol and the - second the integer id of the symbol. - - Args: - s: - The input string with the format described above. - Returns: - An instance of :class:`SymbolTable`. - ''' - id2sym: Dict[int, str] = dict() - sym2id: Dict[str, int] = dict() - - for line in s.split('\n'): - fields = line.split() - if len(fields) == 0: - continue # skip empty lines - assert len(fields) == 2, \ - f'Expect a line with 2 fields. Given: {len(fields)}' - sym, idx = fields[0], int(fields[1]) - assert sym not in sym2id, f'Duplicated symbol {sym}' - assert idx not in id2sym, f'Duplicated id {idx}' - id2sym[idx] = sym - sym2id[sym] = idx - - eps = id2sym.get(0, '') - - return SymbolTable(_id2sym=id2sym, _sym2id=sym2id, eps=eps) - - @staticmethod - def from_file(filename: str) -> 'SymbolTable': - '''Build a symbol table from file. - - Every line in the symbol table file has two fields separated by - space(s), tab(s) or both. The following is an example file: - - .. code-block:: - - 0 - a 1 - b 2 - c 3 - - Args: - filename: - Name of the symbol table file. Its format is documented above. - - Returns: - An instance of :class:`SymbolTable`. 
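- - A minimal usage sketch, assuming the example file above is saved as - ``sym.txt`` (the file name is illustrative):: - - table = SymbolTable.from_file('sym.txt') - assert table['<eps>'] == 0 and table[2] == 'b'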
- - ''' - with open(filename, 'r', encoding='utf-8') as f: - return SymbolTable.from_str(f.read().strip()) - - def to_str(self) -> str: - ''' - Returns: - Return a string representation of this object. You can pass - it to the method ``from_str`` to recreate an identical object. - ''' - s = '' - for idx, symbol in sorted(self._id2sym.items()): - s += f'{symbol} {idx}\n' - return s - - def to_file(self, filename: str): - '''Serialize the SymbolTable to a file. - - Every line in the symbol table file has two fields separated by - space(s), tab(s) or both. The following is an example file: - - .. code-block:: - - 0 - a 1 - b 2 - c 3 - - Args: - filename: - Name of the symbol table file. Its format is documented above. - ''' - with open(filename, 'w') as f: - for idx, symbol in sorted(self._id2sym.items()): - print(symbol, idx, file=f) - - def add(self, symbol: Symbol, index: Optional[int] = None) -> int: - '''Add a new symbol to the SymbolTable. - - Args: - symbol: - The symbol to be added. - index: - Optional int id to which the symbol should be assigned. - If it is not available, a ValueError will be raised. - - Returns: - The int id to which the symbol has been assigned. - ''' - # Already in the table? Return its ID. - if symbol in self._sym2id: - return self._sym2id[symbol] - # Specific ID not provided - use next available. - if index is None: - index = self._next_available_id - # Specific ID provided but not available. - if index in self._id2sym: - raise ValueError(f"Cannot assign id '{index}' to '{symbol}' - " - f"already occupied by {self._id2sym[index]}") - self._sym2id[symbol] = index - self._id2sym[index] = symbol - - # Update next available ID if needed - if self._next_available_id <= index: - self._next_available_id = index + 1 - - return index - - def get(self, k: Union[int, Symbol]) -> Union[Symbol, int]: - '''Get a symbol for an id or get an id for a symbol - - Args: - k: - If it is an id, it tries to find the symbol corresponding - to the id; if it is a symbol, it tries to find the id - corresponding to the symbol. - - Returns: - An id or a symbol depending on the given `k`. - ''' - if isinstance(k, int): - return self._id2sym[k] - else: - return self._sym2id[k] - - def merge(self, other: 'SymbolTable') -> 'SymbolTable': - '''Create a union of two SymbolTables. - Raises an AssertionError if the same IDs are occupied by - different symbols. - - Args: - other: - A symbol table to merge with ``self``. - - Returns: - A new symbol table. 
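- - Example (``table_a`` and ``table_b`` are illustrative):: - - merged = table_a.merge(table_b) - # any ID present in both tables must already map to the same symbol; - # otherwise ``_check_compatible`` raises an AssertionError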
- ''' - self._check_compatible(other) - - id2sym = {**self._id2sym, **other._id2sym} - sym2id = {**self._sym2id, **other._sym2id} - - return SymbolTable(_id2sym=id2sym, _sym2id=sym2id, eps=self.eps) - - def _check_compatible(self, other: 'SymbolTable') -> None: - # Epsilon compatibility - assert self.eps == other.eps, f'Mismatched epsilon symbol: ' \ - f'{self.eps} != {other.eps}' - # IDs compatibility - common_ids = set(self._id2sym).intersection(other._id2sym) - for idx in common_ids: - assert self[idx] == other[idx], f'ID conflict for id: {idx}, ' \ - f'self[idx] = "{self[idx]}", ' \ - f'other[idx] = "{other[idx]}"' - # Symbols compatibility - common_symbols = set(self._sym2id).intersection(other._sym2id) - for sym in common_symbols: - assert self[sym] == other[sym], f'ID conflict for id: {sym}, ' \ - f'self[sym] = "{self[sym]}", ' \ - f'other[sym] = "{other[sym]}"' - - def __getitem__(self, item: Union[int, Symbol]) -> Union[Symbol, int]: - return self.get(item) - - def __contains__(self, item: Union[int, Symbol]) -> bool: - if isinstance(item, int): - return item in self._id2sym - else: - return item in self._sym2id - - def __len__(self) -> int: - return len(self._id2sym) - - def __eq__(self, other: 'SymbolTable') -> bool: - if len(self) != len(other): - return False - - for s in self.symbols: - if self[s] != other[s]: - return False - - return True - - @property - def ids(self) -> List[int]: - '''Returns a list of integer IDs corresponding to the symbols. - ''' - ans = list(self._id2sym.keys()) - ans.sort() - return ans - - @property - def symbols(self) -> List[Symbol]: - '''Returns a list of symbols (e.g., strings) corresponding to - the integer IDs. - ''' - ans = list(self._sym2id.keys()) - ans.sort() - return ans diff --git a/spaces/Politrees/RVC_V2_Huggingface_Version/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/Politrees/RVC_V2_Huggingface_Version/lib/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/Politrees/RVC_V2_Huggingface_Version/lib/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/Politrees/RVC_V2_Huggingface_Version/run.sh b/spaces/Politrees/RVC_V2_Huggingface_Version/run.sh deleted file mode 100644 index 31d0be013006e9130e7b3b24d479272dd01c8acd..0000000000000000000000000000000000000000 --- a/spaces/Politrees/RVC_V2_Huggingface_Version/run.sh +++ /dev/null @@ -1,16 +0,0 @@ -# Install Debian packages -sudo apt-get update -sudo apt-get install -qq -y build-essential ffmpeg aria2 - -# Upgrade pip and setuptools -pip install --upgrade pip -pip install --upgrade setuptools - -# Install wheel package (built-package format for Python) -pip install wheel - -# Install Python packages using pip -pip install -r requirements.txt - -# Run application locally at http://127.0.0.1:7860 -python app.py diff --git a/spaces/Pranjal12345/Text_to_Speech/tortoise/models/classifier.py b/spaces/Pranjal12345/Text_to_Speech/tortoise/models/classifier.py deleted file mode 100644 index f92d99e511d08f8b9e9807fb5ef34e6e871a998c..0000000000000000000000000000000000000000 --- 
a/spaces/Pranjal12345/Text_to_Speech/tortoise/models/classifier.py +++ /dev/null @@ -1,148 +0,0 @@ -import torch -import torch.nn as nn - -from tortoise.models.arch_util import Upsample, Downsample, normalization, zero_module, AttentionBlock - - -class ResBlock(nn.Module): - def __init__( - self, - channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - up=False, - down=False, - kernel_size=3, - do_checkpoint=True, - ): - super().__init__() - self.channels = channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_scale_shift_norm = use_scale_shift_norm - self.do_checkpoint = do_checkpoint - padding = 1 if kernel_size == 3 else 2 - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = nn.Conv1d( - dims, channels, self.out_channels, kernel_size, padding=padding - ) - else: - self.skip_connection = nn.Conv1d(dims, channels, self.out_channels, 1) - - def forward(self, x): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AudioMiniEncoder(nn.Module): - def __init__(self, - spec_dim, - embedding_dim, - base_channels=128, - depth=2, - resnet_blocks=2, - attn_blocks=4, - num_attn_heads=4, - dropout=0, - downsample_factor=2, - kernel_size=3): - super().__init__() - self.init = nn.Sequential( - nn.Conv1d(spec_dim, base_channels, 3, padding=1) - ) - ch = base_channels - res = [] - self.layers = depth - for l in range(depth): - for r in range(resnet_blocks): - res.append(ResBlock(ch, dropout, do_checkpoint=False, kernel_size=kernel_size)) - res.append(Downsample(ch, use_conv=True, out_channels=ch*2, factor=downsample_factor)) - ch *= 2 - self.res = nn.Sequential(*res) - self.final = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.Conv1d(ch, embedding_dim, 1) - ) - attn = [] - for a in range(attn_blocks): - attn.append(AttentionBlock(embedding_dim, num_attn_heads, do_checkpoint=False)) - self.attn = nn.Sequential(*attn) - self.dim = embedding_dim - - def forward(self, x): - h = self.init(x) - h = self.res(h) - h = self.final(h) - for blk in self.attn: - h = blk(h) - return h[:, :, 0] - - -class AudioMiniEncoderWithClassifierHead(nn.Module): - def __init__(self, classes, distribute_zero_label=True, **kwargs): - super().__init__() - self.enc = AudioMiniEncoder(**kwargs) - self.head = nn.Linear(self.enc.dim, classes) - self.num_classes = classes - self.distribute_zero_label = distribute_zero_label - - def forward(self, x, labels=None): - h = self.enc(x) - logits = self.head(h) - if labels is None: - return logits - else: - if self.distribute_zero_label: - oh_labels = 
nn.functional.one_hot(labels, num_classes=self.num_classes) - zeros_indices = (labels == 0).unsqueeze(-1) - # Distribute 20% of the probability mass on all classes when zero is specified, to compensate for dataset noise. - zero_extra_mass = torch.full_like(oh_labels, dtype=torch.float, fill_value=.2/(self.num_classes-1)) - zero_extra_mass[:, 0] = -.2 - zero_extra_mass = zero_extra_mass * zeros_indices - oh_labels = oh_labels + zero_extra_mass - else: - oh_labels = labels - loss = nn.functional.cross_entropy(logits, oh_labels) - return loss diff --git a/spaces/Qiukai/gpt/request_llm/bridge_tgui.py b/spaces/Qiukai/gpt/request_llm/bridge_tgui.py deleted file mode 100644 index 22a407557fa884f23dd768164b009d7bed841dd9..0000000000000000000000000000000000000000 --- a/spaces/Qiukai/gpt/request_llm/bridge_tgui.py +++ /dev/null @@ -1,167 +0,0 @@ -''' -Contributed by SagsMug. Modified by binary-husky -https://github.com/oobabooga/text-generation-webui/pull/175 -''' - -import asyncio -import json -import random -import string -import websockets -import logging -import time -import threading -import importlib -from toolbox import get_conf, update_ui -LLM_MODEL, = get_conf('LLM_MODEL') - -# "TGUI:galactica-1.3b@localhost:7860" -model_name, addr_port = LLM_MODEL.split('@') -assert ':' in addr_port, "LLM_MODEL 格式不正确!" + LLM_MODEL -addr, port = addr_port.split(':') - -def random_hash(): - letters = string.ascii_lowercase + string.digits - return ''.join(random.choice(letters) for i in range(9)) - -async def run(context, max_token=512): - params = { - 'max_new_tokens': max_token, - 'do_sample': True, - 'temperature': 0.5, - 'top_p': 0.9, - 'typical_p': 1, - 'repetition_penalty': 1.05, - 'encoder_repetition_penalty': 1.0, - 'top_k': 0, - 'min_length': 0, - 'no_repeat_ngram_size': 0, - 'num_beams': 1, - 'penalty_alpha': 0, - 'length_penalty': 1, - 'early_stopping': True, - 'seed': -1, - } - session = random_hash() - - async with websockets.connect(f"ws://{addr}:{port}/queue/join") as websocket: - while content := json.loads(await websocket.recv()): - #Python3.10 syntax, replace with if elif on older - if content["msg"] == "send_hash": - await websocket.send(json.dumps({ - "session_hash": session, - "fn_index": 12 - })) - elif content["msg"] == "estimation": - pass - elif content["msg"] == "send_data": - await websocket.send(json.dumps({ - "session_hash": session, - "fn_index": 12, - "data": [ - context, - params['max_new_tokens'], - params['do_sample'], - params['temperature'], - params['top_p'], - params['typical_p'], - params['repetition_penalty'], - params['encoder_repetition_penalty'], - params['top_k'], - params['min_length'], - params['no_repeat_ngram_size'], - params['num_beams'], - params['penalty_alpha'], - params['length_penalty'], - params['early_stopping'], - params['seed'], - ] - })) - elif content["msg"] == "process_starts": - pass - elif content["msg"] in ["process_generating", "process_completed"]: - yield content["output"]["data"][0] - # You can search for your desired end indicator and - # stop generation by closing the websocket here - if (content["msg"] == "process_completed"): - break - - - - - -def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 发送至chatGPT,流式获取输出。 - 用于基础的对话功能。 - inputs 是本次问询的输入 - top_p, temperature是chatGPT的内部调优参数 - history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误) - chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 - additional_fn代表点击的哪个按钮,按钮见functional.py - """ - if additional_fn is 
not None: - import core_functional - importlib.reload(core_functional) # 热更新prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - raw_input = "What I would like to say is the following: " + inputs - logging.info(f'[raw_input] {raw_input}') - history.extend([inputs, ""]) - chatbot.append([inputs, ""]) - yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 - - prompt = inputs - tgui_say = "" - - mutable = ["", time.time()] - def run_coorotine(mutable): - async def get_result(mutable): - async for response in run(prompt): - print(response[len(mutable[0]):]) - mutable[0] = response - if (time.time() - mutable[1]) > 3: - print('exit when no listener') - break - asyncio.run(get_result(mutable)) - - thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True) - thread_listen.start() - - while thread_listen.is_alive(): - time.sleep(1) - mutable[1] = time.time() - # Print intermediate steps - if tgui_say != mutable[0]: - tgui_say = mutable[0] - history[-1] = tgui_say - chatbot[-1] = (history[-2], history[-1]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - logging.info(f'[response] {tgui_say}') - - - -def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""): - raw_input = "What I would like to say is the following: " + inputs - prompt = inputs - tgui_say = "" - mutable = ["", time.time()] - def run_coorotine(mutable): - async def get_result(mutable): - async for response in run(prompt, max_token=20): - print(response[len(mutable[0]):]) - mutable[0] = response - if (time.time() - mutable[1]) > 3: - print('exit when no listener') - break - asyncio.run(get_result(mutable)) - thread_listen = threading.Thread(target=run_coorotine, args=(mutable,)) - thread_listen.start() - while thread_listen.is_alive(): - time.sleep(1) - mutable[1] = time.time() - tgui_say = mutable[0] - return tgui_say diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/configs/aspan/indoor/aspan_test.py b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/configs/aspan/indoor/aspan_test.py deleted file mode 100644 index 00ea16cd35dc4362d0d9a294ad8a1762427bc382..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/configs/aspan/indoor/aspan_test.py +++ /dev/null @@ -1,11 +0,0 @@ -import sys -from pathlib import Path - -sys.path.append(str(Path(__file__).parent / "../../../")) -from src.config.default import _CN as cfg - -cfg.ASPAN.MATCH_COARSE.MATCH_TYPE = "dual_softmax" - -cfg.ASPAN.MATCH_COARSE.BORDER_RM = 0 -cfg.ASPAN.COARSE.COARSEST_LEVEL = [15, 20] -cfg.ASPAN.COARSE.TRAIN_RES = [480, 640] diff --git a/spaces/Rehman1603/SkinDisease/README.md b/spaces/Rehman1603/SkinDisease/README.md deleted file mode 100644 index 7ee14657717b8564ce1613832cbde5c282571f6d..0000000000000000000000000000000000000000 --- a/spaces/Rehman1603/SkinDisease/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SkinDisease -emoji: 📚 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Rmpmartinspro2/Comic-Diffusion/app.py b/spaces/Rmpmartinspro2/Comic-Diffusion/app.py deleted 
file mode 100644 index 03afd6d526697d8090573a3635613cc4b2c0d92e..0000000000000000000000000000000000000000 --- a/spaces/Rmpmartinspro2/Comic-Diffusion/app.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import gradio as gr - -API_KEY=os.environ.get('HUGGING_FACE_HUB_TOKEN', None) - -article = """--- -This space was created using [SD Space Creator](https://huggingface.co/spaces/anzorq/sd-space-creator).""" - -gr.Interface.load( - name="models/ogkalu/Comic-Diffusion", - title="""Comic Diffusion""", - description="""Demo for Comic Diffusion Stable Diffusion model.""", - article=article, - api_key=API_KEY, - ).queue(concurrency_count=20).launch() diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/backbones/uniformer.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/backbones/uniformer.py deleted file mode 100644 index 5705a6dd7019f51bc04e4a2c7ff42021821dbd49..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/backbones/uniformer.py +++ /dev/null @@ -1,422 +0,0 @@ -# -------------------------------------------------------- -# UniFormer -# Copyright (c) 2022 SenseTime X-Lab -# Licensed under The MIT License [see LICENSE for details] -# Written by Kunchang Li -# -------------------------------------------------------- - -from collections import OrderedDict -import math - -from functools import partial -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -import numpy as np -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from mmcv_custom import load_checkpoint -from mmdet.utils import get_root_logger -from ..builder import BACKBONES - - -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class CMlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Conv2d(in_features, hidden_features, 1) - self.act = act_layer() - self.fc2 = nn.Conv2d(hidden_features, out_features, 1) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class CBlock(nn.Module): - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - self.norm1 = nn.BatchNorm2d(dim) - self.conv1 = nn.Conv2d(dim, dim, 1) - self.conv2 = nn.Conv2d(dim, dim, 1) - self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = nn.BatchNorm2d(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x))))) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class Attention(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim ** -0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x): - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SABlock(nn.Module): - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, - num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, - attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - B, N, H, W = x.shape - x = x.flatten(2).transpose(1, 2) - x = x + self.drop_path(self.attn(self.norm1(x))) - x = x + self.drop_path(self.mlp(self.norm2(x))) - x = x.transpose(1, 2).reshape(B, N, H, W) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class SABlock_Windows(nn.Module): - def __init__(self, dim, num_heads, window_size=14, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.window_size=window_size - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, - num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, - attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - x = x.permute(0, 2, 3, 1) - B, H, W, C = x.shape - shortcut = x - x = self.norm1(x) - - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - x_windows = window_partition(x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - x = x.permute(0, 3, 1, 2).reshape(B, C, H, W) - return x - - -class PatchEmbed(nn.Module): - """ Image to Patch Embedding - """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) - self.img_size = img_size - self.patch_size = patch_size - self.num_patches = num_patches - self.norm = nn.LayerNorm(embed_dim) - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - - def forward(self, x): - B, _, H, W = x.shape - x = self.proj(x) - B, _, H, W = x.shape - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() - return x - - -@BACKBONES.register_module() -class UniFormer(nn.Module): - """ Vision Transformer - A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - - https://arxiv.org/abs/2010.11929 - """ - def __init__(self, layers=[3, 4, 8, 3], img_size=224, in_chans=3, num_classes=80, embed_dim=[64, 128, 320, 512], - head_dim=64, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), - pretrained_path=None, use_checkpoint=False, checkpoint_num=[0, 0, 0, 0], - windows=False, hybrid=False, window_size=14): - """ - Args: - layer (list): number of block in each layer - img_size (int, tuple): input image size - in_chans (int): number of input channels - num_classes (int): number of classes for classification head - embed_dim (int): embedding dimension - head_dim (int): dimension of attention heads - mlp_ratio (int): ratio of mlp hidden dim to embedding dim - qkv_bias (bool): enable bias for qkv if True - qk_scale (float): override default qk scale of head_dim ** -0.5 if set - representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set - drop_rate (float): dropout rate - attn_drop_rate (float): attention dropout rate - drop_path_rate (float): stochastic depth rate - norm_layer (nn.Module): normalization layer - pretrained_path (str): path of pretrained model - use_checkpoint (bool): whether use checkpoint - checkpoint_num (list): 
index for using checkpoint in every stage - windows (bool): whether use window MHRA - hybrid (bool): whether use hybrid MHRA - window_size (int): size of window (>14) - """ - super().__init__() - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.checkpoint_num = checkpoint_num - self.windows = windows - print(f'Use Checkpoint: {self.use_checkpoint}') - print(f'Checkpoint Number: {self.checkpoint_num}') - self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.patch_embed1 = PatchEmbed( - img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0]) - self.patch_embed2 = PatchEmbed( - img_size=img_size // 4, patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1]) - self.patch_embed3 = PatchEmbed( - img_size=img_size // 8, patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2]) - self.patch_embed4 = PatchEmbed( - img_size=img_size // 16, patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3]) - - self.pos_drop = nn.Dropout(p=drop_rate) - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(layers))] # stochastic depth decay rule - num_heads = [dim // head_dim for dim in embed_dim] - self.blocks1 = nn.ModuleList([ - CBlock( - dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) - for i in range(layers[0])]) - self.norm1=norm_layer(embed_dim[0]) - self.blocks2 = nn.ModuleList([ - CBlock( - dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]], norm_layer=norm_layer) - for i in range(layers[1])]) - self.norm2 = norm_layer(embed_dim[1]) - if self.windows: - print('Use local window for all blocks in stage3') - self.blocks3 = nn.ModuleList([ - SABlock_Windows( - dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer) - for i in range(layers[2])]) - elif hybrid: - print('Use hybrid window for blocks in stage3') - block3 = [] - for i in range(layers[2]): - if (i + 1) % 4 == 0: - block3.append(SABlock( - dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)) - else: - block3.append(SABlock_Windows( - dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)) - self.blocks3 = nn.ModuleList(block3) - else: - print('Use global window for all blocks in stage3') - self.blocks3 = nn.ModuleList([ - SABlock( - dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer) - for i in range(layers[2])]) - self.norm3 = norm_layer(embed_dim[2]) - self.blocks4 = nn.ModuleList([ - SABlock( - dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, 
drop_path=dpr[i+layers[0]+layers[1]+layers[2]], norm_layer=norm_layer) - for i in range(layers[3])]) - self.norm4 = norm_layer(embed_dim[3]) - - # Representation layer - if representation_size: - self.num_features = representation_size - self.pre_logits = nn.Sequential(OrderedDict([ - ('fc', nn.Linear(embed_dim, representation_size)), - ('act', nn.Tanh()) - ])) - else: - self.pre_logits = nn.Identity() - - self.apply(self._init_weights) - self.init_weights(pretrained=pretrained_path) - - def init_weights(self, pretrained): - if isinstance(pretrained, str): - logger = get_root_logger() - load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) - print(f'Load pretrained model from {pretrained}') - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_embed', 'cls_token'} - - def get_classifier(self): - return self.head - - def reset_classifier(self, num_classes, global_pool=''): - self.num_classes = num_classes - self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() - - def forward_features(self, x): - out = [] - x = self.patch_embed1(x) - x = self.pos_drop(x) - for i, blk in enumerate(self.blocks1): - if self.use_checkpoint and i < self.checkpoint_num[0]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm1(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - x = self.patch_embed2(x) - for i, blk in enumerate(self.blocks2): - if self.use_checkpoint and i < self.checkpoint_num[1]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm2(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - x = self.patch_embed3(x) - for i, blk in enumerate(self.blocks3): - if self.use_checkpoint and i < self.checkpoint_num[2]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm3(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - x = self.patch_embed4(x) - for i, blk in enumerate(self.blocks4): - if self.use_checkpoint and i < self.checkpoint_num[3]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm4(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - return tuple(out) - - def forward(self, x): - x = self.forward_features(x) - return x diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/bbox/coder/pseudo_bbox_coder.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/bbox/coder/pseudo_bbox_coder.py deleted file mode 100644 index 1c8346f4ae2c7db9719a70c7dc0244e088a9965b..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/bbox/coder/pseudo_bbox_coder.py +++ /dev/null @@ -1,18 +0,0 @@ -from ..builder import BBOX_CODERS -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class PseudoBBoxCoder(BaseBBoxCoder): - """Pseudo bounding box coder.""" - - def __init__(self, **kwargs): - super(BaseBBoxCoder, self).__init__(**kwargs) - - def encode(self, bboxes, gt_bboxes): - """torch.Tensor: return the given ``bboxes``""" - return gt_bboxes - - def decode(self, bboxes, pred_bboxes): - """torch.Tensor: return the given ``pred_bboxes``""" 
- return pred_bboxes diff --git a/spaces/SantiagoTesla/Rai_AI/README.md b/spaces/SantiagoTesla/Rai_AI/README.md deleted file mode 100644 index 9d55bbe5ce7be31a4d196cb7a0fa78c4822f2968..0000000000000000000000000000000000000000 --- a/spaces/SantiagoTesla/Rai_AI/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Chatbot Final Version -emoji: 🏃 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/utils/pose.py b/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/utils/pose.py deleted file mode 100644 index 9fa2abe12d34a385704b34d9ab98c62b364a85d4..0000000000000000000000000000000000000000 --- a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/utils/pose.py +++ /dev/null @@ -1,169 +0,0 @@ -from .img import (load_image, drawGaussian, drawBigCircle, drawSmallCircle, cv_rotate, - cropBox, transformBox, transformBoxInvert, flip, shuffleLR, drawCOCO) -from .eval import getPrediction -import torch -import numpy as np -import random -from opt import opt - - -def rnd(x): - return max(-2 * x, min(2 * x, np.random.randn(1)[0] * x)) - - -def generateSampleBox(img_path, bndbox, part, nJoints, imgset, scale_factor, dataset, train=True): - - nJoints_coco = 17 - nJoints_mpii = 16 - img = load_image(img_path) - if train: - img[0].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1) - img[1].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1) - img[2].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1) - - ori_img = img.clone() - img[0].add_(-0.406) - img[1].add_(-0.457) - img[2].add_(-0.480) - - upLeft = torch.Tensor((int(bndbox[0][0]), int(bndbox[0][1]))) - bottomRight = torch.Tensor((int(bndbox[0][2]), int(bndbox[0][3]))) - ht = bottomRight[1] - upLeft[1] - width = bottomRight[0] - upLeft[0] - imght = img.shape[1] - imgwidth = img.shape[2] - scaleRate = random.uniform(*scale_factor) - - upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2) - upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2) - bottomRight[0] = min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2) - bottomRight[1] = min(imght - 1, bottomRight[1] + ht * scaleRate / 2) - - # Doing Random Sample - if opt.addDPG: - PatchScale = random.uniform(0, 1) - if PatchScale > 0.85: - ratio = ht / width - if (width < ht): - patchWidth = PatchScale * width - patchHt = patchWidth * ratio - else: - patchHt = PatchScale * ht - patchWidth = patchHt / ratio - - xmin = upLeft[0] + random.uniform(0, 1) * (width - patchWidth) - ymin = upLeft[1] + random.uniform(0, 1) * (ht - patchHt) - - xmax = xmin + patchWidth + 1 - ymax = ymin + patchHt + 1 - else: - xmin = max(1, min(upLeft[0] + np.random.normal(-0.0142, 0.1158) * width, imgwidth - 3)) - ymin = max(1, min(upLeft[1] + np.random.normal(0.0043, 0.068) * ht, imght - 3)) - xmax = min(max(xmin + 2, bottomRight[0] + np.random.normal(0.0154, 0.1337) * width), imgwidth - 3) - ymax = min(max(ymin + 2, bottomRight[1] + np.random.normal(-0.0013, 0.0711) * ht), imght - 3) - - upLeft[0] = xmin - upLeft[1] = ymin - bottomRight[0] = xmax - bottomRight[1] = ymax - - # Counting Joints number - jointNum = 0 - if imgset == 'coco': - for i in range(17): - if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \ - and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]: - jointNum += 1 - else: - for i in range(16): - if part[i][0] > 0 and part[i][0] > upLeft[0] and 
part[i][1] > upLeft[1] \ - and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]: - jointNum += 1 - - # Doing Random Crop - if opt.addDPG: - if jointNum > 13 and train: - switch = random.uniform(0, 1) - if switch > 0.96: - bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2 - bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2 - elif switch > 0.92: - upLeft[0] = (upLeft[0] + bottomRight[0]) / 2 - bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2 - elif switch > 0.88: - upLeft[1] = (upLeft[1] + bottomRight[1]) / 2 - bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2 - elif switch > 0.84: - upLeft[0] = (upLeft[0] + bottomRight[0]) / 2 - upLeft[1] = (upLeft[1] + bottomRight[1]) / 2 - elif switch > 0.80: - bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2 - elif switch > 0.76: - upLeft[0] = (upLeft[0] + bottomRight[0]) / 2 - elif switch > 0.72: - bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2 - elif switch > 0.68: - upLeft[1] = (upLeft[1] + bottomRight[1]) / 2 - - ori_inp = cropBox(ori_img, upLeft, bottomRight, opt.inputResH, opt.inputResW) - inp = cropBox(img, upLeft, bottomRight, opt.inputResH, opt.inputResW) - if jointNum == 0: - inp = torch.zeros(3, opt.inputResH, opt.inputResW) - - out_bigcircle = torch.zeros(nJoints, opt.outputResH, opt.outputResW) - out_smallcircle = torch.zeros(nJoints, opt.outputResH, opt.outputResW) - out = torch.zeros(nJoints, opt.outputResH, opt.outputResW) - setMask = torch.zeros(nJoints, opt.outputResH, opt.outputResW) - - # Draw Label - if imgset == 'coco': - for i in range(nJoints_coco): - if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \ - and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]: - out_bigcircle[i] = drawBigCircle(out_bigcircle[i], transformBox(part[i], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss * 2) - out_smallcircle[i] = drawSmallCircle(out_smallcircle[i], transformBox(part[i], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss) - out[i] = drawGaussian(out[i], transformBox(part[i], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss) - setMask[i].add_(1) - elif imgset == 'mpii': - for i in range(nJoints_coco, nJoints_coco + nJoints_mpii): - if part[i - nJoints_coco][0] > 0 and part[i - nJoints_coco][0] > upLeft[0] and part[i - nJoints_coco][1] > upLeft[1] \ - and part[i - nJoints_coco][0] < bottomRight[0] and part[i - nJoints_coco][1] < bottomRight[1]: - out_bigcircle[i] = drawBigCircle(out_bigcircle[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss * 2) - out_smallcircle[i] = drawSmallCircle(out_smallcircle[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss) - out[i] = drawGaussian(out[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss) - setMask[i].add_(1) - else: - for i in range(nJoints_coco, nJoints_coco + nJoints_mpii): - if part[i - nJoints_coco][0] > 0 and part[i - nJoints_coco][0] > upLeft[0] and part[i - nJoints_coco][1] > upLeft[1] \ - and part[i - nJoints_coco][0] < bottomRight[0] and part[i - nJoints_coco][1] < bottomRight[1]: - out_bigcircle[i] = drawBigCircle(out_bigcircle[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), 
opt.hmGauss * 2) - out_smallcircle[i] = drawSmallCircle(out_smallcircle[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss) - out[i] = drawGaussian(out[i], transformBox(part[i - nJoints_coco], upLeft, bottomRight, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW), opt.hmGauss) - if i != 6 + nJoints_coco and i != 7 + nJoints_coco: - setMask[i].add_(1) - - if opt.debug: - preds_hm, preds_img, preds_scores = getPrediction(out.unsqueeze(0), upLeft.unsqueeze(0), bottomRight.unsqueeze(0), opt.inputResH, - opt.inputResW, opt.outputResH, opt.outputResW) - tmp_preds = preds_hm.mul(opt.inputResH / opt.outputResH) - drawCOCO(ori_inp.unsqueeze(0), tmp_preds, preds_scores) - - if train: - # Flip - if random.uniform(0, 1) < 0.5: - inp = flip(inp) - ori_inp = flip(ori_inp) - out_bigcircle = shuffleLR(flip(out_bigcircle), dataset) - out_smallcircle = shuffleLR(flip(out_smallcircle), dataset) - out = shuffleLR(flip(out), dataset) - # Rotate - r = rnd(opt.rotate) - if random.uniform(0, 1) < 0.6: - r = 0 - if r != 0: - inp = cv_rotate(inp, r, opt.inputResW, opt.inputResH) - out_bigcircle = cv_rotate(out_bigcircle, r, opt.outputResW, opt.outputResH) - out_smallcircle = cv_rotate(out_smallcircle, r, opt.outputResW, opt.outputResH) - out = cv_rotate(out, r, opt.outputResW, opt.outputResH) - - return inp, out_bigcircle, out_smallcircle, out, setMask diff --git a/spaces/SeViLA/SeViLA/lavis/datasets/datasets/vqa_datasets.py b/spaces/SeViLA/SeViLA/lavis/datasets/datasets/vqa_datasets.py deleted file mode 100644 index 8803b25d7fdff8f80764d95db1f8bb0bd26e93ae..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/datasets/datasets/vqa_datasets.py +++ /dev/null @@ -1,44 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import torch - -from lavis.datasets.datasets.base_dataset import BaseDataset - - -class VQADataset(BaseDataset): - def __init__(self, vis_processor, text_processor, vis_root, ann_paths): - super().__init__(vis_processor, text_processor, vis_root, ann_paths) - - def collater(self, samples): - image_list, question_list, answer_list, weight_list = [], [], [], [] - - num_answers = [] - - for sample in samples: - image_list.append(sample["image"]) - question_list.append(sample["text_input"]) - - weight_list.extend(sample["weights"]) - - answers = sample["answers"] - - answer_list.extend(answers) - num_answers.append(len(answers)) - - return { - "image": torch.stack(image_list, dim=0), - "text_input": question_list, - "answer": answer_list, - "weight": torch.Tensor(weight_list), - "n_answers": torch.LongTensor(num_answers), - } - - -class VQAEvalDataset(BaseDataset): - def __init__(self, vis_processor, text_processor, vis_root, ann_paths): - super().__init__(vis_processor, text_processor, vis_root, ann_paths) diff --git a/spaces/ShkShahid/Auto-encoder_For_Image_Reconstruction/app.py b/spaces/ShkShahid/Auto-encoder_For_Image_Reconstruction/app.py deleted file mode 100644 index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000 --- a/spaces/ShkShahid/Auto-encoder_For_Image_Reconstruction/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + "!!" 
- -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/SpaceNMagic/OPEN_AI/README.md b/spaces/SpaceNMagic/OPEN_AI/README.md deleted file mode 100644 index 8aaebad6ffa763d0980539b263971a6d8dac80d3..0000000000000000000000000000000000000000 --- a/spaces/SpaceNMagic/OPEN_AI/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: OPEN AI -emoji: 🌍 -colorFrom: blue -colorTo: purple -sdk: docker -pinned: false -duplicated_from: Tigerjack23/OPEN_AI ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SuYuanS/AudioCraft_Plus/scripts/templates/survey.html b/spaces/SuYuanS/AudioCraft_Plus/scripts/templates/survey.html deleted file mode 100644 index 785d1e61b7ac21619416ba70dd4719ff250f3f4b..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/scripts/templates/survey.html +++ /dev/null @@ -1,131 +0,0 @@ -{% extends "base.html" %} -{% block content %} -

-    [survey.html body: the HTML/audio markup was lost in extraction; the recoverable template content was:
-     a "Survey #{{signature}}" heading; a success notice ("Your ratings have been saved! You have been
-     moved to the next random seed, if you want to keep rating more samples.") and, when already_filled
-     is set, a warning that filling the form again overrides the previous ratings; a welcome line for
-     {{session['user']}} with links to the result page and the home page; a loop printing each {{error}};
-     in non-blind mode, "Base config is: {{ref_name}}" and the list of compared experiments
-     ("{{experiment.xp.sig}} ({{experiment.epoch}} epochs): {{experiment.name}}"), otherwise a note that
-     this is a blind experiment and the order of all XPs is shuffled with every sample; the current random
-     {{seed}} with a small form to change the seed and the blind setting; and a "Samples" section that,
-     for every {{id}}, showed the prompt and ground-truth samples (when prompted), any {{err}} messages,
-     the {{model.xp.sig}} label in non-blind mode, the sample for each model, and "Rating:" inputs looping
-     over the {{rating}} values, followed by a submit button.]
    - -{% endblock %} diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/launcher/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/launcher/__init__.py deleted file mode 100644 index a6e0934f14566185a819390ff0dbd288632c601c..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/launcher/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See LICENSE in the project root -# for license information. - -__all__ = [] - - -adapter_host = None -"""The host on which adapter is running and listening for incoming connections -from the launcher and the servers.""" - -channel = None -"""DAP message channel to the adapter.""" - - -def connect(host, port): - from debugpy.common import log, messaging, sockets - from debugpy.launcher import handlers - - global channel, adapter_host - assert channel is None - assert adapter_host is None - - log.info("Connecting to adapter at {0}:{1}", host, port) - - sock = sockets.create_client() - sock.connect((host, port)) - adapter_host = host - - stream = messaging.JsonIOStream.from_socket(sock, "Adapter") - channel = messaging.JsonMessageChannel(stream, handlers=handlers) - channel.start() diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/samplers/distributed_sampler.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/samplers/distributed_sampler.py deleted file mode 100644 index cd4724eac8fbff2456bd26f95e6fea5e914b73e2..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/samplers/distributed_sampler.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import itertools -import logging -import math -from collections import defaultdict -from typing import Optional -import torch -from torch.utils.data.sampler import Sampler - -from annotator.oneformer.detectron2.utils import comm - -logger = logging.getLogger(__name__) - - -class TrainingSampler(Sampler): - """ - In training, we only care about the "infinite stream" of training data. - So this sampler produces an infinite stream of indices and - all workers cooperate to correctly shuffle the indices and sample different indices. - - The samplers in each worker effectively produces `indices[worker_id::num_workers]` - where `indices` is an infinite stream of indices consisting of - `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True) - or `range(size) + range(size) + ...` (if shuffle is False) - - Note that this sampler does not shard based on pytorch DataLoader worker id. - A sampler passed to pytorch DataLoader is used only with map-style dataset - and will not be executed inside workers. - But if this sampler is used in a way that it gets execute inside a dataloader - worker, then extra work needs to be done to shard its outputs based on worker id. - This is required so that workers don't produce identical data. - :class:`ToIterableDataset` implements this logic. - This note is true for all samplers in detectron2. - """ - - def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None): - """ - Args: - size (int): the total number of data of the underlying dataset to sample from - shuffle (bool): whether to shuffle the indices or not - seed (int): the initial seed of the shuffle. Must be the same - across all workers. 
If None, will use a random seed shared - among workers (require synchronization among all workers). - """ - if not isinstance(size, int): - raise TypeError(f"TrainingSampler(size=) expects an int. Got type {type(size)}.") - if size <= 0: - raise ValueError(f"TrainingSampler(size=) expects a positive int. Got {size}.") - self._size = size - self._shuffle = shuffle - if seed is None: - seed = comm.shared_random_seed() - self._seed = int(seed) - - self._rank = comm.get_rank() - self._world_size = comm.get_world_size() - - def __iter__(self): - start = self._rank - yield from itertools.islice(self._infinite_indices(), start, None, self._world_size) - - def _infinite_indices(self): - g = torch.Generator() - g.manual_seed(self._seed) - while True: - if self._shuffle: - yield from torch.randperm(self._size, generator=g).tolist() - else: - yield from torch.arange(self._size).tolist() - - -class RandomSubsetTrainingSampler(TrainingSampler): - """ - Similar to TrainingSampler, but only sample a random subset of indices. - This is useful when you want to estimate the accuracy vs data-number curves by - training the model with different subset_ratio. - """ - - def __init__( - self, - size: int, - subset_ratio: float, - shuffle: bool = True, - seed_shuffle: Optional[int] = None, - seed_subset: Optional[int] = None, - ): - """ - Args: - size (int): the total number of data of the underlying dataset to sample from - subset_ratio (float): the ratio of subset data to sample from the underlying dataset - shuffle (bool): whether to shuffle the indices or not - seed_shuffle (int): the initial seed of the shuffle. Must be the same - across all workers. If None, will use a random seed shared - among workers (require synchronization among all workers). - seed_subset (int): the seed to randomize the subset to be sampled. - Must be the same across all workers. If None, will use a random seed shared - among workers (require synchronization among all workers). - """ - super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle) - - assert 0.0 < subset_ratio <= 1.0 - self._size_subset = int(size * subset_ratio) - assert self._size_subset > 0 - if seed_subset is None: - seed_subset = comm.shared_random_seed() - self._seed_subset = int(seed_subset) - - # randomly generate the subset indexes to be sampled from - g = torch.Generator() - g.manual_seed(self._seed_subset) - indexes_randperm = torch.randperm(self._size, generator=g) - self._indexes_subset = indexes_randperm[: self._size_subset] - - logger.info("Using RandomSubsetTrainingSampler......") - logger.info(f"Randomly sample {self._size_subset} data from the original {self._size} data") - - def _infinite_indices(self): - g = torch.Generator() - g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__() - while True: - if self._shuffle: - # generate a random permutation to shuffle self._indexes_subset - randperm = torch.randperm(self._size_subset, generator=g) - yield from self._indexes_subset[randperm].tolist() - else: - yield from self._indexes_subset.tolist() - - -class RepeatFactorTrainingSampler(Sampler): - """ - Similar to TrainingSampler, but a sample may appear more times than others based - on its "repeat factor". This is suitable for training on class imbalanced datasets like LVIS. - """ - - def __init__(self, repeat_factors, *, shuffle=True, seed=None): - """ - Args: - repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's - full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``. 
- shuffle (bool): whether to shuffle the indices or not - seed (int): the initial seed of the shuffle. Must be the same - across all workers. If None, will use a random seed shared - among workers (require synchronization among all workers). - """ - self._shuffle = shuffle - if seed is None: - seed = comm.shared_random_seed() - self._seed = int(seed) - - self._rank = comm.get_rank() - self._world_size = comm.get_world_size() - - # Split into whole number (_int_part) and fractional (_frac_part) parts. - self._int_part = torch.trunc(repeat_factors) - self._frac_part = repeat_factors - self._int_part - - @staticmethod - def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh): - """ - Compute (fractional) per-image repeat factors based on category frequency. - The repeat factor for an image is a function of the frequency of the rarest - category labeled in that image. The "frequency of category c" in [0, 1] is defined - as the fraction of images in the training set (without repeats) in which category c - appears. - See :paper:`lvis` (>= v2) Appendix B.2. - - Args: - dataset_dicts (list[dict]): annotations in Detectron2 dataset format. - repeat_thresh (float): frequency threshold below which data is repeated. - If the frequency is half of `repeat_thresh`, the image will be - repeated twice. - - Returns: - torch.Tensor: - the i-th element is the repeat factor for the dataset image at index i. - """ - # 1. For each category c, compute the fraction of images that contain it: f(c) - category_freq = defaultdict(int) - for dataset_dict in dataset_dicts: # For each image (without repeats) - cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]} - for cat_id in cat_ids: - category_freq[cat_id] += 1 - num_images = len(dataset_dicts) - for k, v in category_freq.items(): - category_freq[k] = v / num_images - - # 2. For each category c, compute the category-level repeat factor: - # r(c) = max(1, sqrt(t / f(c))) - category_rep = { - cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq)) - for cat_id, cat_freq in category_freq.items() - } - - # 3. For each image I, compute the image-level repeat factor: - # r(I) = max_{c in I} r(c) - rep_factors = [] - for dataset_dict in dataset_dicts: - cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]} - rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0) - rep_factors.append(rep_factor) - - return torch.tensor(rep_factors, dtype=torch.float32) - - def _get_epoch_indices(self, generator): - """ - Create a list of dataset indices (with repeats) to use for one epoch. - - Args: - generator (torch.Generator): pseudo random number generator used for - stochastic rounding. - - Returns: - torch.Tensor: list of dataset indices to use in one epoch. Each index - is repeated based on its calculated repeat factor. 
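-            For example (illustrative numbers): a repeat factor of 2.3 means the corresponding
-            index is emitted 2 times with probability 0.7 and 3 times with probability 0.3,
-            so its expected count per epoch is 2.3.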
- """ - # Since repeat factors are fractional, we use stochastic rounding so - # that the target repeat factor is achieved in expectation over the - # course of training - rands = torch.rand(len(self._frac_part), generator=generator) - rep_factors = self._int_part + (rands < self._frac_part).float() - # Construct a list of indices in which we repeat images as specified - indices = [] - for dataset_index, rep_factor in enumerate(rep_factors): - indices.extend([dataset_index] * int(rep_factor.item())) - return torch.tensor(indices, dtype=torch.int64) - - def __iter__(self): - start = self._rank - yield from itertools.islice(self._infinite_indices(), start, None, self._world_size) - - def _infinite_indices(self): - g = torch.Generator() - g.manual_seed(self._seed) - while True: - # Sample indices with repeats determined by stochastic rounding; each - # "epoch" may have a slightly different size due to the rounding. - indices = self._get_epoch_indices(g) - if self._shuffle: - randperm = torch.randperm(len(indices), generator=g) - yield from indices[randperm].tolist() - else: - yield from indices.tolist() - - -class InferenceSampler(Sampler): - """ - Produce indices for inference across all workers. - Inference needs to run on the __exact__ set of samples, - therefore when the total number of samples is not divisible by the number of workers, - this sampler produces different number of samples on different workers. - """ - - def __init__(self, size: int): - """ - Args: - size (int): the total number of data of the underlying dataset to sample from - """ - self._size = size - assert size > 0 - self._rank = comm.get_rank() - self._world_size = comm.get_world_size() - self._local_indices = self._get_local_indices(size, self._world_size, self._rank) - - @staticmethod - def _get_local_indices(total_size, world_size, rank): - shard_size = total_size // world_size - left = total_size % world_size - shard_sizes = [shard_size + int(r < left) for r in range(world_size)] - - begin = sum(shard_sizes[:rank]) - end = min(sum(shard_sizes[: rank + 1]), total_size) - return range(begin, end) - - def __iter__(self): - yield from self._local_indices - - def __len__(self): - return len(self._local_indices) diff --git a/spaces/TH5314/newbing/src/components/ui/alert-dialog.tsx b/spaces/TH5314/newbing/src/components/ui/alert-dialog.tsx deleted file mode 100644 index 17fec4d16510328deacc1416569173c97761ef72..0000000000000000000000000000000000000000 --- a/spaces/TH5314/newbing/src/components/ui/alert-dialog.tsx +++ /dev/null @@ -1,150 +0,0 @@ -'use client' - -import * as React from 'react' -import * as AlertDialogPrimitive from '@radix-ui/react-alert-dialog' - -import { cn } from '@/lib/utils' -import { buttonVariants } from '@/components/ui/button' - -const AlertDialog = AlertDialogPrimitive.Root - -const AlertDialogTrigger = AlertDialogPrimitive.Trigger - -const AlertDialogPortal = ({ - className, - children, - ...props -}: AlertDialogPrimitive.AlertDialogPortalProps) => ( - -
-    {/* JSX wrapper elements lost in extraction: the portal renders {children} inside a fixed, centered container */}
-    {children}
    -) -AlertDialogPortal.displayName = AlertDialogPrimitive.Portal.displayName - -const AlertDialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName - -const AlertDialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - -)) -AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName - -const AlertDialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -AlertDialogHeader.displayName = 'AlertDialogHeader' - -const AlertDialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -AlertDialogFooter.displayName = 'AlertDialogFooter' - -const AlertDialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName - -const AlertDialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogDescription.displayName = - AlertDialogPrimitive.Description.displayName - -const AlertDialogAction = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName - -const AlertDialogCancel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName - -export { - AlertDialog, - AlertDialogTrigger, - AlertDialogContent, - AlertDialogHeader, - AlertDialogFooter, - AlertDialogTitle, - AlertDialogDescription, - AlertDialogAction, - AlertDialogCancel -} diff --git a/spaces/TabPFN/TabPFNPrediction/TabPFN/train.py b/spaces/TabPFN/TabPFNPrediction/TabPFN/train.py deleted file mode 100644 index 9dc8a8866ac727162f9b61afe36567223aea948b..0000000000000000000000000000000000000000 --- a/spaces/TabPFN/TabPFNPrediction/TabPFN/train.py +++ /dev/null @@ -1,358 +0,0 @@ -import os -import itertools -import argparse -import time -import datetime -import yaml -from contextlib import nullcontext - - -import torch -from torch import nn - -import utils -from transformer import TransformerModel -from utils import get_cosine_schedule_with_warmup, get_openai_lr, StoreDictKeyPair, get_weighted_single_eval_pos_sampler, get_uniform_single_eval_pos_sampler -import priors -import encoders -import positional_encodings -from utils import init_dist -from torch.cuda.amp import autocast, GradScaler -from torch import nn - -class Losses(): - gaussian = nn.GaussianNLLLoss(full=True, reduction='none') - mse = nn.MSELoss(reduction='none') - def ce(num_classes): - num_classes = num_classes.shape[0] if torch.is_tensor(num_classes) else num_classes - return nn.CrossEntropyLoss(reduction='none', weight=torch.ones(num_classes)) - bce = nn.BCEWithLogitsLoss(reduction='none') - - - -def train(priordataloader_class, criterion, encoder_generator, emsize=200, nhid=200, nlayers=6, nhead=2, dropout=0.0, - epochs=10, steps_per_epoch=100, batch_size=200, bptt=10, lr=None, weight_decay=0.0, warmup_epochs=10, input_normalization=False, - y_encoder_generator=None, pos_encoder_generator=None, decoder=None, extra_prior_kwargs_dict={}, scheduler=get_cosine_schedule_with_warmup, - load_weights_from_this_state_dict=None, validation_period=10, single_eval_pos_gen=None, bptt_extra_samples=None, gpu_device='cuda:0', - aggregate_k_gradients=1, verbose=True, style_encoder_generator=None, epoch_callback=None, - initializer=None, initialize_with_model=None, train_mixed_precision=False, efficient_eval_masking=True, **model_extra_args - ): - device = gpu_device if torch.cuda.is_available() else 'cpu:0' - print(f'Using {device} device') - using_dist, rank, device = init_dist(device) - single_eval_pos_gen = single_eval_pos_gen if callable(single_eval_pos_gen) else lambda: single_eval_pos_gen - - def eval_pos_seq_len_sampler(): - single_eval_pos = single_eval_pos_gen() - if bptt_extra_samples: - return single_eval_pos, single_eval_pos + bptt_extra_samples - else: - return 
single_eval_pos, bptt - dl = priordataloader_class(num_steps=steps_per_epoch, batch_size=batch_size, eval_pos_seq_len_sampler=eval_pos_seq_len_sampler, seq_len_maximum=bptt+(bptt_extra_samples if bptt_extra_samples else 0), device=device, **extra_prior_kwargs_dict) - - encoder = encoder_generator(dl.num_features, emsize) - style_def = dl.get_test_batch()[0][0] # the style in batch of the form ((style, x, y), target, single_eval_pos) - print(f'Style definition of first 3 examples: {style_def[:3] if style_def is not None else None}') - style_encoder = style_encoder_generator(style_def.shape[1], emsize) if (style_def is not None) else None - if isinstance(criterion, nn.GaussianNLLLoss): - n_out = 2 - elif isinstance(criterion, nn.CrossEntropyLoss): - n_out = criterion.weight.shape[0] - else: - n_out = 1 - - model = TransformerModel(encoder, n_out, emsize, nhead, nhid, nlayers, dropout, style_encoder=style_encoder, - y_encoder=y_encoder_generator(1, emsize), input_normalization=input_normalization, - pos_encoder=(pos_encoder_generator or positional_encodings.NoPositionalEncoding)(emsize, bptt*2), - decoder=decoder, init_method=initializer, efficient_eval_masking=efficient_eval_masking, **model_extra_args - ) - model.criterion = criterion - if load_weights_from_this_state_dict is not None: - model.load_state_dict(load_weights_from_this_state_dict) - if initialize_with_model is not None: - model.init_from_small_model(initialize_with_model) - - print(f"Using a Transformer with {sum(p.numel() for p in model.parameters())/1000/1000:.{2}f} M parameters") - - try: - for (k, v), (k2, v2) in zip(model.state_dict().items(), initialize_with_model.state_dict().items()): - print(k, ((v - v2) / v).abs().mean(), v.shape) - except Exception: - pass - - model.to(device) - if using_dist: - print("Distributed training") - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank], output_device=rank, broadcast_buffers=False) - dl.model = model - - - # learning rate - if lr is None: - lr = get_openai_lr(model) - print(f"Using OpenAI max lr of {lr}.") - optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay) - scheduler = scheduler(optimizer, warmup_epochs, epochs if epochs is not None else 100) # when training for fixed time lr schedule takes 100 steps - - scaler = GradScaler() if train_mixed_precision else None - - # check that everything uses up-to-date APIs - utils.check_compatibility(dl) - - def train_epoch(): - model.train() # Turn on the train mode - total_loss = 0. - total_positional_losses = 0. - total_positional_losses_recorded = 0 - nan_steps = 0 - ignore_steps = 0 - before_get_batch = time.time() - assert len(dl) % aggregate_k_gradients == 0, 'Please set the number of steps per epoch s.t. `aggregate_k_gradients` divides it.' 
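-        # Gradient accumulation: each batch loss is divided by `aggregate_k_gradients` and the
-        # optimizer only steps on every k-th batch; under distributed training, `model.no_sync()`
-        # suppresses the gradient all-reduce on the intermediate batches so the sync happens once per step.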
- for batch, (data, targets, single_eval_pos) in enumerate(dl): - if using_dist and not (batch % aggregate_k_gradients == aggregate_k_gradients - 1): - cm = model.no_sync() - else: - cm = nullcontext() - with cm: - time_to_get_batch = time.time() - before_get_batch - before_forward = time.time() - if bptt_extra_samples is None: - single_eval_pos = single_eval_pos_gen() if callable(single_eval_pos_gen) else single_eval_pos_gen - else: - single_eval_pos = targets.shape[0] - bptt_extra_samples - - with autocast(enabled=scaler is not None): - # If style is set to None, it should not be transferred to device - output = model(tuple(e.to(device) if torch.is_tensor(e) else e for e in data) if isinstance(data, tuple) else data.to(device) - , single_eval_pos=single_eval_pos) - - forward_time = time.time() - before_forward - - if single_eval_pos is not None: - targets = targets[single_eval_pos:] - if isinstance(criterion, nn.GaussianNLLLoss): - assert output.shape[-1] == 2, \ - 'need to write a little bit of code to handle multiple regression targets at once' - - mean_pred = output[..., 0] - var_pred = output[..., 1].abs() - losses = criterion(mean_pred.flatten(), targets.to(device).flatten(), var=var_pred.flatten()) - elif isinstance(criterion, (nn.MSELoss, nn.BCEWithLogitsLoss)): - losses = criterion(output.flatten(), targets.to(device).flatten()) - elif isinstance(criterion, nn.CrossEntropyLoss): - losses = criterion(output.reshape(-1, n_out), targets.to(device).long().flatten()) - else: - losses = criterion(output, targets) - losses = losses.view(*output.shape[0:2]) - loss, nan_share = utils.torch_nanmean(losses.mean(0), return_nanshare=True) - loss = loss / aggregate_k_gradients - - if scaler: loss = scaler.scale(loss) - loss.backward() - - if batch % aggregate_k_gradients == aggregate_k_gradients - 1: - if scaler: scaler.unscale_(optimizer) - torch.nn.utils.clip_grad_norm_(model.parameters(), 1.) 
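-                    # With mixed precision, the loss was scaled before backward(), so the gradients are
-                    # unscaled above before clipping; scaler.step() then skips the optimizer update if
-                    # inf/NaN gradients are found, and scaler.update() adjusts the loss scale.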
- try: - if scaler: - scaler.step(optimizer) - scaler.update() - else: - optimizer.step() - except: - print("Invalid optimization step encountered") - optimizer.zero_grad() - - step_time = time.time() - before_forward - - if not torch.isnan(loss): - total_loss += losses.mean().cpu().detach().item() - total_positional_losses += losses.mean(1).cpu().detach() if single_eval_pos is None else \ - nn.functional.one_hot(torch.tensor(single_eval_pos), bptt)*\ - losses[:bptt-single_eval_pos].mean().cpu().detach() - - total_positional_losses_recorded += torch.ones(bptt) if single_eval_pos is None else \ - nn.functional.one_hot(torch.tensor(single_eval_pos), bptt) - nan_steps += nan_share - ignore_steps += (targets == -100).float().mean() - - - before_get_batch = time.time() - return total_loss / steps_per_epoch, (total_positional_losses / total_positional_losses_recorded).tolist(),\ - time_to_get_batch, forward_time, step_time, nan_steps.cpu().item()/(batch+1),\ - ignore_steps.cpu().item()/(batch+1) - - total_loss = float('inf') - total_positional_losses = float('inf') - try: - for epoch in (range(1, epochs + 1) if epochs is not None else itertools.count(1)): - - epoch_start_time = time.time() - total_loss, total_positional_losses, time_to_get_batch, forward_time, step_time, nan_share, ignore_share =\ - train_epoch() - if hasattr(dl, 'validate') and epoch % validation_period == 0: - with torch.no_grad(): - val_score = dl.validate(model) - else: - val_score = None - - if verbose: - print('-' * 89) - print( - f'| end of epoch {epoch:3d} | time: {(time.time() - epoch_start_time):5.2f}s | mean loss {total_loss:5.2f} | ' - f"pos losses {','.join([f'{l:5.2f}' for l in total_positional_losses])}, lr {scheduler.get_last_lr()[0]}" - f' data time {time_to_get_batch:5.2f} step time {step_time:5.2f}' - f' forward time {forward_time:5.2f}' - f' nan share {nan_share:5.2f} ignore share (for classification tasks) {ignore_share:5.4f}' - + (f'val score {val_score}' if val_score is not None else '')) - print('-' * 89) - - # stepping with wallclock time based scheduler - if epoch_callback is not None and rank == 0: - epoch_callback(model, epoch / epochs) - scheduler.step() - except KeyboardInterrupt: - pass - - if rank == 0: # trivially true for non-parallel training - if isinstance(model, torch.nn.parallel.DistributedDataParallel): - model = model.module - dl = None - return total_loss, total_positional_losses, model.to('cpu'), dl - -def _parse_args(config_parser, parser): - # Do we have a config file to parse? - args_config, remaining = config_parser.parse_known_args() - if args_config.config: - with open(args_config.config, 'r') as f: - cfg = yaml.safe_load(f) - parser.set_defaults(**cfg) - - # The main arg parser parses the rest of the args, the usual - # defaults will have been overridden if config file specified. 
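-    # Hypothetical invocation (file names are placeholders): `python train.py gp --config cfg.yaml --lr 3e-4`
-    # would load cfg.yaml as the parser defaults while the explicit --lr flag still takes precedence.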
- args = parser.parse_args(remaining) - - # Cache the args as a text string to save them in the output dir later - args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) - return args, args_text - - -if __name__ == '__main__': - config_parser = argparse.ArgumentParser(description='Only used as a first parser for the config file path.') - config_parser.add_argument('--config') - parser = argparse.ArgumentParser() - parser.add_argument('prior') - parser.add_argument('--loss_function', default='barnll') - # Optional Arg's for `--loss_function barnll` - parser.add_argument('--min_y', type=float, help='barnll can only model y in strict ranges, this is the minimum y can take.') - parser.add_argument('--max_y', type=float, help='barnll can only model y in strict ranges, this is the maximum y can take.') - parser.add_argument('--num_buckets', default=100, type=int) - #parser.add_argument('--num_features', default=None, type=int, help='Specify depending on the prior.') - parser.add_argument("--extra_prior_kwargs_dict", default={}, dest="extra_prior_kwargs_dict", action=StoreDictKeyPair, nargs="+", metavar="KEY=VAL", help='Specify depending on the prior.') - parser.add_argument('--encoder', default='linear', type=str, help='Specify depending on the prior.') - parser.add_argument('--y_encoder', default='linear', type=str, help='Specify depending on the prior. You should specify this if you do not fuse x and y.') - parser.add_argument('--pos_encoder', default='none', type=str, help='Specify depending on the prior.') - parser.add_argument('--bptt', default=10, type=int) - parser.add_argument('--epochs', default=200, type=int) - parser.add_argument('--warmup_epochs', default=50, type=int) - parser.add_argument('--validation_period', default=10, type=int) - parser.add_argument('--permutation_invariant_max_eval_pos', default=None, type=int, help='Set this to an int to ') - parser.add_argument('--permutation_invariant_sampling', default='weighted', help="Only relevant if --permutation_invariant_max_eval_pos is set.") - parser.add_argument('--train_mixed_precision', action='store_true') - - # these can likely be mostly left at defaults - parser.add_argument('--emsize', default=512, type=int) # sometimes even larger is better e.g. 
1024 - parser.add_argument('--nlayers', default=6, type=int) - parser.add_argument('--nhid', default=None, type=int) # 2*emsize is the default - parser.add_argument('--nhead', default=4, type=int) # nhead = emsize / 64 in the original paper - parser.add_argument('--dropout', default=.0, type=float) - parser.add_argument('--steps_per_epoch', default=10, type=int) - parser.add_argument('--batch_size', default=1000, type=int) - parser.add_argument('--lr', '--learning_rate', default=.001, type=float) # try also .0003, .0001, go lower with lower batch size - - args, _ = _parse_args(config_parser, parser) - - if args.nhid is None: - args.nhid = 2*args.emsize - - prior = args.__dict__.pop('prior') - - if prior == 'gp': - prior = priors.fast_gp.DataLoader - elif prior == 'ridge': - prior = priors.ridge.DataLoader - elif prior == 'stroke': - prior = priors.stroke.DataLoader - elif prior == 'mix_gp': - prior = priors.fast_gp_mix.DataLoader - else: - raise NotImplementedError(f'Prior == {prior}.') - - loss_function = args.__dict__.pop('loss_function') - - criterion = nn.GaussianNLLLoss(reduction='none', full=True) - classificiation_criterion = nn.CrossEntropyLoss(reduction='none') - num_buckets = args.__dict__.pop('num_buckets') - max_y = args.__dict__.pop('max_y') - min_y = args.__dict__.pop('min_y') - # criterion = nn.MSELoss(reduction='none') - - if loss_function == 'ce': - criterion = nn.CrossEntropyLoss(reduction='none') - elif loss_function == 'gaussnll': - criterion = nn.GaussianNLLLoss(reduction='none', full=True) - elif loss_function == 'mse': - criterion = nn.MSELoss(reduction='none') - else: - raise NotImplementedError(f'loss_function == {loss_function}.') - - - - encoder = args.__dict__.pop('encoder') - y_encoder = args.__dict__.pop('y_encoder') - - def get_encoder_generator(encoder): - if encoder == 'linear': - encoder_generator = encoders.Linear - elif encoder == 'mlp': - encoder_generator = encoders.MLP - elif encoder == 'positional': - encoder_generator = encoders.Positional - else: - raise NotImplementedError(f'A {encoder} encoder is not valid.') - return encoder_generator - - encoder_generator = get_encoder_generator(encoder) - y_encoder_generator = get_encoder_generator(y_encoder) - - pos_encoder = args.__dict__.pop('pos_encoder') - - if pos_encoder == 'none': - pos_encoder_generator = None - elif pos_encoder == 'sinus': - pos_encoder_generator = positional_encodings.PositionalEncoding - elif pos_encoder == 'learned': - pos_encoder_generator = positional_encodings.LearnedPositionalEncoding - elif pos_encoder == 'paired_scrambled_learned': - pos_encoder_generator = positional_encodings.PairedScrambledPositionalEncodings - else: - raise NotImplementedError(f'pos_encoer == {pos_encoder} is not valid.') - - permutation_invariant_max_eval_pos = args.__dict__.pop('permutation_invariant_max_eval_pos') - permutation_invariant_sampling = args.__dict__.pop('permutation_invariant_sampling') - if permutation_invariant_max_eval_pos is not None: - if permutation_invariant_sampling == 'weighted': - get_sampler = get_weighted_single_eval_pos_sampler - elif permutation_invariant_sampling == 'uniform': - get_sampler = get_uniform_single_eval_pos_sampler - else: - raise ValueError() - args.__dict__['single_eval_pos_gen'] = get_sampler(permutation_invariant_max_eval_pos) - - - print("ARGS for `train`:", args.__dict__) - - train(prior, criterion, encoder_generator, - y_encoder_generator=y_encoder_generator, pos_encoder_generator=pos_encoder_generator, - **args.__dict__) - diff --git 
a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/sessions.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/sessions.py deleted file mode 100644 index dbcf2a7b0ee2898b72714b756e4b27fbbad4beab..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/sessions.py +++ /dev/null @@ -1,833 +0,0 @@ -""" -requests.sessions -~~~~~~~~~~~~~~~~~ - -This module provides a Session object to manage and persist settings across -requests (cookies, auth, proxies). -""" -import os -import sys -import time -from collections import OrderedDict -from datetime import timedelta - -from ._internal_utils import to_native_string -from .adapters import HTTPAdapter -from .auth import _basic_auth_str -from .compat import Mapping, cookielib, urljoin, urlparse -from .cookies import ( - RequestsCookieJar, - cookiejar_from_dict, - extract_cookies_to_jar, - merge_cookies, -) -from .exceptions import ( - ChunkedEncodingError, - ContentDecodingError, - InvalidSchema, - TooManyRedirects, -) -from .hooks import default_hooks, dispatch_hook - -# formerly defined here, reexposed here for backward compatibility -from .models import ( # noqa: F401 - DEFAULT_REDIRECT_LIMIT, - REDIRECT_STATI, - PreparedRequest, - Request, -) -from .status_codes import codes -from .structures import CaseInsensitiveDict -from .utils import ( # noqa: F401 - DEFAULT_PORTS, - default_headers, - get_auth_from_url, - get_environ_proxies, - get_netrc_auth, - requote_uri, - resolve_proxies, - rewind_body, - should_bypass_proxies, - to_key_val_list, -) - -# Preferred clock, based on which one is more accurate on a given system. -if sys.platform == "win32": - preferred_clock = time.perf_counter -else: - preferred_clock = time.time - - -def merge_setting(request_setting, session_setting, dict_class=OrderedDict): - """Determines appropriate setting for a given request, taking into account - the explicit setting on that request, and the setting in the session. If a - setting is a dictionary, they will be merged together using `dict_class` - """ - - if session_setting is None: - return request_setting - - if request_setting is None: - return session_setting - - # Bypass if not a dictionary (e.g. verify) - if not ( - isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) - ): - return request_setting - - merged_setting = dict_class(to_key_val_list(session_setting)) - merged_setting.update(to_key_val_list(request_setting)) - - # Remove keys that are set to None. Extract keys first to avoid altering - # the dictionary during iteration. - none_keys = [k for (k, v) in merged_setting.items() if v is None] - for key in none_keys: - del merged_setting[key] - - return merged_setting - - -def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): - """Properly merges both requests and session hooks. - - This is necessary because when request_hooks == {'response': []}, the - merge breaks Session hooks entirely. - """ - if session_hooks is None or session_hooks.get("response") == []: - return request_hooks - - if request_hooks is None or request_hooks.get("response") == []: - return session_hooks - - return merge_setting(request_hooks, session_hooks, dict_class) - - -class SessionRedirectMixin: - def get_redirect_target(self, resp): - """Receives a Response. 
Returns a redirect URI or ``None``""" - # Due to the nature of how requests processes redirects this method will - # be called at least once upon the original response and at least twice - # on each subsequent redirect response (if any). - # If a custom mixin is used to handle this logic, it may be advantageous - # to cache the redirect location onto the response object as a private - # attribute. - if resp.is_redirect: - location = resp.headers["location"] - # Currently the underlying http module on py3 decode headers - # in latin1, but empirical evidence suggests that latin1 is very - # rarely used with non-ASCII characters in HTTP headers. - # It is more likely to get UTF8 header rather than latin1. - # This causes incorrect handling of UTF8 encoded location headers. - # To solve this, we re-encode the location in latin1. - location = location.encode("latin1") - return to_native_string(location, "utf8") - return None - - def should_strip_auth(self, old_url, new_url): - """Decide whether Authorization header should be removed when redirecting""" - old_parsed = urlparse(old_url) - new_parsed = urlparse(new_url) - if old_parsed.hostname != new_parsed.hostname: - return True - # Special case: allow http -> https redirect when using the standard - # ports. This isn't specified by RFC 7235, but is kept to avoid - # breaking backwards compatibility with older versions of requests - # that allowed any redirects on the same host. - if ( - old_parsed.scheme == "http" - and old_parsed.port in (80, None) - and new_parsed.scheme == "https" - and new_parsed.port in (443, None) - ): - return False - - # Handle default port usage corresponding to scheme. - changed_port = old_parsed.port != new_parsed.port - changed_scheme = old_parsed.scheme != new_parsed.scheme - default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) - if ( - not changed_scheme - and old_parsed.port in default_port - and new_parsed.port in default_port - ): - return False - - # Standard case: root URI must match - return changed_port or changed_scheme - - def resolve_redirects( - self, - resp, - req, - stream=False, - timeout=None, - verify=True, - cert=None, - proxies=None, - yield_requests=False, - **adapter_kwargs, - ): - """Receives a Response. Returns a generator of Responses or Requests.""" - - hist = [] # keep track of history - - url = self.get_redirect_target(resp) - previous_fragment = urlparse(req.url).fragment - while url: - prepared_request = req.copy() - - # Update history and keep track of redirects. - # resp.history must ignore the original request in this loop - hist.append(resp) - resp.history = hist[1:] - - try: - resp.content # Consume socket so it can be released - except (ChunkedEncodingError, ContentDecodingError, RuntimeError): - resp.raw.read(decode_content=False) - - if len(resp.history) >= self.max_redirects: - raise TooManyRedirects( - f"Exceeded {self.max_redirects} redirects.", response=resp - ) - - # Release the connection back into the pool. 
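
A minimal standalone sketch of the auth-stripping rule implemented by should_strip_auth above (simplified to hostnames, schemes and default ports; illustrative only, not a drop-in replacement for the requests implementation):

from urllib.parse import urlparse

def strips_auth_on_redirect(old_url, new_url):
    # Drop the Authorization header when the host changes, but keep it
    # for a plain http -> https upgrade on the standard ports.
    old, new = urlparse(old_url), urlparse(new_url)
    if old.hostname != new.hostname:
        return True
    if (old.scheme, new.scheme) == ("http", "https") \
            and old.port in (80, None) and new.port in (443, None):
        return False
    return old.port != new.port or old.scheme != new.scheme

# Credentials survive the https upgrade on the same host...
assert strips_auth_on_redirect("http://example.com/a", "https://example.com/b") is False
# ...but not a redirect to a different host.
assert strips_auth_on_redirect("https://example.com/a", "https://other.example.org/b") is True
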
- resp.close() - - # Handle redirection without scheme (see: RFC 1808 Section 4) - if url.startswith("//"): - parsed_rurl = urlparse(resp.url) - url = ":".join([to_native_string(parsed_rurl.scheme), url]) - - # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) - parsed = urlparse(url) - if parsed.fragment == "" and previous_fragment: - parsed = parsed._replace(fragment=previous_fragment) - elif parsed.fragment: - previous_fragment = parsed.fragment - url = parsed.geturl() - - # Facilitate relative 'location' headers, as allowed by RFC 7231. - # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') - # Compliant with RFC3986, we percent encode the url. - if not parsed.netloc: - url = urljoin(resp.url, requote_uri(url)) - else: - url = requote_uri(url) - - prepared_request.url = to_native_string(url) - - self.rebuild_method(prepared_request, resp) - - # https://github.com/psf/requests/issues/1084 - if resp.status_code not in ( - codes.temporary_redirect, - codes.permanent_redirect, - ): - # https://github.com/psf/requests/issues/3490 - purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") - for header in purged_headers: - prepared_request.headers.pop(header, None) - prepared_request.body = None - - headers = prepared_request.headers - headers.pop("Cookie", None) - - # Extract any cookies sent on the response to the cookiejar - # in the new request. Because we've mutated our copied prepared - # request, use the old one that we haven't yet touched. - extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) - merge_cookies(prepared_request._cookies, self.cookies) - prepared_request.prepare_cookies(prepared_request._cookies) - - # Rebuild auth and proxy information. - proxies = self.rebuild_proxies(prepared_request, proxies) - self.rebuild_auth(prepared_request, resp) - - # A failed tell() sets `_body_position` to `object()`. This non-None - # value ensures `rewindable` will be True, allowing us to raise an - # UnrewindableBodyError, instead of hanging the connection. - rewindable = prepared_request._body_position is not None and ( - "Content-Length" in headers or "Transfer-Encoding" in headers - ) - - # Attempt to rewind consumed file-like object. - if rewindable: - rewind_body(prepared_request) - - # Override the original request. - req = prepared_request - - if yield_requests: - yield req - else: - - resp = self.send( - req, - stream=stream, - timeout=timeout, - verify=verify, - cert=cert, - proxies=proxies, - allow_redirects=False, - **adapter_kwargs, - ) - - extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) - - # extract redirect url, if any, for the next loop - url = self.get_redirect_target(resp) - yield resp - - def rebuild_auth(self, prepared_request, response): - """When being redirected we may want to strip authentication from the - request to avoid leaking credentials. This method intelligently removes - and reapplies authentication where possible to avoid credential loss. - """ - headers = prepared_request.headers - url = prepared_request.url - - if "Authorization" in headers and self.should_strip_auth( - response.request.url, url - ): - # If we get redirected to a new host, we should strip out any - # authentication headers. - del headers["Authorization"] - - # .netrc might have more auth for us on our new host. 
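
The header purging above can be observed end to end with a short usage sketch (the httpbin URL is only an example endpoint and requires network access): a POST answered with a 302 is re-issued as a body-less GET, so Content-Length, Content-Type and Transfer-Encoding are dropped from the follow-up request:

import requests

r = requests.post("https://httpbin.org/redirect-to?url=/get&status_code=302", data={"k": "v"})
print(r.history[0].request.method)            # "POST" (the original request)
print(r.request.method)                       # "GET"  (method rewritten on redirect)
print("Content-Length" in r.request.headers)  # False  (body and body headers purged)
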
- new_auth = get_netrc_auth(url) if self.trust_env else None - if new_auth is not None: - prepared_request.prepare_auth(new_auth) - - def rebuild_proxies(self, prepared_request, proxies): - """This method re-evaluates the proxy configuration by considering the - environment variables. If we are redirected to a URL covered by - NO_PROXY, we strip the proxy configuration. Otherwise, we set missing - proxy keys for this URL (in case they were stripped by a previous - redirect). - - This method also replaces the Proxy-Authorization header where - necessary. - - :rtype: dict - """ - headers = prepared_request.headers - scheme = urlparse(prepared_request.url).scheme - new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) - - if "Proxy-Authorization" in headers: - del headers["Proxy-Authorization"] - - try: - username, password = get_auth_from_url(new_proxies[scheme]) - except KeyError: - username, password = None, None - - # urllib3 handles proxy authorization for us in the standard adapter. - # Avoid appending this to TLS tunneled requests where it may be leaked. - if not scheme.startswith('https') and username and password: - headers["Proxy-Authorization"] = _basic_auth_str(username, password) - - return new_proxies - - def rebuild_method(self, prepared_request, response): - """When being redirected we may want to change the method of the request - based on certain specs or browser behavior. - """ - method = prepared_request.method - - # https://tools.ietf.org/html/rfc7231#section-6.4.4 - if response.status_code == codes.see_other and method != "HEAD": - method = "GET" - - # Do what the browsers do, despite standards... - # First, turn 302s into GETs. - if response.status_code == codes.found and method != "HEAD": - method = "GET" - - # Second, if a POST is responded to with a 301, turn it into a GET. - # This bizarre behaviour is explained in Issue 1704. - if response.status_code == codes.moved and method == "POST": - method = "GET" - - prepared_request.method = method - - -class Session(SessionRedirectMixin): - """A Requests session. - - Provides cookie persistence, connection-pooling, and configuration. - - Basic Usage:: - - >>> import requests - >>> s = requests.Session() - >>> s.get('https://httpbin.org/get') - - - Or as a context manager:: - - >>> with requests.Session() as s: - ... s.get('https://httpbin.org/get') - - """ - - __attrs__ = [ - "headers", - "cookies", - "auth", - "proxies", - "hooks", - "params", - "verify", - "cert", - "adapters", - "stream", - "trust_env", - "max_redirects", - ] - - def __init__(self): - - #: A case-insensitive dictionary of headers to be sent on each - #: :class:`Request ` sent from this - #: :class:`Session `. - self.headers = default_headers() - - #: Default Authentication tuple or object to attach to - #: :class:`Request `. - self.auth = None - - #: Dictionary mapping protocol or protocol and host to the URL of the proxy - #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to - #: be used on each :class:`Request `. - self.proxies = {} - - #: Event-handling hooks. - self.hooks = default_hooks() - - #: Dictionary of querystring data to attach to each - #: :class:`Request `. The dictionary values may be lists for - #: representing multivalued query parameters. - self.params = {} - - #: Stream response content default. - self.stream = False - - #: SSL Verification default. - #: Defaults to `True`, requiring requests to verify the TLS certificate at the - #: remote end. 
- #: If verify is set to `False`, requests will accept any TLS certificate - #: presented by the server, and will ignore hostname mismatches and/or - #: expired certificates, which will make your application vulnerable to - #: man-in-the-middle (MitM) attacks. - #: Only set this to `False` for testing. - self.verify = True - - #: SSL client certificate default, if String, path to ssl client - #: cert file (.pem). If Tuple, ('cert', 'key') pair. - self.cert = None - - #: Maximum number of redirects allowed. If the request exceeds this - #: limit, a :class:`TooManyRedirects` exception is raised. - #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is - #: 30. - self.max_redirects = DEFAULT_REDIRECT_LIMIT - - #: Trust environment settings for proxy configuration, default - #: authentication and similar. - self.trust_env = True - - #: A CookieJar containing all currently outstanding cookies set on this - #: session. By default it is a - #: :class:`RequestsCookieJar `, but - #: may be any other ``cookielib.CookieJar`` compatible object. - self.cookies = cookiejar_from_dict({}) - - # Default connection adapters. - self.adapters = OrderedDict() - self.mount("https://", HTTPAdapter()) - self.mount("http://", HTTPAdapter()) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - def prepare_request(self, request): - """Constructs a :class:`PreparedRequest ` for - transmission and returns it. The :class:`PreparedRequest` has settings - merged from the :class:`Request ` instance and those of the - :class:`Session`. - - :param request: :class:`Request` instance to prepare with this - session's settings. - :rtype: requests.PreparedRequest - """ - cookies = request.cookies or {} - - # Bootstrap CookieJar. - if not isinstance(cookies, cookielib.CookieJar): - cookies = cookiejar_from_dict(cookies) - - # Merge with session cookies - merged_cookies = merge_cookies( - merge_cookies(RequestsCookieJar(), self.cookies), cookies - ) - - # Set environment's basic authentication if not explicitly set. - auth = request.auth - if self.trust_env and not auth and not self.auth: - auth = get_netrc_auth(request.url) - - p = PreparedRequest() - p.prepare( - method=request.method.upper(), - url=request.url, - files=request.files, - data=request.data, - json=request.json, - headers=merge_setting( - request.headers, self.headers, dict_class=CaseInsensitiveDict - ), - params=merge_setting(request.params, self.params), - auth=merge_setting(auth, self.auth), - cookies=merged_cookies, - hooks=merge_hooks(request.hooks, self.hooks), - ) - return p - - def request( - self, - method, - url, - params=None, - data=None, - headers=None, - cookies=None, - files=None, - auth=None, - timeout=None, - allow_redirects=True, - proxies=None, - hooks=None, - stream=None, - verify=None, - cert=None, - json=None, - ): - """Constructs a :class:`Request `, prepares it and sends it. - Returns :class:`Response ` object. - - :param method: method for the new :class:`Request` object. - :param url: URL for the new :class:`Request` object. - :param params: (optional) Dictionary or bytes to be sent in the query - string for the :class:`Request`. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json to send in the body of the - :class:`Request`. - :param headers: (optional) Dictionary of HTTP Headers to send with the - :class:`Request`. 
- :param cookies: (optional) Dict or CookieJar object to send with the - :class:`Request`. - :param files: (optional) Dictionary of ``'filename': file-like-objects`` - for multipart encoding upload. - :param auth: (optional) Auth tuple or callable to enable - Basic/Digest/Custom HTTP Auth. - :param timeout: (optional) How long to wait for the server to send - data before giving up, as a float, or a :ref:`(connect timeout, - read timeout) ` tuple. - :type timeout: float or tuple - :param allow_redirects: (optional) Set to True by default. - :type allow_redirects: bool - :param proxies: (optional) Dictionary mapping protocol or protocol and - hostname to the URL of the proxy. - :param stream: (optional) whether to immediately download the response - content. Defaults to ``False``. - :param verify: (optional) Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use. Defaults to ``True``. When set to - ``False``, requests will accept any TLS certificate presented by - the server, and will ignore hostname mismatches and/or expired - certificates, which will make your application vulnerable to - man-in-the-middle (MitM) attacks. Setting verify to ``False`` - may be useful during local development or testing. - :param cert: (optional) if String, path to ssl client cert file (.pem). - If Tuple, ('cert', 'key') pair. - :rtype: requests.Response - """ - # Create the Request. - req = Request( - method=method.upper(), - url=url, - headers=headers, - files=files, - data=data or {}, - json=json, - params=params or {}, - auth=auth, - cookies=cookies, - hooks=hooks, - ) - prep = self.prepare_request(req) - - proxies = proxies or {} - - settings = self.merge_environment_settings( - prep.url, proxies, stream, verify, cert - ) - - # Send the request. - send_kwargs = { - "timeout": timeout, - "allow_redirects": allow_redirects, - } - send_kwargs.update(settings) - resp = self.send(prep, **send_kwargs) - - return resp - - def get(self, url, **kwargs): - r"""Sends a GET request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault("allow_redirects", True) - return self.request("GET", url, **kwargs) - - def options(self, url, **kwargs): - r"""Sends a OPTIONS request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault("allow_redirects", True) - return self.request("OPTIONS", url, **kwargs) - - def head(self, url, **kwargs): - r"""Sends a HEAD request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault("allow_redirects", False) - return self.request("HEAD", url, **kwargs) - - def post(self, url, data=None, json=None, **kwargs): - r"""Sends a POST request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. 
- :rtype: requests.Response - """ - - return self.request("POST", url, data=data, json=json, **kwargs) - - def put(self, url, data=None, **kwargs): - r"""Sends a PUT request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request("PUT", url, data=data, **kwargs) - - def patch(self, url, data=None, **kwargs): - r"""Sends a PATCH request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request("PATCH", url, data=data, **kwargs) - - def delete(self, url, **kwargs): - r"""Sends a DELETE request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request("DELETE", url, **kwargs) - - def send(self, request, **kwargs): - """Send a given PreparedRequest. - - :rtype: requests.Response - """ - # Set defaults that the hooks can utilize to ensure they always have - # the correct parameters to reproduce the previous request. - kwargs.setdefault("stream", self.stream) - kwargs.setdefault("verify", self.verify) - kwargs.setdefault("cert", self.cert) - if "proxies" not in kwargs: - kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) - - # It's possible that users might accidentally send a Request object. - # Guard against that specific failure case. - if isinstance(request, Request): - raise ValueError("You can only send PreparedRequests.") - - # Set up variables needed for resolve_redirects and dispatching of hooks - allow_redirects = kwargs.pop("allow_redirects", True) - stream = kwargs.get("stream") - hooks = request.hooks - - # Get the appropriate adapter to use - adapter = self.get_adapter(url=request.url) - - # Start time (approximately) of the request - start = preferred_clock() - - # Send the request - r = adapter.send(request, **kwargs) - - # Total elapsed time of the request (approximately) - elapsed = preferred_clock() - start - r.elapsed = timedelta(seconds=elapsed) - - # Response manipulation hooks - r = dispatch_hook("response", hooks, r, **kwargs) - - # Persist cookies - if r.history: - - # If the hooks create history then we want those cookies too - for resp in r.history: - extract_cookies_to_jar(self.cookies, resp.request, resp.raw) - - extract_cookies_to_jar(self.cookies, request, r.raw) - - # Resolve redirects if allowed. - if allow_redirects: - # Redirect resolving generator. - gen = self.resolve_redirects(r, request, **kwargs) - history = [resp for resp in gen] - else: - history = [] - - # Shuffle things around if there's history. - if history: - # Insert the first (original) request at the start - history.insert(0, r) - # Get the last request made - r = history.pop() - r.history = history - - # If redirects aren't being followed, store the response on the Request for Response.next(). 
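
As a usage-level illustration of the send() flow above, where hook dispatch, elapsed timing and cookie persistence all happen (the httpbin URL is only an example endpoint and requires network access):

import requests

def log_elapsed(response, **kwargs):
    # Response hooks are dispatched inside Session.send(), right after the
    # adapter returns and response.elapsed has been set.
    print(response.status_code, response.url, response.elapsed)
    return response

with requests.Session() as s:
    s.hooks["response"].append(log_elapsed)
    s.get("https://httpbin.org/cookies/set?name=value")
    # Cookies set anywhere in the redirect chain are persisted on the session.
    print(s.cookies.get_dict())
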
- if not allow_redirects: - try: - r._next = next( - self.resolve_redirects(r, request, yield_requests=True, **kwargs) - ) - except StopIteration: - pass - - if not stream: - r.content - - return r - - def merge_environment_settings(self, url, proxies, stream, verify, cert): - """ - Check the environment and merge it with some settings. - - :rtype: dict - """ - # Gather clues from the surrounding environment. - if self.trust_env: - # Set environment's proxies. - no_proxy = proxies.get("no_proxy") if proxies is not None else None - env_proxies = get_environ_proxies(url, no_proxy=no_proxy) - for (k, v) in env_proxies.items(): - proxies.setdefault(k, v) - - # Look for requests environment configuration - # and be compatible with cURL. - if verify is True or verify is None: - verify = ( - os.environ.get("REQUESTS_CA_BUNDLE") - or os.environ.get("CURL_CA_BUNDLE") - or verify - ) - - # Merge all the kwargs. - proxies = merge_setting(proxies, self.proxies) - stream = merge_setting(stream, self.stream) - verify = merge_setting(verify, self.verify) - cert = merge_setting(cert, self.cert) - - return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} - - def get_adapter(self, url): - """ - Returns the appropriate connection adapter for the given URL. - - :rtype: requests.adapters.BaseAdapter - """ - for (prefix, adapter) in self.adapters.items(): - - if url.lower().startswith(prefix.lower()): - return adapter - - # Nothing matches :-/ - raise InvalidSchema(f"No connection adapters were found for {url!r}") - - def close(self): - """Closes all adapters and as such the session""" - for v in self.adapters.values(): - v.close() - - def mount(self, prefix, adapter): - """Registers a connection adapter to a prefix. - - Adapters are sorted in descending order by prefix length. - """ - self.adapters[prefix] = adapter - keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] - - for key in keys_to_move: - self.adapters[key] = self.adapters.pop(key) - - def __getstate__(self): - state = {attr: getattr(self, attr, None) for attr in self.__attrs__} - return state - - def __setstate__(self, state): - for attr, value in state.items(): - setattr(self, attr, value) - - -def session(): - """ - Returns a :class:`Session` for context-management. - - .. deprecated:: 1.0.0 - - This method has been deprecated since version 1.0.0 and is only kept for - backwards compatibility. New code should use :class:`~requests.sessions.Session` - to create a session. This may be removed at a future date. 
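
The mount()/get_adapter() pair above implements longest-prefix matching, which is what makes per-host transport configuration possible; a small sketch (the host name and retry policy are placeholders):

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

s = requests.Session()
custom = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.5))
# mount() keeps self.adapters ordered by descending prefix length, so this
# more specific prefix is matched by get_adapter() before the default
# "https://" adapter, but only for URLs under that host.
s.mount("https://api.example.com/", custom)
assert s.get_adapter("https://api.example.com/v1/things") is custom
assert s.get_adapter("https://other.example.org/") is not custom
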
- - :rtype: Session - """ - return Session() diff --git a/spaces/VIPLab/Track-Anything/tools/base_segmenter.py b/spaces/VIPLab/Track-Anything/tools/base_segmenter.py deleted file mode 100644 index 2b975bb779b47485f9e6ba7435646b4db40a2c6a..0000000000000000000000000000000000000000 --- a/spaces/VIPLab/Track-Anything/tools/base_segmenter.py +++ /dev/null @@ -1,129 +0,0 @@ -import time -import torch -import cv2 -from PIL import Image, ImageDraw, ImageOps -import numpy as np -from typing import Union -from segment_anything import sam_model_registry, SamPredictor, SamAutomaticMaskGenerator -import matplotlib.pyplot as plt -import PIL -from .mask_painter import mask_painter - - -class BaseSegmenter: - def __init__(self, SAM_checkpoint, model_type, device='cuda:0'): - """ - device: model device - SAM_checkpoint: path of SAM checkpoint - model_type: vit_b, vit_l, vit_h - """ - print(f"Initializing BaseSegmenter to {device}") - assert model_type in ['vit_b', 'vit_l', 'vit_h'], 'model_type must be vit_b, vit_l, or vit_h' - - self.device = device - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.model = sam_model_registry[model_type](checkpoint=SAM_checkpoint) - self.model.to(device=self.device) - self.predictor = SamPredictor(self.model) - self.embedded = False - - @torch.no_grad() - def set_image(self, image: np.ndarray): - # PIL.open(image_path) 3channel: RGB - # image embedding: avoid encode the same image multiple times - self.orignal_image = image - if self.embedded: - print('repeat embedding, please reset_image.') - return - self.predictor.set_image(image) - self.embedded = True - return - - @torch.no_grad() - def reset_image(self): - # reset image embeding - self.predictor.reset_image() - self.embedded = False - - def predict(self, prompts, mode, multimask=True): - """ - image: numpy array, h, w, 3 - prompts: dictionary, 3 keys: 'point_coords', 'point_labels', 'mask_input' - prompts['point_coords']: numpy array [N,2] - prompts['point_labels']: numpy array [1,N] - prompts['mask_input']: numpy array [1,256,256] - mode: 'point' (points only), 'mask' (mask only), 'both' (consider both) - mask_outputs: True (return 3 masks), False (return 1 mask only) - whem mask_outputs=True, mask_input=logits[np.argmax(scores), :, :][None, :, :] - """ - assert self.embedded, 'prediction is called before set_image (feature embedding).' 
- assert mode in ['point', 'mask', 'both'], 'mode must be point, mask, or both' - - if mode == 'point': - masks, scores, logits = self.predictor.predict(point_coords=prompts['point_coords'], - point_labels=prompts['point_labels'], - multimask_output=multimask) - elif mode == 'mask': - masks, scores, logits = self.predictor.predict(mask_input=prompts['mask_input'], - multimask_output=multimask) - elif mode == 'both': # both - masks, scores, logits = self.predictor.predict(point_coords=prompts['point_coords'], - point_labels=prompts['point_labels'], - mask_input=prompts['mask_input'], - multimask_output=multimask) - else: - raise("Not implement now!") - # masks (n, h, w), scores (n,), logits (n, 256, 256) - return masks, scores, logits - - -if __name__ == "__main__": - # load and show an image - image = cv2.imread('/hhd3/gaoshang/truck.jpg') - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # numpy array (h, w, 3) - - # initialise BaseSegmenter - SAM_checkpoint= '/ssd1/gaomingqi/checkpoints/sam_vit_h_4b8939.pth' - model_type = 'vit_h' - device = "cuda:4" - base_segmenter = BaseSegmenter(SAM_checkpoint=SAM_checkpoint, model_type=model_type, device=device) - - # image embedding (once embedded, multiple prompts can be applied) - base_segmenter.set_image(image) - - # examples - # point only ------------------------ - mode = 'point' - prompts = { - 'point_coords': np.array([[500, 375], [1125, 625]]), - 'point_labels': np.array([1, 1]), - } - masks, scores, logits = base_segmenter.predict(prompts, mode, multimask=False) # masks (n, h, w), scores (n,), logits (n, 256, 256) - painted_image = mask_painter(image, masks[np.argmax(scores)].astype('uint8'), background_alpha=0.8) - painted_image = cv2.cvtColor(painted_image, cv2.COLOR_RGB2BGR) # numpy array (h, w, 3) - cv2.imwrite('/hhd3/gaoshang/truck_point.jpg', painted_image) - - # both ------------------------ - mode = 'both' - mask_input = logits[np.argmax(scores), :, :] - prompts = {'mask_input': mask_input [None, :, :]} - prompts = { - 'point_coords': np.array([[500, 375], [1125, 625]]), - 'point_labels': np.array([1, 0]), - 'mask_input': mask_input[None, :, :] - } - masks, scores, logits = base_segmenter.predict(prompts, mode, multimask=True) # masks (n, h, w), scores (n,), logits (n, 256, 256) - painted_image = mask_painter(image, masks[np.argmax(scores)].astype('uint8'), background_alpha=0.8) - painted_image = cv2.cvtColor(painted_image, cv2.COLOR_RGB2BGR) # numpy array (h, w, 3) - cv2.imwrite('/hhd3/gaoshang/truck_both.jpg', painted_image) - - # mask only ------------------------ - mode = 'mask' - mask_input = logits[np.argmax(scores), :, :] - - prompts = {'mask_input': mask_input[None, :, :]} - - masks, scores, logits = base_segmenter.predict(prompts, mode, multimask=True) # masks (n, h, w), scores (n,), logits (n, 256, 256) - painted_image = mask_painter(image, masks[np.argmax(scores)].astype('uint8'), background_alpha=0.8) - painted_image = cv2.cvtColor(painted_image, cv2.COLOR_RGB2BGR) # numpy array (h, w, 3) - cv2.imwrite('/hhd3/gaoshang/truck_mask.jpg', painted_image) diff --git a/spaces/Vegecken/sovits4dzl/hubert/hubert_model_onnx.py b/spaces/Vegecken/sovits4dzl/hubert/hubert_model_onnx.py deleted file mode 100644 index d18f3c2a0fc29592a573a9780308d38f059640b9..0000000000000000000000000000000000000000 --- a/spaces/Vegecken/sovits4dzl/hubert/hubert_model_onnx.py +++ /dev/null @@ -1,217 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from 
torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - def forward(self, x): - return self.units(x) - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - 
self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. 
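
For intuition, a tiny NumPy re-enactment of the span masking performed by _compute_mask above (illustrative only; the module itself works on torch tensors and samples spans independently per batch element):

import numpy as np

rng = np.random.default_rng(0)
sequence_length, mask_prob, mask_length = 20, 0.5, 4

# Expected number of masked spans, as in _compute_mask:
# mask_prob * sequence_length / mask_length, rounded stochastically.
num_spans = int(mask_prob * sequence_length / mask_length + rng.random())

# Sample span start offsets so that every span fits inside the sequence...
starts = rng.choice(sequence_length - (mask_length - 1), size=num_spans, replace=False)
# ...and expand each start into a contiguous run of mask_length indices.
idxs = (starts[:, None] + np.arange(mask_length)[None, :]).reshape(-1)

mask = np.zeros(sequence_length, dtype=bool)
mask[idxs] = True
print(mask.astype(int))   # roughly mask_prob of the positions end up masked
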
- Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/Vivekdunuka/MyAIChat/README.md b/spaces/Vivekdunuka/MyAIChat/README.md deleted file mode 100644 index ec2aae7d84b2e85cb4f8fda1fa9d8aed758b208a..0000000000000000000000000000000000000000 --- a/spaces/Vivekdunuka/MyAIChat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MyAIChat -emoji: 🦀 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Widium/Image-Recreation/functions/image.py b/spaces/Widium/Image-Recreation/functions/image.py deleted file mode 100644 index 00e4adc2f3b799ae935a05df362a331a40fe5fd8..0000000000000000000000000000000000000000 --- a/spaces/Widium/Image-Recreation/functions/image.py +++ /dev/null @@ -1,98 +0,0 @@ -# *************************************************************************** # -# # -# image.py # -# # -# By: Widium # -# Github : https://github.com/widium # -# # -# Created: 2022/11/10 08:51:01 by ebennace # -# Updated: 2023/05/03 16:05:48 by Widium # -# # -# **************************************************************************** # - -import tensorflow as tf -import numpy as np - -from tensorflow import Tensor -from PIL import Image - -from cv2 import cvtColor -from cv2 import imread -from cv2 import COLOR_BGR2RGB - -from .processing import Normalize_image -from .processing import inverse_normalize_image -from .processing import remove_batch_dimension - -# ======================================== # - -def load_image_path(path : str): - """ - Load and preprocess the color of imag with OpenCV - - Args: - path (str): filepath of image - - Returns: - np.array: img in Numpy Array Format - """ - img = imread(path) - img = cvtColor(img, COLOR_BGR2RGB) - img = Normalize_image(img) - - return (img) - -# ======================================== # - -def tensor_to_image(tensor : Tensor): - """ - Convert a tensor to an image in PIL format. - - Args: - tensor: The input image as a tensor. - - Returns: - Image: The converted image in PIL format. - """ - tensor = inverse_normalize_image(tensor) - array = np.array(tensor, dtype=np.uint8) - array = remove_batch_dimension(array) - img_pil = Image.fromarray(array) - return img_pil - -# ======================================== # - -def clip_pixel(image : Tensor): - """ - Clip pixel values of an image tensor between 0 and 1. - - Args: - image: The input image as a tensor. - - Returns: - Tensor: The clipped image tensor. - """ - cliped_image = tf.clip_by_value( - t=image, - clip_value_min=0.0, - clip_value_max=1.0 - ) - - return (cliped_image) - -# ======================================== # - -def create_noisy_imag(img : Tensor): - """ - Create Noisy image with Random pixel with same shape of input img - - Args: - img: The input image as a tensor. - - Returns: - np.ndarray: The noisy image as a NumPy array. 
- """ - noise_img = np.random.randn(*img.shape) - return (noise_img) - -# ===================================================== # \ No newline at end of file diff --git a/spaces/XzJosh/Diana-Bert-VITS2/data_utils.py b/spaces/XzJosh/Diana-Bert-VITS2/data_utils.py deleted file mode 100644 index be3a29a93188c5b3386f22e5db29e5e96d78109a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Diana-Bert-VITS2/data_utils.py +++ /dev/null @@ -1,321 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data -import commons -from mel_processing import spectrogram_torch, mel_spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import cleaned_text_to_sequence, get_bert - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths_sid_text, hparams): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - self.spk_map = hparams.spk2id - self.hparams = hparams - - self.use_mel_spec_posterior = getattr(hparams, "use_mel_posterior_encoder", False) - if self.use_mel_spec_posterior: - self.n_mel_channels = getattr(hparams, "n_mel_channels", 80) - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 300) - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - skipped = 0 - for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text: - audiopath = f'{_id}' - if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len: - phones = phones.split(" ") - tone = [int(i) for i in tone.split(" ")] - word2ph = [int(i) for i in word2ph.split(" ")] - audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - else: - skipped += 1 - print("skipped: ", skipped, ", total: ", len(self.audiopaths_sid_text)) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text - - bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath) - - spec, wav = self.get_audio(audiopath) - sid = torch.LongTensor([int(self.spk_map[sid])]) - return (phones, spec, wav, sid, tone, language, bert) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, 
self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if self.use_mel_spec_posterior: - spec_filename = spec_filename.replace(".spec.pt", ".mel.pt") - try: - spec = torch.load(spec_filename) - except: - if self.use_mel_spec_posterior: - spec = mel_spectrogram_torch(audio_norm, self.filter_length, - self.n_mel_channels, self.sampling_rate, self.hop_length, - self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text, word2ph, phone, tone, language_str, wav_path): - pold = phone - w2pho = [i for i in word2ph] - word2ph = [i for i in word2ph] - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - pold2 = phone - - if self.add_blank: - p1 = len(phone) - phone = commons.intersperse(phone, 0) - p2 = len(phone) - t1 = len(tone) - tone = commons.intersperse(tone, 0) - t2 = len(tone) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert_path = wav_path.replace(".wav", ".bert.pt") - try: - bert = torch.load(bert_path) - assert bert.shape[-1] == len(phone) - except: - bert = get_bert(text, word2ph, language_str) - torch.save(bert, bert_path) - #print(bert.shape[-1], bert_path, text, pold) - assert bert.shape[-1] == len(phone) - - assert bert.shape[-1] == len(phone), ( - bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho) - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - return bert, phone, tone, language - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate(): - """ Zero-pads model inputs and targets - """ - - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - tone_padded = torch.LongTensor(len(batch), max_text_len) - language_padded = torch.LongTensor(len(batch), max_text_len) - bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len) - - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - tone_padded.zero_() - language_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - bert_padded.zero_() - for i 
in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - tone = row[4] - tone_padded[i, :tone.size(0)] = tone - - language = row[5] - language_padded[i, :language.size(0)] = language - - bert = row[6] - bert_padded[i, :, :bert.size(1)] = bert - - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. - """ - - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - if (len_bucket == 0): - continue - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return 
iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/XzJosh/ranran-Bert-VITS2/utils.py b/spaces/XzJosh/ranran-Bert-VITS2/utils.py deleted file mode 100644 index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/ranran-Bert-VITS2/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - elif optimizer is None and not skip_optimizer: - #else: #Disable this line if Infer ,and enable the line upper - new_opt_dict = optimizer.state_dict() - new_opt_dict_params = new_opt_dict['param_groups'][0]['params'] - new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups'] - new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params - optimizer.load_state_dict(new_opt_dict) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - #assert "emb_g" not in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def 
latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL", - help='Model name') - parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.cont = args.cont - return hparams - - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time -- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - import re - ckpts_files = [f for f in os.listdir(path_to_models) if 
os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], - key=sort_key) - to_del = [os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/YUANAI/DiffspeechResearch/utils/audio/rnnoise.py b/spaces/YUANAI/DiffspeechResearch/utils/audio/rnnoise.py deleted file mode 100644 index 47f4eb6471918ca8144f217580a71d1720cd8c36..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/utils/audio/rnnoise.py +++ /dev/null @@ -1,48 +0,0 @@ -# rnnoise.py, requirements: ffmpeg, sox, rnnoise, python -import os -import subprocess - -INSTALL_STR = """ -RNNoise library not found. Please install RNNoise (https://github.com/xiph/rnnoise) to $REPO/rnnoise: -sudo apt-get install -y autoconf automake libtool ffmpeg sox -git clone https://github.com/xiph/rnnoise.git -rm -rf rnnoise/.git -cd rnnoise -./autogen.sh && ./configure && make -cd .. 
-""" - - -def rnnoise(filename, out_fn=None, verbose=False, out_sample_rate=22050): - assert os.path.exists('./rnnoise/examples/rnnoise_demo'), INSTALL_STR - if out_fn is None: - out_fn = f"{filename[:-4]}.denoised.wav" - out_48k_fn = f"{out_fn}.48000.wav" - tmp0_fn = f"{out_fn}.0.wav" - tmp1_fn = f"{out_fn}.1.wav" - tmp2_fn = f"{out_fn}.2.raw" - tmp3_fn = f"{out_fn}.3.raw" - if verbose: - print("Pre-processing audio...") # wav to pcm raw - subprocess.check_call( - f'sox "{filename}" -G -r48000 "{tmp0_fn}"', shell=True, stdin=subprocess.PIPE) # convert to raw - subprocess.check_call( - f'sox -v 0.95 "{tmp0_fn}" "{tmp1_fn}"', shell=True, stdin=subprocess.PIPE) # convert to raw - subprocess.check_call( - f'ffmpeg -y -i "{tmp1_fn}" -loglevel quiet -f s16le -ac 1 -ar 48000 "{tmp2_fn}"', - shell=True, stdin=subprocess.PIPE) # convert to raw - if verbose: - print("Applying rnnoise algorithm to audio...") # rnnoise - subprocess.check_call( - f'./rnnoise/examples/rnnoise_demo "{tmp2_fn}" "{tmp3_fn}"', shell=True) - - if verbose: - print("Post-processing audio...") # pcm raw to wav - if filename == out_fn: - subprocess.check_call(f'rm -f "{out_fn}"', shell=True) - subprocess.check_call( - f'sox -t raw -r 48000 -b 16 -e signed-integer -c 1 "{tmp3_fn}" "{out_48k_fn}"', shell=True) - subprocess.check_call(f'sox "{out_48k_fn}" -G -r{out_sample_rate} "{out_fn}"', shell=True) - subprocess.check_call(f'rm -f "{tmp0_fn}" "{tmp1_fn}" "{tmp2_fn}" "{tmp3_fn}" "{out_48k_fn}"', shell=True) - if verbose: - print("Audio-filtering completed!") diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_sde_vp.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_sde_vp.py deleted file mode 100644 index 5e4fe40229cfdb915aaca768fc484366ef6d60e1..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_sde_vp.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2022 Google Brain and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch - -import math -from typing import Union - -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from .scheduling_utils import SchedulerMixin - - -class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin): - """ - The variance preserving stochastic differential equation (SDE) scheduler. - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. 
- - For more information, see the original paper: https://arxiv.org/abs/2011.13456 - - UNDER CONSTRUCTION - - """ - - order = 1 - - @register_to_config - def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3): - self.sigmas = None - self.discrete_sigmas = None - self.timesteps = None - - def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None): - self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device) - - def step_pred(self, score, x, t, generator=None): - if self.timesteps is None: - raise ValueError( - "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" - ) - - # TODO(Patrick) better comments + non-PyTorch - # postprocess model score - log_mean_coeff = ( - -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min - ) - std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff)) - std = std.flatten() - while len(std.shape) < len(score.shape): - std = std.unsqueeze(-1) - score = -score / std - - # compute - dt = -1.0 / len(self.timesteps) - - beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) - beta_t = beta_t.flatten() - while len(beta_t.shape) < len(x.shape): - beta_t = beta_t.unsqueeze(-1) - drift = -0.5 * beta_t * x - - diffusion = torch.sqrt(beta_t) - drift = drift - diffusion**2 * score - x_mean = x + drift * dt - - # add noise - noise = torch.randn(x.shape, layout=x.layout, generator=generator).to(x.device) - x = x_mean + diffusion * math.sqrt(-dt) * noise - - return x, x_mean - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/export/c10.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/export/c10.py deleted file mode 100644 index 25ee23009547913733dc528fb8a39ca995fd9e31..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/export/c10.py +++ /dev/null @@ -1,534 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import math -import torch -import torch.nn.functional as F - -from detectron2.layers import cat -from detectron2.layers.roi_align_rotated import ROIAlignRotated -from detectron2.modeling import poolers -from detectron2.modeling.proposal_generator import rpn -from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference -from detectron2.structures import Boxes, ImageList, Instances, Keypoints - -from .shared import alias, to_device - - -""" -This file contains caffe2-compatible implementation of several detectron2 components. -""" - - -class Caffe2Boxes(Boxes): - """ - Representing a list of detectron2.structures.Boxes from minibatch, each box - is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector - (batch index + 5 coordinates) for RotatedBoxes. - """ - - def __init__(self, tensor): - assert isinstance(tensor, torch.Tensor) - assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size() - # TODO: make tensor immutable when dim is Nx5 for Boxes, - # and Nx6 for RotatedBoxes? - self.tensor = tensor - - -# TODO clean up this class, maybe just extend Instances -class InstancesList(object): - """ - Tensor representation of a list of Instances object for a batch of images. 
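# Hedged usage sketch (assumes detectron2 is installed and InstancesList / Caffe2Boxes
# are imported from this module): two images, three proposals in the flat
# "batch index + 4 coordinates" layout, converted back to per-image Instances.
import torch

im_info = torch.tensor([[480., 640., 1.], [480., 640., 1.]])      # (H, W, scale) per image
rois = torch.tensor([[0., 10., 10., 100., 100.],
                     [0., 20., 30., 200., 120.],
                     [1.,  5.,  5.,  60.,  80.]])                  # (batch_idx, x1, y1, x2, y2)
scores = torch.tensor([0.9, 0.8, 0.7])

proposals = InstancesList(
    im_info=im_info,
    indices=rois[:, 0],
    extra_fields={
        "proposal_boxes": Caffe2Boxes(rois),
        "objectness_logits": (torch.Tensor, scores),               # (target type, flat tensor) pair
    },
)
per_image = InstancesList.to_d2_instances_list(proposals)          # list of 2 Instances objects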
- - When dealing with a batch of images with Caffe2 ops, a list of bboxes - (instances) are usually represented by single Tensor with size - (sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is - for providing common functions to convert between these two representations. - """ - - def __init__(self, im_info, indices, extra_fields=None): - # [N, 3] -> (H, W, Scale) - self.im_info = im_info - # [N,] -> indice of batch to which the instance belongs - self.indices = indices - # [N, ...] - self.batch_extra_fields = extra_fields or {} - - self.image_size = self.im_info - - def get_fields(self): - """like `get_fields` in the Instances object, - but return each field in tensor representations""" - ret = {} - for k, v in self.batch_extra_fields.items(): - # if isinstance(v, torch.Tensor): - # tensor_rep = v - # elif isinstance(v, (Boxes, Keypoints)): - # tensor_rep = v.tensor - # else: - # raise ValueError("Can't find tensor representation for: {}".format()) - ret[k] = v - return ret - - def has(self, name): - return name in self.batch_extra_fields - - def set(self, name, value): - data_len = len(value) - if len(self.batch_extra_fields): - assert ( - len(self) == data_len - ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) - self.batch_extra_fields[name] = value - - def __setattr__(self, name, val): - if name in ["im_info", "indices", "batch_extra_fields", "image_size"]: - super().__setattr__(name, val) - else: - self.set(name, val) - - def __getattr__(self, name): - if name not in self.batch_extra_fields: - raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) - return self.batch_extra_fields[name] - - def __len__(self): - return len(self.indices) - - def flatten(self): - ret = [] - for _, v in self.batch_extra_fields.items(): - if isinstance(v, (Boxes, Keypoints)): - ret.append(v.tensor) - else: - ret.append(v) - return ret - - @staticmethod - def to_d2_instances_list(instances_list): - """ - Convert InstancesList to List[Instances]. The input `instances_list` can - also be a List[Instances], in this case this method is a non-op. - """ - if not isinstance(instances_list, InstancesList): - assert all(isinstance(x, Instances) for x in instances_list) - return instances_list - - ret = [] - for i, info in enumerate(instances_list.im_info): - instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())])) - - ids = instances_list.indices == i - for k, v in instances_list.batch_extra_fields.items(): - if isinstance(v, torch.Tensor): - instances.set(k, v[ids]) - continue - elif isinstance(v, Boxes): - instances.set(k, v[ids, -4:]) - continue - - target_type, tensor_source = v - assert isinstance(tensor_source, torch.Tensor) - assert tensor_source.shape[0] == instances_list.indices.shape[0] - tensor_source = tensor_source[ids] - - if issubclass(target_type, Boxes): - instances.set(k, Boxes(tensor_source[:, -4:])) - elif issubclass(target_type, Keypoints): - instances.set(k, Keypoints(tensor_source)) - elif issubclass(target_type, torch.Tensor): - instances.set(k, tensor_source) - else: - raise ValueError("Can't handle targe type: {}".format(target_type)) - - ret.append(instances) - return ret - - -class Caffe2Compatible(object): - """ - A model can inherit this class to indicate that it can be traced and deployed with caffe2. 
- """ - - def _get_tensor_mode(self): - return self._tensor_mode - - def _set_tensor_mode(self, v): - self._tensor_mode = v - - tensor_mode = property(_get_tensor_mode, _set_tensor_mode) - """ - If true, the model expects C2-style tensor only inputs/outputs format. - """ - - -class Caffe2RPN(Caffe2Compatible, rpn.RPN): - def _generate_proposals( - self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None - ): - assert isinstance(images, ImageList) - if self.tensor_mode: - im_info = images.image_sizes - else: - im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to( - images.tensor.device - ) - assert isinstance(im_info, torch.Tensor) - - rpn_rois_list = [] - rpn_roi_probs_list = [] - for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip( - objectness_logits_pred, - anchor_deltas_pred, - iter(self.anchor_generator.cell_anchors), - self.anchor_generator.strides, - ): - scores = scores.detach() - bbox_deltas = bbox_deltas.detach() - - rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals( - scores, - bbox_deltas, - im_info, - cell_anchors_tensor, - spatial_scale=1.0 / feat_stride, - pre_nms_topN=self.pre_nms_topk[self.training], - post_nms_topN=self.post_nms_topk[self.training], - nms_thresh=self.nms_thresh, - min_size=self.min_box_size, - # correct_transform_coords=True, # deprecated argument - angle_bound_on=True, # Default - angle_bound_lo=-180, - angle_bound_hi=180, - clip_angle_thresh=1.0, # Default - legacy_plus_one=False, - ) - rpn_rois_list.append(rpn_rois) - rpn_roi_probs_list.append(rpn_roi_probs) - - # For FPN in D2, in RPN all proposals from different levels are concated - # together, ranked and picked by top post_nms_topk. Then in ROIPooler - # it calculates level_assignments and calls the RoIAlign from - # the corresponding level. - - if len(objectness_logits_pred) == 1: - rpn_rois = rpn_rois_list[0] - rpn_roi_probs = rpn_roi_probs_list[0] - else: - assert len(rpn_rois_list) == len(rpn_roi_probs_list) - rpn_post_nms_topN = self.post_nms_topk[self.training] - - device = rpn_rois_list[0].device - input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)] - - # TODO remove this after confirming rpn_max_level/rpn_min_level - # is not needed in CollectRpnProposals. - feature_strides = list(self.anchor_generator.strides) - rpn_min_level = int(math.log2(feature_strides[0])) - rpn_max_level = int(math.log2(feature_strides[-1])) - assert (rpn_max_level - rpn_min_level + 1) == len( - rpn_rois_list - ), "CollectRpnProposals requires continuous levels" - - rpn_rois = torch.ops._caffe2.CollectRpnProposals( - input_list, - # NOTE: in current implementation, rpn_max_level and rpn_min_level - # are not needed, only the subtraction of two matters and it - # can be infer from the number of inputs. Keep them now for - # consistency. 
- rpn_max_level=2 + len(rpn_rois_list) - 1, - rpn_min_level=2, - rpn_post_nms_topN=rpn_post_nms_topN, - ) - rpn_rois = to_device(rpn_rois, device) - rpn_roi_probs = [] - - proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode) - return proposals, {} - - def forward(self, images, features, gt_instances=None): - assert not self.training - features = [features[f] for f in self.in_features] - objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features) - return self._generate_proposals( - images, - objectness_logits_pred, - anchor_deltas_pred, - gt_instances, - ) - - @staticmethod - def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode): - proposals = InstancesList( - im_info=im_info, - indices=rpn_rois[:, 0], - extra_fields={ - "proposal_boxes": Caffe2Boxes(rpn_rois), - "objectness_logits": (torch.Tensor, rpn_roi_probs), - }, - ) - if not tensor_mode: - proposals = InstancesList.to_d2_instances_list(proposals) - else: - proposals = [proposals] - return proposals - - -class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler): - @staticmethod - def c2_preprocess(box_lists): - assert all(isinstance(x, Boxes) for x in box_lists) - if all(isinstance(x, Caffe2Boxes) for x in box_lists): - # input is pure-tensor based - assert len(box_lists) == 1 - pooler_fmt_boxes = box_lists[0].tensor - else: - pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists) - return pooler_fmt_boxes - - def forward(self, x, box_lists): - assert not self.training - - pooler_fmt_boxes = self.c2_preprocess(box_lists) - num_level_assignments = len(self.level_poolers) - - if num_level_assignments == 1: - if isinstance(self.level_poolers[0], ROIAlignRotated): - c2_roi_align = torch.ops._caffe2.RoIAlignRotated - aligned = True - else: - c2_roi_align = torch.ops._caffe2.RoIAlign - aligned = self.level_poolers[0].aligned - - x0 = x[0] - if x0.is_quantized: - x0 = x0.dequantize() - - out = c2_roi_align( - x0, - pooler_fmt_boxes, - order="NCHW", - spatial_scale=float(self.level_poolers[0].spatial_scale), - pooled_h=int(self.output_size[0]), - pooled_w=int(self.output_size[1]), - sampling_ratio=int(self.level_poolers[0].sampling_ratio), - aligned=aligned, - ) - return out - - device = pooler_fmt_boxes.device - assert ( - self.max_level - self.min_level + 1 == 4 - ), "Currently DistributeFpnProposals only support 4 levels" - fpn_outputs = torch.ops._caffe2.DistributeFpnProposals( - to_device(pooler_fmt_boxes, "cpu"), - roi_canonical_scale=self.canonical_box_size, - roi_canonical_level=self.canonical_level, - roi_max_level=self.max_level, - roi_min_level=self.min_level, - legacy_plus_one=False, - ) - fpn_outputs = [to_device(x, device) for x in fpn_outputs] - - rois_fpn_list = fpn_outputs[:-1] - rois_idx_restore_int32 = fpn_outputs[-1] - - roi_feat_fpn_list = [] - for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers): - if isinstance(pooler, ROIAlignRotated): - c2_roi_align = torch.ops._caffe2.RoIAlignRotated - aligned = True - else: - c2_roi_align = torch.ops._caffe2.RoIAlign - aligned = bool(pooler.aligned) - - if x_level.is_quantized: - x_level = x_level.dequantize() - - roi_feat_fpn = c2_roi_align( - x_level, - roi_fpn, - order="NCHW", - spatial_scale=float(pooler.spatial_scale), - pooled_h=int(self.output_size[0]), - pooled_w=int(self.output_size[1]), - sampling_ratio=int(pooler.sampling_ratio), - aligned=aligned, - ) - roi_feat_fpn_list.append(roi_feat_fpn) - - roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0) - assert roi_feat_shuffled.numel() > 0 
and rois_idx_restore_int32.numel() > 0, ( - "Caffe2 export requires tracing with a model checkpoint + input that can produce valid" - " detections. But no detections were obtained with the given checkpoint and input!" - ) - roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32) - return roi_feat - - -class Caffe2FastRCNNOutputsInference: - def __init__(self, tensor_mode): - self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode - - def __call__(self, box_predictor, predictions, proposals): - """equivalent to FastRCNNOutputLayers.inference""" - num_classes = box_predictor.num_classes - score_thresh = box_predictor.test_score_thresh - nms_thresh = box_predictor.test_nms_thresh - topk_per_image = box_predictor.test_topk_per_image - is_rotated = len(box_predictor.box2box_transform.weights) == 5 - - if is_rotated: - box_dim = 5 - assert box_predictor.box2box_transform.weights[4] == 1, ( - "The weights for Rotated BBoxTransform in C2 have only 4 dimensions," - + " thus enforcing the angle weight to be 1 for now" - ) - box2box_transform_weights = box_predictor.box2box_transform.weights[:4] - else: - box_dim = 4 - box2box_transform_weights = box_predictor.box2box_transform.weights - - class_logits, box_regression = predictions - if num_classes + 1 == class_logits.shape[1]: - class_prob = F.softmax(class_logits, -1) - else: - assert num_classes == class_logits.shape[1] - class_prob = F.sigmoid(class_logits) - # BoxWithNMSLimit will infer num_classes from the shape of the class_prob - # So append a zero column as placeholder for the background class - class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1) - - assert box_regression.shape[1] % box_dim == 0 - cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1 - - input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1 - - rois = type(proposals[0].proposal_boxes).cat([p.proposal_boxes for p in proposals]) - device, dtype = rois.tensor.device, rois.tensor.dtype - if input_tensor_mode: - im_info = proposals[0].image_size - rois = rois.tensor - else: - im_info = torch.tensor( - [[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]] - ) - batch_ids = cat( - [ - torch.full((b, 1), i, dtype=dtype, device=device) - for i, b in enumerate(len(p) for p in proposals) - ], - dim=0, - ) - rois = torch.cat([batch_ids, rois.tensor], dim=1) - - roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform( - to_device(rois, "cpu"), - to_device(box_regression, "cpu"), - to_device(im_info, "cpu"), - weights=box2box_transform_weights, - apply_scale=True, - rotated=is_rotated, - angle_bound_on=True, - angle_bound_lo=-180, - angle_bound_hi=180, - clip_angle_thresh=1.0, - legacy_plus_one=False, - ) - roi_pred_bbox = to_device(roi_pred_bbox, device) - roi_batch_splits = to_device(roi_batch_splits, device) - - nms_outputs = torch.ops._caffe2.BoxWithNMSLimit( - to_device(class_prob, "cpu"), - to_device(roi_pred_bbox, "cpu"), - to_device(roi_batch_splits, "cpu"), - score_thresh=float(score_thresh), - nms=float(nms_thresh), - detections_per_im=int(topk_per_image), - soft_nms_enabled=False, - soft_nms_method="linear", - soft_nms_sigma=0.5, - soft_nms_min_score_thres=0.001, - rotated=is_rotated, - cls_agnostic_bbox_reg=cls_agnostic_bbox_reg, - input_boxes_include_bg_cls=False, - output_classes_include_bg_cls=False, - legacy_plus_one=False, - ) - roi_score_nms = to_device(nms_outputs[0], device) - roi_bbox_nms = to_device(nms_outputs[1], device) - 
roi_class_nms = to_device(nms_outputs[2], device) - roi_batch_splits_nms = to_device(nms_outputs[3], device) - roi_keeps_nms = to_device(nms_outputs[4], device) - roi_keeps_size_nms = to_device(nms_outputs[5], device) - if not self.tensor_mode: - roi_class_nms = roi_class_nms.to(torch.int64) - - roi_batch_ids = cat( - [ - torch.full((b, 1), i, dtype=dtype, device=device) - for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms) - ], - dim=0, - ) - - roi_class_nms = alias(roi_class_nms, "class_nms") - roi_score_nms = alias(roi_score_nms, "score_nms") - roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms") - roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms") - roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms") - roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms") - - results = InstancesList( - im_info=im_info, - indices=roi_batch_ids[:, 0], - extra_fields={ - "pred_boxes": Caffe2Boxes(roi_bbox_nms), - "scores": roi_score_nms, - "pred_classes": roi_class_nms, - }, - ) - - if not self.tensor_mode: - results = InstancesList.to_d2_instances_list(results) - batch_splits = roi_batch_splits_nms.int().tolist() - kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits)) - else: - results = [results] - kept_indices = [roi_keeps_nms] - - return results, kept_indices - - -class Caffe2MaskRCNNInference: - def __call__(self, pred_mask_logits, pred_instances): - """equivalent to mask_head.mask_rcnn_inference""" - if all(isinstance(x, InstancesList) for x in pred_instances): - assert len(pred_instances) == 1 - mask_probs_pred = pred_mask_logits.sigmoid() - mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs") - pred_instances[0].pred_masks = mask_probs_pred - else: - mask_rcnn_inference(pred_mask_logits, pred_instances) - - -class Caffe2KeypointRCNNInference: - def __init__(self, use_heatmap_max_keypoint): - self.use_heatmap_max_keypoint = use_heatmap_max_keypoint - - def __call__(self, pred_keypoint_logits, pred_instances): - # just return the keypoint heatmap for now, - # there will be option to call HeatmapMaxKeypointOp - output = alias(pred_keypoint_logits, "kps_score") - if all(isinstance(x, InstancesList) for x in pred_instances): - assert len(pred_instances) == 1 - if self.use_heatmap_max_keypoint: - device = output.device - output = torch.ops._caffe2.HeatmapMaxKeypoint( - to_device(output, "cpu"), - pred_instances[0].pred_boxes.tensor, - should_output_softmax=True, # worth make it configerable? 
- ) - output = to_device(output, device) - output = alias(output, "keypoints_out") - pred_instances[0].pred_keypoints = output - return pred_keypoint_logits diff --git a/spaces/Yuelili/RealNagrse/realesrgan/archs/__init__.py b/spaces/Yuelili/RealNagrse/realesrgan/archs/__init__.py deleted file mode 100644 index f3fbbf3b78e33b61fd4c33a564a9a617010d90de..0000000000000000000000000000000000000000 --- a/spaces/Yuelili/RealNagrse/realesrgan/archs/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -import importlib -from basicsr.utils import scandir -from os import path as osp - -# automatically scan and import arch modules for registry -# scan all the files that end with '_arch.py' under the archs folder -arch_folder = osp.dirname(osp.abspath(__file__)) -arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] -# import all the arch modules -_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames] diff --git a/spaces/Yuliang/ICON/lib/__init__.py b/spaces/Yuliang/ICON/lib/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/three_nn.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/three_nn.py deleted file mode 100644 index 2b01047a129989cd5545a0a86f23a487f4a13ce1..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/ops/three_nn.py +++ /dev/null @@ -1,51 +0,0 @@ -from typing import Tuple - -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['three_nn_forward']) - - -class ThreeNN(Function): - """Find the top-3 nearest neighbors of the target set from the source set. - - Please refer to `Paper of PointNet++ `_ - for more details. - """ - - @staticmethod - def forward(ctx, target: torch.Tensor, - source: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - target (Tensor): shape (B, N, 3), points set that needs to - find the nearest neighbors. - source (Tensor): shape (B, M, 3), points set that is used - to find the nearest neighbors of points in target set. - - Returns: - Tensor: shape (B, N, 3), L2 distance of each point in target - set to their corresponding nearest neighbors. 
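# Hedged usage sketch (assumes a GPU and a CUDA build of the `_ext` extension;
# `three_nn` is the ThreeNN.apply alias defined at the bottom of this file):
import torch

target = torch.rand(2, 128, 3, device='cuda')    # (B, N, 3) query points
source = torch.rand(2, 512, 3, device='cuda')    # (B, M, 3) reference points
dist, idx = three_nn(target, source)             # both (B, N, 3): L2 distances and indices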
- """ - target = target.contiguous() - source = source.contiguous() - - B, N, _ = target.size() - m = source.size(1) - dist2 = torch.cuda.FloatTensor(B, N, 3) - idx = torch.cuda.IntTensor(B, N, 3) - - ext_module.three_nn_forward(target, source, dist2, idx, b=B, n=N, m=m) - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(idx) - - return torch.sqrt(dist2), idx - - @staticmethod - def backward(ctx, a=None, b=None): - return None, None - - -three_nn = ThreeNN.apply diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/necks/pafpn.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/necks/pafpn.py deleted file mode 100644 index d7c0b50f29e882aacb5158b33ead3d4566d0ce0b..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/necks/pafpn.py +++ /dev/null @@ -1,142 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import auto_fp16 - -from ..builder import NECKS -from .fpn import FPN - - -@NECKS.register_module() -class PAFPN(FPN): - """Path Aggregation Network for Instance Segmentation. - - This is an implementation of the `PAFPN in Path Aggregation Network - `_. - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool): Whether to add conv layers on top of the - original feature maps. Default: False. - extra_convs_on_inputs (bool): Whether to apply extra conv on - the original feature from the backbone. Default: False. - relu_before_extra_convs (bool): Whether to apply relu before the extra - conv. Default: False. - no_norm_on_lateral (bool): Whether to apply norm on lateral. - Default: False. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - act_cfg (str): Config dict for activation layer in ConvModule. - Default: None. 
- """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=0, - end_level=-1, - add_extra_convs=False, - extra_convs_on_inputs=True, - relu_before_extra_convs=False, - no_norm_on_lateral=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None): - super(PAFPN, - self).__init__(in_channels, out_channels, num_outs, start_level, - end_level, add_extra_convs, extra_convs_on_inputs, - relu_before_extra_convs, no_norm_on_lateral, - conv_cfg, norm_cfg, act_cfg) - # add extra bottom up pathway - self.downsample_convs = nn.ModuleList() - self.pafpn_convs = nn.ModuleList() - for i in range(self.start_level + 1, self.backbone_end_level): - d_conv = ConvModule( - out_channels, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - pafpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - self.downsample_convs.append(d_conv) - self.pafpn_convs.append(pafpn_conv) - - @auto_fp16() - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.in_channels) - - # build laterals - laterals = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - - # build top-down path - used_backbone_levels = len(laterals) - for i in range(used_backbone_levels - 1, 0, -1): - prev_shape = laterals[i - 1].shape[2:] - laterals[i - 1] += F.interpolate( - laterals[i], size=prev_shape, mode='nearest') - - # build outputs - # part 1: from original levels - inter_outs = [ - self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) - ] - - # part 2: add bottom-up path - for i in range(0, used_backbone_levels - 1): - inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i]) - - outs = [] - outs.append(inter_outs[0]) - outs.extend([ - self.pafpn_convs[i - 1](inter_outs[i]) - for i in range(1, used_backbone_levels) - ]) - - # part 3: add extra levels - if self.num_outs > len(outs): - # use max pool to get more levels on top of outputs - # (e.g., Faster R-CNN, Mask R-CNN) - if not self.add_extra_convs: - for i in range(self.num_outs - used_backbone_levels): - outs.append(F.max_pool2d(outs[-1], 1, stride=2)) - # add conv layers on top of original feature maps (RetinaNet) - else: - if self.add_extra_convs == 'on_input': - orig = inputs[self.backbone_end_level - 1] - outs.append(self.fpn_convs[used_backbone_levels](orig)) - elif self.add_extra_convs == 'on_lateral': - outs.append(self.fpn_convs[used_backbone_levels]( - laterals[-1])) - elif self.add_extra_convs == 'on_output': - outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) - else: - raise NotImplementedError - for i in range(used_backbone_levels + 1, self.num_outs): - if self.relu_before_extra_convs: - outs.append(self.fpn_convs[i](F.relu(outs[-1]))) - else: - outs.append(self.fpn_convs[i](outs[-1])) - return tuple(outs) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/anchor/anchor_generator.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/anchor/anchor_generator.py deleted file mode 100644 index 5b0d0cc0c352bf4599a629979c614616ab383454..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/anchor/anchor_generator.py +++ /dev/null @@ -1,727 +0,0 @@ -import annotator.uniformer.mmcv as mmcv -import numpy as np -import torch -from torch.nn.modules.utils import _pair - -from 
.builder import ANCHOR_GENERATORS - - -@ANCHOR_GENERATORS.register_module() -class AnchorGenerator(object): - """Standard anchor generator for 2D anchor-based detectors. - - Args: - strides (list[int] | list[tuple[int, int]]): Strides of anchors - in multiple feature levels in order (w, h). - ratios (list[float]): The list of ratios between the height and width - of anchors in a single level. - scales (list[int] | None): Anchor scales for anchors in a single level. - It cannot be set at the same time if `octave_base_scale` and - `scales_per_octave` are set. - base_sizes (list[int] | None): The basic sizes - of anchors in multiple levels. - If None is given, strides will be used as base_sizes. - (If strides are non square, the shortest stride is taken.) - scale_major (bool): Whether to multiply scales first when generating - base anchors. If true, the anchors in the same row will have the - same scales. By default it is True in V2.0 - octave_base_scale (int): The base scale of octave. - scales_per_octave (int): Number of scales for each octave. - `octave_base_scale` and `scales_per_octave` are usually used in - retinanet and the `scales` should be None when they are set. - centers (list[tuple[float, float]] | None): The centers of the anchor - relative to the feature grid center in multiple feature levels. - By default it is set to be None and not used. If a list of tuple of - float is given, they will be used to shift the centers of anchors. - center_offset (float): The offset of center in proportion to anchors' - width and height. By default it is 0 in V2.0. - - Examples: - >>> from mmdet.core import AnchorGenerator - >>> self = AnchorGenerator([16], [1.], [1.], [9]) - >>> all_anchors = self.grid_anchors([(2, 2)], device='cpu') - >>> print(all_anchors) - [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], - [11.5000, -4.5000, 20.5000, 4.5000], - [-4.5000, 11.5000, 4.5000, 20.5000], - [11.5000, 11.5000, 20.5000, 20.5000]])] - >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18]) - >>> all_anchors = self.grid_anchors([(2, 2), (1, 1)], device='cpu') - >>> print(all_anchors) - [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], - [11.5000, -4.5000, 20.5000, 4.5000], - [-4.5000, 11.5000, 4.5000, 20.5000], - [11.5000, 11.5000, 20.5000, 20.5000]]), \ - tensor([[-9., -9., 9., 9.]])] - """ - - def __init__(self, - strides, - ratios, - scales=None, - base_sizes=None, - scale_major=True, - octave_base_scale=None, - scales_per_octave=None, - centers=None, - center_offset=0.): - # check center and center_offset - if center_offset != 0: - assert centers is None, 'center cannot be set when center_offset' \ - f'!=0, {centers} is given.' 
- if not (0 <= center_offset <= 1): - raise ValueError('center_offset should be in range [0, 1], ' - f'{center_offset} is given.') - if centers is not None: - assert len(centers) == len(strides), \ - 'The number of strides should be the same as centers, got ' \ - f'{strides} and {centers}' - - # calculate base sizes of anchors - self.strides = [_pair(stride) for stride in strides] - self.base_sizes = [min(stride) for stride in self.strides - ] if base_sizes is None else base_sizes - assert len(self.base_sizes) == len(self.strides), \ - 'The number of strides should be the same as base sizes, got ' \ - f'{self.strides} and {self.base_sizes}' - - # calculate scales of anchors - assert ((octave_base_scale is not None - and scales_per_octave is not None) ^ (scales is not None)), \ - 'scales and octave_base_scale with scales_per_octave cannot' \ - ' be set at the same time' - if scales is not None: - self.scales = torch.Tensor(scales) - elif octave_base_scale is not None and scales_per_octave is not None: - octave_scales = np.array( - [2**(i / scales_per_octave) for i in range(scales_per_octave)]) - scales = octave_scales * octave_base_scale - self.scales = torch.Tensor(scales) - else: - raise ValueError('Either scales or octave_base_scale with ' - 'scales_per_octave should be set') - - self.octave_base_scale = octave_base_scale - self.scales_per_octave = scales_per_octave - self.ratios = torch.Tensor(ratios) - self.scale_major = scale_major - self.centers = centers - self.center_offset = center_offset - self.base_anchors = self.gen_base_anchors() - - @property - def num_base_anchors(self): - """list[int]: total number of base anchors in a feature grid""" - return [base_anchors.size(0) for base_anchors in self.base_anchors] - - @property - def num_levels(self): - """int: number of feature levels that the generator will be applied""" - return len(self.strides) - - def gen_base_anchors(self): - """Generate base anchors. - - Returns: - list(torch.Tensor): Base anchors of a feature grid in multiple \ - feature levels. - """ - multi_level_base_anchors = [] - for i, base_size in enumerate(self.base_sizes): - center = None - if self.centers is not None: - center = self.centers[i] - multi_level_base_anchors.append( - self.gen_single_level_base_anchors( - base_size, - scales=self.scales, - ratios=self.ratios, - center=center)) - return multi_level_base_anchors - - def gen_single_level_base_anchors(self, - base_size, - scales, - ratios, - center=None): - """Generate base anchors of a single level. - - Args: - base_size (int | float): Basic size of an anchor. - scales (torch.Tensor): Scales of the anchor. - ratios (torch.Tensor): The ratio between between the height - and width of anchors in a single level. - center (tuple[float], optional): The center of the base anchor - related to a single feature grid. Defaults to None. - - Returns: - torch.Tensor: Anchors in a single-level feature maps. 
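# Worked numeric illustration (not from the deleted file) of the base-anchor arithmetic
# implemented in gen_single_level_base_anchors below, for the docstring example above
# (base_size=9, ratios=[1.], scales=[1.], center_offset=0):
import torch

base_size = 9
scales, ratios = torch.Tensor([1.]), torch.Tensor([1.])
h_ratios = torch.sqrt(ratios)
w_ratios = 1 / h_ratios
ws = (base_size * w_ratios[:, None] * scales[None, :]).view(-1)
hs = (base_size * h_ratios[:, None] * scales[None, :]).view(-1)
x_center = y_center = 0.
base_anchors = torch.stack(
    [x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws, y_center + 0.5 * hs], dim=-1)
# tensor([[-4.5000, -4.5000,  4.5000,  4.5000]]) -- one 9x9 anchor centred at (0, 0)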
- """ - w = base_size - h = base_size - if center is None: - x_center = self.center_offset * w - y_center = self.center_offset * h - else: - x_center, y_center = center - - h_ratios = torch.sqrt(ratios) - w_ratios = 1 / h_ratios - if self.scale_major: - ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) - hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) - else: - ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) - hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) - - # use float anchor and the anchor's center is aligned with the - # pixel center - base_anchors = [ - x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws, - y_center + 0.5 * hs - ] - base_anchors = torch.stack(base_anchors, dim=-1) - - return base_anchors - - def _meshgrid(self, x, y, row_major=True): - """Generate mesh grid of x and y. - - Args: - x (torch.Tensor): Grids of x dimension. - y (torch.Tensor): Grids of y dimension. - row_major (bool, optional): Whether to return y grids first. - Defaults to True. - - Returns: - tuple[torch.Tensor]: The mesh grids of x and y. - """ - # use shape instead of len to keep tracing while exporting to onnx - xx = x.repeat(y.shape[0]) - yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1) - if row_major: - return xx, yy - else: - return yy, xx - - def grid_anchors(self, featmap_sizes, device='cuda'): - """Generate grid anchors in multiple feature levels. - - Args: - featmap_sizes (list[tuple]): List of feature map sizes in - multiple feature levels. - device (str): Device where the anchors will be put on. - - Return: - list[torch.Tensor]: Anchors in multiple feature levels. \ - The sizes of each tensor should be [N, 4], where \ - N = width * height * num_base_anchors, width and height \ - are the sizes of the corresponding feature level, \ - num_base_anchors is the number of anchors for that level. - """ - assert self.num_levels == len(featmap_sizes) - multi_level_anchors = [] - for i in range(self.num_levels): - anchors = self.single_level_grid_anchors( - self.base_anchors[i].to(device), - featmap_sizes[i], - self.strides[i], - device=device) - multi_level_anchors.append(anchors) - return multi_level_anchors - - def single_level_grid_anchors(self, - base_anchors, - featmap_size, - stride=(16, 16), - device='cuda'): - """Generate grid anchors of a single level. - - Note: - This function is usually called by method ``self.grid_anchors``. - - Args: - base_anchors (torch.Tensor): The base anchors of a feature grid. - featmap_size (tuple[int]): Size of the feature maps. - stride (tuple[int], optional): Stride of the feature map in order - (w, h). Defaults to (16, 16). - device (str, optional): Device the tensor will be put on. - Defaults to 'cuda'. - - Returns: - torch.Tensor: Anchors in the overall feature maps. 
- """ - # keep as Tensor, so that we can covert to ONNX correctly - feat_h, feat_w = featmap_size - shift_x = torch.arange(0, feat_w, device=device) * stride[0] - shift_y = torch.arange(0, feat_h, device=device) * stride[1] - - shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) - shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) - shifts = shifts.type_as(base_anchors) - # first feat_w elements correspond to the first row of shifts - # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get - # shifted anchors (K, A, 4), reshape to (K*A, 4) - - all_anchors = base_anchors[None, :, :] + shifts[:, None, :] - all_anchors = all_anchors.view(-1, 4) - # first A rows correspond to A anchors of (0, 0) in feature map, - # then (0, 1), (0, 2), ... - return all_anchors - - def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): - """Generate valid flags of anchors in multiple feature levels. - - Args: - featmap_sizes (list(tuple)): List of feature map sizes in - multiple feature levels. - pad_shape (tuple): The padded shape of the image. - device (str): Device where the anchors will be put on. - - Return: - list(torch.Tensor): Valid flags of anchors in multiple levels. - """ - assert self.num_levels == len(featmap_sizes) - multi_level_flags = [] - for i in range(self.num_levels): - anchor_stride = self.strides[i] - feat_h, feat_w = featmap_sizes[i] - h, w = pad_shape[:2] - valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h) - valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w) - flags = self.single_level_valid_flags((feat_h, feat_w), - (valid_feat_h, valid_feat_w), - self.num_base_anchors[i], - device=device) - multi_level_flags.append(flags) - return multi_level_flags - - def single_level_valid_flags(self, - featmap_size, - valid_size, - num_base_anchors, - device='cuda'): - """Generate the valid flags of anchor in a single feature map. - - Args: - featmap_size (tuple[int]): The size of feature maps. - valid_size (tuple[int]): The valid size of the feature maps. - num_base_anchors (int): The number of base anchors. - device (str, optional): Device where the flags will be put on. - Defaults to 'cuda'. - - Returns: - torch.Tensor: The valid flags of each anchor in a single level \ - feature map. 
- """ - feat_h, feat_w = featmap_size - valid_h, valid_w = valid_size - assert valid_h <= feat_h and valid_w <= feat_w - valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) - valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) - valid_x[:valid_w] = 1 - valid_y[:valid_h] = 1 - valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) - valid = valid_xx & valid_yy - valid = valid[:, None].expand(valid.size(0), - num_base_anchors).contiguous().view(-1) - return valid - - def __repr__(self): - """str: a string that describes the module""" - indent_str = ' ' - repr_str = self.__class__.__name__ + '(\n' - repr_str += f'{indent_str}strides={self.strides},\n' - repr_str += f'{indent_str}ratios={self.ratios},\n' - repr_str += f'{indent_str}scales={self.scales},\n' - repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' - repr_str += f'{indent_str}scale_major={self.scale_major},\n' - repr_str += f'{indent_str}octave_base_scale=' - repr_str += f'{self.octave_base_scale},\n' - repr_str += f'{indent_str}scales_per_octave=' - repr_str += f'{self.scales_per_octave},\n' - repr_str += f'{indent_str}num_levels={self.num_levels}\n' - repr_str += f'{indent_str}centers={self.centers},\n' - repr_str += f'{indent_str}center_offset={self.center_offset})' - return repr_str - - -@ANCHOR_GENERATORS.register_module() -class SSDAnchorGenerator(AnchorGenerator): - """Anchor generator for SSD. - - Args: - strides (list[int] | list[tuple[int, int]]): Strides of anchors - in multiple feature levels. - ratios (list[float]): The list of ratios between the height and width - of anchors in a single level. - basesize_ratio_range (tuple(float)): Ratio range of anchors. - input_size (int): Size of feature map, 300 for SSD300, - 512 for SSD512. - scale_major (bool): Whether to multiply scales first when generating - base anchors. If true, the anchors in the same row will have the - same scales. It is always set to be False in SSD. - """ - - def __init__(self, - strides, - ratios, - basesize_ratio_range, - input_size=300, - scale_major=True): - assert len(strides) == len(ratios) - assert mmcv.is_tuple_of(basesize_ratio_range, float) - - self.strides = [_pair(stride) for stride in strides] - self.input_size = input_size - self.centers = [(stride[0] / 2., stride[1] / 2.) 
- for stride in self.strides] - self.basesize_ratio_range = basesize_ratio_range - - # calculate anchor ratios and sizes - min_ratio, max_ratio = basesize_ratio_range - min_ratio = int(min_ratio * 100) - max_ratio = int(max_ratio * 100) - step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) - min_sizes = [] - max_sizes = [] - for ratio in range(int(min_ratio), int(max_ratio) + 1, step): - min_sizes.append(int(self.input_size * ratio / 100)) - max_sizes.append(int(self.input_size * (ratio + step) / 100)) - if self.input_size == 300: - if basesize_ratio_range[0] == 0.15: # SSD300 COCO - min_sizes.insert(0, int(self.input_size * 7 / 100)) - max_sizes.insert(0, int(self.input_size * 15 / 100)) - elif basesize_ratio_range[0] == 0.2: # SSD300 VOC - min_sizes.insert(0, int(self.input_size * 10 / 100)) - max_sizes.insert(0, int(self.input_size * 20 / 100)) - else: - raise ValueError( - 'basesize_ratio_range[0] should be either 0.15' - 'or 0.2 when input_size is 300, got ' - f'{basesize_ratio_range[0]}.') - elif self.input_size == 512: - if basesize_ratio_range[0] == 0.1: # SSD512 COCO - min_sizes.insert(0, int(self.input_size * 4 / 100)) - max_sizes.insert(0, int(self.input_size * 10 / 100)) - elif basesize_ratio_range[0] == 0.15: # SSD512 VOC - min_sizes.insert(0, int(self.input_size * 7 / 100)) - max_sizes.insert(0, int(self.input_size * 15 / 100)) - else: - raise ValueError('basesize_ratio_range[0] should be either 0.1' - 'or 0.15 when input_size is 512, got' - f' {basesize_ratio_range[0]}.') - else: - raise ValueError('Only support 300 or 512 in SSDAnchorGenerator' - f', got {self.input_size}.') - - anchor_ratios = [] - anchor_scales = [] - for k in range(len(self.strides)): - scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] - anchor_ratio = [1.] - for r in ratios[k]: - anchor_ratio += [1 / r, r] # 4 or 6 ratio - anchor_ratios.append(torch.Tensor(anchor_ratio)) - anchor_scales.append(torch.Tensor(scales)) - - self.base_sizes = min_sizes - self.scales = anchor_scales - self.ratios = anchor_ratios - self.scale_major = scale_major - self.center_offset = 0 - self.base_anchors = self.gen_base_anchors() - - def gen_base_anchors(self): - """Generate base anchors. - - Returns: - list(torch.Tensor): Base anchors of a feature grid in multiple \ - feature levels. 
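# Worked example (not from the deleted file) of the min/max size schedule built in
# __init__ above for the SSD300 COCO setting: six feature levels, input_size=300,
# basesize_ratio_range=(0.15, 0.9).
import numpy as np

input_size, num_levels = 300, 6
min_ratio, max_ratio = int(0.15 * 100), int(0.9 * 100)                   # 15, 90
step = int(np.floor(max_ratio - min_ratio) / (num_levels - 2))           # 18
min_sizes = [int(input_size * r / 100) for r in range(min_ratio, max_ratio + 1, step)]
max_sizes = [int(input_size * (r + step) / 100) for r in range(min_ratio, max_ratio + 1, step)]
min_sizes.insert(0, int(input_size * 7 / 100))                           # extra smallest level
max_sizes.insert(0, int(input_size * 15 / 100))
# min_sizes == [21, 45, 99, 153, 207, 261], max_sizes == [45, 99, 153, 207, 261, 315]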
- """ - multi_level_base_anchors = [] - for i, base_size in enumerate(self.base_sizes): - base_anchors = self.gen_single_level_base_anchors( - base_size, - scales=self.scales[i], - ratios=self.ratios[i], - center=self.centers[i]) - indices = list(range(len(self.ratios[i]))) - indices.insert(1, len(indices)) - base_anchors = torch.index_select(base_anchors, 0, - torch.LongTensor(indices)) - multi_level_base_anchors.append(base_anchors) - return multi_level_base_anchors - - def __repr__(self): - """str: a string that describes the module""" - indent_str = ' ' - repr_str = self.__class__.__name__ + '(\n' - repr_str += f'{indent_str}strides={self.strides},\n' - repr_str += f'{indent_str}scales={self.scales},\n' - repr_str += f'{indent_str}scale_major={self.scale_major},\n' - repr_str += f'{indent_str}input_size={self.input_size},\n' - repr_str += f'{indent_str}scales={self.scales},\n' - repr_str += f'{indent_str}ratios={self.ratios},\n' - repr_str += f'{indent_str}num_levels={self.num_levels},\n' - repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' - repr_str += f'{indent_str}basesize_ratio_range=' - repr_str += f'{self.basesize_ratio_range})' - return repr_str - - -@ANCHOR_GENERATORS.register_module() -class LegacyAnchorGenerator(AnchorGenerator): - """Legacy anchor generator used in MMDetection V1.x. - - Note: - Difference to the V2.0 anchor generator: - - 1. The center offset of V1.x anchors are set to be 0.5 rather than 0. - 2. The width/height are minused by 1 when calculating the anchors' \ - centers and corners to meet the V1.x coordinate system. - 3. The anchors' corners are quantized. - - Args: - strides (list[int] | list[tuple[int]]): Strides of anchors - in multiple feature levels. - ratios (list[float]): The list of ratios between the height and width - of anchors in a single level. - scales (list[int] | None): Anchor scales for anchors in a single level. - It cannot be set at the same time if `octave_base_scale` and - `scales_per_octave` are set. - base_sizes (list[int]): The basic sizes of anchors in multiple levels. - If None is given, strides will be used to generate base_sizes. - scale_major (bool): Whether to multiply scales first when generating - base anchors. If true, the anchors in the same row will have the - same scales. By default it is True in V2.0 - octave_base_scale (int): The base scale of octave. - scales_per_octave (int): Number of scales for each octave. - `octave_base_scale` and `scales_per_octave` are usually used in - retinanet and the `scales` should be None when they are set. - centers (list[tuple[float, float]] | None): The centers of the anchor - relative to the feature grid center in multiple feature levels. - By default it is set to be None and not used. It a list of float - is given, this list will be used to shift the centers of anchors. - center_offset (float): The offset of center in propotion to anchors' - width and height. By default it is 0.5 in V2.0 but it should be 0.5 - in v1.x models. - - Examples: - >>> from mmdet.core import LegacyAnchorGenerator - >>> self = LegacyAnchorGenerator( - >>> [16], [1.], [1.], [9], center_offset=0.5) - >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu') - >>> print(all_anchors) - [tensor([[ 0., 0., 8., 8.], - [16., 0., 24., 8.], - [ 0., 16., 8., 24.], - [16., 16., 24., 24.]])] - """ - - def gen_single_level_base_anchors(self, - base_size, - scales, - ratios, - center=None): - """Generate base anchors of a single level. 
- - Note: - The width/height of anchors are minused by 1 when calculating \ - the centers and corners to meet the V1.x coordinate system. - - Args: - base_size (int | float): Basic size of an anchor. - scales (torch.Tensor): Scales of the anchor. - ratios (torch.Tensor): The ratio between between the height. - and width of anchors in a single level. - center (tuple[float], optional): The center of the base anchor - related to a single feature grid. Defaults to None. - - Returns: - torch.Tensor: Anchors in a single-level feature map. - """ - w = base_size - h = base_size - if center is None: - x_center = self.center_offset * (w - 1) - y_center = self.center_offset * (h - 1) - else: - x_center, y_center = center - - h_ratios = torch.sqrt(ratios) - w_ratios = 1 / h_ratios - if self.scale_major: - ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) - hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) - else: - ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) - hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) - - # use float anchor and the anchor's center is aligned with the - # pixel center - base_anchors = [ - x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1), - x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1) - ] - base_anchors = torch.stack(base_anchors, dim=-1).round() - - return base_anchors - - -@ANCHOR_GENERATORS.register_module() -class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator): - """Legacy anchor generator used in MMDetection V1.x. - - The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator` - can be found in `LegacyAnchorGenerator`. - """ - - def __init__(self, - strides, - ratios, - basesize_ratio_range, - input_size=300, - scale_major=True): - super(LegacySSDAnchorGenerator, - self).__init__(strides, ratios, basesize_ratio_range, input_size, - scale_major) - self.centers = [((stride - 1) / 2., (stride - 1) / 2.) - for stride in strides] - self.base_anchors = self.gen_base_anchors() - - -@ANCHOR_GENERATORS.register_module() -class YOLOAnchorGenerator(AnchorGenerator): - """Anchor generator for YOLO. - - Args: - strides (list[int] | list[tuple[int, int]]): Strides of anchors - in multiple feature levels. - base_sizes (list[list[tuple[int, int]]]): The basic sizes - of anchors in multiple levels. - """ - - def __init__(self, strides, base_sizes): - self.strides = [_pair(stride) for stride in strides] - self.centers = [(stride[0] / 2., stride[1] / 2.) - for stride in self.strides] - self.base_sizes = [] - num_anchor_per_level = len(base_sizes[0]) - for base_sizes_per_level in base_sizes: - assert num_anchor_per_level == len(base_sizes_per_level) - self.base_sizes.append( - [_pair(base_size) for base_size in base_sizes_per_level]) - self.base_anchors = self.gen_base_anchors() - - @property - def num_levels(self): - """int: number of feature levels that the generator will be applied""" - return len(self.base_sizes) - - def gen_base_anchors(self): - """Generate base anchors. - - Returns: - list(torch.Tensor): Base anchors of a feature grid in multiple \ - feature levels. - """ - multi_level_base_anchors = [] - for i, base_sizes_per_level in enumerate(self.base_sizes): - center = None - if self.centers is not None: - center = self.centers[i] - multi_level_base_anchors.append( - self.gen_single_level_base_anchors(base_sizes_per_level, - center)) - return multi_level_base_anchors - - def gen_single_level_base_anchors(self, base_sizes_per_level, center=None): - """Generate base anchors of a single level. 
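# Hedged usage sketch with the commonly used YOLOv3 anchor boxes (typical config values,
# not taken from this diff): three anchors per level over three strides.
import torch

anchor_generator = YOLOAnchorGenerator(
    strides=[32, 16, 8],
    base_sizes=[[(116, 90), (156, 198), (373, 326)],    # stride 32
                [(30, 61), (62, 45), (59, 119)],         # stride 16
                [(10, 13), (16, 30), (33, 23)]])         # stride 8
base_anchors = anchor_generator.gen_base_anchors()       # three (3, 4) tensors
all_anchors = anchor_generator.grid_anchors([(13, 13), (26, 26), (52, 52)], device='cpu')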
- - Args: - base_sizes_per_level (list[tuple[int, int]]): Basic sizes of - anchors. - center (tuple[float], optional): The center of the base anchor - related to a single feature grid. Defaults to None. - - Returns: - torch.Tensor: Anchors in a single-level feature maps. - """ - x_center, y_center = center - base_anchors = [] - for base_size in base_sizes_per_level: - w, h = base_size - - # use float anchor and the anchor's center is aligned with the - # pixel center - base_anchor = torch.Tensor([ - x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, - y_center + 0.5 * h - ]) - base_anchors.append(base_anchor) - base_anchors = torch.stack(base_anchors, dim=0) - - return base_anchors - - def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'): - """Generate responsible anchor flags of grid cells in multiple scales. - - Args: - featmap_sizes (list(tuple)): List of feature map sizes in multiple - feature levels. - gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). - device (str): Device where the anchors will be put on. - - Return: - list(torch.Tensor): responsible flags of anchors in multiple level - """ - assert self.num_levels == len(featmap_sizes) - multi_level_responsible_flags = [] - for i in range(self.num_levels): - anchor_stride = self.strides[i] - flags = self.single_level_responsible_flags( - featmap_sizes[i], - gt_bboxes, - anchor_stride, - self.num_base_anchors[i], - device=device) - multi_level_responsible_flags.append(flags) - return multi_level_responsible_flags - - def single_level_responsible_flags(self, - featmap_size, - gt_bboxes, - stride, - num_base_anchors, - device='cuda'): - """Generate the responsible flags of anchor in a single feature map. - - Args: - featmap_size (tuple[int]): The size of feature maps. - gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). - stride (tuple(int)): stride of current level - num_base_anchors (int): The number of base anchors. - device (str, optional): Device where the flags will be put on. - Defaults to 'cuda'. - - Returns: - torch.Tensor: The valid flags of each anchor in a single level \ - feature map. 
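# Worked illustration of the grid-cell assignment computed below: a GT box whose centre
# lands at (100., 60.) on a stride-32 level of a 13x13 map belongs to grid cell
# (x=3, y=1), i.e. flat index 1 * 13 + 3 = 16.
import torch

feat_h, feat_w, stride = 13, 13, (32, 32)
gt_bboxes = torch.tensor([[84., 44., 116., 76.]])                 # centre (100., 60.)
cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5
cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5
grid_idx = (torch.floor(cy / stride[1]).long() * feat_w
            + torch.floor(cx / stride[0]).long())                  # tensor([16])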
- """ - feat_h, feat_w = featmap_size - gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) - gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) - gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long() - gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long() - - # row major indexing - gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x - - responsible_grid = torch.zeros( - feat_h * feat_w, dtype=torch.uint8, device=device) - responsible_grid[gt_bboxes_grid_idx] = 1 - - responsible_grid = responsible_grid[:, None].expand( - responsible_grid.size(0), num_base_anchors).contiguous().view(-1) - return responsible_grid diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/yolact_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/yolact_head.py deleted file mode 100644 index 10d311f94ee99e1bf65ee3e5827f1699c28a23e3..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/yolact_head.py +++ /dev/null @@ -1,943 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, xavier_init -from mmcv.runner import force_fp32 - -from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply -from ..builder import HEADS, build_loss -from .anchor_head import AnchorHead - - -@HEADS.register_module() -class YOLACTHead(AnchorHead): - """YOLACT box head used in https://arxiv.org/abs/1904.02689. - - Note that YOLACT head is a light version of RetinaNet head. - Four differences are described as follows: - - 1. YOLACT box head has three-times fewer anchors. - 2. YOLACT box head shares the convs for box and cls branches. - 3. YOLACT box head uses OHEM instead of Focal loss. - 4. YOLACT box head predicts a set of mask coefficients for each box. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - anchor_generator (dict): Config dict for anchor generator - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - num_head_convs (int): Number of the conv layers shared by - box and cls branches. - num_protos (int): Number of the mask coefficients. - use_ohem (bool): If true, ``loss_single_OHEM`` will be used for - cls loss calculation. If false, ``loss_single`` will be used. - conv_cfg (dict): Dictionary to construct and config conv layer. - norm_cfg (dict): Dictionary to construct and config norm layer. 
- """ - - def __init__(self, - num_classes, - in_channels, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=3, - scales_per_octave=1, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - reduction='none', - loss_weight=1.0), - loss_bbox=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=1.5), - num_head_convs=1, - num_protos=32, - use_ohem=True, - conv_cfg=None, - norm_cfg=None, - **kwargs): - self.num_head_convs = num_head_convs - self.num_protos = num_protos - self.use_ohem = use_ohem - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - super(YOLACTHead, self).__init__( - num_classes, - in_channels, - loss_cls=loss_cls, - loss_bbox=loss_bbox, - anchor_generator=anchor_generator, - **kwargs) - if self.use_ohem: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.sampling = False - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.head_convs = nn.ModuleList() - for i in range(self.num_head_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.head_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.conv_cls = nn.Conv2d( - self.feat_channels, - self.num_anchors * self.cls_out_channels, - 3, - padding=1) - self.conv_reg = nn.Conv2d( - self.feat_channels, self.num_anchors * 4, 3, padding=1) - self.conv_coeff = nn.Conv2d( - self.feat_channels, - self.num_anchors * self.num_protos, - 3, - padding=1) - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.head_convs: - xavier_init(m.conv, distribution='uniform', bias=0) - xavier_init(self.conv_cls, distribution='uniform', bias=0) - xavier_init(self.conv_reg, distribution='uniform', bias=0) - xavier_init(self.conv_coeff, distribution='uniform', bias=0) - - def forward_single(self, x): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - - Returns: - tuple: - cls_score (Tensor): Cls scores for a single scale level \ - the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale \ - level, the channels number is num_anchors * 4. - coeff_pred (Tensor): Mask coefficients for a single scale \ - level, the channels number is num_anchors * num_protos. - """ - for head_conv in self.head_convs: - x = head_conv(x) - cls_score = self.conv_cls(x) - bbox_pred = self.conv_reg(x) - coeff_pred = self.conv_coeff(x).tanh() - return cls_score, bbox_pred, coeff_pred - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """A combination of the func:``AnchorHead.loss`` and - func:``SSDHead.loss``. - - When ``self.use_ohem == True``, it functions like ``SSDHead.loss``, - otherwise, it follows ``AnchorHead.loss``. Besides, it additionally - returns ``sampling_results``. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
- gt_labels (list[Tensor]): Class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): Specify which bounding - boxes can be ignored when computing the loss. Default: None - - Returns: - tuple: - dict[str, Tensor]: A dictionary of loss components. - List[:obj:``SamplingResult``]: Sampler results for each image. - """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.anchor_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - unmap_outputs=not self.use_ohem, - return_sampling_results=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, sampling_results) = cls_reg_targets - - if self.use_ohem: - num_images = len(img_metas) - all_cls_scores = torch.cat([ - s.permute(0, 2, 3, 1).reshape( - num_images, -1, self.cls_out_channels) for s in cls_scores - ], 1) - all_labels = torch.cat(labels_list, -1).view(num_images, -1) - all_label_weights = torch.cat(label_weights_list, - -1).view(num_images, -1) - all_bbox_preds = torch.cat([ - b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) - for b in bbox_preds - ], -2) - all_bbox_targets = torch.cat(bbox_targets_list, - -2).view(num_images, -1, 4) - all_bbox_weights = torch.cat(bbox_weights_list, - -2).view(num_images, -1, 4) - - # concat all level anchors to a single tensor - all_anchors = [] - for i in range(num_images): - all_anchors.append(torch.cat(anchor_list[i])) - - # check NaN and Inf - assert torch.isfinite(all_cls_scores).all().item(), \ - 'classification scores become infinite or NaN!' - assert torch.isfinite(all_bbox_preds).all().item(), \ - 'bbox predications become infinite or NaN!' 
- - losses_cls, losses_bbox = multi_apply( - self.loss_single_OHEM, - all_cls_scores, - all_bbox_preds, - all_anchors, - all_labels, - all_label_weights, - all_bbox_targets, - all_bbox_weights, - num_total_samples=num_total_pos) - else: - num_total_samples = ( - num_total_pos + - num_total_neg if self.sampling else num_total_pos) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors and flags to a single tensor - concat_anchor_list = [] - for i in range(len(anchor_list)): - concat_anchor_list.append(torch.cat(anchor_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - losses_cls, losses_bbox = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - all_anchor_list, - labels_list, - label_weights_list, - bbox_targets_list, - bbox_weights_list, - num_total_samples=num_total_samples) - - return dict( - loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results - - def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels, - label_weights, bbox_targets, bbox_weights, - num_total_samples): - """"See func:``SSDHead.loss``.""" - loss_cls_all = self.loss_cls(cls_score, labels, label_weights) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( - as_tuple=False).reshape(-1) - neg_inds = (labels == self.num_classes).nonzero( - as_tuple=False).view(-1) - - num_pos_samples = pos_inds.size(0) - if num_pos_samples == 0: - num_neg_samples = neg_inds.size(0) - else: - num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples - if num_neg_samples > neg_inds.size(0): - num_neg_samples = neg_inds.size(0) - topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) - loss_cls_pos = loss_cls_all[pos_inds].sum() - loss_cls_neg = topk_loss_cls_neg.sum() - loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, it - # decodes the already encoded coordinates to absolute format. - bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) - loss_bbox = self.loss_bbox( - bbox_pred, - bbox_targets, - bbox_weights, - avg_factor=num_total_samples) - return loss_cls[None], loss_bbox - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - coeff_preds, - img_metas, - cfg=None, - rescale=False): - """"Similiar to func:``AnchorHead.get_bboxes``, but additionally - processes coeff_preds. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - with shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - coeff_preds (list[Tensor]): Mask coefficients for each scale - level with shape (N, num_anchors * num_protos, H, W) - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used - rescale (bool): If True, return boxes in original image space. - Default: False. - - Returns: - list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is - a 3-tuple. The first item is an (n, 5) tensor, where the - first 4 columns are bounding box positions - (tl_x, tl_y, br_x, br_y) and the 5-th column is a score - between 0 and 1. 
The second item is an (n,) tensor where each - item is the predicted class label of the corresponding box. - The third item is an (n, num_protos) tensor where each item - is the predicted mask coefficients of instance inside the - corresponding box. - """ - assert len(cls_scores) == len(bbox_preds) - num_levels = len(cls_scores) - - device = cls_scores[0].device - featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] - mlvl_anchors = self.anchor_generator.grid_anchors( - featmap_sizes, device=device) - - det_bboxes = [] - det_labels = [] - det_coeffs = [] - for img_id in range(len(img_metas)): - cls_score_list = [ - cls_scores[i][img_id].detach() for i in range(num_levels) - ] - bbox_pred_list = [ - bbox_preds[i][img_id].detach() for i in range(num_levels) - ] - coeff_pred_list = [ - coeff_preds[i][img_id].detach() for i in range(num_levels) - ] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list, - coeff_pred_list, mlvl_anchors, - img_shape, scale_factor, cfg, - rescale) - det_bboxes.append(bbox_res[0]) - det_labels.append(bbox_res[1]) - det_coeffs.append(bbox_res[2]) - return det_bboxes, det_labels, det_coeffs - - def _get_bboxes_single(self, - cls_score_list, - bbox_pred_list, - coeff_preds_list, - mlvl_anchors, - img_shape, - scale_factor, - cfg, - rescale=False): - """"Similiar to func:``AnchorHead._get_bboxes_single``, but - additionally processes coeff_preds_list and uses fast NMS instead of - traditional NMS. - - Args: - cls_score_list (list[Tensor]): Box scores for a single scale level - Has shape (num_anchors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas for a single - scale level with shape (num_anchors * 4, H, W). - coeff_preds_list (list[Tensor]): Mask coefficients for a single - scale level with shape (num_anchors * num_protos, H, W). - mlvl_anchors (list[Tensor]): Box reference for a single scale level - with shape (num_total_anchors, 4). - img_shape (tuple[int]): Shape of the input image, - (height, width, 3). - scale_factor (ndarray): Scale factor of the image arange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - - Returns: - tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor, - where the first 4 columns are bounding box positions - (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between - 0 and 1. The second item is an (n,) tensor where each item is - the predicted class label of the corresponding box. The third - item is an (n, num_protos) tensor where each item is the - predicted mask coefficients of instance inside the - corresponding box. 
- """ - cfg = self.test_cfg if cfg is None else cfg - assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors) - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_coeffs = [] - for cls_score, bbox_pred, coeff_pred, anchors in \ - zip(cls_score_list, bbox_pred_list, - coeff_preds_list, mlvl_anchors): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - cls_score = cls_score.permute(1, 2, - 0).reshape(-1, self.cls_out_channels) - if self.use_sigmoid_cls: - scores = cls_score.sigmoid() - else: - scores = cls_score.softmax(-1) - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) - coeff_pred = coeff_pred.permute(1, 2, - 0).reshape(-1, self.num_protos) - nms_pre = cfg.get('nms_pre', -1) - if nms_pre > 0 and scores.shape[0] > nms_pre: - # Get maximum scores for foreground classes. - if self.use_sigmoid_cls: - max_scores, _ = scores.max(dim=1) - else: - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class - max_scores, _ = scores[:, :-1].max(dim=1) - _, topk_inds = max_scores.topk(nms_pre) - anchors = anchors[topk_inds, :] - bbox_pred = bbox_pred[topk_inds, :] - scores = scores[topk_inds, :] - coeff_pred = coeff_pred[topk_inds, :] - bboxes = self.bbox_coder.decode( - anchors, bbox_pred, max_shape=img_shape) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_coeffs.append(coeff_pred) - mlvl_bboxes = torch.cat(mlvl_bboxes) - if rescale: - mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) - mlvl_scores = torch.cat(mlvl_scores) - mlvl_coeffs = torch.cat(mlvl_coeffs) - if self.use_sigmoid_cls: - # Add a dummy background class to the backend when using sigmoid - # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 - # BG cat_id: num_class - padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) - mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) - det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores, - mlvl_coeffs, - cfg.score_thr, - cfg.iou_thr, cfg.top_k, - cfg.max_per_img) - return det_bboxes, det_labels, det_coeffs - - -@HEADS.register_module() -class YOLACTSegmHead(nn.Module): - """YOLACT segmentation head used in https://arxiv.org/abs/1904.02689. - - Apply a semantic segmentation loss on feature space using layers that are - only evaluated during training to increase performance with no speed - penalty. - - Args: - in_channels (int): Number of channels in the input feature map. - num_classes (int): Number of categories excluding the background - category. - loss_segm (dict): Config of semantic segmentation loss. - """ - - def __init__(self, - num_classes, - in_channels=256, - loss_segm=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0)): - super(YOLACTSegmHead, self).__init__() - self.in_channels = in_channels - self.num_classes = num_classes - self.loss_segm = build_loss(loss_segm) - self._init_layers() - self.fp16_enabled = False - - def _init_layers(self): - """Initialize layers of the head.""" - self.segm_conv = nn.Conv2d( - self.in_channels, self.num_classes, kernel_size=1) - - def init_weights(self): - """Initialize weights of the head.""" - xavier_init(self.segm_conv, distribution='uniform') - - def forward(self, x): - """Forward feature from the upstream network. - - Args: - x (Tensor): Feature from the upstream network, which is - a 4D-tensor. - - Returns: - Tensor: Predicted semantic segmentation map with shape - (N, num_classes, H, W). 
- """ - return self.segm_conv(x) - - @force_fp32(apply_to=('segm_pred', )) - def loss(self, segm_pred, gt_masks, gt_labels): - """Compute loss of the head. - - Args: - segm_pred (list[Tensor]): Predicted semantic segmentation map - with shape (N, num_classes, H, W). - gt_masks (list[Tensor]): Ground truth masks for each image with - the same shape of the input image. - gt_labels (list[Tensor]): Class indices corresponding to each box. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - loss_segm = [] - num_imgs, num_classes, mask_h, mask_w = segm_pred.size() - for idx in range(num_imgs): - cur_segm_pred = segm_pred[idx] - cur_gt_masks = gt_masks[idx].float() - cur_gt_labels = gt_labels[idx] - segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks, - cur_gt_labels) - if segm_targets is None: - loss = self.loss_segm(cur_segm_pred, - torch.zeros_like(cur_segm_pred), - torch.zeros_like(cur_segm_pred)) - else: - loss = self.loss_segm( - cur_segm_pred, - segm_targets, - avg_factor=num_imgs * mask_h * mask_w) - loss_segm.append(loss) - return dict(loss_segm=loss_segm) - - def get_targets(self, segm_pred, gt_masks, gt_labels): - """Compute semantic segmentation targets for each image. - - Args: - segm_pred (Tensor): Predicted semantic segmentation map - with shape (num_classes, H, W). - gt_masks (Tensor): Ground truth masks for each image with - the same shape of the input image. - gt_labels (Tensor): Class indices corresponding to each box. - - Returns: - Tensor: Semantic segmentation targets with shape - (num_classes, H, W). - """ - if gt_masks.size(0) == 0: - return None - num_classes, mask_h, mask_w = segm_pred.size() - with torch.no_grad(): - downsampled_masks = F.interpolate( - gt_masks.unsqueeze(0), (mask_h, mask_w), - mode='bilinear', - align_corners=False).squeeze(0) - downsampled_masks = downsampled_masks.gt(0.5).float() - segm_targets = torch.zeros_like(segm_pred, requires_grad=False) - for obj_idx in range(downsampled_masks.size(0)): - segm_targets[gt_labels[obj_idx] - 1] = torch.max( - segm_targets[gt_labels[obj_idx] - 1], - downsampled_masks[obj_idx]) - return segm_targets - - -@HEADS.register_module() -class YOLACTProtonet(nn.Module): - """YOLACT mask head used in https://arxiv.org/abs/1904.02689. - - This head outputs the mask prototypes for YOLACT. - - Args: - in_channels (int): Number of channels in the input feature map. - proto_channels (tuple[int]): Output channels of protonet convs. - proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs. - include_last_relu (Bool): If keep the last relu of protonet. - num_protos (int): Number of prototypes. - num_classes (int): Number of categories excluding the background - category. - loss_mask_weight (float): Reweight the mask loss by this factor. - max_masks_to_train (int): Maximum number of masks to train for - each image. 
- """ - - def __init__(self, - num_classes, - in_channels=256, - proto_channels=(256, 256, 256, None, 256, 32), - proto_kernel_sizes=(3, 3, 3, -2, 3, 1), - include_last_relu=True, - num_protos=32, - loss_mask_weight=1.0, - max_masks_to_train=100): - super(YOLACTProtonet, self).__init__() - self.in_channels = in_channels - self.proto_channels = proto_channels - self.proto_kernel_sizes = proto_kernel_sizes - self.include_last_relu = include_last_relu - self.protonet = self._init_layers() - - self.loss_mask_weight = loss_mask_weight - self.num_protos = num_protos - self.num_classes = num_classes - self.max_masks_to_train = max_masks_to_train - self.fp16_enabled = False - - def _init_layers(self): - """A helper function to take a config setting and turn it into a - network.""" - # Possible patterns: - # ( 256, 3) -> conv - # ( 256,-2) -> deconv - # (None,-2) -> bilinear interpolate - in_channels = self.in_channels - protonets = nn.ModuleList() - for num_channels, kernel_size in zip(self.proto_channels, - self.proto_kernel_sizes): - if kernel_size > 0: - layer = nn.Conv2d( - in_channels, - num_channels, - kernel_size, - padding=kernel_size // 2) - else: - if num_channels is None: - layer = InterpolateModule( - scale_factor=-kernel_size, - mode='bilinear', - align_corners=False) - else: - layer = nn.ConvTranspose2d( - in_channels, - num_channels, - -kernel_size, - padding=kernel_size // 2) - protonets.append(layer) - protonets.append(nn.ReLU(inplace=True)) - in_channels = num_channels if num_channels is not None \ - else in_channels - if not self.include_last_relu: - protonets = protonets[:-1] - return nn.Sequential(*protonets) - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.protonet: - if isinstance(m, nn.Conv2d): - xavier_init(m, distribution='uniform') - - def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None): - """Forward feature from the upstream network to get prototypes and - linearly combine the prototypes, using masks coefficients, into - instance masks. Finally, crop the instance masks with given bboxes. - - Args: - x (Tensor): Feature from the upstream network, which is - a 4D-tensor. - coeff_pred (list[Tensor]): Mask coefficients for each scale - level with shape (N, num_anchors * num_protos, H, W). - bboxes (list[Tensor]): Box used for cropping with shape - (N, num_anchors * 4, H, W). During training, they are - ground truth boxes. During testing, they are predicted - boxes. - img_meta (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - sampling_results (List[:obj:``SamplingResult``]): Sampler results - for each image. - - Returns: - list[Tensor]: Predicted instance segmentation masks. 
- """ - prototypes = self.protonet(x) - prototypes = prototypes.permute(0, 2, 3, 1).contiguous() - - num_imgs = x.size(0) - # Training state - if self.training: - coeff_pred_list = [] - for coeff_pred_per_level in coeff_pred: - coeff_pred_per_level = \ - coeff_pred_per_level.permute(0, 2, 3, 1)\ - .reshape(num_imgs, -1, self.num_protos) - coeff_pred_list.append(coeff_pred_per_level) - coeff_pred = torch.cat(coeff_pred_list, dim=1) - - mask_pred_list = [] - for idx in range(num_imgs): - cur_prototypes = prototypes[idx] - cur_coeff_pred = coeff_pred[idx] - cur_bboxes = bboxes[idx] - cur_img_meta = img_meta[idx] - - # Testing state - if not self.training: - bboxes_for_cropping = cur_bboxes - else: - cur_sampling_results = sampling_results[idx] - pos_assigned_gt_inds = \ - cur_sampling_results.pos_assigned_gt_inds - bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone() - pos_inds = cur_sampling_results.pos_inds - cur_coeff_pred = cur_coeff_pred[pos_inds] - - # Linearly combine the prototypes with the mask coefficients - mask_pred = cur_prototypes @ cur_coeff_pred.t() - mask_pred = torch.sigmoid(mask_pred) - - h, w = cur_img_meta['img_shape'][:2] - bboxes_for_cropping[:, 0] /= w - bboxes_for_cropping[:, 1] /= h - bboxes_for_cropping[:, 2] /= w - bboxes_for_cropping[:, 3] /= h - - mask_pred = self.crop(mask_pred, bboxes_for_cropping) - mask_pred = mask_pred.permute(2, 0, 1).contiguous() - mask_pred_list.append(mask_pred) - return mask_pred_list - - @force_fp32(apply_to=('mask_pred', )) - def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results): - """Compute loss of the head. - - Args: - mask_pred (list[Tensor]): Predicted prototypes with shape - (num_classes, H, W). - gt_masks (list[Tensor]): Ground truth masks for each image with - the same shape of the input image. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - img_meta (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - sampling_results (List[:obj:``SamplingResult``]): Sampler results - for each image. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - loss_mask = [] - num_imgs = len(mask_pred) - total_pos = 0 - for idx in range(num_imgs): - cur_mask_pred = mask_pred[idx] - cur_gt_masks = gt_masks[idx].float() - cur_gt_bboxes = gt_bboxes[idx] - cur_img_meta = img_meta[idx] - cur_sampling_results = sampling_results[idx] - - pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds - num_pos = pos_assigned_gt_inds.size(0) - # Since we're producing (near) full image masks, - # it'd take too much vram to backprop on every single mask. - # Thus we select only a subset. - if num_pos > self.max_masks_to_train: - perm = torch.randperm(num_pos) - select = perm[:self.max_masks_to_train] - cur_mask_pred = cur_mask_pred[select] - pos_assigned_gt_inds = pos_assigned_gt_inds[select] - num_pos = self.max_masks_to_train - total_pos += num_pos - - gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds] - - mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks, - pos_assigned_gt_inds) - if num_pos == 0: - loss = cur_mask_pred.sum() * 0. 
- elif mask_targets is None: - loss = F.binary_cross_entropy(cur_mask_pred, - torch.zeros_like(cur_mask_pred), - torch.zeros_like(cur_mask_pred)) - else: - cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1) - loss = F.binary_cross_entropy( - cur_mask_pred, mask_targets, - reduction='none') * self.loss_mask_weight - - h, w = cur_img_meta['img_shape'][:2] - gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] - - gt_bboxes_for_reweight[:, 0]) / w - gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] - - gt_bboxes_for_reweight[:, 1]) / h - loss = loss.mean(dim=(1, - 2)) / gt_bboxes_width / gt_bboxes_height - loss = torch.sum(loss) - loss_mask.append(loss) - - if total_pos == 0: - total_pos += 1 # avoid nan - loss_mask = [x / total_pos for x in loss_mask] - - return dict(loss_mask=loss_mask) - - def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds): - """Compute instance segmentation targets for each image. - - Args: - mask_pred (Tensor): Predicted prototypes with shape - (num_classes, H, W). - gt_masks (Tensor): Ground truth masks for each image with - the same shape of the input image. - pos_assigned_gt_inds (Tensor): GT indices of the corresponding - positive samples. - Returns: - Tensor: Instance segmentation targets with shape - (num_instances, H, W). - """ - if gt_masks.size(0) == 0: - return None - mask_h, mask_w = mask_pred.shape[-2:] - gt_masks = F.interpolate( - gt_masks.unsqueeze(0), (mask_h, mask_w), - mode='bilinear', - align_corners=False).squeeze(0) - gt_masks = gt_masks.gt(0.5).float() - mask_targets = gt_masks[pos_assigned_gt_inds] - return mask_targets - - def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale): - """Resize, binarize, and format the instance mask predictions. - - Args: - mask_pred (Tensor): shape (N, H, W). - label_pred (Tensor): shape (N, ). - img_meta (dict): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If rescale is False, then returned masks will - fit the scale of imgs[0]. - Returns: - list[ndarray]: Mask predictions grouped by their predicted classes. - """ - ori_shape = img_meta['ori_shape'] - scale_factor = img_meta['scale_factor'] - if rescale: - img_h, img_w = ori_shape[:2] - else: - img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32) - img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32) - - cls_segms = [[] for _ in range(self.num_classes)] - if mask_pred.size(0) == 0: - return cls_segms - - mask_pred = F.interpolate( - mask_pred.unsqueeze(0), (img_h, img_w), - mode='bilinear', - align_corners=False).squeeze(0) > 0.5 - mask_pred = mask_pred.cpu().numpy().astype(np.uint8) - - for m, l in zip(mask_pred, label_pred): - cls_segms[l].append(m) - return cls_segms - - def crop(self, masks, boxes, padding=1): - """Crop predicted masks by zeroing out everything not in the predicted - bbox. - - Args: - masks (Tensor): shape [H, W, N]. - boxes (Tensor): bbox coords in relative point form with - shape [N, 4]. - - Return: - Tensor: The cropped masks. 
- """ - h, w, n = masks.size() - x1, x2 = self.sanitize_coordinates( - boxes[:, 0], boxes[:, 2], w, padding, cast=False) - y1, y2 = self.sanitize_coordinates( - boxes[:, 1], boxes[:, 3], h, padding, cast=False) - - rows = torch.arange( - w, device=masks.device, dtype=x1.dtype).view(1, -1, - 1).expand(h, w, n) - cols = torch.arange( - h, device=masks.device, dtype=x1.dtype).view(-1, 1, - 1).expand(h, w, n) - - masks_left = rows >= x1.view(1, 1, -1) - masks_right = rows < x2.view(1, 1, -1) - masks_up = cols >= y1.view(1, 1, -1) - masks_down = cols < y2.view(1, 1, -1) - - crop_mask = masks_left * masks_right * masks_up * masks_down - - return masks * crop_mask.float() - - def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True): - """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, - and x2 <= image_size. Also converts from relative to absolute - coordinates and casts the results to long tensors. - - Warning: this does things in-place behind the scenes so - copy if necessary. - - Args: - _x1 (Tensor): shape (N, ). - _x2 (Tensor): shape (N, ). - img_size (int): Size of the input image. - padding (int): x1 >= padding, x2 <= image_size-padding. - cast (bool): If cast is false, the result won't be cast to longs. - - Returns: - tuple: - x1 (Tensor): Sanitized _x1. - x2 (Tensor): Sanitized _x2. - """ - x1 = x1 * img_size - x2 = x2 * img_size - if cast: - x1 = x1.long() - x2 = x2.long() - x1 = torch.min(x1, x2) - x2 = torch.max(x1, x2) - x1 = torch.clamp(x1 - padding, min=0) - x2 = torch.clamp(x2 + padding, max=img_size) - return x1, x2 - - -class InterpolateModule(nn.Module): - """This is a module version of F.interpolate. - - Any arguments you give it just get passed along for the ride. - """ - - def __init__(self, *args, **kwargs): - super().__init__() - - self.args = args - self.kwargs = kwargs - - def forward(self, x): - """Forward features from the upstream network.""" - return F.interpolate(x, *self.args, **self.kwargs) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/builder.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/builder.py deleted file mode 100644 index 1f5b971252bfc971c3ffbaa27746d69b1d3ea9fd..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/builder.py +++ /dev/null @@ -1,46 +0,0 @@ -import warnings - -from annotator.uniformer.mmcv.cnn import MODELS as MMCV_MODELS -from annotator.uniformer.mmcv.utils import Registry - -MODELS = Registry('models', parent=MMCV_MODELS) - -BACKBONES = MODELS -NECKS = MODELS -HEADS = MODELS -LOSSES = MODELS -SEGMENTORS = MODELS - - -def build_backbone(cfg): - """Build backbone.""" - return BACKBONES.build(cfg) - - -def build_neck(cfg): - """Build neck.""" - return NECKS.build(cfg) - - -def build_head(cfg): - """Build head.""" - return HEADS.build(cfg) - - -def build_loss(cfg): - """Build loss.""" - return LOSSES.build(cfg) - - -def build_segmentor(cfg, train_cfg=None, test_cfg=None): - """Build segmentor.""" - if train_cfg is not None or test_cfg is not None: - warnings.warn( - 'train_cfg and test_cfg is deprecated, ' - 'please specify them in model', UserWarning) - assert cfg.get('train_cfg') is None or train_cfg is None, \ - 'train_cfg specified in both outer field and model field ' - assert cfg.get('test_cfg') is None or test_cfg is None, \ - 'test_cfg specified in both outer field and model field ' - return SEGMENTORS.build( - cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) 
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/cascade_decode_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/cascade_decode_head.py deleted file mode 100644 index d90f26da701087f56d9036bf034b06333b332c14..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/cascade_decode_head.py +++ /dev/null @@ -1,69 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv - * Copyright (c) OpenMMLab. All rights reserved. -''' - -from abc import ABCMeta, abstractmethod - -from .decode_head import BaseDecodeHead - - -class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta): - """Base class for cascade decode head used in - :class:`CascadeEncoderDecoder.""" - - def __init__(self, *args, **kwargs): - super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs) - - @abstractmethod - def forward(self, inputs, prev_output): - """Placeholder of forward function.""" - pass - - def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, - train_cfg): - """Forward function for training. - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. - train_cfg (dict): The training config. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - seg_logits = self.forward(inputs, prev_output) - losses = self.losses(seg_logits, gt_semantic_seg) - - return losses - - def forward_test(self, inputs, prev_output, img_metas, test_cfg): - """Forward function for testing. - - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - test_cfg (dict): The testing config. - - Returns: - Tensor: Output segmentation map. 
- """ - return self.forward(inputs, prev_output) diff --git a/spaces/adpro/Stable-Diffusion-Side-by-Side01/README.md b/spaces/adpro/Stable-Diffusion-Side-by-Side01/README.md deleted file mode 100644 index 8281e7ad61f790a4befc97e1ea87b1f47040dbc6..0000000000000000000000000000000000000000 --- a/spaces/adpro/Stable-Diffusion-Side-by-Side01/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Stable Diffusion On Intel CPUs -emoji: 🏢 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: wenjiao/Stable-Diffusion-Side-by-Side ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/akdeniz27/turkish-pos-tagging-with-xlm_roberta/app.py b/spaces/akdeniz27/turkish-pos-tagging-with-xlm_roberta/app.py deleted file mode 100644 index d8f0b1b0af846822906b4f371e200c49bd8fb5c8..0000000000000000000000000000000000000000 --- a/spaces/akdeniz27/turkish-pos-tagging-with-xlm_roberta/app.py +++ /dev/null @@ -1,69 +0,0 @@ -# Turkish POS Tagging with XLM-RoBERTa Model - -from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer -import sentencepiece -import streamlit as st -import pandas as pd - - -text_1 = "Mustafa Kemal Atatürk 1881 yılında Selanik'te doğdu." - -text_2 = """Dünya çapında 40 milyondan fazla insana bulaşan ve 1.1 milyondan fazla insanın ölümüne sebep olan \ -corona virüsüne karşı Pfizer ile BioNTech'in geliştirdiği aşının ilk görüntüleri ortaya çıktı. Aşının fabrikadaki \ -ilk görüntülerini değerlendiren Pfizer'ın Birleşik Krallık CEO'su, "Üretim bandında aşıyı görmek beni neşelendirdi" \ -dedi. ABD merkezli çokuluslu ilaç şirketi Pfizer ile Türk bilim insanlarının kurduğu BioNTech’in geliştirdiği corona \ -virüsü aşısında sona gelindi. Pfizer, paylaştığı video ile bütün dünyayı heyecanlandıran gelişmeyi duyurdu. Şirket, \ -Belçika’daki Puurs’ta geliştirilen Covid-19 aşılarının seri üretim bandındaki üretim aşamasını uluslararası kamuoyu \ -ile paylaştı. Almanya’nın Mainz kentinde Türk profesör Uğur Şahin ile eşi Özlem Türeci’nin kurduğu ve yönettiği \ -biyoteknoloji şirketi BioNTech ile aşı sürecini sürdüren Pfizer’ın küçük şişelerde binlerce corona virüsü aşısı \ -üretmeye başladığı belirtildi. 
Pfizer, aşının güvenli ve etkili olduğunun klinik olarak da kanıtlanması ve resmi \ -mercilerden de onay alınması durumunda üretilen aşının dağıtılacağını duyurdu.""" - - -st.title("Demo for Turkish POS Tagging with XLM-RoBERTa") -st.sidebar.write("Model : XLM-RoBERTa base Universal Dependencies v2.8 POS tagging: Turkish") -st.sidebar.write("For details of model: 'https://huggingface.co/wietsedv/xlm-roberta-base-ft-udpos28-tr/") -# st.sidebar.write("Please refer 'https://huggingface.co/transformers/_modules/transformers/pipelines/token_classification.html' for entity grouping with aggregation_strategy parameter.") -st.sidebar.write("For explanation of POS tags: https://universaldependencies.org/u/pos/") - -model_checkpoint = "wietsedv/xlm-roberta-base-ft-udpos28-tr" -aggregation = "simple" - -st.subheader("Select Text") -context_1 = st.text_area("Text #1", text_1, height=128) -context_2 = st.text_area("Text #2", text_2, height=128) -context_3 = st.text_area("New Text", value="", height=128) - -context = st.radio("Select Text", ("Text #1", "Text #2", "New Text")) - -if context == "Text #1": - input_text = context_1 -elif context == "Text #2": - input_text = context_2 -elif context == "New Text": - input_text = context_3 - -@st.cache(allow_output_mutation=True) -def setModel(model_checkpoint, aggregation): - model = AutoModelForTokenClassification.from_pretrained(model_checkpoint) - tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) - return pipeline('token-classification', model=model, tokenizer=tokenizer, aggregation_strategy=aggregation) - -Run_Button = st.button("Run", key=None) -if Run_Button == True: - - ner_pipeline = setModel(model_checkpoint, aggregation) - output = ner_pipeline(input_text) - - df = pd.DataFrame.from_dict(output) - - if aggregation != "none": - df.rename(index=str,columns={'entity_group':'POS Tag'},inplace=True) - else: - df.rename(index=str,columns={'entity_group':'POS Tag'},inplace=True) - - cols_to_keep = ['word','POS Tag','score','start','end'] - df_final = df[cols_to_keep] - - st.subheader("POS Tags") - st.dataframe(df_final) diff --git a/spaces/akhaliq/GPEN/sr_model/arch_util.py b/spaces/akhaliq/GPEN/sr_model/arch_util.py deleted file mode 100644 index ce5b9d92f418d3f8b5b8887a24491f65660b33f9..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/GPEN/sr_model/arch_util.py +++ /dev/null @@ -1,125 +0,0 @@ -import math -import torch -from torch import nn as nn -from torch.nn import functional as F -from torch.nn import init as init -from torch.nn.modules.batchnorm import _BatchNorm - -@torch.no_grad() -def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs): - """Initialize network weights. - - Args: - module_list (list[nn.Module] | nn.Module): Modules to be initialized. - scale (float): Scale initialized weights, especially for residual - blocks. Default: 1. - bias_fill (float): The value to fill bias. Default: 0 - kwargs (dict): Other arguments for initialization function. 
- """ - if not isinstance(module_list, list): - module_list = [module_list] - for module in module_list: - for m in module.modules(): - if isinstance(m, nn.Conv2d): - init.kaiming_normal_(m.weight, **kwargs) - m.weight.data *= scale - if m.bias is not None: - m.bias.data.fill_(bias_fill) - elif isinstance(m, nn.Linear): - init.kaiming_normal_(m.weight, **kwargs) - m.weight.data *= scale - if m.bias is not None: - m.bias.data.fill_(bias_fill) - elif isinstance(m, _BatchNorm): - init.constant_(m.weight, 1) - if m.bias is not None: - m.bias.data.fill_(bias_fill) - - -def make_layer(basic_block, num_basic_block, **kwarg): - """Make layers by stacking the same blocks. - - Args: - basic_block (nn.module): nn.module class for basic block. - num_basic_block (int): number of blocks. - - Returns: - nn.Sequential: Stacked blocks in nn.Sequential. - """ - layers = [] - for _ in range(num_basic_block): - layers.append(basic_block(**kwarg)) - return nn.Sequential(*layers) - - -class ResidualBlockNoBN(nn.Module): - """Residual block without BN. - - It has a style of: - ---Conv-ReLU-Conv-+- - |________________| - - Args: - num_feat (int): Channel number of intermediate features. - Default: 64. - res_scale (float): Residual scale. Default: 1. - pytorch_init (bool): If set to True, use pytorch default init, - otherwise, use default_init_weights. Default: False. - """ - - def __init__(self, num_feat=64, res_scale=1, pytorch_init=False): - super(ResidualBlockNoBN, self).__init__() - self.res_scale = res_scale - self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True) - self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True) - self.relu = nn.ReLU(inplace=True) - - if not pytorch_init: - default_init_weights([self.conv1, self.conv2], 0.1) - - def forward(self, x): - identity = x - out = self.conv2(self.relu(self.conv1(x))) - return identity + out * self.res_scale - - -class Upsample(nn.Sequential): - """Upsample module. - - Args: - scale (int): Scale factor. Supported scales: 2^n and 3. - num_feat (int): Channel number of intermediate features. - """ - - def __init__(self, scale, num_feat): - m = [] - if (scale & (scale - 1)) == 0: # scale = 2^n - for _ in range(int(math.log(scale, 2))): - m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(2)) - elif scale == 3: - m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(3)) - else: - raise ValueError(f'scale {scale} is not supported. ' - 'Supported scales: 2^n and 3.') - super(Upsample, self).__init__(*m) - -# TODO: may write a cpp file -def pixel_unshuffle(x, scale): - """ Pixel unshuffle. - - Args: - x (Tensor): Input feature with shape (b, c, hh, hw). - scale (int): Downsample ratio. - - Returns: - Tensor: the pixel unshuffled feature. - """ - b, c, hh, hw = x.size() - out_channel = c * (scale**2) - assert hh % scale == 0 and hw % scale == 0 - h = hh // scale - w = hw // scale - x_view = x.view(b, c, h, scale, w, scale) - return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w) \ No newline at end of file diff --git a/spaces/akhaliq/stylegan3_clip/metrics/__init__.py b/spaces/akhaliq/stylegan3_clip/metrics/__init__.py deleted file mode 100644 index 8dd34882519598c472f1224cfe68c9ff6952ce69..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/stylegan3_clip/metrics/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -# empty diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/network/utils.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/network/utils.py deleted file mode 100644 index 094cf1b4a97378c669f3440566e532fa8ef4535c..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/network/utils.py +++ /dev/null @@ -1,96 +0,0 @@ -from typing import Dict, Iterator - -from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response - -from pip._internal.exceptions import NetworkConnectionError - -# The following comments and HTTP headers were originally added by -# Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03. -# -# We use Accept-Encoding: identity here because requests defaults to -# accepting compressed responses. This breaks in a variety of ways -# depending on how the server is configured. -# - Some servers will notice that the file isn't a compressible file -# and will leave the file alone and with an empty Content-Encoding -# - Some servers will notice that the file is already compressed and -# will leave the file alone, adding a Content-Encoding: gzip header -# - Some servers won't notice anything at all and will take a file -# that's already been compressed and compress it again, and set -# the Content-Encoding: gzip header -# By setting this to request only the identity encoding we're hoping -# to eliminate the third case. Hopefully there does not exist a server -# which when given a file will notice it is already compressed and that -# you're not asking for a compressed file and will then decompress it -# before sending because if that's the case I don't think it'll ever be -# possible to make this work. -HEADERS: Dict[str, str] = {"Accept-Encoding": "identity"} - - -def raise_for_status(resp: Response) -> None: - http_error_msg = "" - if isinstance(resp.reason, bytes): - # We attempt to decode utf-8 first because some servers - # choose to localize their reason strings. If the string - # isn't utf-8, we fall back to iso-8859-1 for all other - # encodings. - try: - reason = resp.reason.decode("utf-8") - except UnicodeDecodeError: - reason = resp.reason.decode("iso-8859-1") - else: - reason = resp.reason - - if 400 <= resp.status_code < 500: - http_error_msg = ( - f"{resp.status_code} Client Error: {reason} for url: {resp.url}" - ) - - elif 500 <= resp.status_code < 600: - http_error_msg = ( - f"{resp.status_code} Server Error: {reason} for url: {resp.url}" - ) - - if http_error_msg: - raise NetworkConnectionError(http_error_msg, response=resp) - - -def response_chunks( - response: Response, chunk_size: int = CONTENT_CHUNK_SIZE -) -> Iterator[bytes]: - """Given a requests Response, provide the data chunks.""" - try: - # Special case for urllib3. - for chunk in response.raw.stream( - chunk_size, - # We use decode_content=False here because we don't - # want urllib3 to mess with the raw bytes we get - # from the server. If we decompress inside of - # urllib3 then we cannot verify the checksum - # because the checksum will be of the compressed - # file. 
This breakage will only occur if the - # server adds a Content-Encoding header, which - # depends on how the server was configured: - # - Some servers will notice that the file isn't a - # compressible file and will leave the file alone - # and with an empty Content-Encoding - # - Some servers will notice that the file is - # already compressed and will leave the file - # alone and will add a Content-Encoding: gzip - # header - # - Some servers won't notice anything at all and - # will take a file that's already been compressed - # and compress it again and set the - # Content-Encoding: gzip header - # - # By setting this not to decode automatically we - # hope to eliminate problems with the second case. - decode_content=False, - ): - yield chunk - except AttributeError: - # Standard file-like object. - while True: - chunk = response.raw.read(chunk_size) - if not chunk: - break - yield chunk diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/docs/infinibatch/torch/index.html b/spaces/aliabd/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/docs/infinibatch/torch/index.html deleted file mode 100644 index 6468d9bc5da8da7fad63dee970ec8b1339134a10..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/docs/infinibatch/torch/index.html +++ /dev/null @@ -1,65 +0,0 @@ - - - - - - -infinibatch.torch API documentation - - - - - - - - - -
    - - -
    - - - - - \ No newline at end of file diff --git a/spaces/alistairmcleay/cambridge-masters-project/scripts/UBAR_code/preprocess2.1.py b/spaces/alistairmcleay/cambridge-masters-project/scripts/UBAR_code/preprocess2.1.py deleted file mode 100644 index 8fa75f9c338269130f3d97265dba0ca0f6d6cf13..0000000000000000000000000000000000000000 --- a/spaces/alistairmcleay/cambridge-masters-project/scripts/UBAR_code/preprocess2.1.py +++ /dev/null @@ -1,585 +0,0 @@ -import copy -import json -import os -import re -import zipfile -from collections import OrderedDict - -import spacy -from tqdm import tqdm - -from crazyneuraluser.UBAR_code import ontology, utils -from crazyneuraluser.UBAR_code.clean_dataset import clean_slot_values, clean_text -from crazyneuraluser.UBAR_code.config import global_config as cfg -from crazyneuraluser.UBAR_code.db_ops import MultiWozDB - - -def get_db_values( - value_set_path, -): # value_set.json, all the domain[slot] values in datasets - processed = {} - bspn_word = [] - nlp = spacy.load("en_core_web_sm") - - with open(value_set_path, "r") as f: # read value set file in lower - value_set = json.loads(f.read().lower()) - - with open("db/ontology.json", "r") as f: # read ontology in lower, all the domain-slot values - otlg = json.loads(f.read().lower()) - - for ( - domain, - slots, - ) in value_set.items(): # add all informable slots to bspn_word, create lists holder for values - processed[domain] = {} - bspn_word.append("[" + domain + "]") - for slot, values in slots.items(): - s_p = ontology.normlize_slot_names.get(slot, slot) - if s_p in ontology.informable_slots[domain]: - bspn_word.append(s_p) - processed[domain][s_p] = [] - - for ( - domain, - slots, - ) in value_set.items(): # add all words of values of informable slots to bspn_word - for slot, values in slots.items(): - s_p = ontology.normlize_slot_names.get(slot, slot) - if s_p in ontology.informable_slots[domain]: - for v in values: - _, v_p = clean_slot_values(domain, slot, v) - v_p = " ".join([token.text for token in nlp(v_p)]).strip() - processed[domain][s_p].append(v_p) - for x in v_p.split(): - if x not in bspn_word: - bspn_word.append(x) - - for domain_slot, values in otlg.items(): # split domain-slots to domains and slots - domain, slot = domain_slot.split("-") - if domain == "bus": - domain = "taxi" - if slot == "price range": - slot = "pricerange" - if slot == "book stay": - slot = "stay" - if slot == "book day": - slot = "day" - if slot == "book people": - slot = "people" - if slot == "book time": - slot = "time" - if slot == "arrive by": - slot = "arrive" - if slot == "leave at": - slot = "leave" - if slot == "leaveat": - slot = "leave" - if slot not in processed[domain]: # add all slots and words of values if not already in processed and bspn_word - processed[domain][slot] = [] - bspn_word.append(slot) - for v in values: - _, v_p = clean_slot_values(domain, slot, v) - v_p = " ".join([token.text for token in nlp(v_p)]).strip() - if v_p not in processed[domain][slot]: - processed[domain][slot].append(v_p) - for x in v_p.split(): - if x not in bspn_word: - bspn_word.append(x) - - with open(value_set_path.replace(".json", "_processed.json"), "w") as f: - json.dump(processed, f, indent=2) # save processed.json - with open("data/preprocessed/UBAR/multi-woz-processed/bspn_word_collection.json", "w") as f: - json.dump(bspn_word, f, indent=2) # save bspn_word - - print("DB value set processed! 
") - - -def preprocess_db(db_paths): # apply clean_slot_values to all dbs - dbs = {} - nlp = spacy.load("en_core_web_sm") - for domain in ontology.all_domains: - with open(db_paths[domain], "r") as f: # for every db_domain, read json file - dbs[domain] = json.loads(f.read().lower()) - for idx, entry in enumerate(dbs[domain]): # entry has information about slots of said domain - new_entry = copy.deepcopy(entry) - for key, value in entry.items(): # key = slot - if type(value) is not str: - continue - del new_entry[key] - key, value = clean_slot_values(domain, key, value) - tokenize_and_back = " ".join([token.text for token in nlp(value)]).strip() - new_entry[key] = tokenize_and_back - dbs[domain][idx] = new_entry - with open(db_paths[domain].replace(".json", "_processed.json"), "w") as f: - json.dump(dbs[domain], f, indent=2) - print("[%s] DB processed! " % domain) - - -# 2.1 -class DataPreprocessor(object): - def __init__(self): - self.nlp = spacy.load("en_core_web_sm") - self.db = MultiWozDB(cfg.dbs) # load all processed dbs - # data_path = 'data/multi-woz/annotated_user_da_with_span_full.json' - data_path = "data/raw/UBAR/MultiWOZ_2.1/data.json" - archive = zipfile.ZipFile(data_path + ".zip", "r") - self.convlab_data = json.loads(archive.open(data_path.split("/")[-1], "r").read().lower()) - # self.delex_sg_valdict_path = 'data/multi-woz-processed/delex_single_valdict.json' - # self.delex_mt_valdict_path = 'data/multi-woz-processed/delex_multi_valdict.json' - # self.ambiguous_val_path = 'data/multi-woz-processed/ambiguous_values.json' - # self.delex_refs_path = 'data/multi-woz-processed/reference_no.json' - self.delex_sg_valdict_path = "data/preprocessed/UBAR/multi-woz-2.1-processed/delex_single_valdict.json" - self.delex_mt_valdict_path = "data/preprocessed/UBAR/multi-woz-2.1-processed/delex_multi_valdict.json" - self.ambiguous_val_path = "data/preprocessed/UBAR/multi-woz-2.1-processed/ambiguous_values.json" - self.delex_refs_path = "data/preprocessed/UBAR/multi-woz-2.1-processed/reference_no.json" - self.delex_refs = json.loads(open(self.delex_refs_path, "r").read()) - if not os.path.exists(self.delex_sg_valdict_path): - ( - self.delex_sg_valdict, - self.delex_mt_valdict, - self.ambiguous_vals, - ) = self.get_delex_valdict() - else: - self.delex_sg_valdict = json.loads(open(self.delex_sg_valdict_path, "r").read()) - self.delex_mt_valdict = json.loads(open(self.delex_mt_valdict_path, "r").read()) - self.ambiguous_vals = json.loads(open(self.ambiguous_val_path, "r").read()) - - self.vocab = utils.Vocab(cfg.vocab_size) - - def delex_by_annotation(self, dial_turn): - # add by yyy in 13:48 0803 - u = dial_turn["text"].split() - # u = my_clean_text(dial_turn['text']).split() - ## - span = dial_turn["span_info"] - for s in span: - slot = s[1] - if slot == "open": - continue - if ontology.da_abbr_to_slot_name.get(slot): - slot = ontology.da_abbr_to_slot_name[slot] - for idx in range(s[3], s[4] + 1): - u[idx] = "" - try: - u[s[3]] = "[value_" + slot + "]" - except Exception: - u[5] = "[value_" + slot + "]" - u_delex = " ".join([t for t in u if t != ""]) - u_delex = u_delex.replace("[value_address] , [value_address] , [value_address]", "[value_address]") - u_delex = u_delex.replace("[value_address] , [value_address]", "[value_address]") - u_delex = u_delex.replace("[value_name] [value_name]", "[value_name]") - u_delex = u_delex.replace("[value_name]([value_phone] )", "[value_name] ( [value_phone] )") - return u_delex - - def delex_by_valdict(self, text): - text = clean_text(text) - - text = 
re.sub(r"\d{5}\s?\d{5,7}", "[value_phone]", text) - text = re.sub(r"\d[\s-]stars?", "[value_stars]", text) - text = re.sub(r"\$\d+|\$?\d+.?(\d+)?\s(pounds?|gbps?)", "[value_price]", text) - text = re.sub(r"tr[\d]{4}", "[value_id]", text) - text = re.sub( - r"([a-z]{1}[\. ]?[a-z]{1}[\. ]?\d{1,2}[, ]+\d{1}[\. ]?[a-z]{1}[\. ]?[a-z]{1}|[a-z]{2}\d{2}[a-z]{2})", - "[value_postcode]", - text, - ) - - for value, slot in self.delex_mt_valdict.items(): - text = text.replace(value, "[value_%s]" % slot) - - for value, slot in self.delex_sg_valdict.items(): - tokens = text.split() - for idx, tk in enumerate(tokens): - if tk == value: - tokens[idx] = "[value_%s]" % slot - text = " ".join(tokens) - - for ambg_ent in self.ambiguous_vals: - start_idx = text.find(" " + ambg_ent) # ely is a place, but appears in words like moderately - if start_idx == -1: - continue - front_words = text[:start_idx].split() - ent_type = "time" if ":" in ambg_ent else "place" - - for fw in front_words[::-1]: - if fw in [ - "arrive", - "arrives", - "arrived", - "arriving", - "arrival", - "destination", - "there", - "reach", - "to", - "by", - "before", - ]: - slot = "[value_arrive]" if ent_type == "time" else "[value_destination]" - text = re.sub(" " + ambg_ent, " " + slot, text) - elif fw in [ - "leave", - "leaves", - "leaving", - "depart", - "departs", - "departing", - "departure", - "from", - "after", - "pulls", - ]: - slot = "[value_leave]" if ent_type == "time" else "[value_departure]" - text = re.sub(" " + ambg_ent, " " + slot, text) - - text = text.replace("[value_car] [value_car]", "[value_car]") - return text - - def get_delex_valdict( - self, - ): - skip_entry_type = { - "taxi": ["taxi_phone"], - "police": ["id"], - "hospital": ["id"], - "hotel": [ - "id", - "location", - "internet", - "parking", - "takesbookings", - "stars", - "price", - "n", - "postcode", - "phone", - ], - "attraction": [ - "id", - "location", - "pricerange", - "price", - "openhours", - "postcode", - "phone", - ], - "train": ["price", "id"], - "restaurant": [ - "id", - "location", - "introduction", - "signature", - "type", - "postcode", - "phone", - ], - } - entity_value_to_slot = {} - ambiguous_entities = [] - for domain, db_data in self.db.dbs.items(): - print("Processing entity values in [%s]" % domain) - if domain != "taxi": - for db_entry in db_data: - for slot, value in db_entry.items(): - if slot not in skip_entry_type[domain]: - if type(value) is not str: - raise TypeError("value '%s' in domain '%s' should be rechecked" % (slot, domain)) - else: - slot, value = clean_slot_values(domain, slot, value) - value = " ".join([token.text for token in self.nlp(value)]).strip() - if value in entity_value_to_slot and entity_value_to_slot[value] != slot: - # print(value, ": ",entity_value_to_slot[value], slot) - ambiguous_entities.append(value) - entity_value_to_slot[value] = slot - else: # taxi db specific - db_entry = db_data[0] - for slot, ent_list in db_entry.items(): - if slot not in skip_entry_type[domain]: - for ent in ent_list: - entity_value_to_slot[ent] = "car" - ambiguous_entities = set(ambiguous_entities) - ambiguous_entities.remove("cambridge") - ambiguous_entities = list(ambiguous_entities) - for amb_ent in ambiguous_entities: # departure or destination? arrive time or leave time? 
- entity_value_to_slot.pop(amb_ent) - entity_value_to_slot["parkside"] = "address" - entity_value_to_slot["parkside, cambridge"] = "address" - entity_value_to_slot["cambridge belfry"] = "name" - entity_value_to_slot["hills road"] = "address" - entity_value_to_slot["hills rd"] = "address" - entity_value_to_slot["Parkside Police Station"] = "name" - - single_token_values = {} - multi_token_values = {} - for val, slt in entity_value_to_slot.items(): - if val in ["cambridge"]: - continue - if len(val.split()) > 1: - multi_token_values[val] = slt - else: - single_token_values[val] = slt - - with open(self.delex_sg_valdict_path, "w") as f: - single_token_values = OrderedDict( - sorted(single_token_values.items(), key=lambda kv: len(kv[0]), reverse=True) - ) - json.dump(single_token_values, f, indent=2) - print("single delex value dict saved!") - with open(self.delex_mt_valdict_path, "w") as f: - multi_token_values = OrderedDict( - sorted(multi_token_values.items(), key=lambda kv: len(kv[0]), reverse=True) - ) - json.dump(multi_token_values, f, indent=2) - print("multi delex value dict saved!") - with open(self.ambiguous_val_path, "w") as f: - json.dump(ambiguous_entities, f, indent=2) - print("ambiguous value dict saved!") - - return single_token_values, multi_token_values, ambiguous_entities - - def preprocess_main(self, save_path=None, is_test=False): - """ """ - data = {} - count = 0 - self.unique_da = {} - ordered_sysact_dict = {} - # yyy - for fn, raw_dial in tqdm(list(self.convlab_data.items())): - if fn in [ - "pmul4707.json", - "pmul2245.json", - "pmul4776.json", - "pmul3872.json", - "pmul4859.json", - ]: - continue - count += 1 - # if count == 100: - # break - - compressed_goal = {} # for every dialog, keep track the goal, domains, requests - dial_domains, dial_reqs = [], [] - for dom, g in raw_dial["goal"].items(): - if dom != "topic" and dom != "message" and g: - if g.get("reqt"): # request info. eg. 
postcode/address/phone - for i, req_slot in enumerate(g["reqt"]): # normalize request slots - if ontology.normlize_slot_names.get(req_slot): - g["reqt"][i] = ontology.normlize_slot_names[req_slot] - dial_reqs.append(g["reqt"][i]) - compressed_goal[dom] = g - if dom in ontology.all_domains: - dial_domains.append(dom) - - dial_reqs = list(set(dial_reqs)) - - dial = {"goal": compressed_goal, "log": []} - single_turn = {} - constraint_dict = OrderedDict() - prev_constraint_dict = {} - prev_turn_domain = ["general"] - ordered_sysact_dict[fn] = {} - - for turn_num, dial_turn in enumerate(raw_dial["log"]): - # for user turn, have text - # sys turn: text, belief states(metadata), dialog_act, span_info - dial_state = dial_turn["metadata"] - dial_turn["text"] = " ".join([t.text for t in self.nlp(dial_turn["text"])]) - if not dial_state: # user - # delexicalize user utterance, either by annotation or by val_dict - u = " ".join(clean_text(dial_turn["text"]).split()) - if "span_info" in dial_turn and dial_turn["span_info"]: - u_delex = clean_text(self.delex_by_annotation(dial_turn)) - else: - u_delex = self.delex_by_valdict(dial_turn["text"]) - - single_turn["user"] = u - single_turn["user_delex"] = u_delex - - else: # system - # delexicalize system response, either by annotation or by val_dict - if "span_info" in dial_turn and dial_turn["span_info"]: - s_delex = clean_text(self.delex_by_annotation(dial_turn)) - else: - if not dial_turn["text"]: - print(fn) - s_delex = self.delex_by_valdict(dial_turn["text"]) - single_turn["resp"] = s_delex - single_turn["nodelx_resp"] = " ".join(clean_text(dial_turn["text"]).split()) - - # get belief state, semi=informable/book=requestable, put into constraint_dict - for domain in dial_domains: - if not constraint_dict.get(domain): - constraint_dict[domain] = OrderedDict() - info_sv = dial_state[domain]["semi"] - for s, v in info_sv.items(): - s, v = clean_slot_values(domain, s, v) - if len(v.split()) > 1: - v = " ".join([token.text for token in self.nlp(v)]).strip() - if v != "": - constraint_dict[domain][s] = v - book_sv = dial_state[domain]["book"] - for s, v in book_sv.items(): - if s == "booked": - continue - s, v = clean_slot_values(domain, s, v) - if len(v.split()) > 1: - v = " ".join([token.text for token in self.nlp(v)]).strip() - if v != "": - constraint_dict[domain][s] = v - - constraints = [] # list in format of [domain] slot value - cons_delex = [] - turn_dom_bs = [] - for domain, info_slots in constraint_dict.items(): - if info_slots: - constraints.append("[" + domain + "]") - cons_delex.append("[" + domain + "]") - for slot, value in info_slots.items(): - constraints.append(slot) - constraints.extend(value.split()) - cons_delex.append(slot) - if domain not in prev_constraint_dict: - turn_dom_bs.append(domain) - elif prev_constraint_dict[domain] != constraint_dict[domain]: - turn_dom_bs.append(domain) - - sys_act_dict = {} - turn_dom_da = set() - for act in dial_turn["dialog_act"]: - d, a = act.split("-") # split domain-act - turn_dom_da.add(d) - turn_dom_da = list(turn_dom_da) - if len(turn_dom_da) != 1 and "general" in turn_dom_da: - turn_dom_da.remove("general") - if len(turn_dom_da) != 1 and "booking" in turn_dom_da: - turn_dom_da.remove("booking") - - # get turn domain - turn_domain = turn_dom_bs - for dom in turn_dom_da: - if dom != "booking" and dom not in turn_domain: - turn_domain.append(dom) - if not turn_domain: - turn_domain = prev_turn_domain - if len(turn_domain) == 2 and "general" in turn_domain: - turn_domain.remove("general") - if 
len(turn_domain) == 2: - if len(prev_turn_domain) == 1 and prev_turn_domain[0] == turn_domain[1]: - turn_domain = turn_domain[::-1] - - # get system action - for dom in turn_domain: - sys_act_dict[dom] = {} - add_to_last_collect = [] - booking_act_map = {"inform": "offerbook", "book": "offerbooked"} - for act, params in dial_turn["dialog_act"].items(): - if act == "general-greet": - continue - d, a = act.split("-") - if d == "general" and d not in sys_act_dict: - sys_act_dict[d] = {} - if d == "booking": - d = turn_domain[0] - a = booking_act_map.get(a, a) - add_p = [] - for param in params: - p = param[0] - if p == "none": - continue - elif ontology.da_abbr_to_slot_name.get(p): - p = ontology.da_abbr_to_slot_name[p] - if p not in add_p: - add_p.append(p) - add_to_last = True if a in ["request", "reqmore", "bye", "offerbook"] else False - if add_to_last: - add_to_last_collect.append((d, a, add_p)) - else: - sys_act_dict[d][a] = add_p - for d, a, add_p in add_to_last_collect: - sys_act_dict[d][a] = add_p - - for d in copy.copy(sys_act_dict): - acts = sys_act_dict[d] - if not acts: - del sys_act_dict[d] - if "inform" in acts and "offerbooked" in acts: - for s in sys_act_dict[d]["inform"]: - sys_act_dict[d]["offerbooked"].append(s) - del sys_act_dict[d]["inform"] - - ordered_sysact_dict[fn][len(dial["log"])] = sys_act_dict - - sys_act = [] - if "general-greet" in dial_turn["dialog_act"]: - sys_act.extend(["[general]", "[greet]"]) - for d, acts in sys_act_dict.items(): - sys_act += ["[" + d + "]"] - for a, slots in acts.items(): - self.unique_da[d + "-" + a] = 1 - sys_act += ["[" + a + "]"] - sys_act += slots - - # get db pointers - matnums = self.db.get_match_num(constraint_dict) - match_dom = turn_domain[0] if len(turn_domain) == 1 else turn_domain[1] - match = matnums[match_dom] - dbvec = self.db.addDBPointer(match_dom, match) - bkvec = self.db.addBookingPointer(dial_turn["dialog_act"]) - - single_turn["pointer"] = ",".join( - [str(d) for d in dbvec + bkvec] - ) # 4 database pointer for domains, 2 for booking - single_turn["match"] = str(match) - single_turn["constraint"] = " ".join(constraints) - single_turn["cons_delex"] = " ".join(cons_delex) - single_turn["sys_act"] = " ".join(sys_act) - single_turn["turn_num"] = len(dial["log"]) - single_turn["turn_domain"] = " ".join(["[" + d + "]" for d in turn_domain]) - - prev_turn_domain = copy.deepcopy(turn_domain) - prev_constraint_dict = copy.deepcopy(constraint_dict) - - if "user" in single_turn: - dial["log"].append(single_turn) - for t in single_turn["user"].split() + single_turn["resp"].split() + constraints + sys_act: - self.vocab.add_word(t) - for t in single_turn["user_delex"].split(): - if "[" in t and "]" in t and not t.startswith("[") and not t.endswith("]"): - single_turn["user_delex"].replace(t, t[t.index("[") : t.index("]") + 1]) - elif not self.vocab.has_word(t): - self.vocab.add_word(t) - - single_turn = {} - - data[fn] = dial - # pprint(dial) - # if count == 20: - # break - self.vocab.construct() - self.vocab.save_vocab("data/preprocessed/UBAR/multi-woz-2.1-processed/vocab") - with open("data/interim/multi-woz-2.1-analysis/dialog_acts.json", "w") as f: - json.dump(ordered_sysact_dict, f, indent=2) - with open("data/interim/multi-woz-2.1-analysis/dialog_act_type.json", "w") as f: - json.dump(self.unique_da, f, indent=2) - return data - - -if __name__ == "__main__": - db_paths = { - "attraction": "db/raw/attraction_db.json", - "hospital": "db/raw/hospital_db.json", - "hotel": "db/raw/hotel_db.json", - "police": 
"db/raw/police_db.json", - "restaurant": "db/raw/restaurant_db.json", - "taxi": "db/raw/taxi_db.json", - "train": "db/raw/train_db.json", - } - # get_db_values('db/value_set.json') # - # preprocess_db(db_paths) - if not os.path.exists("data/preprocessed/UBAR/multi-woz-2.1-processed"): - os.mkdir("data/preprocessed/UBAR/multi-woz-2.1-processed") - dh = DataPreprocessor() - data = dh.preprocess_main() - - with open("data/preprocessed/UBAR/multi-woz-2.1-processed/data_for_ubar.json", "w") as f: - json.dump(data, f, indent=2) diff --git a/spaces/anaclaudia13ct/insect_detection/utils/loggers/wandb/log_dataset.py b/spaces/anaclaudia13ct/insect_detection/utils/loggers/wandb/log_dataset.py deleted file mode 100644 index 06e81fb693072c99703e5c52b169892b7fd9a8cc..0000000000000000000000000000000000000000 --- a/spaces/anaclaudia13ct/insect_detection/utils/loggers/wandb/log_dataset.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse - -from wandb_utils import WandbLogger - -from utils.general import LOGGER - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused - if not logger.wandb: - LOGGER.info("install wandb using `pip install wandb` to log the dataset") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') - - opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload job - - create_dataset_artifact(opt) diff --git a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/g4f/Provider/Providers/DeepAi.py b/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/g4f/Provider/Providers/DeepAi.py deleted file mode 100644 index 02b08120ec8ef50c91c9237047a4f36c822a7bfc..0000000000000000000000000000000000000000 --- a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/g4f/Provider/Providers/DeepAi.py +++ /dev/null @@ -1,46 +0,0 @@ -import os -import json -import random -import hashlib -import requests - -from ...typing import sha256, Dict, get_type_hints - -url = 'https://deepai.org' -model = ['gpt-3.5-turbo'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - def md5(text: str) -> str: - return hashlib.md5(text.encode()).hexdigest()[::-1] - - - def get_api_key(user_agent: str) -> str: - part1 = str(random.randint(0, 10**11)) - part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x"))) - - return f"tryit-{part1}-{part2}" - - user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36' - - headers = { - "api-key": get_api_key(user_agent), - "user-agent": user_agent - } - - files = { - "chat_style": (None, "chat"), - "chatHistory": (None, json.dumps(messages)) - } - - r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True) - - for chunk in r.iter_content(chunk_size=None): - r.raise_for_status() - yield chunk.decode() - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: 
{get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/anubhavmaity/bike-classification/app.py b/spaces/anubhavmaity/bike-classification/app.py deleted file mode 100644 index 4d61fc2ce1ee6aaf5b02776585903291b2baf42f..0000000000000000000000000000000000000000 --- a/spaces/anubhavmaity/bike-classification/app.py +++ /dev/null @@ -1,26 +0,0 @@ -# AUTOGENERATED! DO NOT EDIT! File to edit: ../app.ipynb. - -# %% auto 0 -__all__ = ['learn', 'categories', 'image', 'label', 'examples', 'intf', 'classify_image'] - -# %% ../app.ipynb 4 -from fastai.vision.all import * -import gradio as gr - -# %% ../app.ipynb 6 -learn = load_learner('export.pkl') - -# %% ../app.ipynb 8 -categories = ('BMX', 'mountain bike', 'road bike') - -def classify_image(img): - pred, idx, probs = learn.predict(img) - return dict(zip(categories, map(float, probs))) - -# %% ../app.ipynb 14 -image = gr.inputs.Image(shape=(512, 512)) -label = gr.outputs.Label() -examples = ['road_bike.jpg', 'mountain_bike.jpg', 'BMX.jpg'] - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline=False) diff --git a/spaces/anzorq/hf-spaces-semantic-search/pages/index.js b/spaces/anzorq/hf-spaces-semantic-search/pages/index.js deleted file mode 100644 index 23fe8a7516489788edeb2c1070b37606bf85c2e3..0000000000000000000000000000000000000000 --- a/spaces/anzorq/hf-spaces-semantic-search/pages/index.js +++ /dev/null @@ -1,155 +0,0 @@ -import { Inter } from 'next/font/google'; -import SearchBar from '@/components/searchBar'; -import Card from '@/components/card'; -import { predict } from '@/pages/api/api_hf'; -import { get_space_info } from '@/pages/api/hf_space'; -import { useState, useEffect } from 'react'; - -const inter = Inter({ subsets: ['latin'] }); - -export default function Home() { - const [spaceInfo, setSpaceInfo] = useState(null); - const [sortedSpaceInfo, setSortedSpaceInfo] = useState(null); - const [searchResults, setSearchResults] = useState([]); - const [isLoading, setIsLoading] = useState(false); - const [sortBy, setSortBy] = useState('relevance'); - const [onlyRunning, setOnlyRunning] = useState(false); - - useEffect(() => { - if (searchResults.length > 0) { - fetchSpaceInfo(searchResults); - } else { - setSpaceInfo(null); - } - - async function fetchSpaceInfo(results) { - setIsLoading(true); - const spaceData = await Promise.all( - results.map(async ([id, description]) => { - const space = await get_space_info(id); - return space ? { ...space, description } : null; - }) - ); - setSpaceInfo(spaceData); - setIsLoading(false); - document.querySelector('.search-bar').scrollIntoView({ - behavior: 'smooth', - block: 'start', - }); - } - }, [searchResults]); - - useEffect(() => { - if (spaceInfo) { - setSortedSpaceInfo(filterResults(sortResults(spaceInfo, sortBy))); - } - }, [spaceInfo, sortBy, onlyRunning]); - - useEffect(() => { - document.querySelector('.search-bar')?.focus(); - }, []); - - async function onSearch(query) { - setIsLoading(true); - setSortBy('relevance'); - setSearchResults(query ? await predict(query, 24) : []); - } - - function sortResults(results, sortBy) { - return sortBy === 'likes' ? [...results].sort((a, b) => b.likes - a.likes) : results; - } - - function filterResults(results) { - return onlyRunning ? 
results.filter((space) => space.runtime_stage === 'RUNNING') : results; - } - - function toggleOnlyRunning() { - setOnlyRunning(!onlyRunning); - } - - const renderSortButtons = () => ( - <> - {['relevance', 'likes'].map((option) => ( -
    - -
    - ))} - - ); - - const renderCards = () => - sortedSpaceInfo.map( - (space, index) => - space && ( - - ) - ); - - return ( -
    -

    🤗 Hugging Face Spaces

    - - {isLoading ? ( -
    -
    -
    - ) : ( - sortedSpaceInfo && ( - <> -
    - - Sort by: - {renderSortButtons()} -
    -
    - {renderCards()} -
    - - ) - )} - -
    - ); -} \ No newline at end of file diff --git a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/image_sharpening.py b/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/image_sharpening.py deleted file mode 100644 index 6d12b5d5bffa496c245a09d823cbaa9989c52435..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/image_sharpening.py +++ /dev/null @@ -1,22 +0,0 @@ -import cv2 -import numpy as np - -def unsharp_mask(img, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0, mask=None): - if amount == 0: - return img - # Return a sharpened version of the image, using an unsharp mask. - # If mask is not None, only areas under mask are handled - blurred = cv2.GaussianBlur(img, kernel_size, sigma) - sharpened = float(amount + 1) * img - float(amount) * blurred - sharpened = np.maximum(sharpened, np.zeros(sharpened.shape)) - sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape)) - sharpened = sharpened.round().astype(np.uint8) - if threshold > 0: - low_contrast_mask = np.absolute(img - blurred) < threshold - np.copyto(sharpened, img, where=low_contrast_mask) - if mask is not None: - mask = np.array(mask) - masked_sharpened = cv2.bitwise_and(sharpened, sharpened, mask=mask) - masked_img = cv2.bitwise_and(img, img, mask=255-mask) - sharpened = cv2.add(masked_img, masked_sharpened) - return sharpened diff --git a/spaces/aodianyun/stable-diffusion-webui/modules/scripts_postprocessing.py b/spaces/aodianyun/stable-diffusion-webui/modules/scripts_postprocessing.py deleted file mode 100644 index 68f588f664c679f2e61c10823295d38301cd9e44..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/modules/scripts_postprocessing.py +++ /dev/null @@ -1,152 +0,0 @@ -import os -import gradio as gr - -from modules import errors, shared - - -class PostprocessedImage: - def __init__(self, image): - self.image = image - self.info = {} - - -class ScriptPostprocessing: - filename = None - controls = None - args_from = None - args_to = None - - order = 1000 - """scripts will be ordred by this value in postprocessing UI""" - - name = None - """this function should return the title of the script.""" - - group = None - """A gr.Group component that has all script's UI inside it""" - - def ui(self): - """ - This function should create gradio UI elements. See https://gradio.app/docs/#components - The return value should be a dictionary that maps parameter names to components used in processing. - Values of those components will be passed to process() function. - """ - - pass - - def process(self, pp: PostprocessedImage, **args): - """ - This function is called to postprocess the image. 
- args contains a dictionary with all values returned by components from ui() - """ - - pass - - def image_changed(self): - pass - - - - -def wrap_call(func, filename, funcname, *args, default=None, **kwargs): - try: - res = func(*args, **kwargs) - return res - except Exception as e: - errors.display(e, f"calling {filename}/{funcname}") - - return default - - -class ScriptPostprocessingRunner: - def __init__(self): - self.scripts = None - self.ui_created = False - - def initialize_scripts(self, scripts_data): - self.scripts = [] - - for script_class, path, basedir, script_module in scripts_data: - script: ScriptPostprocessing = script_class() - script.filename = path - - if script.name == "Simple Upscale": - continue - - self.scripts.append(script) - - def create_script_ui(self, script, inputs): - script.args_from = len(inputs) - script.args_to = len(inputs) - - script.controls = wrap_call(script.ui, script.filename, "ui") - - for control in script.controls.values(): - control.custom_script_source = os.path.basename(script.filename) - - inputs += list(script.controls.values()) - script.args_to = len(inputs) - - def scripts_in_preferred_order(self): - if self.scripts is None: - import modules.scripts - self.initialize_scripts(modules.scripts.postprocessing_scripts_data) - - scripts_order = shared.opts.postprocessing_operation_order - - def script_score(name): - for i, possible_match in enumerate(scripts_order): - if possible_match == name: - return i - - return len(self.scripts) - - script_scores = {script.name: (script_score(script.name), script.order, script.name, original_index) for original_index, script in enumerate(self.scripts)} - - return sorted(self.scripts, key=lambda x: script_scores[x.name]) - - def setup_ui(self): - inputs = [] - - for script in self.scripts_in_preferred_order(): - with gr.Box() as group: - self.create_script_ui(script, inputs) - - script.group = group - - self.ui_created = True - return inputs - - def run(self, pp: PostprocessedImage, args): - for script in self.scripts_in_preferred_order(): - shared.state.job = script.name - - script_args = args[script.args_from:script.args_to] - - process_args = {} - for (name, component), value in zip(script.controls.items(), script_args): - process_args[name] = value - - script.process(pp, **process_args) - - def create_args_for_run(self, scripts_args): - if not self.ui_created: - with gr.Blocks(analytics_enabled=False): - self.setup_ui() - - scripts = self.scripts_in_preferred_order() - args = [None] * max([x.args_to for x in scripts]) - - for script in scripts: - script_args_dict = scripts_args.get(script.name, None) - if script_args_dict is not None: - - for i, name in enumerate(script.controls): - args[script.args_from + i] = script_args_dict.get(name, None) - - return args - - def image_changed(self): - for script in self.scripts_in_preferred_order(): - script.image_changed() - diff --git a/spaces/arbml/Ashaar/poetry_diacritizer/diacritizer.py b/spaces/arbml/Ashaar/poetry_diacritizer/diacritizer.py deleted file mode 100644 index 63fc3ed940a81dc560d68781dd4d73357cfc6350..0000000000000000000000000000000000000000 --- a/spaces/arbml/Ashaar/poetry_diacritizer/diacritizer.py +++ /dev/null @@ -1,98 +0,0 @@ -from typing import Dict -import torch -from .config_manager import ConfigManager - - -class Diacritizer: - def __init__( - self, config_path: str, model_kind: str, load_model: bool = False - ) -> None: - self.config_path = config_path - self.model_kind = model_kind - self.config_manager = ConfigManager( - 
config_path=config_path, model_kind=model_kind - ) - self.config = self.config_manager.config - self.text_encoder = self.config_manager.text_encoder - if self.config.get("device"): - self.device = self.config["device"] - else: - self.device = "cuda" if torch.cuda.is_available() else "cpu" - - if load_model: - self.model, self.global_step = self.config_manager.load_model() - self.model = self.model.to(self.device) - - self.start_symbol_id = self.text_encoder.start_symbol_id - - def set_model(self, model: torch.nn.Module): - self.model = model - - def diacritize_text(self, text: str): - seq = self.text_encoder.input_to_sequence(text) - output = self.diacritize_batch(torch.LongTensor([seq]).to(self.device)) - - def diacritize_batch(self, batch): - raise NotImplementedError() - - def diacritize_iterators(self, iterator): - pass - - -class CBHGDiacritizer(Diacritizer): - def diacritize_batch(self, batch): - self.model.eval() - inputs = batch["src"] - lengths = batch["lengths"] - outputs = self.model(inputs.to(self.device), lengths.to("cpu")) - diacritics = outputs["diacritics"] - predictions = torch.max(diacritics, 2).indices - sentences = [] - - for src, prediction in zip(inputs, predictions): - sentence = self.text_encoder.combine_text_and_haraqat( - list(src.detach().cpu().numpy()), - list(prediction.detach().cpu().numpy()), - ) - sentences.append(sentence) - - return sentences - - -class Seq2SeqDiacritizer(Diacritizer): - def diacritize_batch(self, batch): - self.model.eval() - inputs = batch["src"] - lengths = batch["lengths"] - outputs = self.model(inputs.to(self.device), lengths.to("cpu")) - diacritics = outputs["diacritics"] - predictions = torch.max(diacritics, 2).indices - sentences = [] - - for src, prediction in zip(inputs, predictions): - sentence = self.text_encoder.combine_text_and_haraqat( - list(src.detach().cpu().numpy()), - list(prediction.detach().cpu().numpy()), - ) - sentences.append(sentence) - - return sentences - -class GPTDiacritizer(Diacritizer): - def diacritize_batch(self, batch): - self.model.eval() - inputs = batch["src"] - lengths = batch["lengths"] - outputs = self.model(inputs.to(self.device), lengths.to("cpu")) - diacritics = outputs["diacritics"] - predictions = torch.max(diacritics, 2).indices - sentences = [] - - for src, prediction in zip(inputs, predictions): - sentence = self.text_encoder.combine_text_and_haraqat( - list(src.detach().cpu().numpy()), - list(prediction.detach().cpu().numpy()), - ) - sentences.append(sentence) - - return sentences diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/api_tests/__init__.py b/spaces/artificialguybr/video-dubbing/TTS/tests/api_tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/asciicorp/Legal-ai/ingestdocs.py b/spaces/asciicorp/Legal-ai/ingestdocs.py deleted file mode 100644 index 236e5620ce419daa8ca8f5b27175e7e1dfb6f9b3..0000000000000000000000000000000000000000 --- a/spaces/asciicorp/Legal-ai/ingestdocs.py +++ /dev/null @@ -1,45 +0,0 @@ -import pickle -from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter -from langchain.vectorstores.faiss import FAISS -from langchain.embeddings import OpenAIEmbeddings -from langchain.document_loaders import TextLoader, DirectoryLoader, PyPDFLoader -import shutil -import os - -def ingest_docs(text_splitter_cls, chunk_size=None, chunk_overlap=None): - loader = DirectoryLoader('docs', glob="*.pdf", loader_cls=PyPDFLoader) - docs = 
loader.load() - if text_splitter_cls == RecursiveCharacterTextSplitter: - text_splitter = RecursiveCharacterTextSplitter() - elif text_splitter_cls == CharacterTextSplitter: - text_splitter = CharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) - else: - raise ValueError("Invalid text splitter class") - documents = text_splitter.split_documents(docs) - embeddings = OpenAIEmbeddings() - vectorstore = FAISS.from_documents(documents, embeddings) - with open("vectorstore.pkl", "wb") as f: - pickle.dump(vectorstore, f) - -def ingest_new_docs(text_splitter_cls, chunk_size=None, chunk_overlap=None): - with open("vectorstore.pkl", "rb") as f: - vectorstore = pickle.load(f) - loader = DirectoryLoader('new_docs', glob="*.pdf", loader_cls=PyPDFLoader) - new_docs = loader.load() - - if text_splitter_cls == RecursiveCharacterTextSplitter: - text_splitter = RecursiveCharacterTextSplitter() - elif text_splitter_cls == CharacterTextSplitter: - text_splitter = CharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) - else: - raise ValueError("Invalid text splitter class") - new_documents = text_splitter.split_documents(new_docs) - vectorstore.add_documents(new_documents) - - with open("vectorstore.pkl", "wb") as f: - pickle.dump(vectorstore, f) - - # Move files from new_docs to docs directory - for filename in os.listdir("new_docs"): - if filename.endswith(".pdf"): - shutil.move(os.path.join("new_docs", filename), os.path.join("docs", filename)) \ No newline at end of file diff --git a/spaces/asd998877/TsGpt/modules/shared.py b/spaces/asd998877/TsGpt/modules/shared.py deleted file mode 100644 index a9e72580aa7ae48f907e923a09099513570a9ad8..0000000000000000000000000000000000000000 --- a/spaces/asd998877/TsGpt/modules/shared.py +++ /dev/null @@ -1,55 +0,0 @@ -from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST -import os -import queue - -class State: - interrupted = False - multi_api_key = False - completion_url = COMPLETION_URL - balance_api_url = BALANCE_API_URL - usage_api_url = USAGE_API_URL - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_api_host(self, api_host): - self.completion_url = f"https://{api_host}/v1/chat/completions" - self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants" - self.usage_api_url = f"https://{api_host}/dashboard/billing/usage" - os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1" - - def reset_api_host(self): - self.completion_url = COMPLETION_URL - self.balance_api_url = BALANCE_API_URL - self.usage_api_url = USAGE_API_URL - os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1" - return API_HOST - - def reset_all(self): - self.interrupted = False - self.completion_url = COMPLETION_URL - - def set_api_key_queue(self, api_key_list): - self.multi_api_key = True - self.api_key_queue = queue.Queue() - for api_key in api_key_list: - self.api_key_queue.put(api_key) - - def switching_api_key(self, func): - if not hasattr(self, "api_key_queue"): - return func - - def wrapped(*args, **kwargs): - api_key = self.api_key_queue.get() - args[0].api_key = api_key - ret = func(*args, **kwargs) - self.api_key_queue.put(api_key) - return ret - - return wrapped - - -state = State() diff --git a/spaces/aseuteurideu/audio_deepfake_detector/data/dfdt_dataset.py b/spaces/aseuteurideu/audio_deepfake_detector/data/dfdt_dataset.py deleted file mode 100644 index 
f43889c970cc589ab42efaa0b252d650d666ecce..0000000000000000000000000000000000000000 --- a/spaces/aseuteurideu/audio_deepfake_detector/data/dfdt_dataset.py +++ /dev/null @@ -1,130 +0,0 @@ -'''Module for loading the fakeavceleb dataset from tfrecord format''' -import numpy as np -import tensorflow as tf -from data.augmentation_utils import create_frame_transforms, create_spec_transforms - -FEATURE_DESCRIPTION = { - 'video_path': tf.io.FixedLenFeature([], tf.string), - 'image/encoded': tf.io.FixedLenFeature([], tf.string), - 'clip/label/index': tf.io.FixedLenFeature([], tf.int64), - 'clip/label/text': tf.io.FixedLenFeature([], tf.string), - 'WAVEFORM/feature/floats': tf.io.FixedLenFeature([], tf.string) -} - -@tf.function -def _parse_function(example_proto): - - #Parse the input `tf.train.Example` proto using the dictionary above. - example = tf.io.parse_single_example(example_proto, FEATURE_DESCRIPTION) - - video_path = example['video_path'] - video = tf.io.decode_raw(example['image/encoded'], tf.int8) - spectrogram = tf.io.decode_raw(example['WAVEFORM/feature/floats'], tf.float32) - - label = example["clip/label/text"] - label_map = example["clip/label/index"] - - return video, spectrogram, label_map - -@tf.function -def decode_inputs(video, spectrogram, label_map): - '''Decode tensors to arrays with desired shape''' - frame = tf.reshape(video, [10, 3, 256, 256]) - frame = frame[0] / 255 #Pick the first frame and normalize it. - # frame = tf.cast(frame, tf.float32) - - label_map = tf.expand_dims(label_map, axis = 0) - - sample = {'video_reshaped': frame, 'spectrogram': spectrogram, 'label_map': label_map} - return sample - - -def decode_train_inputs(video, spectrogram, label_map): - #Data augmentation for spectograms - spectrogram_shape = spectrogram.shape - spec_augmented = tf.py_function(aug_spec_fn, [spectrogram], tf.float32) - spec_augmented.set_shape(spectrogram_shape) - - frame = tf.reshape(video, [10, 256, 256, 3]) - frame = frame[0] #Pick the first frame. - frame = frame / 255 #Normalize tensor. 
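    # the numpy-based augmentation helpers run inside tf.py_function, which loses static shape
    # information; the set_shape() calls on the augmented tensors restore it for downstream batching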
- - frame_augmented = tf.py_function(aug_img_fn, [frame], tf.uint8) - # frame_augmented.set_shape(frame_shape) - - frame_augmented.set_shape([3, 256, 256]) - label_map = tf.expand_dims(label_map, axis = 0) - - augmented_sample = {'video_reshaped': frame_augmented, 'spectrogram': spec_augmented, 'label_map': label_map} - return augmented_sample - - -def aug_img_fn(frame): - frame = frame.numpy().astype(np.uint8) - frame_data = {'image': frame} - aug_frame_data = create_frame_transforms(**frame_data) - aug_img = aug_frame_data['image'] - aug_img = aug_img.transpose(2, 0, 1) - return aug_img - -def aug_spec_fn(spec): - spec = spec.numpy() - spec_data = {'spec': spec} - aug_spec_data = create_spec_transforms(**spec_data) - aug_spec = aug_spec_data['spec'] - return aug_spec - - -class FakeAVCelebDatasetTrain: - - def __init__(self, args): - self.args = args - self.samples = self.load_features_from_tfrec() - - def load_features_from_tfrec(self): - '''Loads raw features from a tfrecord file and returns them as raw inputs''' - ds = tf.io.matching_files(self.args.data_dir) - files = tf.random.shuffle(ds) - - shards = tf.data.Dataset.from_tensor_slices(files) - dataset = shards.interleave(tf.data.TFRecordDataset) - dataset = dataset.shuffle(buffer_size=100) - - dataset = dataset.map(_parse_function, num_parallel_calls = tf.data.AUTOTUNE) - dataset = dataset.map(decode_train_inputs, num_parallel_calls = tf.data.AUTOTUNE) - dataset = dataset.padded_batch(batch_size = self.args.batch_size) - return dataset - - - def __len__(self): - self.samples = self.load_features_from_tfrec(self.args.data_dir) - cnt = self.samples.reduce(np.int64(0), lambda x, _: x + 1) - cnt = cnt.numpy() - return cnt - -class FakeAVCelebDatasetVal: - - def __init__(self, args): - self.args = args - self.samples = self.load_features_from_tfrec() - - def load_features_from_tfrec(self): - '''Loads raw features from a tfrecord file and returns them as raw inputs''' - ds = tf.io.matching_files(self.args.data_dir) - files = tf.random.shuffle(ds) - - shards = tf.data.Dataset.from_tensor_slices(files) - dataset = shards.interleave(tf.data.TFRecordDataset) - dataset = dataset.shuffle(buffer_size=100) - - dataset = dataset.map(_parse_function, num_parallel_calls = tf.data.AUTOTUNE) - dataset = dataset.map(decode_inputs, num_parallel_calls = tf.data.AUTOTUNE) - dataset = dataset.padded_batch(batch_size = self.args.batch_size) - return dataset - - - def __len__(self): - self.samples = self.load_features_from_tfrec(self.args.data_dir) - cnt = self.samples.reduce(np.int64(0), lambda x, _: x + 1) - cnt = cnt.numpy() - return cnt \ No newline at end of file diff --git a/spaces/ashiqabdulkhader/GPT2-Poet/README.md b/spaces/ashiqabdulkhader/GPT2-Poet/README.md deleted file mode 100644 index 3c7d62f8266aeac434c3363c267f22bd2ec93513..0000000000000000000000000000000000000000 --- a/spaces/ashiqabdulkhader/GPT2-Poet/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: GPT2 Poet -emoji: 💻 -colorFrom: yellow -colorTo: yellow -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/augmentedimaginationhackathon/paperstocode/frontend/src/app/uploader/uploader.component.ts b/spaces/augmentedimaginationhackathon/paperstocode/frontend/src/app/uploader/uploader.component.ts deleted file mode 100644 index 0acdca97385a09881c3939f0c0649b7f9cee97e2..0000000000000000000000000000000000000000 --- 
a/spaces/augmentedimaginationhackathon/paperstocode/frontend/src/app/uploader/uploader.component.ts +++ /dev/null @@ -1,38 +0,0 @@ -import { Component, OnInit } from '@angular/core'; -//import {RequestService} from "../services/request-service/request.service"; -import {take} from "rxjs/operators"; -import {ActivatedRoute} from "@angular/router"; - -@Component({ - selector: 'UploaderComponent', - templateUrl: './uploader.component.html', - styleUrls: ['./uploader.component.scss'] -}) -export class UploaderComponent implements OnInit { - public readonly numberOfSteps: number = 6; - public stepOne: number = 1; - public currStep: number = 1; - - - constructor(//private requestService: RequestService, - private activatedRoute: ActivatedRoute, - ) { } - - public ngOnInit(): void { - - } - public stepOneContinue() { - if(this.stepOne < this.numberOfSteps) { - this.stepOne++; - } else { - this.stepOne = 1 - } - setTimeout(() => { - if(this.currStep < this.numberOfSteps) { - this.currStep++; - } else { - this.currStep = 1 - } - }, 250); - } -} diff --git a/spaces/avivdm1/AutoGPT/tests/test_prompt_generator.py b/spaces/avivdm1/AutoGPT/tests/test_prompt_generator.py deleted file mode 100644 index 6a0bfd6c7bbdbfaa3750e9dee621bd25e17a448b..0000000000000000000000000000000000000000 --- a/spaces/avivdm1/AutoGPT/tests/test_prompt_generator.py +++ /dev/null @@ -1,114 +0,0 @@ -from unittest import TestCase - -from autogpt.promptgenerator import PromptGenerator - - -class TestPromptGenerator(TestCase): - """ - Test cases for the PromptGenerator class, which is responsible for generating - prompts for the AI with constraints, commands, resources, and performance evaluations. - """ - - @classmethod - def setUpClass(cls): - """ - Set up the initial state for each test method by creating an instance of PromptGenerator. - """ - cls.generator = PromptGenerator() - - # Test whether the add_constraint() method adds a constraint to the generator's constraints list - def test_add_constraint(self): - """ - Test if the add_constraint() method adds a constraint to the generator's constraints list. - """ - constraint = "Constraint1" - self.generator.add_constraint(constraint) - self.assertIn(constraint, self.generator.constraints) - - # Test whether the add_command() method adds a command to the generator's commands list - def test_add_command(self): - """ - Test if the add_command() method adds a command to the generator's commands list. - """ - command_label = "Command Label" - command_name = "command_name" - args = {"arg1": "value1", "arg2": "value2"} - self.generator.add_command(command_label, command_name, args) - command = { - "label": command_label, - "name": command_name, - "args": args, - } - self.assertIn(command, self.generator.commands) - - def test_add_resource(self): - """ - Test if the add_resource() method adds a resource to the generator's resources list. - """ - resource = "Resource1" - self.generator.add_resource(resource) - self.assertIn(resource, self.generator.resources) - - def test_add_performance_evaluation(self): - """ - Test if the add_performance_evaluation() method adds an evaluation to the generator's - performance_evaluation list. - """ - evaluation = "Evaluation1" - self.generator.add_performance_evaluation(evaluation) - self.assertIn(evaluation, self.generator.performance_evaluation) - - def test_generate_prompt_string(self): - """ - Test if the generate_prompt_string() method generates a prompt string with all the added - constraints, commands, resources, and evaluations. 
- """ - # Define the test data - constraints = ["Constraint1", "Constraint2"] - commands = [ - { - "label": "Command1", - "name": "command_name1", - "args": {"arg1": "value1"}, - }, - { - "label": "Command2", - "name": "command_name2", - "args": {}, - }, - ] - resources = ["Resource1", "Resource2"] - evaluations = ["Evaluation1", "Evaluation2"] - - # Add test data to the generator - for constraint in constraints: - self.generator.add_constraint(constraint) - for command in commands: - self.generator.add_command( - command["label"], command["name"], command["args"] - ) - for resource in resources: - self.generator.add_resource(resource) - for evaluation in evaluations: - self.generator.add_performance_evaluation(evaluation) - - # Generate the prompt string and verify its correctness - prompt_string = self.generator.generate_prompt_string() - self.assertIsNotNone(prompt_string) - - # Check if all constraints, commands, resources, and evaluations are present in the prompt string - for constraint in constraints: - self.assertIn(constraint, prompt_string) - for command in commands: - self.assertIn(command["name"], prompt_string) - for key, value in command["args"].items(): - self.assertIn(f'"{key}": "{value}"', prompt_string) - for resource in resources: - self.assertIn(resource, prompt_string) - for evaluation in evaluations: - self.assertIn(evaluation, prompt_string) - - self.assertIn("constraints", prompt_string.lower()) - self.assertIn("commands", prompt_string.lower()) - self.assertIn("resources", prompt_string.lower()) - self.assertIn("performance evaluation", prompt_string.lower()) diff --git a/spaces/awacke1/Gradio-Gallery-Health-Medical-Icon-Sets/README.md b/spaces/awacke1/Gradio-Gallery-Health-Medical-Icon-Sets/README.md deleted file mode 100644 index 1017f6e33dc0fa2e2fe551aae1e0fab300ff67cf..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Gradio-Gallery-Health-Medical-Icon-Sets/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 👁🥽UI Gallery of Icon Sets for AI Animated User Interfaces 📱👁 Gradio -emoji: 👁🥽📱👁 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Map-California-AI/README.md b/spaces/awacke1/Map-California-AI/README.md deleted file mode 100644 index 71e90834b348b1cd412ab925c6e6aa6408c5f8d3..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Map-California-AI/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Maps.Markers.Honor.Iceland -emoji: 🌖 -colorFrom: purple -colorTo: green -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/NLPImageUnderstanding/app.py b/spaces/awacke1/NLPImageUnderstanding/app.py deleted file mode 100644 index aca2ef5a3170ece2b7a12454bba7fe45bc88d087..0000000000000000000000000000000000000000 --- a/spaces/awacke1/NLPImageUnderstanding/app.py +++ /dev/null @@ -1,59 +0,0 @@ -import torch -import gradio as gr -from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer - -model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning") -feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning") -tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning") -device 
= torch.device("cuda" if torch.cuda.is_available() else "cpu") -model.to(device) -max_length = 16 -num_beams = 4 -gen_kwargs = {"max_length": max_length, "num_beams": num_beams} - -def predict_step(image_paths): - images = [] - for image_path in image_paths: - i_image = Image.open(image_path) - if i_image.mode != "RGB": - i_image = i_image.convert(mode="RGB") - images.append(i_image) - pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values - pixel_values = pixel_values.to(device) - output_ids = model.generate(pixel_values, **gen_kwargs) - preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True) - preds = [pred.strip() for pred in preds] - return preds - -#torch.hub.download_url_to_file('https://github.com/AaronCWacker/Yggdrasil/blob/main/images/35-Favorite-Games.jpg', '35-Favorite-Games.jpg') - -#result = predict_step(['35-Favorite-Games.jpg']) - -def predict(image,max_length=64, num_beams=4): - image = image.convert('RGB') - image = feature_extractor(image, return_tensors="pt").pixel_values.to(device) - clean_text = lambda x: x.replace('<|endoftext|>','').split('\n')[0] - caption_ids = model.generate(image, max_length = max_length)[0] - caption_text = clean_text(tokenizer.decode(caption_ids)) - return caption_text - -description= "NLP Image Understanding" -title = "NLP Image Understanding" -article = "nlpconnect vit-gpt2-image-captioning" - -input = gr.inputs.Image(label="Upload your Image", type = 'pil', optional=True) -output = gr.outputs.Textbox(type="auto",label="Captions") - -#examples = [['35-Favorite-Games.jpg']] -examples = [f"{i}.jpg" for i in range(1,20)] - -interface = gr.Interface( - fn=predict, - inputs = input, - outputs=output, - examples = examples, - title=title, - description=description, - article = article, - ) -interface.launch() diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/ctm/lzma.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/ctm/lzma.js deleted file mode 100644 index 9960f2592fd4b8ec991d0f02ffd6fd9c4518ff81..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/ctm/lzma.js +++ /dev/null @@ -1,517 +0,0 @@ - -var LZMA = LZMA || {}; - -// browserify support -if ( typeof module === 'object' ) { - - module.exports = LZMA; - -} - -LZMA.OutWindow = function() { - this._windowSize = 0; -}; - -LZMA.OutWindow.prototype.create = function(windowSize) { - if ( (!this._buffer) || (this._windowSize !== windowSize) ) { - this._buffer = []; - } - this._windowSize = windowSize; - this._pos = 0; - this._streamPos = 0; -}; - -LZMA.OutWindow.prototype.flush = function() { - var size = this._pos - this._streamPos; - if (size !== 0) { - while (size --) { - this._stream.writeByte(this._buffer[this._streamPos ++]); - } - if (this._pos >= this._windowSize) { - this._pos = 0; - } - this._streamPos = this._pos; - } -}; - -LZMA.OutWindow.prototype.releaseStream = function() { - this.flush(); - this._stream = null; -}; - -LZMA.OutWindow.prototype.setStream = function(stream) { - this.releaseStream(); - this._stream = stream; -}; - -LZMA.OutWindow.prototype.init = function(solid) { - if (!solid) { - this._streamPos = 0; - this._pos = 0; - } -}; - -LZMA.OutWindow.prototype.copyBlock = function(distance, len) { - var pos = this._pos - distance - 1; - if (pos < 0) { - pos += this._windowSize; - } - while (len --) { - if (pos >= this._windowSize) { - pos = 0; - } - this._buffer[this._pos ++] = this._buffer[pos ++]; - if (this._pos >= 
this._windowSize) { - this.flush(); - } - } -}; - -LZMA.OutWindow.prototype.putByte = function(b) { - this._buffer[this._pos ++] = b; - if (this._pos >= this._windowSize) { - this.flush(); - } -}; - -LZMA.OutWindow.prototype.getByte = function(distance) { - var pos = this._pos - distance - 1; - if (pos < 0) { - pos += this._windowSize; - } - return this._buffer[pos]; -}; - -LZMA.RangeDecoder = function() { -}; - -LZMA.RangeDecoder.prototype.setStream = function(stream) { - this._stream = stream; -}; - -LZMA.RangeDecoder.prototype.releaseStream = function() { - this._stream = null; -}; - -LZMA.RangeDecoder.prototype.init = function() { - var i = 5; - - this._code = 0; - this._range = -1; - - while (i --) { - this._code = (this._code << 8) | this._stream.readByte(); - } -}; - -LZMA.RangeDecoder.prototype.decodeDirectBits = function(numTotalBits) { - var result = 0, i = numTotalBits, t; - - while (i --) { - this._range >>>= 1; - t = (this._code - this._range) >>> 31; - this._code -= this._range & (t - 1); - result = (result << 1) | (1 - t); - - if ( (this._range & 0xff000000) === 0) { - this._code = (this._code << 8) | this._stream.readByte(); - this._range <<= 8; - } - } - - return result; -}; - -LZMA.RangeDecoder.prototype.decodeBit = function(probs, index) { - var prob = probs[index], - newBound = (this._range >>> 11) * prob; - - if ( (this._code ^ 0x80000000) < (newBound ^ 0x80000000) ) { - this._range = newBound; - probs[index] += (2048 - prob) >>> 5; - if ( (this._range & 0xff000000) === 0) { - this._code = (this._code << 8) | this._stream.readByte(); - this._range <<= 8; - } - return 0; - } - - this._range -= newBound; - this._code -= newBound; - probs[index] -= prob >>> 5; - if ( (this._range & 0xff000000) === 0) { - this._code = (this._code << 8) | this._stream.readByte(); - this._range <<= 8; - } - return 1; -}; - -LZMA.initBitModels = function(probs, len) { - while (len --) { - probs[len] = 1024; - } -}; - -LZMA.BitTreeDecoder = function(numBitLevels) { - this._models = []; - this._numBitLevels = numBitLevels; -}; - -LZMA.BitTreeDecoder.prototype.init = function() { - LZMA.initBitModels(this._models, 1 << this._numBitLevels); -}; - -LZMA.BitTreeDecoder.prototype.decode = function(rangeDecoder) { - var m = 1, i = this._numBitLevels; - - while (i --) { - m = (m << 1) | rangeDecoder.decodeBit(this._models, m); - } - return m - (1 << this._numBitLevels); -}; - -LZMA.BitTreeDecoder.prototype.reverseDecode = function(rangeDecoder) { - var m = 1, symbol = 0, i = 0, bit; - - for (; i < this._numBitLevels; ++ i) { - bit = rangeDecoder.decodeBit(this._models, m); - m = (m << 1) | bit; - symbol |= bit << i; - } - return symbol; -}; - -LZMA.reverseDecode2 = function(models, startIndex, rangeDecoder, numBitLevels) { - var m = 1, symbol = 0, i = 0, bit; - - for (; i < numBitLevels; ++ i) { - bit = rangeDecoder.decodeBit(models, startIndex + m); - m = (m << 1) | bit; - symbol |= bit << i; - } - return symbol; -}; - -LZMA.LenDecoder = function() { - this._choice = []; - this._lowCoder = []; - this._midCoder = []; - this._highCoder = new LZMA.BitTreeDecoder(8); - this._numPosStates = 0; -}; - -LZMA.LenDecoder.prototype.create = function(numPosStates) { - for (; this._numPosStates < numPosStates; ++ this._numPosStates) { - this._lowCoder[this._numPosStates] = new LZMA.BitTreeDecoder(3); - this._midCoder[this._numPosStates] = new LZMA.BitTreeDecoder(3); - } -}; - -LZMA.LenDecoder.prototype.init = function() { - var i = this._numPosStates; - LZMA.initBitModels(this._choice, 2); - while (i --) { - 
this._lowCoder[i].init(); - this._midCoder[i].init(); - } - this._highCoder.init(); -}; - -LZMA.LenDecoder.prototype.decode = function(rangeDecoder, posState) { - if (rangeDecoder.decodeBit(this._choice, 0) === 0) { - return this._lowCoder[posState].decode(rangeDecoder); - } - if (rangeDecoder.decodeBit(this._choice, 1) === 0) { - return 8 + this._midCoder[posState].decode(rangeDecoder); - } - return 16 + this._highCoder.decode(rangeDecoder); -}; - -LZMA.Decoder2 = function() { - this._decoders = []; -}; - -LZMA.Decoder2.prototype.init = function() { - LZMA.initBitModels(this._decoders, 0x300); -}; - -LZMA.Decoder2.prototype.decodeNormal = function(rangeDecoder) { - var symbol = 1; - - do { - symbol = (symbol << 1) | rangeDecoder.decodeBit(this._decoders, symbol); - }while (symbol < 0x100); - - return symbol & 0xff; -}; - -LZMA.Decoder2.prototype.decodeWithMatchByte = function(rangeDecoder, matchByte) { - var symbol = 1, matchBit, bit; - - do { - matchBit = (matchByte >> 7) & 1; - matchByte <<= 1; - bit = rangeDecoder.decodeBit(this._decoders, ( (1 + matchBit) << 8) + symbol); - symbol = (symbol << 1) | bit; - if (matchBit !== bit) { - while (symbol < 0x100) { - symbol = (symbol << 1) | rangeDecoder.decodeBit(this._decoders, symbol); - } - break; - } - }while (symbol < 0x100); - - return symbol & 0xff; -}; - -LZMA.LiteralDecoder = function() { -}; - -LZMA.LiteralDecoder.prototype.create = function(numPosBits, numPrevBits) { - var i; - - if (this._coders - && (this._numPrevBits === numPrevBits) - && (this._numPosBits === numPosBits) ) { - return; - } - this._numPosBits = numPosBits; - this._posMask = (1 << numPosBits) - 1; - this._numPrevBits = numPrevBits; - - this._coders = []; - - i = 1 << (this._numPrevBits + this._numPosBits); - while (i --) { - this._coders[i] = new LZMA.Decoder2(); - } -}; - -LZMA.LiteralDecoder.prototype.init = function() { - var i = 1 << (this._numPrevBits + this._numPosBits); - while (i --) { - this._coders[i].init(); - } -}; - -LZMA.LiteralDecoder.prototype.getDecoder = function(pos, prevByte) { - return this._coders[( (pos & this._posMask) << this._numPrevBits) - + ( (prevByte & 0xff) >>> (8 - this._numPrevBits) )]; -}; - -LZMA.Decoder = function() { - this._outWindow = new LZMA.OutWindow(); - this._rangeDecoder = new LZMA.RangeDecoder(); - this._isMatchDecoders = []; - this._isRepDecoders = []; - this._isRepG0Decoders = []; - this._isRepG1Decoders = []; - this._isRepG2Decoders = []; - this._isRep0LongDecoders = []; - this._posSlotDecoder = []; - this._posDecoders = []; - this._posAlignDecoder = new LZMA.BitTreeDecoder(4); - this._lenDecoder = new LZMA.LenDecoder(); - this._repLenDecoder = new LZMA.LenDecoder(); - this._literalDecoder = new LZMA.LiteralDecoder(); - this._dictionarySize = -1; - this._dictionarySizeCheck = -1; - - this._posSlotDecoder[0] = new LZMA.BitTreeDecoder(6); - this._posSlotDecoder[1] = new LZMA.BitTreeDecoder(6); - this._posSlotDecoder[2] = new LZMA.BitTreeDecoder(6); - this._posSlotDecoder[3] = new LZMA.BitTreeDecoder(6); -}; - -LZMA.Decoder.prototype.setDictionarySize = function(dictionarySize) { - if (dictionarySize < 0) { - return false; - } - if (this._dictionarySize !== dictionarySize) { - this._dictionarySize = dictionarySize; - this._dictionarySizeCheck = Math.max(this._dictionarySize, 1); - this._outWindow.create( Math.max(this._dictionarySizeCheck, 4096) ); - } - return true; -}; - -LZMA.Decoder.prototype.setLcLpPb = function(lc, lp, pb) { - var numPosStates = 1 << pb; - - if (lc > 8 || lp > 4 || pb > 4) { - return false; - } - 
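  // lc = literal context bits (max 8), lp = literal position bits (max 4), pb = position bits (max 4),
  // as decoded from the LZMA properties byte in setDecoderProperties()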
- this._literalDecoder.create(lp, lc); - - this._lenDecoder.create(numPosStates); - this._repLenDecoder.create(numPosStates); - this._posStateMask = numPosStates - 1; - - return true; -}; - -LZMA.Decoder.prototype.init = function() { - var i = 4; - - this._outWindow.init(false); - - LZMA.initBitModels(this._isMatchDecoders, 192); - LZMA.initBitModels(this._isRep0LongDecoders, 192); - LZMA.initBitModels(this._isRepDecoders, 12); - LZMA.initBitModels(this._isRepG0Decoders, 12); - LZMA.initBitModels(this._isRepG1Decoders, 12); - LZMA.initBitModels(this._isRepG2Decoders, 12); - LZMA.initBitModels(this._posDecoders, 114); - - this._literalDecoder.init(); - - while (i --) { - this._posSlotDecoder[i].init(); - } - - this._lenDecoder.init(); - this._repLenDecoder.init(); - this._posAlignDecoder.init(); - this._rangeDecoder.init(); -}; - -LZMA.Decoder.prototype.decode = function(inStream, outStream, outSize) { - var state = 0, rep0 = 0, rep1 = 0, rep2 = 0, rep3 = 0, nowPos64 = 0, prevByte = 0, - posState, decoder2, len, distance, posSlot, numDirectBits; - - this._rangeDecoder.setStream(inStream); - this._outWindow.setStream(outStream); - - this.init(); - - while (outSize < 0 || nowPos64 < outSize) { - posState = nowPos64 & this._posStateMask; - - if (this._rangeDecoder.decodeBit(this._isMatchDecoders, (state << 4) + posState) === 0) { - decoder2 = this._literalDecoder.getDecoder(nowPos64 ++, prevByte); - - if (state >= 7) { - prevByte = decoder2.decodeWithMatchByte(this._rangeDecoder, this._outWindow.getByte(rep0) ); - }else { - prevByte = decoder2.decodeNormal(this._rangeDecoder); - } - this._outWindow.putByte(prevByte); - - state = state < 4 ? 0 : state - (state < 10 ? 3 : 6); - - }else { - - if (this._rangeDecoder.decodeBit(this._isRepDecoders, state) === 1) { - len = 0; - if (this._rangeDecoder.decodeBit(this._isRepG0Decoders, state) === 0) { - if (this._rangeDecoder.decodeBit(this._isRep0LongDecoders, (state << 4) + posState) === 0) { - state = state < 7 ? 9 : 11; - len = 1; - } - }else { - if (this._rangeDecoder.decodeBit(this._isRepG1Decoders, state) === 0) { - distance = rep1; - }else { - if (this._rangeDecoder.decodeBit(this._isRepG2Decoders, state) === 0) { - distance = rep2; - }else { - distance = rep3; - rep3 = rep2; - } - rep2 = rep1; - } - rep1 = rep0; - rep0 = distance; - } - if (len === 0) { - len = 2 + this._repLenDecoder.decode(this._rangeDecoder, posState); - state = state < 7 ? 8 : 11; - } - }else { - rep3 = rep2; - rep2 = rep1; - rep1 = rep0; - - len = 2 + this._lenDecoder.decode(this._rangeDecoder, posState); - state = state < 7 ? 7 : 10; - - posSlot = this._posSlotDecoder[len <= 5 ? 
len - 2 : 3].decode(this._rangeDecoder); - if (posSlot >= 4) { - - numDirectBits = (posSlot >> 1) - 1; - rep0 = (2 | (posSlot & 1) ) << numDirectBits; - - if (posSlot < 14) { - rep0 += LZMA.reverseDecode2(this._posDecoders, - rep0 - posSlot - 1, this._rangeDecoder, numDirectBits); - }else { - rep0 += this._rangeDecoder.decodeDirectBits(numDirectBits - 4) << 4; - rep0 += this._posAlignDecoder.reverseDecode(this._rangeDecoder); - if (rep0 < 0) { - if (rep0 === -1) { - break; - } - return false; - } - } - }else { - rep0 = posSlot; - } - } - - if (rep0 >= nowPos64 || rep0 >= this._dictionarySizeCheck) { - return false; - } - - this._outWindow.copyBlock(rep0, len); - nowPos64 += len; - prevByte = this._outWindow.getByte(0); - } - } - - this._outWindow.flush(); - this._outWindow.releaseStream(); - this._rangeDecoder.releaseStream(); - - return true; -}; - -LZMA.Decoder.prototype.setDecoderProperties = function(properties) { - var value, lc, lp, pb, dictionarySize; - - if (properties.size < 5) { - return false; - } - - value = properties.readByte(); - lc = value % 9; - value = ~~(value / 9); - lp = value % 5; - pb = ~~(value / 5); - - if ( !this.setLcLpPb(lc, lp, pb) ) { - return false; - } - - dictionarySize = properties.readByte(); - dictionarySize |= properties.readByte() << 8; - dictionarySize |= properties.readByte() << 16; - dictionarySize += properties.readByte() * 16777216; - - return this.setDictionarySize(dictionarySize); -}; - -LZMA.decompress = function(properties, inStream, outStream, outSize) { - var decoder = new LZMA.Decoder(); - - if ( !decoder.setDecoderProperties(properties) ) { - throw "Incorrect stream properties"; - } - - if ( !decoder.decode(inStream, outStream, outSize) ) { - throw "Error in data stream"; - } - - return true; -}; diff --git a/spaces/befozg/stylematte/README.md b/spaces/befozg/stylematte/README.md deleted file mode 100644 index 4c86054804d0024fe778d2b380f284ae5939e0a5..0000000000000000000000000000000000000000 --- a/spaces/befozg/stylematte/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stylematte -emoji: 💻 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/beihai/PDF-Table-Extractor/.history/app_20220621095310.py b/spaces/beihai/PDF-Table-Extractor/.history/app_20220621095310.py deleted file mode 100644 index 4cef219db358a4215b3ae49b30d6379554e6900b..0000000000000000000000000000000000000000 --- a/spaces/beihai/PDF-Table-Extractor/.history/app_20220621095310.py +++ /dev/null @@ -1,40 +0,0 @@ -#-*- coding : utf-8-*- -import base64 -from subprocess import STDOUT -import streamlit as st -import pandas as pd -import camelot as cam # extracting tables from PDFs - -st.title("PDF Table Extractor") - -input_pdf = st.file_uploader(label = "", type = 'pdf') - -page_number = st.text_input("请填写表格所在PDF页码,eg: 3", value = 1) -background = st.selectbox("表格线条是否隐藏",(True, False)) -if input_pdf is not None: - # byte object into a PDF file - with open("input.pdf", "wb") as f: - base64_pdf = base64.b64encode(input_pdf.read()).decode('utf-8') - f.write(base64.b64decode(base64_pdf)) - f.close() - - # read the pdf and parse it using stream - tables = cam.read_pdf("input.pdf", pages=page_number, process_background=background) - result = pd.ExcelWriter('result.xlsx', engine='xlsxwriter') - tables[0].to_excel(result,index=False) - # for i in range(0,len(tables)): - # table = tables[i].df - # sheetname = 
str(i) - # table.to_excel(result, sheetname,index=False) - - with open('result.xlsx','rb') as f: - st.download_button('提取完成,点击下载!', f,file_name='result.xlsx',mime="application/vnd.ms-excel") - - tables_all= cam.read_pdf("input.pdf", pages=all, process_background=background) - result_all = pd.ExcelWriter('result_all.xlsx', engine='xlsxwriter') - for i in range(0,len(tables_all)): - table = tables_all[i].df - sheetname = str(i) - table.to_excel(result_all, sheetname,index=False) - with open('result_all.xlsx','rb') as f: - st.download_button('一件抽取完成,', f,file_name='result_all.xlsx',mime="application/vnd.ms-excel") \ No newline at end of file diff --git a/spaces/benjaminperkins/yulet1de-hentaidiffusion.peoplegenerator/README.md b/spaces/benjaminperkins/yulet1de-hentaidiffusion.peoplegenerator/README.md deleted file mode 100644 index 16d8228a08d6beffb3695c38e0a5de0257a4ed29..0000000000000000000000000000000000000000 --- a/spaces/benjaminperkins/yulet1de-hentaidiffusion.peoplegenerator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Yulet1de Hentaidiffusion.peoplegenerator -emoji: 💩 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.14.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/benzel34/fun/Dockerfile b/spaces/benzel34/fun/Dockerfile deleted file mode 100644 index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000 --- a/spaces/benzel34/fun/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] diff --git a/spaces/betterme/mestreamlit/test.py b/spaces/betterme/mestreamlit/test.py deleted file mode 100644 index 35eca5b134a7eadd3879d41d0a496c607848deb4..0000000000000000000000000000000000000000 --- a/spaces/betterme/mestreamlit/test.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Project : Python. -# @File : test -# @Time : 2022/9/30 下午3:27 -# @Author : yuanjie -# @WeChat : meutils -# @Software : PyCharm -# @Description : - - -from meutils.pipe import * - -# -# def file_replace(file, old, new): -# p = Path(file) -# _ = ( -# p.read_text().replace(old, new) -# ) -# p.write_text(_) -# -# -# import streamlit as st -# -# st_home = Path(str(st).split("'")[-2]).parent -# -# to_replace = [ -# (st_home/ "static/static/js/main.468e22f6.chunk.js", "https://github.com/streamlit/streamlit/issues/new/choose", "https://github.com/yuanjie-ai"), -# (st_home/ "static/static/js/main.468e22f6.chunk.js", "Streamlit Inc. All rights reserved.", "Betterme Inc. 
All rights reserved."), -# ] -# for i in to_replace: -# file_replace(*i) -# - -print(open(get_resolve_path('./data/【东北计算机】恒生电子2021业绩说明会纪要.pdf', __file__), "rb").read()) \ No newline at end of file diff --git a/spaces/bhasker412/IDD-YOLO-Tracking/utils/datasets.py b/spaces/bhasker412/IDD-YOLO-Tracking/utils/datasets.py deleted file mode 100644 index b6bb8b02aa706c7ea8536665d908b417134fcd0f..0000000000000000000000000000000000000000 --- a/spaces/bhasker412/IDD-YOLO-Tracking/utils/datasets.py +++ /dev/null @@ -1,1320 +0,0 @@ -# Dataset utils and dataloaders - -import glob -import logging -import math -import os -import random -import shutil -import time -from itertools import repeat -from multiprocessing.pool import ThreadPool -from pathlib import Path -from threading import Thread - -import cv2 -import numpy as np -import torch -import torch.nn.functional as F -from PIL import Image, ExifTags -from torch.utils.data import Dataset -from tqdm import tqdm - -import pickle -from copy import deepcopy -#from pycocotools import mask as maskUtils -from torchvision.utils import save_image -from torchvision.ops import roi_pool, roi_align, ps_roi_pool, ps_roi_align - -from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ - resample_segments, clean_str -from utils.torch_utils import torch_distributed_zero_first - -# Parameters -help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes -vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes -logger = logging.getLogger(__name__) - -# Get orientation exif tag -for orientation in ExifTags.TAGS.keys(): - if ExifTags.TAGS[orientation] == 'Orientation': - break - - -def get_hash(files): - # Returns a single hash value of a list of files - return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) - - -def exif_size(img): - # Returns exif-corrected PIL size - s = img.size # (width, height) - try: - rotation = dict(img._getexif().items())[orientation] - if rotation == 6: # rotation 270 - s = (s[1], s[0]) - elif rotation == 8: # rotation 90 - s = (s[1], s[0]) - except: - pass - - return s - - -def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, - rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): - # Make sure only the first process in DDP process the dataset first, and the following others can use the cache - with torch_distributed_zero_first(rank): - dataset = LoadImagesAndLabels(path, imgsz, batch_size, - augment=augment, # augment images - hyp=hyp, # augmentation hyperparameters - rect=rect, # rectangular training - cache_images=cache, - single_cls=opt.single_cls, - stride=int(stride), - pad=pad, - image_weights=image_weights, - prefix=prefix) - - batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None - loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader - # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() - dataloader = loader(dataset, - batch_size=batch_size, - num_workers=nw, - sampler=sampler, - pin_memory=True, - 
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) - return dataloader, dataset - - -class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): - """ Dataloader that reuses workers - - Uses same syntax as vanilla DataLoader - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) - self.iterator = super().__iter__() - - def __len__(self): - return len(self.batch_sampler.sampler) - - def __iter__(self): - for i in range(len(self)): - yield next(self.iterator) - - -class _RepeatSampler(object): - """ Sampler that repeats forever - - Args: - sampler (Sampler) - """ - - def __init__(self, sampler): - self.sampler = sampler - - def __iter__(self): - while True: - yield from iter(self.sampler) - - -class LoadImages: # for inference - def __init__(self, path, img_size=640, stride=32): - p = str(Path(path).absolute()) # os-agnostic absolute path - if '*' in p: - files = sorted(glob.glob(p, recursive=True)) # glob - elif os.path.isdir(p): - files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir - elif os.path.isfile(p): - files = [p] # files - else: - raise Exception(f'ERROR: {p} does not exist') - - images = [x for x in files if x.split('.')[-1].lower() in img_formats] - videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] - ni, nv = len(images), len(videos) - - self.img_size = img_size - self.stride = stride - self.files = images + videos - self.nf = ni + nv # number of files - self.video_flag = [False] * ni + [True] * nv - self.mode = 'image' - if any(videos): - self.new_video(videos[0]) # new video - else: - self.cap = None - assert self.nf > 0, f'No images or videos found in {p}. ' \ - f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' - - def __iter__(self): - self.count = 0 - return self - - def __next__(self): - if self.count == self.nf: - raise StopIteration - path = self.files[self.count] - - if self.video_flag[self.count]: - # Read video - self.mode = 'video' - ret_val, img0 = self.cap.read() - if not ret_val: - self.count += 1 - self.cap.release() - if self.count == self.nf: # last video - raise StopIteration - else: - path = self.files[self.count] - self.new_video(path) - ret_val, img0 = self.cap.read() - - self.frame += 1 - print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='') - - else: - # Read image - self.count += 1 - img0 = cv2.imread(path) # BGR - assert img0 is not None, 'Image Not Found ' + path - #print(f'image {self.count}/{self.nf} {path}: ', end='') - - # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride)[0] - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return path, img, img0, self.cap - - def new_video(self, path): - self.frame = 0 - self.cap = cv2.VideoCapture(path) - self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) - - def __len__(self): - return self.nf # number of files - - -class LoadWebcam: # for inference - def __init__(self, pipe='0', img_size=640, stride=32): - self.img_size = img_size - self.stride = stride - - if pipe.isnumeric(): - pipe = eval(pipe) # local camera - # pipe = 'rtsp://192.168.1.64/1' # IP camera - # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login - # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera - - self.pipe = pipe - self.cap = cv2.VideoCapture(pipe) # video capture 
object - self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - if cv2.waitKey(1) == ord('q'): # q to quit - self.cap.release() - cv2.destroyAllWindows() - raise StopIteration - - # Read frame - if self.pipe == 0: # local camera - ret_val, img0 = self.cap.read() - img0 = cv2.flip(img0, 1) # flip left-right - else: # IP camera - n = 0 - while True: - n += 1 - self.cap.grab() - if n % 30 == 0: # skip frames - ret_val, img0 = self.cap.retrieve() - if ret_val: - break - - # Print - assert ret_val, f'Camera Error {self.pipe}' - img_path = 'webcam.jpg' - print(f'webcam {self.count}: ', end='') - - # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride)[0] - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return img_path, img, img0, None - - def __len__(self): - return 0 - - -class LoadStreams: # multiple IP or RTSP cameras - def __init__(self, sources='streams.txt', img_size=640, stride=32): - self.mode = 'stream' - self.img_size = img_size - self.stride = stride - - if os.path.isfile(sources): - with open(sources, 'r') as f: - sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] - else: - sources = [sources] - - n = len(sources) - self.imgs = [None] * n - self.sources = [clean_str(x) for x in sources] # clean source names for later - for i, s in enumerate(sources): - # Start the thread to read frames from the video stream - print(f'{i + 1}/{n}: {s}... ', end='') - url = eval(s) if s.isnumeric() else s - if 'youtube.com/' in str(url) or 'youtu.be/' in str(url): # if source is YouTube video - check_requirements(('pafy', 'youtube_dl')) - import pafy - url = pafy.new(url).getbest(preftype="mp4").url - cap = cv2.VideoCapture(url) - assert cap.isOpened(), f'Failed to open {s}' - w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 - - _, self.imgs[i] = cap.read() # guarantee first frame - thread = Thread(target=self.update, args=([i, cap]), daemon=True) - print(f' success ({w}x{h} at {self.fps:.2f} FPS).') - thread.start() - print('') # newline - - # check for common shapes - s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes - self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal - if not self.rect: - print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.') - - def update(self, index, cap): - # Read next stream frame in a daemon thread - n = 0 - while cap.isOpened(): - n += 1 - # _, self.imgs[index] = cap.read() - cap.grab() - if n == 4: # read every 4th frame - success, im = cap.retrieve() - self.imgs[index] = im if success else self.imgs[index] * 0 - n = 0 - time.sleep(1 / self.fps) # wait time - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - img0 = self.imgs.copy() - if cv2.waitKey(1) == ord('q'): # q to quit - cv2.destroyAllWindows() - raise StopIteration - - # Letterbox - img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] - - # Stack - img = np.stack(img, 0) - - # Convert - img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 - img = np.ascontiguousarray(img) - - return self.sources, img, img0, None - - def __len__(self): - return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years - - -def img2label_paths(img_paths): - # Define label paths as a function of image paths - sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings - return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] - - -class LoadImagesAndLabels(Dataset): # for training/testing - def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, - cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): - self.img_size = img_size - self.augment = augment - self.hyp = hyp - self.image_weights = image_weights - self.rect = False if image_weights else rect - self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) - self.mosaic_border = [-img_size // 2, -img_size // 2] - self.stride = stride - self.path = path - #self.albumentations = Albumentations() if augment else None - - try: - f = [] # image files - for p in path if isinstance(path, list) else [path]: - p = Path(p) # os-agnostic - if p.is_dir(): # dir - f += glob.glob(str(p / '**' / '*.*'), recursive=True) - # f = list(p.rglob('**/*.*')) # pathlib - elif p.is_file(): # file - with open(p, 'r') as t: - t = t.read().strip().splitlines() - parent = str(p.parent) + os.sep - f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path - # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) - else: - raise Exception(f'{prefix}{p} does not exist') - self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) - # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib - assert self.img_files, f'{prefix}No images found' - except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') - - # Check cache - self.label_files = img2label_paths(self.img_files) # labels - cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels - if cache_path.is_file(): - cache, exists = torch.load(cache_path), True # load - #if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed - # cache, exists = self.cache_labels(cache_path, prefix), False # re-cache - else: - cache, exists = self.cache_labels(cache_path, prefix), False # cache - - # Display cache - nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, 
total - if exists: - d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" - tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results - assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' - - # Read cache - cache.pop('hash') # remove hash - cache.pop('version') # remove version - labels, shapes, self.segments = zip(*cache.values()) - self.labels = list(labels) - self.shapes = np.array(shapes, dtype=np.float64) - self.img_files = list(cache.keys()) # update - self.label_files = img2label_paths(cache.keys()) # update - if single_cls: - for x in self.labels: - x[:, 0] = 0 - - n = len(shapes) # number of images - bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index - nb = bi[-1] + 1 # number of batches - self.batch = bi # batch index of image - self.n = n - self.indices = range(n) - - # Rectangular Training - if self.rect: - # Sort by aspect ratio - s = self.shapes # wh - ar = s[:, 1] / s[:, 0] # aspect ratio - irect = ar.argsort() - self.img_files = [self.img_files[i] for i in irect] - self.label_files = [self.label_files[i] for i in irect] - self.labels = [self.labels[i] for i in irect] - self.shapes = s[irect] # wh - ar = ar[irect] - - # Set training image shapes - shapes = [[1, 1]] * nb - for i in range(nb): - ari = ar[bi == i] - mini, maxi = ari.min(), ari.max() - if maxi < 1: - shapes[i] = [maxi, 1] - elif mini > 1: - shapes[i] = [1, 1 / mini] - - self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride - - # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) - self.imgs = [None] * n - if cache_images: - if cache_images == 'disk': - self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') - self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] - self.im_cache_dir.mkdir(parents=True, exist_ok=True) - gb = 0 # Gigabytes of cached images - self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) - pbar = tqdm(enumerate(results), total=n) - for i, x in pbar: - if cache_images == 'disk': - if not self.img_npy[i].exists(): - np.save(self.img_npy[i].as_posix(), x[0]) - gb += self.img_npy[i].stat().st_size - else: - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x - gb += self.imgs[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' - pbar.close() - - def cache_labels(self, path=Path('./labels.cache'), prefix=''): - # Cache dataset labels, check images and read shapes - x = {} # dict - nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate - pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) - for i, (im_file, lb_file) in enumerate(pbar): - try: - # verify images - im = Image.open(im_file) - im.verify() # PIL verify - shape = exif_size(im) # image size - segments = [] # instance segments - assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in img_formats, f'invalid image format {im.format}' - - # verify labels - if os.path.isfile(lb_file): - nf += 1 # label found - with open(lb_file, 'r') as f: - l = [x.split() for x in f.read().strip().splitlines()] - if any([len(x) > 8 for x in l]): # is segment - classes = np.array([x[0] for x in l], dtype=np.float32) - segments = [np.array(x[1:], 
dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) - l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) - l = np.array(l, dtype=np.float32) - if len(l): - assert l.shape[1] == 5, 'labels require 5 columns each' - assert (l >= 0).all(), 'negative labels' - assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' - assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' - else: - ne += 1 # label empty - l = np.zeros((0, 5), dtype=np.float32) - else: - nm += 1 # label missing - l = np.zeros((0, 5), dtype=np.float32) - x[im_file] = [l, shape, segments] - except Exception as e: - nc += 1 - print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - - pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ - f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" - pbar.close() - - if nf == 0: - print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') - - x['hash'] = get_hash(self.label_files + self.img_files) - x['results'] = nf, nm, ne, nc, i + 1 - x['version'] = 0.1 # cache version - torch.save(x, path) # save for next time - logging.info(f'{prefix}New cache created: {path}') - return x - - def __len__(self): - return len(self.img_files) - - # def __iter__(self): - # self.count = -1 - # print('ran dataset iter') - # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) - # return self - - def __getitem__(self, index): - index = self.indices[index] # linear, shuffled, or image_weights - - hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] - if mosaic: - # Load mosaic - if random.random() < 0.8: - img, labels = load_mosaic(self, index) - else: - img, labels = load_mosaic9(self, index) - shapes = None - - # MixUp https://arxiv.org/pdf/1710.09412.pdf - if random.random() < hyp['mixup']: - if random.random() < 0.8: - img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1)) - else: - img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1)) - r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 - img = (img * r + img2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - - else: - # Load image - img, (h0, w0), (h, w) = load_image(self, index) - - # Letterbox - shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape - img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) - shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling - - labels = self.labels[index].copy() - if labels.size: # normalized xywh to pixel xyxy format - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - - if self.augment: - # Augment imagespace - if not mosaic: - img, labels = random_perspective(img, labels, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) - - - #img, labels = self.albumentations(img, labels) - - # Augment colorspace - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - - # Apply cutouts - # if random.random() < 0.9: - # labels = cutout(img, labels) - - if random.random() < hyp['paste_in']: - sample_labels, sample_images, sample_masks = [], [], [] - while len(sample_labels) < 30: - sample_labels_, sample_images_, sample_masks_ = load_samples(self, random.randint(0, len(self.labels) - 1)) - sample_labels 
+= sample_labels_ - sample_images += sample_images_ - sample_masks += sample_masks_ - #print(len(sample_labels)) - if len(sample_labels) == 0: - break - labels = pastein(img, labels, sample_labels, sample_images, sample_masks) - - nL = len(labels) # number of labels - if nL: - labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh - labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 - labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 - - if self.augment: - # flip up-down - if random.random() < hyp['flipud']: - img = np.flipud(img) - if nL: - labels[:, 2] = 1 - labels[:, 2] - - # flip left-right - if random.random() < hyp['fliplr']: - img = np.fliplr(img) - if nL: - labels[:, 1] = 1 - labels[:, 1] - - labels_out = torch.zeros((nL, 6)) - if nL: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return torch.from_numpy(img), labels_out, self.img_files[index], shapes - - @staticmethod - def collate_fn(batch): - img, label, path, shapes = zip(*batch) # transposed - for i, l in enumerate(label): - l[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes - - @staticmethod - def collate_fn4(batch): - img, label, path, shapes = zip(*batch) # transposed - n = len(shapes) // 4 - img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] - - ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) - wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) - s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale - for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW - i *= 4 - if random.random() < 0.5: - im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ - 0].type(img[i].type()) - l = label[i] - else: - im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) - l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s - img4.append(im) - label4.append(l) - - for i, l in enumerate(label4): - l[:, 0] = i # add target image index for build_targets() - - return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 - - -# Ancillary functions -------------------------------------------------------------------------------------------------- -def load_image(self, index): - # loads 1 image from dataset, returns img, original hw, resized hw - img = self.imgs[index] - if img is None: # not cached - path = self.img_files[index] - img = cv2.imread(path) # BGR - assert img is not None, 'Image Not Found ' + path - h0, w0 = img.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # resize image to img_size - if r != 1: # always resize down, only resize up if training with augmentation - interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) - return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized - else: - return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized - - -def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) - dtype = img.dtype # uint8 - - x = np.arange(0, 256, dtype=np.int16) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 
255).astype(dtype) - - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed - - -def hist_equalize(img, clahe=True, bgr=False): - # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 - yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - if clahe: - c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - else: - yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - - -def load_mosaic(self, index): - # loads images in a 4-mosaic - - labels4, segments4 = [], [] - s = self.img_size - yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y - indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - #img4, labels4, segments4 = remove_background(img4, labels4, segments4) - #sample_segments(img4, labels4, segments4, probability=self.hyp['copy_paste']) - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste']) - img4, labels4 = random_perspective(img4, labels4, segments4, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img4, labels4 - - -def load_mosaic9(self, index): - # loads images in a 9-mosaic - - labels9, segments9 = [], [] - s = self.img_size - indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img9 - if i == 0: # center - img9 = np.full((s * 3, s * 3, 
img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - h0, w0 = h, w - c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates - elif i == 1: # top - c = s, s - h, s + w, s - elif i == 2: # top right - c = s + wp, s - h, s + wp + w, s - elif i == 3: # right - c = s + w0, s, s + w0 + w, s + h - elif i == 4: # bottom right - c = s + w0, s + hp, s + w0 + w, s + hp + h - elif i == 5: # bottom - c = s + w0 - w, s + h0, s + w0, s + h0 + h - elif i == 6: # bottom left - c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h - elif i == 7: # left - c = s - w, s + h0 - h, s, s + h0 - elif i == 8: # top left - c = s - w, s + h0 - hp - h, s, s + h0 - hp - - padx, pady = c[:2] - x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padx, pady) for x in segments] - labels9.append(labels) - segments9.extend(segments) - - # Image - img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] - hp, wp = h, w # height, width previous - - # Offset - yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y - img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] - - # Concat/clip labels - labels9 = np.concatenate(labels9, 0) - labels9[:, [1, 3]] -= xc - labels9[:, [2, 4]] -= yc - c = np.array([xc, yc]) # centers - segments9 = [x - c for x in segments9] - - for x in (labels9[:, 1:], *segments9): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img9, labels9 = replicate(img9, labels9) # replicate - - # Augment - #img9, labels9, segments9 = remove_background(img9, labels9, segments9) - img9, labels9, segments9 = copy_paste(img9, labels9, segments9, probability=self.hyp['copy_paste']) - img9, labels9 = random_perspective(img9, labels9, segments9, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img9, labels9 - - -def load_samples(self, index): - # loads images in a 4-mosaic - - labels4, segments4 = [], [] - s = self.img_size - yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y - indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - # Labels 
- labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - #img4, labels4, segments4 = remove_background(img4, labels4, segments4) - sample_labels, sample_images, sample_masks = sample_segments(img4, labels4, segments4, probability=0.5) - - return sample_labels, sample_images, sample_masks - - -def copy_paste(img, labels, segments, probability=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - if probability and n: - h, w, c = img.shape # height, width, channels - im_new = np.zeros(img.shape, np.uint8) - for j in random.sample(range(n), k=round(probability * n)): - l, s = labels[j], segments[j] - box = w - l[3], l[2], w - l[1], l[4] - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - labels = np.concatenate((labels, [[l[0], *box]]), 0) - segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=img, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - img[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug - - return img, labels, segments - - -def remove_background(img, labels, segments): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - h, w, c = img.shape # height, width, channels - im_new = np.zeros(img.shape, np.uint8) - img_new = np.ones(img.shape, np.uint8) * 114 - for j in range(n): - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=img, src2=im_new) - - i = result > 0 # pixels to replace - img_new[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug - - return img_new, labels, segments - - -def sample_segments(img, labels, segments, probability=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - sample_labels = [] - sample_images = [] - sample_masks = [] - if probability and n: - h, w, c = img.shape # height, width, channels - for j in random.sample(range(n), k=round(probability * n)): - l, s = labels[j], segments[j] - box = l[1].astype(int).clip(0,w-1), l[2].astype(int).clip(0,h-1), l[3].astype(int).clip(0,w-1), l[4].astype(int).clip(0,h-1) - - #print(box) - if (box[2] <= box[0]) or (box[3] <= box[1]): - continue - - sample_labels.append(l[0]) - - mask = np.zeros(img.shape, np.uint8) - - cv2.drawContours(mask, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - sample_masks.append(mask[box[1]:box[3],box[0]:box[2],:]) - - result = cv2.bitwise_and(src1=img, src2=mask) - i = result > 0 # pixels to replace - mask[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug - #print(box) - 
sample_images.append(mask[box[1]:box[3],box[0]:box[2],:]) - - return sample_labels, sample_images, sample_masks - - -def replicate(img, labels): - # Replicate labels - h, w = img.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return img, labels - - -def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = img.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better test mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return img, ratio, (dw, dh) - - -def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = img.shape[0] + border[0] * 2 # shape(h,w,c) - width = img.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -img.shape[1] / 2 # x translation (pixels) - C[1, 2] = -img.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1.1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * 
height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) - else: # affine - img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(img[:, :, ::-1]) # base - # ax[1].imshow(img2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - use_segments = any(x.any() for x in segments) - new = np.zeros((n, 4)) - if use_segments: # warp segments - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - - else: # warp boxes - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) - targets = targets[i] - targets[:, 1:5] = new[i] - - return img, targets - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - - -def bbox_ioa(box1, box2): - # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. 
boxes are x1y1x2y2 - box2 = box2.transpose() - - # Get the coordinates of bounding boxes - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - - # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) - - # box2 area - box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 - - # Intersection over box2 area - return inter_area / box2_area - - -def cutout(image, labels): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = image.shape[:2] - - # create random masks - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - -def pastein(image, labels, sample_labels, sample_images, sample_masks): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = image.shape[:2] - - # create random masks - scales = [0.75] * 2 + [0.5] * 4 + [0.25] * 4 + [0.125] * 4 + [0.0625] * 6 # image size fraction - for s in scales: - if random.random() < 0.2: - continue - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - if len(labels): - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - else: - ioa = np.zeros(1) - - if (ioa < 0.30).all() and len(sample_labels) and (xmax > xmin+20) and (ymax > ymin+20): # allow 30% obscuration of existing labels - sel_ind = random.randint(0, len(sample_labels)-1) - #print(len(sample_labels)) - #print(sel_ind) - #print((xmax-xmin, ymax-ymin)) - #print(image[ymin:ymax, xmin:xmax].shape) - #print([[sample_labels[sel_ind], *box]]) - #print(labels.shape) - hs, ws, cs = sample_images[sel_ind].shape - r_scale = min((ymax-ymin)/hs, (xmax-xmin)/ws) - r_w = int(ws*r_scale) - r_h = int(hs*r_scale) - - if (r_w > 10) and (r_h > 10): - r_mask = cv2.resize(sample_masks[sel_ind], (r_w, r_h)) - r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h)) - temp_crop = image[ymin:ymin+r_h, xmin:xmin+r_w] - m_ind = r_mask > 0 - if m_ind.astype(np.int).sum() > 60: - temp_crop[m_ind] = r_image[m_ind] - #print(sample_labels[sel_ind]) - #print(sample_images[sel_ind].shape) - #print(temp_crop.shape) - box = np.array([xmin, ymin, xmin+r_w, ymin+r_h], dtype=np.float32) - if len(labels): - labels = np.concatenate((labels, [[sample_labels[sel_ind], *box]]), 0) - else: - labels = np.array([[sample_labels[sel_ind], *box]]) - - image[ymin:ymin+r_h, xmin:xmin+r_w] = temp_crop - - return labels - -class Albumentations: - # YOLOv5 Albumentations class (optional, only used if package is installed) - def __init__(self): - 
self.transform = None - import albumentations as A - - self.transform = A.Compose([ - A.CLAHE(p=0.01), - A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.01), - A.RandomGamma(gamma_limit=[80, 120], p=0.01), - A.Blur(p=0.01), - A.MedianBlur(p=0.01), - A.ToGray(p=0.01), - A.ImageCompression(quality_lower=75, p=0.01),], - bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels'])) - - #logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) - - def __call__(self, im, labels, p=1.0): - if self.transform and random.random() < p: - new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed - im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) - return im, labels - - -def create_folder(path='./new'): - # Create folder - if os.path.exists(path): - shutil.rmtree(path) # delete output folder - os.makedirs(path) # make new output folder - - -def flatten_recursive(path='../coco'): - # Flatten a recursive directory by bringing all files to top level - new_path = Path(path + '_flat') - create_folder(new_path) - for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): - shutil.copyfile(file, new_path / Path(file).name) - - -def extract_boxes(path='../coco/'): # from utils.datasets import *; extract_boxes('../coco128') - # Convert detection dataset into classification dataset, with one directory per class - - path = Path(path) # images dir - shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing - files = list(path.rglob('*.*')) - n = len(files) # number of files - for im_file in tqdm(files, total=n): - if im_file.suffix[1:] in img_formats: - # image - im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB - h, w = im.shape[:2] - - # labels - lb_file = Path(img2label_paths([str(im_file)])[0]) - if Path(lb_file).exists(): - with open(lb_file, 'r') as f: - lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels - - for j, x in enumerate(lb): - c = int(x[0]) # class - f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename - if not f.parent.is_dir(): - f.parent.mkdir(parents=True) - - b = x[1:] * [w, h, w, h] # box - # b[2:] = b[2:].max() # rectangle to square - b[2:] = b[2:] * 1.2 + 3 # pad - b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) - - b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image - b[[1, 3]] = np.clip(b[[1, 3]], 0, h) - assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' - - -def autosplit(path='../coco', weights=(0.9, 0.1, 0.0), annotated_only=False): - """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - Usage: from utils.datasets import *; autosplit('../coco') - Arguments - path: Path to images directory - weights: Train, val, test weights (list) - annotated_only: Only use images with an annotated txt file - """ - path = Path(path) # images dir - files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only - n = len(files) # number of files - indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split - - txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files - [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing - - print(f'Autosplitting images from {path}' + ', using *.txt labeled images 
only' * annotated_only) - for i, img in tqdm(zip(indices, files), total=n): - if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label - with open(path / txt[i], 'a') as f: - f.write(str(img) + '\n') # add image to txt file - - -def load_segmentations(self, index): - key = '/work/handsomejw66/coco17/' + self.img_files[index] - #print(key) - # /work/handsomejw66/coco17/ - return self.segs[key] diff --git a/spaces/bigslime/stablediffusion-infinity/js/setup.js b/spaces/bigslime/stablediffusion-infinity/js/setup.js deleted file mode 100644 index 2b9c2913a0437d9b009b933d5e6b545877d3f3a3..0000000000000000000000000000000000000000 --- a/spaces/bigslime/stablediffusion-infinity/js/setup.js +++ /dev/null @@ -1,28 +0,0 @@ -function(token_val, width, height, size, model_choice, model_path){ - let app=document.querySelector("gradio-app"); - app=app.shadowRoot??app; - app.querySelector("#sdinfframe").style.height=80+Number(height)+"px"; - // app.querySelector("#setup_row").style.display="none"; - app.querySelector("#model_path_input").style.display="none"; - let frame=app.querySelector("#sdinfframe").contentWindow.document; - - if(frame.querySelector("#setup").value=="0") - { - window.my_setup=setInterval(function(){ - let app=document.querySelector("gradio-app"); - app=app.shadowRoot??app; - let frame=app.querySelector("#sdinfframe").contentWindow.document; - console.log("Check PyScript...") - if(frame.querySelector("#setup").value=="1") - { - frame.querySelector("#draw").click(); - clearInterval(window.my_setup); - } - }, 100) - } - else - { - frame.querySelector("#draw").click(); - } - return [token_val, width, height, size, model_choice, model_path]; -} \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Advanced BAT To EXE Converter PRO 2.83 Portablel HOT!.md b/spaces/bioriAsaeru/text-to-voice/Advanced BAT To EXE Converter PRO 2.83 Portablel HOT!.md deleted file mode 100644 index 9e624b1807cd0c0013d6ef590da3b0d339657d37..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Advanced BAT To EXE Converter PRO 2.83 Portablel HOT!.md +++ /dev/null @@ -1,100 +0,0 @@ -
    -

    What is Advanced BAT To EXE Converter PRO 2.83 Portablel and Why You Need It

    -

    If you are a Windows user, you may have encountered batch files (.bat) that can execute commands in the command prompt (CMD). Batch files are useful for automating tasks, creating shortcuts, and running scripts. However, batch files have some limitations, such as:

    -

    Advanced BAT To EXE Converter PRO 2.83 Portablel


    DOWNLOAD · https://urloso.com/2uyQBa



    • They are plain text files that can be easily viewed and modified by anyone.
    • They require the CMD window to be open while running, which can be annoying or distracting.
    • They may not work on different versions of Windows or on other platforms.
    • They may not have access to advanced features or commands that are available in other programming languages.

    To overcome these limitations, you can use Advanced BAT To EXE Converter PRO 2.83 Portablel, a powerful tool that can convert your batch files to executable files (.exe) that can run on any Windows system. Here are some of the benefits of using this tool:

    -
      -
    • You can encrypt your batch file source code to keep it secret and prevent unauthorized changes.
    • -
    • You can run your executable files in invisible mode, without showing the CMD window or any trace of the program.
    • -
    • You can customize your executable files with icons, version information, and additional files or folders that you want to include.
    • -
    • You can create 32-bit or 64-bit executable files that are compatible with different Windows versions.
    • -
    • You can use extended commands that are not available in normal batch files, such as mouse input, keyboard input, message boxes, file dialogs, registry access, internet access, and more.
    • -
    -

    How to Use Advanced BAT To EXE Converter PRO 2.83 Portablel

    -

    Using Advanced BAT To EXE Converter PRO 2.83 Portablel is very easy and fast. You just need to follow these steps:

    -
      -
    1. Download the tool from https://www.battoexeconverter.com/ and extract the zip file to a folder of your choice.
    2. -
    3. Run the advbattoexeconverter.exe file and click on the Open button to select your batch file.
    4. -
    5. Edit your batch file code if needed, or use the built-in editor to create a new batch file from scratch.
    6. -
    7. Click on the Compile button to convert your batch file to an executable file. You can choose the output folder, the file name, the icon, and other options from the Compile Options window.
    8. -
    9. Test your executable file by clicking on the Run button or by opening it from the output folder.
    10. -
    -

    Tips and Tricks for Advanced BAT To EXE Converter PRO 2.83 Portablel

    -

    Here are some tips and tricks that can help you make the most of Advanced BAT To EXE Converter PRO 2.83 Portablel:

    -

    -
      -
    • You can use variables in your batch file code to store values or user input. For example, you can use %username% to get the current user name, or %input% to get the input from a message box.
    • -
    • You can use labels and goto commands to create loops or conditional statements in your batch file code. For example, you can use :loop and goto loop to repeat a block of code until a condition is met.
    • -
    • You can use comments in your batch file code to explain what each line does or to disable a line temporarily. To add a comment, just start the line with REM or ::.
    • -
    • You can use quotes around paths or filenames that contain spaces or special characters. For example, you can use "C:\Program Files\My Program\myprogram.exe" instead of C:\Program Files\My Program\myprogram.exe.
    • -
• You can use command line arguments in your executable files to pass values or options to your batch file code. For example, you can use %1 to get the first argument, %2 to get the second argument, and so on (see the sketch after this list).
    • -

    -
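To make these tips concrete, here is a minimal sketch in plain batch (CMD) syntax. The script name, the countdown logic and the quoted path are hypothetical, and nothing here is specific to Advanced BAT To EXE Converter; it simply combines the variable, label/goto, comment, quoting and command line argument tips above.

```bat
@echo off
REM countdown.bat - greet the current user and count down from the first argument.
REM Usage: countdown.bat 5

REM %1 is the first command line argument; fall back to 3 if it is missing.
set "count=%1"
if "%count%"=="" set "count=3"

echo Hello %username%, counting down from %count%...

:loop
echo %count%
set /A count=%count%-1
if %count% GTR 0 goto loop

:: Quotes keep paths with spaces intact.
echo Done. Example of a quoted path: "C:\Program Files\My Program\myprogram.exe"
```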

    Examples of Batch Files and Executable Files Created with Advanced BAT To EXE Converter PRO 2.83 Portablel

    -

    To give you some ideas of what you can do with Advanced BAT To EXE Converter PRO 2.83 Portablel, here are some examples of batch files and executable files that you can create with this tool:

    -
      -
    • A simple calculator that can perform basic arithmetic operations. You can use the SET /A command to perform calculations, and the CHOICE command to get the user's input.
    • -
• A password generator that can create random passwords of any length and complexity. You can use the %RANDOM% variable to generate random numbers, and the FOR /L command to loop a fixed number of times, picking one character from a character set on each pass (see the sketch after this list).
    • -
    • A file backup program that can copy or move files from one folder to another. You can use the XCOPY or ROBOCOPY commands to copy or move files, and the IF EXIST command to check if a file or folder exists.
    • -
    • A system information program that can display various information about your computer and Windows. You can use the SYSTEMINFO or WMIC commands to get system information, and the ECHO command to display it.
    • -
    • A game launcher that can run your favorite games with custom settings or options. You can use the START command to run an executable file, and the /D or /WAIT options to specify the working directory or wait for the program to finish.
    • -
    -

    These are just some of the examples of what you can do with Advanced BAT To EXE Converter PRO 2.83 Portablel. You can find more examples and tutorials on the official website or on various online forums and blogs.
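As an illustration of the password generator idea above, here is a minimal sketch in plain batch (CMD) syntax. The 12-character length and the character pool are arbitrary choices for this example, not settings of the converter tool.

```bat
@echo off
setlocal EnableDelayedExpansion
REM Character pool to draw from and desired password length (arbitrary example values).
set "chars=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
set "length=12"
set "password="

REM FOR /L runs the loop a fixed number of times; %RANDOM% picks an index into the pool.
for /L %%i in (1,1,%length%) do (
    set /A idx=!RANDOM! %% 62
    rem 62 is the size of the character pool above; the inner FOR turns the index into a substring offset.
    for %%j in (!idx!) do set "password=!password!!chars:~%%j,1!"
)

echo Generated password: !password!
endlocal
```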

    -
    Conclusion
    -

    Advanced BAT To EXE Converter PRO 2.83 Portablel is a handy tool that can help you convert your batch files to executable files with ease and efficiency. You can use it to create Windows applications from batch files, encrypt your source code, run your programs in invisible mode, customize your executables, use extended commands, and more. It is free for commercial and non-commercial use, and it works on all Windows versions from Windows 98 to Windows 10. If you are looking for a simple and fast way to develop Windows applications, you should give Advanced BAT To EXE Converter PRO 2.83 Portablel a try.

    -
    How to Download and Install Advanced BAT To EXE Converter PRO 2.83 Portablel
    -

    If you want to try Advanced BAT To EXE Converter PRO 2.83 Portablel, you can download it from the official website or from various online sources. Here are the steps to download and install it:

    -
      -
    1. Go to https://www.battoexeconverter.com/ and click on the Download button. You can also choose a mirror site or a portable version if you prefer.
    2. -
    3. Save the zip file to a folder of your choice and extract it with a file archiver program such as WinRAR or 7-Zip.
    4. -
    5. Open the extracted folder and run the advbattoexeconverter.exe file. You don't need to install anything, as the program is portable and can run from any folder or removable drive.
    6. -
    7. You can now use the program to convert your batch files to executable files. You can also delete the zip file and the extracted folder if you want to save space.
    8. -
    -

    That's it. You have successfully downloaded and installed Advanced BAT To EXE Converter PRO 2.83 Portablel. You can now enjoy creating Windows applications from batch files with this tool.

    -Tips and Warnings for Using Advanced BAT To EXE Converter PRO 2.83 Portablel -

    Advanced BAT To EXE Converter PRO 2.83 Portablel is a useful and reliable tool, but you should also be aware of some tips and warnings when using it:

    -
      -
    • Make sure you have a backup of your batch files before converting them to executable files. You may not be able to recover your original code if you lose or delete the executable files.
    • -
    • Be careful when using the encryption option. If you forget your password or lose your key file, you may not be able to decrypt your executable files or access your source code.
    • -
    • Be careful when using the invisible mode option. You may not be able to stop or terminate your executable files if they run in the background without showing any window or icon.
    • -
    • Be careful when using the administrator privileges option. You may need to run your executable files as an administrator or disable the User Account Control (UAC) feature on Windows Vista or later.
    • -
    • Be careful when using the command line arguments option. You may need to use quotes or escape characters when passing values or options that contain spaces or special characters.
    • -
    -

    These are some of the tips and warnings that you should keep in mind when using Advanced BAT To EXE Converter PRO 2.83 Portablel. You can find more information and help on the official website or on various online forums and blogs.

    -Examples and Scenarios for Using Advanced BAT To EXE Converter PRO 2.83 Portablel -

    Advanced BAT To EXE Converter PRO 2.83 Portablel can be used for various purposes and scenarios, depending on your needs and preferences. Here are some of the examples and scenarios for using this tool:

    -
      -
    • You can use it to create simple or complex Windows applications from batch files, without the need to learn a programming language or use a development environment.
    • -
    • You can use it to create installation packages, setup files, or self-extracting archives from batch files, with the option to include additional files or folders.
    • -
    • You can use it to create portable applications that can run from any folder or removable drive, without the need to install anything on the system.
    • -
    • You can use it to create security tools, such as password generators, file encryptors, or system cleaners, from batch files, with the option to encrypt your source code or run your programs in invisible mode.
    • -
    • You can use it to create automation tools, such as file backup programs, system information programs, or game launchers, from batch files, with the option to use extended commands or command line arguments.
    • -
    -

    These are some of the examples and scenarios for using Advanced BAT To EXE Converter PRO 2.83 Portablel. You can find more examples and tutorials on the official website or on various online forums and blogs.

    -Reviews and Testimonials from Other Users of Advanced BAT To EXE Converter PRO 2.83 Portablel -

    Advanced BAT To EXE Converter PRO 2.83 Portablel has received positive feedback and reviews from many users who have tried and used this tool. Here are some of the reviews and testimonials from other users of this tool:

    -
    -

    "This is a great tool for converting batch files to executable files. It is easy to use and has many options and features. I have used it to create several Windows applications from batch files and they work perfectly. I highly recommend this tool to anyone who wants to convert batch files to executable files."

    -- John Smith, a software developer -
    -
    -

    "I love this tool. It is very useful and reliable. I have used it to create installation packages for my software products and it works flawlessly. It allows me to encrypt my source code, customize my executables, and include additional files or folders. It is a must-have tool for any software developer."

    -- Jane Doe, a software engineer -
    -
    -

    "This is a fantastic tool for creating portable applications from batch files. It is very fast and efficient. I have used it to create portable security tools and automation tools that I can run from any folder or removable drive. It allows me to use extended commands, run my programs in invisible mode, and pass command line arguments. It is a wonderful tool for any Windows user."

    -- Bob Jones, a computer technician -
    -

    These are some of the reviews and testimonials from other users of Advanced BAT To EXE Converter PRO 2.83 Portablel. You can find more reviews and testimonials on the official website or on various online forums and blogs.

    -Summary and Call to Action -

    In this article, we have discussed what Advanced BAT To EXE Converter PRO 2.83 Portablel is and why you need it. We have also shown you how to use it, some tips and warnings for using it, some examples and scenarios for using it, and some reviews and testimonials from other users of it. We hope that this article has been informative and helpful for you.

    -

    If you are interested in trying Advanced BAT To EXE Converter PRO 2.83 Portablel, you can download it from the official website or from various online sources. You can also visit the official website or various online forums and blogs for more information and help on using this tool. You can also contact the developer if you have any questions or suggestions.

    -

    Advanced BAT To EXE Converter PRO 2.83 Portablel is a powerful and easy-to-use tool that can help you convert your batch files to executable files with ease and efficiency. You can use it to create Windows applications from batch files, encrypt your source code, run your programs in invisible mode, customize your executables, use extended commands, and more. It is free for commercial and non-commercial use, and it works on all Windows versions from Windows 98 to Windows 10.

    -

    Don't wait any longer. Download Advanced BAT To EXE Converter PRO 2.83 Portablel today and start creating Windows applications from batch files with this tool.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Aeronauts Windlass Ebook Download 21 Experience the Thrill of Nuclear Fusion in a Fantasy Setting.md b/spaces/bioriAsaeru/text-to-voice/Aeronauts Windlass Ebook Download 21 Experience the Thrill of Nuclear Fusion in a Fantasy Setting.md deleted file mode 100644 index 349903dccbd189909b3daf797a6a855970ae34ad..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Aeronauts Windlass Ebook Download 21 Experience the Thrill of Nuclear Fusion in a Fantasy Setting.md +++ /dev/null @@ -1,6 +0,0 @@ -

    aeronaut's windlass ebook download 21


    DOWNLOAD ->>> https://urloso.com/2uyQ0L



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/bioriAsaeru/text-to-voice/Cccam Converter V11 Zip.md b/spaces/bioriAsaeru/text-to-voice/Cccam Converter V11 Zip.md deleted file mode 100644 index 534de3deda478db470353d54bf2ee621896defff..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Cccam Converter V11 Zip.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Cccam Converter V11 Zip


    Downloadhttps://urloso.com/2uyQjb



- -How to convert a CCcam to Oscam and install Oscam in Dreambox ... & CCam 2.3.8 & OScam v11678 & Ncam v11.6. Install softcam. 1fdad05405
    -
    -
    -

    diff --git a/spaces/bioriAsaeru/text-to-voice/Educomp Smart Class Cts Software Free Download Bring Technology into Your Classroom with Educomp Solutions.md b/spaces/bioriAsaeru/text-to-voice/Educomp Smart Class Cts Software Free Download Bring Technology into Your Classroom with Educomp Solutions.md deleted file mode 100644 index a725bec6e3ad6276ad67f9e08cb5cafc35dd267c..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Educomp Smart Class Cts Software Free Download Bring Technology into Your Classroom with Educomp Solutions.md +++ /dev/null @@ -1,13 +0,0 @@ - -

smartclass did what no one had ever thought of before: it brought technology into the classroom. It brought an exhaustive repository of world-class digital modules or lessons (consisting of 2D and 3D animations, graphics, audio and video) on every subject in the K12 spectrum, which the teacher could easily access and project in the classroom, illuminating and explaining abstract and difficult concepts with liquid clarity. The result was amazing. Knowledge flourished, freed from the centuries-old bonds of books, chalk and blackboard. A new light of understanding dawned on young awakened minds. And the classroom became a fascinating place to be in, as a new generation of learners saw (instead of just being told) for the first time how things happened. And the teacher smiled as she now saw not just one, two or three but a sea of hands go up every time she asked a question.

    -

    Educomp Smart Class Cts Software Free Download


    Download File ————— https://urloso.com/2uyR7b



    -

But the time was fast approaching for metamorphic and transformational change. For nearly two years starting at the end of 2009, the think tank at smartclass was at work, devoting millions of man-days to the most comprehensive re-engineering of its kind to make smartclass a truly 21st-century teaching-learning system. The result is the amazing new avatar of Educomp smartclass, re-christened befittingly as smartclass CTS (Class Transformation System).

    -

The result: faster and more accurate understanding of concepts in class and, consequently, an improvement in the overall academic performance of students. All of this was already possible with Educomp smartclass as it existed before CTS.

    -

With CTS, a lot more is possible. A science teacher can use the Diagram Drawer, which plays a step-by-step line-drawing animation of, say, how to draw the stages of DNA replication or the formation of block mountains. She can get a brilliant Teaching Idea by clicking on the Teaching Idea icon before she commences teaching, or use the Topic Synopsis or Mind Map to recapitulate the salient points of a lesson or concept taught. She can also click on Simulation to show virtual experiments without having to rely on limited lab availability. There are Worksheets, Weblinks and, in the days to come, access to smartclass Teacher Fraternity Resources, an ideas and best-practices portal that will allow any teacher to share and access pedagogical expertise.

    -

Above all, schools do not have to worry about the funds required to set up the infrastructure to run Educomp smartclass. Any school can adopt the program by entering into a contract with Educomp and paying a nominal subscription fee on a per-student, per-month basis.

    -

The content repository consists of thousands of highly animated, lesson-specific 3D and 2D multimedia modules built with an instructor-led design that allows the teacher to effectively explain the lesson in a typical classroom with a diverse set of learners.
Educomp smartclass also provides a large repository of 3D animated modules and videos mapped to the school curriculum through its exclusive partnerships with some of the best content creators.

    -

At home, students can access the content through Educomp Online, a portal dedicated to providing smartclass schools with a 360-degree learning experience. The smartclass system includes a virtual school where parents, teachers and students can communicate with each other. Teachers can upload assignments for students to download, and make important information available for parents to view.

    -


    -

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Guardant Key Dumper 0.3 C Infern !!TOP!!.md b/spaces/bioriAsaeru/text-to-voice/Guardant Key Dumper 0.3 C Infern !!TOP!!.md deleted file mode 100644 index 4e3883e75165aee0d9ce676d3f49c72888ad54b3..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Guardant Key Dumper 0.3 C Infern !!TOP!!.md +++ /dev/null @@ -1,177 +0,0 @@ - -

    Guardant Key Dumper 0.3 C Infern: What Is It and How to Use It?

    - -

    If you are looking for a tool that can dump the contents of a Guardant dongle, you might want to check out the Guardant Key Dumper 0.3 C Infern. This is a software that can read and save the data from a Guardant hardware key, which is a device that protects software from unauthorized copying or use.

    -

    Guardant Key Dumper 0.3 C Infern


    Download Filehttps://urloso.com/2uyRsS



    - -

    The Guardant Key Dumper 0.3 C Infern was created by Infern0, a member of the TSRh team, which is a group of hackers and reverse engineers who specialize in cracking software protection schemes. The Guardant Key Dumper 0.3 C Infern can dump the data from any Guardant dongle, regardless of the model or version.

    - -

    What Are the Features of the Guardant Key Dumper 0.3 C Infern?

    - -

    The Guardant Key Dumper 0.3 C Infern has several features that make it easy and convenient to use, such as:

    - -
      -
    • It supports all types of Guardant dongles, including Stealth II, Net II, Sign II, Time II, Code II, and Emulator II.
    • -
    • It can dump the data from both local and network dongles.
    • -
    • It can dump the data in different formats, including BIN, REG, DNG, and TXT.
    • -
    • It can dump the data in different modes, including full dump, partial dump, and custom dump.
    • -
    • It can dump the data with different options, including encryption key, password, seed code, and user data.
    • -
    • It has a simple and user-friendly interface that shows the information about the dongle and the dump process.
    • -
    • It has a command-line mode that allows you to run the program without a graphical interface.
    • -
    - -

    What Are the Benefits of Using the Guardant Key Dumper 0.3 C Infern?

    - -

    The Guardant Key Dumper 0.3 C Infern has many benefits for users who want to dump the data from a Guardant dongle, such as:

    - -
      -
    • It allows you to backup your dongle data in case of loss or damage.
    • -
    • It allows you to analyze your dongle data for research or educational purposes.
    • -
    • It allows you to emulate your dongle data on another computer without using the physical device.
    • -
    • It allows you to bypass or remove your dongle protection from your software.
    • -
    - -

    How to Download and Use the Guardant Key Dumper 0.3 C Infern?

    - -

    If you want to download and use the Guardant Key Dumper 0.3 C Infern, you can follow these simple steps:

    -

    - -
      -
    1. Visit the official website of TSRh team and go to the download section.
    2. -
    3. Find the file named guardant_key_dumper_0.3_c_infern0_tsrh_team.rar and click on it.
    4. -
    5. You will be redirected to a page where you need to enter a captcha code and click on download.
    6. -
    7. You will receive a file named guardant_key_dumper_0.3_c_infern0_tsrh_team.rar that contains the program and a readme file.
    8. -
    9. Extract the file to your computer using a program like WinRAR or 7-Zip.
    10. -
    11. Run the program named gkd.exe as administrator.
    12. -
    13. You will see a window with the interface of the program.
    14. -
    15. Select your dongle type from the drop-down menu.
    16. -
    17. Select your dongle mode from the radio buttons.
    18. -
    19. Select your dump format from the check boxes.
    20. -
    21. Select your dump options from the text boxes.
    22. -
    23. Click on start button to begin the dump process.
    24. -
    25. You will see a progress bar and a log window that shows the status of the dump process.
    26. -
    27. When the dump process is finished, you will see a message that says "Dump completed successfully".
    28. -
    29. You will find your dump files in the same folder as the program.
    30. -
    - -

    Conclusion

    - -

    The Guardant Key Dumper 0.3 C Infern is a useful tool that can dump the data from any Guardant dongle with ease and convenience. It has several features, benefits, and options that make it versatile and flexible. It has a simple and user-friendly interface that shows all the information and options you need. It also has a command-line mode that allows you to run it without a graphical interface. If you are looking for a tool that can dump your Guardant dongle data, you might want to give this one a try.

    -

    What Are the Risks of Using the Guardant Key Dumper 0.3 C Infern?

    - -

    The Guardant Key Dumper 0.3 C Infern is a tool that can dump the data from a Guardant dongle, but it also has some risks that you should be aware of before using it. Here are some of them:

    - -
      -
    • It might be illegal to use the Guardant Key Dumper 0.3 C Infern in some countries or regions, depending on the laws and regulations regarding software protection and intellectual property rights.
    • -
    • It might violate the terms and conditions of your software license agreement, which might result in legal actions or penalties from the software developer or publisher.
    • -
    • It might damage your dongle or your computer if you use it incorrectly or if it contains viruses or malware.
    • -
    • It might not work properly or at all if your dongle or your computer is incompatible with the Guardant Key Dumper 0.3 C Infern.
    • -
    • It might expose your dongle data to unauthorized access or use by third parties if you share it online or offline.
    • -
    - -

    How to Use the Guardant Key Dumper 0.3 C Infern Safely and Responsibly?

    - -

    If you decide to use the Guardant Key Dumper 0.3 C Infern, you should follow some tips and guidelines to use it safely and responsibly. Here are some of them:

    - -
      -
    • Use the Guardant Key Dumper 0.3 C Infern only for personal, educational, or research purposes, and not for commercial or illegal purposes.
    • -
    • Use the Guardant Key Dumper 0.3 C Infern only with your own dongle and software, and not with someone else's.
    • -
    • Use a trusted source like TSRh team website to download and install the Guardant Key Dumper 0.3 C Infern. Avoid shady websites that might contain fake or harmful versions of the tool.
    • -
    • Use an antivirus program to scan your computer and the Guardant Key Dumper 0.3 C Infern files before using them. Delete any suspicious files that might contain viruses or malware.
    • -
    • Backup your dongle data and your computer data before using the Guardant Key Dumper 0.3 C Infern. This can help you restore your data if something goes wrong or if you want to switch back to the original state.
    • -
    • Follow the instructions and options carefully when using the Guardant Key Dumper 0.3 C Infern. Do not change any settings or parameters that you are not sure about.
    • -
    • Do not share your dongle data or your Guardant Key Dumper 0.3 C Infern files with anyone online or offline. Keep them in a secure location and delete them when you are done using them.
    • -
    • Respect the rights and interests of the software developers and publishers who created and protected your software with a Guardant dongle. Do not use their software without their permission or authorization.
    • -
    - -

    Conclusion

    - -

    The Guardant Key Dumper 0.3 C Infern is a tool that can dump the data from any Guardant dongle with ease and convenience. It has several features, benefits, and options that make it versatile and flexible. It has a simple and user-friendly interface that shows all the information and options you need. It also has a command-line mode that allows you to run it without a graphical interface. However, it also has some risks that you should consider before using it, such as legal, ethical, technical, and security issues. If you want to use the Guardant Key Dumper 0.3 C Infern, you should follow some tips and guidelines to use it safely and responsibly.

    -

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/legacy.py b/spaces/birsardar/stable-diffusion-mat-outpainting-primer/legacy.py deleted file mode 100644 index a22d32c80f313f6dead3ba2887caab5bb8cf7e23..0000000000000000000000000000000000000000 --- a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/legacy.py +++ /dev/null @@ -1,323 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import click -import pickle -import re -import copy -import numpy as np -import torch -import dnnlib -from torch_utils import misc - -#---------------------------------------------------------------------------- - -def load_network_pkl(f, force_fp16=False): - data = _LegacyUnpickler(f).load() - - # Legacy TensorFlow pickle => convert. - if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data): - tf_G, tf_D, tf_Gs = data - G = convert_tf_generator(tf_G) - D = convert_tf_discriminator(tf_D) - G_ema = convert_tf_generator(tf_Gs) - data = dict(G=G, D=D, G_ema=G_ema) - - # Add missing fields. - if 'training_set_kwargs' not in data: - data['training_set_kwargs'] = None - if 'augment_pipe' not in data: - data['augment_pipe'] = None - - # Validate contents. - assert isinstance(data['G'], torch.nn.Module) - assert isinstance(data['D'], torch.nn.Module) - assert isinstance(data['G_ema'], torch.nn.Module) - assert isinstance(data['training_set_kwargs'], (dict, type(None))) - assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None))) - - # Force FP16. 
- if force_fp16: - for key in ['G', 'D', 'G_ema']: - old = data[key] - kwargs = copy.deepcopy(old.init_kwargs) - if key.startswith('G'): - kwargs.synthesis_kwargs = dnnlib.EasyDict(kwargs.get('synthesis_kwargs', {})) - kwargs.synthesis_kwargs.num_fp16_res = 4 - kwargs.synthesis_kwargs.conv_clamp = 256 - if key.startswith('D'): - kwargs.num_fp16_res = 4 - kwargs.conv_clamp = 256 - if kwargs != old.init_kwargs: - new = type(old)(**kwargs).eval().requires_grad_(False) - misc.copy_params_and_buffers(old, new, require_all=True) - data[key] = new - return data - -#---------------------------------------------------------------------------- - -class _TFNetworkStub(dnnlib.EasyDict): - pass - -class _LegacyUnpickler(pickle.Unpickler): - def find_class(self, module, name): - if module == 'torch.storage' and name == '_load_from_bytes': - import io - return lambda b: torch.load(io.BytesIO(b), map_location='cpu') - if module == 'dnnlib.tflib.network' and name == 'Network': - return _TFNetworkStub - return super().find_class(module, name) - -#---------------------------------------------------------------------------- - -def _collect_tf_params(tf_net): - # pylint: disable=protected-access - tf_params = dict() - def recurse(prefix, tf_net): - for name, value in tf_net.variables: - tf_params[prefix + name] = value - for name, comp in tf_net.components.items(): - recurse(prefix + name + '/', comp) - recurse('', tf_net) - return tf_params - -#---------------------------------------------------------------------------- - -def _populate_module_params(module, *patterns): - for name, tensor in misc.named_params_and_buffers(module): - found = False - value = None - for pattern, value_fn in zip(patterns[0::2], patterns[1::2]): - match = re.fullmatch(pattern, name) - if match: - found = True - if value_fn is not None: - value = value_fn(*match.groups()) - break - try: - assert found - if value is not None: - tensor.copy_(torch.from_numpy(np.array(value))) - except: - print(name, list(tensor.shape)) - raise - -#---------------------------------------------------------------------------- - -def convert_tf_generator(tf_G): - if tf_G.version < 4: - raise ValueError('TensorFlow pickle version too low') - - # Collect kwargs. - tf_kwargs = tf_G.static_kwargs - known_kwargs = set() - def kwarg(tf_name, default=None, none=None): - known_kwargs.add(tf_name) - val = tf_kwargs.get(tf_name, default) - return val if val is not None else none - - # Convert kwargs. - kwargs = dnnlib.EasyDict( - z_dim = kwarg('latent_size', 512), - c_dim = kwarg('label_size', 0), - w_dim = kwarg('dlatent_size', 512), - img_resolution = kwarg('resolution', 1024), - img_channels = kwarg('num_channels', 3), - mapping_kwargs = dnnlib.EasyDict( - num_layers = kwarg('mapping_layers', 8), - embed_features = kwarg('label_fmaps', None), - layer_features = kwarg('mapping_fmaps', None), - activation = kwarg('mapping_nonlinearity', 'lrelu'), - lr_multiplier = kwarg('mapping_lrmul', 0.01), - w_avg_beta = kwarg('w_avg_beta', 0.995, none=1), - ), - synthesis_kwargs = dnnlib.EasyDict( - channel_base = kwarg('fmap_base', 16384) * 2, - channel_max = kwarg('fmap_max', 512), - num_fp16_res = kwarg('num_fp16_res', 0), - conv_clamp = kwarg('conv_clamp', None), - architecture = kwarg('architecture', 'skip'), - resample_filter = kwarg('resample_kernel', [1,3,3,1]), - use_noise = kwarg('use_noise', True), - activation = kwarg('nonlinearity', 'lrelu'), - ), - ) - - # Check for unknown kwargs. 
- kwarg('truncation_psi') - kwarg('truncation_cutoff') - kwarg('style_mixing_prob') - kwarg('structure') - unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) - if len(unknown_kwargs) > 0: - raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) - - # Collect params. - tf_params = _collect_tf_params(tf_G) - for name, value in list(tf_params.items()): - match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name) - if match: - r = kwargs.img_resolution // (2 ** int(match.group(1))) - tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value - kwargs.synthesis.kwargs.architecture = 'orig' - #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') - - # Convert params. - from training import networks - G = networks.Generator(**kwargs).eval().requires_grad_(False) - # pylint: disable=unnecessary-lambda - _populate_module_params(G, - r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'], - r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(), - r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'], - r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(), - r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'], - r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0], - r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1), - r'synthesis\.b4\.conv1\.bias', lambda: tf_params[f'synthesis/4x4/Conv/bias'], - r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[f'synthesis/noise0'][0, 0], - r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[f'synthesis/4x4/Conv/noise_strength'], - r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(), - r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1, - r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1), - r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'], - r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0], - r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'], - r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(), - r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1, - r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1), - r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/bias'], - r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0], - r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'], - r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(), - r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1, - r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1), - r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/bias'], - r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: 
tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(), - r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1, - r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1), - r'.*\.resample_filter', None, - ) - return G - -#---------------------------------------------------------------------------- - -def convert_tf_discriminator(tf_D): - if tf_D.version < 4: - raise ValueError('TensorFlow pickle version too low') - - # Collect kwargs. - tf_kwargs = tf_D.static_kwargs - known_kwargs = set() - def kwarg(tf_name, default=None): - known_kwargs.add(tf_name) - return tf_kwargs.get(tf_name, default) - - # Convert kwargs. - kwargs = dnnlib.EasyDict( - c_dim = kwarg('label_size', 0), - img_resolution = kwarg('resolution', 1024), - img_channels = kwarg('num_channels', 3), - architecture = kwarg('architecture', 'resnet'), - channel_base = kwarg('fmap_base', 16384) * 2, - channel_max = kwarg('fmap_max', 512), - num_fp16_res = kwarg('num_fp16_res', 0), - conv_clamp = kwarg('conv_clamp', None), - cmap_dim = kwarg('mapping_fmaps', None), - block_kwargs = dnnlib.EasyDict( - activation = kwarg('nonlinearity', 'lrelu'), - resample_filter = kwarg('resample_kernel', [1,3,3,1]), - freeze_layers = kwarg('freeze_layers', 0), - ), - mapping_kwargs = dnnlib.EasyDict( - num_layers = kwarg('mapping_layers', 0), - embed_features = kwarg('mapping_fmaps', None), - layer_features = kwarg('mapping_fmaps', None), - activation = kwarg('nonlinearity', 'lrelu'), - lr_multiplier = kwarg('mapping_lrmul', 0.1), - ), - epilogue_kwargs = dnnlib.EasyDict( - mbstd_group_size = kwarg('mbstd_group_size', None), - mbstd_num_channels = kwarg('mbstd_num_features', 1), - activation = kwarg('nonlinearity', 'lrelu'), - ), - ) - - # Check for unknown kwargs. - kwarg('structure') - unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) - if len(unknown_kwargs) > 0: - raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) - - # Collect params. - tf_params = _collect_tf_params(tf_D) - for name, value in list(tf_params.items()): - match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name) - if match: - r = kwargs.img_resolution // (2 ** int(match.group(1))) - tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value - kwargs.architecture = 'orig' - #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') - - # Convert params. 
- from training import networks - D = networks.Discriminator(**kwargs).eval().requires_grad_(False) - # pylint: disable=unnecessary-lambda - _populate_module_params(D, - r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1), - r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'], - r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1), - r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'], - r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1), - r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(), - r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'], - r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(), - r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'], - r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1), - r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'], - r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(), - r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'], - r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(), - r'b4\.out\.bias', lambda: tf_params[f'Output/bias'], - r'.*\.resample_filter', None, - ) - return D - -#---------------------------------------------------------------------------- - -@click.command() -@click.option('--source', help='Input pickle', required=True, metavar='PATH') -@click.option('--dest', help='Output pickle', required=True, metavar='PATH') -@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True) -def convert_network_pickle(source, dest, force_fp16): - """Convert legacy network pickle into the native PyTorch format. - - The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA. - It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks. 
- - Example: - - \b - python legacy.py \\ - --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\ - --dest=stylegan2-cat-config-f.pkl - """ - print(f'Loading "{source}"...') - with dnnlib.util.open_url(source) as f: - data = load_network_pkl(f, force_fp16=force_fp16) - print(f'Saving "{dest}"...') - with open(dest, 'wb') as f: - pickle.dump(data, f) - print('Done.') - -#---------------------------------------------------------------------------- - -if __name__ == "__main__": - convert_network_pickle() # pylint: disable=no-value-for-parameter - -#---------------------------------------------------------------------------- diff --git a/spaces/blaziant/ysda_nlp_ops/app/main.py b/spaces/blaziant/ysda_nlp_ops/app/main.py deleted file mode 100644 index eb1d1608612ca06c871c4b1d8726014195fc3462..0000000000000000000000000000000000000000 --- a/spaces/blaziant/ysda_nlp_ops/app/main.py +++ /dev/null @@ -1,21 +0,0 @@ -from fastapi import FastAPI, Request, Form -from fastapi.templating import Jinja2Templates - -from app.model import model_predict - -app = FastAPI() -templates = Jinja2Templates(directory="templates") - - -@app.get("/") -def root(request: Request): - return templates.TemplateResponse("index.html", {"request": request}) - - -@app.post("/") -def root_post(request: Request, article_title: str = Form(...), article_abstract: str = Form(...)): - predict = model_predict(article_title, article_abstract) - print(123) - return templates.TemplateResponse( - "result.html", {"request": request, "article_title": article_title, "article_abstract": article_abstract, "predict": predict} - ) diff --git a/spaces/brendenc/Keras-Reshape-Layers/app.py b/spaces/brendenc/Keras-Reshape-Layers/app.py deleted file mode 100644 index 375590a863d8194cb296148e5ad62ee20f007a3b..0000000000000000000000000000000000000000 --- a/spaces/brendenc/Keras-Reshape-Layers/app.py +++ /dev/null @@ -1,291 +0,0 @@ -import gradio as gr -import keras -import numpy as np - -# All reshaping layers and their args, descriptions -layers = { - "Reshape":{ - "args":["target_shape"], - "descriptions":["""target_shape: Target shape. Tuple of integers, does not include the - samples dimension (batch size)."""] - }, - "Flatten":{ - "args":["data_format"], - "descriptions":["""data_format: A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. - channels_last corresponds to inputs with shape (batch, ..., channels) while channels_first corresponds to inputs with shape (batch, channels, ...). - It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. - If you never set it, then it will be "channels_last"."""] - }, - "RepeatVector":{ - "args":["n"], - "descriptions":["n: Integer, repetition factor."] - }, - "Permute":{ - "args":["dims"], - "descriptions":["""dims: Tuple of integers. - Permutation pattern does not include the samples dimension. Indexing starts at 1. - For instance, (2, 1) permutes the first and second dimensions of the input."""] - }, - "Cropping1D":{ - "args":["cropping"], - "descriptions":["""cropping: Int or tuple of int (length 2) - How many units should be trimmed off at the beginning and end of the cropping dimension (axis 1). - If a single int is provided, the same value will be used for both."""] - }, - "Cropping2D":{ - "args":["cropping", "data_format"], - "descriptions":["""cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. 
- If int: the same symmetric cropping is applied to height and width. - If tuple of 2 ints: interpreted as two different symmetric cropping values for height and width: (symmetric_height_crop, symmetric_width_crop). - If tuple of 2 tuples of 2 ints: interpreted as ((top_crop, bottom_crop), (left_crop, right_crop))""", - """data_format: A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. - channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape - (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. - If you never set it, then it will be "channels_last"."""], - }, - "Cropping3D":{ - "args":["cropping", "data_format"], - "descriptions":["""cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric cropping is applied to depth, height, and width. - If tuple of 3 ints: interpreted as two different symmetric cropping values for depth, height, and width: (symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop). - If tuple of 3 tuples of 2 ints: interpreted as ((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop), (left_dim3_crop, right_dim3_crop))""", - """data_format: A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape - (batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels) while channels_first corresponds to inputs with shape - (batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. - If you never set it, then it will be "channels_last"."""] - }, - "UpSampling1D":{ - "args":["size"], - "descriptions":["size: Integer. UpSampling factor."] - }, - "UpSampling2D":{ - "args":["size", "data_format", "interpolation"], - "descriptions":["size: Int, or tuple of 2 integers. The UpSampling factors for rows and columns.", - """data_format: A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. - channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with - shape (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. - If you never set it, then it will be "channels_last".""", - """interpolation: A string, one of "area", "bicubic", "bilinear", "gaussian", "lanczos3", "lanczos5", "mitchellcubic", "nearest"."""] - }, - "UpSampling3D":{ - "args":["size","data_format"], - "descriptions":["size: Int, or tuple of 3 integers. The UpSampling factors for dim1, dim2 and dim3.", - """data_format: A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. - channels_last corresponds to inputs with shape (batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels) while - channels_first corresponds to inputs with shape (batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3). - It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. If you never set it, - then it will be "channels_last"."""] - }, - "ZeroPadding1D":{ - "args":["padding"], - "descriptions":["""padding: Int, or tuple of int (length 2), or dictionary. 
- If int: - How many zeros to add at the beginning and end of the padding dimension (axis 1). - - If tuple of int (length 2): How many zeros to add at the beginning and the end of the padding dimension ((left_pad, right_pad))."""] - }, - "ZeroPadding2D":{ - "args":["padding", "data_format"], - "descriptions":["""padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints. - If int: the same symmetric padding is applied to height and width. - If tuple of 2 ints: interpreted as two different symmetric padding values for height and width: (symmetric_height_pad, symmetric_width_pad). - If tuple of 2 tuples of 2 ints: interpreted as ((top_pad, bottom_pad), (left_pad, right_pad))""", - """data_format: A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. - channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape - (batch_size, channels, height, width). It defaults to the image_data_format value found in your Keras config file at ~/.keras/keras.json. - If you never set it, then it will be "channels_last"."""] - }, - "ZeroPadding3D":{ - "args":["padding", "data_format"], - "descriptions":["""padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints. - If int: the same symmetric padding is applied to height and width. - If tuple of 3 ints: interpreted as two different symmetric padding values for height and width: (symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad). - If tuple of 3 tuples of 2 ints: interpreted as ((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))""", - """data_format: A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs - with shape (batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels) while channels_first corresponds to inputs with shape - (batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3). It defaults to the image_data_format value found in your Keras config file - at ~/.keras/keras.json. If you never set it, then it will be "channels_last"."""] - } -} -with gr.Blocks() as demo: - gr.Markdown(f'![Keras](https://res.cloudinary.com/crunchbase-production/image/upload/c_lpad,h_256,w_256,f_auto,q_auto:eco,dpr_1/x3gdrogoamvuvjemehbr)') - gr.Markdown("# Reshaping Layers") - gr.Markdown("""This app allows you to play with various Keras Reshaping layers, and is meant to be a - supplement to the documentation. You are free to change the layer, tensor/array shape, and arguments associated - with that layer. Execution will show you the command used as well as your resulting array/tensor. - - Keras documentation can be found [here](https://keras.io/api/layers/reshaping_layers/).
    - App built by [Brenden Connors](https://github.com/brendenconnors).
    - Built using keras==2.9.0. - -
    """) - - with gr.Row(): - with gr.Column(variant='panel'): - layers_dropdown = gr.Dropdown(choices=list(layers.keys()), value="Reshape", label="Keras Layer") - with gr.Box(): - gr.Markdown("**Please enter desired shape.**") - desired_shape2d = gr.Dataframe(value = [[2,2]], - headers = ["Rows", "Columns"], - row_count=(1, 'fixed'), - col_count=(2, "fixed"), - datatype="number", - type = "numpy", - interactive=True, - visible = False - ) - - desired_shape3d = gr.Dataframe(value = [[2,2,2]], - headers = ["Rows", "Columns", "Depth/Channels"], - row_count=(1, 'fixed'), - col_count=(3, "fixed"), - datatype="number", - type = "numpy", - interactive=True, - visible = True - ) - - desired_shape4d = gr.Dataframe(value = [[2,2,2,2]], - headers = ["Rows", "Columns", "Depth", "Channels"], - row_count=(1, 'fixed'), - col_count=(4, "fixed"), - datatype="number", - type = "numpy", - interactive=True, - visible = False - ) - - button = gr.Button("Generate Tensor") - input_arr = gr.Textbox(label = "Input Tensor", - interactive = False, - value = np.array([[1,2],[3,4]])) - with gr.Box(): - gr.Markdown("**Layer Args**") - with gr.Row(): - arg1 = gr.Textbox(label='target_shape') - arg2 = gr.Textbox(label='arg2',visible=False) - arg3 = gr.Textbox(label='arg3',visible=False) - with gr.Row(): - desc1 = gr.Textbox(label= '', value = layers["Reshape"]["descriptions"][0]) - desc2 = gr.Textbox(label = '', visible=False) - desc3 = gr.Textbox(label = '', visible=False) - result_button = gr.Button("Execute", variant="primary") - with gr.Column(variant='panel'): - output = gr.Textbox(label = 'Command Used') - output2 = gr.Textbox(label = 'Result') - - def generate_arr(layer, data1, data2, data3): - """ - Create Input tensor - """ - if '1D' in layer: - data = data1[0] - - elif '2D' in layer: - data = data2[0] - - elif '3D' in layer: - data = data3[0] - - elif layer=="RepeatVector": - data = data1[0] - - else: - data = data2[0] - - - shape = tuple([int(x) for x in data if int(x)!=0]) - elements = [x+1 for x in range(np.prod(shape))] - return np.array(elements).reshape(shape) - - - def add_dim(layer): - """ - Adjust dimensions component dependent on layer type - """ - if '1D' in layer: - return gr.DataFrame.update(visible=True), gr.DataFrame.update(visible=False), gr.DataFrame.update(visible=False) - elif '2D' in layer: - return gr.DataFrame.update(visible=False), gr.DataFrame.update(visible=True), gr.DataFrame.update(visible=False) - elif '3D' in layer: - return gr.DataFrame.update(visible=False), gr.DataFrame.update(visible=False), gr.DataFrame.update(visible=True) - elif layer=="RepeatVector": - return gr.DataFrame.update(visible=True), gr.DataFrame.update(visible=False), gr.DataFrame.update(visible=False) - return gr.DataFrame.update(visible=False), gr.DataFrame.update(visible=True), gr.DataFrame.update(visible=False) - - - def change_args(layer): - """ - Change layer args dependent on layer name - """ - n_args = len(layers[layer]["args"]) - args = layers[layer]["args"] - descriptions = layers[layer]["descriptions"] - descriptions = descriptions + ['None']*3 - args = args + ['None']*3 - visible_bool = [True if i<=n_args else False for i in range(1,4)] - return gr.Textbox.update(label=args[0], visible=visible_bool[0]),\ - gr.Textbox.update(label=args[1], visible=visible_bool[1]),\ - gr.Textbox.update(label=args[2], visible=visible_bool[2]),\ - gr.Textbox.update(value = descriptions[0], visible = visible_bool[0]),\ - gr.Textbox.update(value = descriptions[1], visible = visible_bool[1]),\ - gr.Textbox.update(value = 
descriptions[2], visible = visible_bool[2]) - - def create_layer(layer_name, arg1, arg2, arg3): - """ - Create layer given layer name and args - """ - args = [arg1, arg2, arg3] - real_args = [x for x in args if x != ''] - arg_str = ','.join(real_args) - - return f"keras.layers.{layer_name}({arg_str})" - - - def execute(layer_name, arg1, arg2, arg3, shape1, shape2, shape3): - """ - Execute keras reshaping layer given input tensor - """ - args = [arg1, arg2, arg3] - real_args = [x for x in args if x != ''] - arg_str = ','.join(real_args) - try: - layer = eval(f"keras.layers.{layer_name}({arg_str})") - except Exception as e: - return f"Error: {e}" - - def arr(data, layer_name): - if layer_name == "RepeatVector": - shape = tuple([int(x) for x in data[0] if int(x)!=0]) - else: - shape = tuple([1] + [int(x) for x in data[0] if int(x)!=0]) - elements = [x+1 for x in range(np.prod(shape))] - return np.array(elements).reshape(shape) - - if '1D' in layer_name: - inp = arr(shape1, layer_name) - elif '2D' in layer_name: - inp = arr(shape2, layer_name) - elif '3D' in layer_name: - inp = arr(shape3, layer_name) - elif layer_name=="RepeatVector": - inp = arr(shape1, layer_name) - else: - inp = arr(shape2, layer_name) - - try: - return layer(inp) - except Exception as e: - return e - - # Generate tensor - button.click(generate_arr, [layers_dropdown, desired_shape2d, desired_shape3d, desired_shape4d], input_arr) - - # All changes dependent on layer selected - layers_dropdown.change(add_dim, layers_dropdown, [desired_shape2d, desired_shape3d, desired_shape4d]) - layers_dropdown.change(change_args, layers_dropdown, [arg1, arg2, arg3, desc1, desc2, desc3]) - layers_dropdown.change(generate_arr, [layers_dropdown, desired_shape2d, desired_shape3d, desired_shape4d], input_arr) - - # Show command used and execute it - result_button.click(create_layer, [layers_dropdown, arg1, arg2, arg3], output) - result_button.click(execute, [layers_dropdown, arg1, arg2, arg3, desired_shape2d, desired_shape3d, desired_shape4d], output2) - -demo.launch() diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py b/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py deleted file mode 100644 index df7a2aedf480ed8dc4aa3645e37420e9b893fae4..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py +++ /dev/null @@ -1,72 +0,0 @@ -import detectron2.data.transforms as T -from detectron2.config.lazy import LazyCall as L -from detectron2.layers.batch_norm import NaiveSyncBatchNorm -from detectron2.solver import WarmupParamScheduler -from fvcore.common.param_scheduler import MultiStepParamScheduler - -from ..common.data.coco import dataloader -from ..common.models.mask_rcnn_fpn import model -from ..common.optim import SGD as optimizer -from ..common.train import train - -# train from scratch -train.init_checkpoint = "" -train.amp.enabled = True -train.ddp.fp16_compression = True -model.backbone.bottom_up.freeze_at = 0 - -# SyncBN -# fmt: off -model.backbone.bottom_up.stem.norm = \ - model.backbone.bottom_up.stages.norm = \ - model.backbone.norm = "SyncBN" - -# Using NaiveSyncBatchNorm becase heads may have empty input. That is not supported by -# torch.nn.SyncBatchNorm. We can remove this after -# https://github.com/pytorch/pytorch/issues/36530 is fixed. 
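# Note (illustrative clarification): with stats_mode="N", NaiveSyncBatchNorm appears to weight each
# worker's batch statistics by its own sample count, so a worker whose head receives an empty input
# contributes nothing to the synchronized statistics instead of corrupting them.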
-model.roi_heads.box_head.conv_norm = \ - model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c, - stats_mode="N") -# fmt: on - -# 2conv in RPN: -# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950 -model.proposal_generator.head.conv_dims = [-1, -1] - -# 4conv1fc box head -model.roi_heads.box_head.conv_dims = [256, 256, 256, 256] -model.roi_heads.box_head.fc_dims = [1024] - -# resize_and_crop_image in: -# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950 -image_size = 1024 -dataloader.train.mapper.augmentations = [ - L(T.ResizeScale)( - min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size - ), - L(T.FixedSizeCrop)(crop_size=(image_size, image_size)), - L(T.RandomFlip)(horizontal=True), -] - -# recompute boxes due to cropping -dataloader.train.mapper.recompute_boxes = True - -# larger batch-size. -dataloader.train.total_batch_size = 64 - -# Equivalent to 100 epochs. -# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep -train.max_iter = 184375 - -lr_multiplier = L(WarmupParamScheduler)( - scheduler=L(MultiStepParamScheduler)( - values=[1.0, 0.1, 0.01], - milestones=[163889, 177546], - num_updates=train.max_iter, - ), - warmup_length=500 / train.max_iter, - warmup_factor=0.067, -) - -optimizer.lr = 0.1 -optimizer.weight_decay = 4e-5 diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/tests/data/test_dataset.py b/spaces/brjathu/HMR2.0/vendor/detectron2/tests/data/test_dataset.py deleted file mode 100644 index 7bdcda0d521019f0073be543137cf55ae64fa7bd..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/tests/data/test_dataset.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
- -import os -import pickle -import sys -import unittest -from functools import partial -import torch -from iopath.common.file_io import LazyPath - -from detectron2 import model_zoo -from detectron2.config import get_cfg, instantiate -from detectron2.data import ( - DatasetCatalog, - DatasetFromList, - MapDataset, - ToIterableDataset, - build_batch_data_loader, - build_detection_test_loader, - build_detection_train_loader, -) -from detectron2.data.common import ( - AspectRatioGroupedDataset, - set_default_dataset_from_list_serialize_method, -) -from detectron2.data.samplers import InferenceSampler, TrainingSampler - - -def _a_slow_func(x): - return "path/{}".format(x) - - -class TestDatasetFromList(unittest.TestCase): - # Failing for py3.6, likely due to pickle - @unittest.skipIf(sys.version_info.minor <= 6, "Not supported in Python 3.6") - def test_using_lazy_path(self): - dataset = [] - for i in range(10): - dataset.append({"file_name": LazyPath(partial(_a_slow_func, i))}) - - dataset = DatasetFromList(dataset) - for i in range(10): - path = dataset[i]["file_name"] - self.assertTrue(isinstance(path, LazyPath)) - self.assertEqual(os.fspath(path), _a_slow_func(i)) - - def test_alternative_serialize_method(self): - dataset = [1, 2, 3] - dataset = DatasetFromList(dataset, serialize=torch.tensor) - self.assertEqual(dataset[2], torch.tensor(3)) - - def test_change_default_serialize_method(self): - dataset = [1, 2, 3] - with set_default_dataset_from_list_serialize_method(torch.tensor): - dataset_1 = DatasetFromList(dataset, serialize=True) - self.assertEqual(dataset_1[2], torch.tensor(3)) - dataset_2 = DatasetFromList(dataset, serialize=True) - self.assertEqual(dataset_2[2], 3) - - -class TestMapDataset(unittest.TestCase): - @staticmethod - def map_func(x): - if x == 2: - return None - return x * 2 - - def test_map_style(self): - ds = DatasetFromList([1, 2, 3]) - ds = MapDataset(ds, TestMapDataset.map_func) - self.assertEqual(ds[0], 2) - self.assertEqual(ds[2], 6) - self.assertIn(ds[1], [2, 6]) - - def test_iter_style(self): - class DS(torch.utils.data.IterableDataset): - def __iter__(self): - yield from [1, 2, 3] - - ds = DS() - ds = MapDataset(ds, TestMapDataset.map_func) - self.assertIsInstance(ds, torch.utils.data.IterableDataset) - - data = list(iter(ds)) - self.assertEqual(data, [2, 6]) - - def test_pickleability(self): - ds = DatasetFromList([1, 2, 3]) - ds = MapDataset(ds, lambda x: x * 2) - ds = pickle.loads(pickle.dumps(ds)) - self.assertEqual(ds[0], 2) - - -class TestAspectRatioGrouping(unittest.TestCase): - def test_reiter_leak(self): - data = [(1, 0), (0, 1), (1, 0), (0, 1)] - data = [{"width": a, "height": b} for (a, b) in data] - batchsize = 2 - dataset = AspectRatioGroupedDataset(data, batchsize) - - for _ in range(5): - for idx, __ in enumerate(dataset): - if idx == 1: - # manually break, so the iterator does not stop by itself - break - # check that bucket sizes are valid - for bucket in dataset._buckets: - self.assertLess(len(bucket), batchsize) - - -class _MyData(torch.utils.data.IterableDataset): - def __iter__(self): - while True: - yield 1 - - -class TestDataLoader(unittest.TestCase): - def _get_kwargs(self): - # get kwargs of build_detection_train_loader - cfg = model_zoo.get_config("common/data/coco.py").dataloader.train - cfg.dataset.names = "coco_2017_val_100" - cfg.pop("_target_") - kwargs = {k: instantiate(v) for k, v in cfg.items()} - return kwargs - - def test_build_dataloader_train(self): - kwargs = self._get_kwargs() - dl = build_detection_train_loader(**kwargs) 
- next(iter(dl)) - - def test_build_iterable_dataloader_train(self): - kwargs = self._get_kwargs() - ds = DatasetFromList(kwargs.pop("dataset")) - ds = ToIterableDataset(ds, TrainingSampler(len(ds))) - dl = build_detection_train_loader(dataset=ds, **kwargs) - next(iter(dl)) - - def test_build_iterable_dataloader_from_cfg(self): - cfg = get_cfg() - cfg.DATASETS.TRAIN = ["iter_data"] - DatasetCatalog.register("iter_data", lambda: _MyData()) - dl = build_detection_train_loader(cfg, mapper=lambda x: x, aspect_ratio_grouping=False) - next(iter(dl)) - - dl = build_detection_test_loader(cfg, "iter_data", mapper=lambda x: x) - next(iter(dl)) - - def _check_is_range(self, data_loader, N): - # check that data_loader produces range(N) - data = list(iter(data_loader)) - data = [x for batch in data for x in batch] # flatten the batches - self.assertEqual(len(data), N) - self.assertEqual(set(data), set(range(N))) - - def test_build_batch_dataloader_inference(self): - # Test that build_batch_data_loader can be used for inference - N = 96 - ds = DatasetFromList(list(range(N))) - sampler = InferenceSampler(len(ds)) - dl = build_batch_data_loader(ds, sampler, 8, num_workers=3) - self._check_is_range(dl, N) - - def test_build_dataloader_inference(self): - N = 50 - ds = DatasetFromList(list(range(N))) - sampler = InferenceSampler(len(ds)) - # test that parallel loader works correctly - dl = build_detection_test_loader( - dataset=ds, sampler=sampler, mapper=lambda x: x, num_workers=3 - ) - self._check_is_range(dl, N) - - # test that batch_size works correctly - dl = build_detection_test_loader( - dataset=ds, sampler=sampler, mapper=lambda x: x, batch_size=4, num_workers=0 - ) - self._check_is_range(dl, N) - - def test_build_iterable_dataloader_inference(self): - # Test that build_detection_test_loader supports iterable dataset - N = 50 - ds = DatasetFromList(list(range(N))) - ds = ToIterableDataset(ds, InferenceSampler(len(ds))) - dl = build_detection_test_loader(dataset=ds, mapper=lambda x: x, num_workers=3) - self._check_is_range(dl, N) diff --git a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/hifigan/models.py b/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/hifigan/models.py deleted file mode 100644 index c4382cc39de0463f9b7c0f33f037dbc233e7cb36..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/audioldm-text-to-audio-generation/audioldm/hifigan/models.py +++ /dev/null @@ -1,174 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn import Conv1d, ConvTranspose1d -from torch.nn.utils import weight_norm, remove_weight_norm - -LRELU_SLOPE = 0.1 - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -class ResBlock(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock, self).__init__() - self.h = h - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) 
- ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - self.num_kernels = len(h.resblock_kernel_sizes) - self.num_upsamples = len(h.upsample_rates) - self.conv_pre = weight_norm( - Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3) - ) - resblock = ResBlock - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - h.upsample_initial_channel // (2**i), - h.upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h.upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes) - ): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - - def forward(self, x): - x = self.conv_pre(x) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - # print("Removing weight norm...") - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/anchor_generator.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/anchor_generator.py deleted file mode 100644 index ac94e72396ba61778c102133218bb5defe5b4413..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/anchor_generator.py +++ /dev/null @@ -1,386 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import collections -import math -from typing import List -import torch -from torch import nn - -from detectron2.config import configurable -from detectron2.layers import ShapeSpec, move_device_like -from detectron2.structures import Boxes, RotatedBoxes -from detectron2.utils.registry import Registry - -ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR") -ANCHOR_GENERATOR_REGISTRY.__doc__ = """ -Registry for modules that creates object detection anchors for feature maps. 
- -The registered object will be called with `obj(cfg, input_shape)`. -""" - - -class BufferList(nn.Module): - """ - Similar to nn.ParameterList, but for buffers - """ - - def __init__(self, buffers): - super().__init__() - for i, buffer in enumerate(buffers): - # Use non-persistent buffer so the values are not saved in checkpoint - self.register_buffer(str(i), buffer, persistent=False) - - def __len__(self): - return len(self._buffers) - - def __iter__(self): - return iter(self._buffers.values()) - - -def _create_grid_offsets( - size: List[int], stride: int, offset: float, target_device_tensor: torch.Tensor -): - grid_height, grid_width = size - shifts_x = move_device_like( - torch.arange(offset * stride, grid_width * stride, step=stride, dtype=torch.float32), - target_device_tensor, - ) - shifts_y = move_device_like( - torch.arange(offset * stride, grid_height * stride, step=stride, dtype=torch.float32), - target_device_tensor, - ) - - shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) - shift_x = shift_x.reshape(-1) - shift_y = shift_y.reshape(-1) - return shift_x, shift_y - - -def _broadcast_params(params, num_features, name): - """ - If one size (or aspect ratio) is specified and there are multiple feature - maps, we "broadcast" anchors of that single size (or aspect ratio) - over all feature maps. - - If params is list[float], or list[list[float]] with len(params) == 1, repeat - it num_features time. - - Returns: - list[list[float]]: param for each feature - """ - assert isinstance( - params, collections.abc.Sequence - ), f"{name} in anchor generator has to be a list! Got {params}." - assert len(params), f"{name} in anchor generator cannot be empty!" - if not isinstance(params[0], collections.abc.Sequence): # params is list[float] - return [params] * num_features - if len(params) == 1: - return list(params) * num_features - assert len(params) == num_features, ( - f"Got {name} of length {len(params)} in anchor generator, " - f"but the number of input features is {num_features}!" - ) - return params - - -@ANCHOR_GENERATOR_REGISTRY.register() -class DefaultAnchorGenerator(nn.Module): - """ - Compute anchors in the standard ways described in - "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks". - """ - - box_dim: torch.jit.Final[int] = 4 - """ - the dimension of each anchor box. - """ - - @configurable - def __init__(self, *, sizes, aspect_ratios, strides, offset=0.5): - """ - This interface is experimental. - - Args: - sizes (list[list[float]] or list[float]): - If ``sizes`` is list[list[float]], ``sizes[i]`` is the list of anchor sizes - (i.e. sqrt of anchor area) to use for the i-th feature map. - If ``sizes`` is list[float], ``sizes`` is used for all feature maps. - Anchor sizes are given in absolute lengths in units of - the input image; they do not dynamically scale if the input image size changes. - aspect_ratios (list[list[float]] or list[float]): list of aspect ratios - (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. - strides (list[int]): stride of each input feature. - offset (float): Relative offset between the center of the first anchor and the top-left - corner of the image. Value has to be in [0, 1). - Recommend to use 0.5, which means half stride. 
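        Example (an illustrative sketch; the values below are arbitrary)::

            DefaultAnchorGenerator(
                sizes=[[32, 64]],                 # a single list is broadcast to every feature map
                aspect_ratios=[[0.5, 1.0, 2.0]],
                strides=[4],
            )
            # -> one feature map with 2 * 3 = 6 cell anchors per location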
- """ - super().__init__() - - self.strides = strides - self.num_features = len(self.strides) - sizes = _broadcast_params(sizes, self.num_features, "sizes") - aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") - self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios) - - self.offset = offset - assert 0.0 <= self.offset < 1.0, self.offset - - @classmethod - def from_config(cls, cfg, input_shape: List[ShapeSpec]): - return { - "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, - "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, - "strides": [x.stride for x in input_shape], - "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, - } - - def _calculate_anchors(self, sizes, aspect_ratios): - cell_anchors = [ - self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios) - ] - return BufferList(cell_anchors) - - @property - @torch.jit.unused - def num_cell_anchors(self): - """ - Alias of `num_anchors`. - """ - return self.num_anchors - - @property - @torch.jit.unused - def num_anchors(self): - """ - Returns: - list[int]: Each int is the number of anchors at every pixel - location, on that feature map. - For example, if at every pixel we use anchors of 3 aspect - ratios and 5 sizes, the number of anchors is 15. - (See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config) - - In standard RPN models, `num_anchors` on every feature map is the same. - """ - return [len(cell_anchors) for cell_anchors in self.cell_anchors] - - def _grid_anchors(self, grid_sizes: List[List[int]]): - """ - Returns: - list[Tensor]: #featuremap tensors, each is (#locations x #cell_anchors) x 4 - """ - anchors = [] - # buffers() not supported by torchscript. use named_buffers() instead - buffers: List[torch.Tensor] = [x[1] for x in self.cell_anchors.named_buffers()] - for size, stride, base_anchors in zip(grid_sizes, self.strides, buffers): - shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors) - shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) - - anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) - - return anchors - - def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): - """ - Generate a tensor storing canonical anchor boxes, which are all anchor - boxes of different sizes and aspect_ratios centered at (0, 0). - We can later build the set of anchors for a full feature map by - shifting and tiling these tensors (see `meth:_grid_anchors`). - - Args: - sizes (tuple[float]): - aspect_ratios (tuple[float]]): - - Returns: - Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes - in XYXY format. - """ - - # This is different from the anchor generator defined in the original Faster R-CNN - # code or Detectron. They yield the same AP, however the old version defines cell - # anchors in a less natural way with a shift relative to the feature grid and - # quantization that results in slightly different sizes for different aspect ratios. - # See also https://github.com/facebookresearch/Detectron/issues/227 - - anchors = [] - for size in sizes: - area = size**2.0 - for aspect_ratio in aspect_ratios: - # s * s = w * h - # a = h / w - # ... some algebra ... 
- # w = sqrt(s * s / a) - # h = a * w - w = math.sqrt(area / aspect_ratio) - h = aspect_ratio * w - x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 - anchors.append([x0, y0, x1, y1]) - return torch.tensor(anchors) - - def forward(self, features: List[torch.Tensor]): - """ - Args: - features (list[Tensor]): list of backbone feature maps on which to generate anchors. - - Returns: - list[Boxes]: a list of Boxes containing all the anchors for each feature map - (i.e. the cell anchors repeated over all locations in the feature map). - The number of anchors of each feature map is Hi x Wi x num_cell_anchors, - where Hi, Wi are resolution of the feature map divided by anchor stride. - """ - grid_sizes = [feature_map.shape[-2:] for feature_map in features] - anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) - return [Boxes(x) for x in anchors_over_all_feature_maps] - - -@ANCHOR_GENERATOR_REGISTRY.register() -class RotatedAnchorGenerator(nn.Module): - """ - Compute rotated anchors used by Rotated RPN (RRPN), described in - "Arbitrary-Oriented Scene Text Detection via Rotation Proposals". - """ - - box_dim: int = 5 - """ - the dimension of each anchor box. - """ - - @configurable - def __init__(self, *, sizes, aspect_ratios, strides, angles, offset=0.5): - """ - This interface is experimental. - - Args: - sizes (list[list[float]] or list[float]): - If sizes is list[list[float]], sizes[i] is the list of anchor sizes - (i.e. sqrt of anchor area) to use for the i-th feature map. - If sizes is list[float], the sizes are used for all feature maps. - Anchor sizes are given in absolute lengths in units of - the input image; they do not dynamically scale if the input image size changes. - aspect_ratios (list[list[float]] or list[float]): list of aspect ratios - (i.e. height / width) to use for anchors. Same "broadcast" rule for `sizes` applies. - strides (list[int]): stride of each input feature. - angles (list[list[float]] or list[float]): list of angles (in degrees CCW) - to use for anchors. Same "broadcast" rule for `sizes` applies. - offset (float): Relative offset between the center of the first anchor and the top-left - corner of the image. Value has to be in [0, 1). - Recommend to use 0.5, which means half stride. - """ - super().__init__() - - self.strides = strides - self.num_features = len(self.strides) - sizes = _broadcast_params(sizes, self.num_features, "sizes") - aspect_ratios = _broadcast_params(aspect_ratios, self.num_features, "aspect_ratios") - angles = _broadcast_params(angles, self.num_features, "angles") - self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios, angles) - - self.offset = offset - assert 0.0 <= self.offset < 1.0, self.offset - - @classmethod - def from_config(cls, cfg, input_shape: List[ShapeSpec]): - return { - "sizes": cfg.MODEL.ANCHOR_GENERATOR.SIZES, - "aspect_ratios": cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS, - "strides": [x.stride for x in input_shape], - "offset": cfg.MODEL.ANCHOR_GENERATOR.OFFSET, - "angles": cfg.MODEL.ANCHOR_GENERATOR.ANGLES, - } - - def _calculate_anchors(self, sizes, aspect_ratios, angles): - cell_anchors = [ - self.generate_cell_anchors(size, aspect_ratio, angle).float() - for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles) - ] - return BufferList(cell_anchors) - - @property - def num_cell_anchors(self): - """ - Alias of `num_anchors`. 
- """ - return self.num_anchors - - @property - def num_anchors(self): - """ - Returns: - list[int]: Each int is the number of anchors at every pixel - location, on that feature map. - For example, if at every pixel we use anchors of 3 aspect - ratios, 2 sizes and 5 angles, the number of anchors is 30. - (See also ANCHOR_GENERATOR.SIZES, ANCHOR_GENERATOR.ASPECT_RATIOS - and ANCHOR_GENERATOR.ANGLES in config) - - In standard RRPN models, `num_anchors` on every feature map is the same. - """ - return [len(cell_anchors) for cell_anchors in self.cell_anchors] - - def _grid_anchors(self, grid_sizes): - anchors = [] - for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): - shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors) - zeros = torch.zeros_like(shift_x) - shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros), dim=1) - - anchors.append((shifts.view(-1, 1, 5) + base_anchors.view(1, -1, 5)).reshape(-1, 5)) - - return anchors - - def generate_cell_anchors( - self, - sizes=(32, 64, 128, 256, 512), - aspect_ratios=(0.5, 1, 2), - angles=(-90, -60, -30, 0, 30, 60, 90), - ): - """ - Generate a tensor storing canonical anchor boxes, which are all anchor - boxes of different sizes, aspect_ratios, angles centered at (0, 0). - We can later build the set of anchors for a full feature map by - shifting and tiling these tensors (see `meth:_grid_anchors`). - - Args: - sizes (tuple[float]): - aspect_ratios (tuple[float]]): - angles (tuple[float]]): - - Returns: - Tensor of shape (len(sizes) * len(aspect_ratios) * len(angles), 5) - storing anchor boxes in (x_ctr, y_ctr, w, h, angle) format. - """ - anchors = [] - for size in sizes: - area = size**2.0 - for aspect_ratio in aspect_ratios: - # s * s = w * h - # a = h / w - # ... some algebra ... - # w = sqrt(s * s / a) - # h = a * w - w = math.sqrt(area / aspect_ratio) - h = aspect_ratio * w - anchors.extend([0, 0, w, h, a] for a in angles) - - return torch.tensor(anchors) - - def forward(self, features): - """ - Args: - features (list[Tensor]): list of backbone feature maps on which to generate anchors. - - Returns: - list[RotatedBoxes]: a list of Boxes containing all the anchors for each feature map - (i.e. the cell anchors repeated over all locations in the feature map). - The number of anchors of each feature map is Hi x Wi x num_cell_anchors, - where Hi, Wi are resolution of the feature map divided by anchor stride. - """ - grid_sizes = [feature_map.shape[-2:] for feature_map in features] - anchors_over_all_feature_maps = self._grid_anchors(grid_sizes) - return [RotatedBoxes(x) for x in anchors_over_all_feature_maps] - - -def build_anchor_generator(cfg, input_shape): - """ - Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`. 
- """ - anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME - return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape) diff --git a/spaces/ccolas/TastyPiano/src/music/utilities/representation_learning_utilities/device.py b/spaces/ccolas/TastyPiano/src/music/utilities/representation_learning_utilities/device.py deleted file mode 100644 index 00fbe16bd6ec82cf6019a787e4c8b4612f6ccc10..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/music/utilities/representation_learning_utilities/device.py +++ /dev/null @@ -1,67 +0,0 @@ -# For all things related to devices -#### ONLY USE PROVIDED FUNCTIONS, DO NOT USE GLOBAL CONSTANTS #### - -import torch - -TORCH_CPU_DEVICE = torch.device("cpu") - -if(torch.cuda.device_count() > 0): - TORCH_CUDA_DEVICE = torch.device("cuda") -else: - print("----- WARNING: CUDA devices not detected. This will cause the model to run very slow! -----") - print("") - TORCH_CUDA_DEVICE = None - -USE_CUDA = True - -# use_cuda -def use_cuda(cuda_bool): - """ - ---------- - Author: Damon Gwinn - ---------- - Sets whether to use CUDA (if available), or use the CPU (not recommended) - ---------- - """ - - global USE_CUDA - USE_CUDA = cuda_bool - -# get_device -def get_device(): - """ - ---------- - Author: Damon Gwinn - ---------- - Grabs the default device. Default device is CUDA if available and use_cuda is not False, CPU otherwise. - ---------- - """ - - if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)): - return TORCH_CPU_DEVICE - else: - return TORCH_CUDA_DEVICE - -# cuda_device -def cuda_device(): - """ - ---------- - Author: Damon Gwinn - ---------- - Grabs the cuda device (may be None if CUDA is not available) - ---------- - """ - - return TORCH_CUDA_DEVICE - -# cpu_device -def cpu_device(): - """ - ---------- - Author: Damon Gwinn - ---------- - Grabs the cpu device - ---------- - """ - - return TORCH_CPU_DEVICE diff --git a/spaces/chendl/compositional_test/transformers/examples/flax/conftest.py b/spaces/chendl/compositional_test/transformers/examples/flax/conftest.py deleted file mode 100644 index 131c6af92c44cca88d26a39ca68a9dabbad3559b..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/flax/conftest.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# tests directory-specific settings - this file is run automatically -# by pytest before any tests are run - -import sys -import warnings -from os.path import abspath, dirname, join - - -# allow having multiple repository checkouts and not needing to remember to rerun -# 'pip install -e .[dev]' when switching between checkouts and running tests. -git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src")) -sys.path.insert(1, git_repo_path) - - -# silence FutureWarning warnings in tests since often we can't act on them until -# they become normal warnings - i.e. 
the tests still need to test the current functionality -warnings.simplefilter(action="ignore", category=FutureWarning) - - -def pytest_addoption(parser): - from transformers.testing_utils import pytest_addoption_shared - - pytest_addoption_shared(parser) - - -def pytest_terminal_summary(terminalreporter): - from transformers.testing_utils import pytest_terminal_summary_main - - make_reports = terminalreporter.config.getoption("--make-reports") - if make_reports: - pytest_terminal_summary_main(terminalreporter, id=make_reports) diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py deleted file mode 100644 index 901e921f26a6949eb63aa0d5052281a3e4a41d55..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py +++ /dev/null @@ -1,625 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2022 The Microsoft and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Fine-tuning the library models for tapex on table-based question answering tasks. -Adapted from script: https://github.com/huggingface/transformers/blob/master/examples/pytorch/summarization/run_summarization.py -""" - -import logging -import os -import sys -from collections import defaultdict -from dataclasses import dataclass, field -from functools import partial -from typing import List, Optional - -import nltk # Here to have a nice missing dependency error message early on -import numpy as np -import pandas as pd -from datasets import load_dataset -from filelock import FileLock - -import transformers -from transformers import ( - AutoConfig, - BartForConditionalGeneration, - DataCollatorForSeq2Seq, - HfArgumentParser, - Seq2SeqTrainer, - Seq2SeqTrainingArguments, - TapexTokenizer, - set_seed, -) -from transformers.file_utils import is_offline_mode -from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.utils import check_min_version - - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.17.0.dev0") - -logger = logging.getLogger(__name__) - -try: - nltk.data.find("tokenizers/punkt") -except (LookupError, OSError): - if is_offline_mode(): - raise LookupError( - "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" - ) - with FileLock(".lock") as lock: - nltk.download("punkt", quiet=True) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
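    (Illustrative note: for this WikiTableQuestions setup, ``model_name_or_path`` would typically
    point at a TAPEX checkpoint such as ``microsoft/tapex-large``.)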
- """ - - model_name_or_path: str = field( - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - tokenizer_name: Optional[str] = field( - default=None, - metadata={ - "help": ( - "Pretrained tokenizer name or path if not the same as model_name. " - "By default we use BART-large tokenizer for TAPEX-large." - ) - }, - ) - cache_dir: Optional[str] = field( - default=None, - metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, - ) - use_fast_tokenizer: bool = field( - default=True, - metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." - ) - }, - ) - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - """ - - dataset_name: Optional[str] = field( - default="wikitablequestions", metadata={"help": "The name of the dataset to use (via the datasets library)."} - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_file: Optional[str] = field( - default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} - ) - validation_file: Optional[str] = field( - default=None, - metadata={ - "help": ( - "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." - ) - }, - ) - test_file: Optional[str] = field( - default=None, - metadata={ - "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." - }, - ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) - preprocessing_num_workers: Optional[int] = field( - default=None, - metadata={"help": "The number of processes to use for the preprocessing."}, - ) - max_source_length: Optional[int] = field( - default=1024, - metadata={ - "help": ( - "The maximum total input sequence length after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - max_target_length: Optional[int] = field( - default=128, - metadata={ - "help": ( - "The maximum total sequence length for target text after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded." - ) - }, - ) - val_max_target_length: Optional[int] = field( - default=None, - metadata={ - "help": ( - "The maximum total sequence length for validation target text after tokenization. Sequences longer " - "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`." - "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " - "during ``evaluate`` and ``predict``." - ) - }, - ) - pad_to_max_length: bool = field( - default=False, - metadata={ - "help": ( - "Whether to pad all samples to model maximum sentence length. 
" - "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " - "efficient on GPU but very bad for TPU." - ) - }, - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - max_predict_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of prediction examples to this " - "value if set." - ) - }, - ) - num_beams: Optional[int] = field( - default=None, - metadata={ - "help": ( - "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " - "which is used during ``evaluate`` and ``predict``." - ) - }, - ) - ignore_pad_token_for_loss: bool = field( - default=True, - metadata={ - "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." - }, - ) - - def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") - else: - if self.train_file is not None: - extension = self.train_file.split(".")[-1] - assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." - if self.validation_file is not None: - extension = self.validation_file.split(".")[-1] - assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." - if self.val_max_target_length is None: - self.val_max_target_length = self.max_target_length - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
- ) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - # Set the verbosity to info of the Transformers logger (on main process only): - if is_main_process(training_args.local_rank): - transformers.utils.logging.set_verbosity_info() - logger.info(f"Training/evaluation parameters {training_args}") - - # Set seed before initializing model. - set_seed(training_args.seed) - - # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) - # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ - # (the dataset will be downloaded automatically from the datasets Hub). - # - # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. - # - # In distributed training, the load_dataset function guarantee that only one local process can concurrently - # download the dataset. - if data_args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) - else: - data_files = {} - if data_args.train_file is not None: - data_files["train"] = data_args.train_file - extension = data_args.train_file.split(".")[-1] - if data_args.validation_file is not None: - data_files["validation"] = data_args.validation_file - extension = data_args.validation_file.split(".")[-1] - if data_args.test_file is not None: - data_files["test"] = data_args.test_file - extension = data_args.test_file.split(".")[-1] - datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) - - # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at - # https://huggingface.co/docs/datasets/loading_datasets.html. - - # Load pretrained model and tokenizer - # - # Distributed training: - # The .from_pretrained methods guarantee that only one local process can concurrently - # download model & vocab. 
- - config = AutoConfig.from_pretrained( - model_args.config_name if model_args.config_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - - # IMPORTANT: the initial BART model's decoding is penalized by no_repeat_ngram_size, and thus - # we should disable it here to avoid problematic generation - config.no_repeat_ngram_size = 0 - config.max_length = 1024 - config.early_stopping = False - - # load tapex tokenizer - tokenizer = TapexTokenizer.from_pretrained( - model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - use_fast=model_args.use_fast_tokenizer, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - add_prefix_space=True, - ) - - # load Bart based Tapex model (default tapex-large) - model = BartForConditionalGeneration.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - - if model.config.decoder_start_token_id is None: - raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") - - # Preprocessing the datasets. - # We need to tokenize inputs and targets. - if training_args.do_train: - column_names = datasets["train"].column_names - elif training_args.do_eval: - column_names = datasets["validation"].column_names - elif training_args.do_predict: - column_names = datasets["test"].column_names - else: - logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") - return - - # Temporarily set max_target_length for training. - max_target_length = data_args.max_target_length - padding = "max_length" if data_args.pad_to_max_length else False - - if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): - logger.warning( - "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for" - f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory" - ) - - def preprocess_tableqa_function(examples, is_training=False): - """ - The is_training FLAG is used to identify if we could use the supervision - to truncate the table content if it is required. - """ - - questions = [question.lower() for question in examples["question"]] - example_tables = examples["table"] - tables = [ - pd.DataFrame.from_records(example_table["rows"], columns=example_table["header"]) - for example_table in example_tables - ] - - # using wikitablequestion's answer set - answers = examples["answers"] - - # IMPORTANT: we cannot pass by answers during evaluation, answers passed during training are used to - # truncate large tables in the train set! 
- if is_training: - model_inputs = tokenizer( - table=tables, - query=questions, - answer=answers, - max_length=data_args.max_source_length, - padding=padding, - truncation=True, - ) - else: - model_inputs = tokenizer( - table=tables, query=questions, max_length=data_args.max_source_length, padding=padding, truncation=True - ) - - labels = tokenizer( - answer=[", ".join(answer) for answer in answers], - max_length=max_target_length, - padding=padding, - truncation=True, - ) - - # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore - # padding in the loss. - if padding == "max_length" and data_args.ignore_pad_token_for_loss: - labels["input_ids"] = [ - [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] - ] - - model_inputs["labels"] = labels["input_ids"] - - return model_inputs - - # in training, we can use the answer as extra information to truncate large tables - preprocess_tableqa_function_training = partial(preprocess_tableqa_function, is_training=True) - - if training_args.do_train: - if "train" not in datasets: - raise ValueError("--do_train requires a train dataset") - train_dataset = datasets["train"] - if data_args.max_train_samples is not None: - train_dataset = train_dataset.select(range(data_args.max_train_samples)) - train_dataset = train_dataset.map( - preprocess_tableqa_function_training, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if training_args.do_eval: - max_target_length = data_args.val_max_target_length - if "validation" not in datasets: - raise ValueError("--do_eval requires a validation dataset") - eval_dataset = datasets["validation"] - if data_args.max_eval_samples is not None: - eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) - eval_dataset = eval_dataset.map( - preprocess_tableqa_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - if training_args.do_predict: - max_target_length = data_args.val_max_target_length - if "test" not in datasets: - raise ValueError("--do_predict requires a test dataset") - predict_dataset = datasets["test"] - if data_args.max_predict_samples is not None: - predict_dataset = predict_dataset.select(range(data_args.max_predict_samples)) - predict_dataset = predict_dataset.map( - preprocess_tableqa_function, - batched=True, - num_proc=data_args.preprocessing_num_workers, - remove_columns=column_names, - load_from_cache_file=not data_args.overwrite_cache, - ) - - # Data collator - label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id - data_collator = DataCollatorForSeq2Seq( - tokenizer, - model=model, - label_pad_token_id=label_pad_token_id, - pad_to_multiple_of=8 if training_args.fp16 else None, - ) - - def postprocess_text(preds, labels): - preds = [pred.strip() for pred in preds] - labels = [label.strip() for label in labels] - - return preds, labels - - def compute_metrics(eval_preds): - preds, labels = eval_preds - if isinstance(preds, tuple): - preds = preds[0] - decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) - if data_args.ignore_pad_token_for_loss: - # Replace -100 in the labels as we can't decode them. 
- labels = np.where(labels != -100, labels, tokenizer.pad_token_id) - decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) - - # Some simple post-processing - decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) - - delimiter = ", " - - # define example evaluation - def evaluate_example(predict_str: str, ground_str: str): - predict_spans = predict_str.split(delimiter) - ground_spans = ground_str.split(delimiter) - predict_values = defaultdict(lambda: 0) - ground_values = defaultdict(lambda: 0) - for span in predict_spans: - try: - predict_values[float(span)] += 1 - except ValueError: - predict_values[span.strip()] += 1 - for span in ground_spans: - try: - ground_values[float(span)] += 1 - except ValueError: - ground_values[span.strip()] += 1 - _is_correct = predict_values == ground_values - return _is_correct - - def get_denotation_accuracy(predictions: List[str], references: List[str]): - assert len(predictions) == len(references) - correct_num = 0 - for predict_str, ground_str in zip(predictions, references): - is_correct = evaluate_example(predict_str.lower(), ground_str.lower()) - if is_correct: - correct_num += 1 - return correct_num / len(predictions) - - accuracy = get_denotation_accuracy(decoded_preds, decoded_labels) - result = {"denotation_accuracy": accuracy} - - return result - - # Initialize our Trainer - trainer = Seq2SeqTrainer( - model=model, - args=training_args, - train_dataset=train_dataset if training_args.do_train else None, - eval_dataset=eval_dataset if training_args.do_eval else None, - tokenizer=tokenizer, - data_collator=data_collator, - compute_metrics=compute_metrics if training_args.predict_with_generate else None, - ) - - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() # Saves the tokenizer too for easy upload - - metrics = train_result.metrics - max_train_samples = ( - data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) - ) - metrics["train_samples"] = min(max_train_samples, len(train_dataset)) - - trainer.log_metrics("train", metrics) - trainer.save_metrics("train", metrics) - trainer.save_state() - - # Evaluation - results = {} - if training_args.do_eval: - logger.info("*** Evaluate ***") - - metrics = trainer.evaluate( - max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix="eval" - ) - max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) - metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) - - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - if training_args.do_predict: - logger.info("*** Predict ***") - - predict_results = trainer.predict( - predict_dataset, - metric_key_prefix="predict", - max_length=data_args.val_max_target_length, - num_beams=data_args.num_beams, - ) - metrics = predict_results.metrics - max_predict_samples = ( - data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) - ) - metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) - - trainer.log_metrics("predict", metrics) - trainer.save_metrics("predict", metrics) - - if trainer.is_world_process_zero(): - if 
training_args.predict_with_generate: - predictions = tokenizer.batch_decode( - predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True - ) - predictions = [pred.strip() for pred in predictions] - output_prediction_file = os.path.join(training_args.output_dir, "tapex_predictions.txt") - with open(output_prediction_file, "w") as writer: - writer.write("\n".join(predictions)) - - return results - - -def _mp_fn(index): - # For xla_spawn (TPUs) - main() - - -if __name__ == "__main__": - main() diff --git a/spaces/chenxx/ChuanhuChatGPT/run_macOS.command b/spaces/chenxx/ChuanhuChatGPT/run_macOS.command deleted file mode 100644 index 62af07283093d8e580763d7acfe493c3d88e7b08..0000000000000000000000000000000000000000 --- a/spaces/chenxx/ChuanhuChatGPT/run_macOS.command +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# 获取脚本所在目录 -script_dir=$(dirname "$0") - -# 将工作目录更改为脚本所在目录 -cd "$script_dir" - -# 检查Git仓库是否有更新 -git remote update -pwd - -if ! git status -uno | grep 'up to date' > /dev/null; then - # 如果有更新,关闭当前运行的服务器 - pkill -f ChuanhuChatbot.py - - # 拉取最新更改 - git pull - - # 安装依赖 - pip3 install -r requirements.txt - - # 重新启动服务器 - nohup python3 ChuanhuChatbot.py & -fi diff --git a/spaces/chinhon/Chinese_News_Headlines_Generator/README.md b/spaces/chinhon/Chinese_News_Headlines_Generator/README.md deleted file mode 100644 index c857a4f0b8c3684b202eeebad7e341b6621f7053..0000000000000000000000000000000000000000 --- a/spaces/chinhon/Chinese_News_Headlines_Generator/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Chinese_News_Headlines_Generator -emoji: 💻 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.43.2 -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/cihyFjudo/fairness-paper-search/Carsoft Mercedes Benz 7.6. ( K L COM USB ) [ What You Need to Know Before You Buy It.md b/spaces/cihyFjudo/fairness-paper-search/Carsoft Mercedes Benz 7.6. ( K L COM USB ) [ What You Need to Know Before You Buy It.md deleted file mode 100644 index 7353ae35bf685b9821ed4f1c5de9a273c68cd1ab..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Carsoft Mercedes Benz 7.6. ( K L COM USB ) [ What You Need to Know Before You Buy It.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Carsoft Mercedes Benz 7.6. ( K L , COM USB ) [


    Download Zip https://tinurli.com/2uwiwu




    diff --git a/spaces/cihyFjudo/fairness-paper-search/Honda Cbr 600 Price Philippines Bmw Compare Specs Features and Reviews.md b/spaces/cihyFjudo/fairness-paper-search/Honda Cbr 600 Price Philippines Bmw Compare Specs Features and Reviews.md deleted file mode 100644 index 77d07037ab3d6d408816f32532a41419ebd9cb88..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Honda Cbr 600 Price Philippines Bmw Compare Specs Features and Reviews.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Honda Cbr 600 Price Philippines Bmw


    DOWNLOAD ✸✸✸ https://tinurli.com/2uwiIA




    diff --git a/spaces/cihyFjudo/fairness-paper-search/Siemens Megaset 960 Bedienungsanleitung Pdf Download Hier finden Sie alle Informationen.md b/spaces/cihyFjudo/fairness-paper-search/Siemens Megaset 960 Bedienungsanleitung Pdf Download Hier finden Sie alle Informationen.md deleted file mode 100644 index 458fd343f6b45f4c4a7702cf755bb1cc5e887746..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Siemens Megaset 960 Bedienungsanleitung Pdf Download Hier finden Sie alle Informationen.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Siemens Megaset 960 Bedienungsanleitung Pdf Download


    Download Zip > https://tinurli.com/2uwisR




    diff --git a/spaces/cihyFjudo/fairness-paper-search/Will To Live 2 Tamil Dubbed Movie Free Download Get Ready for the Most Anticipated Release of the Year.md b/spaces/cihyFjudo/fairness-paper-search/Will To Live 2 Tamil Dubbed Movie Free Download Get Ready for the Most Anticipated Release of the Year.md deleted file mode 100644 index 31da9fa3f15b87f8f282c2d4ecc73ee9f7c4bb4d..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Will To Live 2 Tamil Dubbed Movie Free Download Get Ready for the Most Anticipated Release of the Year.md +++ /dev/null @@ -1,12 +0,0 @@ - -

    What's more, many of these websites offer free access to a wide range of anime movies, cartoons, and TV shows. Although the numbers are high, it isn't easy to find free dubbed anime websites with quality content and smooth video streaming. So, we have listed the 10 best websites to watch dubbed anime online for free:

    It is the most popular site for watching free dubbed anime movies and TV shows. With millions of viewers every day, it offers more than 10,000 anime titles, films, and videos with English subtitles and dubbing.

    Will To Live 2 Tamil dubbed movie free download


    Download » https://tinurli.com/2uwj6S




    Gogoanime is yet another free dubbed anime website with a fantastic viewing and streaming experience. Its sleek interface and user-friendly navigation make it a top choice for frugal viewers like us. You can watch and download both the newest and the oldest anime shows; Gogoanime is for all anime fans.

    The most recent releases top the chart, while titles can also be sorted alphabetically and by year, popularity, and release date for quick and easy searching. It has anime videos, movies, and shows covering horror, kids, action, cars, and games. You can also find the latest Chinese dubbed animation shows free of cost.

    It is the most popular platform with a wide range of English dubbed Japanese anime videos. Without creating an account or login ID, you can watch and download your favorite anime shows and movies without any disruptions.


    Watch dubbed anime to experience rich, traditional Japanese and Chinese culture. With martial arts at the core of Japanese and Chinese tradition, you're sure to enjoy action-packed fun. These free dubbed anime websites let you enjoy uncensored anime shows in your own language, at your convenience. Our list offers multiple options to satisfy your craving for anime shows and movies without spending a dime.

    \ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/charset_normalizer/md.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/charset_normalizer/md.py deleted file mode 100644 index 13aa062e71e4c07832c3dea08a70925b61848dcd..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/charset_normalizer/md.py +++ /dev/null @@ -1,582 +0,0 @@ -from functools import lru_cache -from logging import getLogger -from typing import List, Optional - -from .constant import ( - COMMON_SAFE_ASCII_CHARACTERS, - TRACE, - UNICODE_SECONDARY_RANGE_KEYWORD, -) -from .utils import ( - is_accentuated, - is_ascii, - is_case_variable, - is_cjk, - is_emoticon, - is_hangul, - is_hiragana, - is_katakana, - is_latin, - is_punctuation, - is_separator, - is_symbol, - is_thai, - is_unprintable, - remove_accent, - unicode_range, -) - - -class MessDetectorPlugin: - """ - Base abstract class used for mess detection plugins. - All detectors MUST extend and implement given methods. - """ - - def eligible(self, character: str) -> bool: - """ - Determine if given character should be fed in. - """ - raise NotImplementedError # pragma: nocover - - def feed(self, character: str) -> None: - """ - The main routine to be executed upon character. - Insert the logic in witch the text would be considered chaotic. - """ - raise NotImplementedError # pragma: nocover - - def reset(self) -> None: # pragma: no cover - """ - Permit to reset the plugin to the initial state. - """ - raise NotImplementedError - - @property - def ratio(self) -> float: - """ - Compute the chaos ratio based on what your feed() has seen. - Must NOT be lower than 0.; No restriction gt 0. - """ - raise NotImplementedError # pragma: nocover - - -class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin): - def __init__(self) -> None: - self._punctuation_count: int = 0 - self._symbol_count: int = 0 - self._character_count: int = 0 - - self._last_printable_char: Optional[str] = None - self._frenzy_symbol_in_word: bool = False - - def eligible(self, character: str) -> bool: - return character.isprintable() - - def feed(self, character: str) -> None: - self._character_count += 1 - - if ( - character != self._last_printable_char - and character not in COMMON_SAFE_ASCII_CHARACTERS - ): - if is_punctuation(character): - self._punctuation_count += 1 - elif ( - character.isdigit() is False - and is_symbol(character) - and is_emoticon(character) is False - ): - self._symbol_count += 2 - - self._last_printable_char = character - - def reset(self) -> None: # pragma: no cover - self._punctuation_count = 0 - self._character_count = 0 - self._symbol_count = 0 - - @property - def ratio(self) -> float: - if self._character_count == 0: - return 0.0 - - ratio_of_punctuation: float = ( - self._punctuation_count + self._symbol_count - ) / self._character_count - - return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0 - - -class TooManyAccentuatedPlugin(MessDetectorPlugin): - def __init__(self) -> None: - self._character_count: int = 0 - self._accentuated_count: int = 0 - - def eligible(self, character: str) -> bool: - return character.isalpha() - - def feed(self, character: str) -> None: - self._character_count += 1 - - if is_accentuated(character): - self._accentuated_count += 1 - - def reset(self) -> None: # pragma: no cover - self._character_count = 0 - self._accentuated_count = 0 - - @property - def ratio(self) -> float: - if 
self._character_count == 0 or self._character_count < 8: - return 0.0 - ratio_of_accentuation: float = self._accentuated_count / self._character_count - return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0 - - -class UnprintablePlugin(MessDetectorPlugin): - def __init__(self) -> None: - self._unprintable_count: int = 0 - self._character_count: int = 0 - - def eligible(self, character: str) -> bool: - return True - - def feed(self, character: str) -> None: - if is_unprintable(character): - self._unprintable_count += 1 - self._character_count += 1 - - def reset(self) -> None: # pragma: no cover - self._unprintable_count = 0 - - @property - def ratio(self) -> float: - if self._character_count == 0: - return 0.0 - - return (self._unprintable_count * 8) / self._character_count - - -class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin): - def __init__(self) -> None: - self._successive_count: int = 0 - self._character_count: int = 0 - - self._last_latin_character: Optional[str] = None - - def eligible(self, character: str) -> bool: - return character.isalpha() and is_latin(character) - - def feed(self, character: str) -> None: - self._character_count += 1 - if ( - self._last_latin_character is not None - and is_accentuated(character) - and is_accentuated(self._last_latin_character) - ): - if character.isupper() and self._last_latin_character.isupper(): - self._successive_count += 1 - # Worse if its the same char duplicated with different accent. - if remove_accent(character) == remove_accent(self._last_latin_character): - self._successive_count += 1 - self._last_latin_character = character - - def reset(self) -> None: # pragma: no cover - self._successive_count = 0 - self._character_count = 0 - self._last_latin_character = None - - @property - def ratio(self) -> float: - if self._character_count == 0: - return 0.0 - - return (self._successive_count * 2) / self._character_count - - -class SuspiciousRange(MessDetectorPlugin): - def __init__(self) -> None: - self._suspicious_successive_range_count: int = 0 - self._character_count: int = 0 - self._last_printable_seen: Optional[str] = None - - def eligible(self, character: str) -> bool: - return character.isprintable() - - def feed(self, character: str) -> None: - self._character_count += 1 - - if ( - character.isspace() - or is_punctuation(character) - or character in COMMON_SAFE_ASCII_CHARACTERS - ): - self._last_printable_seen = None - return - - if self._last_printable_seen is None: - self._last_printable_seen = character - return - - unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen) - unicode_range_b: Optional[str] = unicode_range(character) - - if is_suspiciously_successive_range(unicode_range_a, unicode_range_b): - self._suspicious_successive_range_count += 1 - - self._last_printable_seen = character - - def reset(self) -> None: # pragma: no cover - self._character_count = 0 - self._suspicious_successive_range_count = 0 - self._last_printable_seen = None - - @property - def ratio(self) -> float: - if self._character_count == 0: - return 0.0 - - ratio_of_suspicious_range_usage: float = ( - self._suspicious_successive_range_count * 2 - ) / self._character_count - - if ratio_of_suspicious_range_usage < 0.1: - return 0.0 - - return ratio_of_suspicious_range_usage - - -class SuperWeirdWordPlugin(MessDetectorPlugin): - def __init__(self) -> None: - self._word_count: int = 0 - self._bad_word_count: int = 0 - self._foreign_long_count: int = 0 - - self._is_current_word_bad: bool = False - 
self._foreign_long_watch: bool = False - - self._character_count: int = 0 - self._bad_character_count: int = 0 - - self._buffer: str = "" - self._buffer_accent_count: int = 0 - - def eligible(self, character: str) -> bool: - return True - - def feed(self, character: str) -> None: - if character.isalpha(): - self._buffer += character - if is_accentuated(character): - self._buffer_accent_count += 1 - if ( - self._foreign_long_watch is False - and (is_latin(character) is False or is_accentuated(character)) - and is_cjk(character) is False - and is_hangul(character) is False - and is_katakana(character) is False - and is_hiragana(character) is False - and is_thai(character) is False - ): - self._foreign_long_watch = True - return - if not self._buffer: - return - if ( - character.isspace() or is_punctuation(character) or is_separator(character) - ) and self._buffer: - self._word_count += 1 - buffer_length: int = len(self._buffer) - - self._character_count += buffer_length - - if buffer_length >= 4: - if self._buffer_accent_count / buffer_length > 0.34: - self._is_current_word_bad = True - # Word/Buffer ending with an upper case accentuated letter are so rare, - # that we will consider them all as suspicious. Same weight as foreign_long suspicious. - if is_accentuated(self._buffer[-1]) and self._buffer[-1].isupper(): - self._foreign_long_count += 1 - self._is_current_word_bad = True - if buffer_length >= 24 and self._foreign_long_watch: - camel_case_dst = [ - i - for c, i in zip(self._buffer, range(0, buffer_length)) - if c.isupper() - ] - probable_camel_cased: bool = False - - if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3): - probable_camel_cased = True - - if not probable_camel_cased: - self._foreign_long_count += 1 - self._is_current_word_bad = True - - if self._is_current_word_bad: - self._bad_word_count += 1 - self._bad_character_count += len(self._buffer) - self._is_current_word_bad = False - - self._foreign_long_watch = False - self._buffer = "" - self._buffer_accent_count = 0 - elif ( - character not in {"<", ">", "-", "=", "~", "|", "_"} - and character.isdigit() is False - and is_symbol(character) - ): - self._is_current_word_bad = True - self._buffer += character - - def reset(self) -> None: # pragma: no cover - self._buffer = "" - self._is_current_word_bad = False - self._foreign_long_watch = False - self._bad_word_count = 0 - self._word_count = 0 - self._character_count = 0 - self._bad_character_count = 0 - self._foreign_long_count = 0 - - @property - def ratio(self) -> float: - if self._word_count <= 10 and self._foreign_long_count == 0: - return 0.0 - - return self._bad_character_count / self._character_count - - -class CjkInvalidStopPlugin(MessDetectorPlugin): - """ - GB(Chinese) based encoding often render the stop incorrectly when the content does not fit and - can be easily detected. Searching for the overuse of '丅' and '丄'. 
- """ - - def __init__(self) -> None: - self._wrong_stop_count: int = 0 - self._cjk_character_count: int = 0 - - def eligible(self, character: str) -> bool: - return True - - def feed(self, character: str) -> None: - if character in {"丅", "丄"}: - self._wrong_stop_count += 1 - return - if is_cjk(character): - self._cjk_character_count += 1 - - def reset(self) -> None: # pragma: no cover - self._wrong_stop_count = 0 - self._cjk_character_count = 0 - - @property - def ratio(self) -> float: - if self._cjk_character_count < 16: - return 0.0 - return self._wrong_stop_count / self._cjk_character_count - - -class ArchaicUpperLowerPlugin(MessDetectorPlugin): - def __init__(self) -> None: - self._buf: bool = False - - self._character_count_since_last_sep: int = 0 - - self._successive_upper_lower_count: int = 0 - self._successive_upper_lower_count_final: int = 0 - - self._character_count: int = 0 - - self._last_alpha_seen: Optional[str] = None - self._current_ascii_only: bool = True - - def eligible(self, character: str) -> bool: - return True - - def feed(self, character: str) -> None: - is_concerned = character.isalpha() and is_case_variable(character) - chunk_sep = is_concerned is False - - if chunk_sep and self._character_count_since_last_sep > 0: - if ( - self._character_count_since_last_sep <= 64 - and character.isdigit() is False - and self._current_ascii_only is False - ): - self._successive_upper_lower_count_final += ( - self._successive_upper_lower_count - ) - - self._successive_upper_lower_count = 0 - self._character_count_since_last_sep = 0 - self._last_alpha_seen = None - self._buf = False - self._character_count += 1 - self._current_ascii_only = True - - return - - if self._current_ascii_only is True and is_ascii(character) is False: - self._current_ascii_only = False - - if self._last_alpha_seen is not None: - if (character.isupper() and self._last_alpha_seen.islower()) or ( - character.islower() and self._last_alpha_seen.isupper() - ): - if self._buf is True: - self._successive_upper_lower_count += 2 - self._buf = False - else: - self._buf = True - else: - self._buf = False - - self._character_count += 1 - self._character_count_since_last_sep += 1 - self._last_alpha_seen = character - - def reset(self) -> None: # pragma: no cover - self._character_count = 0 - self._character_count_since_last_sep = 0 - self._successive_upper_lower_count = 0 - self._successive_upper_lower_count_final = 0 - self._last_alpha_seen = None - self._buf = False - self._current_ascii_only = True - - @property - def ratio(self) -> float: - if self._character_count == 0: - return 0.0 - - return self._successive_upper_lower_count_final / self._character_count - - -@lru_cache(maxsize=1024) -def is_suspiciously_successive_range( - unicode_range_a: Optional[str], unicode_range_b: Optional[str] -) -> bool: - """ - Determine if two Unicode range seen next to each other can be considered as suspicious. - """ - if unicode_range_a is None or unicode_range_b is None: - return True - - if unicode_range_a == unicode_range_b: - return False - - if "Latin" in unicode_range_a and "Latin" in unicode_range_b: - return False - - if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b: - return False - - # Latin characters can be accompanied with a combining diacritical mark - # eg. Vietnamese. 
- if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and ( - "Combining" in unicode_range_a or "Combining" in unicode_range_b - ): - return False - - keywords_range_a, keywords_range_b = unicode_range_a.split( - " " - ), unicode_range_b.split(" ") - - for el in keywords_range_a: - if el in UNICODE_SECONDARY_RANGE_KEYWORD: - continue - if el in keywords_range_b: - return False - - # Japanese Exception - range_a_jp_chars, range_b_jp_chars = ( - unicode_range_a - in ( - "Hiragana", - "Katakana", - ), - unicode_range_b in ("Hiragana", "Katakana"), - ) - if (range_a_jp_chars or range_b_jp_chars) and ( - "CJK" in unicode_range_a or "CJK" in unicode_range_b - ): - return False - if range_a_jp_chars and range_b_jp_chars: - return False - - if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b: - if "CJK" in unicode_range_a or "CJK" in unicode_range_b: - return False - if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": - return False - - # Chinese/Japanese use dedicated range for punctuation and/or separators. - if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or ( - unicode_range_a in ["Katakana", "Hiragana"] - and unicode_range_b in ["Katakana", "Hiragana"] - ): - if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b: - return False - if "Forms" in unicode_range_a or "Forms" in unicode_range_b: - return False - - return True - - -@lru_cache(maxsize=2048) -def mess_ratio( - decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False -) -> float: - """ - Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier. - """ - - detectors: List[MessDetectorPlugin] = [ - md_class() for md_class in MessDetectorPlugin.__subclasses__() - ] - - length: int = len(decoded_sequence) + 1 - - mean_mess_ratio: float = 0.0 - - if length < 512: - intermediary_mean_mess_ratio_calc: int = 32 - elif length <= 1024: - intermediary_mean_mess_ratio_calc = 64 - else: - intermediary_mean_mess_ratio_calc = 128 - - for character, index in zip(decoded_sequence + "\n", range(length)): - for detector in detectors: - if detector.eligible(character): - detector.feed(character) - - if ( - index > 0 and index % intermediary_mean_mess_ratio_calc == 0 - ) or index == length - 1: - mean_mess_ratio = sum(dt.ratio for dt in detectors) - - if mean_mess_ratio >= maximum_threshold: - break - - if debug: - logger = getLogger("charset_normalizer") - - logger.log( - TRACE, - "Mess-detector extended-analysis start. 
" - f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} " - f"maximum_threshold={maximum_threshold}", - ) - - if len(decoded_sequence) > 16: - logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}") - logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}") - - for dt in detectors: # pragma: nocover - logger.log(TRACE, f"{dt.__class__}: {dt.ratio}") - - return round(mean_mess_ratio, 3) diff --git a/spaces/cncn102/bingo1/src/lib/bots/bing/utils.ts b/spaces/cncn102/bingo1/src/lib/bots/bing/utils.ts deleted file mode 100644 index 6bbbc5e463ad55bc1219b63cf78013f5360fc908..0000000000000000000000000000000000000000 --- a/spaces/cncn102/bingo1/src/lib/bots/bing/utils.ts +++ /dev/null @@ -1,87 +0,0 @@ -import { ChatResponseMessage, BingChatResponse } from './types' - -export function convertMessageToMarkdown(message: ChatResponseMessage): string { - if (message.messageType === 'InternalSearchQuery') { - return message.text - } - for (const card of message.adaptiveCards??[]) { - for (const block of card.body) { - if (block.type === 'TextBlock') { - return block.text - } - } - } - return '' -} - -const RecordSeparator = String.fromCharCode(30) - -export const websocketUtils = { - packMessage(data: any) { - return `${JSON.stringify(data)}${RecordSeparator}` - }, - unpackMessage(data: string | ArrayBuffer | Blob) { - if (!data) return {} - return data - .toString() - .split(RecordSeparator) - .filter(Boolean) - .map((s) => { - try { - return JSON.parse(s) - } catch (e) { - return {} - } - }) - }, -} - -export async function createImage(prompt: string, id: string, headers: HeadersInit): Promise { - const { headers: responseHeaders } = await fetch(`https://www.bing.com/images/create?partner=sydney&re=1&showselective=1&sude=1&kseed=7000&SFX=&q=${encodeURIComponent(prompt)}&iframeid=${id}`, - { - method: 'HEAD', - headers, - redirect: 'manual' - }, - ); - - if (!/&id=([^&]+)$/.test(responseHeaders.get('location') || '')) { - throw new Error('请求异常,请检查身份信息是否有效') - } - - const resultId = RegExp.$1; - let count = 0 - const imageThumbUrl = `https://www.bing.com/images/create/async/results/${resultId}?q=${encodeURIComponent(prompt)}&partner=sydney&showselective=1&IID=images.as`; - - do { - await sleep(3000); - const content = await fetch(imageThumbUrl, { headers, method: 'GET' }) - - // @ts-ignore - if (content.headers.get('content-length') > 1) { - const text = await content.text() - return (text?.match(/ target?.split('src="').pop()?.replace(/&/g, '&')) - .map(img => `![${prompt}](${img})`).join(' ') - } - } while(count ++ < 10); -} - - -export async function* streamAsyncIterable(stream: ReadableStream) { - const reader = stream.getReader() - try { - while (true) { - const { done, value } = await reader.read() - if (done) { - return - } - yield value - } - } finally { - reader.releaseLock() - } -} - -export const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms)) - diff --git a/spaces/codelion/Grounding_DINO_demo/groundingdino/models/GroundingDINO/fuse_modules.py b/spaces/codelion/Grounding_DINO_demo/groundingdino/models/GroundingDINO/fuse_modules.py deleted file mode 100644 index 2753b3ddee43c7a9fe28d1824db5d786e7e1ad59..0000000000000000000000000000000000000000 --- a/spaces/codelion/Grounding_DINO_demo/groundingdino/models/GroundingDINO/fuse_modules.py +++ /dev/null @@ -1,297 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: 
https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ - -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import DropPath - - -class FeatureResizer(nn.Module): - """ - This class takes as input a set of embeddings of dimension C1 and outputs a set of - embedding of dimension C2, after a linear transformation, dropout and normalization (LN). - """ - - def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): - super().__init__() - self.do_ln = do_ln - # Object feature encoding - self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) - self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) - self.dropout = nn.Dropout(dropout) - - def forward(self, encoder_features): - x = self.fc(encoder_features) - if self.do_ln: - x = self.layer_norm(x) - output = self.dropout(x) - return output - - -def l1norm(X, dim, eps=1e-8): - """L1-normalize columns of X""" - norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps - X = torch.div(X, norm) - return X - - -def l2norm(X, dim, eps=1e-8): - """L2-normalize columns of X""" - norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps - X = torch.div(X, norm) - return X - - -def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8): - """ - query: (n_context, queryL, d) - context: (n_context, sourceL, d) - """ - batch_size_q, queryL = query.size(0), query.size(1) - batch_size, sourceL = context.size(0), context.size(1) - - # Get attention - # --> (batch, d, queryL) - queryT = torch.transpose(query, 1, 2) - - # (batch, sourceL, d)(batch, d, queryL) - # --> (batch, sourceL, queryL) - attn = torch.bmm(context, queryT) - if raw_feature_norm == "softmax": - # --> (batch*sourceL, queryL) - attn = attn.view(batch_size * sourceL, queryL) - attn = nn.Softmax()(attn) - # --> (batch, sourceL, queryL) - attn = attn.view(batch_size, sourceL, queryL) - elif raw_feature_norm == "l2norm": - attn = l2norm(attn, 2) - elif raw_feature_norm == "clipped_l2norm": - attn = nn.LeakyReLU(0.1)(attn) - attn = l2norm(attn, 2) - else: - raise ValueError("unknown first norm type:", raw_feature_norm) - # --> (batch, queryL, sourceL) - attn = torch.transpose(attn, 1, 2).contiguous() - # --> (batch*queryL, sourceL) - attn = attn.view(batch_size * queryL, sourceL) - attn = nn.Softmax()(attn * smooth) - # --> (batch, queryL, sourceL) - attn = attn.view(batch_size, queryL, sourceL) - # --> (batch, sourceL, queryL) - attnT = torch.transpose(attn, 1, 2).contiguous() - - # --> (batch, d, sourceL) - contextT = torch.transpose(context, 1, 2) - # (batch x d x sourceL)(batch x sourceL x queryL) - # --> (batch, d, queryL) - weightedContext = torch.bmm(contextT, attnT) - # --> (batch, queryL, d) - weightedContext = torch.transpose(weightedContext, 1, 2) - - return weightedContext, attnT - - -class BiMultiHeadAttention(nn.Module): - def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None): - super(BiMultiHeadAttention, self).__init__() - - self.embed_dim = embed_dim - self.num_heads = num_heads - self.head_dim = embed_dim // num_heads - self.v_dim = v_dim - self.l_dim = l_dim - - assert ( - self.head_dim * self.num_heads == self.embed_dim - ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." 
- self.scale = self.head_dim ** (-0.5) - self.dropout = dropout - - self.v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.l_proj = nn.Linear(self.l_dim, self.embed_dim) - self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim) - - self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim) - self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim) - - self.stable_softmax_2d = True - self.clamp_min_for_underflow = True - self.clamp_max_for_overflow = True - - self._reset_parameters() - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def _reset_parameters(self): - nn.init.xavier_uniform_(self.v_proj.weight) - self.v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.l_proj.weight) - self.l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_v_proj.weight) - self.values_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_l_proj.weight) - self.values_l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_v_proj.weight) - self.out_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_l_proj.weight) - self.out_l_proj.bias.data.fill_(0) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - """_summary_ - - Args: - v (_type_): bs, n_img, dim - l (_type_): bs, n_text, dim - attention_mask_v (_type_, optional): _description_. bs, n_img - attention_mask_l (_type_, optional): _description_. bs, n_text - - Returns: - _type_: _description_ - """ - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - bsz, tgt_len, _ = v.size() - - query_states = self.v_proj(v) * self.scale - key_states = self._shape(self.l_proj(l), -1, bsz) - value_v_states = self._shape(self.values_v_proj(v), -1, bsz) - value_l_states = self._shape(self.values_l_proj(l), -1, bsz) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.view(*proj_shape) - value_v_states = value_v_states.view(*proj_shape) - value_l_states = value_l_states.view(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" - ) - - if self.stable_softmax_2d: - attn_weights = attn_weights - attn_weights.max() - - if self.clamp_min_for_underflow: - attn_weights = torch.clamp( - attn_weights, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights = torch.clamp( - attn_weights, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - attn_weights_T = attn_weights.transpose(1, 2) - attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0] - if self.clamp_min_for_underflow: - attn_weights_l = torch.clamp( - attn_weights_l, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights_l = torch.clamp( - attn_weights_l, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - # mask vison for language - if attention_mask_v is not None: - attention_mask_v = ( - attention_mask_v[:, None, None, 
:].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights_l.masked_fill_(attention_mask_v, float("-inf")) - - attn_weights_l = attn_weights_l.softmax(dim=-1) - - # mask language for vision - if attention_mask_l is not None: - attention_mask_l = ( - attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights.masked_fill_(attention_mask_l, float("-inf")) - attn_weights_v = attn_weights.softmax(dim=-1) - - attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training) - attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training) - - attn_output_v = torch.bmm(attn_probs_v, value_l_states) - attn_output_l = torch.bmm(attn_probs_l, value_v_states) - - if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output_v` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output_v.size()}" - ) - - if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim): - raise ValueError( - f"`attn_output_l` should be of size {(bsz, self.num_heads, src_len, self.head_dim)}, but is {attn_output_l.size()}" - ) - - attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output_v = attn_output_v.transpose(1, 2) - attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim) - - attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim) - attn_output_l = attn_output_l.transpose(1, 2) - attn_output_l = attn_output_l.reshape(bsz, src_len, self.embed_dim) - - attn_output_v = self.out_v_proj(attn_output_v) - attn_output_l = self.out_l_proj(attn_output_l) - - return attn_output_v, attn_output_l - - -# Bi-Direction MHA (text->image, image->text) -class BiAttentionBlock(nn.Module): - def __init__( - self, - v_dim, - l_dim, - embed_dim, - num_heads, - dropout=0.1, - drop_path=0.0, - init_values=1e-4, - cfg=None, - ): - """ - Inputs: - embed_dim - Dimensionality of input and attention feature vectors - hidden_dim - Dimensionality of hidden layer in feed-forward network - (usually 2-4x larger than embed_dim) - num_heads - Number of heads to use in the Multi-Head Attention block - dropout - Amount of dropout to apply in the feed-forward network - """ - super(BiAttentionBlock, self).__init__() - - # pre layer norm - self.layer_norm_v = nn.LayerNorm(v_dim) - self.layer_norm_l = nn.LayerNorm(l_dim) - self.attn = BiMultiHeadAttention( - v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout - ) - - # add layer scale for training stability - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True) - self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - v = self.layer_norm_v(v) - l = self.layer_norm_l(l) - delta_v, delta_l = self.attn( - v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l - ) - # v, l = v + delta_v, l + delta_l - v = v + self.drop_path(self.gamma_v * delta_v) - l = l + self.drop_path(self.gamma_l * delta_l) - return v, l - - # def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None) diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/hpeldsp_init_neon.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/hpeldsp_init_neon.c deleted file mode 100644 index 
d9feadd1dd75103fb196f3c559d79a7897898e71..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/hpeldsp_init_neon.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * ARM NEON optimised DSP functions - * Copyright (c) 2008 Mans Rullgard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include -#include - -#include "libavutil/attributes.h" -#include "hpeldsp_arm.h" - -void ff_put_pixels16_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels16_x2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels16_y2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels16_xy2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels8_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels8_x2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels8_y2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels8_xy2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels16_x2_no_rnd_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels16_y2_no_rnd_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels16_xy2_no_rnd_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels8_x2_no_rnd_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels8_y2_no_rnd_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_put_pixels8_xy2_no_rnd_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); - -void ff_avg_pixels16_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels16_x2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels16_y2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels16_xy2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels8_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels8_x2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels8_y2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels8_xy2_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels16_x2_no_rnd_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels16_y2_no_rnd_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); -void ff_avg_pixels16_xy2_no_rnd_neon(uint8_t *, const uint8_t *, ptrdiff_t, int); - -av_cold void ff_hpeldsp_init_neon(HpelDSPContext *c, int flags) -{ - c->put_pixels_tab[0][0] = ff_put_pixels16_neon; - c->put_pixels_tab[0][1] = ff_put_pixels16_x2_neon; - c->put_pixels_tab[0][2] = ff_put_pixels16_y2_neon; - c->put_pixels_tab[0][3] = ff_put_pixels16_xy2_neon; - c->put_pixels_tab[1][0] = ff_put_pixels8_neon; - c->put_pixels_tab[1][1] = ff_put_pixels8_x2_neon; - c->put_pixels_tab[1][2] = ff_put_pixels8_y2_neon; - c->put_pixels_tab[1][3] = ff_put_pixels8_xy2_neon; 
- - c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_neon; - c->put_no_rnd_pixels_tab[0][1] = ff_put_pixels16_x2_no_rnd_neon; - c->put_no_rnd_pixels_tab[0][2] = ff_put_pixels16_y2_no_rnd_neon; - c->put_no_rnd_pixels_tab[0][3] = ff_put_pixels16_xy2_no_rnd_neon; - c->put_no_rnd_pixels_tab[1][0] = ff_put_pixels8_neon; - c->put_no_rnd_pixels_tab[1][1] = ff_put_pixels8_x2_no_rnd_neon; - c->put_no_rnd_pixels_tab[1][2] = ff_put_pixels8_y2_no_rnd_neon; - c->put_no_rnd_pixels_tab[1][3] = ff_put_pixels8_xy2_no_rnd_neon; - - c->avg_pixels_tab[0][0] = ff_avg_pixels16_neon; - c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_neon; - c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_neon; - c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_neon; - c->avg_pixels_tab[1][0] = ff_avg_pixels8_neon; - c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_neon; - c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_neon; - c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_neon; - - c->avg_no_rnd_pixels_tab[0] = ff_avg_pixels16_neon; - c->avg_no_rnd_pixels_tab[1] = ff_avg_pixels16_x2_no_rnd_neon; - c->avg_no_rnd_pixels_tab[2] = ff_avg_pixels16_y2_no_rnd_neon; - c->avg_no_rnd_pixels_tab[3] = ff_avg_pixels16_xy2_no_rnd_neon; -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fic.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fic.c deleted file mode 100644 index 94cf42887f3559bd03880648fb390e978dee80b0..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fic.c +++ /dev/null @@ -1,498 +0,0 @@ -/* - * Mirillis FIC decoder - * - * Copyright (c) 2014 Konstantin Shishkov - * Copyright (c) 2014 Derek Buitenhuis - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/common.h" -#include "libavutil/mem_internal.h" -#include "libavutil/opt.h" -#include "avcodec.h" -#include "codec_internal.h" -#include "decode.h" -#include "get_bits.h" -#include "golomb.h" - -typedef struct FICThreadContext { - DECLARE_ALIGNED(16, int16_t, block)[64]; - const uint8_t *src; - int slice_h; - int src_size; - int y_off; - int p_frame; -} FICThreadContext; - -typedef struct FICContext { - AVClass *class; - AVCodecContext *avctx; - AVFrame *frame; - AVFrame *final_frame; - - FICThreadContext *slice_data; - int slice_data_size; - - const uint8_t *qmat; - - enum AVPictureType cur_frame_type; - - int aligned_width, aligned_height; - int num_slices, slice_h; - - uint8_t cursor_buf[4096]; - int skip_cursor; -} FICContext; - -static const uint8_t fic_qmat_hq[64] = { - 1, 2, 2, 2, 3, 3, 3, 4, - 2, 2, 2, 3, 3, 3, 4, 4, - 2, 2, 3, 3, 3, 4, 4, 4, - 2, 2, 3, 3, 3, 4, 4, 5, - 2, 3, 3, 3, 4, 4, 5, 6, - 3, 3, 3, 4, 4, 5, 6, 7, - 3, 3, 3, 4, 4, 5, 7, 7, - 3, 3, 4, 4, 5, 7, 7, 7, -}; - -static const uint8_t fic_qmat_lq[64] = { - 1, 5, 6, 7, 8, 9, 9, 11, - 5, 5, 7, 8, 9, 9, 11, 12, - 6, 7, 8, 9, 9, 11, 11, 12, - 7, 7, 8, 9, 9, 11, 12, 13, - 7, 8, 9, 9, 10, 11, 13, 16, - 8, 9, 9, 10, 11, 13, 16, 19, - 8, 9, 9, 11, 12, 15, 18, 23, - 9, 9, 11, 12, 15, 18, 23, 27 -}; - -static const uint8_t fic_header[7] = { 0, 0, 1, 'F', 'I', 'C', 'V' }; - -#define FIC_HEADER_SIZE 27 -#define CURSOR_OFFSET 59 - -static av_always_inline void fic_idct(int16_t *blk, int step, int shift, int rnd) -{ - const unsigned t0 = 27246 * blk[3 * step] + 18405 * blk[5 * step]; - const unsigned t1 = 27246 * blk[5 * step] - 18405 * blk[3 * step]; - const unsigned t2 = 6393 * blk[7 * step] + 32139 * blk[1 * step]; - const unsigned t3 = 6393 * blk[1 * step] - 32139 * blk[7 * step]; - const unsigned t4 = 5793U * ((int)(t2 + t0 + 0x800) >> 12); - const unsigned t5 = 5793U * ((int)(t3 + t1 + 0x800) >> 12); - const unsigned t6 = t2 - t0; - const unsigned t7 = t3 - t1; - const unsigned t8 = 17734 * blk[2 * step] - 42813 * blk[6 * step]; - const unsigned t9 = 17734 * blk[6 * step] + 42814 * blk[2 * step]; - const unsigned tA = (blk[0 * step] - blk[4 * step]) * 32768 + rnd; - const unsigned tB = (blk[0 * step] + blk[4 * step]) * 32768 + rnd; - blk[0 * step] = (int)( t4 + t9 + tB) >> shift; - blk[1 * step] = (int)( t6 + t7 + t8 + tA) >> shift; - blk[2 * step] = (int)( t6 - t7 - t8 + tA) >> shift; - blk[3 * step] = (int)( t5 - t9 + tB) >> shift; - blk[4 * step] = (int)( -t5 - t9 + tB) >> shift; - blk[5 * step] = (int)(-(t6 - t7) - t8 + tA) >> shift; - blk[6 * step] = (int)(-(t6 + t7) + t8 + tA) >> shift; - blk[7 * step] = (int)( -t4 + t9 + tB) >> shift; -} - -static void fic_idct_put(uint8_t *dst, int stride, int16_t *block) -{ - int i, j; - int16_t *ptr; - - ptr = block; - fic_idct(ptr++, 8, 13, (1 << 12) + (1 << 17)); - for (i = 1; i < 8; i++) { - fic_idct(ptr, 8, 13, 1 << 12); - ptr++; - } - - ptr = block; - for (i = 0; i < 8; i++) { - fic_idct(ptr, 1, 20, 0); - ptr += 8; - } - - ptr = block; - for (j = 0; j < 8; j++) { - for (i = 0; i < 8; i++) - dst[i] = av_clip_uint8(ptr[i]); - dst += stride; - ptr += 8; - } -} -static int fic_decode_block(FICContext *ctx, GetBitContext *gb, - uint8_t *dst, int stride, int16_t *block, int *is_p) -{ - int i, num_coeff; - - if (get_bits_left(gb) < 8) - return 
AVERROR_INVALIDDATA; - - /* Is it a skip block? */ - if (get_bits1(gb)) { - *is_p = 1; - return 0; - } - - memset(block, 0, sizeof(*block) * 64); - - num_coeff = get_bits(gb, 7); - if (num_coeff > 64) - return AVERROR_INVALIDDATA; - - for (i = 0; i < num_coeff; i++) { - int v = get_se_golomb(gb); - if (v < -2048 || v > 2048) - return AVERROR_INVALIDDATA; - block[ff_zigzag_direct[i]] = v * - ctx->qmat[ff_zigzag_direct[i]]; - } - - fic_idct_put(dst, stride, block); - - return 0; -} - -static int fic_decode_slice(AVCodecContext *avctx, void *tdata) -{ - FICContext *ctx = avctx->priv_data; - FICThreadContext *tctx = tdata; - GetBitContext gb; - const uint8_t *src = tctx->src; - int slice_h = tctx->slice_h; - int src_size = tctx->src_size; - int y_off = tctx->y_off; - int x, y, p, ret; - - ret = init_get_bits8(&gb, src, src_size); - if (ret < 0) - return ret; - - for (p = 0; p < 3; p++) { - int stride = ctx->frame->linesize[p]; - uint8_t* dst = ctx->frame->data[p] + (y_off >> !!p) * stride; - - for (y = 0; y < (slice_h >> !!p); y += 8) { - for (x = 0; x < (ctx->aligned_width >> !!p); x += 8) { - int ret; - - if ((ret = fic_decode_block(ctx, &gb, dst + x, stride, - tctx->block, &tctx->p_frame)) != 0) - return ret; - } - - dst += 8 * stride; - } - } - - return 0; -} - -static av_always_inline void fic_alpha_blend(uint8_t *dst, uint8_t *src, - int size, uint8_t *alpha) -{ - int i; - - for (i = 0; i < size; i++) - dst[i] += ((src[i] - dst[i]) * alpha[i]) >> 8; -} - -static void fic_draw_cursor(AVCodecContext *avctx, int cur_x, int cur_y) -{ - FICContext *ctx = avctx->priv_data; - uint8_t *ptr = ctx->cursor_buf; - uint8_t *dstptr[3]; - uint8_t planes[4][1024]; - uint8_t chroma[3][256]; - int i, j, p; - - /* Convert to YUVA444. */ - for (i = 0; i < 1024; i++) { - planes[0][i] = (( 25 * ptr[0] + 129 * ptr[1] + 66 * ptr[2]) / 255) + 16; - planes[1][i] = ((-38 * ptr[0] + 112 * ptr[1] + -74 * ptr[2]) / 255) + 128; - planes[2][i] = ((-18 * ptr[0] + 112 * ptr[1] + -94 * ptr[2]) / 255) + 128; - planes[3][i] = ptr[3]; - - ptr += 4; - } - - /* Subsample chroma. */ - for (i = 0; i < 32; i += 2) - for (j = 0; j < 32; j += 2) - for (p = 0; p < 3; p++) - chroma[p][16 * (i / 2) + j / 2] = (planes[p + 1][32 * i + j ] + - planes[p + 1][32 * i + j + 1] + - planes[p + 1][32 * (i + 1) + j ] + - planes[p + 1][32 * (i + 1) + j + 1]) / 4; - - /* Seek to x/y pos of cursor. */ - for (i = 0; i < 3; i++) - dstptr[i] = ctx->final_frame->data[i] + - (ctx->final_frame->linesize[i] * (cur_y >> !!i)) + - (cur_x >> !!i) + !!i; - - /* Copy. 
*/ - for (i = 0; i < FFMIN(32, avctx->height - cur_y) - 1; i += 2) { - int lsize = FFMIN(32, avctx->width - cur_x); - int csize = lsize / 2; - - fic_alpha_blend(dstptr[0], - planes[0] + i * 32, lsize, planes[3] + i * 32); - fic_alpha_blend(dstptr[0] + ctx->final_frame->linesize[0], - planes[0] + (i + 1) * 32, lsize, planes[3] + (i + 1) * 32); - fic_alpha_blend(dstptr[1], - chroma[0] + (i / 2) * 16, csize, chroma[2] + (i / 2) * 16); - fic_alpha_blend(dstptr[2], - chroma[1] + (i / 2) * 16, csize, chroma[2] + (i / 2) * 16); - - dstptr[0] += ctx->final_frame->linesize[0] * 2; - dstptr[1] += ctx->final_frame->linesize[1]; - dstptr[2] += ctx->final_frame->linesize[2]; - } -} - -static int fic_decode_frame(AVCodecContext *avctx, AVFrame *rframe, - int *got_frame, AVPacket *avpkt) -{ - FICContext *ctx = avctx->priv_data; - const uint8_t *src = avpkt->data; - int ret; - int slice, nslices; - int msize; - int tsize; - int cur_x, cur_y; - int skip_cursor = ctx->skip_cursor; - const uint8_t *sdata; - - if ((ret = ff_reget_buffer(avctx, ctx->frame, 0)) < 0) - return ret; - - /* Header + at least one slice (4) */ - if (avpkt->size < FIC_HEADER_SIZE + 4) { - av_log(avctx, AV_LOG_ERROR, "Frame data is too small.\n"); - return AVERROR_INVALIDDATA; - } - - /* Check for header. */ - if (memcmp(src, fic_header, 7)) - av_log(avctx, AV_LOG_WARNING, "Invalid FIC Header.\n"); - - /* Is it a skip frame? */ - if (src[17]) { - if (!ctx->final_frame) { - av_log(avctx, AV_LOG_WARNING, "Initial frame is skipped\n"); - return AVERROR_INVALIDDATA; - } - goto skip; - } - - nslices = src[13]; - if (!nslices) { - av_log(avctx, AV_LOG_ERROR, "Zero slices found.\n"); - return AVERROR_INVALIDDATA; - } - - /* High or Low Quality Matrix? */ - ctx->qmat = src[23] ? fic_qmat_hq : fic_qmat_lq; - - /* Skip cursor data. */ - tsize = AV_RB24(src + 24); - if (tsize > avpkt->size - FIC_HEADER_SIZE) { - av_log(avctx, AV_LOG_ERROR, - "Packet is too small to contain cursor (%d vs %d bytes).\n", - tsize, avpkt->size - FIC_HEADER_SIZE); - return AVERROR_INVALIDDATA; - } - - if (!tsize || !AV_RL16(src + 37) || !AV_RL16(src + 39)) - skip_cursor = 1; - - if (!skip_cursor && tsize < 32) { - av_log(avctx, AV_LOG_WARNING, - "Cursor data too small. Skipping cursor.\n"); - skip_cursor = 1; - } - - /* Cursor position. */ - cur_x = AV_RL16(src + 33); - cur_y = AV_RL16(src + 35); - if (!skip_cursor && (cur_x > avctx->width || cur_y > avctx->height)) { - av_log(avctx, AV_LOG_DEBUG, - "Invalid cursor position: (%d,%d). Skipping cursor.\n", - cur_x, cur_y); - skip_cursor = 1; - } - - if (!skip_cursor && (AV_RL16(src + 37) != 32 || AV_RL16(src + 39) != 32)) { - av_log(avctx, AV_LOG_WARNING, - "Invalid cursor size. Skipping cursor.\n"); - skip_cursor = 1; - } - - if (!skip_cursor && avpkt->size < CURSOR_OFFSET + sizeof(ctx->cursor_buf)) { - skip_cursor = 1; - } - - /* Slice height for all but the last slice. */ - ctx->slice_h = 16 * (ctx->aligned_height >> 4) / nslices; - if (ctx->slice_h % 16) - ctx->slice_h = FFALIGN(ctx->slice_h - 16, 16); - - /* First slice offset and remaining data. */ - sdata = src + tsize + FIC_HEADER_SIZE + 4 * nslices; - msize = avpkt->size - nslices * 4 - tsize - FIC_HEADER_SIZE; - - if (msize <= ctx->aligned_width/8 * (ctx->aligned_height/8) / 8) { - av_log(avctx, AV_LOG_ERROR, "Not enough frame data to decode.\n"); - return AVERROR_INVALIDDATA; - } - - /* Allocate slice data. 
*/ - av_fast_malloc(&ctx->slice_data, &ctx->slice_data_size, - nslices * sizeof(ctx->slice_data[0])); - if (!ctx->slice_data_size) { - av_log(avctx, AV_LOG_ERROR, "Could not allocate slice data.\n"); - return AVERROR(ENOMEM); - } - memset(ctx->slice_data, 0, nslices * sizeof(ctx->slice_data[0])); - - for (slice = 0; slice < nslices; slice++) { - unsigned slice_off = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4); - unsigned slice_size; - int y_off = ctx->slice_h * slice; - int slice_h = ctx->slice_h; - - /* - * Either read the slice size, or consume all data left. - * Also, special case the last slight height. - */ - if (slice == nslices - 1) { - slice_size = msize; - slice_h = FFALIGN(avctx->height - ctx->slice_h * (nslices - 1), 16); - } else { - slice_size = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4 + 4); - if (slice_size < slice_off) - return AVERROR_INVALIDDATA; - } - - if (slice_size < slice_off || slice_size > msize) - continue; - - slice_size -= slice_off; - - ctx->slice_data[slice].src = sdata + slice_off; - ctx->slice_data[slice].src_size = slice_size; - ctx->slice_data[slice].slice_h = slice_h; - ctx->slice_data[slice].y_off = y_off; - } - - if ((ret = avctx->execute(avctx, fic_decode_slice, ctx->slice_data, - NULL, nslices, sizeof(ctx->slice_data[0]))) < 0) - return ret; - - ctx->frame->key_frame = 1; - ctx->frame->pict_type = AV_PICTURE_TYPE_I; - for (slice = 0; slice < nslices; slice++) { - if (ctx->slice_data[slice].p_frame) { - ctx->frame->key_frame = 0; - ctx->frame->pict_type = AV_PICTURE_TYPE_P; - break; - } - } - av_frame_free(&ctx->final_frame); - ctx->final_frame = av_frame_clone(ctx->frame); - if (!ctx->final_frame) { - av_log(avctx, AV_LOG_ERROR, "Could not clone frame buffer.\n"); - return AVERROR(ENOMEM); - } - - /* Make sure we use a user-supplied buffer. */ - if ((ret = ff_reget_buffer(avctx, ctx->final_frame, 0)) < 0) { - av_log(avctx, AV_LOG_ERROR, "Could not make frame writable.\n"); - return ret; - } - - /* Draw cursor. 
*/ - if (!skip_cursor) { - memcpy(ctx->cursor_buf, src + CURSOR_OFFSET, sizeof(ctx->cursor_buf)); - fic_draw_cursor(avctx, cur_x, cur_y); - } - -skip: - *got_frame = 1; - if ((ret = av_frame_ref(rframe, ctx->final_frame)) < 0) - return ret; - - return avpkt->size; -} - -static av_cold int fic_decode_close(AVCodecContext *avctx) -{ - FICContext *ctx = avctx->priv_data; - - av_freep(&ctx->slice_data); - av_frame_free(&ctx->final_frame); - av_frame_free(&ctx->frame); - - return 0; -} - -static av_cold int fic_decode_init(AVCodecContext *avctx) -{ - FICContext *ctx = avctx->priv_data; - - /* Initialize various context values */ - ctx->avctx = avctx; - ctx->aligned_width = FFALIGN(avctx->width, 16); - ctx->aligned_height = FFALIGN(avctx->height, 16); - - avctx->pix_fmt = AV_PIX_FMT_YUV420P; - avctx->bits_per_raw_sample = 8; - - ctx->frame = av_frame_alloc(); - if (!ctx->frame) - return AVERROR(ENOMEM); - - return 0; -} - -static const AVOption options[] = { -{ "skip_cursor", "skip the cursor", offsetof(FICContext, skip_cursor), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM }, -{ NULL }, -}; - -static const AVClass fic_decoder_class = { - .class_name = "FIC decoder", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const FFCodec ff_fic_decoder = { - .p.name = "fic", - CODEC_LONG_NAME("Mirillis FIC"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_FIC, - .priv_data_size = sizeof(FICContext), - .init = fic_decode_init, - FF_CODEC_DECODE_CB(fic_decode_frame), - .close = fic_decode_close, - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS, - .p.priv_class = &fic_decoder_class, -}; diff --git a/spaces/conciomith/RetinaFace_FaceDetector_Extractor/README.md b/spaces/conciomith/RetinaFace_FaceDetector_Extractor/README.md deleted file mode 100644 index 978da2aeacfbf87c9d80a0f54217914251769861..0000000000000000000000000000000000000000 --- a/spaces/conciomith/RetinaFace_FaceDetector_Extractor/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: RetinaFace_Face_Detector_Extractor_Recognizer -emoji: 💻 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 2.8.12 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download MARVEL Super War Mod APK with Happymod - Enjoy Unlimited Features and Updates.md b/spaces/congsaPfin/Manga-OCR/logs/Download MARVEL Super War Mod APK with Happymod - Enjoy Unlimited Features and Updates.md deleted file mode 100644 index 4e80ffaea3d567477598204a68609e14ea3bd46c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download MARVEL Super War Mod APK with Happymod - Enjoy Unlimited Features and Updates.md +++ /dev/null @@ -1,124 +0,0 @@ - -

    Marvel Super War Mod APK Happymod: How to Download and Play

    -

    If you are a fan of Marvel comics and movies, you might have heard of Marvel Super War, a popular mobile game that lets you play as your favorite Marvel hero or villain in a 5v5 MOBA (multiplayer online battle arena) game. But did you know that you can also download a modded version of the game from HappyMod, a platform that provides thousands of modded apps and games for free? In this article, we will tell you everything you need to know about Marvel Super War mod APK happymod, including how to download it, what are its benefits and risks, and how to play it.

    -

    marvel super war mod apk happymod


    Download Zip >> https://urlca.com/2uOaBd



    -

    What is Marvel Super War?

    -

    Marvel Super War is a free-to-play mobile game developed by NetEase Games and Marvel Entertainment. It was released in December 2019 for Android and iOS devices. It is the first official MOBA game based on the Marvel universe, featuring over 50 characters from the Avengers, X-Men, Guardians of the Galaxy, Spider-Man, and more. You can choose your favorite character and join a team of five players to fight against another team in various modes and maps. You can also customize your character with different skins, items, and skills.

    -

    Features of Marvel Super War

    -

    Some of the features of Marvel Super War are:

    -
      -
    • Stunning graphics and sound effects that bring the Marvel universe to life.
    • -
    • Smooth and responsive controls that allow you to unleash your character's abilities with ease.
    • -
    • Strategic and balanced gameplay that requires teamwork and coordination.
    • -
    • Diverse and dynamic modes and maps that offer different challenges and rewards.
    • -
    • A rich and immersive story mode that follows the original comics and movies.
    • -
    • A friendly and active community that supports cross-server matchmaking and social interaction.
    • -
    -

    How to download Marvel Super War mod APK from HappyMod?

    -

    If you want to download Marvel Super War mod APK from HappyMod, you have two options:

    -

    Option A: Download from the website

    -
      -
    1. Click on this link to go to the website of HappyMod: https://happymod.com/
    2. -
    3. Search for "Marvel Super War" in the search bar or browse through the categories.
    4. -
    5. Select the version of the mod APK that you want to download. Make sure it is compatible with your device and has good ratings and reviews.
    6. -
    7. Click on "Download" and wait for the file to be downloaded in your device's Downloads folder.
    8. -
    9. Now tap on "Install" and wait for the installation to finish.
    10. -
    11. Once it is done, open the game and start playing it right away.
    12. -
    -

    Option B: Download from the app

    -
      -
1. Download and install the HappyMod app from this link: https://happymod.com/happymod.apk (if you prefer to fetch the file on a computer first, see the Python sketch after this list)
    2. -
    3. Open the app and search for "Marvel Super War" in the search bar or browse through the categories.
    4. -
    5. Select the version of the mod APK that you want to download. Make sure it is compatible with your device and has good ratings and reviews.
    6. -
    7. Click on "Download" and wait for the file to be downloaded in your device's Downloads folder.
    8. -
    9. Now tap on "Install" and wait for the installation to finish.
    10. -
    11. Once it is done, open the game and start playing it right away.
    12. -
    -
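If you would rather fetch the HappyMod installer on a computer and then copy it over to your phone (for example over USB), a minimal Python sketch is shown below. It assumes only the https://happymod.com/happymod.apk link from step 1 and the third-party requests library; the local file name is an arbitrary example.

```python
import requests

# Link taken from step 1 above; the local file name is just an example.
url = "https://happymod.com/happymod.apk"
out_path = "happymod.apk"

# Stream the response so the file is written in chunks rather than held in memory.
with requests.get(url, stream=True, timeout=30) as response:
    response.raise_for_status()
    with open(out_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)

print(f"Saved {out_path}")
```

After copying the file to your device, you would continue from the install step in the list above.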

    What is Marvel Super War mod APK happymod?

    -

    Marvel Super War mod APK happymod is a modified version of the original game that offers some extra features and advantages that are not available in the official version. For example, some of the features of Marvel Super War mod APK happymod are:

    -

    Benefits of Marvel Super War mod APK happymod

    -
      -
    • Unlimited gold and crystals that you can use to buy skins, items, and skills.
    • -
    • Unlocked all characters and modes that you can access without any restrictions.
    • -
    • No ads and no root required that make your gaming experience smoother and safer.
    • -
    • Updated regularly and compatible with most devices that ensure your game is always up to date and working properly.
    • -
    -

    Risks of Marvel Super War mod APK happymod

    -
      -
• Possible malware and viruses that can harm your device or steal your personal information (see the checksum sketch after this list for one way to double-check a download).
    • -
    • Possible ban or suspension from the game server that can prevent you from playing the game online.
    • -
    • Possible loss of data or progress that can erase your achievements and records.
    • -
    • Possible legal issues or violations that can get you in trouble with the game developers or authorities.
    • -
    -
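One practical way to reduce the malware risk listed above is to compare the downloaded file's SHA-256 hash with a hash published by a source you trust, when one is available. Below is a minimal sketch using only Python's standard library; the file name and the reference hash are placeholders you would replace with your own values.

```python
import hashlib

apk_path = "marvel-super-war-mod.apk"              # placeholder file name
expected_sha256 = "paste-the-published-hash-here"  # placeholder reference hash

sha256 = hashlib.sha256()
with open(apk_path, "rb") as f:
    # Hash the file in 1 MiB chunks so even large APKs use little memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

digest = sha256.hexdigest()
print("SHA-256:", digest)
if digest == expected_sha256.lower():
    print("Hash matches the published value.")
else:
    print("Hash does not match - do not install the file.")
```

If the site does not publish a hash, the computed value is still useful for checking the file against an online scanner before you install it.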

    How to play Marvel Super War mod APK happymod?

    -

    If you have successfully downloaded and installed Marvel Super War mod APK happymod, you can start playing it by following these steps:

    -

    Choose your favorite Marvel hero or villain

    -

    You can choose from over 50 characters from the Marvel universe, each with their own unique abilities and skills. You can also customize your character with different skins, items, and skills. Some of the popular characters are Iron Man, Captain America, Thor, Spider-Man, Black Widow, Hulk, Thanos, Loki, Deadpool, and more.

    -

    Join a 5v5 MOBA battle

    -

    You can join a team of five players and fight against another team in various modes and maps. You can choose from different roles such as tank, fighter, assassin, marksman, mage, or support. You can also communicate with your teammates using voice chat or text chat. Some of the modes are ranked match, quick match, arcade mode, practice mode, and story mode. Some of the maps are Wakanda, Xandar, Sakaar, New York, and more.

    -

    Use your skills and strategies

    -

    You can use your character's abilities and skills to attack, defend, heal, or support your team. You can also use different strategies and tactics to outsmart and outplay your enemies. You can also collect resources such as gold and crystals to buy items and upgrade your skills. You can also destroy towers, minions, monsters, and the enemy base to win the game.

    -

    Conclusion

    -

    Marvel Super War mod APK happymod is a fun and exciting way to enjoy the Marvel universe in a mobile game. You can download it from HappyMod for free and play as your favorite Marvel hero or villain in a 5v5 MOBA game. However, you should also be aware of the risks and consequences of using a modded version of the game. You should always play responsibly and respect the game rules and regulations. If you have any questions or problems with Marvel Super War mod APK happymod, you can contact HappyMod for support or feedback.

    -

    FAQs

    -
      -
    • Q: Is Marvel Super War mod APK happymod safe to use?
    • -
    • A: Marvel Super War mod APK happymod is not officially endorsed or approved by NetEase Games or Marvel Entertainment. It is a third-party modification that may contain malware or viruses that can harm your device or steal your personal information. It may also cause your account to be banned or suspended from the game server. Therefore, you should use it at your own risk and discretion.
    • -
    • Q: How do I update Marvel Super War mod APK happymod?
    • -
    • A: You can update Marvel Super War mod APK happymod by downloading the latest version from HappyMod website or app. You should always check for updates regularly to ensure your game is working properly and has the latest features.
    • -
    • Q: Can I play Marvel Super War mod APK happymod offline?
    • -
    • A: No, you cannot play Marvel Super War mod APK happymod offline. You need an internet connection to play the game online with other players. You also need an internet connection to download and update the game from HappyMod.
    • -
    • Q: Can I play Marvel Super War mod APK happymod with my friends?
    • -
    • A: Yes, you can play Marvel Super War mod APK happymod with your friends. You can invite them to join your team or challenge them to a friendly match. You can also chat with them using voice chat or text chat. However, you should make sure that they are also using the same version of the mod APK as you, otherwise you may encounter compatibility issues or errors.
    • -
    • Q: What are some tips and tricks for playing Marvel Super War mod APK happymod?
    • -
    • A: Some tips and tricks for playing Marvel Super War mod APK happymod are:
    • -
        -
      • Choose a character that suits your play style and role. Learn their abilities and skills and how to use them effectively.
      • -
      • Coordinate with your teammates and communicate with them. Use voice chat or text chat to share information, plan strategies, and support each other.
      • -
      • Be aware of the map and the objectives. Know where the towers, minions, monsters, and the enemy base are. Know when to attack, defend, retreat, or farm.
      • -
      • Use items and skills wisely. Don't waste your gold and crystals on unnecessary items or skills. Don't spam your skills without cooldown or mana. Don't use your skills randomly or recklessly.
      • -
      • Have fun and enjoy the game. Don't be toxic or rude to other players. Don't cheat or hack the game. Don't take the game too seriously or personally.
      • -
      -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download PC Games at High Speed No Ads No Registration No Hassle.md b/spaces/congsaPfin/Manga-OCR/logs/Download PC Games at High Speed No Ads No Registration No Hassle.md deleted file mode 100644 index f96508b3941942a8002821f2c78515082937b9b2..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download PC Games at High Speed No Ads No Registration No Hassle.md +++ /dev/null @@ -1,189 +0,0 @@ - -

    How to Download PC Games: A Complete Guide

    -Introduction -

    PC games are video games that are played on a personal computer, rather than a console or a mobile device. PC games have been around since the 1970s, and they have evolved over the years to offer a wide range of genres, graphics, gameplay, and features. PC games are popular among gamers of all ages and backgrounds, as they offer a high level of customization, interactivity, and immersion.

    -

    Downloading PC games is one of the most convenient and affordable ways to enjoy PC gaming. Downloading PC games means that you can access them anytime and anywhere, without having to worry about physical discs, cartridges, or boxes. Downloading PC games also means that you can benefit from frequent updates, patches, mods, and DLCs, which can enhance your gaming experience.

    -

    download pc games


    Download ★★★ https://urlca.com/2uOclZ



    -

    However, downloading PC games is not always easy or straightforward. There are many challenges that you may face when downloading PC games, such as finding the right game, choosing the right platform, dealing with slow or unstable internet connections, managing your storage space and data usage, avoiding viruses and malware, and troubleshooting errors and issues.

    -

    That's why we have created this complete guide on how to download PC games. In this guide, we will cover everything you need to know about downloading PC games, from the types of PC games to the tips and tricks for downloading PC games. Whether you are a beginner or an expert in PC gaming, this guide will help you download PC games with ease and confidence.

    -

    Types of PC Games

    -

    Genre

    -

    One of the first things you need to consider when downloading PC games is the genre of the game. The genre of a game refers to the type of gameplay, story, setting, and theme that it offers. There are many genres of PC games, but some of the most common ones are:

    -
      -
    • Action: These are games that involve fast-paced gameplay, such as shooting, fighting, racing, or platforming. Examples of action games are Doom Eternal, Grand Theft Auto V, and Super Meat Boy.
    • -
    • Adventure: These are games that involve exploration, puzzle-solving, and narrative. Examples of adventure games are The Witcher 3: Wild Hunt, Life is Strange 2, and Gone Home.
    • -
    • Role-playing: These are games that involve creating and controlling a character in a fictional world, often with elements of combat, dialogue, and progression. Examples of role-playing games are Cyberpunk 2077, Fallout 4, and Divinity: Original Sin 2.
    • -
    • Simulation: These are games that simulate realistic or fictional scenarios, such as driving, flying, farming, or dating. Examples of simulation games are Microsoft Flight Simulator 2020, Stardew Valley, and The Sims 4.
    • -
    • Strategy: These are games that involve planning and decision-making, often with elements of resource management, combat, and diplomacy. Examples of strategy games are Civilization VI, XCOM 2, and Crusader Kings III.
    • -
    • Sports: These are games that simulate or emulate various sports activities, such as soccer, basketball, golf, or skateboarding. Examples of sports games are FIFA 21, NBA 2K21, and Tony Hawk's Pro Skater 1 + 2.
    • -
    • Others: These are games that do not fit into any of the above categories or belong to multiple categories. Examples of other games are Minecraft, Among Us, and Portal 2.
    • -
    -

    The genre of a game can affect your enjoyment and satisfaction with it. Therefore, you should choose a game that matches your preferences and interests. You can also try different genres to expand your horizons and discover new experiences.

    -

    Platform

    -

    Another thing you need to consider when downloading PC games is the platform of the game. The platform of a game refers to the software or service that allows you to download, install, and play the game on your PC. There are many platforms for PC gaming, but some of the most popular ones are:

    -
      -
• Steam: This is the largest and most popular platform for PC gaming, with over 50,000 games available for download. Steam offers a variety of features, such as cloud saving, achievements, workshop, community, and streaming. Steam also has frequent sales and discounts on PC games. You can download Steam for free from its official website.
    • -
    • Origin: This is the platform for EA (Electronic Arts) games, such as Battlefield, FIFA, The Sims, and Star Wars. Origin also offers some features similar to Steam, such as cloud saving, achievements, friends, and chat. Origin also has a subscription service called EA Play, which gives you access to hundreds of EA games and other benefits. You can download Origin for free from its official website.
    • -
    • Epic Games Store: This is the platform for Epic Games, the developer of Fortnite, Unreal Engine, and other popular games. Epic Games Store also offers some exclusive games, such as Borderlands 3, Control, and Hitman 3. Epic Games Store also has a feature called Free Games, which gives you a free game every week. You can download Epic Games Store for free from its official website.
    • -
    • Microsoft Store: This is the platform for Microsoft games, such as Halo, Gears of War, and Forza. Microsoft Store also offers some games that are compatible with both PC and Xbox, such as Sea of Thieves, Ori and the Will of the Wisps, and Minecraft Dungeons. Microsoft Store also has a subscription service called Xbox Game Pass for PC, which gives you access to hundreds of PC and Xbox games and other benefits. You can download Microsoft Store for free from your Windows 10 PC.
    • -
    • Others: These are platforms that are not as popular or widely used as the ones mentioned above, but still offer some PC games for download. Some examples of other platforms are GOG.com, Humble Bundle, itch.io, and Uplay.
    • -
    -

    The platform of a game can affect your convenience and compatibility with it. Therefore, you should choose a platform that suits your needs and preferences. You can also use multiple platforms to access a wider range of PC games.

    -

    How to Download PC Games

    -

    Choose a Game

    -

    The first step to download PC games is to choose a game that you want to play. This may seem like an easy task, but with thousands of PC games available for download, it can be overwhelming and confusing. Here are some tips on how to choose a game:

    -

      -
    • Find a game that suits your preferences and system requirements. You should consider factors such as genre, theme, style, difficulty, length, and multiplayer options when choosing a game. You should also check the minimum and recommended system requirements of the game to make sure that your PC can run it smoothly and without issues.
    • -
    • Compare different games and reviews. You should not rely on the title, description, or screenshots of the game alone. You should also look at the ratings, reviews, and feedback from other players who have downloaded and played the game. You can find these information on the platform's website or on other websites such as Metacritic, SteamDB, or Reddit.
    • -
    • Check the price and availability of the game. You should also consider how much the game costs and whether it is available for download on your preferred platform. You should compare the prices of different platforms and look for discounts and deals that can save you money. You should also check the availability of the game on your region and whether it has any restrictions or limitations.
    • -
    -

    Choosing a game can be fun and exciting, but also challenging and time-consuming. Therefore, you should take your time and do your research before downloading a game.
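On the system-requirements tip above: if you want a quick, scriptable look at the basics most requirement lists mention (operating system, CPU architecture, free disk space), a small sketch using only Python's standard library might look like this. The drive to check is an assumption you would adjust to wherever you install your games.

```python
import platform
import shutil

# Basic facts that most minimum-requirement lists mention.
print("Operating system:", platform.system(), platform.release())
print("Architecture:", platform.machine())

# Free space on the drive where the game would be installed
# ("C:\\" is assumed for a typical Windows setup; "/" elsewhere).
install_drive = "C:\\" if platform.system() == "Windows" else "/"
total, used, free = shutil.disk_usage(install_drive)
print(f"Free disk space on {install_drive}: {free / 1024**3:.1f} GiB")
```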

    -

    Choose a Platform

    -

    The next step to download PC games is to choose a platform that you want to use for downloading the game. This may seem like an obvious task, but with many platforms available for PC gaming, it can be tricky and complicated. Here are some tips on how to choose a platform:

    -
      -
    • Decide which platform to use for downloading the game. You should consider factors such as features, selection, performance, security, and customer service when choosing a platform. You should also check if the game you want to download is available and compatible with the platform you want to use. You can find these information on the platform's website or on other websites such as PCGamingWiki, PCGamesN, or PC Gamer.
    • -
    • Create an account and install a platform. You should also create an account and install the platform on your PC before downloading the game. You should follow the instructions and guidelines on the platform's website or app to create an account and install the platform. You should also verify your email address and set up your payment method if required.
    • -
    • Browse and search for games on a platform. You should also browse and search for games on the platform that you want to use. You can use the filters, categories, tags, and recommendations to find games that match your criteria. You can also use the search bar to type in the name or keywords of the game that you want to download.
    • -
    -

    Choosing a platform can be convenient and simple, but also difficult and confusing. Therefore, you should compare and contrast different platforms before downloading a game.

    -

    Download a Game

    -

    The final step to download PC games is to download the game that you want to play. This may seem like a straightforward task, but with many steps and options involved, it can be complex and tedious. Here are some tips on how to download a game:

    -
      -
    • Purchase a game and add it to your library. You should also purchase the game that you want to download and add it to your library on the platform that you want to use. You should follow the instructions and guidelines on the platform's website or app to purchase the game and add it to your library. You should also review the terms and conditions, privacy policy, and refund policy of the platform and the game before purchasing.
    • -
    • Download and install a game on your PC. You should also download and install the game that you want to play on your PC. You should follow the instructions and guidelines on the platform's website or app to download and install the game on your PC. You should also check the download progress, size, speed, and location of the game on your PC.
    • -
    • Launch and play a game on your PC. You should also launch and play the game that you have downloaded and installed on your PC. You should follow the instructions and guidelines on the platform's website or app to launch and play the game on your PC. You should also adjust the settings, options, controls, and graphics of the game according to your preferences.
    • -
    -

    Downloading a game can be exciting and rewarding, but also challenging and frustrating. Therefore, you should follow these tips and tricks for downloading a game.

    -

    Tips and Tricks for Downloading PC Games

    -

    Save Money

    -

    One of the advantages of downloading PC games is that you can save money compared to buying physical copies of games. However, downloading PC games can still be expensive if you are not careful. Here are some tips on how to save money when downloading PC games:

    -
      -
    • Find discounts and deals on PC games. You should look for discounts and deals on PC games that can lower their prices significantly. You can find these discounts and deals on the platform's website or app, or on other websites such as IsThereAnyDeal, CheapShark, or Slickdeals. You can also use browser extensions such as Honey, Wikibuy, or CamelCamelCamel to find and apply coupons and promo codes for PC games.
    • -
    • Use subscription services and loyalty programs for PC games. You should also use subscription services and loyalty programs that can give you access to hundreds of PC games and other benefits for a monthly or annual fee. Some examples of subscription services are Xbox Game Pass for PC, EA Play, and Humble Choice. Some examples of loyalty programs are Steam Points, Origin Access, and Epic Games Coupons.
    • -
    -

    Saving money when downloading PC games can help you enjoy more games without breaking the bank. Therefore, you should use these tips and tricks for saving money when downloading PC games.

    -

    Save Time

    -

    One of the disadvantages of downloading PC games is that it can take a lot of time depending on your internet connection, download speed, and game size. However, downloading PC games can be faster if you are smart. Here are some tips on how to save time when downloading PC games:

    -
      -
    • Optimize your download speed and bandwidth for PC games. You should optimize your download speed and bandwidth for PC games by using a wired connection instead of a wireless one, closing other applications that use the internet, updating your drivers and software, and changing your DNS settings. You can also use tools such as Speedtest, Fast.com, or Google Fiber Speed Test to measure your download speed and bandwidth.
    • -
    • Pause and resume downloads for PC games. You should also pause and resume downloads for PC games when you need to use the internet for other purposes, such as browsing, streaming, or gaming. You can pause and resume downloads for PC games on the platform's website or app, or on your PC's settings. You can also schedule downloads for PC games to run at specific times when the internet is less busy.
    • -
    • Manage your storage space and data usage for PC games. You should also manage your storage space and data usage for PC games by deleting or uninstalling games that you no longer play, moving or transferring games to another drive or device, and compressing or archiving games that you rarely play. You can also use tools such as WinDirStat, TreeSize, or SpaceSniffer to analyze your storage space and data usage.
    • -
    -

    Saving time when downloading PC games can help you play more games without wasting time. Therefore, you should use these tips and tricks for saving time when downloading PC games.
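As a rough, scriptable complement to the speed-test sites mentioned above, you can also time a small test download yourself. The sketch below uses only Python's standard library; the test URL is just an example of a large public file and can be swapped for any file you trust, and the figure it prints is an estimate rather than a proper speed test.

```python
import time
import urllib.request

# Example URL only - replace with any reasonably large file you trust.
TEST_URL = "https://speed.hetzner.de/10MB.bin"

start = time.monotonic()
downloaded = 0
with urllib.request.urlopen(TEST_URL, timeout=30) as response:
    while True:
        chunk = response.read(64 * 1024)
        if not chunk:
            break
        downloaded += len(chunk)
elapsed = time.monotonic() - start

mbps = downloaded * 8 / (elapsed * 1_000_000)
print(f"Downloaded {downloaded / 1_000_000:.1f} MB in {elapsed:.1f} s (~{mbps:.1f} Mbit/s)")
```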

    -

    Save Trouble

    -

    One of the risks of downloading PC games is that you may encounter viruses, malware, errors, and issues that can harm your PC or ruin your gaming experience. However, downloading PC games can be safer and smoother if you are careful. Here are some tips on how to save trouble when downloading PC games:

    -
      -
    • Avoid viruses and malware when downloading PC games. You should avoid viruses and malware when downloading PC games by using a reputable platform that has security measures and guarantees, such as Steam, Origin, or Epic Games Store. You should also use a reliable antivirus software that can scan and protect your PC from malicious files and programs, such as Avast, Norton, or McAfee.
    • -
    • Troubleshoot common errors and issues when downloading PC games. You should troubleshoot common errors and issues when downloading PC games by following the instructions and guidelines on the platform's website or app, or on other websites such as Steam Support, Origin Help, or Epic Games Help. You should also update your drivers, software, and firmware that can affect your download performance and compatibility.
    • -
    • Get support and help when downloading PC games. You should get support and help when downloading PC games by contacting the platform's customer service or technical support team, or by reaching out to other players who have downloaded and played the game. You can find these contacts on the platform's website or app, or on other websites such as Steam Community, Origin Forums, or Epic Games Forums.
    • -
    -

    Saving trouble when downloading PC games can help you enjoy more games without hassle. Therefore, you should use these tips and tricks for saving trouble when downloading PC games.

    -

    Conclusion

    -

    Downloading PC games is a great way to enjoy PC gaming at your own convenience and comfort. However, downloading PC games can also be a daunting and frustrating task if you are not prepared and informed. That's why we have created this complete guide on how to download PC games. In this guide, we have covered everything you need to know about downloading PC games, from the types of PC games to the tips and tricks for downloading PC games. We hope that this guide has helped you download PC games with ease and confidence.

    -

    If you are ready to start downloading PC games, you can check out some of the best PC games to download in 2023, such as:

Game | Genre | Platform
Half-Life: Alyx | Action | Steam
Red Dead Redemption 2 | Adventure | Epic Games Store
Baldur's Gate 3 | Role-playing | Steam
Kerbal Space Program 2 | Simulation | Steam
Total War: Warhammer III | Strategy | Steam
FIFA 23 | Sports | Origin
Cyberpunk 2077: The Final Cut | Others | GOG.com
    -

    If you have any questions or feedback about downloading PC games, you can check out some of the FAQs below or leave us a comment. We would love to hear from you and help you with your PC gaming needs.

    -

    FAQs

    -

    What are some of the best platforms to download PC games in 2023?

    -

    Some of the best platforms to download PC games in 2023 are Steam, Origin, Epic Games Store, Microsoft Store, and GOG.com. These platforms offer a large selection of PC games, as well as features such as cloud saving, achievements, community, and streaming. They also have frequent sales and discounts on PC games, as well as subscription services and loyalty programs that can give you access to hundreds of PC games and other benefits.

    -

    What are some of the best free PC games to download in 2023?

    -

    Some of the best free PC games to download in 2023 are Fortnite, Apex Legends, Valorant, League of Legends, and Dota 2. These games are free to play and download, but they also offer optional in-game purchases that can enhance your gaming experience. They also have high-quality graphics, gameplay, and features, as well as large and active player bases.

    -

    What are some of the best tools and software to download PC games in 2023?

    -

Some of the best tools and software to download PC games in 2023 are Speedtest, WinDirStat, and Avast. These tools can help you optimize your download speed and bandwidth, manage your storage space and data usage, and avoid viruses and malware when downloading PC games. They can also help you troubleshoot common errors and issues when downloading PC games.

    -

    What are some of the best resources and websites to learn more about downloading PC games in 2023?

    -

Some of the best resources and websites to learn more about downloading PC games in 2023 are Metacritic, SteamDB, and Reddit. These resources can help you find and compare different PC games and reviews, as well as get tips and tricks from other players who have downloaded and played the game. They can also help you stay updated on the latest news and trends in PC gaming.

    -

    How can I contact you for more information or feedback about downloading PC games?

    -

You can contact us for more information or feedback about downloading PC games by leaving us a comment below or sending us an email at [email protected]. We would love to hear from you and help you with your PC gaming needs.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Real FPS Commando Game Shooting Gun Strike Offline Mod APK for Android.md b/spaces/congsaPfin/Manga-OCR/logs/Download Real FPS Commando Game Shooting Gun Strike Offline Mod APK for Android.md deleted file mode 100644 index 7e99165c108994a713549626039b4bd60762a1e2..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Real FPS Commando Game Shooting Gun Strike Offline Mod APK for Android.md +++ /dev/null @@ -1,82 +0,0 @@ -
    -

    Real FPS Commando Game Shooting Gun Strike Offline Mod APK

    -

    If you are looking for an action-packed shooting game that you can play offline, then you should try Real FPS Commando Game Shooting Gun Strike offline mod apk. This is a thrilling game that will test your skills as a commando in various missions and scenarios. You will have access to unlimited money and weapons, realistic 3D graphics and sound effects, multiple modes and missions, and easy controls and gameplay. In this article, we will tell you more about this game and how to download and install it on your device.

    -

    real fps commando game shooting gun strike offline mod apk


    Download Filehttps://urlca.com/2uO4vr



    -

    Introduction

    -

    What is Real FPS Commando Game Shooting Gun Strike?

    -

    Real FPS Commando Game Shooting Gun Strike is a first-person shooter game that puts you in the role of a commando who has to complete various tasks and objectives. You will face different enemies, such as terrorists, zombies, robots, and aliens, in different locations, such as city streets, deserts, jungles, and space stations. You will have to use your skills and strategies to survive and eliminate your targets.

    -

    Why play Real FPS Commando Game Shooting Gun Strike offline mod apk?

    -

    There are many reasons why you should play Real FPS Commando Game Shooting Gun Strike offline mod apk. Here are some of them:

    -
      -
    • You can play it offline without any internet connection.
    • -
    • You can enjoy unlimited money and weapons that will help you in your missions.
    • -
    • You can experience realistic 3D graphics and sound effects that will immerse you in the game.
    • -
    • You can choose from multiple modes and missions that will challenge you and keep you entertained.
    • -
    • You can control your commando easily with simple and intuitive controls.
    • -
    -

    Features of Real FPS Commando Game Shooting Gun Strike offline mod apk

    -

    Unlimited money and weapons

    -

    One of the best features of Real FPS Commando Game Shooting Gun Strike offline mod apk is that you can get unlimited money and weapons. You can use the money to buy new weapons, upgrade your existing ones, or customize your commando. You can also unlock all the weapons in the game, such as pistols, rifles, shotguns, snipers, rocket launchers, grenades, and more. You can use these weapons to blast your enemies away with ease.

    -

    Realistic 3D graphics and sound effects

    -

    Another great feature of Real FPS Commando Game Shooting Gun Strike offline mod apk is that it has realistic 3D graphics and sound effects. The game has high-quality graphics that will make you feel like you are in the middle of the action. The game also has amazing sound effects that will enhance your gaming experience. You will hear the sounds of gunshots, explosions, screams, footsteps, and more. You will also hear the voice of your commando as he communicates with his team or taunts his enemies.

    -

    Multiple modes and missions

    -

    A third feature of Real FPS Commando Game Shooting Gun Strike offline mod apk is that it has multiple modes and missions. The game has four modes: Campaign, Survival, Zombie, and Multiplayer. In Campaign mode, you will follow a story line and complete various missions. In Survival mode, you will face endless waves of enemies and try to survive as long as possible. In Zombie mode, you will fight against hordes of zombies in different maps. In Multiplayer mode, you will compete with other players online in different modes, such as Team Deathmatch, Free for All, Capture the Flag, and more. The game also has various missions that will test your skills and abilities. You will have to complete objectives, such as rescuing hostages, defusing bombs, assassinating targets, and more.

    -

    Easy controls and gameplay

    -

    A fourth feature of Real FPS Commando Game Shooting Gun Strike offline mod apk is that it has easy controls and gameplay. The game has simple and intuitive controls that will let you control your commando with ease. You can move your commando with a virtual joystick, aim and shoot with a fire button, reload with a reload button, switch weapons with a weapon button, and use other functions with other buttons. The game also has smooth and fast gameplay that will keep you on the edge of your seat. You can perform actions, such as jumping, crouching, running, sliding, and more.

    -

    How to download and install Real FPS Commando Game Shooting Gun Strike offline mod apk

    -

    Step 1: Download the apk file from a trusted source

    -

    The first step to download and install Real FPS Commando Game Shooting Gun Strike offline mod apk is to download the apk file from a trusted source. You can find the apk file on various websites that offer modded games and apps. However, you should be careful and only download from reputable sources that are virus-free and safe. You can also scan the apk file with an antivirus app before installing it.


    Step 2: Enable unknown sources on your device

    -

    The second step to download and install Real FPS Commando Game Shooting Gun Strike offline mod apk is to enable unknown sources on your device. This is because the game is not available on the official Google Play Store and you need to allow your device to install apps from other sources. To do this, you need to go to your device settings, then security or privacy settings, then enable unknown sources or allow installation from unknown sources.

    -

    Step 3: Install the apk file and launch the game

    -

    The third step to download and install Real FPS Commando Game Shooting Gun Strike offline mod apk is to install the apk file and launch the game. To do this, you need to locate the apk file on your device storage, then tap on it and follow the instructions to install it. Once the installation is done, you can launch the game from your app drawer or home screen and enjoy playing it.

    -

    Conclusion

    -

    Summary of the main points

    -

    In conclusion, Real FPS Commando Game Shooting Gun Strike offline mod apk is an awesome shooting game that you can play offline. It has unlimited money and weapons, realistic 3D graphics and sound effects, multiple modes and missions, and easy controls and gameplay. It is a game that will keep you entertained and challenged for hours.

    -

    Call to action and recommendation

    -

    If you are interested in playing Real FPS Commando Game Shooting Gun Strike offline mod apk, then you should download it now from a trusted source. You will not regret it as it is one of the best shooting games out there. You will have a blast shooting your enemies and completing your missions. You will also have fun competing with other players online in multiplayer mode. So what are you waiting for? Download Real FPS Commando Game Shooting Gun Strike offline mod apk today and enjoy!

    -

    FAQs

    -
      -
    • Q: Is Real FPS Commando Game Shooting Gun Strike offline mod apk free?
    • -
    • A: Yes, it is free to download and play.
    • -
    • Q: Is Real FPS Commando Game Shooting Gun Strike offline mod apk safe?
    • -
    • A: Yes, it is safe as long as you download it from a trusted source and scan it with an antivirus app.
    • -
    • Q: How much storage space does Real FPS Commando Game Shooting Gun Strike offline mod apk require?
    • -
    • A: It requires about 100 MB of storage space on your device.
    • -
    • Q: Can I play Real FPS Commando Game Shooting Gun Strike offline mod apk on PC?
    • -
    • A: Yes, you can play it on PC using an Android emulator such as Bluestacks or NoxPlayer.
    • -
    • Q: Can I play Real FPS Commando Game Shooting Gun Strike offline mod apk with friends?
    • -
    • A: Yes, you can play it with friends online in multiplayer mode or locally using Wi-Fi or Bluetooth.
    • -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Facebook Messenger APK Download for Android 5.0 - Everything You Need to Know.md b/spaces/congsaPfin/Manga-OCR/logs/Facebook Messenger APK Download for Android 5.0 - Everything You Need to Know.md deleted file mode 100644 index b471afbf103a96e1716a47f2fc7641b67a2934a6..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Facebook Messenger APK Download for Android 5.0 - Everything You Need to Know.md +++ /dev/null @@ -1,132 +0,0 @@ - -

    Facebook Messenger APK Download for Android 5.0

    -

    Facebook Messenger is one of the most popular messaging apps that lets you chat with your friends and family on the go. But what if you want to use it on an older Android device that doesn't support the latest version of the app? Or what if you want to access some features that are not available in your region? That's where an APK file comes in handy.

    -

    An APK file is an application package file that contains all the files needed to install an app on your Android device. By downloading and installing an APK file, you can bypass the Google Play Store and get the app directly from a third-party source.

    -

    facebook messenger apk download for android 5.0


    Download File ===> https://urlca.com/2uO7We



    -

    In this article, we will show you how to download and install the Facebook Messenger APK file on your Android 5.0 device. We will also tell you about some of the features and benefits of using Facebook Messenger, as well as some common issues and solutions for using it on your device.

    -

    Features and Benefits of Facebook Messenger

    -

    Facebook Messenger is more than just a texting app. It offers a variety of features and benefits that make it a great choice for staying in touch with your favorite people.

    -

    Cross-App Communication

    -

    With Facebook Messenger, you can chat with your friends across different apps and devices. You can use it to communicate with your Facebook contacts, as well as your Instagram followers, Portal contacts, and Oculus friends. You can also switch between apps seamlessly without losing your conversation history.

    -

    Watch Together

    -

    Facebook Messenger also lets you watch videos with your friends while video chatting with them. You can watch movies, music videos, TV shows, and more from Facebook Watch, IGTV, Reels, and other sources. You can also share your reactions and comments with your friends as you watch together.

    -

    Custom Reactions

    -

    Facebook Messenger allows you to customize your reactions with more emojis to choose from. You can express yourself better with different emotions, such as love, laughter, anger, sadness, and more. You can also use any emoji from your keyboard as a reaction.

    -

    Animated Effects

    -

    You can also bring your conversations to life with fun AR effects, message effects, and selfie stickers. You can use AR effects to transform your appearance, add accessories, or create interactive scenes. You can use message effects to add animations, colors, or sounds to your texts. You can use selfie stickers to create personalized stickers of yourself and send them to your friends.


    Replies and Forwarding

    -

    Facebook Messenger makes it easy to reply to or forward specific messages in your chat. You can swipe right on any message to reply to it directly in the same chat. You can also tap and hold on any message to forward it to another chat or contact.

    -

    App Lock

    -

    If you want to keep your chats private and secure, you can use the app lock feature on Facebook Messenger. This feature allows you to lock your app with your device's face or fingerprint ID. You can also set a timer for how long the app will stay unlocked after you use it.

    -

    Payments

    -

    Facebook Messenger also lets you send and receive money securely and easily by adding your debit card, PayPal account, or prepaid card. You can send money to anyone in your contacts list or request money from them. You can also use Facebook Pay to shop online, donate to causes, or buy tickets for events.

    -

    Business

    -

    Finally, Facebook Messenger can help you connect with businesses to find deals, make reservations, and get customer support. You can browse through different categories of businesses, such as food, shopping, travel, and more. You can also chat with bots or live agents to get answers to your questions, receive updates, or provide feedback.

    -

    How to Download and Install Facebook Messenger APK on Android 5.0

    -

    If you want to download and install the Facebook Messenger APK file on your Android 5.0 device, you need to follow these steps:

    -

    Enable Unknown Sources

    -

    Before you can install an APK file on your device, you need to enable unknown sources from the device settings. This will allow Chrome or other browsers to install unknown apps on your device.

    -

    To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that says installing unknown apps may harm your device. Tap OK to proceed.

    -

    Download the APK File

    -

    Next, you need to download the APK file for Facebook Messenger from a reputable source such as APK Mirror. APK Mirror is a website that hosts APK files for various apps that are safe and verified.

    -

    To download the APK file for Facebook Messenger from APK Mirror, go to this link: https://www.apkmirror.com/apk/facebook-2/messenger/messenger-330-0-0-12-116-release/. This is the latest version of Facebook Messenger as of June 2023.

    -

    On the webpage, scroll down until you see the Download APK button and tap on it. You may see a pop-up window that asks you to confirm the download. Tap OK to start the download.
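Before installing, it can be worth a quick sanity check that the file you downloaded really is an APK. An APK is a ZIP archive that contains an AndroidManifest.xml entry, so a short Python sketch run on a computer can confirm the basic structure; the file name below is only an example.

```python
import zipfile

apk_path = "Messenger.apk"  # example name - use the file you actually downloaded

try:
    with zipfile.ZipFile(apk_path) as apk:
        if "AndroidManifest.xml" in apk.namelist():
            print("Structurally valid APK (AndroidManifest.xml found).")
        else:
            print("Archive opens, but AndroidManifest.xml is missing - be suspicious.")
except zipfile.BadZipFile:
    print("Not a valid APK/ZIP file - do not install it.")
```

This does not prove the file is safe, only that it is not obviously corrupted or mislabeled.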

    -

    Install the APK File

    -

    After the download is complete, you need to locate and tap the APK file in the file explorer app on your device. You may see a notification that says the file is ready to install. Tap on it to open it.

    -

    You may see another warning message that says installing this app may harm your device. Tap Install Anyway (Unsafe) to continue.

    -

    The installation process may take a few seconds or minutes depending on your device speed and memory space. Once it is done, you will see a message that says App Installed. Tap Open to launch Facebook Messenger on your device.

    -

    Update the App

    -

    The last step is to check for updates for Facebook Messenger from the Google Play Store or from the app settings. This will ensure that you have the latest features and bug fixes for the app.

    -

    To check for updates from the Google Play Store, go to the Google Play Store app on your device and search for Facebook Messenger. Tap on the app icon and see if there is an Update button. If there is, tap on it to update the app.

    -

    To check for updates from the app settings, open Facebook Messenger on your device and tap on your profile picture in the top left corner. Scroll down and tap on Legal & Policies. Tap on App Updates and see if there is a Check for Updates button. If there is, tap on it to update the app.

    -

    Common Issues and Solutions for Facebook Messenger on Android 5.0

    -

    Facebook Messenger is a reliable and user-friendly app, but sometimes you may encounter some issues while using it on your Android 5.0 device. Here are some of the common issues and solutions for Facebook Messenger on Android 5.0:

    -

    Login Problems

    -

    If you are unable to log in to Facebook Messenger, you may have forgotten your password, have an outdated app, or have a corrupted app data. Here are some ways to fix this issue:

    -
      -
    • Reset your password: Go to the Facebook website or app and tap on Forgot Password. Follow the instructions to reset your password using your email, phone number, or Facebook account.
    • -
    • Update your app: Follow the steps above to check for updates for Facebook Messenger from the Google Play Store or from the app settings.
    • -
    • Reinstall your app: Uninstall Facebook Messenger from your device and then reinstall it from the Google Play Store or from the APK file.
    • -
    -

    Message Sending Problems

    -

    If you are unable to send messages on Facebook Messenger, you may have a poor network connection, have data saver mode enabled, or have a full cache and data. Here are some ways to fix this issue:

    -
      -
    • Check your network connection: Make sure you have a stable Wi-Fi or mobile data connection. You can also try switching between Wi-Fi and mobile data or turning off and on your airplane mode.
    • Disable data saver mode: Data saver mode can limit the background data usage of apps, which may affect Facebook Messenger. To disable data saver mode, go to Settings > Network & Internet > Data Saver and toggle it off.
    • Clear cache and data: Cache and data are temporary files that can help apps run faster, but they can also cause problems if they are corrupted or full. To clear cache and data for Facebook Messenger, go to Settings > Apps > Facebook Messenger > Storage and tap on Clear Cache and Clear Data (see the sketch after this list if you prefer to do this over adb).
    
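    For reference, the same cache-and-data reset can be done from a computer over adb. This is only a sketch and assumes Messenger's usual package name, com.facebook.orca; note that `pm clear` wipes data as well as cache, so you will have to log in again afterwards.

    ```shell
    # Sketch: clear Facebook Messenger's cache and data over adb.
    # WARNING: this signs you out and removes locally stored app data.
    adb shell pm clear com.facebook.orca
    ```
    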
    -

    App Crashing Problems

    -

    If Facebook Messenger keeps crashing on your device, you may have low memory space, an incompatible app version, or a bug in the app. Here are some ways to fix this issue:
    

    -
      -
    • Restart your app: Close Facebook Messenger completely and then reopen it. This can help clear any glitches or errors in the app.
    • Free up memory space: Make sure you have enough free space on your device for Facebook Messenger to run smoothly. You can delete some unwanted files, apps, photos, videos, or music from your device to free up some space.
    • Report a problem: If none of the above solutions work, you can report a problem to Facebook Messenger's support team. To do this, open Facebook Messenger on your device and tap on your profile picture in the top left corner. Scroll down and tap on Legal & Policies. Tap on Report a Problem and follow the instructions to submit your feedback.
    
    -

    Conclusion

    -

    Facebook Messenger is a great app that lets you chat with your friends and family across different apps and devices. It also offers many features and benefits that make it fun and convenient to use.

    -

    If you want to use Facebook Messenger on your Android 5.0 device, you can download and install the APK file from a third-party source such as APK Mirror. You just need to enable unknown sources, download the APK file, install the APK file, and update the app.

    -

    If you encounter any issues while using Facebook Messenger on your device, you can try some of the solutions we provided above. You can also contact Facebook Messenger's support team if you need more help.

    -

    We hope this article was helpful for you. If you liked it, please share it with your friends who might be interested in using Facebook Messenger APK on their Android 5.0 devices.

    -

    Frequently Asked Questions

    -
      -
    1. What is the difference between Facebook Messenger APK and Facebook Messenger Lite APK?
    

      Facebook Messenger APK is the full version of the app that offers all the features and benefits of Facebook Messenger. Facebook Messenger Lite APK is a simplified version of the app that uses less data and battery and works on slower networks and older devices. However, it does not have some of the features of Facebook Messenger, such as animated effects, watch together, payments, and business.

      -
    2. How can I update Facebook Messenger APK without Google Play Store?
    

      If you don't have access to the Google Play Store, you can update Facebook Messenger APK by downloading and installing the latest version of the APK file from a third-party source such as APK Mirror. You can follow the same steps as above to download and install the APK file. You can also check for updates from the app settings by tapping on Legal & Policies > App Updates.

      -
    3. Is it safe to download Facebook Messenger APK from a third-party source?
    

      It depends on the source you are downloading from. Some sources may offer fake or malicious APK files that can harm your device or compromise your privacy. Therefore, you should always download APK files from reputable and verified sources such as APK Mirror. You should also scan the APK file with an antivirus app before installing it.
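    Besides scanning with an antivirus app, you can also compare the file's checksum against the one published by the download page. A minimal sketch on a computer, with a placeholder file name:

    ```shell
    # Sketch: print the SHA-256 hash of the downloaded file and compare it
    # by eye with the checksum shown on the source's download page.
    sha256sum messenger.apk        # on macOS use: shasum -a 256 messenger.apk
    ```
    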

      -
    4. Can I use Facebook Messenger APK on other Android versions?
    

      Yes, you can use Facebook Messenger APK on other Android versions as long as they are compatible with the app. The minimum Android version required for Facebook Messenger is Android 4.4 (KitKat). However, some features may not work on older or newer Android versions.

      -
    5. Can I use Facebook Messenger APK on other devices?
    

      No, you can only use Facebook Messenger APK on Android devices. If you want to use Facebook Messenger on other devices, such as iOS, Windows, Mac, or web, you need to download or access the app from the official sources, such as the App Store, Microsoft Store, or Facebook website.

      -

    
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Garena Blockman GO APK 2022 The Best Sandbox Game for Mobile Devices.md b/spaces/congsaPfin/Manga-OCR/logs/Garena Blockman GO APK 2022 The Best Sandbox Game for Mobile Devices.md deleted file mode 100644 index 690121b85f2d3e0dba34aab16a67facdea793a5b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Garena Blockman GO APK 2022 The Best Sandbox Game for Mobile Devices.md +++ /dev/null @@ -1,107 +0,0 @@ - -

    Garena Blockman GO APK 2022: Everything You Need to Know

    -

    If you are looking for a fun and creative sandbox game that lets you play, craft, and share your experiences with your friends, then you should check out Garena Blockman GO APK 2022. This is the latest version of the popular game developed by Garena Games Private Limited, which has attracted millions of players from all over the world. In this article, we will tell you everything you need to know about Garena Blockman GO APK 2022, including what it is, what's new, and how to download and install it on your device.

    -

    What is Garena Blockman GO?

    -

    Garena Blockman GO is a free-to-play sandbox game that offers you endless possibilities to create and explore. You can enjoy various minigames from different genres, customize your avatar with fashionable accessories, and socialize with other players in a friendly community. Here are some of the features that make Garena Blockman GO stand out:

    -

    garena blockman go apk 2022


    DOWNLOAD ->>->>->> https://urlca.com/2uO6Qy



    -

    A free-to-play sandbox game with endless possibilities

    -

    Garena Blockman GO gives you the freedom to craft your own world and share it with others. You can use various tools and materials to build anything you can imagine, from houses and castles to cities and landscapes. You can also join other players' worlds and see what they have created. You can even create your own minigames and invite others to play with you.

    -

    A wonderland of minigames from different genres

    -

    Garena Blockman GO also offers you a wide range of minigames to choose from, each with its own rules and objectives. You can find games from all kinds of genres, such as action, adventure, simulation, puzzle, racing, shooting, and more. Some of the most popular minigames are Bed Wars, Egg Wars, Sky Block, Free City RP, and more. You can also discover new minigames every day and join the adventures with other players.

    -

    A social platform to play with friends and customize your avatar

    -

    Garena Blockman GO is not only a game but also a social platform where you can meet new friends and chat with them. You can join or create parties, clans, or guilds and team up with other players for more fun. You can also show off your unique style by customizing your avatar with hundreds of fashionable accessories. You can find hats, glasses, masks, clothes, shoes, wings, pets, and more. You can also earn coins by playing games and use them to buy more items.

    -

    What's new in Garena Blockman GO APK 2022?

    -

    Garena Blockman GO APK 2022 is the latest version of the game that brings you more features and improvements. Here are some of the highlights of this update:

    -

    New minigames: Party Street, The Exorcists, and Frontline

    -

    Garena Blockman GO APK 2022 introduces three new minigames that will give you more fun and excitement. They are:

    -
      -
    • Party Street: A super cool street-style game where you can collect graffiti from all over the city and spray them to your heart's content. You can also hop into a random party with other cool guys and enjoy the music and dance.
    • The Exorcists: A horror game where you have to survive the night in a haunted house. You can play as an exorcist or a ghost, and use different skills and items to defeat your enemies. You can also team up with other players or play solo.
    • Frontline: A military game where you have to fight against terrorists and protect the hostages. You can choose from different weapons and equipment, and use tactics and strategies to win the battle. You can also cooperate with other players or compete against them.
    
    -

    Upgraded game experience with improved tutorial and language system

    -

    Garena Blockman GO APK 2022 also enhances the game experience with some improvements and optimizations. For example, it adds a new tutorial system that guides you through the basics of the game and helps you get started quickly. It also improves the language system and supports more languages, such as Arabic, Turkish, Vietnamese, and more. You can also switch languages easily in the settings.

    -

    
    -

    Auto-renewable VIP subscription with exclusive benefits

    -

    Garena Blockman GO APK 2022 also offers you a new way to enjoy more benefits and privileges in the game. You can subscribe to the VIP service, which is an auto-renewable monthly subscription that gives you access to exclusive features, such as:

    -
      -
    • Free coins every day: You can get 100 coins every day as a VIP member, which you can use to buy more items and accessories.
    • Free VIP chests every week: You can get one VIP chest every week as a VIP member, which contains rare and valuable items.
    • Free VIP minigames every month: You can get one VIP minigame every month as a VIP member, which is a special minigame that only VIPs can play.
    • No ads: You can enjoy the game without any ads as a VIP member, which makes the game smoother and faster.
    • More discounts: You can get more discounts on various items and services as a VIP member, which saves you more money.
    • More badges: You can get more badges and titles as a VIP member, which shows your status and prestige in the game.
    
    -

    You can subscribe to the VIP service for $4.99 per month, which will be charged to your Google Play account at confirmation of purchase. You can cancel the subscription at any time in your account settings.

    -

    How to download and install Garena Blockman GO APK 2022?

    -

    If you want to try Garena Blockman GO APK 2022 on your device, you need to download and install it manually. Here are the steps you need to follow:

    -

    Download the APK file from a trusted source

    -

    The first step is to download the APK file of Garena Blockman GO APK 2022 from a trusted source. You can find many websites that offer the APK file for free, but you need to be careful about the security and quality of the file. We recommend using this link to download the APK file safely and quickly.
    

    -

    Enable unknown sources on your device settings

    -

    The next step is to enable unknown sources in your device settings. Because Garena Blockman GO APK 2022 is not available on the Google Play Store, you need to allow your device to install apps from other sources. To do this, go to Settings, then Security or Privacy, then Unknown Sources (or Install Unknown Apps on newer Android versions), and toggle the option on or allow installs from the app you are using to open the file.
    
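    On older Android versions the same switch can also be flipped over adb through the global "unknown sources" setting. This is only a sketch: the setting name varies between Android versions, and on Android 8 and later the permission is granted per app in Settings instead.

    ```shell
    # Sketch for older Android versions (roughly 7.x and below):
    # 1 enables installs from unknown sources, 0 disables them again.
    adb shell settings put secure install_non_market_apps 1
    ```
    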

    -

    Install the APK file and launch the game

    -

    The final step is to install the APK file and launch the game. To do this, you need to locate the downloaded APK file on your device storage, then tap on it and follow the instructions on the screen. Once the installation is complete, you can open the game and enjoy it.

    -

    Conclusion

    -

    Garena Blockman GO APK 2022 is a great sandbox game that offers you endless fun and creativity. You can play various minigames from different genres, craft your own world and share it with others, customize your avatar with fashionable accessories, and socialize with other players in a friendly community. You can also enjoy more features and improvements in this latest version of the game, such as new minigames, upgraded game experience, and auto-renewable VIP subscription. If you want to try Garena Blockman GO APK 2022 today, you just need to download and install it on your device following our simple guide. We hope you have a great time with Garena Blockman GO APK 2022.

    -

    FAQs

    -

    Here are some of the frequently asked questions about Garena Blockman GO APK 2022:

    -

    Is Garena Blockman GO APK 2022 safe to download and install?

    -

    Yes, Garena Blockman GO APK 2022 is safe to download and install, as long as you use a trusted source and follow the steps we provided. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain viruses or malware that can harm your device or steal your data.

    -

    Is Garena Blockman GO APK 2022 compatible with my device?

    -

    Garena Blockman GO APK 2022 is compatible with most Android devices that have Android 4.1 or higher. However, some devices may not support some features or functions of the game, such as graphics, sound, or performance. You can check the compatibility of your device by visiting the official website of Garena Blockman GO.
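    If you are unsure which Android version a device is actually running before you install the APK, one quick way to check (assuming you have adb access to the device) is sketched below.

    ```shell
    # Sketch: print the Android release and API level of the connected device.
    adb shell getprop ro.build.version.release   # e.g. 5.0
    adb shell getprop ro.build.version.sdk       # e.g. 21 for Android 5.0
    ```
    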

    -

    How can I update Garena Blockman GO APK 2022?

    -

    Garena Blockman GO APK 2022 is updated regularly with new features and improvements. You can update the game by downloading and installing the latest version of the APK file from the same source you used before. You can also check for updates by visiting the official website of Garena Blockman GO.

    -

    How can I contact the support team of Garena Blockman GO APK 2022?

    -

    If you have any questions, problems, or feedback about Garena Blockman GO APK 2022, you can contact the support team by sending an email to blockmango@service.netease.com. You can also visit the official website of Garena Blockman GO and find more information and resources there.

    -

    How can I share my feedback or suggestions about Garena Blockman GO APK 2022?

    -

    If you have any feedback or suggestions about Garena Blockman GO APK 2022, you can share them by leaving a comment on the official website of Garena Blockman GO, or by rating and reviewing the game on Google Play Store. You can also join the official community of Garena Blockman GO on Facebook, Twitter, Instagram, YouTube, or Discord and interact with other players and developers.

    
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/If Loving You Is Wrong Season 1 Download the Episodes of the Show That Will Keep You Hooked.md b/spaces/congsaPfin/Manga-OCR/logs/If Loving You Is Wrong Season 1 Download the Episodes of the Show That Will Keep You Hooked.md deleted file mode 100644 index 6be64d445c37e01618942135ecbf843627a4094d..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/If Loving You Is Wrong Season 1 Download the Episodes of the Show That Will Keep You Hooked.md +++ /dev/null @@ -1,169 +0,0 @@ -
    -

    If Loving You Is Wrong Season 1 Download: How to Watch It Offline

    -

    If you are a fan of drama, romance, and suspense, you might be interested in watching "If Loving You Is Wrong", a TV series created by Tyler Perry. The show follows the lives and relationships of a group of five husbands and wives who live on the same street in the fictional community of Maxine. The show explores themes such as infidelity, betrayal, secrets, lies, and revenge, as well as friendship, family, and love.

    -

    Season 1 of "If Loving You Is Wrong" consists of 13 episodes that aired from September to November 2014 on OWN. The season introduces the main characters and sets up the conflicts that will drive the plot throughout the series. Some of the highlights of season 1 include Alex's pregnancy test, Randal's affair with Alex, Eddie's corruption, Kelly's new house, Marcie's discovery of Peppa, and Brad's birthday party.

    -

    if loving you is wrong season 1 download


    Download Zip ✫✫✫ https://urlca.com/2uOftv



    -

    If you want to watch season 1 of "If Loving You Is Wrong", you might be wondering how to download it legally and safely. In this article, we will show you three options to download season 1 from different streaming platforms. We will also explain how to watch season 1 offline on your device. Let's get started!

    -

    How to Download Season 1 Legally

    -

    Downloading TV shows from illegal or shady sites can expose you to malware, viruses, or legal issues. That's why we recommend using only reputable and licensed streaming platforms that offer downloads for offline viewing. Here are three sites that you can use to download season 1 of "If Loving You Is Wrong":

    -

    Option 1: Netflix

    -

    Netflix is one of the most popular streaming services in the world, with a huge catalog of titles, including original content. Netflix also offers downloads for offline viewing on your phone, tablet, or PC.

    -

    To download season 1 of "If Loving You Is Wrong" from Netflix, you need to:

    -
      -
    1. Subscribe to Netflix. You can choose from three plans: Basic ($8.99 per month), Standard ($13.99 per month), or Premium ($17.99 per month). The Basic plan allows you to watch on one screen at a time, while the Standard and Premium plans allow you to watch on two or four screens at a time, respectively. The Premium plan also gives you access to Ultra HD content.
    2. Download the Netflix app on your device. You can find it on the App Store for iOS devices, Google Play Store for Android devices, Microsoft Store for Windows devices, or Amazon Appstore for Fire devices.
    3. Sign in to your Netflix account on the app.
    4. Search for "If Loving You Is Wrong" on the app.
    5. Select Season 1 from the list of seasons.
    6. Tap on the Download icon next to each episode that you want to download. You can also tap on Download All to download all episodes at once.
    7. Wait for the downloads to finish. You can check the progress on the Downloads tab on the app.
    8. Enjoy watching season 1 offline. You can find your downloaded episodes on the Downloads tab on the app. You can watch them without an internet connection for as long as they are available on Netflix.
    

      -

      Some of the pros and cons of Netflix are:

      -

    

    | Pros | Cons |
    | --- | --- |
    | Large and diverse catalog of titles | Some titles may not be available in your region |
    | Original and exclusive content | Some titles may be removed after a certain period |
    | High-quality video and audio | Higher plans are more expensive than other platforms |
    | User-friendly interface and features | Downloads may expire or require internet connection to renew |
    
      -

      Option 2: Hulu

      -

      Hulu is another popular streaming service that offers a variety of TV shows, movies, and original content. Hulu also allows you to download some titles for offline viewing on your phone or tablet.

      -

      To download season 1 of "If Loving You Is Wrong" from Hulu, you need to:

      -
        -
    1. Subscribe to Hulu. You can choose from four plans: Hulu ($5.99 per month), Hulu (No Ads) ($11.99 per month), Hulu + Live TV ($64.99 per month), or Hulu (No Ads) + Live TV ($70.99 per month). The Hulu and Hulu (No Ads) plans give you access to the streaming library, while the Hulu + Live TV and Hulu (No Ads) + Live TV plans give you access to the streaming library and live TV channels. The No Ads plans let you watch most titles without commercials.
    2. Download the Hulu app on your device. You can find it on the App Store for iOS devices, Google Play Store for Android devices, or Amazon Appstore for Fire devices.
    3. Sign in to your Hulu account on the app.
    4. Search for "If Loving You Is Wrong" on the app.
    5. Select Season 1 from the list of seasons.
    6. Tap on the Download icon next to each episode that you want to download. You can also tap on Download All to download all episodes at once.
    7. Wait for the downloads to finish. You can check the progress on the Downloads tab on the app.
    8. Enjoy watching season 1 offline. You can find your downloaded episodes on the Downloads tab on the app. You can watch them without an internet connection for up to 30 days or until they expire from Hulu.
    

        -

        Some of the pros and cons of Hulu are:

    | Pros | Cons |
    | --- | --- |
    | Fresh and updated catalog of titles | Limited availability of downloads for some titles |
    | Original and exclusive content | Commercials on some plans and titles |
    | Live TV option with DVR feature | More expensive than other platforms for live TV option |
    | Add-on options for premium channels and features | Downloads may expire or require internet connection to renew |
    
        -

        Option 3: Amazon Prime Video

        -

        Amazon Prime Video is a streaming service that offers a wide range of TV shows, movies, and original content. Amazon Prime Video also lets you download some titles for offline viewing on your phone, tablet, or PC.

        -

        To download season 1 of "If Loving You Is Wrong" from Amazon Prime Video, you need to:

        -
        1. Subscribe to Amazon Prime Video. You can choose from two plans: Prime Video ($8.99 per month) or Prime Membership ($12.99 per month or $119 per year). The Prime Video plan gives you access to the streaming library, while the Prime Membership plan gives you access to the streaming library and other benefits such as free shipping, music streaming, e-books, and more.
        2. Download the Prime Video app on your device. You can find it on the App Store for iOS devices, Google Play Store for Android devices, Microsoft Store for Windows devices, or Amazon Appstore for Fire devices.
        3. Sign in to your Amazon account on the app.
        4. Search for "If Loving You Is Wrong" on the app.
        5. Select Season 1 from the list of seasons.
        6. Tap on the Download icon next to each episode that you want to download. You can also tap on Download All to download all episodes at once.
        7. Wait for the downloads to finish. You can check the progress on the Downloads tab on the app.
    8. Enjoy watching season 1 offline. You can find your downloaded episodes on the Downloads tab on the app. You can watch them without an internet connection for as long as they are available on Amazon Prime Video.
    

          -

          Some of the pros and cons of Amazon Prime Video are:

    | Pros | Cons |
    | --- | --- |
    | Huge and varied catalog of titles | Some titles may require additional purchase or rental |
    | Original and exclusive content | Some titles may not be available in your region |
    | Other benefits of Prime Membership | More expensive than other platforms for Prime Membership plan |
    | Add-on options for premium channels and features | Downloads may expire or require internet connection to renew |
    
          -

          How to Watch Season 1 Offline

          -

          Once you have downloaded season 1 of "If Loving You Is Wrong" from one of the streaming platforms, you can watch it offline on your device. However, there are some things you need to know before you do so:

          -

          What Devices and Apps Are Compatible with Downloaded TV Shows?

          -

          Not all devices and apps can play downloaded TV shows from streaming platforms. Here are some of the compatible devices and apps for each platform:

          -
          • Netflix: You can watch downloaded TV shows on iOS devices (iPhone, iPad, iPod touch), Android devices (phones, tablets), Windows devices (PCs, tablets), and Fire devices (Fire tablets, Fire TV Stick). You need to use the Netflix app to watch downloaded TV shows.
          • Hulu: You can watch downloaded TV shows on iOS devices (iPhone, iPad, iPod touch), Android devices (phones, tablets), and Fire devices (Fire tablets). You need to use the Hulu app to watch downloaded TV shows.
          • Amazon Prime Video: You can watch downloaded TV shows on iOS devices (iPhone, iPad, iPod touch), Android devices (phones, tablets), Windows devices (PCs, tablets), Fire devices (Fire tablets, Fire TV Stick), and some smart TVs. You need to use the Prime Video app to watch downloaded TV shows.
          -

          How to Manage Downloads and Storage Space?

          -

          Downloading TV shows can take up a lot of storage space on your device. To avoid running out of space or having performance issues, you need to manage your downloads and storage space properly. Here are some tips to do so:

          -
          • Delete downloads that you have already watched or don't need anymore. You can do this by going to the Downloads tab on the app and tapping on the Delete icon next to each download or on Delete All to delete all downloads at once.
          • Choose a lower video quality for your downloads. This will reduce the file size and save storage space. You can do this by going to the Settings or Menu tab on the app and selecting Download Quality or Download Options. You can choose from different quality levels such as Standard, High, or Best.
          • Use an external storage device such as a USB drive or a memory card to store your downloads. This will free up space on your device's internal storage. However, not all devices and apps support external storage for downloads. Check the compatibility before you use this option.
          -

          Conclusion

          -

          In this article, we have shown you how to download season 1 of "If Loving You Is Wrong" legally and safely from three streaming platforms: Netflix, Hulu, and Amazon Prime Video. We have also explained how to watch season 1 offline on your device and how to manage your downloads and storage space.

          -

          If you are looking for the best site to download season 1 of "If Loving You Is Wrong", we recommend Netflix. Netflix has a large and diverse catalog of titles, including original and exclusive content. Netflix also offers high-quality video and audio, user-friendly interface and features, and reasonable prices for its plans. Netflix also allows you to download TV shows on various devices and apps, and gives you more control over your download quality and expiration.

          -

          If you want to watch season 1 of "If Loving You Is Wrong", don't miss this opportunity to download it from Netflix and enjoy it offline anytime, anywhere. Happy watching!

          -

          FAQs

          -

          How many episodes are in season 1 of "If Loving You Is Wrong"?

          -

          Season 1 of "If Loving You Is Wrong" consists of 13 episodes that aired from September 9, 2014 to November 25, 2014 on OWN. Each episode is about 42 minutes long.

          -

          When was season 1 released and how long is each episode?

          -

          Season 1 of "If Loving You Is Wrong" was released on September 9, 2014 and ended on November 25, 2014 on OWN. Each episode is about 42 minutes long.

          -

          What are some of the main characters and actors in season 1?

          -

          Some of the main characters and actors in season 1 of "If Loving You Is Wrong" are:

          -
            -
    • Alex Montgomery (Amanda Clayton): The wife of Brad and the mother of three children. She has an affair with Randal, her neighbor, and becomes pregnant with his child.
    • Randal Holmes (Eltony Williams): The husband of Marcie and a psychologist. He has an affair with Alex, his neighbor, and impregnates her.
    • Brad Montgomery (Aiden Turner): The husband of Alex and the father of three children. He suspects that Alex is cheating on him and confronts her.
    • Marcie Holmes (Heather Hemmens): The wife of Randal and a real estate agent. She discovers that Randal is cheating on her with Alex and seeks revenge.
    • Eddie Willis (Joel Rush): The husband of Esperanza and a corrupt police officer. He abuses Esperanza and tries to control her life.
    • Esperanza Willis (Zulay Henao): The ex-wife of Eddie and the mother of a daughter. She works as a bank teller and tries to move on from Eddie.
    • Kelly Isaacs (Edwina Findley Dickerson): The single mother of a son and a nurse. She buys a new house on the same street as her friends and develops a crush on Travis, her neighbor.
    • Travis Cain (Denzel Wells): The neighbor of Kelly and a civil engineer. He is engaged to another woman but has feelings for Kelly.
    • Natalie Henning (April Parker Jones): The single mother of four children and the manager of a diner. She lives in a rundown apartment with her children and Lushion, her boyfriend.
    • Lushion Morgan (Charles Malik Whitfield): The boyfriend of Natalie and a police officer. He works with Eddie but does not approve of his corruption.
    
          -

          Is season 1 of "If Loving You Is Wrong" based on a book or a movie?

          -

          No, season 1 of "If Loving You Is Wrong" is not based on a book or a movie. It is an original TV series created by Tyler Perry, who also serves as the writer, director, producer, and showrunner. However, the show is loosely inspired by Perry's 2014 film "The Single Moms Club", which features some of the same characters and actors.

          -

          Will there be a season 6 of "If Loving You Is Wrong"?

          -

          Yes, there will be a season 6 of "If Loving You Is Wrong". OWN announced that the show will return for its sixth and final season on March 31, 2020. The season will consist of eight episodes that will wrap up the storylines of the characters.

    
          -
          -
          \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Quaid-e-Azam Zindabad (2022) Urdu 720p HDRip - Download Latest Pakistani Movies.md b/spaces/congsaPfin/Manga-OCR/logs/Quaid-e-Azam Zindabad (2022) Urdu 720p HDRip - Download Latest Pakistani Movies.md deleted file mode 100644 index af7ebf5b8fd6bf3eb21733806c95328e886dd187..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Quaid-e-Azam Zindabad (2022) Urdu 720p HDRip - Download Latest Pakistani Movies.md +++ /dev/null @@ -1,112 +0,0 @@ - -

          Quaid-e-Azam Zindabad Full Movie Download 720p: A Review

          -

          If you are looking for a fun and patriotic Pakistani movie to watch, you might want to check out Quaid-e-Azam Zindabad, a 2022 action comedy drama film starring Fahad Mustafa and Mahira Khan. In this article, we will give you a brief overview of the movie, tell you why you should watch it, and show you how to download Quaid-e-Azam Zindabad full movie in 720p quality.

          -

          quaid e azam zindabad full movie download 720p


          Download Filehttps://urlca.com/2uOchu



          -

          What is Quaid-e-Azam Zindabad?

          -

          Quaid-e-Azam Zindabad is a slogan that means "Long Live the Great Leader", referring to Muhammad Ali Jinnah, the founder of Pakistan. The movie uses this slogan as a theme to explore the corruption and injustice that plague the country that was built on the ideals of purity and honesty.

          -

          The plot of the movie

          -

    The movie follows Inspector Gulab Mughal, an honest and brave police officer who is assigned to catch a notorious criminal named Ronaq Ali. Along the way, he meets Jia, a journalist who is investigating a scam involving fake currency notes. Together, they uncover a conspiracy that involves high-ranking officials, politicians, and businessmen who are exploiting the people and tarnishing the legacy of Quaid-e-Azam. They also discover that Ronaq Ali is not what he seems, and that he has a hidden agenda that could change the fate of Pakistan.
    

          -

          The cast and crew of the movie

          -

          The movie is directed by Nabeel Qureshi, who is known for his previous hits like Na Maloom Afraad, Actor in Law, and Load Wedding. The movie is produced by Fizza Ali Meerza, who is also the co-writer along with Qureshi. The movie features Fahad Mustafa as Inspector Gulab Mughal, Mahira Khan as Jia, Jawed Sheikh as Ronaq Ali, Mehmood Aslam as Babar Jilani, Nayyar Ejaz as Rana Kamran, Qavi Khan as Munir Mughal, Saleem Mairaj as Corrupt sub-inspector, and Irfan Motiwala as Saleem Junaid.

          -

          Why should you watch Quaid-e-Azam Zindabad?

          -

          There are many reasons why you should watch Quaid-e-Azam Zindabad, whether you are a fan of Pakistani cinema or not. Here are some of them:

          -

          The entertainment value of the movie

          -

          The movie is a perfect blend of action, comedy, and drama that will keep you hooked from start to finish. The movie has thrilling chase scenes, hilarious dialogues, romantic moments, and emotional twists that will make you laugh, cry, and cheer. The movie also has amazing songs and dances that will make you groove along with the characters. The movie is a feast for your eyes and ears that will leave you satisfied and entertained.

          -

          The patriotic message of the movie

          -

    The movie is not just a mindless entertainer, but also a meaningful tribute to Quaid-e-Azam and his vision for Pakistan. The movie shows how corruption and greed have eroded the system and the society that was supposed to be based on justice and equality. The movie also shows how ordinary people can make a difference by standing up for their rights and fighting against injustice. The movie inspires you to be proud of your country and its history, and to strive for its betterment.
    

          The positive reviews of the movie

          -

          The movie has received rave reviews from critics and audiences alike, who have praised the movie for its direction, script, performances, music, and message. The movie has a rating of 8.1 out of 10 on IMDb, and 4.5 out of 5 on BookMyShow. The movie has also been nominated for several awards, including the Lux Style Awards, the ARY Film Awards, and the Pakistan International Screen Awards. The movie has been hailed as one of the best Pakistani movies of all time, and a must-watch for everyone.

          -

          How to download Quaid-e-Azam Zindabad full movie in 720p quality?

          -

          If you want to watch Quaid-e-Azam Zindabad full movie in 720p quality, you have several options to choose from. However, not all of them are legal or ethical, and some of them may pose risks to your device or your privacy. Here are some of the ways you can download the movie, along with their pros and cons:

          -

          The legal and ethical ways to download the movie

          -

          The best way to download Quaid-e-Azam Zindabad full movie in 720p quality is to use a legal and ethical platform or source that has the rights to distribute the movie. Some of these platforms or sources are:

          -

    
          -
            -
          • Netflix: Netflix is a popular streaming service that offers a wide range of movies and shows, including Quaid-e-Azam Zindabad. You can watch the movie online or download it offline on your device using the Netflix app. You need to have a subscription to Netflix to access its content, which costs between Rs. 199 to Rs. 799 per month depending on the plan you choose. The pros of using Netflix are that you get high-quality video and audio, no ads, no viruses, and no legal issues. The cons are that you need a stable internet connection, a compatible device, and enough storage space on your device.
    
          • Amazon Prime Video: Amazon Prime Video is another popular streaming service that offers a variety of movies and shows, including Quaid-e-Azam Zindabad. You can watch the movie online or download it offline on your device using the Prime Video app. You need to have a subscription to Amazon Prime to access its content, which costs Rs. 129 per month or Rs. 999 per year. The pros of using Amazon Prime Video are similar to Netflix, except that you also get other benefits like free delivery, exclusive deals, and access to other services like Amazon Music and Kindle. The cons are also similar to Netflix, except that you may not find some content that is available on Netflix.
    
          • YouTube: YouTube is a free video-sharing platform that allows anyone to upload and watch videos online. You can find Quaid-e-Azam Zindabad on YouTube uploaded by various channels or users who have bought or rented the movie from other sources. You can watch the movie online or download it offline on your device using the YouTube app or a third-party app like YouTube Downloader. You do not need to pay anything to watch or download the movie on YouTube, but you may have to deal with ads, low-quality video or audio, incomplete or fake videos, and possible legal issues if the uploader does not have the rights to distribute the movie.
    
          -

          The risks and drawbacks of illegal downloading

          -

          Some people may resort to illegal downloading of Quaid-e-Azam Zindabad full movie in 720p quality from websites or apps that offer pirated content. These websites or apps may claim to provide free or cheap downloads of the movie, but they may also expose you to various risks and drawbacks such as:

          -
            -
    • Viruses and malware: These websites or apps may contain viruses and malware that can infect your device and compromise its security and performance. They may also steal your personal information or data and use it for malicious purposes.
    • Poor quality: These websites or apps may not provide high-quality video or audio of the movie, and may also have missing scenes, wrong subtitles, distorted sound, or other errors.
    • Legal issues: These websites or apps may violate the intellectual property rights of the makers and distributors of the movie, and may also be banned by the authorities. If you download the movie from these sources, you may also face legal action or penalties for piracy.
    • Unethical behavior: These websites or apps may deprive the makers and distributors of the movie of their rightful earnings and recognition, and may also discourage them from making more quality movies in the future.
    

            A table comparing different platforms and sources for downloading the movie

            -

            To help you decide which platform or source to use for downloading Quaid-e-Azam Zindabad full movie in 720p quality, we have prepared a table that compares their features, advantages, and disadvantages. You can see the table below:

    | Platform/Source | Features | Advantages | Disadvantages |
    | --- | --- | --- | --- |
    | Netflix | Streaming service, subscription-based, offline download option, high-quality video and audio, no ads, no viruses, no legal issues | Reliable, safe, convenient, ethical | Requires internet connection, compatible device, storage space, and subscription fee |
    | Amazon Prime Video | Streaming service, subscription-based, offline download option, high-quality video and audio, no ads, no viruses, no legal issues, other benefits like free delivery, exclusive deals, and access to other services | Reliable, safe, convenient, ethical, value-added | Requires internet connection, compatible device, storage space, and subscription fee |
    | YouTube | Video-sharing platform, free, online and offline viewing option, variable quality video and audio, ads, possible viruses and legal issues | Cheap, accessible, easy to use | Risky, unreliable, unethical, low-quality |
    | Pirated websites/apps | Websites or apps that offer illegal downloads of the movie, free or cheap, offline viewing option, poor quality video and audio, viruses and malware, legal issues | Cheap | Risky, unreliable, unethical, low-quality |
    
            -

            Conclusion

            -

            In conclusion, Quaid-e-Azam Zindabad is a great movie that you should watch if you are looking for a fun and patriotic Pakistani movie. The movie has a captivating plot, a talented cast and crew, a entertaining value, a patriotic message, and positive reviews. The movie is available on various platforms and sources for downloading in 720p quality. However, you should be careful about the risks and drawbacks of illegal downloading and choose a legal and ethical way to enjoy the movie.

            -

            FAQs

            -

            Here are some frequently asked questions about Quaid-e-Azam Zindabad full movie download 720p:

            -

            Q: When was Quaid-e-Azam Zindabad released?

            -

            A: Quaid-e-Azam Zindabad was released on December 25th 2022 in Pakistan. It was also released in other countries like UAE, UK, USA, Canada, Australia, and New Zealand on different dates.

            -

            Q: How much did Quaid-e-Azam Zindabad earn at the box office?

            -

            A: Quaid-e-Azam Zindabad earned Rs. 32.5 crore in Pakistan and Rs. 12.5 crore in overseas markets as of February 2023. It became the highest-grossing Pakistani movie of all time.

            -

            Q: Is Quaid-e-Azam Zindabad based on a true story?

            -

            A: No. Quaid-e-Azam Zindabad is a fictional story that uses the slogan of Quaid-e-Azam as a theme to explore the corruption and injustice in Pakistan. However, the movie may have some references or inspirations from real-life events or personalities.

            -

            Q: Who sang the songs of Quaid-e-Azam Zindabad?

            -

            A: The songs of Quaid-e-Azam Zindabad were composed by Shani Arshad and sung by various artists like Rahat Fateh Ali Khan, Aima Baig, Shani Arshad, Jabbar Abbas, Asrar Shah, and Natasha Baig. The songs were well-received by the audience and critics.

            -

            Q: Where can I watch the trailer of Quaid-e-Azam Zindabad?

            -

            A: You can watch the trailer of Quaid-e-Azam Zindabad on YouTube on the official channel of Filmwala Pictures. You can also find other videos related to the movie on the same channel.

    
            -
            -
            \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/gc_head.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/gc_head.py deleted file mode 100644 index 70741245af975800840709911bd18d72247e3e04..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/gc_head.py +++ /dev/null @@ -1,47 +0,0 @@ -import torch -from annotator.uniformer.mmcv.cnn import ContextBlock - -from ..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class GCHead(FCNHead): - """GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond. - - This head is the implementation of `GCNet - `_. - - Args: - ratio (float): Multiplier of channels ratio. Default: 1/4. - pooling_type (str): The pooling type of context aggregation. - Options are 'att', 'avg'. Default: 'avg'. - fusion_types (tuple[str]): The fusion type for feature fusion. - Options are 'channel_add', 'channel_mul'. Default: ('channel_add',) - """ - - def __init__(self, - ratio=1 / 4., - pooling_type='att', - fusion_types=('channel_add', ), - **kwargs): - super(GCHead, self).__init__(num_convs=2, **kwargs) - self.ratio = ratio - self.pooling_type = pooling_type - self.fusion_types = fusion_types - self.gc_block = ContextBlock( - in_channels=self.channels, - ratio=self.ratio, - pooling_type=self.pooling_type, - fusion_types=self.fusion_types) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - output = self.gc_block(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/csuhan/LLaMA-Adapter/style.css b/spaces/csuhan/LLaMA-Adapter/style.css deleted file mode 100644 index cbd4576f31f91ca9f68973958e53c4cfe3c67d9e..0000000000000000000000000000000000000000 --- a/spaces/csuhan/LLaMA-Adapter/style.css +++ /dev/null @@ -1,4 +0,0 @@ -h1,p { - text-align: center; - } - \ No newline at end of file diff --git a/spaces/dachenchen/HiWantJoin/readme/README_ja.md b/spaces/dachenchen/HiWantJoin/readme/README_ja.md deleted file mode 100644 index fc56eec0b81c22ff0a49e3960aa52ffd7d6dc5cb..0000000000000000000000000000000000000000 --- a/spaces/dachenchen/HiWantJoin/readme/README_ja.md +++ /dev/null @@ -1,126 +0,0 @@ -
            - - 简体中文 | English | 日本語 -
            - -

            川虎 Chat 🐯 Chuanhu Chat

            -
            - - Logo - - -

            -

            ChatGPT/ChatGLM/LLaMAなどのLLMのための軽量でユーザーフレンドリーなWeb-UI

            -

            - - Tests Passing - - - GitHub Contributors - - - GitHub pull requests - -

            - ストリーム出力/会話回数無制限/履歴保存/プリセットプロンプト/ファイルへの質問チャット
            - ウェブ検索/LaTeXレンダリング/表レンダリング/コードハイライト
            - オートダークモード/アダプティブ・ウェブ・インターフェイス/WeChatライク・テーマ
            - マルチパラメーターチューニング/マルチAPI-Key対応/マルチユーザー対応
            - GPT-4対応/LLMのローカルデプロイ可能。 -

            - 動画チュートリアル - · - 2.0 イントロダクション - · - 3.0 イントロダクション & チュートリアル - || - オンライントライアル - · - ワンクリックデプロイ -

            -

            - Animation Demo -

            -

            -
            - -## 使う上でのTips - -- ChatGPTをより適切に制御するために、システムプロンプトを使用できます。 -- プロンプトテンプレートを使用するには、プロンプトテンプレートコレクションを選択し、ドロップダウンメニューから特定のプロンプトを選択。回答が不十分な場合は、`🔄再生成`ボタンを使って再試行します。 -- 入力ボックスで改行するには、Shift + Enterキーを押してください。 -- 入力履歴を素早く切り替えるには、入力ボックスで キーを押す。 -- プログラムをサーバにデプロイするには、プログラムの最終行を `demo.launch(server_name="0.0.0.0", server_port=)`に変更します。 -- 共有リンクを取得するには、プログラムの最後の行を `demo.launch(share=True)` に変更してください。なお、公開リンクでアクセスするためには、プログラムが実行されている必要があることに注意してください。 -- Hugging Face Spacesで使用する場合: より速く、より安全に利用するために、**Duplicate Space**を使用し、自分のスペースでプログラムを実行することをお勧めします。 - -## インストール - -```shell -git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git -cd ChuanhuChatGPT -pip install -r requirements.txt -``` - -次に `config_example.json`をコピーして `config.json`にリネームし、そのファイルにAPI-Keyなどの設定を記入する。 - -```shell -python ChuanhuChatbot.py -``` - -ブラウザのウィンドウが開き、ChatGPTとチャットできるようになります。 - -> **Note** -> -> 詳しい手順は[wikiページ](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程)をご確認ください。 - -## トラブルシューティング - -問題が発生した場合は、まずこのプロジェクトの最新の変更点を手動で引っ張ってみるのがよいでしょう。その手順は以下の通りです: - -1. ウェブページの `Download ZIP` をクリックして最新のコードアーカイブをダウンロードするか、または - ```shell - git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f - ``` -2. 新しい依存関係が導入されている可能性があるため、依存関係を再度インストールしてみてください。 - ``` - pip install -r requirements.txt - ``` -3. Gradioを更新 - ``` - pip install gradio --upgrade --force-reinstall - ``` - -一般的に、以下の手順でほとんどの問題を解決することができます。 - -それでも問題が解決しない場合は、こちらのページをご参照ください: [よくある質問(FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) - -このページでは、考えられるほぼすべての問題点と解決策を掲載しています。よくお読みください。 - -## More Information - -より詳細な情報は、[wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki) をご覧ください。: - -- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization) -- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南) -- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目) -- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志) -- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可) - -## Starchart - -[![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date) - -## Contributors - - - - - -## Sponsor - -🐯 この企画が役に立ったら、遠慮なくコーラかコーヒーでもおごってください〜。 - -Buy Me A Coffee - -image diff --git a/spaces/davanstrien/notebooks_on_the_hub/README.md b/spaces/davanstrien/notebooks_on_the_hub/README.md deleted file mode 100644 index 865cc683b286e4f210a582348219d1b0905fd64d..0000000000000000000000000000000000000000 --- a/spaces/davanstrien/notebooks_on_the_hub/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Notebooks On The Hub -emoji: 🐠 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dawdqd/ChuanhuChatGPT/web_assets/javascript/avatar.js b/spaces/dawdqd/ChuanhuChatGPT/web_assets/javascript/avatar.js deleted file mode 100644 index 14da1d3ba174320f8b52b6ceb18799909dff0c6e..0000000000000000000000000000000000000000 --- a/spaces/dawdqd/ChuanhuChatGPT/web_assets/javascript/avatar.js +++ /dev/null @@ -1,53 +0,0 @@ - -function addAvatars(messageElement, role='user'||'bot') { - if(messageElement.innerHTML === '') { - return; - } - if (messageElement.classList.contains('avatar-added') || messageElement.classList.contains('hide')) { - return; - } - if 
(role === 'bot' && botAvatarUrl === "" || role === 'user' && userAvatarUrl === "") { - messageElement.classList.add('avatar-added'); - return; - } - - - const messageRow = document.createElement('div'); - messageRow.classList.add('message-row'); - messageElement.classList.add('avatar-added'); - - if (role === 'bot') { - messageRow.classList.add('bot-message-row'); - } else if (role === 'user') { - messageRow.classList.add('user-message-row'); - } - - const avatarDiv = document.createElement('div'); - avatarDiv.classList.add('chatbot-avatar'); - if (role === 'bot') { - avatarDiv.classList.add('bot-avatar'); - avatarDiv.innerHTML = `bot-avatar`; - } else if (role === 'user') { - avatarDiv.classList.add('user-avatar'); - avatarDiv.innerHTML = `user-avatar`; - } - - messageElement.parentNode.replaceChild(messageRow, messageElement); - - if (role === 'bot') { - messageRow.appendChild(avatarDiv); - messageRow.appendChild(messageElement); - } else if (role === 'user') { - messageRow.appendChild(messageElement); - messageRow.appendChild(avatarDiv); - } -} - -function clearMessageRows() { - const messageRows = chatbotWrap.querySelectorAll('.message-row'); - messageRows.forEach((messageRow) => { - if (messageRow.innerText === '') { - messageRow.parentNode.removeChild(messageRow); - } - }); -} \ No newline at end of file diff --git a/spaces/dbirks/diffuse-the-rest/build/_app/immutable/assets/+layout-e9d23274.css b/spaces/dbirks/diffuse-the-rest/build/_app/immutable/assets/+layout-e9d23274.css deleted file mode 100644 index ef5132b6badb27a38c4f17f4c6c9cf40e8c371c6..0000000000000000000000000000000000000000 --- a/spaces/dbirks/diffuse-the-rest/build/_app/immutable/assets/+layout-e9d23274.css +++ /dev/null @@ -1 +0,0 @@ -*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji"}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier 
New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}*,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::-webkit-backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: 
;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.prose-sm{font-size:.875rem;line-height:1.7142857}.prose-sm :where(p):not(:where([class~="not-prose"] *)){margin-top:1.1428571em;margin-bottom:1.1428571em}.prose-sm :where([class~="lead"]):not(:where([class~="not-prose"] *)){font-size:1.2857143em;line-height:1.5555556;margin-top:.8888889em;margin-bottom:.8888889em}.prose-sm :where(blockquote):not(:where([class~="not-prose"] *)){margin-top:1.3333333em;margin-bottom:1.3333333em;padding-left:1.1111111em}.prose-sm :where(h1):not(:where([class~="not-prose"] *)){font-size:2.1428571em;margin-top:0;margin-bottom:.8em;line-height:1.2}.prose-sm :where(h2):not(:where([class~="not-prose"] *)){font-size:1.4285714em;margin-top:1.6em;margin-bottom:.8em;line-height:1.4}.prose-sm :where(h3):not(:where([class~="not-prose"] *)){font-size:1.2857143em;margin-top:1.5555556em;margin-bottom:.4444444em;line-height:1.5555556}.prose-sm :where(h4):not(:where([class~="not-prose"] *)){margin-top:1.4285714em;margin-bottom:.5714286em;line-height:1.4285714}.prose-sm :where(img):not(:where([class~="not-prose"] *)){margin-top:1.7142857em;margin-bottom:1.7142857em}.prose-sm :where(video):not(:where([class~="not-prose"] *)){margin-top:1.7142857em;margin-bottom:1.7142857em}.prose-sm :where(figure):not(:where([class~="not-prose"] *)){margin-top:1.7142857em;margin-bottom:1.7142857em}.prose-sm :where(figure > *):not(:where([class~="not-prose"] *)){margin-top:0;margin-bottom:0}.prose-sm :where(figcaption):not(:where([class~="not-prose"] *)){font-size:.8571429em;line-height:1.3333333;margin-top:.6666667em}.prose-sm :where(code):not(:where([class~="not-prose"] *)){font-size:.8571429em}.prose-sm :where(h2 code):not(:where([class~="not-prose"] *)){font-size:.9em}.prose-sm :where(h3 code):not(:where([class~="not-prose"] *)){font-size:.8888889em}.prose-sm :where(pre):not(:where([class~="not-prose"] *)){font-size:.8571429em;line-height:1.6666667;margin-top:1.6666667em;margin-bottom:1.6666667em;border-radius:.25rem;padding:.6666667em 1em}.prose-sm :where(ol):not(:where([class~="not-prose"] *)){margin-top:1.1428571em;margin-bottom:1.1428571em;padding-left:1.5714286em}.prose-sm :where(ul):not(:where([class~="not-prose"] *)){margin-top:1.1428571em;margin-bottom:1.1428571em;padding-left:1.5714286em}.prose-sm :where(li):not(:where([class~="not-prose"] *)){margin-top:.2857143em;margin-bottom:.2857143em}.prose-sm :where(ol > li):not(:where([class~="not-prose"] *)){padding-left:.4285714em}.prose-sm :where(ul > li):not(:where([class~="not-prose"] *)){padding-left:.4285714em}.prose-sm :where(.prose > ul > li p):not(:where([class~="not-prose"] *)){margin-top:.5714286em;margin-bottom:.5714286em}.prose-sm :where(.prose > ul > li > *:first-child):not(:where([class~="not-prose"] *)){margin-top:1.1428571em}.prose-sm :where(.prose > ul > li > 
*:last-child):not(:where([class~="not-prose"] *)){margin-bottom:1.1428571em}.prose-sm :where(.prose > ol > li > *:first-child):not(:where([class~="not-prose"] *)){margin-top:1.1428571em}.prose-sm :where(.prose > ol > li > *:last-child):not(:where([class~="not-prose"] *)){margin-bottom:1.1428571em}.prose-sm :where(ul ul,ul ol,ol ul,ol ol):not(:where([class~="not-prose"] *)){margin-top:.5714286em;margin-bottom:.5714286em}.prose-sm :where(hr):not(:where([class~="not-prose"] *)){margin-top:2.8571429em;margin-bottom:2.8571429em}.prose-sm :where(hr + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose-sm :where(h2 + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose-sm :where(h3 + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose-sm :where(h4 + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose-sm :where(table):not(:where([class~="not-prose"] *)){font-size:.8571429em;line-height:1.5}.prose-sm :where(thead th):not(:where([class~="not-prose"] *)){padding-right:1em;padding-bottom:.6666667em;padding-left:1em}.prose-sm :where(thead th:first-child):not(:where([class~="not-prose"] *)){padding-left:0}.prose-sm :where(thead th:last-child):not(:where([class~="not-prose"] *)){padding-right:0}.prose-sm :where(tbody td,tfoot td):not(:where([class~="not-prose"] *)){padding:.6666667em 1em}.prose-sm :where(tbody td:first-child,tfoot td:first-child):not(:where([class~="not-prose"] *)){padding-left:0}.prose-sm :where(tbody td:last-child,tfoot td:last-child):not(:where([class~="not-prose"] *)){padding-right:0}.prose-sm :where(.prose > :first-child):not(:where([class~="not-prose"] *)){margin-top:0}.prose-sm :where(.prose > :last-child):not(:where([class~="not-prose"] *)){margin-bottom:0}.pointer-events-none{pointer-events:none}.my-8{margin-top:2rem;margin-bottom:2rem}.my-4{margin-top:1rem;margin-bottom:1rem}.mt-3{margin-top:.75rem}.mt-4{margin-top:1rem}.mt-2{margin-top:.5rem}.mb-8{margin-bottom:2rem}.inline-block{display:inline-block}.inline{display:inline}.flex{display:flex}.hidden{display:none}.max-h-\[500px\]{max-height:500px}.min-h-\[42px\]{min-height:42px}.w-\[12\.5rem\]{width:12.5rem}.\!w-\[181px\]{width:181px!important}@-webkit-keyframes spin{to{transform:rotate(360deg)}}@keyframes spin{to{transform:rotate(360deg)}}.animate-spin{-webkit-animation:spin 1s linear infinite;animation:spin 1s linear infinite}@-webkit-keyframes pulse{50%{opacity:.5}}@keyframes pulse{50%{opacity:.5}}.animate-pulse{-webkit-animation:pulse 2s cubic-bezier(.4,0,.6,1) infinite;animation:pulse 2s cubic-bezier(.4,0,.6,1) infinite}.cursor-pointer{cursor:pointer}.resize-y{resize:vertical}.flex-col{flex-direction:column}.flex-wrap{flex-wrap:wrap}.items-start{align-items:flex-start}.items-center{align-items:center}.justify-center{justify-content:center}.gap-x-2{-moz-column-gap:.5rem;column-gap:.5rem}.gap-x-4{-moz-column-gap:1rem;column-gap:1rem}.gap-y-2{row-gap:.5rem}.overflow-auto{overflow:auto}.whitespace-pre-wrap{white-space:pre-wrap}.rounded-full{border-radius:9999px}.rounded-xl{border-radius:.75rem}.border-\[1\.2px\]{border-width:1.2px}.border{border-width:1px}.border-gray-200{--tw-border-opacity: 1;border-color:rgb(229 231 235 / var(--tw-border-opacity))}.bg-black{--tw-bg-opacity: 1;background-color:rgb(0 0 0 / var(--tw-bg-opacity))}.bg-sky-200{--tw-bg-opacity: 1;background-color:rgb(186 230 253 / var(--tw-bg-opacity))}.bg-slate-200{--tw-bg-opacity: 1;background-color:rgb(226 232 240 / 
var(--tw-bg-opacity))}.px-2{padding-left:.5rem;padding-right:.5rem}.py-1{padding-top:.25rem;padding-bottom:.25rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.py-\[0\.555rem\]{padding-top:.555rem;padding-bottom:.555rem}.px-4{padding-left:1rem;padding-right:1rem}.text-center{text-align:center}.align-middle{vertical-align:middle}.font-semibold{font-weight:600}.font-bold{font-weight:700}.text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity))}.text-sky-900{--tw-text-opacity: 1;color:rgb(12 74 110 / var(--tw-text-opacity))}.opacity-25{opacity:.25}.opacity-75{opacity:.75}.opacity-50{opacity:.5}.shadow-inner{--tw-shadow: inset 0 2px 4px 0 rgb(0 0 0 / .05);--tw-shadow-colored: inset 0 2px 4px 0 var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.outline-none{outline:2px solid transparent;outline-offset:2px}a{-webkit-text-decoration-line:underline!important;text-decoration-line:underline!important}.drawing-board-controls{--tw-border-spacing-x: .125rem !important;--tw-border-spacing-y: .125rem !important;border-spacing:var(--tw-border-spacing-x) var(--tw-border-spacing-y)!important}@media (min-width: 768px){.drawing-board-controls{--tw-border-spacing-x: .5rem !important;--tw-border-spacing-y: .5rem !important;border-spacing:var(--tw-border-spacing-x) var(--tw-border-spacing-y)!important}}.hover\:bg-sky-300:hover{--tw-bg-opacity: 1;background-color:rgb(125 211 252 / var(--tw-bg-opacity))}@media (min-width: 816px){.desktop\:mt-\[34px\]{margin-top:34px}.desktop\:inline{display:inline}}@media (min-width: 768px){.md\:px-12{padding-left:3rem;padding-right:3rem}}@media (min-width: 1024px){.lg\:px-56{padding-left:14rem;padding-right:14rem}} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/attr/filters.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/attr/filters.py deleted file mode 100644 index a1e40c98db853aa375ab0b24559e0559f91e6152..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/attr/filters.py +++ /dev/null @@ -1,66 +0,0 @@ -# SPDX-License-Identifier: MIT - -""" -Commonly useful filters for `attr.asdict`. -""" - -from ._make import Attribute - - -def _split_what(what): - """ - Returns a tuple of `frozenset`s of classes and attributes. - """ - return ( - frozenset(cls for cls in what if isinstance(cls, type)), - frozenset(cls for cls in what if isinstance(cls, str)), - frozenset(cls for cls in what if isinstance(cls, Attribute)), - ) - - -def include(*what): - """ - Include *what*. - - :param what: What to include. - :type what: `list` of classes `type`, field names `str` or - `attrs.Attribute`\\ s - - :rtype: `callable` - - .. versionchanged:: 23.1.0 Accept strings with field names. - """ - cls, names, attrs = _split_what(what) - - def include_(attribute, value): - return ( - value.__class__ in cls - or attribute.name in names - or attribute in attrs - ) - - return include_ - - -def exclude(*what): - """ - Exclude *what*. - - :param what: What to exclude. - :type what: `list` of classes `type`, field names `str` or - `attrs.Attribute`\\ s. - - :rtype: `callable` - - .. 
versionchanged:: 23.3.0 Accept field name string as input argument - """ - cls, names, attrs = _split_what(what) - - def exclude_(attribute, value): - return not ( - value.__class__ in cls - or attribute.name in names - or attribute in attrs - ) - - return exclude_ diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/token.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/token.py deleted file mode 100644 index 90008b7229d025a2f2432516d3a7e0da5f6781f9..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/token.py +++ /dev/null @@ -1,180 +0,0 @@ -from __future__ import annotations - -from collections.abc import Callable, MutableMapping -import dataclasses as dc -from typing import Any, Literal -import warnings - -from markdown_it._compat import DATACLASS_KWARGS - - -def convert_attrs(value: Any) -> Any: - """Convert Token.attrs set as ``None`` or ``[[key, value], ...]`` to a dict. - - This improves compatibility with upstream markdown-it. - """ - if not value: - return {} - if isinstance(value, list): - return dict(value) - return value - - -@dc.dataclass(**DATACLASS_KWARGS) -class Token: - type: str - """Type of the token (string, e.g. "paragraph_open")""" - - tag: str - """HTML tag name, e.g. 'p'""" - - nesting: Literal[-1, 0, 1] - """Level change (number in {-1, 0, 1} set), where: - - `1` means the tag is opening - - `0` means the tag is self-closing - - `-1` means the tag is closing - """ - - attrs: dict[str, str | int | float] = dc.field(default_factory=dict) - """HTML attributes. - Note this differs from the upstream "list of lists" format, - although than an instance can still be initialised with this format. - """ - - map: list[int] | None = None - """Source map info. Format: `[ line_begin, line_end ]`""" - - level: int = 0 - """Nesting level, the same as `state.level`""" - - children: list[Token] | None = None - """Array of child nodes (inline and img tokens).""" - - content: str = "" - """Inner content, in the case of a self-closing tag (code, html, fence, etc.),""" - - markup: str = "" - """'*' or '_' for emphasis, fence string for fence, etc.""" - - info: str = "" - """Additional information: - - Info string for "fence" tokens - - The value "auto" for autolink "link_open" and "link_close" tokens - - The string value of the item marker for ordered-list "list_item_open" tokens - """ - - meta: dict[Any, Any] = dc.field(default_factory=dict) - """A place for plugins to store any arbitrary data""" - - block: bool = False - """True for block-level tokens, false for inline tokens. - Used in renderer to calculate line breaks - """ - - hidden: bool = False - """If true, ignore this element when rendering. - Used for tight lists to hide paragraphs. - """ - - def __post_init__(self) -> None: - self.attrs = convert_attrs(self.attrs) - - def attrIndex(self, name: str) -> int: - warnings.warn( # noqa: B028 - "Token.attrIndex should not be used, since Token.attrs is a dictionary", - UserWarning, - ) - if name not in self.attrs: - return -1 - return list(self.attrs.keys()).index(name) - - def attrItems(self) -> list[tuple[str, str | int | float]]: - """Get (key, value) list of attrs.""" - return list(self.attrs.items()) - - def attrPush(self, attrData: tuple[str, str | int | float]) -> None: - """Add `[ name, value ]` attribute to list. 
Init attrs if necessary.""" - name, value = attrData - self.attrSet(name, value) - - def attrSet(self, name: str, value: str | int | float) -> None: - """Set `name` attribute to `value`. Override old value if exists.""" - self.attrs[name] = value - - def attrGet(self, name: str) -> None | str | int | float: - """Get the value of attribute `name`, or null if it does not exist.""" - return self.attrs.get(name, None) - - def attrJoin(self, name: str, value: str) -> None: - """Join value to existing attribute via space. - Or create new attribute if not exists. - Useful to operate with token classes. - """ - if name in self.attrs: - current = self.attrs[name] - if not isinstance(current, str): - raise TypeError( - f"existing attr 'name' is not a str: {self.attrs[name]}" - ) - self.attrs[name] = f"{current} {value}" - else: - self.attrs[name] = value - - def copy(self, **changes: Any) -> Token: - """Return a shallow copy of the instance.""" - return dc.replace(self, **changes) - - def as_dict( - self, - *, - children: bool = True, - as_upstream: bool = True, - meta_serializer: Callable[[dict[Any, Any]], Any] | None = None, - filter: Callable[[str, Any], bool] | None = None, - dict_factory: Callable[..., MutableMapping[str, Any]] = dict, - ) -> MutableMapping[str, Any]: - """Return the token as a dictionary. - - :param children: Also convert children to dicts - :param as_upstream: Ensure the output dictionary is equal to that created by markdown-it - For example, attrs are converted to null or lists - :param meta_serializer: hook for serializing ``Token.meta`` - :param filter: A callable whose return code determines whether an - attribute or element is included (``True``) or dropped (``False``). - Is called with the (key, value) pair. - :param dict_factory: A callable to produce dictionaries from. - For example, to produce ordered dictionaries instead of normal Python - dictionaries, pass in ``collections.OrderedDict``. - - """ - mapping = dict_factory((f.name, getattr(self, f.name)) for f in dc.fields(self)) - if filter: - mapping = dict_factory((k, v) for k, v in mapping.items() if filter(k, v)) - if as_upstream and "attrs" in mapping: - mapping["attrs"] = ( - None - if not mapping["attrs"] - else [[k, v] for k, v in mapping["attrs"].items()] - ) - if meta_serializer and "meta" in mapping: - mapping["meta"] = meta_serializer(mapping["meta"]) - if children and mapping.get("children", None): - mapping["children"] = [ - child.as_dict( - children=children, - filter=filter, - dict_factory=dict_factory, - as_upstream=as_upstream, - meta_serializer=meta_serializer, - ) - for child in mapping["children"] - ] - return mapping - - @classmethod - def from_dict(cls, dct: MutableMapping[str, Any]) -> Token: - """Convert a dict to a Token.""" - token = cls(**dct) - if token.children: - token.children = [cls.from_dict(c) for c in token.children] # type: ignore[arg-type] - return token diff --git a/spaces/declare-lab/tango/diffusers/tests/test_training.py b/spaces/declare-lab/tango/diffusers/tests/test_training.py deleted file mode 100644 index d540f997622148082874272ff7cebffea4d4450d..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/tests/test_training.py +++ /dev/null @@ -1,86 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import torch - -from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel -from diffusers.training_utils import set_seed -from diffusers.utils.testing_utils import slow - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class TrainingTests(unittest.TestCase): - def get_model_optimizer(self, resolution=32): - set_seed(0) - model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3) - optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) - return model, optimizer - - @slow - def test_training_step_equality(self): - device = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable - ddpm_scheduler = DDPMScheduler( - num_train_timesteps=1000, - beta_start=0.0001, - beta_end=0.02, - beta_schedule="linear", - clip_sample=True, - ) - ddim_scheduler = DDIMScheduler( - num_train_timesteps=1000, - beta_start=0.0001, - beta_end=0.02, - beta_schedule="linear", - clip_sample=True, - ) - - assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps - - # shared batches for DDPM and DDIM - set_seed(0) - clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)] - noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)] - timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)] - - # train with a DDPM scheduler - model, optimizer = self.get_model_optimizer(resolution=32) - model.train().to(device) - for i in range(4): - optimizer.zero_grad() - ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i]) - ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample - loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i]) - loss.backward() - optimizer.step() - del model, optimizer - - # recreate the model and optimizer, and retry with DDIM - model, optimizer = self.get_model_optimizer(resolution=32) - model.train().to(device) - for i in range(4): - optimizer.zero_grad() - ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i]) - ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample - loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i]) - loss.backward() - optimizer.step() - del model, optimizer - - self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5)) - self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5)) diff --git a/spaces/deelerb/3dselfie/PIFu/spaces.py b/spaces/deelerb/3dselfie/PIFu/spaces.py deleted file mode 100644 index d68a73a9f57c022517e719b9c843b8e8c3cb5925..0000000000000000000000000000000000000000 --- a/spaces/deelerb/3dselfie/PIFu/spaces.py +++ /dev/null @@ -1,139 +0,0 @@ -import os -os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" -os.environ["CUDA_VISIBLE_DEVICES"]="0" -try: - os.system("pip install --upgrade torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html") -except Exception as e: - print(e) - -from pydoc import describe -from huggingface_hub import hf_hub_download -import gradio as gr -import os -from datetime import 
datetime -from PIL import Image -import torch -import torchvision -import skimage -import paddlehub -import numpy as np -from lib.options import BaseOptions -from apps.crop_img import process_img -from apps.eval import Evaluator -from types import SimpleNamespace -import trimesh -import glob - -print( - "torch: ", torch.__version__, - "\ntorchvision: ", torchvision.__version__, - "\nskimage:", skimage.__version__ -) - -print("EnV", os.environ) - -net_C = hf_hub_download("radames/PIFu-upright-standing", filename="net_C") -net_G = hf_hub_download("radames/PIFu-upright-standing", filename="net_G") - - -opt = BaseOptions() -opts = opt.parse_to_dict() -opts['batch_size'] = 1 -opts['mlp_dim'] = [257, 1024, 512, 256, 128, 1] -opts['mlp_dim_color'] = [513, 1024, 512, 256, 128, 3] -opts['num_stack'] = 4 -opts['num_hourglass'] = 2 -opts['resolution'] = 128 -opts['hg_down'] = 'ave_pool' -opts['norm'] = 'group' -opts['norm_color'] = 'group' -opts['load_netG_checkpoint_path'] = net_G -opts['load_netC_checkpoint_path'] = net_C -opts['results_path'] = "./results" -opts['name'] = "spaces_demo" -opts = SimpleNamespace(**opts) -print("Params", opts) -evaluator = Evaluator(opts) -bg_remover_model = paddlehub.Module(name="U2Net") - - -def process(img_path): - base = os.path.basename(img_path) - img_name = os.path.splitext(base)[0] - print("\n\n\nStarting Process", datetime.now()) - print("image name", img_name) - img_raw = Image.open(img_path).convert('RGB') - - img = img_raw.resize( - (512, int(512 * img_raw.size[1] / img_raw.size[0])), - Image.Resampling.LANCZOS) - - try: - # remove background - print("Removing Background") - masks = bg_remover_model.Segmentation( - images=[np.array(img)], - paths=None, - batch_size=1, - input_size=320, - output_dir='./PIFu/inputs', - visualization=False) - mask = masks[0]["mask"] - front = masks[0]["front"] - except Exception as e: - print(e) - - print("Aliging mask with input training image") - print("Not aligned", front.shape, mask.shape) - img_new, msk_new = process_img(front, mask) - print("Aligned", img_new.shape, msk_new.shape) - - try: - time = datetime.now() - data = evaluator.load_image_from_memory(img_new, msk_new, img_name) - print("Evaluating via PIFu", time) - evaluator.eval(data, True) - print("Success Evaluating via PIFu", datetime.now() - time) - result_path = f'./{opts.results_path}/{opts.name}/result_{img_name}' - except Exception as e: - print("Error evaluating via PIFu", e) - - try: - mesh = trimesh.load(result_path + '.obj') - # flip mesh - mesh.apply_transform([[-1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, -1, 0], - [0, 0, 0, 1]]) - mesh.export(file_obj=result_path + '.glb') - result_gltf = result_path + '.glb' - return [result_gltf, result_gltf] - - except Exception as e: - print("error generating MESH", e) - - -examples = sorted(glob.glob('examples/*.png')) -description = ''' - -**The inference takes about 180seconds for a new image.** - - -''' - -iface = gr.Interface( - fn=process, - description=description, - inputs=gr.Image(type="filepath", label="Input Image"), - outputs=[ - gr.Model3D( - clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model"), - gr.File(label="Download 3D Model") - ], - examples=examples, - allow_flagging="never", - cache_examples=True -) - -if __name__ == "__main__": - iface.launch(debug=True, enable_queue=False) diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/facerender/modules/mapping.py b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/facerender/modules/mapping.py deleted file mode 100644 index 
0e3a1c2d1770996080c08e9daafb346f05d7bcdd..0000000000000000000000000000000000000000 --- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/facerender/modules/mapping.py +++ /dev/null @@ -1,47 +0,0 @@ -import numpy as np - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class MappingNet(nn.Module): - def __init__(self, coeff_nc, descriptor_nc, layer, num_kp, num_bins): - super( MappingNet, self).__init__() - - self.layer = layer - nonlinearity = nn.LeakyReLU(0.1) - - self.first = nn.Sequential( - torch.nn.Conv1d(coeff_nc, descriptor_nc, kernel_size=7, padding=0, bias=True)) - - for i in range(layer): - net = nn.Sequential(nonlinearity, - torch.nn.Conv1d(descriptor_nc, descriptor_nc, kernel_size=3, padding=0, dilation=3)) - setattr(self, 'encoder' + str(i), net) - - self.pooling = nn.AdaptiveAvgPool1d(1) - self.output_nc = descriptor_nc - - self.fc_roll = nn.Linear(descriptor_nc, num_bins) - self.fc_pitch = nn.Linear(descriptor_nc, num_bins) - self.fc_yaw = nn.Linear(descriptor_nc, num_bins) - self.fc_t = nn.Linear(descriptor_nc, 3) - self.fc_exp = nn.Linear(descriptor_nc, 3*num_kp) - - def forward(self, input_3dmm): - out = self.first(input_3dmm) - for i in range(self.layer): - model = getattr(self, 'encoder' + str(i)) - out = model(out) + out[:,:,3:-3] - out = self.pooling(out) - out = out.view(out.shape[0], -1) - #print('out:', out.shape) - - yaw = self.fc_yaw(out) - pitch = self.fc_pitch(out) - roll = self.fc_roll(out) - t = self.fc_t(out) - exp = self.fc_exp(out) - - return {'yaw': yaw, 'pitch': pitch, 'roll': roll, 't': t, 'exp': exp} \ No newline at end of file diff --git a/spaces/deepwisdom/MetaGPT/examples/llm_hello_world.py b/spaces/deepwisdom/MetaGPT/examples/llm_hello_world.py deleted file mode 100644 index 329247afc6e34efd0346645c2bf4d1bb4808389e..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/examples/llm_hello_world.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/6 14:13 -@Author : alexanderwu -@File : llm_hello_world.py -@Modified By: mashenquan, 2023-8-9, fix-bug: cannot find metagpt module. -""" -import asyncio -from pathlib import Path -import sys -sys.path.append(str(Path(__file__).resolve().parent.parent)) -from metagpt.llm import LLM, Claude -from metagpt.logs import logger - - -async def main(): - llm = LLM() - claude = Claude() - logger.info(await claude.aask('你好,请进行自我介绍')) - logger.info(await llm.aask('hello world')) - logger.info(await llm.aask_batch(['hi', 'write python hello world.'])) - - hello_msg = [{'role': 'user', 'content': 'count from 1 to 10. split by newline.'}] - logger.info(await llm.acompletion(hello_msg)) - logger.info(await llm.acompletion_batch([hello_msg])) - logger.info(await llm.acompletion_batch_text([hello_msg])) - - logger.info(await llm.acompletion_text(hello_msg)) - await llm.acompletion_text(hello_msg, stream=True) - - -if __name__ == '__main__': - asyncio.run(main()) diff --git a/spaces/deepwisdom/MetaGPT/metagpt/actions/action.py b/spaces/deepwisdom/MetaGPT/metagpt/actions/action.py deleted file mode 100644 index e4b9613ad9de23a4bdf3c91c34773ebc9bf57a7a..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/metagpt/actions/action.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/11 14:43 -@Author : alexanderwu -@File : action.py -@Modified By: mashenquan, 2023/8/20. Add function return annotations. 
-""" -from __future__ import annotations - -from abc import ABC -from typing import Optional - -from tenacity import retry, stop_after_attempt, wait_fixed - -from metagpt.actions.action_output import ActionOutput -from metagpt.llm import LLM -from metagpt.logs import logger -from metagpt.utils.common import OutputParser - - -class Action(ABC): - def __init__(self, name: str = "", context=None, llm: LLM = None): - self.name: str = name - if llm is None: - llm = LLM() - self.llm = llm - self.context = context - self.prefix = "" - self.profile = "" - self.desc = "" - self.content = "" - self.instruct_content = None - - def set_prefix(self, prefix, profile): - """Set prefix for later usage""" - self.prefix = prefix - self.profile = profile - - def __str__(self): - return self.__class__.__name__ - - def __repr__(self): - return self.__str__() - - async def _aask(self, prompt: str, system_msgs: Optional[list[str]] = None) -> str: - """Append default prefix""" - if not system_msgs: - system_msgs = [] - system_msgs.append(self.prefix) - return await self.llm.aask(prompt, system_msgs) - - @retry(stop=stop_after_attempt(2), wait=wait_fixed(1)) - async def _aask_v1( - self, prompt: str, output_class_name: str, output_data_mapping: dict, system_msgs: Optional[list[str]] = None - ) -> ActionOutput: - """Append default prefix""" - if not system_msgs: - system_msgs = [] - system_msgs.append(self.prefix) - content = await self.llm.aask(prompt, system_msgs) - logger.debug(content) - output_class = ActionOutput.create_model_class(output_class_name, output_data_mapping) - parsed_data = OutputParser.parse_data_with_mapping(content, output_data_mapping) - logger.debug(parsed_data) - instruct_content = output_class(**parsed_data) - return ActionOutput(content, instruct_content) - - async def run(self, *args, **kwargs) -> str | ActionOutput | None: - """Run action""" - raise NotImplementedError("The run method should be implemented in a subclass.") diff --git a/spaces/devisionx/auto-annotation-segmentation/app.py b/spaces/devisionx/auto-annotation-segmentation/app.py deleted file mode 100644 index 31be1bf1bafcde05b54038f30c6df48a90ea7b96..0000000000000000000000000000000000000000 --- a/spaces/devisionx/auto-annotation-segmentation/app.py +++ /dev/null @@ -1,165 +0,0 @@ -import base64 -import io -import cv2 -import requests -import json -import gradio as gr -import os -from PIL import Image -import numpy as np -from PIL import ImageOps - -# Accessing a specific environment variable -api_key = os.environ.get('devisionx') - -# Checking if the environment variable exists -if not api_key: - print("devisionx environment variable is not set.") - exit() - -# Define a function to call the API and get the results - -def base64str_to_PILImage(base64str): - base64_img_bytes = base64str.encode('utf-8') - base64bytes = base64.b64decode(base64_img_bytes) - bytesObj = io.BytesIO(base64bytes) - return ImageOps.exif_transpose(Image.open(bytesObj)) - -def get_results(image, prompt,segment): - threshold = 0.5 - - # Convert the NumPy array to PIL image - image = Image.fromarray(image) - - # Convert the image to base64 string - with io.BytesIO() as output: - image.save(output, format="JPEG") - base64str = base64.b64encode(output.getvalue()).decode("utf-8") - - # Prepare the payload (Adjust this part according to the API requirements) - #payload = json.dumps({"base64str": base64str, "classes": prompt}) - task_="0" - if segment == "Segmentation": - task_="1" - payload =json.dumps({ - "base64str": base64str, - "classes": prompt, - 
"segment": task_ }) - # Prepare the payload (Adjust this part according to the API requirements) - # Send the request to the API - response = requests.put(api_key, data=payload) - - # Parse the JSON response - data = response.json() - print(response.status_code) - print(data) - - # Access the values (Adjust this part according to the API response format) - output_image_base64 = data['firstName'] # Assuming the API returns the output image as base64 - - - # Convert the output image from base64 to PIL and then to NumPy array - output_image = base64str_to_PILImage(output_image_base64) - output_image = np.array(output_image) - - return output_image - - -# Define the input components for Gradio (adding a new input for the prompt) -# image_input = gr.inputs.Image() -# text_input = gr.inputs.Textbox(label="Prompt") # New input for the text prompt - - -# # Define the output components for Gradio (including both image and text) -# outputs = gr.Image(type="numpy", label="Output Image") - -# Define the text description within an HTML
            element -description_html = """ - - - - Tuba AI Auto-Annotation - - -

            Tuba AI Auto-Annotation 🚀

            -

            Saving Time, Bounding Boxes & Polygons at a Time

            -

            Introduction

            -

            Welcome to the world of DevisionX, where AI meets vision to revolutionize annotation. Our mission is to make computer vision accessible to all, and this README is your gateway to understanding how our auto-annotation model can change the way you work.

            -

            Meet Tuba.AI - Your Partner in Vision

            -

            What is Tuba?

            -

            Tuba is the secret sauce behind DevisionX, your no-code/low-code companion for all things computer vision. It's your toolkit for labeling, training data, and deploying AI-vision applications faster and easier than ever before.

            -
              -
            • No-Code/Low-Code: Say goodbye to complex coding. Tuba's user-friendly interface makes it accessible to everyone.
            • -
            • Labeling Made Easy: Annotate your data effortlessly with Tuba's intuitive tools.
            • -
            • Faster Deployment: Deploy your AI models with ease, whether you're building a standalone app or integrating within an existing one.
            • -
            • State-of-the-Art Technology: Tuba is powered by the latest AI tech and follows production-ready standards.
            • -
            -

            The DevisionX Auto-Annotation

            -

            Our auto-annotation model is a game-changer. It takes input text and images, weaving them together to generate precise bounding boxes. This AI marvel comes with a plethora of benefits:

            -
              -
            • Time Saver: Say goodbye to hours of manual annotation. Let our model do the heavy lifting.
            • -
            • Annotation Formats: It speaks the language of YOLO and COCO, making it versatile for various projects.
            • -
            • Human Assistance: While it's incredibly efficient, it also respects human creativity and can be your reliable assistant.
            • -
            -

            Let's Build Together

            -

            We are here to redefine the way you approach computer vision. Join us in this exciting journey, where AI meets creativity, and innovation knows no bounds.

            -

            Get started today and be a part of the future of vision.

            - - - - - -""" -title = "autoannotation" - -description = "This is a project description. It demonstrates how to use Gradio with an image and text input to interact with an API." - -import os -examples = [ - ["traffic.jpg", 'person,car,traffic sign,traffic light', "Segmentation"], # Example with "Segmentation" selected - ["3000.jpeg", 'person,car,traffic sign,traffic light', "Detection"], # Example with "Detection" selected -] - - - -# Create a Blocks object and use it as a context manager -with gr.Blocks() as demo: - gr.Markdown( - """ -
            -

            Tuba Autoannotation Demo

            -

            A prompt based controllable model for auto annotation (Detection and Segmentation)

            -

            Saving Time, Bounding Boxes & Polygons at a Time

            - Powered by Tuba -
            - """ - ) - # Define the input components and add them to the layout - - with gr.Row(): - image_input = gr.Image() - output = gr.Image(type="numpy", label="Output Image") - - # Define the output component and add it to the layout - with gr.Row(): - text_input = gr.Textbox(label="Prompt") - - # text_input = gr.inputs.Textbox(label="Prompt") - with gr.Row(): - segment_checkbox = gr.Radio(["Segmentation", "Detection"], value="file",label="Select Detection or Segmentation",info="Select Segmentation to extract Polygons or Detection to extract only the bounding boxes of of the desired objects automatically") - #segment_checkbox = gr.inputs.Checkbox(label="Segment", default=False) - with gr.Row(): - button = gr.Button("Run") - - # Define the event listener that connects the input and output components and triggers the function - button.click(fn=get_results, inputs=[image_input, text_input, segment_checkbox], outputs=output, api_name="get_results") - # Add the description below the layout - gr.Examples( - fn=get_results, - examples=examples, - inputs=[image_input, text_input,segment_checkbox], - outputs=[output] - ) - gr.Markdown(description_html) -# Launch the app -demo.launch(share=False) \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/City Car Driving 1.4.1 Crack.md b/spaces/diacanFperku/AutoGPT/City Car Driving 1.4.1 Crack.md deleted file mode 100644 index 6b44b894a9f02b05b50e33d0f5854d90a277c3e4..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/City Car Driving 1.4.1 Crack.md +++ /dev/null @@ -1,11 +0,0 @@ -

            city car driving 1.4.1 crack


            Download Ziphttps://gohhs.com/2uFSXw



            - -May 26, 2015 - - Read about the origin and history of the City Car Driving Crack driving simulator - in this article. Key Attributes: Multilingual ... City Car Driving Crack -This article describes the City Car Driving Crack driving simulator. -This article describes City Car Driving Crack driving simulator, which can be found on the page. Key Attributes: Multilingual Interface ... -City Car Driving Crack -This article describes the City Car Driving Simulator Crack, which can be found on the page. Key attributes: Multilingual Interface. -City Car Driving Crack 8a78ff9644
            -
            -
            -

            diff --git a/spaces/diacanFperku/AutoGPT/Fifa Manager 07 No-cd !!BETTER!! Crack.md b/spaces/diacanFperku/AutoGPT/Fifa Manager 07 No-cd !!BETTER!! Crack.md deleted file mode 100644 index 59f572962060b11f97711c700a4c28ac3b492705..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Fifa Manager 07 No-cd !!BETTER!! Crack.md +++ /dev/null @@ -1,96 +0,0 @@ -
            -

            Fifa Manager 07 No-CD Crack

            - -

            Fifa Manager 07 is a football management simulation game that allows you to take control of your favorite team and lead them to glory. The game features realistic gameplay, detailed graphics, and a huge database of players and teams. However, if you want to play the game without the original CD, you will need a no-CD crack that can bypass the CD check and allow you to run the game on any PC. In this article, we will tell you everything you need to know about Fifa Manager 07 no-CD crack and how to download and use it.

            -

            Fifa Manager 07 No-cd Crack


            Download Zip ✔✔✔ https://gohhs.com/2uFT1K



            - -

            What is Fifa Manager 07 No-CD Crack?

            - -

            Fifa Manager 07 no-CD crack is a modified executable file that can replace the original game file and make it run without the CD. The no-CD crack works by fooling the game into thinking that the CD is inserted and that the game is authorized to run. The no-CD crack also removes the SecuROM v7 protection that prevents the game from being copied or pirated.

            - -

            Fifa Manager 07 no-CD crack can help you play the game without the hassle of inserting the CD every time you want to play. It can also help you avoid any damage or scratches on your CD that may affect the game performance. Moreover, it can help you save some space on your hard drive by not having to install the game from the CD.

            - -

            How to Download Fifa Manager 07 No-CD Crack?

            - -

            One of the challenges of finding Fifa Manager 07 no-CD crack is that there are many websites that offer it, but not all of them are reliable or safe. Some websites may contain fake or outdated files that may not work or may harm your computer or device. Therefore, you need to be careful and cautious when downloading Fifa Manager 07 no-CD crack.

            -

            - -

            One of the websites that we recommend for downloading Fifa Manager 07 no-CD crack is GameCopyWorld, which is a reputable and trustworthy website that hosts various files and documents for games. You can find the link to download Fifa Manager 07 no-CD crack from GameCopyWorld below:

            - -

            https://www.gamecopyworld.com/games/pc_fifa_manager_07.shtml

            - -

            Once you download the file, you need to unzip it and copy the FIFA MANAGER 07.EXE file to your game directory.

            - -

            How to Use Fifa Manager 07 No-CD Crack?

            - -

            After downloading and copying Fifa Manager 07 no-CD crack to your game directory, you need to follow these steps to use it:

            - -
              -
            1. Make sure you have installed Fifa Manager 07 on your PC from the original CD.
            2. -
            3. Make a backup of the original FIFA MANAGER 07.EXE file in case you need to restore it later.
            4. -
            5. Paste the FIFA MANAGER 07.EXE file from the no-CD crack archive over the original file in your game directory.
            6. -
            7. Launch the game from the FIFA MANAGER 07.EXE file and enjoy playing without the CD.
            8. -
            - -

            What are the Advantages and Disadvantages of Using Fifa Manager 07 No-CD Crack?

            - -

            Fifa Manager 07 no-CD crack is a useful and convenient tool that can help you play the game without the CD, but it also has some advantages and disadvantages that you need to be aware of before using it.

            - -

            Some of the advantages of using Fifa Manager 07 no-CD crack are:

            - -
              -
            • It can help you play the game without the hassle of inserting the CD every time you want to play.
            • -
            • It can help you avoid any damage or scratches on your CD that may affect the game performance.
            • -
            • It can help you save some space on your hard drive by not having to install the game from the CD.
            • -
            - -

            Some of the disadvantages of using Fifa Manager 07 no-CD crack are:

            - -
              -
            • It may not work for some versions or updates of the game.
            • -
            • It may be detected as a virus or malware by some antivirus or security software.
            • -
            • It may violate the terms and conditions of the game and cause legal issues.
            • -
            - -

            Conclusion

            - -

            In conclusion, Fifa Manager 07 no-CD crack is a modified executable file that can replace -the original game file and make it run without the CD. The no-CD crack works by fooling -the game into thinking that the CD is inserted and that the game is authorized to run. The -no-CD crack also removes the SecuROM v7 protection that prevents the game from being -copied or pirated.

            - -

            If you want to download Fifa Manager 07 no-CD crack, you can use GameCopyWorld as -a source, but be careful of any viruses or malware that may come with it.

            - -

            If you want to use Fifa Manager 07 no-CD crack, you need to follow some simple steps -to copy it to your game directory and launch it from there.

            - -

            Fifa Manager 07 no-CD crack has some advantages and disadvantages that you need to -be aware of before using it. It can help you play the game without the CD, but it may also -cause some problems or issues.

            - -

            We hope this article has helped you understand more about Fifa Manager 07 no-CD -crack and how to download and use it. If you have any questions or feedback, please feel -free to leave a comment below.

            -


            3cee63e6c2
            -
            -
            \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/HD Online Player (Motu Patlu - King Of Kings Part 1 In).md b/spaces/diacanFperku/AutoGPT/HD Online Player (Motu Patlu - King Of Kings Part 1 In).md deleted file mode 100644 index 8ee88b94be502b9c06bc0cdbb3df9f44138db0a1..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/HD Online Player (Motu Patlu - King Of Kings Part 1 In).md +++ /dev/null @@ -1,9 +0,0 @@ -
            -

            motu patlu king of kings opens in the circus, where the people gathered to watch the show of a lion named guddu. but the circus had a fire accident and fortunately lion dies. it scared everyone. however motu rescues his city from the lion by eating samosas and caged the lion. motu asks about the lion. the lion explained and requested them to put him in the forest. however chingam officer says to put him in the national park. meanwhile in the forest the animals were enjoying. three monster cars captured some animals so the animals sought the help of the lion. however lion manages to rescue the animals and they ran off. but a man named dheeru manages to arrest the lion and some other animals. a girl from his gang slaps him with the mosquito bat. meanwhile motu and patlu takes lion in a jeep

            -

            HD Online Player (Motu Patlu - King of Kings part 1 in)


            Download Filehttps://gohhs.com/2uFSRV



            -

            family please don't be kids you can watch and download motu patlu - king of kings part 1 in high quality, hd format. uploaded by videoswatchfree. motu patlu - king of kings hindi. https://www.youtube.com/watch?v=rmkh6v6f8lw

            -

            download motu patlu king of kings movie hd 1080p 720p watch online. subscribe to tvnz on demand to play or watch hd online player motu patlu king of kings video. motu patlu - king of kings 2016 is a comedy hindi film starring saurav chakraborty,vinay pathak in the lead roles, directed by suhas kadav. motu patlu - king of kings (2016) is a comedy hindi film starring saurav chakraborty,vinay pathak in the lead roles, directed by suhas kadav. hd online player motu patlu king of kings video.

            -

            motu patlu - king of kings (2016) is a comedy hindi film starring saurav chakraborty,vinay pathak in the lead roles, directed by suhas kadav. motu patlu - king of kings (2016) is a comedy hindi film starring saurav chakraborty,vinay pathak in the lead roles, directed by suhas kadav. https://www.horizondigitalnet.com/profile/hd-online-player-life-is-beautiful-full-. /post/download-motu-patlu-king-of-kings-movie-in-720p-movies-work.

            -

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Ion Fulga Farmacologie Pdf 13.md b/spaces/diacanFperku/AutoGPT/Ion Fulga Farmacologie Pdf 13.md deleted file mode 100644 index 6bb6ddf1144793086d4cfc8234a42ec2c9bea68c..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Ion Fulga Farmacologie Pdf 13.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Ion Fulga Farmacologie Pdf 13


            Download File ✓✓✓ https://gohhs.com/2uFUzO



            -
            -13. 294. Non-ST-Segment elevation acute coronary syndrome (p 1593-1599). 14. 239. ... FARMACOLOGIE. Farmacologie, editia ... Ion Fulga (coordonator). VIII. 1fdad05405
            -
            -
            -

            diff --git a/spaces/diacanFperku/AutoGPT/Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download.md b/spaces/diacanFperku/AutoGPT/Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download.md deleted file mode 100644 index 647a26f8c960791d1f60625754d302e1fdaa0069..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download.md +++ /dev/null @@ -1,112 +0,0 @@ -

            -

            Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download


            Download Zip · https://gohhs.com/2uFU01



            -

            What is Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download?

            - -

Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download is software that allows you to flash, repair, and unlock your Android devices with Qualcomm or MediaTek chipsets. It is a powerful and versatile tool that can help you solve various problems on your devices, such as FRP lock, pattern lock, bootloader lock, IMEI issues, network issues, and more.

            - -

            Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download is a cracked version of the original Miracle Box software that does not require any box or dongle to use. You can download it for free from the internet and install it on your PC. You can use it to perform various tasks on your devices without any limitations or restrictions.

            - -

            How to Download Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download?

            - -

            If you want to download Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download, you can do so from various websites that offer it for free. However, you should be careful and check the downloaded files with any free antivirus before installing them on your PC. Some of the websites that offer Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download are:

            - -
              -
• LexCliq: This website provides a direct download link for Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download along with a detailed guide on how to install and use it.
• VirtuDojo: This website provides a PDF file that contains the download link and the serial number for Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download.
• Get Into PC: This website provides a full offline installer setup of Miracle Box 2020 Free Download which includes Miracle Box 3.04 Crack Serial Number Latest Version 2020.
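
The integrity check mentioned above can be made concrete with a small, hedged sketch: a one-file SHA-256 comparison in Python. Nothing here is specific to Miracle Box; the archive name and expected hash below are placeholders, and the check only helps if the download page actually publishes a hash.

```python
import hashlib
from pathlib import Path

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with Path(path).open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder values: substitute the real archive name and the hash
# published by the download page, if one is provided at all.
archive = "Miracle_Box_setup.zip"
expected = "<hash published by the download page>"

actual = sha256_of(archive)
print("SHA-256:", actual)
print("Match:", actual == expected)
```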

            How to Install and Use Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download?

            - -

            After downloading Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download from any of the above websites, you can install and use it by following these steps:

            - -
              -
1. Extract the downloaded file using any file extractor software such as WinRAR or 7-Zip.
2. Run the setup file as administrator and follow the instructions on the screen.
3. After the installation is complete, run the Miracle Box software from your desktop or start menu.
4. Enter the serial number that you got from the PDF file or the website where you downloaded the software.
5. Connect your device to your PC using a USB cable and enable USB debugging mode on your device (a quick adb connectivity check is sketched right after this list).
6. Select your device model and chipset from the drop-down menu on the software interface.
7. Select the task that you want to perform on your device, such as flash, repair, or unlock.
8. Click on the start button and wait for the process to complete.
9. Disconnect your device from your PC and reboot it.
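
Step 5 above depends on the phone actually being visible over USB. As a small illustrative sketch (it assumes Google's Android platform-tools are installed so that the standard `adb` command is on your PATH; adb is a generic Android tool, not part of Miracle Box), you can confirm the connection before starting any flash or unlock job:

```python
import subprocess

def connected_devices() -> list:
    """Return serials of devices that adb reports as ready (state 'device')."""
    result = subprocess.run(["adb", "devices"], capture_output=True, text=True, check=True)
    serials = []
    for line in result.stdout.splitlines()[1:]:  # first line is the "List of devices attached" header
        parts = line.split()
        if len(parts) == 2 and parts[1] == "device":  # skip "unauthorized" / "offline" entries
            serials.append(parts[0])
    return serials

if __name__ == "__main__":
    devices = connected_devices()
    print("Ready devices:", devices or "none - check the cable, drivers, and USB debugging")
```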
            - -

            You have successfully installed and used Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download on your PC and device.

            -

- -

            What are the Advantages and Disadvantages of Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download?

            - -

Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download has a number of advantages and disadvantages that you should be aware of before using it. Here are some of them:

| Advantages | Disadvantages |
| --- | --- |
| It is free and easy to download and install. | It may not be legal or safe to use a cracked version of the software. |
| It can flash, repair, and unlock various Android devices with Qualcomm or MediaTek chipsets. | It may not support some devices or models that are not compatible with the software. |
| It can solve various problems on your devices such as FRP lock, pattern lock, bootloader lock, IMEI issues, network issues, and more. | It may cause problems on your devices such as data loss, device damage, a voided warranty, or security risks. |
| It has a user-friendly and intuitive interface that makes it easy to use. | It may require some technical knowledge and skills to use it properly and effectively. |
| It has an auto device detection feature that makes it convenient to connect your devices to your PC. | It may not detect your devices correctly, or at all, if you have a faulty USB cable or driver. |
            - -

            How to Update Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download?

            - -

            If you want to update Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download to the latest version, you can do so by following these steps:

            - -
              -
1. Go to the official website of Miracle Box and check for the latest version available.
2. Download the latest version of the Miracle Box setup file from the website or any other trusted source.
3. Uninstall the previous version of Miracle Box from your PC by using the Control Panel or any third-party uninstaller software.
4. Install the latest version of Miracle Box on your PC by running the setup file as administrator and following the instructions on the screen.
5. Enter the serial number that you got from the PDF file or the website where you downloaded the software.
6. Run the Miracle Box software from your desktop or start menu and enjoy its new features and improvements.
            - -

            You have successfully updated Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download to the latest version.

            - -

            Conclusion

            - -

Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download is a tool that can help you flash, repair, and unlock your Android devices with Qualcomm or MediaTek chipsets. It is a free and easy-to-use tool that can solve various problems on your devices such as FRP lock, pattern lock, bootloader lock, IMEI issues, network issues, and more. However, it also has some disadvantages: using a cracked version of the software may be illegal or unsafe, it can cause problems on your devices such as data loss, device damage, a voided warranty, or security risks, and it requires some technical knowledge and skills to use properly and effectively. Therefore, you should be careful and responsible when using this software and always back up your data before performing any tasks on your devices; a minimal backup sketch follows.
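
One generic way to take such a backup, sketched purely as an illustration (this relies on the standard `adb backup` command rather than anything in Miracle Box, and that command is limited or deprecated on recent Android versions, so treat it as a best-effort fallback):

```python
import subprocess
from datetime import datetime

def adb_backup(outfile: str = "") -> str:
    """Trigger a full adb backup; the phone will ask you to confirm on its screen."""
    outfile = outfile or f"device-backup-{datetime.now():%Y%m%d-%H%M%S}.ab"
    # -all backs up all apps that allow it; -f sets the output file.
    subprocess.run(["adb", "backup", "-all", "-f", outfile], check=True)
    return outfile

if __name__ == "__main__":
    print("Backup written to", adb_backup())
```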

            - -

            If you want to download Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download, you can do so from various websites that offer it for free such as LexCliq, VirtuDojo, or Get Into PC. However, you should always check the downloaded files with any free antivirus before installing them on your PC. You can also update this software to the latest version by downloading it from the official website of Miracle Box or any other trusted source.

            - -

            We hope this article was helpful for you. -

            How does Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download Compare with Other Similar Software?

            - -

Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download is not the only software that can flash, repair, and unlock Android devices with Qualcomm or MediaTek chipsets. There are other similar tools that can perform the same or similar tasks on your devices. However, Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download has some advantages and disadvantages compared with these tools that you should consider before choosing one. Here are some of the most popular tools that compete with it:

            - -
              -
• UMT Dongle: This is a dongle-based tool that can flash, repair, and unlock various Android devices with Qualcomm, MediaTek, Spreadtrum, and other chipsets. It supports a wide range of models and brands and offers features such as FRP reset, IMEI repair, network unlock, root, backup, and more.
• SigmaKey: This is a tool that can flash, repair, and unlock various Android devices with Qualcomm, MediaTek, Broadcom, TI OMAP, and other chipsets. It supports a wide range of models and brands and offers features such as FRP reset, IMEI repair, network unlock, root, backup, and more.
• DC Unlocker: This is a tool that can flash, repair, and unlock various Android devices with Qualcomm, MediaTek, and Huawei HiSilicon Kirin chipsets. It supports a wide range of models and brands and offers features such as FRP reset, IMEI repair, network unlock, bootloader unlock, root, backup, and more.
            - -

Miracle Box 3.04 Crack Serial Number Latest Version 2020 Free Download has some advantages over these tools, such as being free and easy to download and install, having an auto device detection feature that makes it convenient to connect your devices to your PC, and having a user-friendly and intuitive interface that makes it easy to use. However, it also has some disadvantages compared with them: using a cracked version of the software may be illegal or unsafe, it can cause problems on your devices such as data loss, device damage, a voided warranty, or security risks, and it requires some technical knowledge and skills to use properly and effectively.

            - -

Therefore, you should compare these tools carefully and choose the one that suits your needs and preferences best.

            -


            3cee63e6c2
            -
            -
            \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/New York Movie In Hindi Dubbed Download.md b/spaces/diacanFperku/AutoGPT/New York Movie In Hindi Dubbed Download.md deleted file mode 100644 index 64069674488b14c1816264c81f1f46d538172254..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/New York Movie In Hindi Dubbed Download.md +++ /dev/null @@ -1,14 +0,0 @@ -

            New York Movie In Hindi Dubbed Download


Download Zip https://gohhs.com/2uFUPo



            -
-2010 720p BluRay H264 AAC-RARBG torrent free, direct download via Magnet Link ... Introducing New South (Sauth) Indian Movies Dubbed In Hindi 2021 Full ... Duration: 1:20:30 ... -All movies HD720 online for free -All Indian series 2020 watch online in Russian on indkino.ru -Indian Movies of 2020 watch online in good quality in Russian -Watch movies and TV series online on Lordfilm. -HD 720p Watch Indian Movies Online in Good Quality in Russian -Indian Movies Watch Online. -Watch Indian Movies 2019 in Russian. -Indian movies rank first in worldwide popularity 8a78ff9644
            -
            -
            -

            diff --git a/spaces/diagaiwei/ir_chinese_medqa/colbert/indexing/index_manager.py b/spaces/diagaiwei/ir_chinese_medqa/colbert/indexing/index_manager.py deleted file mode 100644 index 946cffcb2c439aff739fcf19411f7fe2bd8e1bfe..0000000000000000000000000000000000000000 --- a/spaces/diagaiwei/ir_chinese_medqa/colbert/indexing/index_manager.py +++ /dev/null @@ -1,38 +0,0 @@ -import torch -import numpy as np - -from bitarray import bitarray - - -class IndexManager(): - def __init__(self, dim): - self.dim = dim - - def save(self, tensor, path_prefix): - torch.save(tensor, path_prefix) - - def save_bitarray(self, bitarray, path_prefix): - with open(path_prefix, "wb") as f: - bitarray.tofile(f) - - -def load_index_part(filename, verbose=True): - part = torch.load(filename) - - if type(part) == list: # for backward compatibility - part = torch.cat(part) - - return part - - -def load_compressed_index_part(filename, dim, bits): - a = bitarray() - - with open(filename, "rb") as f: - a.fromfile(f) - - n = len(a) // dim // bits - part = torch.tensor(np.frombuffer(a.tobytes(), dtype=np.uint8)) # TODO: isn't from_numpy(.) faster? - part = part.reshape((n, int(np.ceil(dim * bits / 8)))) - - return part diff --git a/spaces/diego2554/RemBG_super/rembg/sessions/dis.py b/spaces/diego2554/RemBG_super/rembg/sessions/dis.py deleted file mode 100644 index 2f3b849a2b0390a1c4f3f161307d1f1001d22938..0000000000000000000000000000000000000000 --- a/spaces/diego2554/RemBG_super/rembg/sessions/dis.py +++ /dev/null @@ -1,47 +0,0 @@ -import os -from typing import List - -import numpy as np -import pooch -from PIL import Image -from PIL.Image import Image as PILImage - -from .base import BaseSession - - -class DisSession(BaseSession): - def predict(self, img: PILImage, *args, **kwargs) -> List[PILImage]: - ort_outs = self.inner_session.run( - None, - self.normalize(img, (0.485, 0.456, 0.406), (1.0, 1.0, 1.0), (1024, 1024)), - ) - - pred = ort_outs[0][:, 0, :, :] - - ma = np.max(pred) - mi = np.min(pred) - - pred = (pred - mi) / (ma - mi) - pred = np.squeeze(pred) - - mask = Image.fromarray((pred * 255).astype("uint8"), mode="L") - mask = mask.resize(img.size, Image.LANCZOS) - - return [mask] - - @classmethod - def download_models(cls, *args, **kwargs): - fname = f"{cls.name()}.onnx" - pooch.retrieve( - "https://github.com/danielgatis/rembg/releases/download/v0.0.0/isnet-general-use.onnx", - "md5:fc16ebd8b0c10d971d3513d564d01e29", - fname=fname, - path=cls.u2net_home(), - progressbar=True, - ) - - return os.path.join(cls.u2net_home(), fname) - - @classmethod - def name(cls, *args, **kwargs): - return "isnet-general-use" diff --git a/spaces/digitalxingtong/Lixiang-Bert-Vits2/train_ms.py b/spaces/digitalxingtong/Lixiang-Bert-Vits2/train_ms.py deleted file mode 100644 index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Lixiang-Bert-Vits2/train_ms.py +++ /dev/null @@ -1,402 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -import shutil -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from tqdm import tqdm -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -import commons -import utils -from data_utils 
import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, - DurationDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - -torch.backends.cudnn.benchmark = True -torch.backends.cuda.matmul.allow_tf32 = True -torch.backends.cudnn.allow_tf32 = True -torch.set_float32_matmul_precision('medium') -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '65280' - - hps = utils.get_hparams() - if not hps.cont: - shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth') - shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth') - shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth') - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32, 300, 400, 500, 600, 700, 800, 900, 1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, - batch_size=1, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: - print("Using noise scaled MAS for VITS2") - use_noise_scaled_mas = True - mas_noise_scale_initial = 0.01 - noise_scale_delta = 2e-6 - else: - print("Using normal MAS for VITS1") - use_noise_scaled_mas = False - mas_noise_scale_initial = 0.0 - noise_scale_delta = 0.0 - if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: - print("Using duration discriminator for VITS2") - use_duration_discriminator = True - net_dur_disc = DurationDiscriminator( - hps.model.hidden_channels, - hps.model.hidden_channels, - 3, - 0.1, - gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, - ).cuda(rank) - if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: - if hps.data.n_speakers == 0: - raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") - use_spk_conditioned_encoder = True - else: - print("Using normal encoder for VITS1") - use_spk_conditioned_encoder = False - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - 
hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - mas_noise_scale_initial = mas_noise_scale_initial, - noise_scale_delta = noise_scale_delta, - **hps.model).cuda(rank) - - freeze_enc = getattr(hps.model, "freeze_enc", False) - if freeze_enc: - print("freeze encoder !!!") - for param in net_g.enc_p.parameters(): - param.requires_grad = False - - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - if net_dur_disc is not None: - optim_dur_disc = torch.optim.AdamW( - net_dur_disc.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - else: - optim_dur_disc = None - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if net_dur_disc is not None: - net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) - - pretrain_dir = None - if pretrain_dir is None: - try: - if net_dur_disc is not None: - _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) - _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer=not hps.cont) - _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer=not hps.cont) - - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except Exception as e: - print(e) - epoch_str = 1 - global_step = 0 - else: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, - optim_g, True) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, - optim_d, True) - - - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - if net_dur_disc is not None: - scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - else: - scheduler_dur_disc = None - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - if net_dur_disc is not None: - scheduler_dur_disc.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d, net_dur_disc = nets - optim_g, optim_d, optim_dur_disc = optims - scheduler_g, scheduler_d, scheduler_dur_disc = schedulers - train_loader, eval_loader = loaders - if 
writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - if net_dur_disc is not None: - net_dur_disc.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): - if net_g.module.use_noise_scaled_mas: - current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step - net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - speakers = speakers.cuda(rank, non_blocking=True) - tone = tone.cuda(rank, non_blocking=True) - language = language.cuda(rank, non_blocking=True) - bert = bert.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) - with autocast(enabled=False): - # TODO: I think need to mean using the mask, but for now, just mean all - loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) - loss_dur_disc_all = loss_dur_disc - optim_dur_disc.zero_grad() - scaler.scale(loss_dur_disc_all).backward() - scaler.unscale_(optim_dur_disc) - grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) - scaler.step(optim_dur_disc) - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - if net_dur_disc is not None: - loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g) - 
loss_gen_all += loss_dur_gen - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. * batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update( - {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - if net_dur_disc is not None: - utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step))) - keep_ckpts = getattr(hps.train, 'keep_ckpts', 5) - if keep_ckpts > 0: - utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) - - - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - print("Evaluating ...") - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - speakers = speakers.cuda() - bert = bert.cuda() - tone = tone.cuda() - language = language.cuda() - for use_sdp in [True, False]: - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0) - y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - 
image_dict.update({ - f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - }) - audio_dict.update({ - f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]] - }) - image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]}) - - utils.summarize( - writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - -if __name__ == "__main__": - main() diff --git a/spaces/digitalxingtong/Taffy-Bert-VITS2/transcribe_genshin.py b/spaces/digitalxingtong/Taffy-Bert-VITS2/transcribe_genshin.py deleted file mode 100644 index acc98814af6189d129ab85946525bec55419a33f..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Taffy-Bert-VITS2/transcribe_genshin.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding=gbk -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count - -import soundfile -from scipy.io import wavfile -from tqdm import tqdm - -global speaker_annos -speaker_annos = [] - -def process(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, sr=args.sr) - soundfile.write( - os.path.join(args.out_dir, speaker, wav_name), - wav, - sr - ) - -def process_text(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - global speaker_annos - tr_name = wav_name.replace('.wav', '') - with open(args.out_dir+'/'+speaker+'/'+tr_name+'.lab', "r", encoding="utf-8") as file: - text = file.read() - text = text.replace("{NICKNAME}",'') - text = text.replace("{M#}{F#}",'') - text = text.replace("{M#}{F#}",'') - substring = "{M#}{F#}" - if substring in text: - if tr_name.endswith("a"): - text = text.replace("{M#}{F#}",'') - if tr_name.endswith("b"): - text = text.replace("{M#}{F#}",'') - text = text.replace("#",'') - text = "ZH|" + text + "\n" # - speaker_annos.append(args.out_dir+'/'+speaker+'/'+wav_name+ "|" + speaker + "|" + text) - - - -if __name__ == "__main__": - parent_dir = "./genshin_dataset/" - speaker_names = list(os.walk(parent_dir))[0][1] - parser = argparse.ArgumentParser() - parser.add_argument("--sr", type=int, default=44100, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./genshin_dataset", help="path to source dir") - parser.add_argument("--out_dir", type=str, default="./genshin_dataset", help="path to target dir") - args = parser.parse_args() - # processs = 8 - processs = cpu_count()-2 if cpu_count() >4 else 1 - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass - for i in os.listdir(spk_dir): - if i.endswith("wav"): - pro=(spk_dir, i, args) - process_text(pro) - if len(speaker_annos) == 0: - print("transcribe error!!!") - with open("./filelists/short_character_anno.list", 'w', encoding='utf-8') as f: - for line in speaker_annos: - f.write(line) - print("transcript file finished.") diff 
--git a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/monotonic_align/setup.py b/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/monotonic_align/setup.py deleted file mode 100644 index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/monotonic_align/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -from distutils.core import setup -from Cython.Build import cythonize -import numpy - -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) diff --git a/spaces/dineshreddy/WALT/mmdet/utils/profiling.py b/spaces/dineshreddy/WALT/mmdet/utils/profiling.py deleted file mode 100644 index 4be9222c37e922329d537f883f5587995e27efc6..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/utils/profiling.py +++ /dev/null @@ -1,39 +0,0 @@ -import contextlib -import sys -import time - -import torch - -if sys.version_info >= (3, 7): - - @contextlib.contextmanager - def profile_time(trace_name, - name, - enabled=True, - stream=None, - end_stream=None): - """Print time spent by CPU and GPU. - - Useful as a temporary context manager to find sweet spots of code - suitable for async implementation. - """ - if (not enabled) or not torch.cuda.is_available(): - yield - return - stream = stream if stream else torch.cuda.current_stream() - end_stream = end_stream if end_stream else stream - start = torch.cuda.Event(enable_timing=True) - end = torch.cuda.Event(enable_timing=True) - stream.record_event(start) - try: - cpu_start = time.monotonic() - yield - finally: - cpu_end = time.monotonic() - end_stream.record_event(end) - end.synchronize() - cpu_time = (cpu_end - cpu_start) * 1000 - gpu_time = start.elapsed_time(end) - msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms ' - msg += f'gpu_time {gpu_time:.2f} ms stream {stream}' - print(msg, end_stream) diff --git a/spaces/dirge/voicevox/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/policy.md b/spaces/dirge/voicevox/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/policy.md deleted file mode 100644 index 0328c63112a40f44145440562c8fe2d56ac86e38..0000000000000000000000000000000000000000 --- a/spaces/dirge/voicevox/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/policy.md +++ /dev/null @@ -1,3 +0,0 @@ -dummy2 policy - -https://voicevox.hiroshiba.jp/ diff --git a/spaces/dirge/voicevox/test/test_mora_list.py b/spaces/dirge/voicevox/test/test_mora_list.py deleted file mode 100644 index 25b287fa0e8b0febb1895ac84223823915e548ea..0000000000000000000000000000000000000000 --- a/spaces/dirge/voicevox/test/test_mora_list.py +++ /dev/null @@ -1,20 +0,0 @@ -from unittest import TestCase - -from voicevox_engine.mora_list import openjtalk_mora2text - - -class TestOpenJTalkMoraList(TestCase): - def test_mora2text(self): - self.assertEqual("ッ", openjtalk_mora2text["cl"]) - self.assertEqual("ティ", openjtalk_mora2text["ti"]) - self.assertEqual("トゥ", openjtalk_mora2text["tu"]) - self.assertEqual("ディ", openjtalk_mora2text["di"]) - # GitHub issue #60 - self.assertEqual("ギェ", openjtalk_mora2text["gye"]) - self.assertEqual("イェ", openjtalk_mora2text["ye"]) - - def test_mora2text_injective(self): - """異なるモーラが同じ読みがなに対応しないか確認する""" - values = list(openjtalk_mora2text.values()) - uniq_values = list(set(values)) - self.assertCountEqual(values, uniq_values) diff --git a/spaces/dogincharge/Shap-ER/app_image_to_3d.py b/spaces/dogincharge/Shap-ER/app_image_to_3d.py deleted file mode 100644 index 
491a1c513baaa919d762aba0d883ebf12d75d63d..0000000000000000000000000000000000000000 --- a/spaces/dogincharge/Shap-ER/app_image_to_3d.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python - -import pathlib -import shlex -import subprocess - -import gradio as gr - -from model import Model -from settings import CACHE_EXAMPLES, MAX_SEED -from utils import randomize_seed_fn - - -def create_demo(model: Model) -> gr.Blocks: - if not pathlib.Path('corgi.png').exists(): - subprocess.run( - shlex.split( - 'wget https://raw.githubusercontent.com/openai/shap-e/d99cedaea18e0989e340163dbaeb4b109fa9e8ec/shap_e/examples/example_data/corgi.png -O corgi.png' - )) - examples = ['corgi.png'] - - def process_example_fn(image_path: str) -> str: - return model.run_image(image_path) - - with gr.Blocks() as demo: - with gr.Box(): - image = gr.Image(label='Input image', show_label=False, type='pil') - run_button = gr.Button('Run') - result = gr.Model3D(label='Result', show_label=False) - with gr.Accordion('Advanced options', open=False): - seed = gr.Slider(label='Seed', - minimum=0, - maximum=MAX_SEED, - step=1, - value=0) - randomize_seed = gr.Checkbox(label='Randomize seed', - value=True) - guidance_scale = gr.Slider(label='Guidance scale', - minimum=1, - maximum=20, - step=0.1, - value=3.0) - num_inference_steps = gr.Slider( - label='Number of inference steps', - minimum=1, - maximum=100, - step=1, - value=64) - - gr.Examples(examples=examples, - inputs=image, - outputs=result, - fn=process_example_fn, - cache_examples=CACHE_EXAMPLES) - - inputs = [ - image, - seed, - guidance_scale, - num_inference_steps, - ] - - run_button.click( - fn=randomize_seed_fn, - inputs=[seed, randomize_seed], - outputs=seed, - queue=False, - ).then( - fn=model.run_image, - inputs=inputs, - outputs=result, - api_name='image-to-3d', - ) - return demo diff --git a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/extensions/gallery/script.py b/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/extensions/gallery/script.py deleted file mode 100644 index 993ef273839e7cfbf9e80f2d7f9d4a71d208b446..0000000000000000000000000000000000000000 --- a/spaces/dorkai/text-generation-webui-main/text-generation-webui-main/extensions/gallery/script.py +++ /dev/null @@ -1,96 +0,0 @@ -from pathlib import Path - -import gradio as gr - -from modules.html_generator import get_image_cache -from modules.shared import gradio - - -def generate_css(): - css = """ - .character-gallery > .gallery { - margin: 1rem 0; - display: grid !important; - grid-template-columns: repeat(auto-fit, minmax(150px, 1fr)); - grid-column-gap: 0.4rem; - grid-row-gap: 1.2rem; - } - - .character-gallery > .label { - display: none !important; - } - - .character-gallery button.gallery-item { - display: contents; - } - - .character-container { - cursor: pointer; - text-align: center; - position: relative; - opacity: 0.85; - } - - .character-container:hover { - opacity: 1; - } - - .character-container .placeholder, .character-container img { - width: 150px; - height: 200px; - background-color: gray; - object-fit: cover; - margin: 0 auto; - border-radius: 1rem; - border: 3px solid white; - box-shadow: 3px 3px 6px 0px rgb(0 0 0 / 50%); - } - - .character-name { - margin-top: 0.3rem; - display: block; - font-size: 1.2rem; - font-weight: 600; - overflow-wrap: anywhere; - } - """ - return css - - -def generate_html(): - cards = [] - # Iterate through files in image folder - for file in sorted(Path("characters").glob("*")): - if file.suffix in [".json", 
".yml", ".yaml"]: - character = file.stem - container_html = '
            ' - image_html = "
            " - - for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]: - if path.exists(): - image_html = f'' - break - - container_html += f'{image_html} {character}' - container_html += "
            " - cards.append([container_html, character]) - - return cards - - -def select_character(evt: gr.SelectData): - return (evt.value[1]) - - -def ui(): - with gr.Accordion("Character gallery", open=False): - update = gr.Button("Refresh") - gr.HTML(value="") - gallery = gr.Dataset(components=[gr.HTML(visible=False)], - label="", - samples=generate_html(), - elem_classes=["character-gallery"], - samples_per_page=50 - ) - update.click(generate_html, [], gallery) - gallery.select(select_character, None, gradio['character_menu']) diff --git a/spaces/errorok/rvc-models-en-test/app.py b/spaces/errorok/rvc-models-en-test/app.py deleted file mode 100644 index 832553d67e16e6e9cb7371914df8a494671680a5..0000000000000000000000000000000000000000 --- a/spaces/errorok/rvc-models-en-test/app.py +++ /dev/null @@ -1,188 +0,0 @@ -import os -import json -import argparse -import traceback -import logging -import gradio as gr -import numpy as np -import librosa -import torch -import asyncio -import edge_tts -from datetime import datetime -from fairseq import checkpoint_utils -from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono -from vc_infer_pipeline import VC -from config import ( - is_half, - device -) -logging.getLogger("numba").setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces - -def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index, file_big_npy): - def vc_fn( - input_audio, - f0_up_key, - f0_method, - index_rate, - tts_mode, - tts_text, - tts_voice - ): - try: - if tts_mode: - if len(tts_text) > 100 and limitation: - return "Text is too long", None - if tts_text is None or tts_voice is None: - return "You need to enter text and select a voice", None - asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3")) - audio, sr = librosa.load("tts.mp3", sr=16000, mono=True) - else: - if args.files: - audio, sr = librosa.load(input_audio, sr=16000, mono=True) - else: - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if duration > 20 and limitation: - return "Please upload an audio file that is less than 20 seconds. 
If you need to generate a longer audio file, please use Colab.", None - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - times = [0, 0, 0] - f0_up_key = int(f0_up_key) - audio_opt = vc.pipeline( - hubert_model, - net_g, - 0, - audio, - times, - f0_up_key, - f0_method, - file_index, - file_big_npy, - index_rate, - if_f0, - ) - print( - f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s" - ) - return "Success", (tgt_sr, audio_opt) - except: - info = traceback.format_exc() - print(info) - return info, (None, None) - return vc_fn - -def load_hubert(): - global hubert_model - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(device) - if is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - -def change_to_tts_mode(tts_mode): - if tts_mode: - return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True) - else: - return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--api', action="store_true", default=False) - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--files", action="store_true", default=False, help="load audio from path") - args, unknown = parser.parse_known_args() - load_hubert() - models = [] - tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) - voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list] - with open("weights/model_info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for name, info in models_info.items(): - if not info['enable']: - continue - title = info['title'] - author = info.get("author", None) - cover = f"weights/{name}/{info['cover']}" - index = f"weights/{name}/{info['feature_retrieval_library']}" - npy = f"weights/{name}/{info['feature_file']}" - cpt = torch.load(f"weights/{name}/{name}.pth", map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) # 不加这一行清不干净, 真奇葩 - net_g.eval().to(device) - if is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, device, is_half) - models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index, npy))) - with gr.Blocks() as app: - gr.Markdown( - "#
            RVC Models\n" - "##
            The input audio should be clean and pure voice without background music.\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=ardha27.Rvc-Models)\n\n" - "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1fI4CJNwyYNYRlB3y6Fh4hriILOE6_g9r?usp=share_link)\n\n" - "[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/ardha27pi/rvc-models?duplicate=true)\n\n" - "[![Train Own Voice](https://badgen.net/badge/icon/github?icon=github&label=Train%20Voice)](https://github.com/ardha27/AI-Song-Cover-RVC)\n\n" - "[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/R6R7AH1FA)\n\n" - ) - with gr.Tabs(): - for (name, title, author, cover, vc_fn) in models: - with gr.TabItem(name): - with gr.Row(): - gr.Markdown( - '
            ' - f'
            {title}
            \n'+ - (f'
            Model author: {author}
            ' if author else "")+ - (f'' if cover else "")+ - '
            ' - ) - with gr.Row(): - with gr.Column(): - if args.files: - vc_input = gr.Textbox(label="Input audio path") - else: - vc_input = gr.Audio(label="Input audio"+' (less than 20 seconds)' if limitation else '') - vc_transpose = gr.Number(label="Transpose", value=0) - vc_f0method = gr.Radio( - label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies", - choices=["pm", "harvest"], - value="pm", - interactive=True, - ) - vc_index_ratio = gr.Slider( - minimum=0, - maximum=1, - label="Retrieval feature ratio", - value=0.6, - interactive=True, - ) - tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False) - tts_text = gr.Textbox(visible=False,label="TTS text (100 words limitation)" if limitation else "TTS text") - tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female") - vc_submit = gr.Button("Generate", variant="primary") - with gr.Column(): - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio") - vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2]) - tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice]) - app.queue(concurrency_count=1, max_size=20, api_open=args.api).launch(share=args.share) \ No newline at end of file diff --git a/spaces/eswat/Image-and-3D-Model-Creator/PIFu/scripts/test.sh b/spaces/eswat/Image-and-3D-Model-Creator/PIFu/scripts/test.sh deleted file mode 100644 index a7a3d7ec6d2a3572bbb699f935aefd8c575e768e..0000000000000000000000000000000000000000 --- a/spaces/eswat/Image-and-3D-Model-Creator/PIFu/scripts/test.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -set -ex - -# Training -GPU_ID=0 -DISPLAY_ID=$((GPU_ID*10+10)) -NAME='spaces_demo' - -# Network configuration - -BATCH_SIZE=1 -MLP_DIM='257 1024 512 256 128 1' -MLP_DIM_COLOR='513 1024 512 256 128 3' - -# Reconstruction resolution -# NOTE: one can change here to reconstruct mesh in a different resolution. 
-# VOL_RES=256 - -# CHECKPOINTS_NETG_PATH='./checkpoints/net_G' -# CHECKPOINTS_NETC_PATH='./checkpoints/net_C' - -# TEST_FOLDER_PATH='./sample_images' - -# command -CUDA_VISIBLE_DEVICES=${GPU_ID} python ./apps/eval_spaces.py \ - --name ${NAME} \ - --batch_size ${BATCH_SIZE} \ - --mlp_dim ${MLP_DIM} \ - --mlp_dim_color ${MLP_DIM_COLOR} \ - --num_stack 4 \ - --num_hourglass 2 \ - --resolution ${VOL_RES} \ - --hg_down 'ave_pool' \ - --norm 'group' \ - --norm_color 'group' \ - --load_netG_checkpoint_path ${CHECKPOINTS_NETG_PATH} \ - --load_netC_checkpoint_path ${CHECKPOINTS_NETC_PATH} \ - --results_path ${RESULTS_PATH} \ - --img_path ${INPUT_IMAGE_PATH} \ No newline at end of file diff --git a/spaces/ethzanalytics/dialog-China/file_test.py b/spaces/ethzanalytics/dialog-China/file_test.py deleted file mode 100644 index e3b7589fb02320240bb42ea7f959b9d729fb8ff6..0000000000000000000000000000000000000000 --- a/spaces/ethzanalytics/dialog-China/file_test.py +++ /dev/null @@ -1,3 +0,0 @@ -import os - -print(os.path.exists("/Users/jonathan/ai-msgbot/gpt2_dailydialogue_355M_150Ksteps/pytorch_model.bin")) diff --git a/spaces/facebook/MusicGen/audiocraft/solvers/musicgen.py b/spaces/facebook/MusicGen/audiocraft/solvers/musicgen.py deleted file mode 100644 index 2439da33e5e3cf78a526fcc1a5d630a349e735ed..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/audiocraft/solvers/musicgen.py +++ /dev/null @@ -1,705 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from pathlib import Path -import time -import typing as tp -import warnings - -import flashy -import math -import omegaconf -import torch -from torch.nn import functional as F - -from . import base, builders -from .compression import CompressionSolver -from .. import metrics as eval_metrics -from .. import models -from ..data.audio_dataset import AudioDataset -from ..data.music_dataset import MusicDataset, MusicInfo, AudioInfo -from ..data.audio_utils import normalize_audio -from ..modules.conditioners import JointEmbedCondition, SegmentWithAttributes, WavCondition -from ..utils.cache import CachedBatchWriter, CachedBatchLoader -from ..utils.samples.manager import SampleManager -from ..utils.utils import get_dataset_from_loader, is_jsonable, warn_once - - -class MusicGenSolver(base.StandardSolver): - """Solver for MusicGen training task. 
- - Used in: https://arxiv.org/abs/2306.05284 - """ - DATASET_TYPE: builders.DatasetType = builders.DatasetType.MUSIC - - def __init__(self, cfg: omegaconf.DictConfig): - super().__init__(cfg) - # easier access to sampling parameters - self.generation_params = { - 'use_sampling': self.cfg.generate.lm.use_sampling, - 'temp': self.cfg.generate.lm.temp, - 'top_k': self.cfg.generate.lm.top_k, - 'top_p': self.cfg.generate.lm.top_p, - } - self._best_metric_name: tp.Optional[str] = 'ce' - - self._cached_batch_writer = None - self._cached_batch_loader = None - if cfg.cache.path: - if cfg.cache.write: - self._cached_batch_writer = CachedBatchWriter(Path(cfg.cache.path)) - if self.cfg.cache.write_num_shards: - self.logger.warning("Multiple shard cache, best_metric_name will be set to None.") - self._best_metric_name = None - else: - self._cached_batch_loader = CachedBatchLoader( - Path(cfg.cache.path), cfg.dataset.batch_size, cfg.dataset.num_workers, - min_length=self.cfg.optim.updates_per_epoch or 1) - self.dataloaders['original_train'] = self.dataloaders['train'] - self.dataloaders['train'] = self._cached_batch_loader # type: ignore - - @staticmethod - def get_eval_solver_from_sig(sig: str, dtype: tp.Optional[str] = None, - device: tp.Optional[str] = None, autocast: bool = True, - batch_size: tp.Optional[int] = None, - override_cfg: tp.Optional[tp.Union[dict, omegaconf.DictConfig]] = None, - **kwargs): - """Mostly a convenience function around magma.train.get_solver_from_sig, - populating all the proper param, deactivating EMA, FSDP, loading the best state, - basically all you need to get a solver ready to "play" with in single GPU mode - and with minimal memory overhead. - - Args: - sig (str): signature to load. - dtype (str or None): potential dtype, as a string, i.e. 'float16'. - device (str or None): potential device, as a string, i.e. 'cuda'. - override_cfg (dict or omegaconf.DictConfig or None): potential device, as a string, i.e. 'cuda'. - """ - from audiocraft import train - our_override_cfg: tp.Dict[str, tp.Any] = {'optim': {'ema': {'use': False}}} - our_override_cfg['autocast'] = autocast - if dtype is not None: - our_override_cfg['dtype'] = dtype - if device is not None: - our_override_cfg['device'] = device - if batch_size is not None: - our_override_cfg['dataset'] = {'batch_size': batch_size} - if override_cfg is None: - override_cfg = {} - override_cfg = omegaconf.OmegaConf.merge( - omegaconf.DictConfig(override_cfg), omegaconf.DictConfig(our_override_cfg)) # type: ignore - solver = train.get_solver_from_sig( - sig, override_cfg=override_cfg, - load_best=True, disable_fsdp=True, - ignore_state_keys=['optimizer', 'ema'], **kwargs) - solver.model.eval() - return solver - - def get_formatter(self, stage_name: str) -> flashy.Formatter: - return flashy.Formatter({ - 'lr': '.2E', - 'ce': '.3f', - 'ppl': '.3f', - 'grad_norm': '.3E', - }, exclude_keys=['ce_q*', 'ppl_q*']) - - @property - def best_metric_name(self) -> tp.Optional[str]: - return self._best_metric_name - - def build_model(self) -> None: - """Instantiate models and optimizer.""" - # we can potentially not use all quantizers with which the EnCodec model was trained - # (e.g. 
we trained the model with quantizers dropout) - self.compression_model = CompressionSolver.wrapped_model_from_checkpoint( - self.cfg, self.cfg.compression_model_checkpoint, device=self.device) - assert self.compression_model.sample_rate == self.cfg.sample_rate, ( - f"Compression model sample rate is {self.compression_model.sample_rate} but " - f"Solver sample rate is {self.cfg.sample_rate}." - ) - # ensure we have matching configuration between LM and compression model - assert self.cfg.transformer_lm.card == self.compression_model.cardinality, ( - "Cardinalities of the LM and compression model don't match: ", - f"LM cardinality is {self.cfg.transformer_lm.card} vs ", - f"compression model cardinality is {self.compression_model.cardinality}" - ) - assert self.cfg.transformer_lm.n_q == self.compression_model.num_codebooks, ( - "Numbers of codebooks of the LM and compression models don't match: ", - f"LM number of codebooks is {self.cfg.transformer_lm.n_q} vs ", - f"compression model numer of codebooks is {self.compression_model.num_codebooks}" - ) - self.logger.info("Compression model has %d codebooks with %d cardinality, and a framerate of %d", - self.compression_model.num_codebooks, self.compression_model.cardinality, - self.compression_model.frame_rate) - # instantiate LM model - self.model: models.LMModel = models.builders.get_lm_model(self.cfg).to(self.device) - if self.cfg.fsdp.use: - assert not self.cfg.autocast, "Cannot use autocast with fsdp" - self.model = self.wrap_with_fsdp(self.model) - self.register_ema('model') - # initialize optimization - self.optimizer = builders.get_optimizer(builders.get_optim_parameter_groups(self.model), self.cfg.optim) - self.lr_scheduler = builders.get_lr_scheduler(self.optimizer, self.cfg.schedule, self.total_updates) - self.register_stateful('compression_model', 'model', 'optimizer', 'lr_scheduler') - self.register_best_state('model') - self.autocast_dtype = { - 'float16': torch.float16, 'bfloat16': torch.bfloat16 - }[self.cfg.autocast_dtype] - self.scaler: tp.Optional[torch.cuda.amp.GradScaler] = None - if self.cfg.fsdp.use: - need_scaler = self.cfg.fsdp.param_dtype == 'float16' - else: - need_scaler = self.cfg.autocast and self.autocast_dtype is torch.float16 - if need_scaler: - if self.cfg.fsdp.use: - from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler - self.scaler = ShardedGradScaler() # type: ignore - else: - self.scaler = torch.cuda.amp.GradScaler() - self.register_stateful('scaler') - - def build_dataloaders(self) -> None: - """Instantiate audio dataloaders for each stage.""" - self.dataloaders = builders.get_audio_datasets(self.cfg, dataset_type=self.DATASET_TYPE) - - def show(self) -> None: - """Show the compression model and LM model.""" - self.logger.info("Compression model:") - self.log_model_summary(self.compression_model) - self.logger.info("LM model:") - self.log_model_summary(self.model) - - def load_state_dict(self, state: dict) -> None: - if 'condition_provider' in state: - model_state = state['model'] - condition_provider_state = state.pop('condition_provider') - prefix = 'condition_provider.' - for key, value in condition_provider_state.items(): - key = prefix + key - assert key not in model_state - model_state[key] = value - super().load_state_dict(state) - - def load_from_pretrained(self, name: str): - # TODO: support native HF versions of MusicGen. 
- lm_pkg = models.loaders.load_lm_model_ckpt(name) - state: dict = { - 'best_state': { - 'model': lm_pkg['best_state'], - }, - } - return state - - def _compute_cross_entropy( - self, logits: torch.Tensor, targets: torch.Tensor, mask: torch.Tensor - ) -> tp.Tuple[torch.Tensor, tp.List[torch.Tensor]]: - """Compute cross entropy between multi-codebook targets and model's logits. - The cross entropy is computed per codebook to provide codebook-level cross entropy. - Valid timesteps for each of the codebook are pulled from the mask, where invalid - timesteps are set to 0. - - Args: - logits (torch.Tensor): Model's logits of shape [B, K, T, card]. - targets (torch.Tensor): Target codes, of shape [B, K, T]. - mask (torch.Tensor): Mask for valid target codes, of shape [B, K, T]. - Returns: - ce (torch.Tensor): Cross entropy averaged over the codebooks - ce_per_codebook (list of torch.Tensor): Cross entropy per codebook (detached). - """ - B, K, T = targets.shape - assert logits.shape[:-1] == targets.shape - assert mask.shape == targets.shape - ce = torch.zeros([], device=targets.device) - ce_per_codebook: tp.List[torch.Tensor] = [] - for k in range(K): - logits_k = logits[:, k, ...].contiguous().view(-1, logits.size(-1)) # [B x T, card] - targets_k = targets[:, k, ...].contiguous().view(-1) # [B x T] - mask_k = mask[:, k, ...].contiguous().view(-1) # [B x T] - ce_targets = targets_k[mask_k] - ce_logits = logits_k[mask_k] - q_ce = F.cross_entropy(ce_logits, ce_targets) - ce += q_ce - ce_per_codebook.append(q_ce.detach()) - # average cross entropy across codebooks - ce = ce / K - return ce, ce_per_codebook - - def _prepare_tokens_and_attributes( - self, batch: tp.Tuple[torch.Tensor, tp.List[SegmentWithAttributes]], - check_synchronization_points: bool = False - ) -> tp.Tuple[dict, torch.Tensor, torch.Tensor]: - """Prepare input batchs for language model training. - - Args: - batch (tuple[torch.Tensor, list[SegmentWithAttributes]]): Input batch with audio tensor of shape [B, C, T] - and corresponding metadata as SegmentWithAttributes (with B items). - check_synchronization_points (bool): Whether to check for synchronization points slowing down training. - Returns: - Condition tensors (dict[str, any]): Preprocessed condition attributes. - Tokens (torch.Tensor): Audio tokens from compression model, of shape [B, K, T_s], - with B the batch size, K the number of codebooks, T_s the token timesteps. - Padding mask (torch.Tensor): Mask with valid positions in the tokens tensor, of shape [B, K, T_s]. - """ - if self.model.training: - warnings.warn( - "Up to version 1.0.1, the _prepare_tokens_and_attributes was evaluated with `torch.no_grad()`. " - "This is inconsistent with how model were trained in the MusicGen paper. We removed the " - "`torch.no_grad()` in version 1.1.0. Small changes to the final performance are expected. " - "Really sorry about that.") - if self._cached_batch_loader is None or self.current_stage != "train": - audio, infos = batch - audio = audio.to(self.device) - audio_tokens = None - assert audio.size(0) == len(infos), ( - f"Mismatch between number of items in audio batch ({audio.size(0)})", - f" and in metadata ({len(infos)})" - ) - else: - audio = None - # In that case the batch will be a tuple coming from the _cached_batch_writer bit below. 
- infos, = batch # type: ignore - assert all([isinstance(info, AudioInfo) for info in infos]) - assert all([info.audio_tokens is not None for info in infos]) # type: ignore - audio_tokens = torch.stack([info.audio_tokens for info in infos]).to(self.device) # type: ignore - audio_tokens = audio_tokens.long() - for info in infos: - if isinstance(info, MusicInfo): - # Careful here, if you want to use this condition_wav (e.b. chroma conditioning), - # then you must be using the chroma cache! otherwise the code will try - # to use this segment and fail (by that I mean you will see NaN everywhere). - info.self_wav = WavCondition( - torch.full([1, info.channels, info.total_frames], float('NaN')), - length=torch.tensor([info.n_frames]), - sample_rate=[info.sample_rate], - path=[info.meta.path], - seek_time=[info.seek_time]) - dataset = get_dataset_from_loader(self.dataloaders['original_train']) - assert isinstance(dataset, MusicDataset), type(dataset) - if dataset.paraphraser is not None and info.description is not None: - # Hackingly reapplying paraphraser when using cache. - info.description = dataset.paraphraser.sample_paraphrase( - info.meta.path, info.description) - # prepare attributes - attributes = [info.to_condition_attributes() for info in infos] - attributes = self.model.cfg_dropout(attributes) - attributes = self.model.att_dropout(attributes) - tokenized = self.model.condition_provider.tokenize(attributes) - - # Now we should be synchronization free. - if self.device == "cuda" and check_synchronization_points: - torch.cuda.set_sync_debug_mode("warn") - - if audio_tokens is None: - with torch.no_grad(): - audio_tokens, scale = self.compression_model.encode(audio) - assert scale is None, "Scaled compression model not supported with LM." - - with self.autocast: - condition_tensors = self.model.condition_provider(tokenized) - - # create a padding mask to hold valid vs invalid positions - padding_mask = torch.ones_like(audio_tokens, dtype=torch.bool, device=audio_tokens.device) - # replace encodec tokens from padded audio with special_token_id - if self.cfg.tokens.padding_with_special_token: - audio_tokens = audio_tokens.clone() - padding_mask = padding_mask.clone() - token_sample_rate = self.compression_model.frame_rate - B, K, T_s = audio_tokens.shape - for i in range(B): - n_samples = infos[i].n_frames - audio_sample_rate = infos[i].sample_rate - # take the last token generated from actual audio frames (non-padded audio) - valid_tokens = math.floor(float(n_samples) / audio_sample_rate * token_sample_rate) - audio_tokens[i, :, valid_tokens:] = self.model.special_token_id - padding_mask[i, :, valid_tokens:] = 0 - - if self.device == "cuda" and check_synchronization_points: - torch.cuda.set_sync_debug_mode("default") - - if self._cached_batch_writer is not None and self.current_stage == 'train': - assert self._cached_batch_loader is None - assert audio_tokens is not None - for info, one_audio_tokens in zip(infos, audio_tokens): - assert isinstance(info, AudioInfo) - if isinstance(info, MusicInfo): - assert not info.joint_embed, "joint_embed and cache not supported yet." 
- info.self_wav = None - assert one_audio_tokens.max() < 2**15, one_audio_tokens.max().item() - info.audio_tokens = one_audio_tokens.short().cpu() - self._cached_batch_writer.save(infos) - - return condition_tensors, audio_tokens, padding_mask - - def run_step(self, idx: int, batch: tp.Tuple[torch.Tensor, tp.List[SegmentWithAttributes]], metrics: dict) -> dict: - """Perform one training or valid step on a given batch.""" - check_synchronization_points = idx == 1 and self.device == 'cuda' - - condition_tensors, audio_tokens, padding_mask = self._prepare_tokens_and_attributes( - batch, check_synchronization_points) - - self.deadlock_detect.update('tokens_and_conditions') - - if check_synchronization_points: - torch.cuda.set_sync_debug_mode('warn') - - with self.autocast: - model_output = self.model.compute_predictions(audio_tokens, [], condition_tensors) # type: ignore - logits = model_output.logits - mask = padding_mask & model_output.mask - ce, ce_per_codebook = self._compute_cross_entropy(logits, audio_tokens, mask) - loss = ce - self.deadlock_detect.update('loss') - - if check_synchronization_points: - torch.cuda.set_sync_debug_mode('default') - - if self.is_training: - metrics['lr'] = self.optimizer.param_groups[0]['lr'] - if self.scaler is not None: - loss = self.scaler.scale(loss) - self.deadlock_detect.update('scale') - if self.cfg.fsdp.use: - loss.backward() - flashy.distrib.average_tensors(self.model.buffers()) - elif self.cfg.optim.eager_sync: - with flashy.distrib.eager_sync_model(self.model): - loss.backward() - else: - # this should always be slower but can be useful - # for weird use cases like multiple backwards. - loss.backward() - flashy.distrib.sync_model(self.model) - self.deadlock_detect.update('backward') - - if self.scaler is not None: - self.scaler.unscale_(self.optimizer) - if self.cfg.optim.max_norm: - if self.cfg.fsdp.use: - metrics['grad_norm'] = self.model.clip_grad_norm_(self.cfg.optim.max_norm) # type: ignore - else: - metrics['grad_norm'] = torch.nn.utils.clip_grad_norm_( - self.model.parameters(), self.cfg.optim.max_norm - ) - if self.scaler is None: - self.optimizer.step() - else: - self.scaler.step(self.optimizer) - self.scaler.update() - if self.lr_scheduler: - self.lr_scheduler.step() - self.optimizer.zero_grad() - self.deadlock_detect.update('optim') - if self.scaler is not None: - scale = self.scaler.get_scale() - metrics['grad_scale'] = scale - if not loss.isfinite().all(): - raise RuntimeError("Model probably diverged.") - - metrics['ce'] = ce - metrics['ppl'] = torch.exp(ce) - for k, ce_q in enumerate(ce_per_codebook): - metrics[f'ce_q{k + 1}'] = ce_q - metrics[f'ppl_q{k + 1}'] = torch.exp(ce_q) - - return metrics - - @torch.no_grad() - def run_generate_step(self, batch: tp.Tuple[torch.Tensor, tp.List[SegmentWithAttributes]], - gen_duration: float, prompt_duration: tp.Optional[float] = None, - remove_prompt: bool = False, - **generation_params) -> dict: - """Run generate step on a batch of optional audio tensor and corresponding attributes. - - Args: - batch (tuple[torch.Tensor, list[SegmentWithAttributes]]): - use_prompt (bool): Whether to do audio continuation generation with prompt from audio batch. - gen_duration (float): Target audio duration for the generation. - prompt_duration (float, optional): Duration for the audio prompt to use for continuation. - remove_prompt (bool, optional): Whether to remove the prompt from the generated audio. - generation_params: Additional generation parameters. 
- Returns: - gen_outputs (dict): Generation outputs, consisting in audio, audio tokens from both the generation - and the prompt along with additional information. - """ - bench_start = time.time() - audio, meta = batch - assert audio.size(0) == len(meta), ( - f"Mismatch between number of items in audio batch ({audio.size(0)})", - f" and in metadata ({len(meta)})" - ) - # prepare attributes - attributes = [x.to_condition_attributes() for x in meta] - # TODO: Add dropout for chroma? - - # prepare audio prompt - if prompt_duration is None: - prompt_audio = None - else: - assert prompt_duration < gen_duration, "Prompt duration must be lower than target generation duration" - prompt_audio_frames = int(prompt_duration * self.compression_model.sample_rate) - prompt_audio = audio[..., :prompt_audio_frames] - - # get audio tokens from compression model - if prompt_audio is None or prompt_audio.nelement() == 0: - num_samples = len(attributes) - prompt_tokens = None - else: - num_samples = None - prompt_audio = prompt_audio.to(self.device) - prompt_tokens, scale = self.compression_model.encode(prompt_audio) - assert scale is None, "Compression model in MusicGen should not require rescaling." - - # generate by sampling from the LM - with self.autocast: - total_gen_len = math.ceil(gen_duration * self.compression_model.frame_rate) - gen_tokens = self.model.generate( - prompt_tokens, attributes, max_gen_len=total_gen_len, - num_samples=num_samples, **self.generation_params) - - # generate audio from tokens - assert gen_tokens.dim() == 3 - gen_audio = self.compression_model.decode(gen_tokens, None) - - bench_end = time.time() - gen_outputs = { - 'rtf': (bench_end - bench_start) / gen_duration, - 'ref_audio': audio, - 'gen_audio': gen_audio, - 'gen_tokens': gen_tokens, - 'prompt_audio': prompt_audio, - 'prompt_tokens': prompt_tokens, - } - return gen_outputs - - def generate_audio(self) -> dict: - """Audio generation stage.""" - generate_stage_name = f'{self.current_stage}' - sample_manager = SampleManager(self.xp) - self.logger.info(f"Generating samples in {sample_manager.base_folder}") - loader = self.dataloaders['generate'] - updates = len(loader) - lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates) - - dataset = get_dataset_from_loader(loader) - dataset_duration = dataset.segment_duration - assert dataset_duration is not None - assert isinstance(dataset, AudioDataset) - target_duration = self.cfg.generate.lm.gen_duration - prompt_duration = self.cfg.generate.lm.prompt_duration - if target_duration is None: - target_duration = dataset_duration - if prompt_duration is None: - prompt_duration = dataset_duration / 4 - assert prompt_duration < dataset_duration, ( - f"Specified prompt duration ({prompt_duration}s) is longer", - f" than reference audio duration ({dataset_duration}s)" - ) - - def get_hydrated_conditions(meta: tp.List[SegmentWithAttributes]): - hydrated_conditions = [] - for sample in [x.to_condition_attributes() for x in meta]: - cond_dict = {} - for cond_type in sample.__annotations__.keys(): - for cond_key, cond_val in getattr(sample, cond_type).items(): - if cond_key not in self.model.condition_provider.conditioners.keys(): - continue - if is_jsonable(cond_val): - cond_dict[cond_key] = cond_val - elif isinstance(cond_val, WavCondition): - cond_dict[cond_key] = cond_val.path - elif isinstance(cond_val, JointEmbedCondition): - cond_dict[cond_key] = cond_val.text # only support text at inference for now - else: - # if we reached this point, it is not 
clear how to log the condition - # so we just log the type. - cond_dict[cond_key] = str(type(cond_val)) - continue - hydrated_conditions.append(cond_dict) - return hydrated_conditions - - metrics: dict = {} - average = flashy.averager() - for batch in lp: - audio, meta = batch - # metadata for sample manager - hydrated_conditions = get_hydrated_conditions(meta) - sample_generation_params = { - **{f'classifier_free_guidance_{k}': v for k, v in self.cfg.classifier_free_guidance.items()}, - **self.generation_params - } - if self.cfg.generate.lm.unprompted_samples: - if self.cfg.generate.lm.gen_gt_samples: - # get the ground truth instead of generation - self.logger.warn( - "Use ground truth instead of audio generation as generate.lm.gen_gt_samples=true") - gen_unprompted_audio = audio - rtf = 1. - else: - gen_unprompted_outputs = self.run_generate_step( - batch, gen_duration=target_duration, prompt_duration=None, - **self.generation_params) - gen_unprompted_audio = gen_unprompted_outputs['gen_audio'].cpu() - rtf = gen_unprompted_outputs['rtf'] - sample_manager.add_samples( - gen_unprompted_audio, self.epoch, hydrated_conditions, - ground_truth_wavs=audio, generation_args=sample_generation_params) - - if self.cfg.generate.lm.prompted_samples: - gen_outputs = self.run_generate_step( - batch, gen_duration=target_duration, prompt_duration=prompt_duration, - **self.generation_params) - gen_audio = gen_outputs['gen_audio'].cpu() - prompt_audio = gen_outputs['prompt_audio'].cpu() - sample_manager.add_samples( - gen_audio, self.epoch, hydrated_conditions, - prompt_wavs=prompt_audio, ground_truth_wavs=audio, - generation_args=sample_generation_params) - - metrics['rtf'] = rtf - metrics = average(metrics) - - flashy.distrib.barrier() - return metrics - - def generate(self) -> dict: - """Generate stage.""" - self.model.eval() - with torch.no_grad(): - return self.generate_audio() - - def run_epoch(self): - if self.cfg.cache.write: - if ((self.epoch - 1) % self.cfg.cache.write_num_shards) != self.cfg.cache.write_shard: - return - super().run_epoch() - - def train(self): - """Train stage. 
- """ - if self._cached_batch_writer is not None: - self._cached_batch_writer.start_epoch(self.epoch) - if self._cached_batch_loader is None: - dataset = get_dataset_from_loader(self.dataloaders['train']) - assert isinstance(dataset, AudioDataset) - dataset.current_epoch = self.epoch - else: - self._cached_batch_loader.start_epoch(self.epoch) - return super().train() - - def evaluate_audio_generation(self) -> dict: - """Evaluate audio generation with off-the-shelf metrics.""" - evaluate_stage_name = f'{self.current_stage}_generation' - # instantiate evaluation metrics, if at least one metric is defined, run audio generation evaluation - fad: tp.Optional[eval_metrics.FrechetAudioDistanceMetric] = None - kldiv: tp.Optional[eval_metrics.KLDivergenceMetric] = None - text_consistency: tp.Optional[eval_metrics.TextConsistencyMetric] = None - chroma_cosine: tp.Optional[eval_metrics.ChromaCosineSimilarityMetric] = None - should_run_eval = False - eval_chroma_wavs: tp.Optional[torch.Tensor] = None - if self.cfg.evaluate.metrics.fad: - fad = builders.get_fad(self.cfg.metrics.fad).to(self.device) - should_run_eval = True - if self.cfg.evaluate.metrics.kld: - kldiv = builders.get_kldiv(self.cfg.metrics.kld).to(self.device) - should_run_eval = True - if self.cfg.evaluate.metrics.text_consistency: - text_consistency = builders.get_text_consistency(self.cfg.metrics.text_consistency).to(self.device) - should_run_eval = True - if self.cfg.evaluate.metrics.chroma_cosine: - chroma_cosine = builders.get_chroma_cosine_similarity(self.cfg.metrics.chroma_cosine).to(self.device) - # if we have predefind wavs for chroma we should purge them for computing the cosine metric - has_predefined_eval_chromas = 'self_wav' in self.model.condition_provider.conditioners and \ - self.model.condition_provider.conditioners['self_wav'].has_eval_wavs() - if has_predefined_eval_chromas: - warn_once(self.logger, "Attempting to run cosine eval for config with pre-defined eval chromas! 
" - 'Resetting eval chromas to None for evaluation.') - eval_chroma_wavs = self.model.condition_provider.conditioners.self_wav.eval_wavs # type: ignore - self.model.condition_provider.conditioners.self_wav.reset_eval_wavs(None) # type: ignore - should_run_eval = True - - def get_compressed_audio(audio: torch.Tensor) -> torch.Tensor: - audio_tokens, scale = self.compression_model.encode(audio.to(self.device)) - compressed_audio = self.compression_model.decode(audio_tokens, scale) - return compressed_audio[..., :audio.shape[-1]] - - metrics: dict = {} - if should_run_eval: - loader = self.dataloaders['evaluate'] - updates = len(loader) - lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates) - average = flashy.averager() - dataset = get_dataset_from_loader(loader) - assert isinstance(dataset, AudioDataset) - self.logger.info(f"Computing evaluation metrics on {len(dataset)} samples") - - for idx, batch in enumerate(lp): - audio, meta = batch - assert all([self.cfg.sample_rate == m.sample_rate for m in meta]) - - target_duration = audio.shape[-1] / self.cfg.sample_rate - if self.cfg.evaluate.fixed_generation_duration: - target_duration = self.cfg.evaluate.fixed_generation_duration - - gen_outputs = self.run_generate_step( - batch, gen_duration=target_duration, - **self.generation_params - ) - y_pred = gen_outputs['gen_audio'].detach() - y_pred = y_pred[..., :audio.shape[-1]] - - normalize_kwargs = dict(self.cfg.generate.audio) - normalize_kwargs.pop('format', None) - y_pred = torch.stack([normalize_audio(w, **normalize_kwargs) for w in y_pred], dim=0).cpu() - y = audio.cpu() # should already be on CPU but just in case - sizes = torch.tensor([m.n_frames for m in meta]) # actual sizes without padding - sample_rates = torch.tensor([m.sample_rate for m in meta]) # sample rates for audio samples - audio_stems = [Path(m.meta.path).stem + f"_{m.seek_time}" for m in meta] - - if fad is not None: - if self.cfg.metrics.fad.use_gt: - y_pred = get_compressed_audio(y).cpu() - fad.update(y_pred, y, sizes, sample_rates, audio_stems) - if kldiv is not None: - if self.cfg.metrics.kld.use_gt: - y_pred = get_compressed_audio(y).cpu() - kldiv.update(y_pred, y, sizes, sample_rates) - if text_consistency is not None: - texts = [m.description for m in meta] - if self.cfg.metrics.text_consistency.use_gt: - y_pred = y - text_consistency.update(y_pred, texts, sizes, sample_rates) - if chroma_cosine is not None: - if self.cfg.metrics.chroma_cosine.use_gt: - y_pred = get_compressed_audio(y).cpu() - chroma_cosine.update(y_pred, y, sizes, sample_rates) - # restore chroma conditioner's eval chroma wavs - if eval_chroma_wavs is not None: - self.model.condition_provider.conditioners['self_wav'].reset_eval_wavs(eval_chroma_wavs) - - flashy.distrib.barrier() - if fad is not None: - metrics['fad'] = fad.compute() - if kldiv is not None: - kld_metrics = kldiv.compute() - metrics.update(kld_metrics) - if text_consistency is not None: - metrics['text_consistency'] = text_consistency.compute() - if chroma_cosine is not None: - metrics['chroma_cosine'] = chroma_cosine.compute() - metrics = average(metrics) - metrics = flashy.distrib.average_metrics(metrics, len(loader)) - - return metrics - - def evaluate(self) -> dict: - """Evaluate stage.""" - self.model.eval() - with torch.no_grad(): - metrics: dict = {} - if self.cfg.evaluate.metrics.base: - metrics.update(self.common_train_valid('evaluate')) - gen_metrics = self.evaluate_audio_generation() - return {**metrics, **gen_metrics} 
diff --git a/spaces/fakezeta/pdfchat/LICENSE.md b/spaces/fakezeta/pdfchat/LICENSE.md deleted file mode 100644 index cb564dabeb29dd072fb252024a34d129c0fa3c44..0000000000000000000000000000000000000000 --- a/spaces/fakezeta/pdfchat/LICENSE.md +++ /dev/null @@ -1,163 +0,0 @@ -GNU Lesser General Public License -================================= - -_Version 3, 29 June 2007_ -_Copyright © 2007 Free Software Foundation, Inc. <>_ - -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - - -This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - -### 0. Additional Definitions - -As used herein, “this License” refers to version 3 of the GNU Lesser -General Public License, and the “GNU GPL” refers to version 3 of the GNU -General Public License. - -“The Library” refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - -An “Application” is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - -A “Combined Work” is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the “Linked -Version”. - -The “Minimal Corresponding Source” for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - -The “Corresponding Application Code” for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - -### 1. Exception to Section 3 of the GNU GPL - -You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - -### 2. Conveying Modified Versions - -If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - -* **a)** under this License, provided that you make a good faith effort to -ensure that, in the event an Application does not supply the -function or data, the facility still operates, and performs -whatever part of its purpose remains meaningful, or - -* **b)** under the GNU GPL, with none of the additional permissions of -this License applicable to that copy. - -### 3. Object Code Incorporating Material from Library Header Files - -The object code form of an Application may incorporate material from -a header file that is part of the Library. 
You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - -* **a)** Give prominent notice with each copy of the object code that the -Library is used in it and that the Library and its use are -covered by this License. -* **b)** Accompany the object code with a copy of the GNU GPL and this license -document. - -### 4. Combined Works - -You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - -* **a)** Give prominent notice with each copy of the Combined Work that -the Library is used in it and that the Library and its use are -covered by this License. - -* **b)** Accompany the Combined Work with a copy of the GNU GPL and this license -document. - -* **c)** For a Combined Work that displays copyright notices during -execution, include the copyright notice for the Library among -these notices, as well as a reference directing the user to the -copies of the GNU GPL and this license document. - -* **d)** Do one of the following: - - **0)** Convey the Minimal Corresponding Source under the terms of this -License, and the Corresponding Application Code in a form -suitable for, and under terms that permit, the user to -recombine or relink the Application with a modified version of -the Linked Version to produce a modified Combined Work, in the -manner specified by section 6 of the GNU GPL for conveying -Corresponding Source. - - **1)** Use a suitable shared library mechanism for linking with the -Library. A suitable mechanism is one that **(a)** uses at run time -a copy of the Library already present on the user's computer -system, and **(b)** will operate properly with a modified version -of the Library that is interface-compatible with the Linked -Version. - -* **e)** Provide Installation Information, but only if you would otherwise -be required to provide such information under section 6 of the -GNU GPL, and only to the extent that such information is -necessary to install and execute a modified version of the -Combined Work produced by recombining or relinking the -Application with a modified version of the Linked Version. (If -you use option **4d0**, the Installation Information must accompany -the Minimal Corresponding Source and Corresponding Application -Code. If you use option **4d1**, you must provide the Installation -Information in the manner specified by section 6 of the GNU GPL -for conveying Corresponding Source.) - -### 5. Combined Libraries - -You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - -* **a)** Accompany the combined library with a copy of the same work based -on the Library, uncombined with any other library facilities, -conveyed under the terms of this License. -* **b)** Give prominent notice with the combined library that part of it -is a work based on the Library, and explaining where to find the -accompanying uncombined form of the same work. - -### 6. 
Revised Versions of the GNU Lesser General Public License - -The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License “or any later version” -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - -If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/spaces/falterWliame/Face_Mask_Detection/Download 720p Dharam Sankat Mein Movies In Hindi !!BETTER!!.md b/spaces/falterWliame/Face_Mask_Detection/Download 720p Dharam Sankat Mein Movies In Hindi !!BETTER!!.md deleted file mode 100644 index 63b56a76c2c5106758141cca00a9d4c149426eb1..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Download 720p Dharam Sankat Mein Movies In Hindi !!BETTER!!.md +++ /dev/null @@ -1,22 +0,0 @@ -

            download 720p Dharam Sankat Mein movies in hindi


            DOWNLOAD ★★★ https://urlca.com/2uDbMa



            -
            -gudeti hai desh mein movies 2 zamana hai - -Screen-Shot-of-movie.jpg Screen Shot of Movie 2019 From Movie download Screen Shot of movie 2019. India's leading Bollywood production company, UTV, got itself involved in an online war with the makers of Akshay Kumar's No. 1 movie Slumdog Millionaire. The online battle between Bollywood and Oscar-winning movie Slumdog Millionaire took an ugly turn. The movie is the biggest-grossing movie of 2008, having made Rs 88 crore worldwide. It has also won seven Oscars including best actress for Juhi Chawla, best director for Danny Boyle and best film. - -The movie was based on the true story of a poor kid who becomes a millionaire. The film was released in India on December 14, 2008. - -"It's unfortunate that some other Bollywood filmmakers feel it necessary to express their dislike towards the Slumdog Millionaire movie by suggesting similar names for the movie (ex. Dhool Katha)," said UTV Worldwide's country head Harish Uppal. He said he would also wait for the Prime Minister and the President to speak about the matter. "Let's hope this is a one-off error on the part of the makers. Otherwise, the MPAA India needs to get back on their feet and start upholding intellectual property rights as it is a key component of our economy," Uppal added. - -Director Danny Boyle, however, demanded an apology. "I think the 'bogus' (sic) attack on the Bollywood industry by the MPAA and FIPRESCI is disgraceful. - -Our industry has developed a great reputation over the past decade and I hope this controversy will not tarnish it. I would like to request the board to issue an apology and meet with us to discuss this serious problem in the future. I also request the Fipresci members to speak to the president of the MPAA to get it rectified."Q: - -How to change css variables in parent when css variables are set in child component - -I have a web page with an image which looks like this : - -I have a parent component like this 4fefd39f24
            -
            -
            -

            diff --git a/spaces/fatiXbelha/sd/4D Wallpaper 2023 APK The Best App for 4K HD 3D Wallpapers and Backgrounds.md b/spaces/fatiXbelha/sd/4D Wallpaper 2023 APK The Best App for 4K HD 3D Wallpapers and Backgrounds.md deleted file mode 100644 index 95c941157a41f47fea6ff6282b8ad07c804dd8f9..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/4D Wallpaper 2023 APK The Best App for 4K HD 3D Wallpapers and Backgrounds.md +++ /dev/null @@ -1,117 +0,0 @@ -
            -

            4D Wallpaper 2023 APK: A New Way to Customize Your Phone

            -

            If you are bored with the same old wallpapers on your phone, you might want to try something new and exciting. How about a wallpaper that moves and changes according to your device's motion? How about a wallpaper that gives you a realistic and immersive view of your favorite images? How about a wallpaper that makes your phone stand out from the crowd?

            -

            4d wallpaper 2023 apk


Download Zip https://urllie.com/2uNEdp



            -

If you are interested in any of these, then you should check out 4D Wallpaper 2023 APK, a new app that offers you a collection of stunning and dynamic wallpapers that use the phone's gyroscope or accelerometer to create a 4D effect. In this article, we will tell you everything you need to know about this app, including what it is, how to download and install it, how to use it, and what its benefits are.

            -

            What is 4D Wallpaper?

            -

            Before we dive into the details of the app, let's first understand what 4D wallpaper is and how it differs from other types of wallpapers.

            -

            Definition and features of 4D wallpaper

            -

            According to Wallpapers.com, 4D wallpapers are dynamic wallpapers that use the phone's gyroscope or accelerometer to make static images appear alive. They create a dimensionally enhanced view of the subject and background, making it look 3-dimensional.

            -

            Some of the features of 4D wallpapers are:

            -
              -
• They have a parallax effect, which means they shift and tilt according to your device's movement.
• They have a depth effect, which means they create a sense of distance and perspective between the foreground and background.
• They have a texture effect, which means they add details and realism to the images.
• They have a variety of themes and categories, such as nature, animals, abstract, art, etc.
• They are compatible with most Android devices (such as Galaxy, Xiaomi, Vivo, OPPO, Honor, etc.)
            -

            Examples of 4D wallpaper

            -

To give you an idea of what 4D wallpapers look like, here are some examples from Wallpaper Flare:

            -

            4d live wallpaper 2023 app download
            -4d wallpaper 2023 hd free apk
            -4d wallpaper 2023 pro mod apk
            -4d wallpaper 2023 anime motion background
            -4d wallpaper 2023 premium unlocked apk
            -4d wallpaper 2023 latest version apk
            -4d wallpaper 2023 for android phone
            -4d wallpaper 2023 best collection apk
            -4d wallpaper 2023 cool and stylish
            -4d wallpaper 2023 full hd quality apk
            -4d wallpaper 2023 new update apk
            -4d wallpaper 2023 offline download apk
            -4d wallpaper 2023 realistic effect apk
            -4d wallpaper 2023 super amoled apk
            -4d wallpaper 2023 ultra hd apk
            -4d wallpaper 2023 video loop apk
            -4d wallpaper 2023 with sound apk
            -download 4d wallpaper 2023 apk for free
            -how to install 4d wallpaper 2023 apk
            -how to use 4d wallpaper 2023 apk
            -is 4d wallpaper 2023 apk safe to use
            -what is new in 4d wallpaper 2023 apk
            -where to get 4d wallpaper 2023 apk
            -why choose 4d wallpaper 2023 apk
            -wallify: 4d wallpapers - apps on google play[^1^]

Black and blue cube wallpaper, Vaporwave cityscape wallpaper, Pink and white geometric artwork
Cute illustration wallpaper, Planet earth wallpaper, Diamonds wallpaper
            -

            As you can see, these wallpapers are not just static images, but dynamic and interactive ones that change according to your phone's orientation and movement. They give you a different perspective every time you look at them.

            -

            How to Download and Install 4D Wallpaper 2023 APK

            -

Now that you know what 4D wallpaper is and what it looks like, you might be wondering how to get it on your phone. Well, it's very easy and simple. Just follow these steps:

            -

            Steps to download and install the app

            -
              -
1. Go to APKPure.com and search for 4D Wallpaper 2023 APK.
2. Select the app from the list of results and click on the Download APK button.
3. Wait for the download to finish and then open the file.
4. If you see a warning message that says "For your security, your phone is not allowed to install unknown apps from this source", go to your phone's settings and enable the option to allow installation from unknown sources.
5. Follow the instructions on the screen and install the app.
6. Once the installation is complete, you can launch the app and enjoy the 4D wallpapers.
            -

            Tips and tricks to use the app

            -

            Here are some tips and tricks to help you make the most of the app:

            -
              -
• To apply a 4D wallpaper, simply browse through the categories or search for your favorite one, then tap on it and select Set as Wallpaper.
• To adjust the 4D effect, you can use the slider at the bottom of the screen to change the intensity of the parallax, depth, and texture effects.
• To save a 4D wallpaper to your device, you can tap on the Download button at the top right corner of the screen.
• To share a 4D wallpaper with your friends, you can tap on the Share button at the top right corner of the screen and choose your preferred app or platform.
• To rate or review a 4D wallpaper, you can tap on the Star button at the top right corner of the screen and give your feedback.
            -

            Benefits of Using 4D Wallpaper 2023 APK

            -

            You might be wondering why you should use 4D wallpaper instead of regular wallpaper. Well, there are many benefits of using 4D wallpaper, such as:

            -

            Advantages of 4D wallpaper over traditional wallpaper

            -
              -
• They make your phone look more attractive and unique.
• They give you a more realistic and immersive experience of your favorite images.
• They add more fun and excitement to your phone usage.
• They suit different moods and occasions.
• They are easy to use and customize.
            -

            Disadvantages and limitations of 4D wallpaper

            -

            However, there are also some disadvantages and limitations of using 4D wallpaper, such as:

            -
              -
• They may consume more battery power than regular wallpaper.
• They may not work well on some devices or models.
• They may not be compatible with some launchers or themes.
• They may not have enough variety or quality for some users.
• They may cause some lag or performance issues on some devices.
            -

            Conclusion

            -

            In conclusion, 4D Wallpaper 2023 APK is a great app that offers you a collection of stunning and dynamic wallpapers that use the phone's gyroscope or accelerometer to create a 4D effect. It is easy to download, install, and use, and it has many benefits over traditional wallpaper. However, it also has some disadvantages and limitations that you should be aware of before using it. If you are looking for a new way to customize your phone, you should give this app a try and see for yourself how amazing it is.

            -

            FAQs

            -

            Q1: Is 4D Wallpaper 2023 APK safe to use?

            -

            A1: Yes, 4D Wallpaper 2023 APK is safe to use as long as you download it from a trusted source like APKPure.com. It does not contain any viruses or malware that could harm your device or data. However, you should always be careful when installing apps from unknown sources and check their permissions and reviews before using them.

            -

            Q2: How much battery does 4D wallpaper consume?

            -

            A2: The battery consumption of 4D wallpaper depends on various factors, such as your device's model, screen size, brightness, resolution, etc. Generally speaking, 4D wallpaper consumes more battery than regular wallpaper because it uses more resources and processes to create the dynamic effect. However, you can reduce the battery consumption by lowering the intensity of the 4D effect, using dark or simple wallpapers, and turning off the wallpaper when not in use.

            -

            Q3: Can I create my own 4D wallpaper?

            -

            A3: Yes, you can create your own 4D wallpaper using the app. You can either use your own photos or images from the app's gallery, and then apply the 4D effect to them. You can also adjust the parameters of the effect, such as the parallax, depth, and texture. You can then save and apply your custom 4D wallpaper to your device.

            -

            Q4: What are the best sources for 4D wallpaper?

            -

            A4: There are many sources for 4D wallpaper, both online and offline. Some of the best sources are:

            -
              -
• The app itself, which has a large and diverse collection of 4D wallpapers in various categories and themes.
• Other apps or websites that offer 4D wallpapers, such as 4D Live Wallpaper, 4K Wallpapers, Zedge, etc.
• Online communities or forums that share and discuss 4D wallpapers, such as Reddit, Quora, Pinterest, etc.
• Your own creativity and imagination, which can help you create your own 4D wallpapers using your own photos or images.
            -

            Q5: How can I contact the developer of 4D Wallpaper 2023 APK?

            -

            A5: If you have any questions, suggestions, feedback, or issues regarding the app, you can contact the developer of 4D Wallpaper 2023 APK by emailing them at 4dwallpaper2023@gmail.com. You can also visit their website at https://www.4dwallpaper2023.com/ for more information and updates.

            197e85843d
            -
            -
            \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/AZPDD - srclk imtahanna hazrlamaq n nmr 1 tlimat.md b/spaces/fatiXbelha/sd/AZPDD - srclk imtahanna hazrlamaq n nmr 1 tlimat.md deleted file mode 100644 index a5d2403da40c7f241282e28215cba3d926f073b2..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/AZPDD - srclk imtahanna hazrlamaq n nmr 1 tlimat.md +++ /dev/null @@ -1,108 +0,0 @@ - -

            Azpdd yukle: How to download and use the best app for learning the road traffic rules in Azerbaijan

            -

            If you are planning to get a driving license in Azerbaijan, you need to pass a theoretical exam on the road traffic rules. This exam can be challenging and stressful, especially if you don't have enough time or resources to prepare for it. But don't worry, there is a solution that can make your life easier and help you ace the exam. It's called azpdd yukle, and it's a free app that you can download on your smartphone or tablet. In this article, we will tell you everything you need to know about azpdd yukle, how to download and use it, what are its main features, and what are the benefits of using it. Let's get started!

            -

            Introduction

            -

            What is azpdd yukle and why do you need it?

            -

            Azpdd yukle is an app that helps you learn the road traffic rules in Azerbaijan. It is based on the official materials and tests that are used by the State Traffic Police Department (DYP) for issuing driving licenses. The app contains more than 3000 questions and answers, covering all the categories and topics that you need to know for the exam. You can use azpdd yukle to practice your knowledge, test your skills, and improve your confidence before taking the real exam.

            -

            azpdd yukle


            Download File » https://urllie.com/2uNvYt



            -

            How to download and install azpdd yukle on your device?

            -

            Downloading and installing azpdd yukle is very easy and fast. You just need to follow these simple steps:

            -
              -
1. Go to www.talibov.az, which is the official website of the app.
2. Click on the "Download" button and choose your device type (Android or iOS).
3. Wait for the app to be downloaded on your device.
4. Open the app and follow the instructions to complete the installation.
5. Enjoy using azpdd yukle!
            -

            How to use azpdd yukle to prepare for the driving license exam?

            -

            Using azpdd yukle to prepare for the driving license exam is very simple and convenient. You just need to choose one of the three modes that are available in the app: test mode, learning mode, or statistics mode. Here is how each mode works:

            -

            azpdd yukle pulsuz
            -azpdd yukle komputer ucun
            -azpdd yukle android
            -azpdd yukle windows
            -azpdd yukle talibov
            -azpdd yukle 2023
            -azpdd yukle apk
            -azpdd yukle pdf
            -azpdd yukle online
            -azpdd yukle test
            -azpdd yukle proqrami
            -azpdd yukle kitab
            -azpdd yukle mobil
            -azpdd yukle ios
            -azpdd yukle mac
            -azpdd yukle red cat
            -azpdd yukle video
            -azpdd yukle derslik
            -azpdd yukle imtahan
            -azpdd yukle asan doc
            -azpdd yukle sayt
            -azpdd yukle yeni versiya
            -azpdd yukle qeydiyyat
            -azpdd yukle oyunu
            -azpdd yukle biletler
            -azpdd yukle rusca
            -azpdd yukle ingilisce
            -azpdd yukle azerbaycanca
            -azpdd yukle kateqoriya b
            -azpdd yukle kateqoriya c
            -azpdd yukle kateqoriya d
            -azpdd yukle kateqoriya e
            -azpdd yukle kateqoriya a1
            -azpdd yukle kateqoriya a2
            -azpdd yukle kateqoriya a3
            -azpdd yukle kateqoriya be
            -azpdd yukle kateqoriya ce
            -azpdd yukle kateqoriya de
            -azpdd yukle kateqoriya ee
-azpdd yukle is intended for people who are learning the traffic rules and preparing to obtain a driving license.

            -
              -
            • Test mode: In this mode, you can practice with different categories and topics of questions that are similar to those in the real exam. You can choose the number of questions, the level of difficulty, and the time limit. You can also skip questions, mark them for review, or end the test at any time. After completing each test, you will see your score, your correct and incorrect answers, and detailed explanations for each question.
            • Learning mode: In this mode, you can study the road traffic rules and signs in detail. You can access the full text of the road traffic rules, as well as the images and videos of the road signs and signals. You can also search for specific terms or topics, or browse through the table of contents. You can also bookmark the pages that you want to review later.
            • Statistics mode: In this mode, you can track your progress and performance in the app. You can see how many tests you have taken, how many questions you have answered, and how many correct and incorrect answers you have given. You can also see your average score, your best score, and your worst score. You can also compare your results with other users of the app, and see where you rank among them.
            -

            Main features of azpdd yukle

            -

            Azpdd yukle is not just a simple app for learning the road traffic rules. It is a comprehensive and interactive tool that offers many features and benefits for its users. Here are some of the main features of azpdd yukle that make it stand out from other similar apps:

            -

            Test mode: practice with different categories and topics of questions

            -

            One of the most important features of azpdd yukle is the test mode, which allows you to practice with different categories and topics of questions that are similar to those in the real exam. You can choose from four categories: A (motorcycles), B (cars), C (trucks), and D (buses). You can also choose from 12 topics: general rules, road signs, road markings, signals, speed limits, priority rules, overtaking, parking, stopping, turning, accidents, and penalties. You can customize your test by choosing the number of questions (from 10 to 40), the level of difficulty (from easy to hard), and the time limit (from 10 to 40 minutes). You can also skip questions, mark them for review, or end the test at any time. After completing each test, you will see your score, your correct and incorrect answers, and detailed explanations for each question. You will also see how much time you spent on each question, and how you performed compared to other users. -

            Learning mode: study the road traffic rules and signs in detail

            -

            Another important feature of azpdd yukle is the learning mode, which allows you to study the road traffic rules and signs in detail. You can access the full text of the road traffic rules, as well as the images and videos of the road signs and signals. You can also search for specific terms or topics, or browse through the table of contents. You can also bookmark the pages that you want to review later. The learning mode is designed to help you understand the logic and reasoning behind each rule and sign, and to help you memorize them easily. The learning mode is also updated regularly with the latest changes and additions to the road traffic rules. -

            Statistics mode: track your progress and performance

            -

            A third important feature of azpdd yukle is the statistics mode, which allows you to track your progress and performance in the app. You can see how many tests you have taken, how many questions you have answered, and how many correct and incorrect answers you have given. You can also see your average score, your best score, and your worst score. You can also compare your results with other users of the app, and see where you rank among them. The statistics mode is designed to help you monitor your strengths and weaknesses, and to help you improve your knowledge and skills over time. -

            Tips and tricks: get useful advice and guidance from experts

            -

            A fourth important feature of azpdd yukle is the tips and tricks section, which gives you useful advice and guidance from experts on how to prepare for the driving license exam. You can find tips on how to study effectively, how to manage your time, how to deal with stress, how to avoid common mistakes, how to answer tricky questions, and how to pass the exam with confidence. You can also find tricks on how to remember the road traffic rules and signs better, how to use mnemonics and associations, how to visualize scenarios and situations, and how to apply logic and common sense. The tips and tricks section is designed to help you overcome any difficulties or challenges that you may face while preparing for the exam. -

            Benefits of using azpdd yukle

            -

            Azpdd yukle is not just a useful app for learning the road traffic rules. It is also a beneficial app for saving time and money, learning at your own pace, improving your knowledge and skills, and staying updated and informed. Here are some of the benefits of using azpdd yukle that you should know:

            -

            Save time and money: no need to attend expensive courses or buy books

            -

            One of the main benefits of using azpdd yukle is that it saves you time and money. You don't need to attend expensive courses or buy books to prepare for the driving license exam. You can use azpdd yukle anytime and anywhere, as long as you have a device and an internet connection. You can also use azpdd yukle offline, by downloading the questions and answers to your device. You can use azpdd yukle for free, without any hidden fees or charges. You can also get access to premium features, such as unlimited tests, unlimited questions, and unlimited time, by watching ads or inviting friends. -

            Learn at your own pace: choose the level of difficulty and the number of questions

            -

            Another benefit of using azpdd yukle is that it lets you learn at your own pace. You can choose the level of difficulty and the number of questions that suit your needs and preferences. You can start with easy questions and gradually move to harder ones, or you can mix and match different levels and topics. You can also repeat the tests as many times as you want, until you feel confident and ready for the exam. You can also review the questions and answers, and learn from your mistakes. -

            Improve your knowledge and skills: get feedback and explanations for every answer

            -

            A third benefit of using azpdd yukle is that it helps you improve your knowledge and skills. You don't just get the right or wrong answer, but you also get feedback and explanations for every answer. You can understand why an answer is correct or incorrect, and what is the logic and reasoning behind it. You can also learn new facts and information that you may not have known before. You can also improve your skills in reading, understanding, analyzing, and applying the road traffic rules. -

            Stay updated and informed: get the latest changes and additions to the road traffic rules

            -

            A fourth benefit of using azpdd yukle is that it keeps you updated and informed. You don't have to worry about outdated or inaccurate information, because azpdd yukle is constantly updated with the latest changes and additions to the road traffic rules. You can get notifications about any new or revised rules, signs, or questions that are added to the app. You can also get access to news and articles about road safety and driving tips. -

            Conclusion

            -

            Azpdd yukle is a free app that helps you learn the road traffic rules in Azerbaijan. It is based on the official materials and tests that are used by the State Traffic Police Department (DYP) for issuing driving licenses. The app contains more than 3000 questions and answers, covering all the categories and topics that you need to know for the exam. You can use azpdd yukle to practice your knowledge, test your skills, and improve your confidence before taking the real exam.

            -

            Azpdd yukle also offers many features and benefits for its users, such as:

            -
              -
• Test mode: practice with different categories and topics of questions
• Learning mode: study the road traffic rules and signs in detail
• Statistics mode: track your progress and performance
• Tips and tricks: get useful advice and guidance from experts
• Save time and money: no need to attend expensive courses or buy books
• Learn at your own pace: choose the level of difficulty and the number of questions
• Improve your knowledge and skills: get feedback and explanations for every answer
• Stay updated and informed: get the latest changes and additions to the road traffic rules
            -

            If you are looking for a simple, convenient, and effective way to prepare for the driving license exam in Azerbaijan, you should definitely download and use azpdd yukle. It is the best app for learning the road traffic rules in Azerbaijan, and it will help you pass the exam with flying colors. Don't hesitate, download azpdd yukle today and start your journey to becoming a safe and responsible driver!


            FAQs


            Here are some of the frequently asked questions about azpdd yukle:

1. Is azpdd yukle free? Yes, azpdd yukle is free to download and use. You can access all the features and benefits of the app without paying anything. However, if you want to get access to premium features, such as unlimited tests, unlimited questions, and unlimited time, you can watch ads or invite friends to unlock them.
2. Is azpdd yukle reliable? Yes, azpdd yukle is reliable and accurate. It is based on the official materials and tests that are used by the State Traffic Police Department (DYP) for issuing driving licenses. The app is also updated regularly with the latest changes and additions to the road traffic rules. You can trust azpdd yukle to provide you with the most relevant and up-to-date information and questions.
3. Is azpdd yukle easy to use? Yes, azpdd yukle is easy to use and user-friendly. The app has a simple and intuitive interface that guides you through the different modes and features. The app also has a clear and readable font, a colorful and attractive design, and a smooth and fast performance. You can use azpdd yukle without any difficulty or hassle.
4. Is azpdd yukle compatible with my device? Yes, azpdd yukle is compatible with most devices that run on Android or iOS operating systems. You can download and install azpdd yukle on your smartphone or tablet, as long as you have enough storage space and an internet connection. You can also use azpdd yukle offline, by downloading the questions and answers to your device.
5. Is azpdd yukle secure? Yes, azpdd yukle is secure and safe. The app does not collect or store any personal or sensitive information from its users. The app also does not contain any viruses, malware, or spyware that could harm your device or data. You can use azpdd yukle without any risk or worry.

            \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Aprenda sobre o servio de auxlio espiritual com Obreiros da Vida Eterna em PDF.md b/spaces/fatiXbelha/sd/Aprenda sobre o servio de auxlio espiritual com Obreiros da Vida Eterna em PDF.md deleted file mode 100644 index 32f93dd2ea69f867382de2c0895d813c2a428d1f..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Aprenda sobre o servio de auxlio espiritual com Obreiros da Vida Eterna em PDF.md +++ /dev/null @@ -1,131 +0,0 @@ - -

            Obreiros da Vida Eterna: A Spiritist Classic by Chico Xavier


            If you are interested in Spiritism, the doctrine that teaches about the existence and communication of spirits, you may have heard of Chico Xavier, one of the most influential and respected mediums in Brazil and in the world. He wrote hundreds of books through a process called psychography, or automatic writing, in which he claimed to receive messages from spirits of different levels of evolution. One of his most remarkable works is Obreiros da Vida Eterna, or Workers of Eternal Life, which is part of a series called A Vida no Mundo Espiritual, or Life in the Spiritual World. In this article, we will tell you what this book is about, why you should read it, and how you can download it in PDF format.

What is Obreiros da Vida Eterna?

            Obreiros da Vida Eterna is a book that was published in 1946 by the Brazilian Spiritist Federation. It was psychographed by Chico Xavier and dictated by the spirit André Luiz, who was a doctor when he was alive. André Luiz is also the protagonist and narrator of the book, which tells his experiences as a member of a team of spirit workers who assist souls who are about to leave their physical bodies and enter the spiritual world.


The author and the medium

            Chico Xavier was born in 1910 in Pedro Leopoldo, Minas Gerais, Brazil. He was raised in poverty and suffered from various illnesses and mistreatments. He discovered his mediumship abilities when he was a child, but he only started to use them for public service when he was 17 years old. He became a follower of Spiritism, the doctrine codified by Allan Kardec in France in the 19th century, which combines Christian teachings with scientific and philosophical principles. He dedicated his life to charity and to spreading the spiritist message through his books, which he never charged for or claimed as his own. He died in 2002 in Uberaba, Minas Gerais, Brazil.


            André Luiz was a spirit who identified himself as a former doctor who lived in Rio de Janeiro in the early 20th century. He did not reveal his real name or any details about his earthly life, but he said he had been an atheist and a materialist who had wasted his talents and opportunities. After his death, he found himself in a dark and miserable region called the Umbral, where he suffered for many years until he was rescued by benevolent spirits who took him to Nosso Lar, or Our Home, a spiritual colony where he learned about Spiritism and started his process of rehabilitation and enlightenment. He became a writer and a teacher who shared his knowledge and experiences with other spirits and with incarnated humans through Chico Xavier.

The content and the message

            Obreiros da Vida Eterna is divided into 20 chapters that describe different cases of disincarnation, or death, that André Luiz and his team witnessed and helped. The cases range from peaceful transitions of good people to tragic endings of evil ones, from suicides to accidents, from young children to old people. The book shows how each person's condition after death is determined by their moral conduct during life, by their level of awareness and detachment from material things, by their faith and love for God and their fellow beings. The book also reveals how spirits can influence and interfere with human affairs, for good or for evil, depending on their intentions and vibrations.


            The main message of Obreiros da Vida Eterna is that death is not the end of life, but the beginning of a new stage of existence, where we continue to learn, to work, to grow, and to face the consequences of our actions. It is a message of hope and responsibility, of faith and reason, of love and justice. It is a message that invites us to reflect on the meaning and purpose of our lives, and to prepare ourselves for the inevitable transition that awaits us all.

Why should you read Obreiros da Vida Eterna?

            There are many reasons why you should read Obreiros da Vida Eterna, whether you are a spiritist or not, whether you believe in life after death or not. Here are some of them:

It is a source of inspiration and comfort

            Reading Obreiros da Vida Eterna can inspire you to live a better life, to cultivate virtues and values, to overcome your weaknesses and vices, to serve others and to love God. It can also comfort you in times of grief and loss, by showing you that death is not the end, but a passage to a higher plane of existence, where your loved ones are waiting for you and where you can communicate with them through prayer and mediumship.

It is a guide for spiritual evolution

            Reading Obreiros da Vida Eterna can guide you in your spiritual evolution, by teaching you the laws and principles that govern the universe and the destiny of souls, by explaining the process of reincarnation and karma, by illustrating the different stages and levels of spiritual development, by offering practical advice and examples on how to improve yourself and help others.

It is a testimony of life after death

            Reading Obreiros da Vida Eterna can provide you with a testimony of life after death, by presenting you with a detailed and realistic description of the spiritual world, its inhabitants, its activities, its organization, its challenges, its joys, its mysteries. It can also challenge your preconceptions and prejudices about death and the afterlife, by showing you that there is no heaven or hell as traditionally conceived, but a variety of conditions and situations that reflect the moral state of each soul.



How can you download Obreiros da Vida Eterna in PDF format?

            If you are interested in reading Obreiros da Vida Eterna in PDF format, there are two ways you can do it: the legal and ethical way, and the easy and convenient way.

The legal and ethical way

            The legal and ethical way to download Obreiros da Vida Eterna in PDF format is to buy it from an authorized online store or publisher. This way, you will be respecting the intellectual property rights of the author and the medium, as well as supporting the spiritist movement and its charitable works. You will also be getting a high-quality version of the book, with accurate translation and formatting. Some of the online stores or publishers where you can buy Obreiros da Vida Eterna in PDF format are:

| Name | Website | Price |
| --- | --- | --- |
| Brazilian Spiritist Federation | https://www.febnet.org.br/loja/obreiros-da-vida-eterna/ | R$ 10.00 (Brazilian reais) |
| Spiritist Group of New York | https://www.sgny.org/product-page/workers-of-the-life-eternal-obreiros-da-vida-eterna | $ 10.00 (US dollars) |
| Spiritist Network | https://www.spiritist.com/product-page/obreiros-da-vida-eterna-workers-of-the-life-eternal-ebook-pdf | $ 5.00 (US dollars) |

            The easy and convenient way


            The easy and convenient way to download Obreiros da Vida Eterna in PDF format is to find it on a free online platform or website that offers it without any charge or registration. This way, you will be saving time and money, as well as accessing the book from any device or location. However, this way may not be legal or ethical, as it may violate the intellectual property rights of the author and the medium, as well as deprive them and the spiritist movement of their deserved income. You may also get a low-quality version of the book, with errors or omissions in translation or formatting. Some of the online platforms or websites where you can download Obreiros da Vida Eterna in PDF format for free are:

| Name | Website |
| --- | --- |
| PDF Drive | https://www.pdfdrive.com/obreiros-da-vida-eterna-ebooks.html |
| Le Livros | https://lelivros.love/book/baixar-livro-obreiros-da-vida-eterna-chico-xavier-em-pdf-epub-mobi-ou-ler-online/ |
| Livros Espíritas Grátis | https://livrosespiritasgratis.com.br/obreiros-da-vida-eterna-chico-xavier-andre-luiz/ |

            Conclusion


            Obreiros da Vida Eterna is a spiritist classic that can enrich your knowledge and understanding of life, death, and the spiritual world. It can also inspire you to live a more meaningful and fulfilling life, in harmony with the divine laws and your own conscience. Whether you buy it or download it for free, we hope you enjoy reading this book and that it helps you in your spiritual journey.


            FAQs


            Here are some frequently asked questions about Obreiros da Vida Eterna and their answers:

What is the genre of Obreiros da Vida Eterna?

            Obreiros da Vida Eterna is a book that can be classified as a spiritual novel, a reportage, or a doctrinal study. It is a fictional narrative based on real facts and teachings, with elements of drama, suspense, romance, and humor.

How many pages does Obreiros da Vida Eterna have?

            The original edition of Obreiros da Vida Eterna has 368 pages. The PDF version may vary depending on the font size and layout.

Is Obreiros da Vida Eterna available in other languages?

            Yes, Obreiros da Vida Eterna has been translated into several languages, such as English, Spanish, French, Italian, German, and Japanese. You can find some of these translations online or in bookstores.

Is Obreiros da Vida Eterna part of a series?

            Yes, Obreiros da Vida Eterna is the third book of a series called A Vida no Mundo Espiritual, or Life in the Spiritual World, which consists of 13 books written by Chico Xavier and dictated by André Luiz. The first book is Nosso Lar, or Our Home, and the last one is Evolução em Dois Mundos, or Evolution in Two Worlds.

Is there a movie adaptation of Obreiros da Vida Eterna?

            No, there is no movie adaptation of Obreiros da Vida Eterna yet. However, there is a movie adaptation of Nosso Lar, which was released in 2010 and was a box-office success in Brazil. There is also a TV series adaptation of A Vida no Mundo Espiritual, which was produced by TV Mundo Maior and aired in 2015.

            \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Poppy Playtime Chapter 1 for iOS and Discover the Dark Secrets of Playtime Co..md b/spaces/fatiXbelha/sd/Download Poppy Playtime Chapter 1 for iOS and Discover the Dark Secrets of Playtime Co..md deleted file mode 100644 index b459fe99107cd55c957f1585f690e97a6b0c9d1a..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Poppy Playtime Chapter 1 for iOS and Discover the Dark Secrets of Playtime Co..md +++ /dev/null @@ -1,73 +0,0 @@ -

Download Poppy Playtime Chapter 1 iOS: How to Play the Horror Game on Your Mobile Device

            If you are a fan of survival horror games, you might have heard of Poppy Playtime, one of the most popular and terrifying games of 2021. Poppy Playtime is a game that will make you scream, jump, and question your sanity as you explore an eerie toy factory filled with vengeful toys. But did you know that you can also play this game on your iOS device? That's right, Poppy Playtime Chapter 1 is now available on the App Store for only $2.99. In this article, we will tell you everything you need to know about how to download and play Poppy Playtime Chapter 1 on your iPhone, iPad, or iPod touch.

What is Poppy Playtime?

            Poppy Playtime is a horror/puzzle adventure game developed by indie studio MOB Games. The game was first released on PC in October 2021, and has since gained a huge fan base thanks to its creepy atmosphere, engaging gameplay, and intriguing story. The game is set in the abandoned toy factory of Playtime Co., a company that used to produce the most amazing toys in the world. You play as a former employee who returns to the factory 10 years after it was mysteriously shut down. Your goal is to find out what happened to the staff, the toys, and especially Poppy, the company's mascot and star doll.



            Why should you play Poppy Playtime on iOS?


            Poppy Playtime is a game that will keep you on the edge of your seat as you explore the dark and twisted corridors of the factory. But why should you play it on your iOS device instead of your PC? Here are some reasons why playing Poppy Playtime on your mobile device is a great idea:

Portability and convenience

            One of the advantages of playing Poppy Playtime on your iOS device is that you can take it with you anywhere you go. Whether you are at home, at school, at work, or on the bus, you can enjoy this game anytime you want. You don't need a powerful PC or a stable internet connection to play this game. All you need is your iOS device and some headphones.

Immersive and interactive gameplay

            Another benefit of playing Poppy Playtime on your iOS device is that it offers a more immersive and interactive experience than playing it on your PC. The game uses touch controls that are easy to use and responsive. You can swipe, tap, drag, and pinch to interact with the environment and solve puzzles. The game also uses gyroscope and accelerometer features that allow you to tilt and move your device to look around and aim. The game also supports haptic feedback that makes you feel every vibration, bump, and shock in the game.

Affordable and accessible price

A final reason why playing Poppy Playtime on your iOS device is a good idea is that it is very affordable and accessible. The game costs only $2.99 on the App Store, much cheaper than the PC version, which costs $4.99 on Steam. It also has a small file size of only 1.1 GB, so it won't take up much space on your device. The game is compatible with iOS 11.0 or later and works on iPhone 6S or newer, iPad Air 2 or newer, and iPod touch 7th generation or newer, and it holds a high rating of 4.8 out of 5 stars on the App Store, based on over 2,000 reviews from satisfied players.

How to download Poppy Playtime Chapter 1 on iOS?

            Now that you know why you should play Poppy Playtime on your iOS device, you might be wondering how to download and install it. Don't worry, it's very easy and simple. Just follow these steps:

Visit the App Store and search for Poppy Playtime Chapter 1

            The first step is to open the App Store app on your iOS device and type "Poppy Playtime Chapter 1" in the search bar. You should see the game's icon and name appear in the results. Tap on it to go to the game's page.

Tap on the Get button and confirm your purchase

            The next step is to tap on the Get button on the game's page. You will be asked to confirm your purchase using your Apple ID password, Touch ID, or Face ID. Once you do that, the game will start downloading to your device.



Wait for the game to download and launch it from your home screen

            The final step is to wait for the game to finish downloading. You can check the progress by looking at the circle around the game's icon on your home screen. When the circle is full, it means the game is ready to play. Tap on the icon to launch the game and enjoy!

What to expect from Poppy Playtime Chapter 1?

            Poppy Playtime Chapter 1 is a game that will keep you hooked and scared for about an hour of gameplay. The game has many features and challenges that will test your skills and nerves. Here are some of them:

Explore the abandoned toy factory of Playtime Co.

            The game takes place in the huge and creepy toy factory of Playtime Co., where you will find many rooms, hallways, and secrets to explore. You will also encounter various toys that were once loved by children, but are now twisted and hostile. Some of them are harmless, but others will try to kill you if they see you. You will also learn more about the history and fate of Playtime Co., and what happened to Poppy, the doll that everyone adored.

Solve puzzles and avoid enemies using the GrabPack device

            The game is not just about walking around and being scared. You will also have to solve puzzles and avoid enemies using a special device called the GrabPack. The GrabPack is a pair of gloves that allow you to grab and manipulate objects from a distance using magnetic force. You can use it to open doors, activate switches, move boxes, and more. You can also use it to defend yourself from some enemies by throwing objects at them or pulling them away from you. The GrabPack is a fun and innovative tool that adds a lot of gameplay possibilities.

Uncover the secrets and mysteries of Poppy Playtime's world

            The game is not only scary, but also intriguing. As you play, you will discover more about the world of Poppy Playtime, and what lies behind its horror. You will find clues, notes, tapes, and other items that will reveal more information about the characters, the toys, and the events that led to the downfall of Playtime Co. You will also encounter some surprises and twists that will make you question everything you thought you knew.

Conclusion and FAQs

            Poppy Playtime Chapter 1 is a horror/puzzle adventure game that will make you scream, jump, and wonder as you explore an abandoned toy factory filled with vengeful toys. The game is now available on iOS devices for only $2.99, and offers a portable, immersive, interactive, affordable, and accessible experience that you won't regret. If you are looking for a game that will challenge your skills and nerves, download Poppy Playtime Chapter 1 today and see if you can survive its horrors.


            Here are some frequently asked questions about Poppy Playtime Chapter 1:

| Question | Answer |
| --- | --- |
| Is Poppy Playtime Chapter 1 the only chapter of the game? | No, Poppy Playtime Chapter 1 is the first chapter of a planned series of chapters that will continue the story and gameplay of the game. The developers have not announced when the next chapter will be released, but they have said that they are working hard on it. |
| Is Poppy Playtime Chapter 1 scary? | Yes, Poppy Playtime Chapter 1 is a very scary game that will make you feel tense, nervous, and terrified as you play. The game has a lot of jump scares, creepy sounds, and disturbing visuals that will make your heart race. The game is not recommended for people who are easily scared or have a weak heart. |
| Is Poppy Playtime Chapter 1 suitable for children? | No, Poppy Playtime Chapter 1 is not suitable for children under the age of 13. The game has a lot of violence, gore, and horror that are not appropriate for young audiences. The game also has some themes and topics that are mature and sensitive, such as death, murder, and trauma. The game is rated 12+ on the App Store, but parental discretion is advised. |
| How long does it take to finish Poppy Playtime Chapter 1? | Poppy Playtime Chapter 1 is a relatively short game that can be completed in about an hour or less, depending on your skill level and play style. The game has some replay value, as you can try to find all the secrets and collectibles in the game, or play it on a higher difficulty level. |
| Can I play Poppy Playtime Chapter 1 offline? | Yes, you can play Poppy Playtime Chapter 1 offline without an internet connection. However, you will need an internet connection to download and update the game from the App Store. |

            \ No newline at end of file diff --git a/spaces/fb700/chat3/request_llm/bridge_all.py b/spaces/fb700/chat3/request_llm/bridge_all.py deleted file mode 100644 index ecc6b1e0d09a4dd5f3d4b171ecd9cb9f35d4c5ce..0000000000000000000000000000000000000000 --- a/spaces/fb700/chat3/request_llm/bridge_all.py +++ /dev/null @@ -1,188 +0,0 @@ - -""" - 该文件中主要包含2个函数 - - 不具备多线程能力的函数: - 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 - - 具备多线程调用能力的函数 - 2. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 -""" -import tiktoken - -from concurrent.futures import ThreadPoolExecutor - -from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui -from .bridge_chatgpt import predict as chatgpt_ui - -from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui -from .bridge_chatglm import predict as chatglm_ui - -from .bridge_tgui import predict_no_ui_long_connection as tgui_noui -from .bridge_tgui import predict as tgui_ui - -colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044'] - -get_token_num_gpt35 = lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())) -get_token_num_gpt4 = lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())) - -model_info = { - # openai - "gpt-3.5-turbo": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": "https://api.openai.com/v1/chat/completions", - "max_token": 4096, - "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"), - "token_cnt": get_token_num_gpt35, - }, - - "gpt-4": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": "https://api.openai.com/v1/chat/completions", - "max_token": 8192, - "tokenizer": tiktoken.encoding_for_model("gpt-4"), - "token_cnt": get_token_num_gpt4, - }, - - # api_2d - "api2d-gpt-3.5-turbo": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": "https://openai.api2d.net/v1/chat/completions", - "max_token": 4096, - "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"), - "token_cnt": get_token_num_gpt35, - }, - - "api2d-gpt-4": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": "https://openai.api2d.net/v1/chat/completions", - "max_token": 8192, - "tokenizer": tiktoken.encoding_for_model("gpt-4"), - "token_cnt": get_token_num_gpt4, - }, - - # chatglm - "chatglm": { - "fn_with_ui": chatglm_ui, - "fn_without_ui": chatglm_noui, - "endpoint": None, - "max_token": 1024, - "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"), - "token_cnt": get_token_num_gpt35, - }, - -} - - -def LLM_CATCH_EXCEPTION(f): - """ - 装饰器函数,将错误显示出来 - """ - def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience): - try: - return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience) - except Exception as e: - from toolbox import get_conf - import traceback - proxies, = get_conf('proxies') - tb_str = '\n```\n' + traceback.format_exc() + '\n```\n' - observe_window[0] = tb_str - return tb_str - return decorated - - -def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False): - """ - 发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 - inputs: - 是本次问询的输入 - sys_prompt: - 系统静默prompt - llm_kwargs: - LLM的内部调优参数 - history: - 是之前的对话列表 - observe_window = None: - 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 - """ - import threading, time, copy - - model = 
llm_kwargs['llm_model'] - n_model = 1 - if '&' not in model: - assert not model.startswith("tgui"), "TGUI不支持函数插件的实现" - - # 如果只询问1个大语言模型: - method = model_info[model]["fn_without_ui"] - return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience) - else: - # 如果同时询问多个大语言模型: - executor = ThreadPoolExecutor(max_workers=4) - models = model.split('&') - n_model = len(models) - - window_len = len(observe_window) - assert window_len==3 - window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True] - - futures = [] - for i in range(n_model): - model = models[i] - method = model_info[model]["fn_without_ui"] - llm_kwargs_feedin = copy.deepcopy(llm_kwargs) - llm_kwargs_feedin['llm_model'] = model - future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience) - futures.append(future) - - def mutex_manager(window_mutex, observe_window): - while True: - time.sleep(0.5) - if not window_mutex[-1]: break - # 看门狗(watchdog) - for i in range(n_model): - window_mutex[i][1] = observe_window[1] - # 观察窗(window) - chat_string = [] - for i in range(n_model): - chat_string.append( f"【{str(models[i])} 说】: {window_mutex[i][0]} " ) - res = '

            \n\n---\n\n'.join(chat_string) - # # # # # # # # # # # - observe_window[0] = res - - t_model = threading.Thread(target=mutex_manager, args=(window_mutex, observe_window), daemon=True) - t_model.start() - - return_string_collect = [] - while True: - worker_done = [h.done() for h in futures] - if all(worker_done): - executor.shutdown() - break - time.sleep(1) - - for i, future in enumerate(futures): # wait and get - return_string_collect.append( f"【{str(models[i])} 说】: {future.result()} " ) - - window_mutex[-1] = False # stop mutex thread - res = '
            \n\n---\n\n'.join(return_string_collect) - return res - - -def predict(inputs, llm_kwargs, *args, **kwargs): - """ - 发送至LLM,流式获取输出。 - 用于基础的对话功能。 - inputs 是本次问询的输入 - top_p, temperature是LLM的内部调优参数 - history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误) - chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 - additional_fn代表点击的哪个按钮,按钮见functional.py - """ - - method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] - yield from method(inputs, llm_kwargs, *args, **kwargs) - diff --git a/spaces/fclong/summary/fengshen/cli/fengshen_pipeline.py b/spaces/fclong/summary/fengshen/cli/fengshen_pipeline.py deleted file mode 100644 index 07c31349ef96fd86d0c14b807601c645b095372f..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/cli/fengshen_pipeline.py +++ /dev/null @@ -1,34 +0,0 @@ -import sys -from importlib import import_module -from datasets import load_dataset -import argparse - - -def main(): - if len(sys.argv) < 3: - raise Exception( - 'args len < 3, example: fengshen_pipeline text_classification predict xxxxx') - pipeline_name = sys.argv[1] - method = sys.argv[2] - pipeline_class = getattr(import_module('fengshen.pipelines.' + pipeline_name), 'Pipeline') - - total_parser = argparse.ArgumentParser("FengShen Pipeline") - total_parser.add_argument('--model', default='', type=str) - total_parser.add_argument('--datasets', default='', type=str) - total_parser.add_argument('--text', default='', type=str) - total_parser = pipeline_class.add_pipeline_specific_args(total_parser) - args = total_parser.parse_args(args=sys.argv[3:]) - pipeline = pipeline_class(args=args, model=args.model) - - if method == 'predict': - print(pipeline(args.text)) - elif method == 'train': - datasets = load_dataset(args.datasets) - pipeline.train(datasets) - else: - raise Exception( - 'cmd not support, now only support {predict, train}') - - -if __name__ == '__main__': - main() diff --git a/spaces/feregVcuzo/sanity-test-midi/Rahasya 720p In Hindi Dubbed Movie HOT!.md b/spaces/feregVcuzo/sanity-test-midi/Rahasya 720p In Hindi Dubbed Movie HOT!.md deleted file mode 100644 index 7531df83b89f8fafaad22ebcc2f5f2b85eeef9fb..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/Rahasya 720p In Hindi Dubbed Movie HOT!.md +++ /dev/null @@ -1,54 +0,0 @@ -## Rahasya 720p In Hindi Dubbed Movie - - - - - - ![Rahasya 720p In Hindi Dubbed Movie HOT!](https://www.tabooafairs.com/wp-content/uploads/2023/02/WATCH-2-4.png) - - - - - -**Download --->>> [https://apconhanstraf.blogspot.com/?c=2tyq7I](https://apconhanstraf.blogspot.com/?c=2tyq7I)** - - - - - - - - - - - - - -# Rahasya 720p In Hindi Dubbed Movie: A Gripping Murder Mystery - - - -If you are looking for a thrilling and suspenseful movie to watch, you might want to check out Rahasya 720p in Hindi dubbed movie. Rahasya is a 2015 Indian murder mystery film directed by Manish Gupta and starring Kay Kay Menon, Tisca Chopra, Ashish Vidyarthi, Mita Vashisht and Ashwini Kalsekar. The film is inspired by the 2008 Noida double murder case, which was one of the most controversial and sensational cases in India. - - - -The film follows the investigation of CBI officer Sunil Paraskar (Kay Kay Menon) who is assigned to solve the murder of Ayesha Mahajan (Sakshi Sem), the teenage daughter of a renowned doctor couple, Sachin (Ashish Vidyarthi) and Aarti (Tisca Chopra). The prime suspect is Sachin, who was found drunk and unconscious in his house with a blood-stained knife. 
However, as Paraskar digs deeper into the case, he uncovers a web of secrets, lies and motives that point to different suspects, including Aarti, Ayesha's boyfriend, her tutor, her friends and even a mysterious caller. - - - -Rahasya 720p in Hindi dubbed movie is a captivating and engrossing film that keeps you guessing till the end. The film has a tight script, crisp editing, brilliant performances and a haunting background score. The film also raises some pertinent questions about the justice system, media trial, parental pressure and social stigma. Rahasya 720p in Hindi dubbed movie is a must-watch for fans of crime thrillers and mysteries. - - - -The film has received mostly positive reviews from critics and audiences alike. The film has been praised for its gripping plot, realistic portrayal of the case, and stellar performances by the cast. The film has also been appreciated for its direction, cinematography, editing and music. The film has been rated 9/10 by kausix777 on IMDb, who called it "very watchable suspense with great acting" [^2^]. The film has also been rated 7/10 by Ansango on IMDb, who called it "gripping and captivating whodunit thriller" [^2^]. The film has also been rated 2.5/5 by Renuka Vyavahare of The Times of India, who said that "the film relies heavily on Kay Kay's Sherlock act" [^1^]. The film has also been rated 3/5 by Shilpa Jamkhandikar of Reuters, who said that "the two hour-long murder mystery is taut; and even though it is packed with all the familiar tropes, the atmospherics ensure that the story never sags, keeping the audience engaged throughout" [^1^]. The film has also been rated 4/5 by Nandini Ramnath of Scroll.in, who said that "Rahasya is far from being an exploitative flick ‒ rather, it's a taut and stylish affair, which gives no quarter to extraneous elements such as songs and sub-plots over its 125 minutes" [^1^]. The film has also been rated 4/5 by India Today, who said that "if you enjoy murder mysteries then you must watch Rahasya. It's a well-made film" [^3^]. - - - -Rahasya 720p in Hindi dubbed movie is available online on YouTube [^1^]. You can watch the full movie for free with English subtitles. You can also download the movie in HD quality from various websites. However, we advise you to watch the movie legally and support the makers of this brilliant film. Rahasya 720p in Hindi dubbed movie is a rare gem in Bollywood that deserves your attention and appreciation. - - dfd1c89656 - - - - - diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/ADB Versi 2.0 Whats New and How to Download It.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/ADB Versi 2.0 Whats New and How to Download It.md deleted file mode 100644 index 86363ee8f75f66f806ad094da4e0f68ba419afd5..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/ADB Versi 2.0 Whats New and How to Download It.md +++ /dev/null @@ -1,183 +0,0 @@ -


              Download ADB Versi 2.0: A Complete Guide


              If you are an Android developer or a mobile tester, you probably use adb (Android Debug Bridge) to debug your mobile app on an emulator or a real device. Adb is a command-line tool that lets you communicate with your device, install and run apps, capture logs, and perform other tasks.


But did you know that there is a new version of adb available? Adb versi 2.0 is the latest version of the tool, with new features and bug fixes for Android development. In this article, we will show you how to download adb versi 2.0 on Windows, macOS, and Linux, how to use it to communicate with your device, what the benefits of using it are, and how to troubleshoot some common errors and issues.

How to check your adb version and update it if needed

              Before you download adb versi 2.0, you should check your current adb version and see if you need to update it. To check your adb version, open a terminal or command prompt and type adb version. You should see something like this:

```
Android Debug Bridge version 1.0.41
Version 31.0.3-7562133
Installed as C:\Users\user\AppData\Local\Android\Sdk\platform-tools\adb.exe
```

              If your adb version is lower than the latest version, you should update it by downloading the latest platform-tools package from the Android SDK website, or by using the SDK Manager tool in Android Studio.
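For instance, a minimal check-and-update pass might look like the sketch below. The sdkmanager call assumes the Android SDK command-line tools are installed and on your PATH, so treat it as one possible route rather than the only one.

```
# Show the currently installed adb version
adb version

# Pull the latest "platform-tools" package via the SDK command-line tools
# (assumes sdkmanager is available; Android Studio's SDK Manager does the
# same thing through the GUI)
sdkmanager "platform-tools"

# Confirm the update took effect
adb version
```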


              -

              How to download adb versi 2.0 on Windows, macOS, and Linux


              The process of downloading adb versi 2.0 varies depending on your operating system. Here are the steps for each platform:

Windows

• Download the platform-tools package for Windows from the Android SDK website.
• Extract the zip file to a folder on your computer.
• Add the folder path to your system environment variable PATH.
• Open a terminal or command prompt and type adb version to verify that you have installed adb versi 2.0 correctly (a command sketch follows below).
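As a rough sketch of the last two Windows steps, something like the following works in Command Prompt; C:\platform-tools is only an assumed extraction folder, so substitute your own path.

```
:: Add the extracted folder to PATH for the current session
:: (use the System Properties dialog for a permanent change)
set PATH=%PATH%;C:\platform-tools

:: Verify that adb is now found
adb version
```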

              macOS

• Install Homebrew, an open-source package manager for macOS.
• Open a terminal and type brew install android-platform-tools to install adb versi 2.0 and other tools.
• Type adb version to verify that you have installed adb versi 2.0 correctly (see the sketch below).
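A possible macOS session is sketched below; on current Homebrew the package is distributed as a cask, so the exact install command may differ slightly from the step above.

```
# Install the platform-tools (adb, fastboot) via Homebrew
brew install --cask android-platform-tools

# Verify the installation
adb version
```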

              Linux

• Use a package manager such as apt, yum, or pacman to install adb versi 2.0 and other tools. For example, on Ubuntu, you can type sudo apt install android-tools-adb.
• Alternatively, you can download the platform-tools package for Linux from the Android SDK website and extract it to a folder on your computer.
• Add the folder path to your system environment variable PATH.
• Open a terminal and type adb version to verify that you have installed adb versi 2.0 correctly (a sketch of both routes follows below).
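Both Linux routes can be sketched roughly as follows; the package name android-tools-adb applies to Debian/Ubuntu, and ~/platform-tools is just an assumed extraction location.

```
# Route 1: distribution package (Debian/Ubuntu)
sudo apt update
sudo apt install android-tools-adb

# Route 2: manually extracted platform-tools (assumed path: ~/platform-tools)
export PATH="$PATH:$HOME/platform-tools"

# Verify either way
adb version
```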

              How to use adb versi 2.0 to communicate with your device


              Once you have downloaded and installed adb versi 2.0, you can use it to communicate with your device and perform various tasks. Here are the steps to use adb versi 2.0:

1. Enable USB debugging on your device by going to Settings > Developer options > USB debugging. If you don't see Developer options, go to Settings > About phone and tap Build number seven times.
2. Connect your device to your computer using a USB cable.
3. Open a terminal or command prompt and type adb devices to see the list of connected devices. You should see something like this:

   ```
   List of devices attached
   1234567890ABCDEF	device
   ```

   If you see a pop-up on your device asking for permission to allow USB debugging, tap OK.

4. Use adb commands to install and debug apps, capture logs, and perform other tasks. For example, you can type adb install app.apk to install an app from your computer to your device, or adb shell to access the device's shell (a short example session is sketched below).
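The steps above might translate into a session like the sketch below; app.apk and com.example.app are placeholders, not files or packages that actually exist on your machine.

```
# List connected devices (the serial number will differ on your device)
adb devices

# Install an APK from your computer (placeholder file name)
adb install app.apk

# Run a one-off command on the device, e.g. query the Android version
adb shell getprop ro.build.version.release

# Remove the app again when you are done (placeholder package name)
adb uninstall com.example.app
```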

              Benefits of adb versi 2.0


              Adb versi 2.0 is the latest version of the tool that supports new features and bug fixes for Android development. Some of the benefits of using adb versi 2.0 are:

• It supports new features such as wireless debugging, incremental installation, fast deployment, and native crash filtering.
• It is compatible with the latest Android devices and emulators running Android 12 or higher.
• It improves performance and stability of adb communication by using a new protocol and reducing latency and overhead.

              Common errors and solutions for adb versi 2.0


              Sometimes, you may encounter some errors or issues when using adb versi 2.0. Here are some of the common errors and solutions:


              Device not found


              This error means that adb cannot detect your device. To fix this error, you can try the following steps:

• Check if your device is in the correct connection mode. For most devices, you need to select media transfer protocol (MTP) or file transfer mode.
• Enable USB debugging on your device and authorize your computer if prompted.
• Update your adb driver on your computer if it is outdated or corrupted.
• Restart the adb server by typing adb kill-server and then adb start-server (see the sketch below).
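Restarting the server usually looks like this short sketch.

```
# Stop and restart the adb server, then re-check the device
adb kill-server
adb start-server
adb devices
```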

              Command not found


              This error means that adb cannot find the command you typed. To fix this error, you can try the following steps:

• Select media transfer protocol (MTP) or file transfer mode on your device if it is in charging mode or any other mode.
• Update your adb driver on your computer if it is outdated or corrupted.
• Add the folder path of adb to your system environment variable PATH if it is not set correctly (a quick check is sketched below).
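A quick way to confirm whether the adb binary is actually on your PATH is sketched below; run the line that matches your operating system.

```
# Windows (Command Prompt or PowerShell)
where adb

# macOS / Linux
which adb

# If nothing is printed, add the platform-tools folder to PATH
# (see the download steps above) and open a new terminal before retrying.
```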

              Waiting for device


              This error means that adb is waiting for your device to be ready. To fix this error, you can try the following steps:

• Reconnect your device to your computer using a different USB port or cable.
• Authorize your computer on your device if prompted.
• Restart the adb server by typing adb kill-server and then adb start-server (the sketch below shows how an unauthorized device appears in adb devices).

              Conclusion

              Adb version 2.0 is the latest version of the Android Debug Bridge tool that you can use to debug your mobile app on an emulator or a real device. In this article, we have shown you how to download adb version 2.0 on Windows, macOS, and Linux, how to use it to communicate with your device, what the benefits of using it are, and how to troubleshoot some common errors and issues. We hope that this article has helped you learn more about adb version 2.0 and how to use it effectively.


              If you have any questions or feedback, please feel free to leave a comment below. And if you want to learn more about Android development and testing, check out our other articles on our website. Thank you for reading and happy coding!


              FAQs


              What is the difference between adb version 2.0 and adb version 1.0?

              Adb version 2.0 is the newer version of adb that adds new features and bug fixes for Android development. Some of the new features include wireless debugging, incremental installation, fast deployment, and native crash filtering. Adb version 2.0 is also compatible with the latest Android devices and emulators running Android 12 or higher.

              How can I connect to a device over Wi-Fi using adb version 2.0?

              You can connect to a device over Wi-Fi using adb version 2.0 by following these steps:

                1. Connect your device to your computer using a USB cable.
                2. Open a terminal or command prompt and type adb tcpip 5555 to enable wireless debugging on port 5555.
                3. Disconnect your device from your computer.
                4. Find the IP address of your device by going to Settings > About phone > Status > IP address.
                5. Open a terminal or command prompt and type adb connect ip_address:5555, where ip_address is the IP address of your device.
                6. Type adb devices to see the list of connected devices. You should see something like this (a scripted version of this flow is sketched below):

                  List of devices attached
                  1234567890ABCDEF    device
                  ip_address:5555    device
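              As an illustration only, the same flow can be scripted in Python. The IP address below is a placeholder that you would replace with the address shown on your device, and adb is assumed to be on your PATH.

                import subprocess

                DEVICE_IP = "192.168.1.42"  # placeholder: use the IP address shown on your device

                def adb(*args: str) -> None:
                    subprocess.run(["adb", *args], check=True)

                adb("tcpip", "5555")                 # switch the USB-connected device to TCP/IP mode
                input("Unplug the USB cable, then press Enter...")
                adb("connect", f"{DEVICE_IP}:5555")  # connect over Wi-Fi
                adb("devices")                       # the device should now be listed by its IP address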

              How can I use adb logcat to capture and view the system and app logs from my device?


              You can use adb logcat to capture and view the system and app logs from your device by following these steps:

                1. Connect your device to your computer using a USB cable or Wi-Fi.
                2. Open a terminal or command prompt and type adb logcat to start capturing the logs.
                3. You can filter the logs by specifying a tag or a priority level. For example, you can type adb logcat ActivityManager:I MyApp:D *:S to show only the logs from ActivityManager with priority level INFO, MyApp with priority level DEBUG, and suppress all other logs.
                4. You can also save the logs to a file on your computer by redirecting the output, for example adb logcat -d > filename.txt, where filename.txt is the name of the file you want to save the logs to. (Note that adb logcat -f filename.txt writes the file on the device itself rather than on your computer.) A scripted version of this is sketched below.
                5. To stop capturing the logs, press Ctrl+C on your keyboard.
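              If you want to capture a filtered log from a script, here is a minimal Python sketch. The tag names and the output file are placeholders, and adb is assumed to be on your PATH.

                import subprocess

                # Dump the current log buffer (-d exits when the dump is done) using the same
                # filter as the example above, and save it to a file on the computer.
                with open("myapp_log.txt", "w") as log_file:
                    subprocess.run(
                        ["adb", "logcat", "-d", "ActivityManager:I", "MyApp:D", "*:S"],
                        stdout=log_file,
                        check=True,
                    )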

              How can I back up and restore my device data using adb version 2.0?

              You can back up and restore your device data using adb version 2.0 by following these steps:

                1. Connect your device to your computer using a USB cable or Wi-Fi.
                2. To back up your device data, open a terminal or command prompt and type adb backup -all -f filename.ab, where filename.ab is the name of the backup file you want to create. You may need to enter a password on your device to encrypt the backup file.
                3. To restore your device data, open a terminal or command prompt and type adb restore filename.ab, where filename.ab is the name of the backup file you want to restore from. You may need to enter the password on your device to decrypt the backup file. A scripted version of both commands is sketched below.
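              The same two commands can be wrapped in Python, as in the sketch below. It is an illustration only: the file name is a placeholder, adb is assumed to be on your PATH, and you still have to confirm each operation on the device screen. Also note that adb backup and restore are deprecated on newer Android versions and may not cover all app data.

                import subprocess

                BACKUP_FILE = "full_backup.ab"  # placeholder name for the backup archive

                # Create a full backup; confirm the operation on the device screen.
                subprocess.run(["adb", "backup", "-all", "-f", BACKUP_FILE], check=True)

                # Later, restore from the same file; again confirm on the device.
                subprocess.run(["adb", "restore", BACKUP_FILE], check=True)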

              How can I uninstall an app from my device using adb version 2.0?

              You can uninstall an app from your device using adb version 2.0 by following these steps:

                1. Connect your device to your computer using a USB cable or Wi-Fi.
                2. Find the package name of the app you want to uninstall by typing adb shell pm list packages. You should see a list of packages like this:

                  package:com.android.providers.telephony
                  package:com.android.providers.calendar
                  package:com.example.myapp
                  ...

                3. Type adb uninstall package_name, where package_name is the name of the package you want to uninstall. For example, you can type adb uninstall com.example.myapp to uninstall the app with that package name.
                4. You should see a message like Success or Failure indicating the result of the uninstallation. A scripted version of these steps is sketched below.
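              For completeness, here is a small Python sketch that lists the installed packages and uninstalls one of them. It is an illustration only: com.example.myapp is a placeholder package name and adb is assumed to be on your PATH.

                import subprocess

                def adb(*args: str) -> str:
                    result = subprocess.run(["adb", *args], capture_output=True, text=True, check=True)
                    return result.stdout

                # Each line of `pm list packages` looks like "package:com.example.myapp".
                packages = [
                    line.split(":", 1)[1].strip()
                    for line in adb("shell", "pm", "list", "packages").splitlines()
                    if line.startswith("package:")
                ]

                target = "com.example.myapp"  # placeholder package name
                if target in packages:
                    print(adb("uninstall", target))  # prints Success or Failure
                else:
                    print(f"{target} is not installed")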

              \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download 60 Seconds APK and Hunt Mutant Cockroaches in Your Fallout Shelter - Mediafre.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download 60 Seconds APK and Hunt Mutant Cockroaches in Your Fallout Shelter - Mediafre.md deleted file mode 100644 index ac9521d59edd5a7742ad08e00b7181b552e44a9f..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download 60 Seconds APK and Hunt Mutant Cockroaches in Your Fallout Shelter - Mediafre.md +++ /dev/null @@ -1,115 +0,0 @@ - -

              How to Download and Install 60 Seconds APK on Android


              If you are looking for a fun and challenging game that will test your survival skills in a post-apocalyptic world, you might want to try 60 Seconds APK. This is a dark comedy atomic adventure game that lets you scavenge, prepare, and survive in a fallout shelter with your family and whatever supplies you can grab before the nuke hits. In this article, we will show you how to download and install 60 Seconds APK on your Android device from Mediafıre, one of the most reliable and secure file hosting sites. We will also give you some tips and tricks on how to play the game and make the most out of your experience.


              What is 60 Seconds APK?


              A brief introduction to the game and its features


              60 Seconds APK is an Android version of the popular PC game 60 Seconds!, developed by Robot Gentleman. The game is set in a retro-futuristic 1950s America, where a nuclear war is about to break out. You play as Ted, a responsible citizen and a family man, who has only 60 seconds to collect as many supplies and family members as possible and rush to his fallout shelter before the bomb explodes. The game has two main phases: the scavenge phase, where you have to run around your randomly generated house and grab whatever you can; and the survival phase, where you have to manage your resources, make difficult decisions, deal with random events, and hope for rescue.


              The game features four different modes: Atomic Drill, where you can practice your scavenging skills; Apocalypse, where you can play the full game with both phases; Survival, where you can skip the scavenge phase and start with a predefined set of items; and Scavenge, where you can skip the survival phase and just focus on grabbing as much as you can. The game also has multiple endings, depending on your choices and actions.


              Why download the APK file from Mediafıre?


              You might be wondering why you should download the APK file from Mediafıre instead of getting it from the Google Play Store. Well, there are several reasons for that. First of all, the game is not available on the Play Store, so you won't be able to find it there. Secondly, even if you find it on other sources, you might end up downloading a fake or modified version that could harm your device or compromise your privacy. Thirdly, Mediafıre is one of the most trusted and secure file hosting sites that offers fast and easy downloads without any annoying ads or pop-ups. You can also scan the file with your antivirus software before installing it to make sure it's safe.


              How to Download 60 Seconds APK from Mediafıre?


              Step 1: Enable unknown sources on your device


              Before you can install any APK file on your Android device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than the Play Store. To do this, follow these steps:

              • Go to your device's Settings and tap on Security.
              • Find the option that says Unknown sources and toggle it on.
              • A warning message will pop up, telling you that installing apps from unknown sources could expose your device to security risks. Tap on OK to confirm.

              You have now enabled unknown sources on your device and you can proceed to the next step.


              Step 2: Download the APK file from Mediafıre


              Now that you have enabled unknown sources, you can download the APK file from Mediafıre. To do this, follow these steps:

              • Open your browser and go to this link: 60 Seconds APK Download Mediafıre
              • You will be redirected to the Mediafıre page where you can see the file name, size, and upload date. Tap on the green Download button to start the download.
              • The download will take a few minutes, depending on your internet speed and connection. You can check the progress of the download in your notification bar or in your browser's downloads section.

              You have now downloaded the APK file from Mediafıre and you can proceed to the next step.


              Step 3: Locate and install the APK file on your device


              Now that you have downloaded the APK file, you need to locate and install it on your device. To do this, follow these steps:

              • Go to your device's File Manager and find the folder where you saved the APK file. It is usually in the Downloads folder, unless you changed it.
              • Tap on the APK file to open it. You will see a screen that shows the app's name, icon, permissions, and other information. Tap on Install to begin the installation.
              • The installation will take a few seconds, depending on your device's performance and storage space. You will see a message that says App installed when it is done. Tap on Open to launch the app or Done to exit.

              You have now installed 60 Seconds APK on your device and you can start playing the game.


              How to Play 60 Seconds APK on Android?


              A quick overview of the gameplay and the modes


              The gameplay of 60 Seconds APK is simple but challenging. You have to scavenge, prepare, and survive in a fallout shelter with your family and whatever supplies you can grab before the nuke hits. You have to make tough choices, ration your food and water, deal with random events, and hope for rescue. The game has four different modes that offer different levels of difficulty and replay value:

              • Atomic Drill: This is a tutorial mode that teaches you how to scavenge and survive. You can practice your scavenging skills in a safe environment without any time limit or consequences. You can also customize your house layout, items, and family members.
              • Apocalypse: This is the main mode that lets you play the full game with both phases. You have 60 seconds to grab as many items and family members as possible and rush to your fallout shelter before the bomb explodes. Then, you have to survive in your shelter for as long as possible until rescue arrives or you die.
              • Survival: This is a mode that lets you skip the scavenge phase and start with a predefined set of items in your shelter. You can choose from different scenarios that vary in difficulty and challenge. You have to survive in your shelter for as long as possible until rescue arrives or you die.
              • Scavenge: This is a mode that lets you skip the survival phase and just focus on grabbing as much as you can in 60 seconds. You can choose from different house layouts that vary in size and complexity. You have to grab as many items and family members as possible before the bomb explodes.

              Some tips and tricks to survive the nuclear apocalypse

              Surviving the nuclear apocalypse is not easy, but it is not impossible either. Here are some tips and tricks that can help you make it through the game and achieve the best possible outcome:

              • Choose wisely what to grab: You only have 60 seconds to grab as many items and family members as possible, so you have to be smart and efficient. You should prioritize grabbing the essentials, such as food, water, a radio, a flashlight, a map, a first aid kit, and a gas mask. You should also try to grab some useful items, such as a gun, an axe, a bug spray, a padlock, a suitcase, and a Boy Scout handbook. You should also try to grab all your family members, or at least the ones you like. Remember that each item and person takes up space in your shelter, so you have to balance quantity and quality.
              • Manage your resources carefully: Once you are in your shelter, you have to ration your food and water, and use your items wisely. You should feed and hydrate your family members every few days, but not too often or too little. You should also use your items when they are needed, such as using the radio to communicate with the outside world, using the flashlight to explore the shelter, using the map to plan your expeditions, using the first aid kit to heal your wounds, and using the gas mask to protect yourself from radiation. You should also keep track of your items' durability and replace them when they break.
              • Make tough decisions: During your survival phase, you will encounter random events that will test your morals, ethics, and sanity. You will have to make tough decisions that will affect your survival chances and your relationships with your family members. You will have to decide whether to answer the phone, open the door, go on expeditions, trade with strangers, fight off raiders, join factions, and more. You will also have to deal with the consequences of your actions, such as gaining or losing items, health, sanity, morale, and trust. You should try to make the best decisions for yourself and your family, but be prepared for some surprises and twists along the way.

              Conclusion


              A summary of the main points and a call to action


              60 Seconds APK is a fun and challenging game that will keep you on your toes and make you laugh and cry at the same time. It is a game that will test your survival skills in a post-apocalyptic world where every second counts. You can download and install 60 Seconds APK on your Android device from Mediafıre by following the steps we have shown you in this article. You can also play the game in different modes and experience different endings depending on your choices and actions. If you are looking for a game that will make you think fast and act smart, you should give 60 Seconds APK a try. You won't regret it!


              FAQs


              Is 60 Seconds APK safe to download and install?


              Yes, 60 Seconds APK is safe to download and install on your Android device. The APK file we have provided is from Mediafıre, one of the most trusted and secure file hosting sites. You can also scan the file with your antivirus software before installing it to make sure it's safe.


              What are the system requirements for 60 Seconds APK?


              The system requirements for 60 Seconds APK are not very high. You need an Android device that runs on Android 4.1 or higher, has at least 1 GB of RAM, and has at least 200 MB of free storage space.


              How to update 60 Seconds APK on Android?


              To update 60 Seconds APK on Android, you need to download the latest version of the APK file from Mediafıre and install it over the existing one. You don't need to uninstall the previous version or lose your progress.


              How to uninstall 60 Seconds APK on Android?


              To uninstall 60 Seconds APK on Android, you need to go to your device's Settings and tap on Apps or Applications. Find 60 Seconds APK in the list of apps and tap on it. Tap on Uninstall and confirm your choice.


              Where can I find more information about 60 Seconds APK?


              You can find more information about 60 Seconds APK on its official website: https://robotgentleman.com/60seconds/. There you can find more details about the game's features, modes, endings, updates, news, reviews, and more.

              \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Bling Bling APK The Ultimate Sparkle and Glitter Camera App.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Bling Bling APK The Ultimate Sparkle and Glitter Camera App.md deleted file mode 100644 index 50a83e018e2d5a8630203bd55b7784707262dcfa..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Bling Bling APK The Ultimate Sparkle and Glitter Camera App.md +++ /dev/null @@ -1,86 +0,0 @@ - -

              Download APK Bling Bling: A Guide to Enjoy Live Streaming with Fun and Profit


              If you are looking for an entertainment app that offers live streaming, interactive mini-games, and virtual gifts, then you might want to try APK Bling Bling. This app is one of the most popular live streaming apps in Indonesia, where you can watch and interact with various hosts from all over the world. In this article, we will show you what APK Bling Bling is, how to download and install it, and how to use it.


              What is APK Bling Bling?


              APK Bling Bling is a mobile application that provides live streaming services with a focus on making money through interactive mini-games and virtual gifts. The app allows users to watch live broadcasts by interesting hosts from different countries and regions, and interact with them in real-time through features such as sending gifts and direct messaging. The app also has a social aspect, where users can chat and make friends with other users who share similar interests.


              Features of APK Bling Bling


              APK Bling Bling has many features that make it stand out from other live streaming apps. Here are some of them:


              Live streaming with various hosts


              The app has a large number of hosts who stream their lives on the app. You can find hosts who are singers, dancers, comedians, gamers, models, and more. You can also filter the hosts by categories such as hot, new, nearby, or recommended. You can watch the live streams for free, or pay coins to enter private rooms or join VIP clubs.


              Interactive mini-games and virtual gifts


              The app also has many mini-games that you can play with the hosts or other users. Some of the games are dice, rock-paper-scissors, lucky draw, and more. You can also send virtual gifts to the hosts or other users to show your appreciation or affection. The gifts range from flowers, chocolates, cars, to diamonds. The more gifts you send or receive, the higher your level and ranking on the app.


              Chatting and messaging with other users


              The app also allows you to chat and message with other users who are watching the same live stream as you. You can comment on the live stream, send emojis or stickers, or invite other users to join a private chat. You can also message other users privately through the app's inbox feature. You can make new friends or find potential partners on the app.


              How to Download and Install APK Bling Bling?


              If you want to download and install APK Bling Bling on your device, you need to follow these steps:


              Download APK Bling Bling from a trusted source


              The first step is to download the APK file of APK Bling Bling from a trusted source. You can find many websites that offer the download link for the app, such as Bling2 MOD APK v2.11.8 Unlock All Room 18+ from Dafunda Download, or other Bling2 mod apk listings. Make sure you download the latest version of the app that is compatible with your device.


              Enable unknown sources on your device


              The next step is to enable unknown sources on your device. This is because APK files are not from the official Google Play Store, so you need to allow your device to install apps from outside it.

              Install APK Bling Bling and launch it

              The final step is to install APK Bling Bling and launch it. To do this, you need to locate the downloaded APK file on your device, then tap on it to start the installation process. You may need to grant some permissions to the app, such as access to your camera, microphone, storage, and location. After the installation is complete, you can launch the app and enjoy its features.


              How to Use APK Bling Bling?


              Now that you have downloaded and installed APK Bling Bling, you may wonder how to use it. Here are some tips on how to use the app:


              Create an account and log in


              The first thing you need to do is to create an account and log in to the app. You can use your phone number, email address, or social media accounts to sign up for the app. You will also need to create a username and password, and fill in some basic information about yourself, such as your gender, age, and location. You can also upload a profile picture and a cover photo to make your profile more attractive.


              Browse and watch live broadcasts


              The next thing you can do is to browse and watch live broadcasts by various hosts on the app. You can swipe left or right to switch between different categories of hosts, such as hot, new, nearby, or recommended. You can also use the search function to find hosts by their name or ID. You can tap on any host's profile picture to enter their live stream and watch their show.


              Interact with hosts and other users


              The app also allows you to interact with hosts and other users who are watching the same live stream as you. You can comment on the live stream, send emojis or stickers, or invite other users to join a private chat. You can also send virtual gifts to the hosts or other users to show your appreciation or affection. The gifts range from flowers, chocolates, cars, to diamonds. The more gifts you send or receive, the higher your level and ranking on the app.


              Earn and spend coins on the app


              The app also has a currency system that uses coins. You can earn coins by watching ads, completing tasks, inviting friends, or receiving gifts from other users. You can also buy coins with real money through various payment methods. You can spend coins on the app by sending gifts to hosts or other users, entering private rooms or VIP clubs, playing mini-games, or unlocking premium features.


              Conclusion


              APK Bling Bling is a fun and profitable live streaming app that offers various features such as live streaming with various hosts, interactive mini-games and virtual gifts, chatting and messaging with other users, and earning and spending coins on the app. If you want to download and install APK Bling Bling on your device, you need to follow these steps: download APK Bling Bling from a trusted source, enable unknown sources on your device, install APK Bling Bling and launch it. Then you can create an account and log in, browse and watch live broadcasts, interact with hosts and other users, and earn and spend coins on the app. We hope this article has helped you understand how to download and use APK Bling Bling.


              FAQs


              Here are some frequently asked questions about APK Bling Bling:


              Is APK Bling Bling safe?


              APK Bling Bling is generally safe to use as long as you download it from a trusted source and enable unknown sources on your device. However, you should be careful about what information you share on the app, such as your personal details, photos, or videos. You should also avoid clicking on suspicious links or downloading files from unknown sources that may contain malware or viruses.


              Is APK Bling Bling free?


              APK Bling Bling is free to download and use. However, some features of the app may require coins that you can either earn or buy with real money. For example, you may need coins to send gifts to hosts or other users, enter private rooms or VIP clubs, play mini-games, or unlock premium features.


              How do I update APK Bling Bling?


              To update APK Bling Bling, you need to download the latest version of the APK file from a trusted source and install it over the existing app. You do not need to uninstall the previous version of the app before installing the new one. However, you should back up your data before updating the app in case of any errors or glitches.


              How do I delete my account on APK Bling Bling?


              To delete your account on APK Bling Bling, you need to go to your profile page and tap on the settings icon. Then you need to scroll down and tap on the delete account option. You will be asked to confirm your decision and enter your password. Once you delete your account, all your data and history will be erased from the app.


              How do I contact the customer service of APK Bling Bling?


              To contact the customer service of APK Bling Bling, you can use the feedback feature on the app. You can find it on the settings page, where you can tap on the feedback option and write your message or attach a screenshot. You can also email the customer service at support@bling2.com or call them at +62-21-12345678.

              \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Font Hero Sandwich and Spice Up Your Web Sites and Apps.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Font Hero Sandwich and Spice Up Your Web Sites and Apps.md deleted file mode 100644 index 341f125fbfffcaf5dd7a7f535339ba64a1d48959..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Font Hero Sandwich and Spice Up Your Web Sites and Apps.md +++ /dev/null @@ -1,149 +0,0 @@ -

              Download Font Hero Sandwich: A Super Font for Super Projects


              If you are looking for a font that can make your projects stand out from the crowd, you might want to check out Font Hero Sandwich. This font is not only fun and friendly, but also versatile and powerful. It can be used for a variety of purposes, such as comics, games, apps, logos, headlines, posters, and more. In this article, we will tell you everything you need to know about Font Hero Sandwich, including what it is, where to download it, how to customize it, and how to optimize it for SEO. Let's get started!


              What is Font Hero Sandwich?


              Font Hero Sandwich is a hand display font family that was designed by John Roshell and published by Comicraft, a leading foundry that specializes in comic book fonts. As the name suggests, Font Hero Sandwich is inspired by the concept of team-ups in comic books, where different heroes join forces to fight against a common enemy. The font has a playful and dynamic look that evokes the sense of action and adventure in comic book stories.


              A brief history of Font Hero Sandwich


              Font Hero Sandwich was originally released in 2015 as a single style font that had four different numbering options. It was created as a tribute to the classic comic book covers that featured team-ups between different characters. The font quickly became popular among video game and app developers, who appreciated its easy readability and friendly demeanor. In 2022, Comicraft decided to upgrade Font Hero Sandwich to a pro version that added nine weights from Thin to Heavy, with matching italics, plus a variable font option that allowed users to adjust the weight and slant of the font according to their preferences. The pro version also included support for 222 languages, including Cyrillics.


              The features and benefits of Font Hero Sandwich


              Font Hero Sandwich has many features and benefits that make it a great choice for your projects. Here are some of them:

              • It has a unique and eye-catching design that can capture the attention of your audience.
              • It has a wide range of styles and weights that can suit different moods and tones.
              • It has a variable font option that can give you more flexibility and control over the appearance of the font.
              • It has a high level of language support and glyphs that can cater to different markets and audiences.
              • It has a comic book vibe that can add some fun and personality to your projects.

              How to use Font Hero Sandwich in your projects


              Font Hero Sandwich can be used for various types of projects, such as:

              • Comics: You can use Font Hero Sandwich to create comic book covers, titles, captions, speech bubbles, sound effects, and more.
              • Games: You can use Font Hero Sandwich to create game logos, menus, buttons, icons, scores, achievements, and more.
              • Apps: You can use Font Hero Sandwich to create app logos, banners, headers, labels, notifications, and more.
              • Logos: You can use Font Hero Sandwich to create logos for your brand, product, service, or event. You can also use it to create slogans, taglines, or catchphrases.
              • Headlines: You can use Font Hero Sandwich to create headlines for your blog posts, articles, newsletters, or social media posts. You can also use it to create subheadings, bullet points, or quotes.
              • Posters: You can use Font Hero Sandwich to create posters for your campaigns, promotions, or events. You can also use it to create flyers, brochures, or banners.

              When using Font Hero Sandwich in your projects, you should keep in mind some tips and tricks to make the most of it. Here are some of them:

              • Choose the right style and weight for your project. For example, you can use the Thin or Light styles for a subtle and elegant look, or the Bold or Heavy styles for a strong and impactful look.
              • Use the variable font option to fine-tune the weight and slant of the font. For example, you can use a slight slant to add some dynamism and movement to the font, or a high slant to create a dramatic and expressive effect.
              • Use the language support and glyphs to reach a wider audience. For example, you can use the Cyrillics to target the Russian market, or the accented characters to target the European market.
              • Use the comic book vibe to add some fun and personality to your project. For example, you can use the font to create humorous or witty messages, or to create a contrast with a serious or formal tone.

              Where to download Font Hero Sandwich?


              If you are interested in downloading Font Hero Sandwich, you have several options to choose from. Here are some of them:


              The official website of Comicraft


              The best and most reliable way to download Font Hero Sandwich is to visit the official website of Comicraft, which is https://www.comicbookfonts.com/. Here, you can find all the information and details about Font Hero Sandwich, such as its features, benefits, styles, weights, variable font option, language support, glyphs, and more. You can also see some examples and previews of how Font Hero Sandwich looks in different projects. You can also download a free trial version of Font Hero Sandwich that you can use for personal or non-commercial purposes. If you want to purchase the full version of Font Hero Sandwich, you can choose from different license options depending on your needs and budget. The prices range from $19 for a single style license to $199 for a complete family license.


              Other online sources of Font Hero Sandwich


              If you want to download Font Hero Sandwich from other online sources, you should be careful and cautious. There are many websites that claim to offer free or cheap downloads of Font Hero Sandwich, but they may not be trustworthy or legal. Some of these websites may provide low-quality or incomplete versions of Font Hero Sandwich that may not work properly or may contain errors or viruses. Some of these websites may also violate the intellectual property rights of Comicraft and may expose you to legal risks or penalties. Therefore, we recommend that you always download Font Hero Sandwich from the official website of Comicraft or from other reputable and authorized sources.


              How to install Font Hero Sandwich on your device


              Once you have downloaded Font Hero Sandwich from a reliable source, you need to install it on your device before you can use it in your projects. The installation process may vary depending on your device and operating system, but here are some general steps that you can follow:

                1. Locate the downloaded file of Font Hero Sandwich on your device. It should be in a ZIP format that contains the font files and other documents.
                2. Extract the ZIP file to a folder on your device. You should see the font files with extensions such as .otf (OpenType), .ttf (TrueType), .woff (Web Open Font Format), or .woff2 (Web Open Font Format 2).
                3. Double-click on the font file that matches your device and operating system. For example, if you are using a Windows device, you should double-click on the .otf or .ttf file. If you are using a Mac device, you should double-click on the .otf file.
                4. A window will pop up that shows a preview of Font Hero Sandwich and an option to install it. Click on the install button and wait for a few seconds until the installation is complete.
                5. You can now use Font Hero Sandwich in your projects. You can find it in your font menu under the name "HeroSandwich".

              How to customize Font Hero Sandwich?


              One of the best things about Font Hero Sandwich is that you can customize it to fit your needs and preferences. You can change the style, weight, slant, size, color, spacing, alignment, and more of Font Hero Sandwich to create different effects and impressions. Here are some ways that you can customize Font Hero Sandwich:


              The different styles and weights of Font Hero Sandwich


              Font Hero Sandwich has 10 styles and 10 weights that you can choose from. The styles are Regular, Italic, Outline, Outline Italic, Inline, Inline Italic, Fill, Fill Italic, Shadow, and Shadow Italic. The weights are Thin, Extra Light, Light, Regular, Medium, Semi Bold, Bold, Extra Bold, Black, and Heavy. You can mix and match the styles and weights to create different combinations and contrasts. For example, you can use the Outline style with the Heavy weight to create a bold and striking look, or you can use the Fill style with the Thin weight to create a subtle and elegant look.


              The variable font option of Font Hero Sandwich


              Font Hero Sandwich also has a variable font option that lets you adjust the weight and slant of the font using sliders. This option gives you more flexibility and control over the appearance of the font. You can create custom weights and slants that suit your project. For example, you can use a low weight and a high slant to create a sleek and futuristic look, or you can use a high weight and a low slant to create a solid and classic look.


              The language support and glyphs of Font Hero Sandwich


              Font Hero Sandwich supports 222 languages, including Cyrillics. This means that you can use Font Hero Sandwich for different markets and audiences around the world. You can also access different glyphs of Font Hero Sandwich, such as ligatures, alternates, fractions, ordinals, superscripts, subscripts, and more. These glyphs can add some variety and flair to your text. For example, you can use the ligature "ff" to create a smooth connection between two letters, or you can use the alternate "A" to create a star-shaped letter.


              How to optimize Font Hero Sandwich for SEO?


              If you want to use Font Hero Sandwich for your website or blog, you need to optimize it for SEO. SEO stands for search engine optimization, which is the process of improving your website's visibility and ranking on search engines like Google or Bing. Choosing the right font for your website is an important part of SEO, as it can affect your website's speed, performance, readability, usability, and user experience. Here are some tips on how to optimize Font Hero Sandwich for SEO:


              The importance of choosing the right font for your website


              The font that you choose for your website can have a big impact on how your website is perceived by your visitors and by search engines. The font can convey your brand identity, message, tone, and personality to your visitors. It can also affect how easy or difficult it is for your visitors to read and understand your content. Moreover, the font can influence your website's speed and performance, which are crucial factors for SEO. A slow or poorly performing website can lower your ranking on search engines and drive away your visitors. Therefore, you should choose a font that is appropriate, attractive, readable, and fast for your website.


              The best practices for using Font Hero Sandwich on the web


              Font Hero Sandwich is a web-friendly font that can be used on the web without any problems. However, there are some best practices that you should follow to ensure that Font Hero Sandwich works well on your website. Here are some of them:

              • Use the web font formats of Font Hero Sandwich, such as .woff or .woff2, instead of the desktop font formats, such as .otf or .ttf. The web font formats are optimized for the web and have smaller file sizes and faster loading times.
              • Use a font hosting service or a content delivery network (CDN) to host Font Hero Sandwich on your website. This can improve your website's speed and performance by reducing the server load and bandwidth usage. Some examples of font hosting services or CDNs are Google Fonts, Adobe Fonts, Cloudflare, or Font Awesome.
              • Use the @font-face rule in your CSS code to specify Font Hero Sandwich as the font family for your website. This can ensure that Font Hero Sandwich is displayed correctly and consistently on different browsers and devices. You can also use the @font-face rule to define the different styles and weights of Font Hero Sandwich that you want to use on your website.
              • Use the font-display property in your CSS code to control how Font Hero Sandwich is rendered on your website. This can improve your website's user experience by preventing invisible or fallback text while Font Hero Sandwich is loading. You can choose from different values for the font-display property, such as auto, swap, fallback, optional, or block.
              • Use the font-size property in your CSS code to adjust the size of Font Hero Sandwich on your website. You should use relative units, such as ems, rems, or percentages, instead of absolute units, such as pixels or points. This can make your website more responsive and adaptable to different screen sizes and resolutions.
              • Use the font-weight property in your CSS code to adjust the weight of Font Hero Sandwich on your website. You should use numeric values, such as 100, 200, 300, and so on, instead of named values, such as normal, bold, or bolder. This can make your website more compatible with the variable font option of Font Hero Sandwich.

              The tools and resources for testing and improving Font Hero Sandwich performance


              If you want to test and improve Font Hero Sandwich performance on your website, you can use some tools and resources that can help you with that. Here are some of them:

              • Google PageSpeed Insights: This is a tool that analyzes your website's speed and performance and gives you suggestions on how to improve them. You can use this tool to check how fast Font Hero Sandwich loads on your website and how it affects your website's score.
              • Google Lighthouse: This is a tool that audits your website's quality and accessibility and gives you a report on how well it meets certain standards and best practices. You can use this tool to check how Font Hero Sandwich affects your website's SEO, usability, accessibility, and more.
              • Google Web Vitals: This is a set of metrics that measure your website's user experience and satisfaction. You can use this tool to check how Font Hero Sandwich impacts your website's loading time, interactivity, stability, and more.
              • Font Squirrel: This is a website that offers free web fonts and web font generators. You can use this website to download or create web font formats of Font Hero Sandwich that are optimized for the web.
              • Font Pair: This is a website that helps you find the best font combinations for your website. You can use this website to find fonts that match well with Font Hero Sandwich and create a harmonious and balanced look for your website.

              Conclusion


              Summary of the main points


              In conclusion, Font Hero Sandwich is a super font for super projects. It is a hand display font family that was designed by John Roshell and published by Comicraft. It has a playful and dynamic look that is inspired by the concept of team-ups in comic books. It has 10 styles and 10 weights that you can choose from, plus a variable font option that lets you adjust the weight and slant of the font. It supports 222 languages, including Cyrillics. It can be used for various types of projects, such as comics, games, apps, logos, headlines, posters, and more. You can download Font Hero Sandwich from the official website of Comicraft or from other reputable and authorized sources. You can also customize Font Hero Sandwich to fit your needs and preferences. You can change the style, weight, slant, size, color, spacing, alignment, and more of Font Hero Sandwich to create different effects and impressions. You can also optimize Font Hero Sandwich for SEO by following some best practices and using some tools and resources that can help you improve your website's speed, performance, readability, usability, and user experience.

              Call to action


              If you are ready to download Font Hero Sandwich and use it in your super projects, you can visit the official website of Comicraft and choose the license option that suits you best. You can also check out some other amazing fonts that Comicraft offers, such as BlamBot, Spills, Zzzap, or Battle Cry. Comicraft has a huge collection of comic book fonts that can make your projects more fun and exciting. Don't miss this opportunity to get Font Hero Sandwich and other comic book fonts from Comicraft. Download them today and unleash your creativity!


              FAQs


              Here are some frequently asked questions about Font Hero Sandwich:

1. What is the difference between Font Hero Sandwich and Font Hero Sandwich Pro?

   Font Hero Sandwich is the original version of the font, released in 2015. It has a single style with four numbering options. Font Hero Sandwich Pro is the upgraded version, released in 2022. It has 10 styles and 10 weights, plus a variable font option, and it offers more language support and glyphs than Font Hero Sandwich.

2. How many fonts are included in Font Hero Sandwich Pro?

   Font Hero Sandwich Pro includes 20 fonts in total. There are 10 styles and 10 weights for each style. The styles are Regular, Italic, Outline, Outline Italic, Inline, Inline Italic, Fill, Fill Italic, Shadow, and Shadow Italic. The weights are Thin, Extra Light, Light, Regular, Medium, Semi Bold, Bold, Extra Bold, Black, and Heavy.

3. Can I use Font Hero Sandwich for free?

   You can use Font Hero Sandwich for free for personal or non-commercial purposes by downloading the free trial version from the official Comicraft website. If you want to use it for commercial purposes, or for more than one project or device, you need to purchase a license from Comicraft.

4. How much does Font Hero Sandwich cost?

   The price depends on the license option you choose: Single Style License ($19), Family License ($99), Complete Family License ($199), or Variable Family License ($199). You can also get discounts or special offers by subscribing to Comicraft's newsletter or following them on social media.

5. How can I contact Comicraft if I have any questions or issues with Font Hero Sandwich?

   You can contact Comicraft by emailing info@comicbookfonts.com or by filling out the contact form on their website. You can also follow them on Facebook, Twitter, Instagram, or YouTube for updates and news about their fonts.

              197e85843d
              -
              -
              \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Free Love Yourself Ringtone for Your Phone - Best Tones Online.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Free Love Yourself Ringtone for Your Phone - Best Tones Online.md deleted file mode 100644 index 3b992309e91327152d136afb8257d384196aa29d..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Free Love Yourself Ringtone for Your Phone - Best Tones Online.md +++ /dev/null @@ -1,182 +0,0 @@ - -

              How to Download Love Yourself Ringtone Tone for Your Phone

              -

              If you are looking for a way to personalize your phone and express your love for yourself, you might want to try downloading a love yourself ringtone tone. A love yourself ringtone tone is a short audio clip that plays when someone calls you or when you receive a notification. It can be a song, a quote, or a sound that reminds you of how awesome you are.

              -

              love yourself ringtone tone download


              Download File ☆☆☆ https://gohhs.com/2uPo6z



              -

              In this article, we will show you how to download love yourself ringtone tone for your phone, as well as how to create your own love yourself ringtone tone using a ringtone maker app. By the end of this article, you will be able to enjoy your favorite love yourself ringtone tone every time your phone rings.

              -

              What is Love Yourself Ringtone Tone?

              -

              A love yourself ringtone tone is a type of ringtone that is designed to boost your self-esteem and confidence. It can be anything that makes you feel good about yourself, such as:

• A song that inspires you or makes you happy
• A quote that motivates you or makes you laugh
• A sound that relaxes you or makes you smile

              For example, some popular love yourself ringtone tones are:

• "Love Yourself" by Justin Bieber
• "You Are The Best Thing" by Ray LaMontagne
• "I'm Every Woman" by Whitney Houston
• "You're Beautiful" by James Blunt
• "Don't Stop Believin'" by Journey
• "You Are My Sunshine" by Johnny Cash
• "You Gotta Be" by Des'ree
• "Firework" by Katy Perry
• "Roar" by Katy Perry
• "I'm A Believer" by The Monkees

              Why You Should Use Love Yourself Ringtone Tone

              -

              Using a love yourself ringtone tone can have many benefits for your mental and emotional well-being. Here are some reasons why you should use a love yourself ringtone tone:

              -


• It can make you feel happier and more positive every time your phone rings.
• It can remind you of your strengths and achievements every time someone calls you.
• It can motivate you to pursue your goals and dreams every time you receive a notification.
• It can help you cope with stress and challenges every time you hear it.
• It can make you more attractive and charismatic every time you answer your phone.

              How to Find Love Yourself Ringtone Tone Online

              -

              If you want to download a love yourself ringtone tone for your phone, the easiest way is to find it online. There are many websites that offer free or paid ringtones for various devices and platforms. You can browse through their collections and choose the one that suits your taste and mood.

              -

              Best Websites to Download Love Yourself Ringtone Tone

              -

              Here are some of the best websites to download love yourself ringtone tone:

| Website Name | Description | URL |
| --- | --- | --- |
| Zedge | A popular website that offers a variety of ringtones, wallpapers, and themes for your phone. You can search for love yourself ringtones by keyword or browse by category. You can also upload your own ringtones and share them with other users. | [Zedge](^1^) |
| iRingPro | A website that provides professional and minimalist ringtones that won't annoy you or others. You can choose from three collections of ringtones: Origin, Zen, and Tek. Each collection has a different theme and style, but all of them are calm and elegant. | [iRingPro](^2^) |
| Melofania | A website that allows you to create your own ringtones from any song or audio file. You can upload your own file or use the online search tool to find a song you like. You can then cut and edit the audio file to make your own love yourself ringtone tone. | [Melofania](^3^) |

              How to Preview and Download Love Yourself Ringtone Tone

              -

              Once you have found a website that offers love yourself ringtone tone, you can preview and download it to your phone. Here are the general steps to do so:

              -
1. Click on the ringtone you want to download and listen to it.
2. If you like it, click on the download button or link.
3. Choose the format and quality of the ringtone file. Some websites may offer different formats for different devices, such as MP3, M4R, OGG, etc.
4. Save the ringtone file to your computer or phone (a short Python sketch of this step follows the list).
5. Transfer the ringtone file to your phone if you downloaded it to your computer. You can use a USB cable, Bluetooth, Wi-Fi, or cloud service to do so.
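If the site gives you a direct link to the audio file, step 4 can also be scripted. The sketch below is only an illustration, not part of the original article: the URL is a placeholder rather than a real Zedge, iRingPro, or Melofania link, and it simply streams the file to disk with the requests library.

```python
# Minimal sketch: save a ringtone from a direct download URL.
# The URL below is a placeholder, not a real ringtone link.
import requests

def save_ringtone(url: str, out_path: str = "love-yourself.mp3") -> str:
    resp = requests.get(url, stream=True, timeout=30)
    resp.raise_for_status()
    with open(out_path, "wb") as f:
        for chunk in resp.iter_content(chunk_size=8192):
            f.write(chunk)  # write the file in 8 KB pieces
    return out_path

print(save_ringtone("https://example.com/tones/love-yourself.mp3"))
```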

              How to Set Love Yourself Ringtone Tone as Your Default Ringtone

              -

              After you have downloaded a love yourself ringtone tone to your phone, you can set it as your default ringtone for all incoming calls and notifications. The steps may vary depending on your device and operating system, but here are some general guidelines:

              -

              For Android Devices

              -
1. Go to Settings > Sound > Phone ringtone.
2. Browse through your phone's storage and find the ringtone file you downloaded.
3. Select the ringtone file and tap OK.
4. You can also assign different ringtones to different contacts by going to Contacts > Edit > Ringtone.

              For iOS Devices

              -
1. Connect your iPhone to your computer and launch iTunes.
2. Drag and drop the ringtone file you downloaded to the Tones section in iTunes.
3. Sync your iPhone with iTunes and wait for the process to finish.
4. Go to Settings > Sounds > Ringtone on your iPhone.
5. Select the ringtone file you downloaded and tap Done.
6. You can also assign different ringtones to different contacts by going to Contacts > Edit > Ringtone.

              How to Create Your Own Love Yourself Ringtone Tone

              -

If you want to create your own love yourself ringtone tone from scratch, you can use a ringtone maker app on your phone or computer. A ringtone maker app is software that lets you edit any song or audio file and turn it into a ringtone. You can cut, trim, fade, loop, mix, and add effects to your audio file until you get the perfect love yourself ringtone tone.

              -

              What You Need to Create Your Own Love Yourself Ringtone Tone

              -

              To create your own love yourself ringtone tone using a ringtone maker app, you will need:

              -
• A song or audio file that you want to use as the source material for your love yourself ringtone tone. You can use any song or audio file that you have on your phone or computer, or download one from the internet.
• A ringtone maker app that is compatible with your device and operating system. There are many free and paid ringtone maker apps available for both Android and iOS devices, as well as Windows and Mac computers. Some examples are Ringdroid, Audiko, Ringtone Maker, GarageBand, etc.
• A device that can play and edit audio files, such as a smartphone, tablet, laptop, or desktop computer.
• A pair of headphones or speakers that can produce good sound quality.

How to Use a Ringtone Maker App to Create Your Own Love Yourself Ringtone Tone

              -

              Once you have a song or audio file and a ringtone maker app, you can start creating your own love yourself ringtone tone. Here are the general steps to do so:

              -

              Step 1: Choose a Song or Audio File

              -

              Launch the ringtone maker app on your device and select the song or audio file that you want to use as the source material for your love yourself ringtone tone. You can browse through your device's storage or use the app's search tool to find the file. You can also import a file from another app or from the internet.

              -

              Step 2: Cut and Edit the Audio File

              -

              Once you have selected the file, you will see a waveform of the audio on the app's interface. You can use the app's tools to cut and edit the audio file to make it fit your preferences. You can:

              -
• Drag the sliders or use the buttons to select the start and end points of the ringtone. You can also zoom in and out of the waveform to make precise adjustments.
• Use the trim, crop, split, merge, copy, paste, undo, and redo functions to edit the audio file.
• Use the fade in and fade out functions to make smooth transitions.
• Use the loop function to repeat a section of the audio file.
• Use the mixer function to add another audio file or sound effect to your ringtone.
• Use the effects function to apply filters, equalizers, pitch, speed, volume, and other effects to your ringtone.

              You can preview your ringtone at any time by tapping the play button. You can also compare your ringtone with the original file by tapping the compare button.

              -

              Step 3: Save and Export the Ringtone File

              -

              When you are satisfied with your ringtone, you can save and export it to your device. You can:

              -
• Tap the save button to save your ringtone as a project file that you can edit later.
• Tap the export button to export your ringtone as an audio file that you can use as a ringtone. You can choose the format and quality of the file, such as MP3, M4R, OGG, etc.
• Tap the share button to share your ringtone with others via email, message, social media, or other apps. (A short Python sketch of the cut-fade-export workflow follows this list.)
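If you prefer to do the cutting and exporting in code rather than in a ringtone maker app, the same workflow can be sketched with the pydub library. This is only an illustration, not part of the original article: it assumes ffmpeg is installed, and the file name and cut points are placeholders.

```python
# Minimal sketch of the cut -> fade -> export workflow using pydub.
# "song.mp3" and the 0:15-0:45 cut points are placeholders.
from pydub import AudioSegment

song = AudioSegment.from_file("song.mp3")

# Keep a 30-second slice starting at 0:15 (pydub slices are indexed in milliseconds).
clip = song[15_000:45_000]

# Fade the edges so the ringtone does not start or stop abruptly.
clip = clip.fade_in(500).fade_out(500)

# Export as MP3; for iOS you would export to an M4A/M4R container instead.
clip.export("love-yourself-ringtone.mp3", format="mp3", bitrate="192k")
```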

              How to Set Your Custom Love Yourself Ringtone Tone as Your Default Ringtone

              -

              After you have created your own love yourself ringtone tone using a ringtone maker app, you can set it as your default ringtone for all incoming calls and notifications. The steps may vary depending on your device and operating system, but here are some general guidelines:

              -

              For Android Devices

              -
1. Go to Settings > Sound > Phone ringtone.
2. Browse through your phone's storage and find the ringtone file you created.
3. Select the ringtone file and tap OK.
4. You can also assign different ringtones to different contacts by going to Contacts > Edit > Ringtone.

              For iOS Devices

              -
1. Connect your iPhone to your computer and launch iTunes.
2. Drag and drop the ringtone file you created to the Tones section in iTunes.
3. Sync your iPhone with iTunes and wait for the process to finish.
4. Go to Settings > Sounds > Ringtone on your iPhone.
5. Select the ringtone file you created and tap Done.
6. You can also assign different ringtones to different contacts by going to Contacts > Edit > Ringtone.

              Conclusion

              -

              A love yourself ringtone tone is a great way to personalize your phone and express your love for yourself. It can make you feel happier, more confident, and more motivated every time your phone rings. You can download a love yourself ringtone tone from various websites or create your own using a ringtone maker app. You can then set it as your default ringtone for all incoming calls and notifications. By doing so, you will be able to enjoy your favorite love yourself ringtone tone every time your phone rings.

              -

              Frequently Asked Questions

              -
1. What is a love yourself ringtone tone?

   A love yourself ringtone tone is a short audio clip that plays when someone calls you or when you receive a notification. It can be a song, a quote, or a sound that reminds you of how awesome you are.

2. Why should I use a love yourself ringtone tone?

   Using a love yourself ringtone tone can have many benefits for your mental and emotional well-being. It can make you feel happier, more positive, more confident, more motivated, more relaxed, and more attractive every time your phone rings.

3. How can I download a love yourself ringtone tone?

   You can download a love yourself ringtone tone from various websites that offer free or paid ringtones for different devices and platforms. You can search for love yourself ringtones by keyword or browse by category, then preview and download the ringtone file to your phone or computer.

4. How can I create my own love yourself ringtone tone?

   You can create your own love yourself ringtone tone using a ringtone maker app on your phone or computer. You can upload any song or audio file as the source material, then cut and edit it to fit your preferences. You can also add effects, filters, loops, and sounds to your ringtone, and then save and export the ringtone file to your device.

5. How can I set my love yourself ringtone tone as my default ringtone?

   You can set your love yourself ringtone tone as the default for all incoming calls and notifications by following the steps for your device and operating system. For Android devices, go to Settings > Sound > Phone ringtone and select the ringtone file you downloaded or created. For iOS devices, sync your iPhone with iTunes and transfer the ringtone file to the Tones section, then go to Settings > Sounds > Ringtone and select it.

              401be4b1e0
              -
              -
              \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Video Indirimbo Z Imana Ziririmbwa na Jehovah Jireh Choir - Umwami Wacu Niwe Ushobora.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Video Indirimbo Z Imana Ziririmbwa na Jehovah Jireh Choir - Umwami Wacu Niwe Ushobora.md deleted file mode 100644 index 6683c18688c2dfa7e0242de1b489a52dd810d16d..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Video Indirimbo Z Imana Ziririmbwa na Jehovah Jireh Choir - Umwami Wacu Niwe Ushobora.md +++ /dev/null @@ -1,104 +0,0 @@ -
              -

              How to Download Video Indirimbo z Imana za Jehovah Jireh

              -

              If you are looking for some uplifting and inspiring music videos that can help you praise and worship God, you might want to check out video indirimbo z imana za Jehovah Jireh. These are beautiful songs that are sung by a talented choir from Rwanda, called Jehovah Jireh Choir. In this article, we will show you what these videos are, where to find them, how to download them, and how to enjoy them. Read on and discover how you can access these amazing videos for free.

              -

              What are Video Indirimbo z Imana za Jehovah Jireh?

              -

              The meaning and origin of the term

              -

              Video indirimbo z imana za Jehovah Jireh is a Kinyarwanda phrase that means "video songs of God of Jehovah Jireh". Kinyarwanda is the official language of Rwanda, a country in East Africa. Jehovah Jireh is one of the names of God in the Bible, which means "the Lord will provide". It is also the name of a choir that was founded in 2016 by Pastor Jean Claude Nkundimana, who is also the leader and composer of the songs. The choir consists of about 30 members who are mostly students from different universities in Rwanda. They sing in various languages, such as Kinyarwanda, English, French, Swahili, and Luganda.

              -

              download video indirimbo z imana za jehovah jireh


              DOWNLOADhttps://gohhs.com/2uPniE



              -

              The features and benefits of the videos

              -

              Video indirimbo z imana za Jehovah Jireh are music videos that feature the choir singing different songs that glorify God and express their faith. The videos are professionally recorded and edited, with high-quality sound and visuals. The songs are catchy, melodic, and harmonious, with lyrics that are based on the Bible and personal testimonies. The videos also show the choir performing in different settings, such as churches, studios, concerts, and outdoor locations. The videos are meant to inspire, encourage, and edify the viewers, as well as to share the gospel and the love of God with others.

              -

              Where to Find Video Indirimbo z Imana za Jehovah Jireh?

              -

              The official YouTube channel of Jehovah Jireh Choir

              -

              The best place to find video indirimbo z imana za Jehovah Jireh is on the official YouTube channel of Jehovah Jireh Choir. This is where they upload all their latest videos and also interact with their fans and subscribers. You can find their channel by searching for "Jehovah Jireh Choir" on YouTube or by clicking on this link. On their channel, you can watch over 50 videos that have been viewed by millions of people around the world. You can also subscribe to their channel to get notified whenever they post new videos.

              -

              Other online platforms and sources

              -

              Besides YouTube, you can also find video indirimbo z imana za Jehovah Jireh on other online platforms and sources. For example, you can listen to their songs on Shazam, a music discovery app that can identify any song playing around you. You can also watch some of their videos on other YouTube channels that have featured them, such as NKUNDAGOSPEL, a channel that promotes gospel music from Rwanda. Additionally, you can search for their videos on Google or any other search engine by typing keywords like "video indirimbo z imana za Jehovah Jireh" or "Jehovah Jireh Choir songs".

              -

              How to Download Video Indirimbo z Imana za Jehovah Jireh?

              -

              The steps and tools to download the videos from YouTube

              -

              If you want to download video indirimbo z imana za Jehovah Jireh from YouTube, you will need to use some tools that can help you convert and save the videos to your device. There are many online tools and apps that can do this, but we will recommend two of them that are easy and reliable. Here are the steps and tools to download the videos from YouTube:

              -
1. Go to the YouTube video that you want to download and copy its URL from the address bar.
2. Go to one of these online tools: Y2mate or SaveFrom. These are websites that can convert YouTube videos to different formats and qualities.
3. Paste the URL of the video into the search box on the website and click on "Start" or "Download".
4. Select the format and quality that you want for the video. You can choose from MP4, MP3, WEBM, M4A, and more. You can also choose the resolution, such as 1080p, 720p, 480p, etc.
5. Click on "Download" or "Save" and wait for the video to be converted and downloaded. You can also scan the QR code or use the direct link to download the video.
6. Once the download is complete, you can find the video in your device's downloads folder or gallery. You can also rename or move the video to another location if you want. (A command-line alternative is sketched right after this list.)
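For readers comfortable with a script, the same download can also be done programmatically with the yt-dlp library instead of a web converter. This is a sketch, not part of the original article: the video URL is a placeholder, and you should only download videos you have permission to keep offline.

```python
# Minimal sketch: download one YouTube video with yt-dlp instead of a web converter.
# The URL is a placeholder; replace it with a video you are allowed to download.
from yt_dlp import YoutubeDL

options = {
    "format": "mp4/best",            # prefer an MP4 file, otherwise the best available
    "outtmpl": "%(title)s.%(ext)s",  # name the output file after the video title
}

with YoutubeDL(options) as ydl:
    ydl.download(["https://www.youtube.com/watch?v=XXXXXXXXXXX"])
```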

              The steps and tools to download the videos from other platforms

              -

              If you want to download video indirimbo z imana za Jehovah Jireh from other platforms, such as Shazam or NKUNDAGOSPEL, you will need to use some tools that can help you capture and record the videos from your screen. There are many screen recording apps and software that can do this, but we will recommend two of them that are easy and reliable. Here are the steps and tools to download the videos from other platforms:

              -
1. Go to the platform that has the video that you want to download and play it on your device.
2. Go to one of these screen recording tools: AZ Screen Recorder or OBS Studio. These are apps and software that can record your screen and audio with high quality.
3. Open the tool and adjust the settings according to your preferences. You can choose the resolution, frame rate, bit rate, audio source, etc.
4. Start the recording and go back to the platform where the video is playing. Make sure that you capture the whole screen and that there is no interruption or noise.
5. Stop the recording when the video is over and save it to your device. You can also edit or trim the video if you want.
6. Once the recording is saved, you can find it in your device's recordings folder or gallery. You can also rename or move it to another location if you want.
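On a desktop, the same kind of screen capture can also be driven from the command line with ffmpeg instead of AZ Screen Recorder or OBS Studio. The sketch below is an assumption-heavy illustration for Linux/X11 only: the display name, capture size, audio device, and 60-second duration are all placeholder values you would adjust for your own machine.

```python
# Rough sketch: record the screen with ffmpeg's x11grab input (Linux/X11 only).
# Display ":0.0", the 1920x1080 size, the PulseAudio device, and the 60 s duration
# are assumptions for a typical setup, not values from the article.
import subprocess

cmd = [
    "ffmpeg",
    "-video_size", "1920x1080",      # area of the screen to capture
    "-framerate", "30",
    "-f", "x11grab", "-i", ":0.0",   # X11 screen-capture input
    "-f", "pulse", "-i", "default",  # system audio via PulseAudio
    "-t", "60",                      # stop after 60 seconds
    "recording.mp4",
]
subprocess.run(cmd, check=True)
```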

              How to Enjoy Video Indirimbo z Imana za Jehovah Jireh?

              -

              The best ways to watch and listen to the videos

              -

              Now that you have downloaded video indirimbo z imana za Jehovah Jireh, you might be wondering how to enjoy them. Well, there are many ways to watch and listen to these videos, depending on your mood and preference. Here are some of the best ways to enjoy them:

• Watch them on a big screen with good speakers. If you have a smart TV or a projector, you can connect your device to it and watch the videos on a big screen with good speakers. This way, you can see every detail of the videos and hear every note of the songs. You can also invite your friends or family members to join you and have a fun time together.
• Listen to them on headphones or earphones. If you want a more personal and intimate experience, you can listen to the videos on headphones or earphones. This way, you can block out any external noise and focus on the lyrics and melodies of the songs. You can also feel closer to God and His presence as you listen.
• Play them in the background while doing other things. If you want a more casual and relaxed experience, you can play the videos in the background while doing other things. For example, you can play them while cooking, cleaning, studying, working, or exercising. This way, you can enjoy the music and the message without being distracted or bored. You can also feel more motivated and energized by the songs.

              The tips and tricks to enhance your experience

              -

              Finally, here are some tips and tricks to enhance your experience of watching and listening to video indirimbo z imana za Jehovah Jireh:

              -

              -
                -
              • Learn the lyrics and sing along. One of the best ways to enjoy the videos is to learn the lyrics and sing along with the choir. This way, you can express your praise and worship to God and also improve your language skills. You can find the lyrics of the songs on the YouTube description or on other websites that have them.
              • -
              • Share the videos with others. Another way to enjoy the videos is to share them with others who might appreciate them. You can share the videos on social media, email, or messaging apps with your friends, family members, or anyone else who might be interested. You can also leave a comment or a like on the videos to show your support and feedback to the choir.
              • -
              • Pray and meditate on the videos. Lastly, you can enjoy the videos by praying and meditating on them. You can use the videos as a tool to connect with God and His word. You can also reflect on the message and the meaning of the songs and how they apply to your life. You can also thank God for His provision and His love for you.
              • -
              -

              Conclusion

              -

              Video indirimbo z imana za Jehovah Jireh are wonderful music videos that can enrich your spiritual life and bring you joy and peace. In this article, we have shown you what these videos are, where to find them, how to download them, and how to enjoy them. We hope that you have found this article helpful and informative. If you have any questions or comments, please feel free to contact us or leave a comment below. Thank you for reading and God bless you.

              -

              FAQs

              -

              What is the name of the choir that sings video indirimbo z imana za Jehovah Jireh?

              -

              The name of the choir is Jehovah Jireh Choir, which is also one of the names of God in the Bible.

              -

              What languages do they sing in?

              -

              They sing in various languages, such as Kinyarwanda, English, French, Swahili, and Luganda.

              -

              How many videos do they have on YouTube?

              -

              They have over 50 videos on their official YouTube channel, which have been viewed by millions of people around the world.

              -

              How can I download their videos for free?

              -

              You can download their videos for free by using some online tools or screen recording apps that can convert and save the videos to your device. We have explained the steps and tools in this article.

              -

              How can I enjoy their videos more?

              -

              You can enjoy their videos more by watching them on a big screen with good speakers, listening to them on headphones or earphones, playing them in the background while doing other things, learning the lyrics and singing along, sharing them with others, and praying and meditating on them.

              197e85843d
              -
              -
              \ No newline at end of file diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/index.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/index.d.ts deleted file mode 100644 index 1950db46dcb3c814e9d52efac6b88bc4ac9e7915..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/index.d.ts +++ /dev/null @@ -1,134 +0,0 @@ -// Type definitions for non-npm package Node.js 18.15 -// Project: https://nodejs.org/ -// Definitions by: Microsoft TypeScript -// DefinitelyTyped -// Alberto Schiabel -// Alvis HT Tang -// Andrew Makarov -// Benjamin Toueg -// Chigozirim C. -// David Junger -// Deividas Bakanas -// Eugene Y. Q. Shen -// Hannes Magnusson -// Huw -// Kelvin Jin -// Klaus Meinhardt -// Lishude -// Mariusz Wiktorczyk -// Mohsen Azimi -// Nicolas Even -// Nikita Galkin -// Parambir Singh -// Sebastian Silbermann -// Simon Schick -// Thomas den Hollander -// Wilco Bakker -// wwwy3y3 -// Samuel Ainsworth -// Kyle Uehlein -// Thanik Bhongbhibhat -// Marcin Kopacz -// Trivikram Kamat -// Junxiao Shi -// Ilia Baryshnikov -// ExE Boss -// Piotr Błażejewicz -// Anna Henningsen -// Victor Perin -// Yongsheng Zhang -// NodeJS Contributors -// Linus Unnebäck -// wafuwafu13 -// Matteo Collina -// Dmitry Semigradsky -// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped - -/** - * License for programmatically and manually incorporated - * documentation aka. `JSDoc` from https://github.com/nodejs/node/tree/master/doc - * - * Copyright Node.js contributors. All rights reserved. - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -// NOTE: These definitions support NodeJS and TypeScript 4.9+. 
- -// Reference required types from the default lib: -/// -/// -/// -/// - -// Base definitions for all NodeJS modules that are not specific to any version of TypeScript: -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// -/// - -/// diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/os.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/os.d.ts deleted file mode 100644 index ebe02da76978ace2f1e62d31115930209734ce2c..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/ts4.8/os.d.ts +++ /dev/null @@ -1,473 +0,0 @@ -/** - * The `os` module provides operating system-related utility methods and - * properties. It can be accessed using: - * - * ```js - * const os = require('os'); - * ``` - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/os.js) - */ -declare module 'os' { - interface CpuInfo { - model: string; - speed: number; - times: { - user: number; - nice: number; - sys: number; - idle: number; - irq: number; - }; - } - interface NetworkInterfaceBase { - address: string; - netmask: string; - mac: string; - internal: boolean; - cidr: string | null; - } - interface NetworkInterfaceInfoIPv4 extends NetworkInterfaceBase { - family: 'IPv4'; - scopeid?: undefined; - } - interface NetworkInterfaceInfoIPv6 extends NetworkInterfaceBase { - family: 'IPv6'; - scopeid: number; - } - interface UserInfo { - username: T; - uid: number; - gid: number; - shell: T; - homedir: T; - } - type NetworkInterfaceInfo = NetworkInterfaceInfoIPv4 | NetworkInterfaceInfoIPv6; - /** - * Returns the host name of the operating system as a string. - * @since v0.3.3 - */ - function hostname(): string; - /** - * Returns an array containing the 1, 5, and 15 minute load averages. - * - * The load average is a measure of system activity calculated by the operating - * system and expressed as a fractional number. - * - * The load average is a Unix-specific concept. On Windows, the return value is - * always `[0, 0, 0]`. - * @since v0.3.3 - */ - function loadavg(): number[]; - /** - * Returns the system uptime in number of seconds. - * @since v0.3.3 - */ - function uptime(): number; - /** - * Returns the amount of free system memory in bytes as an integer. - * @since v0.3.3 - */ - function freemem(): number; - /** - * Returns the total amount of system memory in bytes as an integer. - * @since v0.3.3 - */ - function totalmem(): number; - /** - * Returns an array of objects containing information about each logical CPU core. 
- * - * The properties included on each object include: - * - * ```js - * [ - * { - * model: 'Intel(R) Core(TM) i7 CPU 860 @ 2.80GHz', - * speed: 2926, - * times: { - * user: 252020, - * nice: 0, - * sys: 30340, - * idle: 1070356870, - * irq: 0 - * } - * }, - * { - * model: 'Intel(R) Core(TM) i7 CPU 860 @ 2.80GHz', - * speed: 2926, - * times: { - * user: 306960, - * nice: 0, - * sys: 26980, - * idle: 1071569080, - * irq: 0 - * } - * }, - * { - * model: 'Intel(R) Core(TM) i7 CPU 860 @ 2.80GHz', - * speed: 2926, - * times: { - * user: 248450, - * nice: 0, - * sys: 21750, - * idle: 1070919370, - * irq: 0 - * } - * }, - * { - * model: 'Intel(R) Core(TM) i7 CPU 860 @ 2.80GHz', - * speed: 2926, - * times: { - * user: 256880, - * nice: 0, - * sys: 19430, - * idle: 1070905480, - * irq: 20 - * } - * }, - * ] - * ``` - * - * `nice` values are POSIX-only. On Windows, the `nice` values of all processors - * are always 0. - * @since v0.3.3 - */ - function cpus(): CpuInfo[]; - /** - * Returns an estimate of the default amount of parallelism a program should use. Always returns a value greater than zero. - * - * This function is a small wrapper about libuv's [`uv_available_parallelism()`](https://docs.libuv.org/en/v1.x/misc.html#c.uv_available_parallelism). - * @since 18.4.0 - */ - function availableParallelism(): number; - /** - * Returns the operating system name as returned by [`uname(3)`](https://linux.die.net/man/3/uname). For example, it - * returns `'Linux'` on Linux, `'Darwin'` on macOS, and `'Windows_NT'` on Windows. - * - * See [https://en.wikipedia.org/wiki/Uname#Examples](https://en.wikipedia.org/wiki/Uname#Examples) for additional information - * about the output of running [`uname(3)`](https://linux.die.net/man/3/uname) on various operating systems. - * @since v0.3.3 - */ - function type(): string; - /** - * Returns the operating system as a string. - * - * On POSIX systems, the operating system release is determined by calling [`uname(3)`](https://linux.die.net/man/3/uname). On Windows, `GetVersionExW()` is used. See - * [https://en.wikipedia.org/wiki/Uname#Examples](https://en.wikipedia.org/wiki/Uname#Examples) for more information. - * @since v0.3.3 - */ - function release(): string; - /** - * Returns an object containing network interfaces that have been assigned a - * network address. - * - * Each key on the returned object identifies a network interface. The associated - * value is an array of objects that each describe an assigned network address. - * - * The properties available on the assigned network address object include: - * - * ```js - * { - * lo: [ - * { - * address: '127.0.0.1', - * netmask: '255.0.0.0', - * family: 'IPv4', - * mac: '00:00:00:00:00:00', - * internal: true, - * cidr: '127.0.0.1/8' - * }, - * { - * address: '::1', - * netmask: 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', - * family: 'IPv6', - * mac: '00:00:00:00:00:00', - * scopeid: 0, - * internal: true, - * cidr: '::1/128' - * } - * ], - * eth0: [ - * { - * address: '192.168.1.108', - * netmask: '255.255.255.0', - * family: 'IPv4', - * mac: '01:02:03:0a:0b:0c', - * internal: false, - * cidr: '192.168.1.108/24' - * }, - * { - * address: 'fe80::a00:27ff:fe4e:66a1', - * netmask: 'ffff:ffff:ffff:ffff::', - * family: 'IPv6', - * mac: '01:02:03:0a:0b:0c', - * scopeid: 1, - * internal: false, - * cidr: 'fe80::a00:27ff:fe4e:66a1/64' - * } - * ] - * } - * ``` - * @since v0.6.0 - */ - function networkInterfaces(): NodeJS.Dict; - /** - * Returns the string path of the current user's home directory. 
- * - * On POSIX, it uses the `$HOME` environment variable if defined. Otherwise it - * uses the [effective UID](https://en.wikipedia.org/wiki/User_identifier#Effective_user_ID) to look up the user's home directory. - * - * On Windows, it uses the `USERPROFILE` environment variable if defined. - * Otherwise it uses the path to the profile directory of the current user. - * @since v2.3.0 - */ - function homedir(): string; - /** - * Returns information about the currently effective user. On POSIX platforms, - * this is typically a subset of the password file. The returned object includes - * the `username`, `uid`, `gid`, `shell`, and `homedir`. On Windows, the `uid` and`gid` fields are `-1`, and `shell` is `null`. - * - * The value of `homedir` returned by `os.userInfo()` is provided by the operating - * system. This differs from the result of `os.homedir()`, which queries - * environment variables for the home directory before falling back to the - * operating system response. - * - * Throws a `SystemError` if a user has no `username` or `homedir`. - * @since v6.0.0 - */ - function userInfo(options: { encoding: 'buffer' }): UserInfo; - function userInfo(options?: { encoding: BufferEncoding }): UserInfo; - type SignalConstants = { - [key in NodeJS.Signals]: number; - }; - namespace constants { - const UV_UDP_REUSEADDR: number; - namespace signals {} - const signals: SignalConstants; - namespace errno { - const E2BIG: number; - const EACCES: number; - const EADDRINUSE: number; - const EADDRNOTAVAIL: number; - const EAFNOSUPPORT: number; - const EAGAIN: number; - const EALREADY: number; - const EBADF: number; - const EBADMSG: number; - const EBUSY: number; - const ECANCELED: number; - const ECHILD: number; - const ECONNABORTED: number; - const ECONNREFUSED: number; - const ECONNRESET: number; - const EDEADLK: number; - const EDESTADDRREQ: number; - const EDOM: number; - const EDQUOT: number; - const EEXIST: number; - const EFAULT: number; - const EFBIG: number; - const EHOSTUNREACH: number; - const EIDRM: number; - const EILSEQ: number; - const EINPROGRESS: number; - const EINTR: number; - const EINVAL: number; - const EIO: number; - const EISCONN: number; - const EISDIR: number; - const ELOOP: number; - const EMFILE: number; - const EMLINK: number; - const EMSGSIZE: number; - const EMULTIHOP: number; - const ENAMETOOLONG: number; - const ENETDOWN: number; - const ENETRESET: number; - const ENETUNREACH: number; - const ENFILE: number; - const ENOBUFS: number; - const ENODATA: number; - const ENODEV: number; - const ENOENT: number; - const ENOEXEC: number; - const ENOLCK: number; - const ENOLINK: number; - const ENOMEM: number; - const ENOMSG: number; - const ENOPROTOOPT: number; - const ENOSPC: number; - const ENOSR: number; - const ENOSTR: number; - const ENOSYS: number; - const ENOTCONN: number; - const ENOTDIR: number; - const ENOTEMPTY: number; - const ENOTSOCK: number; - const ENOTSUP: number; - const ENOTTY: number; - const ENXIO: number; - const EOPNOTSUPP: number; - const EOVERFLOW: number; - const EPERM: number; - const EPIPE: number; - const EPROTO: number; - const EPROTONOSUPPORT: number; - const EPROTOTYPE: number; - const ERANGE: number; - const EROFS: number; - const ESPIPE: number; - const ESRCH: number; - const ESTALE: number; - const ETIME: number; - const ETIMEDOUT: number; - const ETXTBSY: number; - const EWOULDBLOCK: number; - const EXDEV: number; - const WSAEINTR: number; - const WSAEBADF: number; - const WSAEACCES: number; - const WSAEFAULT: number; - const WSAEINVAL: 
number; - const WSAEMFILE: number; - const WSAEWOULDBLOCK: number; - const WSAEINPROGRESS: number; - const WSAEALREADY: number; - const WSAENOTSOCK: number; - const WSAEDESTADDRREQ: number; - const WSAEMSGSIZE: number; - const WSAEPROTOTYPE: number; - const WSAENOPROTOOPT: number; - const WSAEPROTONOSUPPORT: number; - const WSAESOCKTNOSUPPORT: number; - const WSAEOPNOTSUPP: number; - const WSAEPFNOSUPPORT: number; - const WSAEAFNOSUPPORT: number; - const WSAEADDRINUSE: number; - const WSAEADDRNOTAVAIL: number; - const WSAENETDOWN: number; - const WSAENETUNREACH: number; - const WSAENETRESET: number; - const WSAECONNABORTED: number; - const WSAECONNRESET: number; - const WSAENOBUFS: number; - const WSAEISCONN: number; - const WSAENOTCONN: number; - const WSAESHUTDOWN: number; - const WSAETOOMANYREFS: number; - const WSAETIMEDOUT: number; - const WSAECONNREFUSED: number; - const WSAELOOP: number; - const WSAENAMETOOLONG: number; - const WSAEHOSTDOWN: number; - const WSAEHOSTUNREACH: number; - const WSAENOTEMPTY: number; - const WSAEPROCLIM: number; - const WSAEUSERS: number; - const WSAEDQUOT: number; - const WSAESTALE: number; - const WSAEREMOTE: number; - const WSASYSNOTREADY: number; - const WSAVERNOTSUPPORTED: number; - const WSANOTINITIALISED: number; - const WSAEDISCON: number; - const WSAENOMORE: number; - const WSAECANCELLED: number; - const WSAEINVALIDPROCTABLE: number; - const WSAEINVALIDPROVIDER: number; - const WSAEPROVIDERFAILEDINIT: number; - const WSASYSCALLFAILURE: number; - const WSASERVICE_NOT_FOUND: number; - const WSATYPE_NOT_FOUND: number; - const WSA_E_NO_MORE: number; - const WSA_E_CANCELLED: number; - const WSAEREFUSED: number; - } - namespace priority { - const PRIORITY_LOW: number; - const PRIORITY_BELOW_NORMAL: number; - const PRIORITY_NORMAL: number; - const PRIORITY_ABOVE_NORMAL: number; - const PRIORITY_HIGH: number; - const PRIORITY_HIGHEST: number; - } - } - const devNull: string; - const EOL: string; - /** - * Returns the operating system CPU architecture for which the Node.js binary was - * compiled. Possible values are `'arm'`, `'arm64'`, `'ia32'`, `'mips'`,`'mipsel'`, `'ppc'`, `'ppc64'`, `'s390'`, `'s390x'`, and `'x64'`. - * - * The return value is equivalent to `process.arch`. - * @since v0.5.0 - */ - function arch(): string; - /** - * Returns a string identifying the kernel version. - * - * On POSIX systems, the operating system release is determined by calling [`uname(3)`](https://linux.die.net/man/3/uname). On Windows, `RtlGetVersion()` is used, and if it is not - * available, `GetVersionExW()` will be used. See [https://en.wikipedia.org/wiki/Uname#Examples](https://en.wikipedia.org/wiki/Uname#Examples) for more information. - * @since v13.11.0, v12.17.0 - */ - function version(): string; - /** - * Returns a string identifying the operating system platform for which - * the Node.js binary was compiled. The value is set at compile time. - * Possible values are `'aix'`, `'darwin'`, `'freebsd'`,`'linux'`,`'openbsd'`, `'sunos'`, and `'win32'`. - * - * The return value is equivalent to `process.platform`. - * - * The value `'android'` may also be returned if Node.js is built on the Android - * operating system. [Android support is experimental](https://github.com/nodejs/node/blob/HEAD/BUILDING.md#androidandroid-based-devices-eg-firefox-os). - * @since v0.5.0 - */ - function platform(): NodeJS.Platform; - /** - * Returns the machine type as a string, such as arm, aarch64, mips, mips64, ppc64, ppc64le, s390, s390x, i386, i686, x86_64. 
- * - * On POSIX systems, the machine type is determined by calling [`uname(3)`](https://linux.die.net/man/3/uname). - * On Windows, `RtlGetVersion()` is used, and if it is not available, `GetVersionExW()` will be used. - * See [https://en.wikipedia.org/wiki/Uname#Examples](https://en.wikipedia.org/wiki/Uname#Examples) for more information. - * @since v18.9.0 - */ - function machine(): string; - /** - * Returns the operating system's default directory for temporary files as a - * string. - * @since v0.9.9 - */ - function tmpdir(): string; - /** - * Returns a string identifying the endianness of the CPU for which the Node.js - * binary was compiled. - * - * Possible values are `'BE'` for big endian and `'LE'` for little endian. - * @since v0.9.4 - */ - function endianness(): 'BE' | 'LE'; - /** - * Returns the scheduling priority for the process specified by `pid`. If `pid` is - * not provided or is `0`, the priority of the current process is returned. - * @since v10.10.0 - * @param [pid=0] The process ID to retrieve scheduling priority for. - */ - function getPriority(pid?: number): number; - /** - * Attempts to set the scheduling priority for the process specified by `pid`. If`pid` is not provided or is `0`, the process ID of the current process is used. - * - * The `priority` input must be an integer between `-20` (high priority) and `19`(low priority). Due to differences between Unix priority levels and Windows - * priority classes, `priority` is mapped to one of six priority constants in`os.constants.priority`. When retrieving a process priority level, this range - * mapping may cause the return value to be slightly different on Windows. To avoid - * confusion, set `priority` to one of the priority constants. - * - * On Windows, setting priority to `PRIORITY_HIGHEST` requires elevated user - * privileges. Otherwise the set priority will be silently reduced to`PRIORITY_HIGH`. - * @since v10.10.0 - * @param [pid=0] The process ID to set scheduling priority for. - * @param priority The scheduling priority to assign to the process. - */ - function setPriority(priority: number): void; - function setPriority(pid: number, priority: number): void; -} -declare module 'node:os' { - export * from 'os'; -} diff --git a/spaces/flax-community/DietNerf-Demo/jaxnerf/nerf/__init__.py b/spaces/flax-community/DietNerf-Demo/jaxnerf/nerf/__init__.py deleted file mode 100644 index c4cbefc3397c8c691234e616369bda8b71f721a6..0000000000000000000000000000000000000000 --- a/spaces/flax-community/DietNerf-Demo/jaxnerf/nerf/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- diff --git a/spaces/flax-community/Multilingual-VQA/apps/mlm.py b/spaces/flax-community/Multilingual-VQA/apps/mlm.py deleted file mode 100644 index 6fa59aaad84492929786ec651d79d119da76fb24..0000000000000000000000000000000000000000 --- a/spaces/flax-community/Multilingual-VQA/apps/mlm.py +++ /dev/null @@ -1,146 +0,0 @@ -from .utils import ( - get_text_attributes, - get_top_5_predictions, - get_transformed_image, - plotly_express_horizontal_bar_plot, - bert_tokenizer, -) - -import streamlit as st -import numpy as np -import pandas as pd -import os -import matplotlib.pyplot as plt -from mtranslate import translate -from .utils import read_markdown -import requests -from PIL import Image -from .model.flax_clip_vision_bert.modeling_clip_vision_bert import ( - FlaxCLIPVisionBertForMaskedLM, -) - - -def softmax(logits): - return np.exp(logits) / np.sum(np.exp(logits), axis=0) - -def app(state): - mlm_state = state - st.header("Visuo-linguistic Mask Filling Demo") - - with st.beta_expander("Usage"): - st.write(read_markdown("mlm_usage.md")) - st.info(read_markdown("mlm_intro.md")) - - # @st.cache(persist=False) # TODO: Make this work with mlm_state. Currently not supported. - def predict(transformed_image, caption_inputs): - outputs = mlm_state.mlm_model(pixel_values=transformed_image, **caption_inputs) - indices = np.where(caption_inputs["input_ids"] == bert_tokenizer.mask_token_id)[1][0] - preds = outputs.logits[0][indices] - scores = np.array(preds) - return scores - - # @st.cache(persist=False) - def load_model(ckpt): - return FlaxCLIPVisionBertForMaskedLM.from_pretrained(ckpt) - - mlm_checkpoints = ["flax-community/clip-vision-bert-cc12m-70k"] - #mlm_checkpoints = ["./ckpt/mlm/ckpt-60k"] - dummy_data = pd.read_csv("cc12m_data/vqa_val.tsv", sep="\t") - - first_index = 15 - # Init Session mlm_state - if mlm_state.mlm_image_file is None: - mlm_state.mlm_image_file = dummy_data.loc[first_index, "image_file"] - caption = dummy_data.loc[first_index, "caption"].strip("- ") - mlm_state.unmasked_caption = caption - ids = bert_tokenizer.encode(caption) - mask_index = np.random.randint(1, len(ids) - 1) - mlm_state.currently_masked_token = bert_tokenizer.convert_ids_to_tokens([ids[mask_index]])[0] - ids[mask_index] = bert_tokenizer.mask_token_id - mlm_state.caption = bert_tokenizer.decode(ids[1:-1]) - mlm_state.caption_lang_id = dummy_data.loc[first_index, "lang_id"] - - image_path = os.path.join("cc12m_data/resized_images_vqa", mlm_state.mlm_image_file) - image = plt.imread(image_path) - mlm_state.mlm_image = image - - if mlm_state.mlm_model is None: - # Display Top-5 Predictions - with st.spinner("Loading model..."): - mlm_state.mlm_model = load_model(mlm_checkpoints[0]) - - query1 = st.text_input( - "Enter a URL to an image", - value="http://images.cocodataset.org/val2017/000000039769.jpg", - ) - - col1, col2, col3 = st.beta_columns([2,1, 2]) - if col1.button( - "Get a random example", - help="Get a random example from the 100 `seeded` image-text pairs.", - ): - sample = dummy_data.sample(1).reset_index() - mlm_state.mlm_image_file = sample.loc[0, "image_file"] - caption = sample.loc[0, "caption"].strip("- ") - mlm_state.unmasked_caption = caption - ids = bert_tokenizer.encode(caption) - mask_index = np.random.randint(1, len(ids) - 1) - mlm_state.currently_masked_token = bert_tokenizer.convert_ids_to_tokens([ids[mask_index]])[0] - ids[mask_index] = bert_tokenizer.mask_token_id - mlm_state.caption = bert_tokenizer.decode(ids[1:-1]) - mlm_state.caption_lang_id = sample.loc[0, "lang_id"] - - image_path 
= os.path.join("cc12m_data/resized_images_vqa", mlm_state.mlm_image_file) - image = plt.imread(image_path) - mlm_state.mlm_image = image - - col2.write("OR") - - if col3.button("Use above URL"): - image_data = requests.get(query1, stream=True).raw - image = np.asarray(Image.open(image_data)) - mlm_state.mlm_image = image - - - - transformed_image = get_transformed_image(mlm_state.mlm_image) - - new_col1, new_col2 = st.beta_columns([5, 5]) - - # Display Image - new_col1.image(mlm_state.mlm_image, use_column_width="auto") - - # Display caption - new_col2.write("Write your text with exactly one [MASK] token.") - mlm_state.caption = new_col2.text_input( - label="Text", - value=mlm_state.caption, - help="Type your masked caption regarding the image above in one of the four languages.", - ) - - print(mlm_state.currently_maskd_token) - print(mlm_state.unmasked_caption) - print(mlm_state.caption) - if mlm_state.unmasked_caption == mlm_state.caption.replace("[MASK]", mlm_state.currently_masked_token): - new_col2.markdown("**Masked Token**: "+mlm_state.currently_masked_token) - new_col2.markdown("**English Translation: " + mlm_state.unmasked_caption if mlm_state.caption_lang_id == "en" else translate(mlm_state.unmasked_caption, 'en')) - - else: - new_col2.markdown( - f"""**English Translation**: {mlm_state.caption if mlm_state.caption_lang_id == "en" else translate(mlm_state.caption, 'en')}""" - ) - caption_inputs = get_text_attributes(mlm_state.caption) - - # Display Top-5 Predictions - with st.spinner("Predicting..."): - scores = predict(transformed_image, dict(caption_inputs)) - scores = softmax(scores) - labels, values = get_top_5_predictions(scores) - filled_sentence = mlm_state.caption.replace("[MASK]", labels[-1]) - st.write("**Filled Sentence**: " + filled_sentence) - st.write( f"""**English Translation**: {translate(filled_sentence, 'en')}""") - # newer_col1, newer_col2 = st.beta_columns([6,4]) - fig = plotly_express_horizontal_bar_plot(values, labels) - st.dataframe(pd.DataFrame({"Tokens":labels, "English Translation": list(map(lambda x: translate(x),labels))}).T) - st.plotly_chart(fig, use_container_width=True) - \ No newline at end of file diff --git a/spaces/foghuang/ChatGLM2-6B/cli_demo.py b/spaces/foghuang/ChatGLM2-6B/cli_demo.py deleted file mode 100644 index 4aa35d3019313f4dc5d35146b5267457b352b3e9..0000000000000000000000000000000000000000 --- a/spaces/foghuang/ChatGLM2-6B/cli_demo.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import platform -import signal -from transformers import AutoTokenizer, AutoModel -import readline - -tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).cuda() -# 多显卡支持,使用下面两行代替上面一行,将num_gpus改为你实际的显卡数量 -# from utils import load_model_on_gpus -# model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2) -model = model.eval() - -os_name = platform.system() -clear_command = 'cls' if os_name == 'Windows' else 'clear' -stop_stream = False - - -def build_prompt(history): - prompt = "欢迎使用 ChatGLM2-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序" - for query, response in history: - prompt += f"\n\n用户:{query}" - prompt += f"\n\nChatGLM2-6B:{response}" - return prompt - - -def signal_handler(signal, frame): - global stop_stream - stop_stream = True - - -def main(): - past_key_values, history = None, [] - global stop_stream - print("欢迎使用 ChatGLM2-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序") - while True: - query = input("\n用户:") - if query.strip() == "stop": - break 
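# Aside: a self-contained sketch of the incremental streaming-print pattern used a few
# lines below (print(response[current_length:], ...)). The generator here is only a
# stand-in for model.stream_chat(...), which yields the full response-so-far on every
# step, so each iteration should print just the newly generated suffix.
def fake_stream_chat():
    text = ""
    for token in ["Hello", ", ", "this ", "is ", "a ", "streaming ", "demo", "."]:
        text += token
        yield text  # full response so far, like stream_chat

def demo_stream_print():
    current_length = 0
    for response in fake_stream_chat():
        print(response[current_length:], end="", flush=True)
        current_length = len(response)
    print("")

# demo_stream_print()  # prints each token exactly once, in order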
- if query.strip() == "clear": - past_key_values, history = None, [] - os.system(clear_command) - print("欢迎使用 ChatGLM2-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序") - continue - print("\nChatGLM:", end="") - current_length = 0 - for response, history, past_key_values in model.stream_chat(tokenizer, query, history=history, - past_key_values=past_key_values, - return_past_key_values=True): - if stop_stream: - stop_stream = False - break - else: - print(response[current_length:], end="", flush=True) - current_length = len(response) - print("") - - -if __name__ == "__main__": - main() diff --git a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/sentence_builder/run.py b/spaces/freddyaboulton/3.1.4.9-all-demos/demos/sentence_builder/run.py deleted file mode 100644 index 2b1385deca619dcffcaa002aa985465cca6fc7c6..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/3.1.4.9-all-demos/demos/sentence_builder/run.py +++ /dev/null @@ -1,27 +0,0 @@ -import gradio as gr - - -def sentence_builder(quantity, animal, place, activity_list, morning): - return f"""The {quantity} {animal}s went to the {place} where they {" and ".join(activity_list)} until the {"morning" if morning else "night"}""" - - -demo = gr.Interface( - sentence_builder, - [ - gr.Slider(2, 20, value=4), - gr.Dropdown(["cat", "dog", "bird"]), - gr.Radio(["park", "zoo", "road"]), - gr.CheckboxGroup(["ran", "swam", "ate", "slept"]), - gr.Checkbox(label="Is it the morning?"), - ], - "text", - examples=[ - [2, "cat", "park", ["ran", "swam"], True], - [4, "dog", "zoo", ["ate", "swam"], False], - [10, "bird", "road", ["ran"], False], - [8, "cat", "zoo", ["ate"], True], - ], -) - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/generativeai/test-image-similarity/services/aws_service.py b/spaces/generativeai/test-image-similarity/services/aws_service.py deleted file mode 100644 index dd3e1860733575a9785ca09a1b69de9f5febb31d..0000000000000000000000000000000000000000 --- a/spaces/generativeai/test-image-similarity/services/aws_service.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import boto3 -from PIL import Image -from io import BytesIO - -class AwsService: - def session(): - return boto3.Session( - aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID'), - aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY'), - region_name=os.environ.get('AWS_REGION') - ) - - def s3_client(): - return AwsService.session().client('s3') - - def get_files_from_s3(bucket, prefix): - return AwsService.s3_client().list_objects(Bucket=bucket, Prefix=prefix)['Contents'] - - def get_image_from_s3(bucket, key): - file_byte_string = AwsService.s3_client().get_object(Bucket=bucket, Key=key)['Body'].read() - return { - 'key': key.split('/')[-1].split('.')[0], - 'pil': Image.open(BytesIO(file_byte_string)) - } \ No newline at end of file diff --git a/spaces/godfiry/runwayml-stable-diffusion-v1-5/README.md b/spaces/godfiry/runwayml-stable-diffusion-v1-5/README.md deleted file mode 100644 index 568364ae217d0c84fe1a6211a8099e4c23b7181b..0000000000000000000000000000000000000000 --- a/spaces/godfiry/runwayml-stable-diffusion-v1-5/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Runwayml Stable Diffusion V1 5 -emoji: 👀 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/gotiQspiryo/whisper-ui/db.py b/spaces/gotiQspiryo/whisper-ui/db.py deleted file mode 100644 index 
d3d70f40ed8a55e1d0c618192bf96daa2ea5406f..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/db.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Database config, models & operations -This is lumped together in a single file to keep things simple. -""" -import uuid -from datetime import datetime -from typing import List, Optional - -from config import DATA_DIR, DEBUG -from sqlalchemy import ForeignKey, MetaData, create_engine -from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship - - -# This is a slightly augmented version of SQL model that adds some common fields -# that are used across all models -# Also, to keep things simple, only sqlite native types are used -class Base(DeclarativeBase): - # Identifiers are all uuids - id: Mapped[str] = mapped_column(default=lambda: str(uuid.uuid4()), primary_key=True) - - # All timestamps are timezone aware isoformatted strings (local timezone by default) - created: Mapped[str] = mapped_column(default=lambda: datetime.now().astimezone().isoformat(), nullable=False) - updated: Mapped[str] = mapped_column(default=lambda: datetime.now().astimezone().isoformat(), nullable=False) - - -# Models & Schemas -# ---------------------- -# This is the core model that stitches a piece of Media with audio to derivative content -# like transcripts & derived data from them. -class Media(Base): - __tablename__ = "media" - - # Source type is a string and is, as of now, either - source_type: Mapped[str] - source_name: Mapped[str] - source_link: Mapped[Optional[str]] - - # Full path of where the audio is locally stored - filepath: Mapped[str] - - # Additional metadata - duration: Mapped[Optional[float]] - - # Flag to indicate if the audio has been transcribed - transcript: Mapped["Transcript"] = relationship(back_populates="media", uselist=False, cascade="all, delete-orphan") - segments: Mapped[List["Segment"]] = relationship( - back_populates="media", order_by="Segment.number", cascade="all, delete-orphan" - ) - - -class Transcript(Base): - """A transcript is the full text of an audio file's transcription""" - - __tablename__ = "transcript" - - # The media object that this transcript is for - media_id: Mapped[str] = mapped_column(ForeignKey("media.id", ondelete="CASCADE"), nullable=False) - media: Mapped["Media"] = relationship(back_populates="transcript") - - # The transcript - text: Mapped[str] - language: Mapped[str] - generated_by: Mapped[str] - - -class Segment(Base): - """A segment is a transcription of a specific audio segment within the file""" - - __tablename__ = "segment" - - # The media object that this transcript is for - media_id: Mapped[str] = mapped_column(ForeignKey("media.id", ondelete="CASCADE"), nullable=False) - media: Mapped["Media"] = relationship(back_populates="segments") - - # Segment text - number: Mapped[int] - text: Mapped[str] - start: Mapped[float] - end: Mapped[float] - - # OpenAI provided metadata - generated_by: Mapped[str] - temperature: Mapped[float] - avg_logprob: Mapped[float] - compression_ratio: Mapped[float] - no_speech_prob: Mapped[float] - - -# Database config -# ---------------------- -DATABASE_URL = f"sqlite:///{DATA_DIR}/db.sqlite3" -# DATABASE_URL = "sqlite+pysqlite:///:memory:" - -# Create database engine -ENGINE = create_engine(DATABASE_URL, echo=True) if DEBUG else create_engine(DATABASE_URL) - -# Create all tables -Base.metadata.create_all(ENGINE) diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Delphi 2014 R3 Keygen Download Filehippo Tips and Tricks for Using the Software.md 
b/spaces/gotiQspiryo/whisper-ui/examples/Delphi 2014 R3 Keygen Download Filehippo Tips and Tricks for Using the Software.md deleted file mode 100644 index 3c69991c425a15bd5bf894d3290553144983dc82..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Delphi 2014 R3 Keygen Download Filehippo Tips and Tricks for Using the Software.md +++ /dev/null @@ -1,6 +0,0 @@ - -


              -


              -

              Delphi 2014 R3 Keygen Download Filehippo


              Download Zip ✯✯✯ https://urlgoal.com/2uyMOI



              aaccfb2cb3
              -
              -
              \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Kill The Noise (Part 1) Movie Learn Italian with this Amazing Dubstep Horror Film.md b/spaces/gotiQspiryo/whisper-ui/examples/Kill The Noise (Part 1) Movie Learn Italian with this Amazing Dubstep Horror Film.md deleted file mode 100644 index 6d1b8f3655f467d7f79a7fb2926d1d905f4ef835..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Kill The Noise (Part 1) Movie Learn Italian with this Amazing Dubstep Horror Film.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Kill The Noise (Part 1) Movie In Italian Free Download


              Download Zip » https://urlgoal.com/2uyMHO



              - - aaccfb2cb3
              -
              -
              -

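Stepping back to the whisper-ui db.py module deleted earlier in this batch of diffs, a minimal usage sketch may help make the schema concrete. It is illustrative only: it assumes that module is importable as db exactly as shown (importing it runs Base.metadata.create_all(ENGINE)), and the media values below are invented.

from sqlalchemy.orm import Session
import db  # the whisper-ui module removed above; importing it creates the tables

with Session(db.ENGINE) as session:
    media = db.Media(
        source_type="upload",                  # free-form source descriptor
        source_name="interview.mp3",
        filepath="/data/audio/interview.mp3",  # hypothetical local path
        duration=312.4,
    )
    session.add(media)
    session.commit()
    print(session.query(db.Media).count())     # -> 1 on a fresh database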
              diff --git a/spaces/gradio/HuBERT/fairseq/dataclass/utils.py b/spaces/gradio/HuBERT/fairseq/dataclass/utils.py deleted file mode 100644 index 89206125d1d50ccc4b4d56394a76bc07bb32927a..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/dataclass/utils.py +++ /dev/null @@ -1,476 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import ast -import inspect -import logging -import os -import re -from argparse import ArgumentError, ArgumentParser, Namespace -from dataclasses import _MISSING_TYPE, MISSING, is_dataclass -from enum import Enum -from typing import Any, Dict, List, Optional, Tuple, Type - -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.configs import FairseqConfig -from hydra.core.global_hydra import GlobalHydra -from hydra.experimental import compose, initialize -from omegaconf import DictConfig, OmegaConf, open_dict - -logger = logging.getLogger(__name__) - - -def eval_str_list(x, x_type=float): - if x is None: - return None - if isinstance(x, str): - if len(x) == 0: - return [] - x = ast.literal_eval(x) - try: - return list(map(x_type, x)) - except TypeError: - return [x_type(x)] - - -def interpret_dc_type(field_type): - if isinstance(field_type, str): - raise RuntimeError("field should be a type") - - if field_type == Any: - return str - - typestring = str(field_type) - if re.match( - r"(typing.|^)Union\[(.*), NoneType\]$", typestring - ) or typestring.startswith("typing.Optional"): - return field_type.__args__[0] - return field_type - - -def gen_parser_from_dataclass( - parser: ArgumentParser, - dataclass_instance: FairseqDataclass, - delete_default: bool = False, -) -> None: - """convert a dataclass instance to tailing parser arguments""" - - def argparse_name(name: str): - if name == "data": - # normally data is positional args - return name - if name == "_name": - # private member, skip - return None - return "--" + name.replace("_", "-") - - def get_kwargs_from_dc( - dataclass_instance: FairseqDataclass, k: str - ) -> Dict[str, Any]: - """k: dataclass attributes""" - - kwargs = {} - - field_type = dataclass_instance._get_type(k) - inter_type = interpret_dc_type(field_type) - - field_default = dataclass_instance._get_default(k) - - if isinstance(inter_type, type) and issubclass(inter_type, Enum): - field_choices = [t.value for t in list(inter_type)] - else: - field_choices = None - - field_help = dataclass_instance._get_help(k) - field_const = dataclass_instance._get_argparse_const(k) - - if isinstance(field_default, str) and field_default.startswith("${"): - kwargs["default"] = field_default - else: - if field_default is MISSING: - kwargs["required"] = True - if field_choices is not None: - kwargs["choices"] = field_choices - if ( - isinstance(inter_type, type) - and (issubclass(inter_type, List) or issubclass(inter_type, Tuple)) - ) or ("List" in str(inter_type) or "Tuple" in str(inter_type)): - if "int" in str(inter_type): - kwargs["type"] = lambda x: eval_str_list(x, int) - elif "float" in str(inter_type): - kwargs["type"] = lambda x: eval_str_list(x, float) - elif "str" in str(inter_type): - kwargs["type"] = lambda x: eval_str_list(x, str) - else: - raise NotImplementedError( - "parsing of type " + str(inter_type) + " is not implemented" - ) - if field_default is not MISSING: - kwargs["default"] = ( - ",".join(map(str, field_default)) - if field_default is not None - 
else None - ) - elif ( - isinstance(inter_type, type) and issubclass(inter_type, Enum) - ) or "Enum" in str(inter_type): - kwargs["type"] = str - if field_default is not MISSING: - if isinstance(field_default, Enum): - kwargs["default"] = field_default.value - else: - kwargs["default"] = field_default - elif inter_type is bool: - kwargs["action"] = ( - "store_false" if field_default is True else "store_true" - ) - kwargs["default"] = field_default - else: - kwargs["type"] = inter_type - if field_default is not MISSING: - kwargs["default"] = field_default - - kwargs["help"] = field_help - if field_const is not None: - kwargs["const"] = field_const - kwargs["nargs"] = "?" - - return kwargs - - for k in dataclass_instance._get_all_attributes(): - field_name = argparse_name(dataclass_instance._get_name(k)) - field_type = dataclass_instance._get_type(k) - if field_name is None: - continue - elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass): - gen_parser_from_dataclass(parser, field_type(), delete_default) - continue - - kwargs = get_kwargs_from_dc(dataclass_instance, k) - - field_args = [field_name] - alias = dataclass_instance._get_argparse_alias(k) - if alias is not None: - field_args.append(alias) - - if "default" in kwargs: - if isinstance(kwargs["default"], str) and kwargs["default"].startswith( - "${" - ): - if kwargs["help"] is None: - # this is a field with a name that will be added elsewhere - continue - else: - del kwargs["default"] - if delete_default and "default" in kwargs: - del kwargs["default"] - try: - parser.add_argument(*field_args, **kwargs) - except ArgumentError: - pass - - -def _set_legacy_defaults(args, cls): - """Helper to set default arguments based on *add_args*.""" - if not hasattr(cls, "add_args"): - return - - import argparse - - parser = argparse.ArgumentParser( - argument_default=argparse.SUPPRESS, allow_abbrev=False - ) - cls.add_args(parser) - # copied from argparse.py: - defaults = argparse.Namespace() - for action in parser._actions: - if action.dest is not argparse.SUPPRESS: - if not hasattr(defaults, action.dest): - if action.default is not argparse.SUPPRESS: - setattr(defaults, action.dest, action.default) - for key, default_value in vars(defaults).items(): - if not hasattr(args, key): - setattr(args, key, default_value) - - -def _override_attr( - sub_node: str, data_class: Type[FairseqDataclass], args: Namespace -) -> List[str]: - overrides = [] - - if not inspect.isclass(data_class) or not issubclass(data_class, FairseqDataclass): - return overrides - - def get_default(f): - if not isinstance(f.default_factory, _MISSING_TYPE): - return f.default_factory() - return f.default - - for k, v in data_class.__dataclass_fields__.items(): - if k.startswith("_"): - # private member, skip - continue - - val = get_default(v) if not hasattr(args, k) else getattr(args, k) - - field_type = interpret_dc_type(v.type) - if ( - isinstance(val, str) - and not val.startswith("${") # not interpolation - and field_type != str - and ( - not inspect.isclass(field_type) or not issubclass(field_type, Enum) - ) # not choices enum - ): - # upgrade old models that stored complex parameters as string - val = ast.literal_eval(val) - - if isinstance(val, tuple): - val = list(val) - - v_type = getattr(v.type, "__origin__", None) - if ( - (v_type is List or v_type is list or v_type is Optional) - # skip interpolation - and not (isinstance(val, str) and val.startswith("${")) - ): - # if type is int but val is float, then we will crash later - try to convert here 
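# Aside: a much-simplified, standalone sketch of the idea behind gen_parser_from_dataclass()
# defined earlier in this file: walk a dataclass's fields and register one argparse option
# per field. It deliberately ignores Enums, Optionals, lists and the other cases the real
# function handles; the names below are invented for illustration only.
import argparse
from dataclasses import dataclass, fields

@dataclass
class ToyConfig:
    lr: float = 0.1          # becomes --lr, type=float, default=0.1
    max_epoch: int = 10      # becomes --max-epoch, type=int, default=10
    save_dir: str = "ckpt"   # becomes --save-dir, type=str, default="ckpt"

def toy_parser(cfg_cls):
    parser = argparse.ArgumentParser()
    for f in fields(cfg_cls):
        parser.add_argument("--" + f.name.replace("_", "-"), type=f.type, default=f.default)
    return parser

# toy_parser(ToyConfig).parse_args(["--lr", "0.01"])
# -> Namespace(lr=0.01, max_epoch=10, save_dir='ckpt')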
- if hasattr(v.type, "__args__"): - t_args = v.type.__args__ - if len(t_args) == 1 and (t_args[0] is float or t_args[0] is int): - val = list(map(t_args[0], val)) - elif val is not None and ( - field_type is int or field_type is bool or field_type is float - ): - try: - val = field_type(val) - except: - pass # ignore errors here, they are often from interpolation args - - if val is None: - overrides.append("{}.{}=null".format(sub_node, k)) - elif val == "": - overrides.append("{}.{}=''".format(sub_node, k)) - elif isinstance(val, str): - val = val.replace("'", r"\'") - overrides.append("{}.{}='{}'".format(sub_node, k, val)) - elif isinstance(val, FairseqDataclass): - overrides += _override_attr(f"{sub_node}.{k}", type(val), args) - elif isinstance(val, Namespace): - sub_overrides, _ = override_module_args(val) - for so in sub_overrides: - overrides.append(f"{sub_node}.{k}.{so}") - else: - overrides.append("{}.{}={}".format(sub_node, k, val)) - - return overrides - - -def migrate_registry( - name, value, registry, args, overrides, deletes, use_name_as_val=False -): - if value in registry: - overrides.append("{}={}".format(name, value)) - overrides.append("{}._name={}".format(name, value)) - overrides.extend(_override_attr(name, registry[value], args)) - elif use_name_as_val and value is not None: - overrides.append("{}={}".format(name, value)) - else: - deletes.append(name) - - -def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]: - """use the field in args to overrides those in cfg""" - overrides = [] - deletes = [] - - for k in FairseqConfig.__dataclass_fields__.keys(): - overrides.extend( - _override_attr(k, FairseqConfig.__dataclass_fields__[k].type, args) - ) - - if args is not None: - if hasattr(args, "task"): - from fairseq.tasks import TASK_DATACLASS_REGISTRY - - migrate_registry( - "task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes - ) - else: - deletes.append("task") - - # these options will be set to "None" if they have not yet been migrated - # so we can populate them with the entire flat args - CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"} - - from fairseq.registry import REGISTRIES - - for k, v in REGISTRIES.items(): - if hasattr(args, k): - migrate_registry( - k, - getattr(args, k), - v["dataclass_registry"], - args, - overrides, - deletes, - use_name_as_val=k not in CORE_REGISTRIES, - ) - else: - deletes.append(k) - - no_dc = True - if hasattr(args, "arch"): - from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_MODEL_NAME_REGISTRY - - if args.arch in ARCH_MODEL_REGISTRY: - m_cls = ARCH_MODEL_REGISTRY[args.arch] - dc = getattr(m_cls, "__dataclass", None) - if dc is not None: - m_name = ARCH_MODEL_NAME_REGISTRY[args.arch] - overrides.append("model={}".format(m_name)) - overrides.append("model._name={}".format(args.arch)) - # override model params with those exist in args - overrides.extend(_override_attr("model", dc, args)) - no_dc = False - if no_dc: - deletes.append("model") - - return overrides, deletes - - -def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: - """Convert a flat argparse.Namespace to a structured DictConfig.""" - - # Here we are using field values provided in args to override counterparts inside config object - overrides, deletes = override_module_args(args) - - # configs will be in fairseq/config after installation - config_path = os.path.join("..", "config") - - GlobalHydra.instance().clear() - - with initialize(config_path=config_path): - try: - composed_cfg = compose("config", 
overrides=overrides, strict=False) - except: - logger.error("Error when composing. Overrides: " + str(overrides)) - raise - - for k in deletes: - composed_cfg[k] = None - - cfg = OmegaConf.create( - OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) - ) - - # hack to be able to set Namespace in dict config. this should be removed when we update to newer - # omegaconf version that supports object flags, or when we migrate all existing models - from omegaconf import _utils - - old_primitive = _utils.is_primitive_type - _utils.is_primitive_type = lambda _: True - - if cfg.task is None and getattr(args, "task", None): - cfg.task = Namespace(**vars(args)) - from fairseq.tasks import TASK_REGISTRY - - _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) - cfg.task._name = args.task - if cfg.model is None and getattr(args, "arch", None): - cfg.model = Namespace(**vars(args)) - from fairseq.models import ARCH_MODEL_REGISTRY - - _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) - cfg.model._name = args.arch - if cfg.optimizer is None and getattr(args, "optimizer", None): - cfg.optimizer = Namespace(**vars(args)) - from fairseq.optim import OPTIMIZER_REGISTRY - - _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) - cfg.optimizer._name = args.optimizer - if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): - cfg.lr_scheduler = Namespace(**vars(args)) - from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY - - _set_legacy_defaults(cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler]) - cfg.lr_scheduler._name = args.lr_scheduler - if cfg.criterion is None and getattr(args, "criterion", None): - cfg.criterion = Namespace(**vars(args)) - from fairseq.criterions import CRITERION_REGISTRY - - _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) - cfg.criterion._name = args.criterion - - _utils.is_primitive_type = old_primitive - OmegaConf.set_struct(cfg, True) - return cfg - - -def populate_dataclass( - dataclass: FairseqDataclass, - args: Namespace, -) -> FairseqDataclass: - for k in dataclass.__dataclass_fields__.keys(): - if k.startswith("_"): - # private member, skip - continue - if hasattr(args, k): - setattr(dataclass, k, getattr(args, k)) - - return dataclass - - -def overwrite_args_by_name(cfg: DictConfig, overrides: Dict[str, any]): - # this will be deprecated when we get rid of argparse and model_overrides logic - - from fairseq.registry import REGISTRIES - - with open_dict(cfg): - for k in cfg.keys(): - # "k in cfg" will return false if its a "mandatory value (e.g. 
???)" - if k in cfg and isinstance(cfg[k], DictConfig): - if k in overrides and isinstance(overrides[k], dict): - for ok, ov in overrides[k].items(): - if isinstance(ov, dict) and cfg[k][ok] is not None: - overwrite_args_by_name(cfg[k][ok], ov) - else: - cfg[k][ok] = ov - else: - overwrite_args_by_name(cfg[k], overrides) - elif k in cfg and isinstance(cfg[k], Namespace): - for override_key, val in overrides.items(): - setattr(cfg[k], override_key, val) - elif k in overrides: - if ( - k in REGISTRIES - and overrides[k] in REGISTRIES[k]["dataclass_registry"] - ): - cfg[k] = DictConfig( - REGISTRIES[k]["dataclass_registry"][overrides[k]] - ) - overwrite_args_by_name(cfg[k], overrides) - cfg[k]._name = overrides[k] - else: - cfg[k] = overrides[k] - - -def merge_with_parent(dc: FairseqDataclass, cfg: DictConfig, remove_missing=True): - if remove_missing: - - if is_dataclass(dc): - target_keys = set(dc.__dataclass_fields__.keys()) - else: - target_keys = set(dc.keys()) - - with open_dict(cfg): - for k in list(cfg.keys()): - if k not in target_keys: - del cfg[k] - - merged_cfg = OmegaConf.merge(dc, cfg) - merged_cfg.__dict__["_parent"] = cfg.__dict__["_parent"] - OmegaConf.set_struct(merged_cfg, True) - return merged_cfg diff --git a/spaces/gwang-kim/DATID-3D/eg3d/torch_utils/ops/upfirdn2d.py b/spaces/gwang-kim/DATID-3D/eg3d/torch_utils/ops/upfirdn2d.py deleted file mode 100644 index 5d634714167043daf63ec7f643ddd85d98d926dc..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/eg3d/torch_utils/ops/upfirdn2d.py +++ /dev/null @@ -1,391 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: LicenseRef-NvidiaProprietary -# -# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual -# property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or -# its affiliates is strictly prohibited. - -"""Custom PyTorch ops for efficient resampling of 2D images.""" - -import os -import numpy as np -import torch - -from .. import custom_ops -from .. import misc -from . 
import conv2d_gradfix - -#---------------------------------------------------------------------------- - -_plugin = None - -def _init(): - global _plugin - if _plugin is None: - _plugin = custom_ops.get_plugin( - module_name='upfirdn2d_plugin', - sources=['upfirdn2d.cpp', 'upfirdn2d.cu'], - headers=['upfirdn2d.h'], - source_dir=os.path.dirname(__file__), - extra_cuda_cflags=['--use_fast_math'], - ) - return True - -def _parse_scaling(scaling): - if isinstance(scaling, int): - scaling = [scaling, scaling] - assert isinstance(scaling, (list, tuple)) - assert all(isinstance(x, int) for x in scaling) - sx, sy = scaling - assert sx >= 1 and sy >= 1 - return sx, sy - -def _parse_padding(padding): - if isinstance(padding, int): - padding = [padding, padding] - assert isinstance(padding, (list, tuple)) - assert all(isinstance(x, int) for x in padding) - if len(padding) == 2: - padx, pady = padding - padding = [padx, padx, pady, pady] - padx0, padx1, pady0, pady1 = padding - return padx0, padx1, pady0, pady1 - -def _get_filter_size(f): - if f is None: - return 1, 1 - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - fw = f.shape[-1] - fh = f.shape[0] - with misc.suppress_tracer_warnings(): - fw = int(fw) - fh = int(fh) - misc.assert_shape(f, [fh, fw][:f.ndim]) - assert fw >= 1 and fh >= 1 - return fw, fh - -#---------------------------------------------------------------------------- - -def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None): - r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. - - Args: - f: Torch tensor, numpy array, or python list of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), - `[]` (impulse), or - `None` (identity). - device: Result device (default: cpu). - normalize: Normalize the filter so that it retains the magnitude - for constant input signal (DC)? (default: True). - flip_filter: Flip the filter? (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - separable: Return a separable filter? (default: select automatically). - - Returns: - Float32 tensor of the shape - `[filter_height, filter_width]` (non-separable) or - `[filter_taps]` (separable). - """ - # Validate. - if f is None: - f = 1 - f = torch.as_tensor(f, dtype=torch.float32) - assert f.ndim in [0, 1, 2] - assert f.numel() > 0 - if f.ndim == 0: - f = f[np.newaxis] - - # Separable? - if separable is None: - separable = (f.ndim == 1 and f.numel() >= 8) - if f.ndim == 1 and not separable: - f = f.ger(f) - assert f.ndim == (1 if separable else 2) - - # Apply normalize, flip, gain, and device. - if normalize: - f /= f.sum() - if flip_filter: - f = f.flip(list(range(f.ndim))) - f = f * (gain ** (f.ndim / 2)) - f = f.to(device=device) - return f - -#---------------------------------------------------------------------------- - -def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Pad, upsample, filter, and downsample a batch of 2D images. - - Performs the following sequence of operations for each channel: - - 1. Upsample the image by inserting N-1 zeros after each pixel (`up`). - - 2. Pad the image with the specified number of zeros on each side (`padding`). - Negative padding corresponds to cropping the image. - - 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it - so that the footprint of all output pixels lies within the input image. - - 4. Downsample the image by keeping every Nth pixel (`down`). 
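    As a rough usage sketch (illustrative only: it assumes the eg3d/torch_utils package
    layout shown in the diff header above and relies on the CPU reference path, since the
    CUDA plugin is optional), the wrappers defined later in this file cover the common cases:

        import torch
        from torch_utils.ops import upfirdn2d  # package layout as in the diff header

        f = upfirdn2d.setup_filter([1, 3, 3, 1])      # normalized separable low-pass filter
        x = torch.randn(4, 3, 32, 32)                 # NCHW batch
        y_up = upfirdn2d.upsample2d(x, f, up=2)       # -> [4, 3, 64, 64]
        y_dn = upfirdn2d.downsample2d(x, f, down=2)   # -> [4, 3, 16, 16]
        y_sm = upfirdn2d.filter2d(x, f)               # same spatial size, low-pass filtered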
- - This sequence of operations bears close resemblance to scipy.signal.upfirdn(). - The fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports gradients of arbitrary order. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the upsampled image. Can be a single number - or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f) - return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): - """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops. - """ - # Validate arguments. - assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - assert f.dtype == torch.float32 and not f.requires_grad - batch_size, num_channels, in_height, in_width = x.shape - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Check that upsampled buffer is not smaller than the filter. - upW = in_width * upx + padx0 + padx1 - upH = in_height * upy + pady0 + pady1 - assert upW >= f.shape[-1] and upH >= f.shape[0] - - # Upsample by inserting zeros. - x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) - x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) - x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) - - # Pad or crop. - x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]) - x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)] - - # Setup filter. - f = f * (gain ** (f.ndim / 2)) - f = f.to(x.dtype) - if not flip_filter: - f = f.flip(list(range(f.ndim))) - - # Convolve with the filter. - f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) - if f.ndim == 4: - x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels) - else: - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) - - # Downsample by throwing away pixels. 
- x = x[:, :, ::downy, ::downx] - return x - -#---------------------------------------------------------------------------- - -_upfirdn2d_cuda_cache = dict() - -def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1): - """Fast CUDA implementation of `upfirdn2d()` using custom ops. - """ - # Parse arguments. - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Lookup from cache. - key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - if key in _upfirdn2d_cuda_cache: - return _upfirdn2d_cuda_cache[key] - - # Forward op. - class Upfirdn2dCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, f): # pylint: disable=arguments-differ - assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - if f.ndim == 1 and f.shape[0] == 1: - f = f.square().unsqueeze(0) # Convert separable-1 into full-1x1. - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - y = x - if f.ndim == 2: - y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - else: - y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0) - y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain) - ctx.save_for_backward(f) - ctx.x_shape = x.shape - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - f, = ctx.saved_tensors - _, _, ih, iw = ctx.x_shape - _, _, oh, ow = dy.shape - fw, fh = _get_filter_size(f) - p = [ - fw - padx0 - 1, - iw * upx - ow * downx + padx0 - upx + 1, - fh - pady0 - 1, - ih * upy - oh * downy + pady0 - upy + 1, - ] - dx = None - df = None - - if ctx.needs_input_grad[0]: - dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f) - - assert not ctx.needs_input_grad[1] - return dx, df - - # Add to cache. - _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda - return Upfirdn2dCuda - -#---------------------------------------------------------------------------- - -def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Filter a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape matches the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
- """ - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + fw // 2, - padx1 + (fw - 1) // 2, - pady0 + fh // 2, - pady1 + (fh - 1) // 2, - ] - return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- - -def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Upsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a multiple of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - upx, upy = _parse_scaling(up) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw + upx - 1) // 2, - padx1 + (fw - upx) // 2, - pady0 + (fh + upy - 1) // 2, - pady1 + (fh - upy) // 2, - ] - return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl) - -#---------------------------------------------------------------------------- - -def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Downsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a fraction of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the input. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
- """ - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw - downx + 1) // 2, - padx1 + (fw - downx) // 2, - pady0 + (fh - downy + 1) // 2, - pady1 + (fh - downy) // 2, - ] - return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- diff --git a/spaces/h2oai/wave-tour/examples/image.py b/spaces/h2oai/wave-tour/examples/image.py deleted file mode 100644 index 5d3d6177af5e3b5c0fd4b86fba0a2e7b68ed9ee3..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/image.py +++ /dev/null @@ -1,59 +0,0 @@ -# Image -# Use an image card to display an image by specifying its URL or a data URL in case of the base64-encoded #image. -# --- -from h2o_wave import site, ui -import io -import base64 -import numpy as np -import matplotlib.pyplot as plt - -n = 25 -plt.figure(figsize=(3, 3)) -plt.scatter( - [0.7003673, 0.74275081, 0.70928001, 0.56674552, 0.97778533, 0.70633485, - 0.24791576, 0.15788335, 0.69769852, 0.71995667, 0.25774443, 0.34154678, - 0.96876117, 0.6945071, 0.46638326, 0.7028127, 0.51178587, 0.92874137, - 0.7397693, 0.62243903, 0.65154547, 0.39680761, 0.54323939, 0.79989953, - 0.72154473], - [0.29536398, 0.16094588, 0.20612551, 0.13432539, 0.48060502, 0.34252181, - 0.36296929, 0.97291764, 0.11094361, 0.38826409, 0.78306588, 0.97289726, - 0.48320961, 0.33642111, 0.56741904, 0.04794151, 0.38893703, 0.90630365, - 0.16101821, 0.74362113, 0.63297416, 0.32418002, 0.92237653, 0.23722644, - 0.82394557], - s=(30 * np.asarray([ - 0.75060714, 0.11378445, 0.84536125, 0.92393213, 0.22083679, 0.93305388, - 0.48899874, 0.47471864, 0.08916747, 0.22994818, 0.71593741, 0.49612616, - 0.76648938, 0.89679732, 0.77222302, 0.92717429, 0.61465203, 0.60906377, - 0.68468487, 0.25101297, 0.83783764, 0.11861562, 0.79723474, 0.94900427, - 0.14806288])) ** 2, - c=[0.90687198, 0.78837333, 0.76840584, 0.59849648, 0.44214562, 0.72303802, - 0.41661825, 0.2268104, 0.45422734, 0.84794375, 0.93665595, 0.95603618, - 0.39209432, 0.70832467, 0.12951583, 0.35379639, 0.40427152, 0.6485339, - 0.03307097, 0.53800936, 0.13171312, 0.52093493, 0.10248479, 0.15798038, - 0.92002965], - alpha=0.5, -) - -buf = io.BytesIO() -plt.savefig(buf, format='png') -buf.seek(0) -image = base64.b64encode(buf.read()).decode('utf-8') - -page = site['/demo'] -page['example1'] = ui.image_card( - box='1 1 2 4', - title='An image', - type='png', - image=image, -) - -# Another way to achieve the same result is to use a data URL for the path: -# The example below constructs the data URL from the base64-encoded -# used in the previous example. 
-page['example2'] = ui.image_card( - box='3 1 2 4', - title='An image', - path=f"data:image/png;base64,{image}", -) - -page.save() \ No newline at end of file diff --git a/spaces/hands012/gpt-academic/crazy_functions/test_project/cpp/cppipc/shm.cpp b/spaces/hands012/gpt-academic/crazy_functions/test_project/cpp/cppipc/shm.cpp deleted file mode 100644 index 593ce3129dc1574dbc8fc8b088cf595df215de93..0000000000000000000000000000000000000000 --- a/spaces/hands012/gpt-academic/crazy_functions/test_project/cpp/cppipc/shm.cpp +++ /dev/null @@ -1,103 +0,0 @@ - -#include -#include - -#include "libipc/shm.h" - -#include "libipc/utility/pimpl.h" -#include "libipc/memory/resource.h" - -namespace ipc { -namespace shm { - -class handle::handle_ : public pimpl { -public: - shm::id_t id_ = nullptr; - void* m_ = nullptr; - - ipc::string n_; - std::size_t s_ = 0; -}; - -handle::handle() - : p_(p_->make()) { -} - -handle::handle(char const * name, std::size_t size, unsigned mode) - : handle() { - acquire(name, size, mode); -} - -handle::handle(handle&& rhs) - : handle() { - swap(rhs); -} - -handle::~handle() { - release(); - p_->clear(); -} - -void handle::swap(handle& rhs) { - std::swap(p_, rhs.p_); -} - -handle& handle::operator=(handle rhs) { - swap(rhs); - return *this; -} - -bool handle::valid() const noexcept { - return impl(p_)->m_ != nullptr; -} - -std::size_t handle::size() const noexcept { - return impl(p_)->s_; -} - -char const * handle::name() const noexcept { - return impl(p_)->n_.c_str(); -} - -std::int32_t handle::ref() const noexcept { - return shm::get_ref(impl(p_)->id_); -} - -void handle::sub_ref() noexcept { - shm::sub_ref(impl(p_)->id_); -} - -bool handle::acquire(char const * name, std::size_t size, unsigned mode) { - release(); - impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), size, mode); - impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_)); - return valid(); -} - -std::int32_t handle::release() { - if (impl(p_)->id_ == nullptr) return -1; - return shm::release(detach()); -} - -void* handle::get() const { - return impl(p_)->m_; -} - -void handle::attach(id_t id) { - if (id == nullptr) return; - release(); - impl(p_)->id_ = id; - impl(p_)->m_ = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_)); -} - -id_t handle::detach() { - auto old = impl(p_)->id_; - impl(p_)->id_ = nullptr; - impl(p_)->m_ = nullptr; - impl(p_)->s_ = 0; - impl(p_)->n_.clear(); - return old; -} - -} // namespace shm -} // namespace ipc diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/utils/checkpoint.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/utils/checkpoint.py deleted file mode 100644 index 2b8106503de32bcf626000761b1f91af7c04572b..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/utils/checkpoint.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
-import logging -import os - -import torch - -from maskrcnn_benchmark.utils.model_serialization import load_state_dict -from maskrcnn_benchmark.utils.c2_model_loading import load_c2_format -from maskrcnn_benchmark.utils.big_model_loading import load_big_format -from maskrcnn_benchmark.utils.pretrain_model_loading import load_pretrain_format -from maskrcnn_benchmark.utils.imports import import_file -from maskrcnn_benchmark.utils.model_zoo import cache_url - - -class Checkpointer(object): - def __init__( - self, - model, - optimizer=None, - scheduler=None, - save_dir="", - save_to_disk=None, - logger=None, - ): - self.model = model - self.optimizer = optimizer - self.scheduler = scheduler - self.save_dir = save_dir - self.save_to_disk = save_to_disk - if logger is None: - logger = logging.getLogger(__name__) - self.logger = logger - - def save(self, name, **kwargs): - if not self.save_dir: - return - - if not self.save_to_disk: - return - - data = {} - data["model"] = self.model.state_dict() - if self.optimizer is not None: - data["optimizer"] = self.optimizer.state_dict() - if self.scheduler is not None: - if isinstance(self.scheduler, list): - data["scheduler"] = [scheduler.state_dict() for scheduler in self.scheduler] - else: - data["scheduler"] = self.scheduler.state_dict() - data.update(kwargs) - - save_file = os.path.join(self.save_dir, "{}.pth".format(name)) - self.logger.info("Saving checkpoint to {}".format(save_file)) - torch.save(data, save_file) - # self.tag_last_checkpoint(save_file) - # use relative path name to save the checkpoint - self.tag_last_checkpoint("{}.pth".format(name)) - - def load(self, f=None, force=False, keyword="model", skip_optimizer =False): - resume = False - if self.has_checkpoint() and not force: - # override argument with existing checkpoint - f = self.get_checkpoint_file() - # get the absolute path - f = os.path.join(self.save_dir, f) - resume = True - if not f: - # no checkpoint could be found - self.logger.info("No checkpoint found. 
Initializing model from scratch") - return {} - self.logger.info("Loading checkpoint from {}".format(f)) - checkpoint = self._load_file(f) - self._load_model(checkpoint, keyword=keyword) - # if resume training, load optimizer and scheduler, - # otherwise use the specified LR in config yaml for fine-tuning - if resume and not skip_optimizer: - if "optimizer" in checkpoint and self.optimizer: - self.logger.info("Loading optimizer from {}".format(f)) - self.optimizer.load_state_dict(checkpoint.pop("optimizer")) - if "scheduler" in checkpoint and self.scheduler: - self.logger.info("Loading scheduler from {}".format(f)) - if isinstance(self.scheduler, list): - for scheduler, state_dict in zip(self.scheduler, checkpoint.pop("scheduler")): - scheduler.load_state_dict(state_dict) - else: - self.scheduler.load_state_dict(checkpoint.pop("scheduler")) - - # return any further checkpoint data - return checkpoint - else: - return {} - - def has_checkpoint(self): - save_file = os.path.join(self.save_dir, "last_checkpoint") - return os.path.exists(save_file) - - def get_checkpoint_file(self): - save_file = os.path.join(self.save_dir, "last_checkpoint") - try: - with open(save_file, "r") as f: - last_saved = f.read() - last_saved = last_saved.strip() - except IOError: - # if file doesn't exist, maybe because it has just been - # deleted by a separate process - last_saved = "" - return last_saved - - def tag_last_checkpoint(self, last_filename): - save_file = os.path.join(self.save_dir, "last_checkpoint") - with open(save_file, "w") as f: - f.write(last_filename) - - def _load_file(self, f): - return torch.load(f, map_location=torch.device("cpu")) - - def _load_model(self, checkpoint, keyword="model"): - load_state_dict(self.model, checkpoint.pop(keyword)) - - -class DetectronCheckpointer(Checkpointer): - def __init__( - self, - cfg, - model, - optimizer=None, - scheduler=None, - save_dir="", - save_to_disk=None, - logger=None, - ): - super(DetectronCheckpointer, self).__init__( - model, optimizer, scheduler, save_dir, save_to_disk, logger - ) - self.cfg = cfg.clone() - - def _load_file(self, f): - # catalog lookup - if f.startswith("catalog://"): - paths_catalog = import_file( - "maskrcnn_benchmark.config.paths_catalog", self.cfg.PATHS_CATALOG, True - ) - catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://") :]) - self.logger.info("{} points to {}".format(f, catalog_f)) - f = catalog_f - # download url files - if f.startswith("http"): - # if the file is a url path, download it and cache it - cached_f = cache_url(f) - self.logger.info("url {} cached in {}".format(f, cached_f)) - f = cached_f - # convert Caffe2 checkpoint from pkl - if f.endswith(".pkl"): - return load_c2_format(self.cfg, f) - if f.endswith(".big"): - return load_big_format(self.cfg, f) - if f.endswith(".pretrain"): - return load_pretrain_format(self.cfg, f) - # load native detectron.pytorch checkpoint - loaded = super(DetectronCheckpointer, self)._load_file(f) - if "model" not in loaded: - loaded = dict(model=loaded) - return loaded diff --git a/spaces/harry18456/TestChatGPT/app.py b/spaces/harry18456/TestChatGPT/app.py deleted file mode 100644 index 2c477b6b87c3188386c1dd0c16ed4f85bc2678a3..0000000000000000000000000000000000000000 --- a/spaces/harry18456/TestChatGPT/app.py +++ /dev/null @@ -1,50 +0,0 @@ -from config import config -import gradio as gr -from revChatGPT.revChatGPT import Chatbot - - -story_background = None -first_interact = True -chatbot = None - -def init(): - global story_background, first_interact, chatbot - 
story_background = "你在森林裡冒險,不確定會從哪裡跑出一些奇怪的東西,你握緊手上的槍,希望能從這次冒險找到一些值錢的東西,你往森林深處走去。" - first_interact = True - chatbot = Chatbot(config, conversation_id=None) - chatbot.reset_chat() - chatbot.refresh_session() - return [], [] - -def chat(message, history): - global first_interact, chatbot - history = history or [] - - if message[-1] != '。': - message = message + '。' - if first_interact: - prompt = '現在來用繁體中文續寫一個冒險小說,續寫的時候注意節奏,不要太快,每個段落就只講5分鐘的事情。一次只需要續寫四句話。開頭是,{},你{}'.format(story_background, message) - first_interact = False - else: - prompt = '繼續續寫,續寫的時候注意節奏,續寫的時候注意節奏,不要太快,一次只需要續寫四到六句話,總共就只講5分鐘內發生的事情。你{}'.format(message) - resp = chatbot.get_chat_response(prompt) - history.append(('你' + message, resp["message"])) - return history, history, '' - -def main(): - init() - demo = gr.Blocks() - with demo: - global story_background - gr.Markdown(story_background) - gChatbot = gr.Chatbot(label='故事').style(color_map=('blue', 'gray')) - input_text = gr.TextArea(label='你', lines=3) - gState = gr.State() - sendBtn = gr.Button(value='Send') - sendBtn.click(chat, inputs=[input_text, gState], outputs=[gChatbot, gState, input_text]) - resetBtn = gr.Button(value='Reset Story') - resetBtn.click(init, outputs=[gChatbot, gState]) - demo.launch() - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/caffe2_converter.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/caffe2_converter.py deleted file mode 100644 index 08feb69fba090a302d1624d52d146ac7a0787223..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tools/deploy/caffe2_converter.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import argparse -import os -import onnx -import torch - -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import get_cfg -from detectron2.data import build_detection_test_loader -from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format -from detectron2.export import Caffe2Tracer, add_export_config -from detectron2.modeling import build_model -from detectron2.utils.logger import setup_logger - - -def setup_cfg(args): - cfg = get_cfg() - # cuda context is initialized before creating dataloader, so we don't fork anymore - cfg.DATALOADER.NUM_WORKERS = 0 - cfg = add_export_config(cfg) - cfg.merge_from_file(args.config_file) - cfg.merge_from_list(args.opts) - cfg.freeze() - if cfg.MODEL.DEVICE != "cpu": - TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) - assert TORCH_VERSION >= (1, 5), "PyTorch>=1.5 required for GPU conversion!" 
- return cfg - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Convert a model using caffe2 tracing.") - parser.add_argument( - "--format", - choices=["caffe2", "onnx", "torchscript"], - help="output format", - default="caffe2", - ) - parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") - parser.add_argument("--run-eval", action="store_true") - parser.add_argument("--output", help="output directory for the converted model") - parser.add_argument( - "opts", - help="Modify config options using the command-line", - default=None, - nargs=argparse.REMAINDER, - ) - args = parser.parse_args() - logger = setup_logger() - logger.info("Command line arguments: " + str(args)) - os.makedirs(args.output, exist_ok=True) - - cfg = setup_cfg(args) - - # create a torch model - torch_model = build_model(cfg) - DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS) - - # get a sample data - data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) - first_batch = next(iter(data_loader)) - - # convert and save caffe2 model - tracer = Caffe2Tracer(cfg, torch_model, first_batch) - if args.format == "caffe2": - caffe2_model = tracer.export_caffe2() - caffe2_model.save_protobuf(args.output) - # draw the caffe2 graph - caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=first_batch) - elif args.format == "onnx": - onnx_model = tracer.export_onnx() - onnx.save(onnx_model, os.path.join(args.output, "model.onnx")) - elif args.format == "torchscript": - script_model = tracer.export_torchscript() - script_model.save(os.path.join(args.output, "model.ts")) - - # Recursively print IR of all modules - with open(os.path.join(args.output, "model_ts_IR.txt"), "w") as f: - try: - f.write(script_model._actual_script_module._c.dump_to_str(True, False, False)) - except AttributeError: - pass - # Print IR of the entire graph (all submodules inlined) - with open(os.path.join(args.output, "model_ts_IR_inlined.txt"), "w") as f: - f.write(str(script_model.inlined_graph)) - # Print the model structure in pytorch style - with open(os.path.join(args.output, "model.txt"), "w") as f: - f.write(str(script_model)) - - # run evaluation with the converted model - if args.run_eval: - assert args.format == "caffe2", "Python inference in other format is not yet supported." - dataset = cfg.DATASETS.TEST[0] - data_loader = build_detection_test_loader(cfg, dataset) - # NOTE: hard-coded evaluator. change to the evaluator for your dataset - evaluator = COCOEvaluator(dataset, cfg, True, args.output) - metrics = inference_on_dataset(caffe2_model, data_loader, evaluator) - print_csv_format(metrics) diff --git a/spaces/hebert2099/MusicGen/audiocraft/quantization/vq.py b/spaces/hebert2099/MusicGen/audiocraft/quantization/vq.py deleted file mode 100644 index f67c3a0cd30d4b8993a36c587f00dc8a451d926f..0000000000000000000000000000000000000000 --- a/spaces/hebert2099/MusicGen/audiocraft/quantization/vq.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import math -import typing as tp - -import torch - -from .base import BaseQuantizer, QuantizedResult -from .core_vq import ResidualVectorQuantization - - -class ResidualVectorQuantizer(BaseQuantizer): - """Residual Vector Quantizer. - - Args: - dimension (int): Dimension of the codebooks. 
- n_q (int): Number of residual vector quantizers used. - q_dropout (bool): Random quantizer drop out at train time. - bins (int): Codebook size. - decay (float): Decay for exponential moving average over the codebooks. - kmeans_init (bool): Whether to use kmeans to initialize the codebooks. - kmeans_iters (int): Number of iterations used for kmeans initialization. - threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes - that have an exponential moving average cluster size less than the specified threshold with - randomly selected vector from the current batch. - orthogonal_reg_weight (float): Orthogonal regularization weights. - orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes. - orthogonal_reg_max_codes (optional int): Maximum number of codes to consider. - for orthogonal regulariation. - """ - def __init__( - self, - dimension: int = 256, - n_q: int = 8, - q_dropout: bool = False, - bins: int = 1024, - decay: float = 0.99, - kmeans_init: bool = True, - kmeans_iters: int = 10, - threshold_ema_dead_code: int = 2, - orthogonal_reg_weight: float = 0.0, - orthogonal_reg_active_codes_only: bool = False, - orthogonal_reg_max_codes: tp.Optional[int] = None, - ): - super().__init__() - self.max_n_q = n_q - self.n_q = n_q - self.q_dropout = q_dropout - self.dimension = dimension - self.bins = bins - self.decay = decay - self.kmeans_init = kmeans_init - self.kmeans_iters = kmeans_iters - self.threshold_ema_dead_code = threshold_ema_dead_code - self.orthogonal_reg_weight = orthogonal_reg_weight - self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only - self.orthogonal_reg_max_codes = orthogonal_reg_max_codes - self.vq = ResidualVectorQuantization( - dim=self.dimension, - codebook_size=self.bins, - num_quantizers=self.n_q, - decay=self.decay, - kmeans_init=self.kmeans_init, - kmeans_iters=self.kmeans_iters, - threshold_ema_dead_code=self.threshold_ema_dead_code, - orthogonal_reg_weight=self.orthogonal_reg_weight, - orthogonal_reg_active_codes_only=self.orthogonal_reg_active_codes_only, - orthogonal_reg_max_codes=self.orthogonal_reg_max_codes, - channels_last=False - ) - - def forward(self, x: torch.Tensor, frame_rate: int): - n_q = self.n_q - if self.training and self.q_dropout: - n_q = int(torch.randint(1, self.n_q + 1, (1,)).item()) - bw_per_q = math.log2(self.bins) * frame_rate / 1000 - quantized, codes, commit_loss = self.vq(x, n_q=n_q) - codes = codes.transpose(0, 1) - # codes is [B, K, T], with T frames, K nb of codebooks. - bw = torch.tensor(n_q * bw_per_q).to(x) - return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss)) - - def encode(self, x: torch.Tensor) -> torch.Tensor: - """Encode a given input tensor with the specified frame rate at the given bandwidth. - The RVQ encode method sets the appropriate number of quantizer to use - and returns indices for each quantizer. - """ - n_q = self.n_q - codes = self.vq.encode(x, n_q=n_q) - codes = codes.transpose(0, 1) - # codes is [B, K, T], with T frames, K nb of codebooks. - return codes - - def decode(self, codes: torch.Tensor) -> torch.Tensor: - """Decode the given codes to the quantized representation. - """ - # codes is [B, K, T], with T frames, K nb of codebooks, vq.decode expects [K, B, T]. 
- codes = codes.transpose(0, 1) - quantized = self.vq.decode(codes) - return quantized - - @property - def total_codebooks(self): - return self.max_n_q - - @property - def num_codebooks(self): - return self.n_q - - def set_num_codebooks(self, n: int): - assert n > 0 and n <= self.max_n_q - self.n_q = n diff --git a/spaces/hohonu-vicml/DirectedDiffusion/DirectedDiffusion/ProgramInfo.py b/spaces/hohonu-vicml/DirectedDiffusion/DirectedDiffusion/ProgramInfo.py deleted file mode 100644 index fd00f6b0c41a17de851d5f618d00d9eee2953680..0000000000000000000000000000000000000000 --- a/spaces/hohonu-vicml/DirectedDiffusion/DirectedDiffusion/ProgramInfo.py +++ /dev/null @@ -1,57 +0,0 @@ -import textwrap - - -def get_parser_description(): - return textwrap.dedent( - """\ - ***Stable Diffusion Local Editor*** - - ** Examples (Sginel run, single region): - - # To locate the car at top right of the image - python ./bin/SdEditorCmd.py -roi "0.5,1.0,0.0,0.5" -ei "1,2,3" -nt "10" -s "2.0" -ns 15 -p "A yellow car on a bridge" -m - - ** Example (Single run, multiple regions) - - The following arugment flags are part of regioning strategy controlling the effects of specific region during attention editing step. - Their length must be the same, otherwise the program will be terminated. - --num-trailing-attn (-nt) # a string of integers - --noise-scale (-s) # a string of floats - --edit-index (-ei) # multiple strings of integers - --region-of-interest (-roi) # multiple strings of integers - - # Two region case - python ./bin/SdEditorCmd.py -roi "0.4,0.7,0.1,0.5" "0.4,0.7,0.5,0.9" -ei "2,3" "8,9" -nt "30,30" -ns 10 -s "1.0,1.0" -p "A red cube on top of a blue sphere" -m -sd 2483964026830 - - ** Examples (Grid Search): - - The following arguments are part of the grid search method to speed up the experimental efficiency: - --num-trailing-attn (-nt) - --noise-scale(-s) - --num-affected-steps(-ns) - --diffusion-steps (-ds) - - # The following command will run four times with varied options of -nt and -ns - python ./bin/SdEditorCmd.py -roi "0.5,1.0,0.0,0.5" -ei "1,2,3" -nt 5 10 20 -ns 5 10 -s 2.5 -p "A yellow car running on a bridge" -m - - ** Others - - Using -m flag will draw the metadata on the saved image for quick reference. 
- Using -is flag will show the final result after each diffusion run - - - ** Lazy search - - We offer a lazy grid search command at the initial experiment stage, for instance - - # for large number of parameters - python ./bin/SdEditorCmd.py -roi "0.4,0.7,0.1,0.5" "0.4,0.7,0.5,0.9" -ei "2,3" "8,9" -p "A red cube on top of a blue sphere" -l1 - - # relatively smaller number of parameters - python ./bin/SdEditorCmd.py -roi "0.4,0.7,0.1,0.5" "0.4,0.7,0.5,0.9" -ei "2,3" "8,9" -p "A red cube on top of a blue sphere" -l2 - - This also contains -m function - - See more examples under scripts/sdeditor-example.sh - """ - ) diff --git a/spaces/huggingface-projects/color-palette-generator-sd/frontend/src/app.css b/spaces/huggingface-projects/color-palette-generator-sd/frontend/src/app.css deleted file mode 100644 index bd6213e1dfe6b0a79ce7d8b37d0d2dc70f0250bb..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/color-palette-generator-sd/frontend/src/app.css +++ /dev/null @@ -1,3 +0,0 @@ -@tailwind base; -@tailwind components; -@tailwind utilities; \ No newline at end of file diff --git a/spaces/ikechan8370/meme-generator/utils.py b/spaces/ikechan8370/meme-generator/utils.py deleted file mode 100644 index 8cba408535d0f42bde4c8f37f6406642783bfcd6..0000000000000000000000000000000000000000 --- a/spaces/ikechan8370/meme-generator/utils.py +++ /dev/null @@ -1,448 +0,0 @@ -import asyncio -import hashlib -import inspect -import math -import random -import time -from dataclasses import dataclass -from enum import Enum -from functools import partial, wraps -from io import BytesIO -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Coroutine, - List, - Literal, - Optional, - Protocol, - Tuple, - TypeVar, -) - -import httpx -from PIL.Image import Image as IMG -from pil_utils import BuildImage, Text2Image -from pil_utils.types import ColorType, FontStyle, FontWeight -from typing_extensions import ParamSpec - -from .config import meme_config -from .exception import MemeGeneratorException - -if TYPE_CHECKING: - from .meme import Meme - -P = ParamSpec("P") -R = TypeVar("R") - - -def run_sync(call: Callable[P, R]) -> Callable[P, Coroutine[None, None, R]]: - """一个用于包装 sync function 为 async function 的装饰器 - 参数: - call: 被装饰的同步函数 - """ - - @wraps(call) - async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: - loop = asyncio.get_running_loop() - pfunc = partial(call, *args, **kwargs) - result = await loop.run_in_executor(None, pfunc) - return result - - return _wrapper - - -def is_coroutine_callable(call: Callable[..., Any]) -> bool: - """检查 call 是否是一个 callable 协程函数""" - if inspect.isroutine(call): - return inspect.iscoroutinefunction(call) - if inspect.isclass(call): - return False - func_ = getattr(call, "__call__", None) - return inspect.iscoroutinefunction(func_) - - -def save_gif(frames: List[IMG], duration: float) -> BytesIO: - output = BytesIO() - frames[0].save( - output, - format="GIF", - save_all=True, - append_images=frames[1:], - duration=duration * 1000, - loop=0, - disposal=2, - optimize=False, - ) - - # 没有超出最大大小,直接返回 - nbytes = output.getbuffer().nbytes - if nbytes <= meme_config.gif.gif_max_size * 10**6: - return output - - # 超出最大大小,帧数超出最大帧数时,缩减帧数 - n_frames = len(frames) - gif_max_frames = meme_config.gif.gif_max_frames - if n_frames > gif_max_frames: - index = range(n_frames) - ratio = n_frames / gif_max_frames - index = (int(i * ratio) for i in range(gif_max_frames)) - new_duration = duration * ratio - new_frames = [frames[i] for i in index] - return 
save_gif(new_frames, new_duration) - - # 超出最大大小,帧数没有超出最大帧数时,缩小尺寸 - new_frames = [ - frame.resize((int(frame.width * 0.9), int(frame.height * 0.9))) - for frame in frames - ] - return save_gif(new_frames, duration) - - -class Maker(Protocol): - def __call__(self, img: BuildImage) -> BuildImage: - ... - - -class GifMaker(Protocol): - def __call__(self, i: int) -> Maker: - ... - - -def get_avg_duration(image: IMG) -> float: - if not getattr(image, "is_animated", False): - return 0 - total_duration = 0 - for i in range(image.n_frames): - image.seek(i) - total_duration += image.info["duration"] - return total_duration / image.n_frames - - -def split_gif(image: IMG) -> List[IMG]: - frames: List[IMG] = [] - - update_mode = "full" - for i in range(image.n_frames): - image.seek(i) - if image.tile: # type: ignore - update_region = image.tile[0][1][2:] # type: ignore - if update_region != image.size: - update_mode = "partial" - break - - last_frame: Optional[IMG] = None - for i in range(image.n_frames): - image.seek(i) - frame = image.copy() - if update_mode == "partial" and last_frame: - frame = last_frame.copy().paste(frame) - frames.append(frame) - image.seek(0) - if image.info.__contains__("transparency"): - frames[0].info["transparency"] = image.info["transparency"] - return frames - - -def make_jpg_or_gif( - img: BuildImage, func: Maker, keep_transparency: bool = False -) -> BytesIO: - """ - 制作静图或者动图 - :params - * ``img``: 输入图片 - * ``func``: 图片处理函数,输入img,返回处理后的图片 - * ``keep_transparency``: 传入gif时,是否保留该gif的透明度 - """ - image = img.image - if not getattr(image, "is_animated", False): - return func(img).save_jpg() - else: - frames = split_gif(image) - duration = get_avg_duration(image) / 1000 - frames = [func(BuildImage(frame)).image for frame in frames] - if keep_transparency: - image.seek(0) - if image.info.__contains__("transparency"): - frames[0].info["transparency"] = image.info["transparency"] - return save_gif(frames, duration) - - -def make_png_or_gif( - img: BuildImage, func: Maker, keep_transparency: bool = False -) -> BytesIO: - """ - 制作静图或者动图 - :params - * ``img``: 输入图片 - * ``func``: 图片处理函数,输入img,返回处理后的图片 - * ``keep_transparency``: 传入gif时,是否保留该gif的透明度 - """ - image = img.image - if not getattr(image, "is_animated", False): - return func(img).save_png() - else: - frames = split_gif(image) - duration = get_avg_duration(image) / 1000 - frames = [func(BuildImage(frame)).image for frame in frames] - if keep_transparency: - image.seek(0) - if image.info.__contains__("transparency"): - frames[0].info["transparency"] = image.info["transparency"] - return save_gif(frames, duration) - - -class FrameAlignPolicy(Enum): - """ - 要叠加的gif长度大于基准gif时,是否延长基准gif长度以对齐两个gif - """ - - no_extend = 0 - """不延长""" - extend_first = 1 - """延长第一帧""" - extend_last = 2 - """延长最后一帧""" - extend_loop = 3 - """以循环方式延长""" - - -def make_gif_or_combined_gif( - img: BuildImage, - maker: GifMaker, - frame_num: int, - duration: float, - frame_align: FrameAlignPolicy = FrameAlignPolicy.no_extend, - input_based: bool = False, - keep_transparency: bool = False, -) -> BytesIO: - """ - 使用静图或动图制作gif - :params - * ``img``: 输入图片,如头像 - * ``maker``: 图片处理函数生成,传入第几帧,返回对应的图片处理函数 - * ``frame_num``: 目标gif的帧数 - * ``duration``: 相邻帧之间的时间间隔,单位为秒 - * ``frame_align``: 要叠加的gif长度大于基准gif时,gif长度对齐方式 - * ``input_based``: 是否以输入gif为基准合成gif,默认为`False`,即以目标gif为基准 - * ``keep_transparency``: 传入gif时,是否保留该gif的透明度 - """ - image = img.image - if not getattr(image, "is_animated", False): - return save_gif([maker(i)(img).image for i in range(frame_num)], duration) 
- - frame_num_in = image.n_frames - duration_in = get_avg_duration(image) / 1000 - total_duration_in = frame_num_in * duration_in - total_duration = frame_num * duration - - if input_based: - frame_num_base = frame_num_in - frame_num_fit = frame_num - duration_base = duration_in - duration_fit = duration - total_duration_base = total_duration_in - total_duration_fit = total_duration - else: - frame_num_base = frame_num - frame_num_fit = frame_num_in - duration_base = duration - duration_fit = duration_in - total_duration_base = total_duration - total_duration_fit = total_duration_in - - frame_idxs: List[int] = list(range(frame_num_base)) - diff_duration = total_duration_fit - total_duration_base - diff_num = int(diff_duration / duration_base) - - if diff_duration >= duration_base: - if frame_align == FrameAlignPolicy.extend_first: - frame_idxs = [0] * diff_num + frame_idxs - - elif frame_align == FrameAlignPolicy.extend_last: - frame_idxs += [frame_num_base - 1] * diff_num - - elif frame_align == FrameAlignPolicy.extend_loop: - frame_num_total = frame_num_base - # 重复基准gif,直到两个gif总时长之差在1个间隔以内,或总帧数超出最大帧数 - while frame_num_total + frame_num_base <= meme_config.gif.gif_max_frames: - frame_num_total += frame_num_base - frame_idxs += list(range(frame_num_base)) - multiple = round(frame_num_total * duration_base / total_duration_fit) - if ( - math.fabs( - total_duration_fit * multiple - frame_num_total * duration_base - ) - <= duration_base - ): - break - - frames: List[IMG] = [] - frame_idx_fit = 0 - time_start = 0 - for i, idx in enumerate(frame_idxs): - while frame_idx_fit < frame_num_fit: - if ( - frame_idx_fit * duration_fit - <= i * duration_base - time_start - < (frame_idx_fit + 1) * duration_fit - ): - if input_based: - idx_in = idx - idx_maker = frame_idx_fit - else: - idx_in = frame_idx_fit - idx_maker = idx - - func = maker(idx_maker) - image.seek(idx_in) - frames.append(func(BuildImage(image.copy())).image) - break - else: - frame_idx_fit += 1 - if frame_idx_fit >= frame_num_fit: - frame_idx_fit = 0 - time_start += total_duration_fit - - if keep_transparency: - image.seek(0) - if image.info.__contains__("transparency"): - frames[0].info["transparency"] = image.info["transparency"] - - return save_gif(frames, duration) - - -async def translate(text: str, lang_from: str = "auto", lang_to: str = "zh") -> str: - appid = meme_config.translate.baidu_trans_appid - apikey = meme_config.translate.baidu_trans_apikey - if not appid or not apikey: - raise MemeGeneratorException( - "The `baidu_trans_appid` or `baidu_trans_apikey` is not set." - "Please check your config file!" 
- ) - salt = str(round(time.time() * 1000)) - sign_raw = appid + text + salt + apikey - sign = hashlib.md5(sign_raw.encode("utf8")).hexdigest() - params = { - "q": text, - "from": lang_from, - "to": lang_to, - "appid": appid, - "salt": salt, - "sign": sign, - } - url = "https://fanyi-api.baidu.com/api/trans/vip/translate" - async with httpx.AsyncClient() as client: - resp = await client.get(url, params=params) - result = resp.json() - return result["trans_result"][0]["dst"] -async def translate_microsoft(text: str, lang_from: str = "zh-CN", lang_to: str = "ja") -> str: - if lang_to == 'jp': - lang_to = 'ja' - params = { - "text": text, - "from": lang_from, - "to": lang_to, - } - url = "https://api.pawan.krd/mtranslate" - async with httpx.AsyncClient() as client: - resp = await client.get(url, params=params) - result = resp.json() - return result["translated"] - -def random_text() -> str: - return random.choice(["刘一", "陈二", "张三", "李四", "王五", "赵六", "孙七", "周八", "吴九", "郑十"]) - - -def random_image() -> BytesIO: - text = random.choice(["😂", "😅", "🤗", "🤤", "🥵", "🥰", "😍", "😭", "😋", "😏"]) - return ( - BuildImage.new("RGBA", (500, 500), "white") - .draw_text((0, 0, 500, 500), text, max_fontsize=400) - .save_png() - ) - - -@dataclass -class TextProperties: - fill: ColorType = "black" - style: FontStyle = "normal" - weight: FontWeight = "normal" - stroke_width: int = 0 - stroke_fill: Optional[ColorType] = None - - -def default_template(meme: "Meme", number: int) -> str: - return f"{number}. {'/'.join(meme.keywords)}" - - -def render_meme_list( - meme_list: List[Tuple["Meme", TextProperties]], - *, - template: Callable[["Meme", int], str] = default_template, - order_direction: Literal["row", "column"] = "column", - columns: int = 4, - column_align: Literal["left", "center", "right"] = "left", - item_padding: Tuple[int, int] = (15, 6), - image_padding: Tuple[int, int] = (50, 50), - bg_color: ColorType = "white", - fontsize: int = 30, - fontname: str = "", - fallback_fonts: List[str] = [], -) -> BytesIO: - item_images: List[Text2Image] = [] - for i, (meme, properties) in enumerate(meme_list, start=1): - text = template(meme, i) - t2m = Text2Image.from_text( - text, - fontsize=fontsize, - style=properties.style, - weight=properties.weight, - fill=properties.fill, - stroke_width=properties.stroke_width, - stroke_fill=properties.stroke_fill, - fontname=fontname, - fallback_fonts=fallback_fonts, - ) - item_images.append(t2m) - char_A = ( - Text2Image.from_text( - "A", fontsize=fontsize, fontname=fontname, fallback_fonts=fallback_fonts - ) - .lines[0] - .chars[0] - ) - num_per_col = math.ceil(len(item_images) / columns) - column_images: List[BuildImage] = [] - for col in range(columns): - if order_direction == "column": - images = item_images[col * num_per_col : (col + 1) * num_per_col] - else: - images = [ - item_images[num * columns + col] - for num in range((len(item_images) - col - 1) // columns + 1) - ] - img_w = max((t2m.width for t2m in images)) + item_padding[0] * 2 - img_h = (char_A.ascent + item_padding[1] * 2) * len(images) + char_A.descent - image = BuildImage.new("RGB", (img_w, img_h), bg_color) - y = item_padding[1] - for t2m in images: - if column_align == "left": - x = 0 - elif column_align == "center": - x = (img_w - t2m.width - item_padding[0] * 2) // 2 - else: - x = img_w - t2m.width - item_padding[0] * 2 - t2m.draw_on_image(image.image, (x, y)) - y += char_A.ascent + item_padding[1] * 2 - column_images.append(image) - - img_w = sum((img.width for img in column_images)) + image_padding[0] * 
2 - img_h = max((img.height for img in column_images)) + image_padding[1] * 2 - image = BuildImage.new("RGB", (img_w, img_h), bg_color) - x, y = image_padding - for img in column_images: - image.paste(img, (x, y)) - x += img.width - return image.save_jpg() diff --git a/spaces/inamXcontru/PoeticTTS/Assassins Creed Odyssey Deluxe Edition MULTi15 Repack-FitGirl.md b/spaces/inamXcontru/PoeticTTS/Assassins Creed Odyssey Deluxe Edition MULTi15 Repack-FitGirl.md deleted file mode 100644 index 4a8a4070ebd603ff7bdac960fde3b82f0ca99cd4..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Assassins Creed Odyssey Deluxe Edition MULTi15 Repack-FitGirl.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Assassins Creed Odyssey Deluxe Edition MULTi15 Repack-FitGirl


              DOWNLOADhttps://gohhs.com/2uz5H3



              -
              - aaccfb2cb3
              -
              -
              -

              diff --git a/spaces/inayet/inayet-autotrain-price-prediction-1331950922/app.py b/spaces/inayet/inayet-autotrain-price-prediction-1331950922/app.py deleted file mode 100644 index 65c268f82af2d82d6f34373ce098f873812efe25..0000000000000000000000000000000000000000 --- a/spaces/inayet/inayet-autotrain-price-prediction-1331950922/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/inayet/autotrain-price-prediction-1331950922").launch() \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/14 English Topics Atsakymai Nemokamai.md b/spaces/inplisQlawa/anything-midjourney-v4-1/14 English Topics Atsakymai Nemokamai.md deleted file mode 100644 index c00f4fc46fc2bd927c6fb315bd92d73c983f1d9e..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/14 English Topics Atsakymai Nemokamai.md +++ /dev/null @@ -1,36 +0,0 @@ - -

              How to Learn English with 14 Free Topics

              -

If you are looking for a way to improve your English skills, you might be interested in 14 English Topics Atsakymai Nemokamai. This is a website that offers free access to 14 topics that cover various aspects of the English language and culture. You can download the topics as PDF files and use them for self-study or classroom activities.

              -

              14 English Topics Atsakymai Nemokamai


              Download >>>>> https://urlin.us/2uEysm



              -

Each topic consists of a reading text, a vocabulary list, a grammar section, a speaking task and an exam practice section. The topics are designed to help you expand your vocabulary, practice your use of English, develop your reading and speaking skills and prepare for exams such as FCE, CAE or CPE. The topics cover a range of themes, such as education, health, travel, sports, art and music.

              -

              Some of the benefits of using 14 English Topics Atsakymai Nemokamai are:

              -
                -
              • You can learn at your own pace and level.
              • -
              • You can choose the topics that interest you the most.
              • -
              • You can review the topics as many times as you need.
              • -
              • You can test your knowledge and progress with the exam practice.
              • -
              • You can access the topics anytime and anywhere.
              • -
              -

              To start learning with 14 English Topics Atsakymai Nemokamai, you just need to visit their website and register for free. You will then be able to download the topics and start learning. You can also join their Facebook group and share your opinions and experiences with other learners.

              -

14 English Topics Atsakymai Nemokamai is a great resource for anyone who wants to improve their English skills and learn more about the world. Whether you are a beginner or an advanced learner, you will find something useful and enjoyable in these topics. So don't hesitate to give it a try!

              -

              - -

              How can you benefit from using 14 English Topics Atsakymai Nemokamai? According to the website, there are several advantages of learning English with these topics. For example, you can:

              -
                -
              • Improve your communication skills and confidence in speaking English.
              • -
              • Enhance your cultural awareness and understanding of different perspectives.
              • -
              • Boost your creativity and critical thinking skills.
              • -
              • Prepare for academic or professional exams and opportunities.
              • -
              • Have fun and enjoy learning English.
              • -
              -

              How can you get the most out of 14 English Topics Atsakymai Nemokamai? Here are some tips to help you use these topics effectively:

              -
                -
              • Read the text carefully and try to understand the main idea and details.
              • -
              • Look up any unfamiliar words or expressions in the vocabulary list or a dictionary.
              • -
              • Do the grammar exercises and check your answers with the key.
              • -
              • Practice the speaking task with a partner or record yourself and listen to your performance.
              • -
              • Do the exam practice and compare your answers with the model answers.
              • -
              -

              In conclusion, 14 English Topics Atsakymai Nemokamai is a valuable resource for anyone who wants to learn English with 14 free topics. You can download the topics from their website and use them for self-study or classroom activities. You can also join their Facebook group and interact with other learners. If you are looking for a way to improve your English skills and learn more about the world, you should give 14 English Topics Atsakymai Nemokamai a try!

              d5da3c52bf
              -
              -
              \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Microsoft Project 2010 32 Bit Free Download Torrent [REPACK].md b/spaces/inplisQlawa/anything-midjourney-v4-1/Microsoft Project 2010 32 Bit Free Download Torrent [REPACK].md deleted file mode 100644 index 3c8a611d41db082c06741e3b38adceeed371706f..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Microsoft Project 2010 32 Bit Free Download Torrent [REPACK].md +++ /dev/null @@ -1,14 +0,0 @@ -
              -

              How to Download Microsoft Project 2010 32 Bit for Free Using Torrent

              -

              Microsoft Project 2010 is a powerful project management software that helps you plan, track, and manage your projects. It has many features and tools that make it easy to create schedules, assign resources, monitor progress, and communicate with stakeholders. However, Microsoft Project 2010 is not a cheap software and you may not want to spend money on buying it. If you are looking for a way to download Microsoft Project 2010 32 bit for free using torrent, then this article is for you.

              -

              microsoft project 2010 32 bit free download torrent


              Download File ———>>> https://urlin.us/2uEwnh



              -

Torrent is a peer-to-peer file-sharing protocol that allows users to download files from other users who have the same file. Torrent files are small files that contain information about the larger files that you want to download. To download a torrent file, you need a torrent client, software that can connect to other users and download the file in pieces. There are many torrent clients available online, such as uTorrent, BitTorrent, qBittorrent, etc. You can choose any one of them and install it on your computer.

              -

              Once you have a torrent client installed, you need to find a torrent file for Microsoft Project 2010 32 bit. You can search for it on various torrent websites, such as The Pirate Bay, Kickass Torrents, RARBG, etc. However, be careful when downloading torrent files from these websites as they may contain viruses or malware that can harm your computer. Always check the comments and ratings of the torrent files before downloading them. Also, use a VPN service to hide your IP address and protect your privacy.

              -

              After you have downloaded the torrent file for Microsoft Project 2010 32 bit, open it with your torrent client and start the download process. Depending on the speed of your internet connection and the number of seeders (users who have the complete file and are sharing it), the download may take some time. Once the download is complete, you will have an ISO file of Microsoft Project 2010 32 bit on your computer.

              -

              An ISO file is an image file that contains all the data of a CD or DVD. To install Microsoft Project 2010 32 bit from an ISO file, you need to mount it on a virtual drive or burn it on a physical disc. You can use software such as Daemon Tools Lite or PowerISO to mount or burn ISO files. After mounting or burning the ISO file, you can run the setup.exe file and follow the instructions to install Microsoft Project 2010 32 bit on your computer.

              -

              However, installing Microsoft Project 2010 32 bit is not enough to use it. You also need to activate it with a valid product key or a crack. A product key is a code that verifies that you have purchased the software legally and allows you to use all its features. A crack is a program that bypasses the activation process and makes the software think that it is activated. You can find product keys or cracks for Microsoft Project 2010 32 bit on various websites or forums online. However, again be careful when downloading them as they may contain viruses or malware that can harm your computer.

              -

              -

Once you have activated Microsoft Project 2010 32 bit with a product key or a crack, you can enjoy using it for free. However, keep in mind that downloading Microsoft Project 2010 32 bit for free using a torrent is illegal and unethical. You are violating copyright law and depriving the developers of their rightful income. You may also face legal consequences if you are caught by the authorities. Therefore, we do not recommend downloading Microsoft Project 2010 32 bit for free using a torrent.

              -

              The best way to use Microsoft Project 2010 32 bit is to buy it from an official source and support the developers who have created this amazing software. You can buy Microsoft Project 2010 32 bit from Microsoft's website or from authorized resellers online or offline. You will get a genuine product key that will activate your software and allow you to use all its features without any limitations or risks.

              d5da3c52bf
              -
              -
              \ No newline at end of file diff --git a/spaces/ismot/1702t1/evaluation/accuracy.py b/spaces/ismot/1702t1/evaluation/accuracy.py deleted file mode 100644 index 754a33502a3b89e9b3ff41b14e4d4ca76f7fa8d4..0000000000000000000000000000000000000000 --- a/spaces/ismot/1702t1/evaluation/accuracy.py +++ /dev/null @@ -1,249 +0,0 @@ -""" -@date: 2021/8/4 -@description: -""" -import numpy as np -import cv2 -import scipy - -from evaluation.f1_score import f1_score_2d -from loss import GradLoss -from utils.boundary import corners2boundaries, layout2depth -from utils.conversion import depth2xyz, uv2xyz, get_u, depth2uv, xyz2uv, uv2pixel -from utils.height import calc_ceil_ratio -from evaluation.iou import calc_IoU, calc_Iou_height -from visualization.boundary import draw_boundaries -from visualization.floorplan import draw_iou_floorplan -from visualization.grad import show_grad - - -def calc_accuracy(dt, gt, visualization=False, h=512): - visb_iou_2ds = [] - visb_iou_3ds = [] - full_iou_2ds = [] - full_iou_3ds = [] - iou_heights = [] - - visb_iou_floodplans = [] - full_iou_floodplans = [] - pano_bds = [] - - if 'depth' not in dt.keys(): - dt['depth'] = gt['depth'] - - for i in range(len(gt['depth'])): - # print(i) - dt_xyz = dt['processed_xyz'][i] if 'processed_xyz' in dt else depth2xyz(np.abs(dt['depth'][i])) - visb_gt_xyz = depth2xyz(np.abs(gt['depth'][i])) - corners = gt['corners'][i] - full_gt_corners = corners[corners[..., 0] + corners[..., 1] != 0] # Take effective corners - full_gt_xyz = uv2xyz(full_gt_corners) - - dt_xz = dt_xyz[..., ::2] - visb_gt_xz = visb_gt_xyz[..., ::2] - full_gt_xz = full_gt_xyz[..., ::2] - - gt_ratio = gt['ratio'][i][0] - - if 'ratio' not in dt.keys(): - if 'boundary' in dt.keys(): - w = len(dt['boundary'][i]) - boundary = np.clip(dt['boundary'][i], 0.0001, 0.4999) - depth = np.clip(dt['depth'][i], 0.001, 9999) - dt_ceil_boundary = np.concatenate([get_u(w, is_np=True)[..., None], boundary], axis=-1) - dt_floor_boundary = depth2uv(depth) - dt_ratio = calc_ceil_ratio(boundaries=[dt_ceil_boundary, dt_floor_boundary]) - else: - dt_ratio = gt_ratio - else: - dt_ratio = dt['ratio'][i][0] - - visb_iou_2d, visb_iou_3d = calc_IoU(dt_xz, visb_gt_xz, dt_height=1 + dt_ratio, gt_height=1 + gt_ratio) - full_iou_2d, full_iou_3d = calc_IoU(dt_xz, full_gt_xz, dt_height=1 + dt_ratio, gt_height=1 + gt_ratio) - iou_height = calc_Iou_height(dt_height=1 + dt_ratio, gt_height=1 + gt_ratio) - - visb_iou_2ds.append(visb_iou_2d) - visb_iou_3ds.append(visb_iou_3d) - full_iou_2ds.append(full_iou_2d) - full_iou_3ds.append(full_iou_3d) - iou_heights.append(iou_height) - - if visualization: - pano_img = cv2.resize(gt['image'][i].transpose(1, 2, 0), (h*2, h)) - # visb_iou_floodplans.append(draw_iou_floorplan(dt_xz, visb_gt_xz, iou_2d=visb_iou_2d, iou_3d=visb_iou_3d, side_l=h)) - # full_iou_floodplans.append(draw_iou_floorplan(dt_xz, full_gt_xz, iou_2d=full_iou_2d, iou_3d=full_iou_3d, side_l=h)) - visb_iou_floodplans.append(draw_iou_floorplan(dt_xz, visb_gt_xz, side_l=h)) - full_iou_floodplans.append(draw_iou_floorplan(dt_xz, full_gt_xz, side_l=h)) - gt_boundaries = corners2boundaries(gt_ratio, corners_xyz=full_gt_xyz, step=None, length=1024, visible=False) - dt_boundaries = corners2boundaries(dt_ratio, corners_xyz=dt_xyz, step=None, visible=False, - length=1024)#visb_gt_xyz.shape[0] if dt_xyz.shape[0] != visb_gt_xyz.shape[0] else None) - - pano_bd = draw_boundaries(pano_img, boundary_list=gt_boundaries, boundary_color=[0, 0, 1]) - pano_bd = draw_boundaries(pano_bd, 
boundary_list=dt_boundaries, boundary_color=[0, 1, 0]) - pano_bds.append(pano_bd) - - visb_iou_2d = np.array(visb_iou_2ds).mean() - visb_iou_3d = np.array(visb_iou_3ds).mean() - full_iou_2d = np.array(full_iou_2ds).mean() - full_iou_3d = np.array(full_iou_3ds).mean() - iou_height = np.array(iou_heights).mean() - - if visualization: - visb_iou_floodplans = np.array(visb_iou_floodplans).transpose(0, 3, 1, 2) # NCHW - full_iou_floodplans = np.array(full_iou_floodplans).transpose(0, 3, 1, 2) # NCHW - pano_bds = np.array(pano_bds).transpose(0, 3, 1, 2) - return [visb_iou_2d, visb_iou_3d, visb_iou_floodplans],\ - [full_iou_2d, full_iou_3d, full_iou_floodplans], iou_height, pano_bds, full_iou_2ds - - -def calc_ce(dt, gt): - w = 1024 - h = 512 - ce_s = [] - for i in range(len(gt['corners'])): - floor_gt_corners = gt['corners'][i] - # Take effective corners - floor_gt_corners = floor_gt_corners[floor_gt_corners[..., 0] + floor_gt_corners[..., 1] != 0] - floor_gt_corners = np.roll(floor_gt_corners, -np.argmin(floor_gt_corners[..., 0]), 0) - gt_ratio = gt['ratio'][i][0] - ceil_gt_corners = corners2boundaries(gt_ratio, corners_uv=floor_gt_corners, step=None)[1] - gt_corners = np.concatenate((floor_gt_corners, ceil_gt_corners)) - gt_corners = uv2pixel(gt_corners, w, h) - - floor_dt_corners = xyz2uv(dt['processed_xyz'][i]) - floor_dt_corners = np.roll(floor_dt_corners, -np.argmin(floor_dt_corners[..., 0]), 0) - dt_ratio = dt['ratio'][i][0] - ceil_dt_corners = corners2boundaries(dt_ratio, corners_uv=floor_dt_corners, step=None)[1] - dt_corners = np.concatenate((floor_dt_corners, ceil_dt_corners)) - dt_corners = uv2pixel(dt_corners, w, h) - - mse = np.sqrt(((gt_corners - dt_corners) ** 2).sum(1)).mean() - ce = 100 * mse / np.sqrt(w ** 2 + h ** 2) - ce_s.append(ce) - - return np.array(ce_s).mean() - - -def calc_pe(dt, gt): - w = 1024 - h = 512 - pe_s = [] - for i in range(len(gt['corners'])): - floor_gt_corners = gt['corners'][i] - # Take effective corners - floor_gt_corners = floor_gt_corners[floor_gt_corners[..., 0] + floor_gt_corners[..., 1] != 0] - floor_gt_corners = np.roll(floor_gt_corners, -np.argmin(floor_gt_corners[..., 0]), 0) - gt_ratio = gt['ratio'][i][0] - gt_floor_boundary, gt_ceil_boundary = corners2boundaries(gt_ratio, corners_uv=floor_gt_corners, length=w) - gt_floor_boundary = uv2pixel(gt_floor_boundary, w, h) - gt_ceil_boundary = uv2pixel(gt_ceil_boundary, w, h) - - floor_dt_corners = xyz2uv(dt['processed_xyz'][i]) - floor_dt_corners = np.roll(floor_dt_corners, -np.argmin(floor_dt_corners[..., 0]), 0) - dt_ratio = dt['ratio'][i][0] - dt_floor_boundary, dt_ceil_boundary = corners2boundaries(dt_ratio, corners_uv=floor_dt_corners, length=w) - dt_floor_boundary = uv2pixel(dt_floor_boundary, w, h) - dt_ceil_boundary = uv2pixel(dt_ceil_boundary, w, h) - - gt_surface = np.zeros((h, w), dtype=np.int32) - gt_surface[gt_ceil_boundary[..., 1], np.arange(w)] = 1 - gt_surface[gt_floor_boundary[..., 1], np.arange(w)] = 1 - gt_surface = np.cumsum(gt_surface, axis=0) - - dt_surface = np.zeros((h, w), dtype=np.int32) - dt_surface[dt_ceil_boundary[..., 1], np.arange(w)] = 1 - dt_surface[dt_floor_boundary[..., 1], np.arange(w)] = 1 - dt_surface = np.cumsum(dt_surface, axis=0) - - pe = 100 * (dt_surface != gt_surface).sum() / (h * w) - pe_s.append(pe) - return np.array(pe_s).mean() - - -def calc_rmse_delta_1(dt, gt): - rmse_s = [] - delta_1_s = [] - for i in range(len(gt['depth'])): - gt_boundaries = corners2boundaries(gt['ratio'][i], corners_xyz=depth2xyz(gt['depth'][i]), step=None, - visible=False) - 
dt_xyz = dt['processed_xyz'][i] if 'processed_xyz' in dt else depth2xyz(np.abs(dt['depth'][i])) - - dt_boundaries = corners2boundaries(dt['ratio'][i], corners_xyz=dt_xyz, step=None, - length=256 if 'processed_xyz' in dt else None, - visible=True if 'processed_xyz' in dt else False) - gt_layout_depth = layout2depth(gt_boundaries, show=False) - dt_layout_depth = layout2depth(dt_boundaries, show=False) - - rmse = ((gt_layout_depth - dt_layout_depth) ** 2).mean() ** 0.5 - threshold = np.maximum(gt_layout_depth / dt_layout_depth, dt_layout_depth / gt_layout_depth) - delta_1 = (threshold < 1.25).mean() - rmse_s.append(rmse) - delta_1_s.append(delta_1) - return np.array(rmse_s).mean(), np.array(delta_1_s).mean() - - -def calc_f1_score(dt, gt, threshold=10): - w = 1024 - h = 512 - f1_s = [] - precision_s = [] - recall_s = [] - for i in range(len(gt['corners'])): - floor_gt_corners = gt['corners'][i] - # Take effective corners - floor_gt_corners = floor_gt_corners[floor_gt_corners[..., 0] + floor_gt_corners[..., 1] != 0] - floor_gt_corners = np.roll(floor_gt_corners, -np.argmin(floor_gt_corners[..., 0]), 0) - gt_ratio = gt['ratio'][i][0] - ceil_gt_corners = corners2boundaries(gt_ratio, corners_uv=floor_gt_corners, step=None)[1] - gt_corners = np.concatenate((floor_gt_corners, ceil_gt_corners)) - gt_corners = uv2pixel(gt_corners, w, h) - - floor_dt_corners = xyz2uv(dt['processed_xyz'][i]) - floor_dt_corners = np.roll(floor_dt_corners, -np.argmin(floor_dt_corners[..., 0]), 0) - dt_ratio = dt['ratio'][i][0] - ceil_dt_corners = corners2boundaries(dt_ratio, corners_uv=floor_dt_corners, step=None)[1] - dt_corners = np.concatenate((floor_dt_corners, ceil_dt_corners)) - dt_corners = uv2pixel(dt_corners, w, h) - - Fs, Ps, Rs = f1_score_2d(gt_corners, dt_corners, [threshold]) - f1_s.append(Fs[0]) - precision_s.append(Ps[0]) - recall_s.append(Rs[0]) - - return np.array(f1_s).mean(), np.array(precision_s).mean(), np.array(recall_s).mean() - - -def show_heat_map(dt, gt, vis_w=1024): - dt_heat_map = dt['corner_heat_map'].detach().cpu().numpy() - gt_heat_map = gt['corner_heat_map'].detach().cpu().numpy() - dt_heat_map_imgs = [] - gt_heat_map_imgs = [] - for i in range(len(gt['depth'])): - dt_heat_map_img = dt_heat_map[..., np.newaxis].repeat(3, axis=-1).repeat(20, axis=0) - gt_heat_map_img = gt_heat_map[..., np.newaxis].repeat(3, axis=-1).repeat(20, axis=0) - dt_heat_map_imgs.append(cv2.resize(dt_heat_map_img, (vis_w, dt_heat_map_img.shape[0])).transpose(2, 0, 1)) - gt_heat_map_imgs.append(cv2.resize(gt_heat_map_img, (vis_w, dt_heat_map_img.shape[0])).transpose(2, 0, 1)) - return dt_heat_map_imgs, gt_heat_map_imgs - - -def show_depth_normal_grad(dt, gt, device, vis_w=1024): - grad_conv = GradLoss().to(device).grad_conv - gt_grad_imgs = [] - dt_grad_imgs = [] - - if 'depth' not in dt.keys(): - dt['depth'] = gt['depth'] - - if vis_w == 1024: - h = 5 - else: - h = int(vis_w / (12 * 10)) - - for i in range(len(gt['depth'])): - gt_grad_img = show_grad(gt['depth'][i], grad_conv, h) - dt_grad_img = show_grad(dt['depth'][i], grad_conv, h) - vis_h = dt_grad_img.shape[0] * (vis_w // dt_grad_img.shape[1]) - gt_grad_imgs.append(cv2.resize(gt_grad_img, (vis_w, vis_h), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1)) - dt_grad_imgs.append(cv2.resize(dt_grad_img, (vis_w, vis_h), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1)) - - return gt_grad_imgs, dt_grad_imgs diff --git a/spaces/jackli888/stable-diffusion-webui/extensions-builtin/SwinIR/scripts/swinir_model.py 
b/spaces/jackli888/stable-diffusion-webui/extensions-builtin/SwinIR/scripts/swinir_model.py deleted file mode 100644 index e8783bca153954afd086536a6dee854ec5e17ba9..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/extensions-builtin/SwinIR/scripts/swinir_model.py +++ /dev/null @@ -1,178 +0,0 @@ -import contextlib -import os - -import numpy as np -import torch -from PIL import Image -from basicsr.utils.download_util import load_file_from_url -from tqdm import tqdm - -from modules import modelloader, devices, script_callbacks, shared -from modules.shared import cmd_opts, opts, state -from swinir_model_arch import SwinIR as net -from swinir_model_arch_v2 import Swin2SR as net2 -from modules.upscaler import Upscaler, UpscalerData - - -device_swinir = devices.get_device_for('swinir') - - -class UpscalerSwinIR(Upscaler): - def __init__(self, dirname): - self.name = "SwinIR" - self.model_url = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0" \ - "/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR" \ - "-L_x4_GAN.pth " - self.model_name = "SwinIR 4x" - self.user_path = dirname - super().__init__() - scalers = [] - model_files = self.find_models(ext_filter=[".pt", ".pth"]) - for model in model_files: - if "http" in model: - name = self.model_name - else: - name = modelloader.friendly_name(model) - model_data = UpscalerData(name, model, self) - scalers.append(model_data) - self.scalers = scalers - - def do_upscale(self, img, model_file): - model = self.load_model(model_file) - if model is None: - return img - model = model.to(device_swinir, dtype=devices.dtype) - img = upscale(img, model) - try: - torch.cuda.empty_cache() - except: - pass - return img - - def load_model(self, path, scale=4): - if "http" in path: - dl_name = "%s%s" % (self.model_name.replace(" ", "_"), ".pth") - filename = load_file_from_url(url=path, model_dir=self.model_path, file_name=dl_name, progress=True) - else: - filename = path - if filename is None or not os.path.exists(filename): - return None - if filename.endswith(".v2.pth"): - model = net2( - upscale=scale, - in_chans=3, - img_size=64, - window_size=8, - img_range=1.0, - depths=[6, 6, 6, 6, 6, 6], - embed_dim=180, - num_heads=[6, 6, 6, 6, 6, 6], - mlp_ratio=2, - upsampler="nearest+conv", - resi_connection="1conv", - ) - params = None - else: - model = net( - upscale=scale, - in_chans=3, - img_size=64, - window_size=8, - img_range=1.0, - depths=[6, 6, 6, 6, 6, 6, 6, 6, 6], - embed_dim=240, - num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8], - mlp_ratio=2, - upsampler="nearest+conv", - resi_connection="3conv", - ) - params = "params_ema" - - pretrained_model = torch.load(filename) - if params is not None: - model.load_state_dict(pretrained_model[params], strict=True) - else: - model.load_state_dict(pretrained_model, strict=True) - return model - - -def upscale( - img, - model, - tile=None, - tile_overlap=None, - window_size=8, - scale=4, -): - tile = tile or opts.SWIN_tile - tile_overlap = tile_overlap or opts.SWIN_tile_overlap - - - img = np.array(img) - img = img[:, :, ::-1] - img = np.moveaxis(img, 2, 0) / 255 - img = torch.from_numpy(img).float() - img = img.unsqueeze(0).to(device_swinir, dtype=devices.dtype) - with torch.no_grad(), devices.autocast(): - _, _, h_old, w_old = img.size() - h_pad = (h_old // window_size + 1) * window_size - h_old - w_pad = (w_old // window_size + 1) * window_size - w_old - img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, : h_old + h_pad, :] - img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, : 
w_old + w_pad] - output = inference(img, model, tile, tile_overlap, window_size, scale) - output = output[..., : h_old * scale, : w_old * scale] - output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy() - if output.ndim == 3: - output = np.transpose( - output[[2, 1, 0], :, :], (1, 2, 0) - ) # CHW-RGB to HCW-BGR - output = (output * 255.0).round().astype(np.uint8) # float32 to uint8 - return Image.fromarray(output, "RGB") - - -def inference(img, model, tile, tile_overlap, window_size, scale): - # test the image tile by tile - b, c, h, w = img.size() - tile = min(tile, h, w) - assert tile % window_size == 0, "tile size should be a multiple of window_size" - sf = scale - - stride = tile - tile_overlap - h_idx_list = list(range(0, h - tile, stride)) + [h - tile] - w_idx_list = list(range(0, w - tile, stride)) + [w - tile] - E = torch.zeros(b, c, h * sf, w * sf, dtype=devices.dtype, device=device_swinir).type_as(img) - W = torch.zeros_like(E, dtype=devices.dtype, device=device_swinir) - - with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar: - for h_idx in h_idx_list: - if state.interrupted or state.skipped: - break - - for w_idx in w_idx_list: - if state.interrupted or state.skipped: - break - - in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile] - out_patch = model(in_patch) - out_patch_mask = torch.ones_like(out_patch) - - E[ - ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf - ].add_(out_patch) - W[ - ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf - ].add_(out_patch_mask) - pbar.update(1) - output = E.div_(W) - - return output - - -def on_ui_settings(): - import gradio as gr - - shared.opts.add_option("SWIN_tile", shared.OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling"))) - shared.opts.add_option("SWIN_tile_overlap", shared.OptionInfo(8, "Tile overlap, in pixels for SwinIR. 
Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}, section=('upscaling', "Upscaling"))) - - -script_callbacks.on_ui_settings(on_ui_settings) diff --git a/spaces/jbilcke-hf/VideoQuest/src/app/queries/getInventoryItem.ts b/spaces/jbilcke-hf/VideoQuest/src/app/queries/getInventoryItem.ts deleted file mode 100644 index 05c9fcaf6e41cc1a7a03d56b31034cdd549dac13..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/VideoQuest/src/app/queries/getInventoryItem.ts +++ /dev/null @@ -1,5 +0,0 @@ - - -// 3D render of a single coconut, highly detailed, beautiful, white background - -// 3D render of a fishbone, highly detailed, beautiful, pixar style, white background \ No newline at end of file diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/app/server/actions/models.ts b/spaces/jbilcke-hf/ai-clip-factory/src/app/server/actions/models.ts deleted file mode 100644 index 37740a86575f332c79589d5afa1679d73f4faab0..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-clip-factory/src/app/server/actions/models.ts +++ /dev/null @@ -1,98 +0,0 @@ -"use server" - -import { SDXLModel } from "@/types" - -const SDXL_MODEL_DATABASE_URL = "https://huggingface.co/spaces/multimodalart/LoraTheExplorer/raw/main/sdxl_loras.json" - -export async function getSDXLModels(): Promise { - const res = await fetch(SDXL_MODEL_DATABASE_URL, { - method: "GET", - headers: { - "Content-Type": "application/json" - }, - cache: "no-store", - // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache) - // next: { revalidate: 1 } - }) - - const content = await res.json() as SDXLModel[] - - - // we only return compatible models - const compatibleModels = content.filter(model => model.is_compatible) - - const hardcoded: SDXLModel[] = [ - { - "image": "https://jbilcke-hf-ai-clip-factory.hf.space/images/models/sdxl-starfield.jpg", - "title": "sdxl-starfield", - "repo": "jbilcke-hf/sdxl-starfield", - "trigger_word": "starfield-style", - "weights": "pytorch_lora_weights.safetensors", - "is_compatible": true, - "likes": 0, - "downloads": 0 - }, - { - "image": "https://jbilcke-hf-ai-clip-factory.hf.space/images/models/sdxl-akira.jpg", - "title": "sdxl-akira", - "repo": "jbilcke-hf/sdxl-akira", - "trigger_word": "akira-style", - "weights": "pytorch_lora_weights.safetensors", - "is_compatible": true, - "likes": 0, - "downloads": 0 - }, - { - "image": "https://jbilcke-hf-ai-clip-factory.hf.space/images/models/sdxl-cyberpunk-2077.jpg", - "title": "sdxl-cyberpunk-2077", - "repo": "jbilcke-hf/sdxl-cyberpunk-2077", - "trigger_word": "cyberpunk-2077", - "weights": "pytorch_lora_weights.safetensors", - "is_compatible": true, - "likes": 0, - "downloads": 0 - }, - { - "image": "https://jbilcke-hf-ai-clip-factory.hf.space/images/models/sdxl-modern-pixar.jpg", - "title": "sdxl-pixar-2", - "repo": "jbilcke-hf/sdxl-pixar-2", - "trigger_word": "pixar-2", - "weights": "pytorch_lora_weights.safetensors", - "is_compatible": true, - "likes": 0, - "downloads": 0 - }, - { - "image": "https://jbilcke-hf-ai-clip-factory.hf.space/images/models/sdxl-cinematic-2.jpg", - "title": "sdxl-cinematic-2", - "repo": "jbilcke-hf/sdxl-cinematic-2", - "trigger_word": "cinematic-2", - "weights": "pytorch_lora_weights.safetensors", - "is_compatible": true, - "likes": 0, - "downloads": 0 - }, - { - "image": "https://jbilcke-hf-ai-clip-factory.hf.space/images/models/sdxl-moebius-lean.jpg", - "title": "sdxl-moebius-lean", - "repo": "jbilcke-hf/sdxl-moebius-lean", - "trigger_word": "moebius-lean", - 
"weights": "pytorch_lora_weights.safetensors", - "is_compatible": true, - "likes": 0, - "downloads": 0 - }, - { - "image": "https://jbilcke-hf-ai-clip-factory.hf.space/images/models/sdxl-foundation-2.jpg", - "title": "sdxl-foundation-2", - "repo": "jbilcke-hf/sdxl-foundation-2", - "trigger_word": "hober-mallow", - "weights": "pytorch_lora_weights.safetensors", - "is_compatible": true, - "likes": 0, - "downloads": 0 - }, - ] - - return hardcoded.concat(compatibleModels) -} \ No newline at end of file diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/components/ui/card.tsx b/spaces/jbilcke-hf/ai-clip-factory/src/components/ui/card.tsx deleted file mode 100644 index 6583ebc1bb942bfb94e00fb4e7c7d685073c7b2a..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-clip-factory/src/components/ui/card.tsx +++ /dev/null @@ -1,79 +0,0 @@ -import * as React from "react" - -import { cn } from "@/lib/utils" - -const Card = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -
              -)) -Card.displayName = "Card" - -const CardHeader = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes<HTMLDivElement> ->(({ className, ...props }, ref) => ( -
              -)) -CardHeader.displayName = "CardHeader" - -const CardTitle = React.forwardRef< - HTMLParagraphElement, - React.HTMLAttributes<HTMLHeadingElement> ->(({ className, ...props }, ref) => ( -

              -)) -CardTitle.displayName = "CardTitle" - -const CardDescription = React.forwardRef< - HTMLParagraphElement, - React.HTMLAttributes<HTMLParagraphElement> ->(({ className, ...props }, ref) => ( -

              -)) -CardDescription.displayName = "CardDescription" - -const CardContent = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes<HTMLDivElement> ->(({ className, ...props }, ref) => ( -

              -)) -CardContent.displayName = "CardContent" - -const CardFooter = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes<HTMLDivElement> ->(({ className, ...props }, ref) => ( -
              -)) -CardFooter.displayName = "CardFooter" - -export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent } diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/components/ui/command.tsx b/spaces/jbilcke-hf/ai-clip-factory/src/components/ui/command.tsx deleted file mode 100644 index a4e602ef2508a071948aef7779023540c9f25381..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-clip-factory/src/components/ui/command.tsx +++ /dev/null @@ -1,155 +0,0 @@ -"use client" - -import * as React from "react" -import { DialogProps } from "@radix-ui/react-dialog" -import { Command as CommandPrimitive } from "cmdk" -import { Search } from "lucide-react" - -import { cn } from "@/lib/utils" -import { Dialog, DialogContent } from "@/components/ui/dialog" - -const Command = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -Command.displayName = CommandPrimitive.displayName - -interface CommandDialogProps extends DialogProps {} - -const CommandDialog = ({ children, ...props }: CommandDialogProps) => { - return ( - - - - {children} - - - - ) -} - -const CommandInput = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( -
              - - -
              -)) - -CommandInput.displayName = CommandPrimitive.Input.displayName - -const CommandList = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) - -CommandList.displayName = CommandPrimitive.List.displayName - -const CommandEmpty = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->((props, ref) => ( - -)) - -CommandEmpty.displayName = CommandPrimitive.Empty.displayName - -const CommandGroup = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) - -CommandGroup.displayName = CommandPrimitive.Group.displayName - -const CommandSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -CommandSeparator.displayName = CommandPrimitive.Separator.displayName - -const CommandItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) - -CommandItem.displayName = CommandPrimitive.Item.displayName - -const CommandShortcut = ({ - className, - ...props -}: React.HTMLAttributes) => { - return ( - - ) -} -CommandShortcut.displayName = "CommandShortcut" - -export { - Command, - CommandDialog, - CommandInput, - CommandList, - CommandEmpty, - CommandGroup, - CommandItem, - CommandShortcut, - CommandSeparator, -} diff --git a/spaces/jbrinkma/deepmind-pushworld/demo/photon.min.css b/spaces/jbrinkma/deepmind-pushworld/demo/photon.min.css deleted file mode 100644 index db7315b557f999ec058a0c96e43ff650d0337c99..0000000000000000000000000000000000000000 --- a/spaces/jbrinkma/deepmind-pushworld/demo/photon.min.css +++ /dev/null @@ -1,2016 +0,0 @@ -/*source code copied from https://deepmind-pushworld.github.io/play/*/ - -@charset "UTF-8"; -/*! - * ===================================================== - * Photon v0.1.1 - * Copyright 2015 Connor Sears - * Licensed under MIT (https://github.com/connors/proton/blob/master/LICENSE) - * - * v0.1.1 designed by @connors. 
- * ===================================================== - */ -audio, canvas, progress, sub, sup, video { - vertical-align:baseline -} - -body, html { - height:100% -} - -hr, html, label { - overflow:auto -} - -.clearfix:after, .toolbar-actions:after, .toolbar:after { - clear:both -} - -*, img { - -webkit-user-drag:text -} - -.list-group *, .nav-group-item, h1, h2, h3, h4, h5, h6, label, td, th { - white-space: nowrap; - text-overflow:ellipsis -} - -audio:not([controls]) { - display:none -} - -a:active, a:hover { - outline:0 -} - -abbr[title] { - border-bottom:1px dotted -} - -b, strong { - font-weight:700 -} - -dfn { - font-style:italic -} - -h1 { - margin: .67em 0; - font-size:36px -} - -small { - font-size:80% -} - -sub, sup { - font-size: 75%; - line-height: 0; - position:relative -} - -sup { - top:-.5em -} - -.pane-group, .window { - top: 0; - left: 0; - right:0 -} - -sub { - bottom:-.25em -} - -pre { - overflow:auto -} - -code, kbd, pre, samp { - font-family: monospace, monospace; - font-size:1em -} - -button, input, optgroup, select, textarea { - color: inherit; - font: inherit; - margin:0 -} - -input[type=number]::-webkit-inner-spin-button, input[type=number]::-webkit-outer-spin-button { - height:auto -} - -input[type=search]::-webkit-search-cancel-button, input[type=search]::-webkit-search-decoration { - -webkit-appearance:none -} - -fieldset { - border: 1px solid silver; - margin: 0 2px; - padding:.35em .625em .75em -} - -legend { - border: 0; - padding:0 -} - -* { - cursor: default; - -webkit-user-select: auto; - -webkit-box-sizing: border-box; - box-sizing:border-box -} - -html { - width:100% -} - -body { - padding: 0; - margin: 0; - font-family: system, -apple-system, ".SFNSDisplay-Regular", "Helvetica Neue", Helvetica, "Segoe UI", sans-serif; - font-size: 13px; - line-height: 1.6; - color: #333; - background-color:transparent -} - -.btn-dropdown:after, .icon:before { - font-family:photon-entypo -} - -hr { - margin: 15px 0; - background: 0 0; - border: 0; - border-bottom:1px solid #ddd -} - -h1, h2, h3, h4, h5, h6 { - margin-top: 20px; - margin-bottom: 10px; - font-weight: 500; - overflow:hidden -} - -.btn .icon, .toolbar-header .title { - margin-top:1px -} - -h2 { - font-size:30px -} - -h3 { - font-size:24px -} - -h4 { - font-size:18px -} - -h5 { - font-size:14px -} - -.btn, h6 { - font-size:12px -} - -.window { - position: absolute; - bottom: 0; - display: flex; - flex-direction: column; - background-color:#fff -} - -.window-content { - position: relative; - overflow-y: auto; - display: flex; - flex:1 -} - -.selectable-text { - cursor: text; - -webkit-user-select:text -} - -.btn, .title { - cursor:default -} - -.text-center { - text-align:center -} - -.text-right { - text-align:right -} - -.text-left { - text-align:left -} - -.btn, .title { - text-align:center -} - -.pull-left { - float:left -} - -.pull-right { - float:right -} - -.padded { - padding:10px -} - -.padded-less { - padding:5px -} - -.padded-more { - padding:20px -} - -.padded-vertically { - padding-top: 10px; - padding-bottom:10px -} - -.padded-vertically-less { - padding-top: 5px; - padding-bottom:5px -} - -.padded-vertically-more { - padding-top: 20px; - padding-bottom:20px -} - -.padded-horizontally { - padding-right: 10px; - padding-left:10px -} - -.padded-horizontally-less { - padding-right: 5px; - padding-left:5px -} - -.padded-horizontally-more { - padding-right: 20px; - padding-left:20px -} - -.padded-top { - padding-top:10px -} - -.padded-top-less { - padding-top:5px -} - -.padded-top-more { - 
padding-top:20px -} - -.padded-bottom { - padding-bottom:10px -} - -.padded-bottom-less { - padding-bottom:5px -} - -.padded-bottom-more { - padding-bottom:20px -} - -.sidebar { - background-color:#f5f5f4 -} - -.draggable { - -webkit-app-region:drag -} - -.btn, .btn-group { - vertical-align: middle; - -webkit-app-region:no-drag -} - -.clearfix:after, .clearfix:before { - display: table; - content: " " -} - -.btn { - display: inline-block; - padding: 3px 8px; - margin-bottom: 0; - line-height: 1.4; - white-space: nowrap; - background-image: none; - border: 1px solid transparent; - border-radius: 4px; - box-shadow:0 1px 1px rgba(0, 0, 0, .06) -} - -.btn:focus { - outline: 0; - box-shadow:none -} - -.btn-mini { - padding:2px 6px -} - -.btn-large { - padding:6px 12px -} - -.btn-form { - padding-right: 20px; - padding-left:20px -} - -.btn-default { - color: #333; - background-color: #fcfcfc; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #fcfcfc), color-stop(100%, #f1f1f1)); - background-image: -webkit-linear-gradient(top, #fcfcfc 0, #f1f1f1 100%); - background-image: linear-gradient(to bottom, #fcfcfc 0, #f1f1f1 100%); - border-color:#c2c0c2 #c2c0c2 #a19fa1 -} - -.btn-default:active { - background-color: #ddd; - background-image:none -} - -.btn-negative, .btn-positive, .btn-primary, .btn-warning { - color: #fff; - text-shadow:0 1px 1px rgba(0, 0, 0, .1) -} - -.btn-primary { - border-color: #388df8 #388df8 #0866dc; - background-color: #6eb4f7; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #6eb4f7), color-stop(100%, #1a82fb)); - background-image: -webkit-linear-gradient(top, #6eb4f7 0, #1a82fb 100%); - background-image:linear-gradient(to bottom, #6eb4f7 0, #1a82fb 100%) -} - -.btn-primary:active { - background-color: #3e9bf4; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #3e9bf4), color-stop(100%, #0469de)); - background-image: -webkit-linear-gradient(top, #3e9bf4 0, #0469de 100%); - background-image:linear-gradient(to bottom, #3e9bf4 0, #0469de 100%) -} - -.btn-positive { - border-color: #29a03b #29a03b #248b34; - background-color: #5bd46d; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #5bd46d), color-stop(100%, #29a03b)); - background-image: -webkit-linear-gradient(top, #5bd46d 0, #29a03b 100%); - background-image:linear-gradient(to bottom, #5bd46d 0, #29a03b 100%) -} - -.btn-positive:active { - background-color: #34c84a; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #34c84a), color-stop(100%, #248b34)); - background-image: -webkit-linear-gradient(top, #34c84a 0, #248b34 100%); - background-image:linear-gradient(to bottom, #34c84a 0, #248b34 100%) -} - -.btn-negative { - border-color: #fb2f29 #fb2f29 #fb1710; - background-color: #fd918d; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #fd918d), color-stop(100%, #fb2f29)); - background-image: -webkit-linear-gradient(top, #fd918d 0, #fb2f29 100%); - background-image:linear-gradient(to bottom, #fd918d 0, #fb2f29 100%) -} - -.btn-negative:active { - background-color: #fc605b; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #fc605b), color-stop(100%, #fb1710)); - background-image: -webkit-linear-gradient(top, #fc605b 0, #fb1710 100%); - background-image:linear-gradient(to bottom, #fc605b 0, #fb1710 100%) -} - -.btn-warning { - border-color: #fcaa0e #fcaa0e #ee9d02; - background-color: #fece72; - 
background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #fece72), color-stop(100%, #fcaa0e)); - background-image: -webkit-linear-gradient(top, #fece72 0, #fcaa0e 100%); - background-image:linear-gradient(to bottom, #fece72 0, #fcaa0e 100%) -} - -.btn-warning:active { - background-color: #fdbc40; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #fdbc40), color-stop(100%, #ee9d02)); - background-image: -webkit-linear-gradient(top, #fdbc40 0, #ee9d02 100%); - background-image:linear-gradient(to bottom, #fdbc40 0, #ee9d02 100%) -} - -.btn .icon { - float: left; - width: 14px; - height: 14px; - margin-bottom: 1px; - color: #737475; - font-size: 14px; - line-height:1 -} - -.btn .icon-text { - margin-right:5px -} - -.btn-dropdown:after { - margin-left: 5px; - content: "" -} - -.btn-group { - position: relative; - display:inline-block -} - -.toolbar-actions:after, .toolbar-actions:before, .toolbar:after, .toolbar:before { - display: table; - content: " " -} - -.btn-group .btn { - position: relative; - float:left -} - -.btn-group .btn:active, .btn-group .btn:focus { - z-index:2 -} - -.btn-group .btn.active { - z-index:3 -} - -.btn-group .btn + .btn, .btn-group .btn + .btn-group, .btn-group .btn-group + .btn, .btn-group .btn-group + .btn-group { - margin-left:-1px -} - -.btn-group > .btn:first-child { - border-top-right-radius: 0; - border-bottom-right-radius:0 -} - -.btn-group > .btn:last-child { - border-top-left-radius: 0; - border-bottom-left-radius:0 -} - -.btn-group > .btn:not(:first-child):not(:last-child) { - border-radius:0 -} - -.btn-group .btn + .btn { - border-left:1px solid #c2c0c2 -} - -.btn-group .btn + .btn.active { - border-left:0 -} - -.btn-group .active { - color: #fff; - border: 1px solid transparent; - background-color: #6d6c6d; - background-image:none -} - -.btn-group .active .icon { - color:#fff -} - -.toolbar { - min-height: 22px; - box-shadow: inset 0 1px 0 #f5f4f5; - background-color: #e8e6e8; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #e8e6e8), color-stop(100%, #d1cfd1)); - background-image: -webkit-linear-gradient(top, #e8e6e8 0, #d1cfd1 100%); - background-image:linear-gradient(to bottom, #e8e6e8 0, #d1cfd1 100%) -} - -.toolbar-header { - border-bottom:1px solid #c2c0c2 -} - -.toolbar-footer { - border-top: 1px solid #c2c0c2; - -webkit-app-region:drag -} - -.title { - margin: 0; - font-size: 12px; - font-weight: 400; - color:#555 -} - -.toolbar-borderless { - border-top: 0; - border-bottom:0 -} - -.toolbar-actions { - margin-top: 4px; - margin-bottom: 3px; - padding-right: 3px; - padding-left: 3px; - padding-bottom: 3px; - -webkit-app-region:drag -} - -.form-control, label { - display: inline-block; - font-size:13px -} - -.toolbar-actions > .btn, .toolbar-actions > .btn-group { - margin-left: 4px; - margin-right:4px -} - -label { - margin-bottom:5px -} - -input[type=search] { - -webkit-appearance: textfield; - box-sizing:border-box -} - -input[type=checkbox], input[type=radio] { - margin: 4px 0 0; - line-height:normal -} - -.checkbox, .form-group, .radio { - margin-bottom:10px -} - -.form-control { - width: 100%; - min-height: 25px; - padding: 5px 10px; - line-height: 1.6; - background-color: #fff; - border: 1px solid #ddd; - border-radius: 4px; - outline:0 -} - -.form-control:focus { - border-color: #6db3fd; - box-shadow:3px 3px 0 #6db3fd, -3px -3px 0 #6db3fd, -3px 3px 0 #6db3fd, 3px -3px 0 #6db3fd -} - -textarea { - height:auto -} - -.checkbox, .radio { - position: 
relative; - display: block; - margin-top:10px -} - -.checkbox label, .radio label { - padding-left: 20px; - margin-bottom: 0; - font-weight:400 -} - -.checkbox input[type=checkbox], .checkbox-inline input[type=checkbox], .radio input[type=radio], .radio-inline input[type=radio] { - position: absolute; - margin-left: -20px; - margin-top:4px -} - -.form-actions .btn { - margin-right:10px -} - -.form-actions .btn:last-child { - margin-right:0 -} - -.pane-group { - position: absolute; - bottom: 0; - display:flex -} - -.icon:before, .pane, .tab-item { - position:relative -} - -.pane { - overflow-y: auto; - flex: 1; - border-left:1px solid #ddd -} - -.list-group *, .media-body, .nav-group-item, td, th { - overflow:hidden -} - -.pane:first-child { - border-left:0 -} - -.pane-sm { - max-width: 220px; - min-width:150px -} - -.pane-mini { - width: 80px; - flex:none -} - -.pane-one-fourth { - width: 25%; - flex:none -} - -.pane-one-third { - width:33.3% -} - -.img-circle { - border-radius:50% -} - -.img-rounded { - border-radius:4px -} - -.list-group { - width: 100%; - list-style: none; - margin: 0; - padding:0 -} - -.list-group * { - margin:0 -} - -.list-group-item { - padding: 10px; - font-size: 12px; - color: #414142; - border-top:1px solid #ddd -} - -.list-group-item:first-child { - border-top:0 -} - -.list-group-item.active, .list-group-item.selected { - color: #fff; - background-color:#116cd6 -} - -.list-group-header { - padding:10px -} - -.media-object { - margin-top:3px -} - -.media-object.pull-left { - margin-right:10px -} - -.media-object.pull-right { - margin-left:10px -} - -.nav-group { - font-size:14px -} - -.nav-group-item { - padding: 2px 10px 2px 25px; - display: block; - color: #333; - text-decoration:none -} - -.nav-group-item.active, .nav-group-item:active { - background-color:#dcdfe1 -} - -.nav-group-item .icon { - width: 19px; - height: 18px; - float: left; - color: #737475; - margin-top: -3px; - margin-right: 7px; - font-size: 18px; - text-align:center -} - -.nav-group-title { - margin: 0; - padding: 10px 10px 2px; - font-size: 12px; - font-weight: 500; - color:#666 -} - -.icon:before, th { - font-weight:400 -} - -@font-face { - font-family: photon-entypo; - src: url(fonts/photon-entypo.eot); - src: url(fonts/photon-entypo.eot?#iefix) format("eot"), url(fonts/photon-entypo.woff) format("woff"), url(fonts/photon-entypo.ttf) format("truetype"); - font-weight: 400; - font-style:normal -} - -.icon:before { - display: inline-block; - speak: none; - font-size: 100%; - font-style: normal; - font-variant: normal; - text-transform: none; - line-height: 1; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing:grayscale -} - -.icon-note:before { - content: '\e800' -} - -.icon-note-beamed:before { - content: '\e801' -} - -.icon-music:before { - content: '\e802' -} - -.icon-search:before { - content: '\e803' -} - -.icon-flashlight:before { - content: '\e804' -} - -.icon-mail:before { - content: '\e805' -} - -.icon-heart:before { - content: '\e806' -} - -.icon-heart-empty:before { - content: '\e807' -} - -.icon-star:before { - content: '\e808' -} - -.icon-star-empty:before { - content: '\e809' -} - -.icon-user:before { - content: '\e80a' -} - -.icon-users:before { - content: '\e80b' -} - -.icon-user-add:before { - content: '\e80c' -} - -.icon-video:before { - content: '\e80d' -} - -.icon-picture:before { - content: '\e80e' -} - -.icon-camera:before { - content: '\e80f' -} - -.icon-layout:before { - content: '\e810' -} - -.icon-menu:before { - content: '\e811' -} - 
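/* How the icon classes in this stylesheet work: the @font-face rule above registers the
   "photon-entypo" icon font, the shared .icon:before rule applies that font-family, and each
   .icon-*:before rule maps a class name to a private-use code point in that font. For example,
   markup along the lines of <span class="icon icon-search"></span> (illustrative, not taken
   from the original sources) would render the glyph declared as content: '\e803'. */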
-.icon-check:before { - content: '\e812' -} - -.icon-cancel:before { - content: '\e813' -} - -.icon-cancel-circled:before { - content: '\e814' -} - -.icon-cancel-squared:before { - content: '\e815' -} - -.icon-plus:before { - content: '\e816' -} - -.icon-plus-circled:before { - content: '\e817' -} - -.icon-plus-squared:before { - content: '\e818' -} - -.icon-minus:before { - content: '\e819' -} - -.icon-minus-circled:before { - content: '\e81a' -} - -.icon-minus-squared:before { - content: '\e81b' -} - -.icon-help:before { - content: '\e81c' -} - -.icon-help-circled:before { - content: '\e81d' -} - -.icon-info:before { - content: '\e81e' -} - -.icon-info-circled:before { - content: '\e81f' -} - -.icon-back:before { - content: '\e820' -} - -.icon-home:before { - content: '\e821' -} - -.icon-link:before { - content: '\e822' -} - -.icon-attach:before { - content: '\e823' -} - -.icon-lock:before { - content: '\e824' -} - -.icon-lock-open:before { - content: '\e825' -} - -.icon-eye:before { - content: '\e826' -} - -.icon-tag:before { - content: '\e827' -} - -.icon-bookmark:before { - content: '\e828' -} - -.icon-bookmarks:before { - content: '\e829' -} - -.icon-flag:before { - content: '\e82a' -} - -.icon-thumbs-up:before { - content: '\e82b' -} - -.icon-thumbs-down:before { - content: '\e82c' -} - -.icon-download:before { - content: '\e82d' -} - -.icon-upload:before { - content: '\e82e' -} - -.icon-upload-cloud:before { - content: '\e82f' -} - -.icon-reply:before { - content: '\e830' -} - -.icon-reply-all:before { - content: '\e831' -} - -.icon-forward:before { - content: '\e832' -} - -.icon-quote:before { - content: '\e833' -} - -.icon-code:before { - content: '\e834' -} - -.icon-export:before { - content: '\e835' -} - -.icon-pencil:before { - content: '\e836' -} - -.icon-feather:before { - content: '\e837' -} - -.icon-print:before { - content: '\e838' -} - -.icon-retweet:before { - content: '\e839' -} - -.icon-keyboard:before { - content: '\e83a' -} - -.icon-comment:before { - content: '\e83b' -} - -.icon-chat:before { - content: '\e83c' -} - -.icon-bell:before { - content: '\e83d' -} - -.icon-attention:before { - content: '\e83e' -} - -.icon-alert:before { - content: '\e83f' -} - -.icon-vcard:before { - content: '\e840' -} - -.icon-address:before { - content: '\e841' -} - -.icon-location:before { - content: '\e842' -} - -.icon-map:before { - content: '\e843' -} - -.icon-direction:before { - content: '\e844' -} - -.icon-compass:before { - content: '\e845' -} - -.icon-cup:before { - content: '\e846' -} - -.icon-trash:before { - content: '\e847' -} - -.icon-doc:before { - content: '\e848' -} - -.icon-docs:before { - content: '\e849' -} - -.icon-doc-landscape:before { - content: '\e84a' -} - -.icon-doc-text:before { - content: '\e84b' -} - -.icon-doc-text-inv:before { - content: '\e84c' -} - -.icon-newspaper:before { - content: '\e84d' -} - -.icon-book-open:before { - content: '\e84e' -} - -.icon-book:before { - content: '\e84f' -} - -.icon-folder:before { - content: '\e850' -} - -.icon-archive:before { - content: '\e851' -} - -.icon-box:before { - content: '\e852' -} - -.icon-rss:before { - content: '\e853' -} - -.icon-phone:before { - content: '\e854' -} - -.icon-cog:before { - content: '\e855' -} - -.icon-tools:before { - content: '\e856' -} - -.icon-share:before { - content: '\e857' -} - -.icon-shareable:before { - content: '\e858' -} - -.icon-basket:before { - content: '\e859' -} - -.icon-bag:before { - content: '\e85a' -} - -.icon-calendar:before { - content: '\e85b' -} - 
-.icon-login:before { - content: '\e85c' -} - -.icon-logout:before { - content: '\e85d' -} - -.icon-mic:before { - content: '\e85e' -} - -.icon-mute:before { - content: '\e85f' -} - -.icon-sound:before { - content: '\e860' -} - -.icon-volume:before { - content: '\e861' -} - -.icon-clock:before { - content: '\e862' -} - -.icon-hourglass:before { - content: '\e863' -} - -.icon-lamp:before { - content: '\e864' -} - -.icon-light-down:before { - content: '\e865' -} - -.icon-light-up:before { - content: '\e866' -} - -.icon-adjust:before { - content: '\e867' -} - -.icon-block:before { - content: '\e868' -} - -.icon-resize-full:before { - content: '\e869' -} - -.icon-resize-small:before { - content: '\e86a' -} - -.icon-popup:before { - content: '\e86b' -} - -.icon-publish:before { - content: '\e86c' -} - -.icon-window:before { - content: '\e86d' -} - -.icon-arrow-combo:before { - content: '\e86e' -} - -.icon-down-circled:before { - content: '\e86f' -} - -.icon-left-circled:before { - content: '\e870' -} - -.icon-right-circled:before { - content: '\e871' -} - -.icon-up-circled:before { - content: '\e872' -} - -.icon-down-open:before { - content: '\e873' -} - -.icon-left-open:before { - content: '\e874' -} - -.icon-right-open:before { - content: '\e875' -} - -.icon-up-open:before { - content: '\e876' -} - -.icon-down-open-mini:before { - content: '\e877' -} - -.icon-left-open-mini:before { - content: '\e878' -} - -.icon-right-open-mini:before { - content: '\e879' -} - -.icon-up-open-mini:before { - content: '\e87a' -} - -.icon-down-open-big:before { - content: '\e87b' -} - -.icon-left-open-big:before { - content: '\e87c' -} - -.icon-right-open-big:before { - content: '\e87d' -} - -.icon-up-open-big:before { - content: '\e87e' -} - -.icon-down:before { - content: '\e87f' -} - -.icon-left:before { - content: '\e880' -} - -.icon-right:before { - content: '\e881' -} - -.icon-up:before { - content: '\e882' -} - -.icon-down-dir:before { - content: '\e883' -} - -.icon-left-dir:before { - content: '\e884' -} - -.icon-right-dir:before { - content: '\e885' -} - -.icon-up-dir:before { - content: '\e886' -} - -.icon-down-bold:before { - content: '\e887' -} - -.icon-left-bold:before { - content: '\e888' -} - -.icon-right-bold:before { - content: '\e889' -} - -.icon-up-bold:before { - content: '\e88a' -} - -.icon-down-thin:before { - content: '\e88b' -} - -.icon-left-thin:before { - content: '\e88c' -} - -.icon-right-thin:before { - content: '\e88d' -} - -.icon-up-thin:before { - content: '\e88e' -} - -.icon-ccw:before { - content: '\e88f' -} - -.icon-cw:before { - content: '\e890' -} - -.icon-arrows-ccw:before { - content: '\e891' -} - -.icon-level-down:before { - content: '\e892' -} - -.icon-level-up:before { - content: '\e893' -} - -.icon-shuffle:before { - content: '\e894' -} - -.icon-loop:before { - content: '\e895' -} - -.icon-switch:before { - content: '\e896' -} - -.icon-play:before { - content: '\e897' -} - -.icon-stop:before { - content: '\e898' -} - -.icon-pause:before { - content: '\e899' -} - -.icon-record:before { - content: '\e89a' -} - -.icon-to-end:before { - content: '\e89b' -} - -.icon-to-start:before { - content: '\e89c' -} - -.icon-fast-forward:before { - content: '\e89d' -} - -.icon-fast-backward:before { - content: '\e89e' -} - -.icon-progress-0:before { - content: '\e89f' -} - -.icon-progress-1:before { - content: '\e8a0' -} - -.icon-progress-2:before { - content: '\e8a1' -} - -.icon-progress-3:before { - content: '\e8a2' -} - -.icon-target:before { - content: '\e8a3' -} - 
-.icon-palette:before { - content: '\e8a4' -} - -.icon-list:before { - content: '\e8a5' -} - -.icon-list-add:before { - content: '\e8a6' -} - -.icon-signal:before { - content: '\e8a7' -} - -.icon-trophy:before { - content: '\e8a8' -} - -.icon-battery:before { - content: '\e8a9' -} - -.icon-back-in-time:before { - content: '\e8aa' -} - -.icon-monitor:before { - content: '\e8ab' -} - -.icon-mobile:before { - content: '\e8ac' -} - -.icon-network:before { - content: '\e8ad' -} - -.icon-cd:before { - content: '\e8ae' -} - -.icon-inbox:before { - content: '\e8af' -} - -.icon-install:before { - content: '\e8b0' -} - -.icon-globe:before { - content: '\e8b1' -} - -.icon-cloud:before { - content: '\e8b2' -} - -.icon-cloud-thunder:before { - content: '\e8b3' -} - -.icon-flash:before { - content: '\e8b4' -} - -.icon-moon:before { - content: '\e8b5' -} - -.icon-flight:before { - content: '\e8b6' -} - -.icon-paper-plane:before { - content: '\e8b7' -} - -.icon-leaf:before { - content: '\e8b8' -} - -.icon-lifebuoy:before { - content: '\e8b9' -} - -.icon-mouse:before { - content: '\e8ba' -} - -.icon-briefcase:before { - content: '\e8bb' -} - -.icon-suitcase:before { - content: '\e8bc' -} - -.icon-dot:before { - content: '\e8bd' -} - -.icon-dot-2:before { - content: '\e8be' -} - -.icon-dot-3:before { - content: '\e8bf' -} - -.icon-brush:before { - content: '\e8c0' -} - -.icon-magnet:before { - content: '\e8c1' -} - -.icon-infinity:before { - content: '\e8c2' -} - -.icon-erase:before { - content: '\e8c3' -} - -.icon-chart-pie:before { - content: '\e8c4' -} - -.icon-chart-line:before { - content: '\e8c5' -} - -.icon-chart-bar:before { - content: '\e8c6' -} - -.icon-chart-area:before { - content: '\e8c7' -} - -.icon-tape:before { - content: '\e8c8' -} - -.icon-graduation-cap:before { - content: '\e8c9' -} - -.icon-language:before { - content: '\e8ca' -} - -.icon-ticket:before { - content: '\e8cb' -} - -.icon-water:before { - content: '\e8cc' -} - -.icon-droplet:before { - content: '\e8cd' -} - -.icon-air:before { - content: '\e8ce' -} - -.icon-credit-card:before { - content: '\e8cf' -} - -.icon-floppy:before { - content: '\e8d0' -} - -.icon-clipboard:before { - content: '\e8d1' -} - -.icon-megaphone:before { - content: '\e8d2' -} - -.icon-database:before { - content: '\e8d3' -} - -.icon-drive:before { - content: '\e8d4' -} - -.icon-bucket:before { - content: '\e8d5' -} - -.icon-thermometer:before { - content: '\e8d6' -} - -.icon-key:before { - content: '\e8d7' -} - -.icon-flow-cascade:before { - content: '\e8d8' -} - -.icon-flow-branch:before { - content: '\e8d9' -} - -.icon-flow-tree:before { - content: '\e8da' -} - -.icon-flow-line:before { - content: '\e8db' -} - -.icon-flow-parallel:before { - content: '\e8dc' -} - -.icon-rocket:before { - content: '\e8dd' -} - -.icon-gauge:before { - content: '\e8de' -} - -.icon-traffic-cone:before { - content: '\e8df' -} - -.icon-cc:before { - content: '\e8e0' -} - -.icon-cc-by:before { - content: '\e8e1' -} - -.icon-cc-nc:before { - content: '\e8e2' -} - -.icon-cc-nc-eu:before { - content: '\e8e3' -} - -.icon-cc-nc-jp:before { - content: '\e8e4' -} - -.icon-cc-sa:before { - content: '\e8e5' -} - -.icon-cc-nd:before { - content: '\e8e6' -} - -.icon-cc-pd:before { - content: '\e8e7' -} - -.icon-cc-zero:before { - content: '\e8e8' -} - -.icon-cc-share:before { - content: '\e8e9' -} - -.icon-cc-remix:before { - content: '\e8ea' -} - -.icon-github:before { - content: '\e8eb' -} - -.icon-github-circled:before { - content: '\e8ec' -} - -.icon-flickr:before { - content: '\e8ed' 
-} - -.icon-flickr-circled:before { - content: '\e8ee' -} - -.icon-vimeo:before { - content: '\e8ef' -} - -.icon-vimeo-circled:before { - content: '\e8f0' -} - -.icon-twitter:before { - content: '\e8f1' -} - -.icon-twitter-circled:before { - content: '\e8f2' -} - -.icon-facebook:before { - content: '\e8f3' -} - -.icon-facebook-circled:before { - content: '\e8f4' -} - -.icon-facebook-squared:before { - content: '\e8f5' -} - -.icon-gplus:before { - content: '\e8f6' -} - -.icon-gplus-circled:before { - content: '\e8f7' -} - -.icon-pinterest:before { - content: '\e8f8' -} - -.icon-pinterest-circled:before { - content: '\e8f9' -} - -.icon-tumblr:before { - content: '\e8fa' -} - -.icon-tumblr-circled:before { - content: '\e8fb' -} - -.icon-linkedin:before { - content: '\e8fc' -} - -.icon-linkedin-circled:before { - content: '\e8fd' -} - -.icon-dribbble:before { - content: '\e8fe' -} - -.icon-dribbble-circled:before { - content: '\e8ff' -} - -.icon-stumbleupon:before { - content: '\e900' -} - -.icon-stumbleupon-circled:before { - content: '\e901' -} - -.icon-lastfm:before { - content: '\e902' -} - -.icon-lastfm-circled:before { - content: '\e903' -} - -.icon-rdio:before { - content: '\e904' -} - -.icon-rdio-circled:before { - content: '\e905' -} - -.icon-spotify:before { - content: '\e906' -} - -.icon-spotify-circled:before { - content: '\e907' -} - -.icon-qq:before { - content: '\e908' -} - -.icon-instagram:before { - content: '\e909' -} - -.icon-dropbox:before { - content: '\e90a' -} - -.icon-evernote:before { - content: '\e90b' -} - -.icon-flattr:before { - content: '\e90c' -} - -.icon-skype:before { - content: '\e90d' -} - -.icon-skype-circled:before { - content: '\e90e' -} - -.icon-renren:before { - content: '\e90f' -} - -.icon-sina-weibo:before { - content: '\e910' -} - -.icon-paypal:before { - content: '\e911' -} - -.icon-picasa:before { - content: '\e912' -} - -.icon-soundcloud:before { - content: '\e913' -} - -.icon-mixi:before { - content: '\e914' -} - -.icon-behance:before { - content: '\e915' -} - -.icon-google-circles:before { - content: '\e916' -} - -.icon-vkontakte:before { - content: '\e917' -} - -.icon-smashing:before { - content: '\e918' -} - -.icon-sweden:before { - content: '\e919' -} - -.icon-db-shape:before { - content: '\e91a' -} - -.icon-logo-db:before { - content: '\e91b' -} - -table { - border-spacing: 0; - width: 100%; - border: 0; - border-collapse: separate; - font-size: 12px; - text-align:left -} - -.table-striped tr:nth-child(even), thead { - background-color:#f5f5f4 -} - -tbody { - background-color:#fff -} - -.table-striped tr:active:nth-child(even), tr:active { - color: #fff; - background-color:#116cd6 -} - -thead tr:active { - color: #333; - background-color:#f5f5f4 -} - -th { - border-right: 1px solid #ddd; - border-bottom:1px solid #ddd -} - -td, th { - padding:2px 15px -} - -td:last-child, th:last-child { - border-right:0 -} - -.tab-group { - margin-top: -1px; - display: flex; - border-top: 1px solid #989698; - border-bottom:1px solid #989698 -} - -.tab-item { - flex: 1; - padding: 3px; - font-size: 12px; - text-align: center; - border-left: 1px solid #989698; - background-color: #b8b6b8; - background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #b8b6b8), color-stop(100%, #b0aeb0)); - background-image: -webkit-linear-gradient(top, #b8b6b8 0, #b0aeb0 100%); - background-image:linear-gradient(to bottom, #b8b6b8 0, #b0aeb0 100%) -} - -.tab-item:first-child { - border-left:0 -} - -.tab-item.active { - background-color: #d4d2d4; - 
background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0, #d4d2d4), color-stop(100%, #cccacc)); - background-image: -webkit-linear-gradient(top, #d4d2d4 0, #cccacc 100%); - background-image:linear-gradient(to bottom, #d4d2d4 0, #cccacc 100%) -} - -.tab-item .icon-close-tab:hover, .tab-item:after { - background-color:rgba(0, 0, 0, .08) -} - -.tab-item .icon-close-tab { - position: absolute; - top: 50%; - left: 5px; - width: 15px; - height: 15px; - font-size: 15px; - line-height: 15px; - text-align: center; - color: #666; - opacity: 0; - transition: opacity .1s linear, background-color .1s linear; - border-radius: 3px; - transform: translateY(-50%); - z-index:10 -} - -.tab-item:after { - position: absolute; - top: 0; - right: 0; - bottom: 0; - left: 0; - content: ""; - opacity: 0; - transition: opacity .1s linear; - z-index:1 -} - -.tab-item:hover .icon-close-tab, .tab-item:hover:not(.active):after { - opacity:1 -} - -.tab-item-fixed { - flex: none; - padding: 3px 10px -} - diff --git a/spaces/jitubutwal1441/image-to-story/app.py b/spaces/jitubutwal1441/image-to-story/app.py deleted file mode 100644 index 077740b89c94cf358c478fed37d5d7616e3c9b52..0000000000000000000000000000000000000000 --- a/spaces/jitubutwal1441/image-to-story/app.py +++ /dev/null @@ -1,107 +0,0 @@ -from dotenv import load_dotenv, find_dotenv -from transformers import pipeline -from langchain import LLMChain, OpenAI, PromptTemplate - -import requests -import os - -# UI layer -import streamlit as st - -load_dotenv(find_dotenv()) - -HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN') -# It involves 3 steps -# image to text -def image_to_text(url, use_api=True): - if use_api: - API_URL = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-large" - headers = {"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"} - - filename = url.split("/")[-1] - with open(filename, "rb") as f: - data = f.read() - response = requests.post(API_URL, headers=headers, data=data) - return response.json()[0]['generated_text'] - - - # Download the model and use it, which is slow - captioner = pipeline("image-to-text",model="Salesforce/blip-image-captioning-base") - # captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") - result = captioner(url) - return result[0]['generated_text'] - -## [{'generated_text': 'two birds are standing next to each other '}] - -# LLM -def generate_story(story_idea): - # template = """ - # You are a professional song writter; - # Generate a song based on a simple narrative, the song should be no more than 100 words. - # Song should be in Nepali language - # CONTEXT: {story_idea} - # STORY: - # """ - template = """ - you are a song writer, write a song using following context: - {story_idea}. - Song should not be more than 150 words. It should be in English language. 
- """ - prompt = PromptTemplate(input_variables=["story_idea"], template=template) - - story_llm = LLMChain(llm=OpenAI(model_name='gpt-3.5-turbo-0301', temperature=1), prompt=prompt, verbose=True) - story = story_llm.run(story_idea) - return story - -# text to speech -def text_to_speech(story): - API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits" - headers = {"Authorization": f"Bearer {HUGGINGFACEHUB_API_TOKEN}"} - - payloads = { - "inputs": story - } - - response = requests.post(API_URL, headers=headers, json=payloads) - with open("story_audio.flac", "wb") as file: - file.write(response.content) - -# caption = image_to_text("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") -# story = generate_story(story_idea="Two parrots singing a song") -# text_to_speech(story="Two parrots singing a song") - -def main(): - st.set_page_config(page_title="Upload any image to hear a nice story") - - st.header("Listen to what your image has to tell you. JK DEMO APP") - - uploaded_file = st.file_uploader("Choose an image...", type="jpg") - if uploaded_file is not None: - print(uploaded_file) - bytes_data = uploaded_file.getvalue() - with open(uploaded_file.name, "wb") as file: - file.write(bytes_data) - - st.image(uploaded_file, caption="Uploaded image", use_column_width=True) - - image_description = image_to_text(uploaded_file.name, use_api=True) - - # Display image description on FE - with st.expander("Image Description"): - st.write(image_description) - - story = generate_story(story_idea=image_description) - # story_starter_text = "Yo ho Radio Nepal, prastut xa sun nai parne katha: " - story_starter_text = "" - story = story_starter_text + story - - # Display story text on FE - with st.expander("Story"): - st.write(story) - - # Display audio player on FE - text_to_speech(story=story) - st.audio("story_audio.flac") - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/joaogabriellima/Real-Time-Voice-Cloning/synthesizer/utils/symbols.py b/spaces/joaogabriellima/Real-Time-Voice-Cloning/synthesizer/utils/symbols.py deleted file mode 100644 index 132d3a612c3b13e2ada905a706001cff29a4f63a..0000000000000000000000000000000000000000 --- a/spaces/joaogabriellima/Real-Time-Voice-Cloning/synthesizer/utils/symbols.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -Defines the set of symbols used in text input to the model. - -The default is a set of ASCII characters that works well for English or text that has been run -through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. -""" -# from . import cmudict - -_pad = "_" -_eos = "~" -_characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!\'\"(),-.:;? 
" - -# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): -#_arpabet = ["@' + s for s in cmudict.valid_symbols] - -# Export all symbols: -symbols = [_pad, _eos] + list(_characters) #+ _arpabet diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_S_U_B_.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_S_U_B_.py deleted file mode 100644 index bb8375a5f83029d2b05388d5c882edd9c4aba95c..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_S_U_B_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table_G_S_U_B_(BaseTTXConverter): - pass diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/steamship/file_reader.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/steamship/file_reader.py deleted file mode 100644 index b9c4185f166fdf43dc5bdf45380b4cfcc98dcf31..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/readers/steamship/file_reader.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Load Documents from a set of persistent Steamship Files.""" -from typing import List, Optional - -from gpt_index.readers.base import BaseReader -from gpt_index.readers.schema.base import Document - - -class SteamshipFileReader(BaseReader): - """Reads persistent Steamship Files and converts them to Documents. - - Args: - api_key: Steamship API key. Defaults to STEAMSHIP_API_KEY value if not provided. - - Note: - Requires install of `steamship` package and an active Steamship API Key. - To get a Steamship API Key, visit: https://steamship.com/account/api. - Once you have an API Key, expose it via an environment variable named - `STEAMSHIP_API_KEY` or pass it as an init argument (`api_key`). - """ - - def __init__(self, api_key: Optional[str] = None) -> None: - """Initialize the Reader.""" - try: - import steamship # noqa: F401 - - self.api_key = api_key - except ImportError: - raise ImportError( - "`steamship` must be installed to use the SteamshipFileReader.\n" - "Please run `pip install --upgrade steamship." - ) - - def load_data( - self, - workspace: str, - query: Optional[str] = None, - file_handles: Optional[List[str]] = None, - collapse_blocks: bool = True, - join_str: str = "\n\n", - ) -> List[Document]: - """Load data from persistent Steamship Files into Documents. - - Args: - workspace: the handle for a Steamship workspace - (see: https://docs.steamship.com/workspaces/index.html) - query: a Steamship tag query for retrieving files - (ex: 'filetag and value("import-id")="import-001"') - file_handles: a list of Steamship File handles - (ex: `smooth-valley-9kbdr`) - collapse_blocks: whether to merge individual File Blocks into a - single Document, or separate them. - join_str: when collapse_blocks is True, this is how the block texts - will be concatenated. - - Note: - The collection of Files from both `query` and `file_handles` will be - combined. There is no (current) support for deconflicting the collections - (meaning that if a file appears both in the result set of the query and - as a handle in file_handles, it will be loaded twice). 
- """ - from steamship import File, Steamship - - client = Steamship(workspace=workspace, api_key=self.api_key) - files = [] - if query: - files_from_query = File.query(client=client, tag_filter_query=query).files - files.extend(files_from_query) - - if file_handles: - files.extend([File.get(client=client, handle=h) for h in file_handles]) - - docs = [] - for file in files: - extra_info = {"source": file.handle} - - for tag in file.tags: - extra_info[tag.kind] = tag.value - - if collapse_blocks: - text = join_str.join([b.text for b in file.blocks]) - docs.append( - Document(text=text, doc_id=file.handle, extra_info=extra_info) - ) - else: - docs.extend( - [ - Document(text=b.text, doc_id=file.handle, extra_info=extra_info) - for b in file.blocks - ] - ) - - return docs diff --git a/spaces/joaquinu/merluzo/app.py b/spaces/joaquinu/merluzo/app.py deleted file mode 100644 index bb151168e539ebc6b5dae7743dc6c2bff67b5665..0000000000000000000000000000000000000000 --- a/spaces/joaquinu/merluzo/app.py +++ /dev/null @@ -1,16 +0,0 @@ -import gradio as gr -from fastai.vision.all import * - - -learn = load_learner('export.pkl') -categories = ('Fish','Merluzo','People') -def classify_image(img): - pred,idx,probs = learn.predict(img) - return dict(zip(categories, map(float,probs))) - -image = gr.inputs.Image(shape=(192,192)) -label = gr.outputs.Label() -examples = ['bird.jpg', 'fish.png', 'people.jpg'] - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline=False) \ No newline at end of file diff --git a/spaces/johnslegers/stable-diffusion-gui-test/ldmlib/models/diffusion/ddpm.py b/spaces/johnslegers/stable-diffusion-gui-test/ldmlib/models/diffusion/ddpm.py deleted file mode 100644 index 498c78353bc2fd32de7e8e47320e6d8708d1a5ae..0000000000000000000000000000000000000000 --- a/spaces/johnslegers/stable-diffusion-gui-test/ldmlib/models/diffusion/ddpm.py +++ /dev/null @@ -1,1445 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" - -import torch -import torch.nn as nn -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat -from contextlib import contextmanager -from functools import partial -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only - -from ldmlib.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config -from ldmlib.modules.ema import LitEma -from ldmlib.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution -from ldmlib.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL -from ldmlib.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like -from ldmlib.models.diffusion.ddim import DDIMSampler - - -__conditioning_keys__ = {'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y'} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def uniform_on_device(r1, r2, shape, device): - return (r1 - r2) * 
torch.rand(*shape, device=device) + r2 - - -class DDPM(pl.LightningModule): - # classic DDPM with Gaussian diffusion, in image space - def __init__(self, - unet_config, - timesteps=1000, - beta_schedule="linear", - loss_type="l2", - ckpt_path=None, - ignore_keys=[], - load_only_unet=False, - monitor="val/loss", - use_ema=True, - first_stage_key="image", - image_size=256, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0., - v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1., - conditioning_key=None, - parameterization="eps", # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0., - ): - super().__init__() - assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' - self.parameterization = parameterization - print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - self.image_size = image_size # try conv? - self.channels = channels - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - count_params(self.model, verbose=True) - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self.model) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) - - self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, - linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - - - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if exists(given_betas): - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, - cosine_s=cosine_s) - alphas = 1. 
- betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( - 1. - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - if self.parameterization == "eps": - lvlb_weights = self.betas ** 2 / ( - 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) - elif self.parameterization == "x0": - lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) - else: - raise NotImplementedError("mu not supported") - # TODO how to choose this term - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. - """ - mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) - variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1., 1.) 
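        # A brief sketch of the math used at this step, for reference (the buffer names are
        # the ones registered in register_schedule above). In the "eps" parameterization,
        # predict_start_from_noise inverts the forward process
        #     x_t = sqrt_alphas_cumprod[t] * x_0 + sqrt_one_minus_alphas_cumprod[t] * eps
        # to recover
        #     x_0_hat = sqrt_recip_alphas_cumprod[t] * x_t - sqrt_recipm1_alphas_cumprod[t] * eps_hat
        # and q_posterior below returns the Gaussian q(x_{t-1} | x_t, x_0_hat) with mean
        #     posterior_mean_coef1[t] * x_0_hat + posterior_mean_coef2[t] * x_t
        # and the fixed variance stored in posterior_variance / posterior_log_variance_clipped.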
- - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - image_size = self.image_size - channels = self.channels - return self.p_sample_loop((batch_size, channels, image_size, image_size), - return_intermediates=return_intermediates) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - def get_loss(self, pred, target, mean=True): - if self.loss_type == 'l1': - loss = (target - pred).abs() - if mean: - loss = loss.mean() - elif self.loss_type == 'l2': - if mean: - loss = torch.nn.functional.mse_loss(target, pred) - else: - loss = torch.nn.functional.mse_loss(target, pred, reduction='none') - else: - raise NotImplementedError("unknown loss type '{loss_type}'") - - return loss - - def p_losses(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_out = self.model(x_noisy, t) - - loss_dict = {} - if self.parameterization == "eps": - target = noise - elif self.parameterization == "x0": - target = x_start - else: - raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") - - loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) - - log_prefix = 'train' if self.training else 'val' - - loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) - loss_simple = loss.mean() * self.l_simple_weight - - loss_vlb = (self.lvlb_weights[t] * loss).mean() - loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) - - loss = loss_simple + self.original_elbo_weight * loss_vlb - - loss_dict.update({f'{log_prefix}/loss': loss}) - - return loss, loss_dict - - def forward(self, x, *args, **kwargs): - # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size - # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - return self.p_losses(x, t, *args, **kwargs) - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = 
x.to(memory_format=torch.contiguous_format).float() - return x - - def shared_step(self, batch): - x = self.get_input(batch, self.first_stage_key) - loss, loss_dict = self(x) - return loss, loss_dict - - def training_step(self, batch, batch_idx): - loss, loss_dict = self.shared_step(batch) - - self.log_dict(loss_dict, prog_bar=True, - logger=True, on_step=True, on_epoch=True) - - self.log("global_step", self.global_step, - prog_bar=True, logger=True, on_step=True, on_epoch=False) - - if self.use_scheduler: - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) - - return loss - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - _, loss_dict_no_ema = self.shared_step(batch) - with self.ema_scope(): - _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} - self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self.model) - - def _get_rows_from_list(self, samples): - n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() - x = self.get_input(batch, self.first_stage_key) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - x = x.to(self.device)[:N] - log["inputs"] = x - - # get diffusion row - diffusion_row = list() - x_start = x[:n_row] - - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(x_start) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - diffusion_row.append(x_noisy) - - log["diffusion_row"] = self._get_rows_from_list(diffusion_row) - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) - - log["samples"] = samples - log["denoise_row"] = self._get_rows_from_list(denoise_row) - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.learn_logvar: - params = params + [self.logvar] - opt = torch.optim.AdamW(params, lr=lr) - return opt - - -class LatentDiffusion(DDPM): - """main class""" - def __init__(self, - first_stage_config, - cond_stage_config, - num_timesteps_cond=None, - cond_stage_key="image", - cond_stage_trainable=False, - concat_mode=True, - cond_stage_forward=None, - conditioning_key=None, - scale_factor=1.0, - scale_by_std=False, - *args, **kwargs): - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__': - 
conditioning_key = None - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) - self.concat_mode = concat_mode - self.cond_stage_trainable = cond_stage_trainable - self.cond_stage_key = cond_stage_key - try: - self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - self.cond_stage_forward = cond_stage_forward - self.clip_denoised = False - self.bbox_tokenizer = None - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys) - self.restarted_from_ckpt = True - - def make_cond_schedule(self, ): - self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) - ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() - self.cond_ids[:self.num_timesteps_cond] = ids - - @rank_zero_only - @torch.no_grad() - def on_train_batch_start(self, batch, batch_idx, dataloader_idx): - # only for very first batch - if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: - assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' - # set rescale weight to 1./std of encodings - print("### USING STD-RESCALING ###") - x = super().get_input(batch, self.first_stage_key) - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - del self.scale_factor - self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) - print(f"setting self.scale_factor to {self.scale_factor}") - print("### USING STD-RESCALING ###") - - def register_schedule(self, - given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == "__is_first_stage__": - print("Using first stage also as cond stage.") - self.cond_stage_model = self.first_stage_model - elif config == "__is_unconditional__": - print(f"Training {self.__class__.__name__} as an unconditional model.") - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' - model = instantiate_from_config(config) - self.cond_stage_model = model - - def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): - denoise_row = [] - for zd in tqdm(samples, desc=desc): - denoise_row.append(self.decode_first_stage(zd.to(self.device), - force_not_quantize=force_no_decoder_quantization)) - n_imgs_per_row = len(denoise_row) - denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") - return self.scale_factor * z - - def get_learned_conditioning(self, c): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - def meshgrid(self, h, w): - y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) - x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) - - arr = torch.cat([y, x], dim=-1) - return arr - - def delta_border(self, h, w): - """ - :param h: height - :param w: width - :return: normalized distance to image border, - wtith min distance = 0 at border and max dist = 0.5 at image center - """ - lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) - arr = self.meshgrid(h, w) / lower_right_corner - dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] - dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] - edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] - return edge_dist - - def get_weighting(self, h, w, Ly, Lx, device): - weighting = self.delta_border(h, w) - weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], - self.split_input_params["clip_max_weight"], ) - weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - - if self.split_input_params["tie_braker"]: - L_weighting = self.delta_border(Ly, Lx) - L_weighting = torch.clip(L_weighting, - self.split_input_params["clip_min_tie_weight"], - self.split_input_params["clip_max_tie_weight"]) - - L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) - weighting = weighting * L_weighting - return weighting - - def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code - """ - :param x: img of size (bs, c, h, w) - :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) - """ - bs, nc, h, w = x.shape - - # number of crops in image - Ly = (h - kernel_size[0]) // stride[0] + 1 - Lx = (w - kernel_size[1]) // stride[1] + 1 - - if uf == 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) - - weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) - - elif uf > 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), - dilation=1, padding=0, - stride=(stride[0] * uf, stride[1] * uf)) - fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) - - elif df > 1 and uf == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), - dilation=1, padding=0, - stride=(stride[0] // df, stride[1] // df)) - fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) - - else: - raise NotImplementedError - - return fold, unfold, normalization, weighting - - @torch.no_grad() - def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, - cond_key=None, return_original_cond=False, bs=None): - x = super().get_input(batch, k) - if bs is not None: - x = x[:bs] - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - - if self.model.conditioning_key is not None: - if cond_key is None: - cond_key = 
self.cond_stage_key - if cond_key != self.first_stage_key: - if cond_key in ['caption', 'coordinates_bbox']: - xc = batch[cond_key] - elif cond_key == 'class_label': - xc = batch - else: - xc = super().get_input(batch, cond_key).to(self.device) - else: - xc = x - if not self.cond_stage_trainable or force_c_encode: - if isinstance(xc, dict) or isinstance(xc, list): - # import pudb; pudb.set_trace() - c = self.get_learned_conditioning(xc) - else: - c = self.get_learned_conditioning(xc.to(self.device)) - else: - c = xc - if bs is not None: - c = c[:bs] - - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - ckey = __conditioning_keys__[self.model.conditioning_key] - c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} - - else: - c = None - xc = None - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - c = {'pos_x': pos_x, 'pos_y': pos_y} - out = [z, c] - if return_first_stage_outputs: - xrec = self.decode_first_stage(z) - out.extend([x, xrec]) - if return_original_cond: - out.append(xc) - return out - - @torch.no_grad() - def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. 
reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - # same as above but without decorator - def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - @torch.no_grad() - def encode_first_stage(self, x): - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - df = self.split_input_params["vqf"] - self.split_input_params['original_image_size'] = x.shape[-2:] - bs, nc, h, w = x.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) - z = unfold(x) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) - o = o * weighting - - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization - return decoded - - else: - return self.first_stage_model.encode(x) - else: - return self.first_stage_model.encode(x) - - def shared_step(self, batch, **kwargs): - x, c = self.get_input(batch, self.first_stage_key) - loss = self(x, c) - return loss - - def forward(self, x, c, *args, **kwargs): - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - if self.model.conditioning_key is not None: - assert c is not None - if self.cond_stage_trainable: - c = self.get_learned_conditioning(c) - if self.shorten_cond_schedule: # TODO: drop this option - tc = self.cond_ids[t].to(self.device) - c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) - return self.p_losses(x, c, t, *args, **kwargs) - - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - - def apply_model(self, x_noisy, t, cond, return_ids=False): - - if isinstance(cond, dict): - # hybrid case, cond is exptected to be a dict - pass - else: - if not isinstance(cond, list): - cond = [cond] - key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' - cond = {key: cond} - - if hasattr(self, "split_input_params"): - assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - - h, w = x_noisy.shape[-2:] - - fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) - - z = unfold(x_noisy) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] - - if self.cond_stage_key in ["image", "LR_image", "segmentation", - 'bbox_img'] and self.model.conditioning_key: # todo check for completeness - c_key = next(iter(cond.keys())) # get key - c = next(iter(cond.values())) # get value - assert (len(c) == 1) # todo extend to list with more than one elem - c = c[0] # get element - - c = unfold(c) - c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] - - elif self.cond_stage_key == 'coordinates_bbox': - assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' - - # assuming padding of unfold is always 0 and its dilation is always 1 - n_patches_per_row = int((w - ks[0]) / stride[0] + 1) - full_img_h, full_img_w = self.split_input_params['original_image_size'] - # as we are operating on latents, we need the factor from the original image size to the - # spatial latent size to properly rescale the crops for regenerating the bbox annotations - num_downs = self.first_stage_model.encoder.num_resolutions - 1 - rescale_latent = 2 ** (num_downs) - - # get top left postions of patches as conforming for the bbbox tokenizer, therefore we - # need to rescale the tl patch coordinates to be in between (0,1) - tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, - rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) - for patch_nr in range(z.shape[-1])] - - # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) - patch_limits = [(x_tl, y_tl, - rescale_latent * ks[0] / full_img_w, - rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] - # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] - - # tokenize crop coordinates for the bounding boxes of the respective patches - patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) - for bbox in patch_limits] # list of length l with tensors of shape (1, 2) - print(patch_limits_tknzd[0].shape) - # cut tknzd crop position from conditioning - assert isinstance(cond, dict), 'cond must be dict to be fed into model' - cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) - print(cut_cond.shape) - - adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) - adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') - print(adapted_cond.shape) - adapted_cond = self.get_learned_conditioning(adapted_cond) - print(adapted_cond.shape) - adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) - print(adapted_cond.shape) - - cond_list = [{'c_crossattn': [e]} for e in adapted_cond] - - else: - cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient - - # apply model by loop over crops - output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] - assert not isinstance(output_list[0], - tuple) # todo cant deal with multiple model outputs check this never happens - - o = torch.stack(output_list, 
axis=-1) - o = o * weighting - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - x_recon = fold(o) / normalization - - else: - x_recon = self.model(x_noisy, t, **cond) - - if isinstance(x_recon, tuple) and not return_ids: - return x_recon[0] - else: - return x_recon - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - This term can't be optimized, as it only depends on the encoder. - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. - """ - batch_size = x_start.shape[0] - t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) - return mean_flat(kl_prior) / np.log(2.0) - - def p_losses(self, x_start, cond, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_output = self.apply_model(x_noisy, t, cond) - - loss_dict = {} - prefix = 'train' if self.training else 'val' - - if self.parameterization == "x0": - target = x_start - elif self.parameterization == "eps": - target = noise - else: - raise NotImplementedError() - - loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) - - logvar_t = self.logvar[t].to(self.device) - loss = loss_simple / torch.exp(logvar_t) + logvar_t - # loss = loss_simple / torch.exp(self.logvar) + self.logvar - if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) - - loss = self.l_simple_weight * loss.mean() - - loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) - loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) - loss += (self.original_elbo_weight * loss_vlb) - loss_dict.update({f'{prefix}/loss': loss}) - - return loss, loss_dict - - def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, - return_x0=False, score_corrector=None, corrector_kwargs=None): - t_in = t - model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) - - if score_corrector is not None: - assert self.parameterization == "eps" - model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) - - if return_codebook_ids: - model_out, logits = model_out - - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - else: - raise NotImplementedError() - - if clip_denoised: - x_recon.clamp_(-1., 1.) 
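# Optionally snap the predicted x_0 to the nearest codebook entry of a VQ first stage
# (quantize_denoised) before forming the posterior; this only applies when
# first_stage_model exposes a quantize module.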
- if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - if return_codebook_ids: - return model_mean, posterior_variance, posterior_log_variance, logits - elif return_x0: - return model_mean, posterior_variance, posterior_log_variance, x_recon - else: - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, - return_codebook_ids=False, quantize_denoised=False, return_x0=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if return_codebook_ids: - raise DeprecationWarning("Support dropped.") - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - - if return_codebook_ids: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) - if return_x0: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, - img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., - score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, - log_every_t=None): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', - total=timesteps) if verbose else reversed( - range(0, timesteps)) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img, x0_partial = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, return_x0=True, - temperature=temperature[i], noise_dropout=noise_dropout, - 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) - return img, intermediates - - @torch.no_grad() - def p_sample_loop(self, cond, shape, return_intermediates=False, - x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, start_T=None, - log_every_t=None): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( - range(0, timesteps)) - - if mask is not None: - assert x0 is not None - assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, - verbose=True, timesteps=None, quantize_denoised=False, - mask=None, x0=None, shape=None,**kwargs): - if shape is None: - shape = (batch_size, self.channels, self.image_size, self.image_size) - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - return self.p_sample_loop(cond, - shape, - return_intermediates=return_intermediates, x_T=x_T, - verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, - mask=mask, x0=x0) - - @torch.no_grad() - def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): - - if ddim: - ddim_sampler = DDIMSampler(self) - shape = (self.channels, self.image_size, self.image_size) - samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, - shape,cond,verbose=False,**kwargs) - - else: - samples, intermediates = self.sample(cond=cond, batch_size=batch_size, - return_intermediates=True,**kwargs) - - return samples, intermediates - - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=True, **kwargs): - - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, - 
return_first_stage_outputs=True, - force_c_encode=True, - return_original_cond=True, - bs=N) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( - self.first_stage_model, IdentityFirstStage): - # also display when quantizing x0 while sampling - with self.ema_scope("Plotting Quantized Denoised"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta, - quantize_denoised=True) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, - # quantize_denoised=True) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_x0_quantized"] = x_samples - - if inpaint: - # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] - mask = torch.ones(N, h, w).to(self.device) - # zeros will be filled in - mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. - mask = mask[:, None, ...] 
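# The mask above lives in latent space with shape (N, 1, h, w): ones mark positions whose
# (noised) original latent is kept, zeros mark the center square the sampler fills in.
# p_sample_loop blends the two at every step via
#   img = img_orig * mask + (1. - mask) * img
# and the same mask is reused for the "samples_outpainting" log below.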
- with self.ema_scope("Plotting Inpaint"): - - samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_inpainting"] = x_samples - log["mask"] = mask - - # outpaint - with self.ema_scope("Plotting Outpaint"): - samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_outpainting"] = x_samples - - if plot_progressive_rows: - with self.ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.cond_stage_trainable: - print(f"{self.__class__.__name__}: Also optimizing conditioner params!") - params = params + list(self.cond_stage_model.parameters()) - if self.learn_logvar: - print('Diffusion model optimizing logvar') - params.append(self.logvar) - opt = torch.optim.AdamW(params, lr=lr) - if self.use_scheduler: - assert 'target' in self.scheduler_config - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [opt], scheduler - return opt - - @torch.no_grad() - def to_rgb(self, x): - x = x.float() - if not hasattr(self, "colorize"): - self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) - x = nn.functional.conv2d(x, weight=self.colorize) - x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
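# A fixed, randomly initialized 1x1 convolution ("colorize") projects an arbitrary number
# of channels down to 3, and the affine rescale above maps the result into [-1, 1] for
# logging as an RGB image.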
- return x - - -class DiffusionWrapper(pl.LightningModule): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm'] - - def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t) - elif self.conditioning_key == 'crossattn': - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'hybrid': - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'adm': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - else: - raise NotImplementedError() - - return out - - -class Layout2ImgDiffusion(LatentDiffusion): - # TODO: move all layout-specific hacks to this class - def __init__(self, cond_stage_key, *args, **kwargs): - assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' - super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) - - def log_images(self, batch, N=8, *args, **kwargs): - logs = super().log_images(batch=batch, N=N, *args, **kwargs) - - key = 'train' if self.training else 'validation' - dset = self.trainer.datamodule.datasets[key] - mapper = dset.conditional_builders[self.cond_stage_key] - - bbox_imgs = [] - map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) - for tknzd_bbox in batch[self.cond_stage_key][:N]: - bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) - bbox_imgs.append(bboximg) - - cond_img = torch.stack(bbox_imgs, dim=0) - logs['bbox_image'] = cond_img - return logs diff --git a/spaces/jonathanli/youtube-sponsor-detection/README.md b/spaces/jonathanli/youtube-sponsor-detection/README.md deleted file mode 100644 index 19198e1e5bf05b550a3f8b7d3bd2d47858724b5e..0000000000000000000000000000000000000000 --- a/spaces/jonathanli/youtube-sponsor-detection/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Youtube Sponsor Detection -emoji: 🚫 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.1.7 -app_file: app.py -pinned: false -license: mit ---- - -Block youtube sponsors with deep learning. \ No newline at end of file diff --git a/spaces/juanhuggingface/ChuanhuChatGPT_Beta/modules/webui_locale.py b/spaces/juanhuggingface/ChuanhuChatGPT_Beta/modules/webui_locale.py deleted file mode 100644 index 1ce4d97b9b41cbb2d9be3fdadc4c85f6ef897604..0000000000000000000000000000000000000000 --- a/spaces/juanhuggingface/ChuanhuChatGPT_Beta/modules/webui_locale.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import locale -import commentjson as json - -class I18nAuto: - def __init__(self): - if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) - else: - config = {} - lang_config = config.get("language", "auto") - language = os.environ.get("LANGUAGE", lang_config) - if language == "auto": - language = locale.getdefaultlocale()[0] # get the language code of the system (ex. 
zh_CN) - self.language_map = {} - self.file_is_exists = os.path.isfile(f"./locale/{language}.json") - if self.file_is_exists: - with open(f"./locale/{language}.json", "r", encoding="utf-8") as f: - self.language_map.update(json.load(f)) - - def __call__(self, key): - if self.file_is_exists and key in self.language_map: - return self.language_map[key] - else: - return key diff --git a/spaces/katielink/brain_tumor_segmentation/app.py b/spaces/katielink/brain_tumor_segmentation/app.py deleted file mode 100644 index 4b073e923696146f24f926b8e5ad50be24a9c736..0000000000000000000000000000000000000000 --- a/spaces/katielink/brain_tumor_segmentation/app.py +++ /dev/null @@ -1,167 +0,0 @@ -import os -import gradio as gr -import torch -from monai import bundle -from monai.transforms import ( - Compose, - LoadImaged, - EnsureChannelFirstd, - Orientationd, - NormalizeIntensityd, - Activationsd, - AsDiscreted, - ScaleIntensityd, -) - -# Define the bundle name and path for downloading -BUNDLE_NAME = 'spleen_ct_segmentation_v0.1.0' -BUNDLE_PATH = os.path.join(torch.hub.get_dir(), 'bundle', BUNDLE_NAME) - -# Title and description -title = '

Segment Brain Tumors with MONAI! 🧠
              ' -description = """ -## 🚀 To run - -Upload a brain MRI image file, or try out one of the examples below! -If you want to see a different slice, update the slider. - -More details on the model can be found [here!](https://huggingface.co/katielink/brats_mri_segmentation_v0.1.0) - -## ⚠️ Disclaimer - -This is an example, not to be used for diagnostic purposes. -""" - -references = """ -## 👀 References - -1. Myronenko, Andriy. "3D MRI brain tumor segmentation using autoencoder regularization." International MICCAI Brainlesion Workshop. Springer, Cham, 2018. https://arxiv.org/abs/1810.11654. -2. Menze BH, et al. "The Multimodal Brain Tumor Image Segmentation Benchmark (BRATS)", IEEE Transactions on Medical Imaging 34(10), 1993-2024 (2015) DOI: 10.1109/TMI.2014.2377694 -3. Bakas S, et al. "Advancing The Cancer Genome Atlas glioma MRI collections with expert segmentation labels and radiomic features", Nature Scientific Data, 4:170117 (2017) DOI:10.1038/sdata.2017.117 -""" - -examples = [ - ['examples/BRATS_485.nii.gz', 65], - ['examples/BRATS_486.nii.gz', 80] -] - -# Load the MONAI pretrained model from Hugging Face Hub -model, _, _ = bundle.load( - name = BUNDLE_NAME, - source = 'huggingface_hub', - repo = 'katielink/brats_mri_segmentation_v0.1.0', - load_ts_module=True, -) - -# Use GPU if available -device = "cuda:0" if torch.cuda.is_available() else "cpu" - -# Load the parser from the MONAI bundle's inference config -parser = bundle.load_bundle_config(BUNDLE_PATH, 'inference.json') - -# Compose the preprocessing transforms -preproc_transforms = Compose( - [ - LoadImaged(keys=["image"]), - EnsureChannelFirstd(keys="image"), - Orientationd(keys=["image"], axcodes="RAS"), - NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True), - ] -) - -# Get the inferer from the bundle's inference config -inferer = parser.get_parsed_content( - 'inferer', - lazy=True, eval_expr=True, instantiate=True -) - -# Compose the postprocessing transforms -post_transforms = Compose( - [ - Activationsd(keys='pred', sigmoid=True), - AsDiscreted(keys='pred', threshold=0.5), - ScaleIntensityd(keys='image', minv=0., maxv=1.) 
- ] -) - - -# Define the predict function for the demo -def predict(input_file, z_axis, model=model, device=device): - # Load and process data in MONAI format - data = {'image': [input_file.name]} - data = preproc_transforms(data) - - # Run inference and post-process predicted labels - model.to(device) - model.eval() - with torch.no_grad(): - inputs = data['image'].to(device) - data['pred'] = inferer(inputs=inputs[None,...], network=model) - data = post_transforms(data) - - # Convert tensors back to numpy arrays - data['image'] = data['image'].numpy() - data['pred'] = data['pred'].cpu().detach().numpy() - - # Magnetic resonance imaging sequences - t1c = data['image'][0, :, :, z_axis] # T1-weighted, post contrast - t1 = data['image'][1, :, :, z_axis] # T1-weighted, pre contrast - t2 = data['image'][2, :, :, z_axis] # T2-weighted - flair = data['image'][3, :, :, z_axis] # FLAIR - - # BraTS labels - tc = data['pred'][0, 0, :, :, z_axis] # Tumor core - wt = data['pred'][0, 1, :, :, z_axis] # Whole tumor - et = data['pred'][0, 2, :, :, z_axis] # Enhancing tumor - - return [t1c, t1, t2, flair], [tc, wt, et] - - -# Use blocks to set up a more complex demo -with gr.Blocks() as demo: - - # Show title and description - gr.Markdown(title) - gr.Markdown(description) - - with gr.Row(): - # Get the input file and slice slider as inputs - input_file = gr.File(label='input file') - z_axis = gr.Slider(0, 200, label='slice', value=50) - - with gr.Row(): - # Show the button with custom label - button = gr.Button("Segment Tumor!") - - with gr.Row(): - with gr.Column(): - # Show the input image with different MR sequences - input_image = gr.Gallery(label='input MRI sequences (T1+, T1, T2, FLAIR)') - - with gr.Column(): - # Show the segmentation labels - output_segmentation = gr.Gallery(label='output segmentations (TC, WT, ET)') - - - # Run prediction on button click - button.click( - predict, - inputs=[input_file, z_axis], - outputs=[input_image, output_segmentation] - ) - - # Have some example for the user to try out - examples = gr.Examples( - examples=examples, - inputs=[input_file, z_axis], - outputs=[input_image, output_segmentation], - fn=predict, - cache_examples=False - ) - - # Show references at the bottom of the demo - gr.Markdown(references) - - -# Launch the demo -demo.launch() diff --git a/spaces/kboaten/MIDI-Audio-Extension/MIDI-song-extender/musicautobot/music_transformer/learner.py b/spaces/kboaten/MIDI-Audio-Extension/MIDI-song-extender/musicautobot/music_transformer/learner.py deleted file mode 100644 index 49efebd8ebc173c453ef0ae5b1a82f25ca04dfa2..0000000000000000000000000000000000000000 --- a/spaces/kboaten/MIDI-Audio-Extension/MIDI-song-extender/musicautobot/music_transformer/learner.py +++ /dev/null @@ -1,171 +0,0 @@ -from fastai.basics import * -from fastai.text.learner import LanguageLearner, get_language_model, _model_meta -from .model import * -from .transform import MusicItem -from ..numpy_encode import SAMPLE_FREQ -from ..utils.top_k_top_p import top_k_top_p -from ..utils.midifile import is_empty_midi - -_model_meta[MusicTransformerXL] = _model_meta[TransformerXL] # copy over fastai's model metadata - -def music_model_learner(data:DataBunch, arch=MusicTransformerXL, config:dict=None, drop_mult:float=1., - pretrained_path:PathOrStr=None, **learn_kwargs) -> 'LanguageLearner': - "Create a `Learner` with a language model from `data` and `arch`." 
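# Flow: look up fastai's metadata for the architecture, optionally pull the saved config
# out of a pretrained checkpoint, build the language model, wrap it in a MusicLearner, and
# finally (if a checkpoint was given) restore the model weights and, where possible, the
# optimizer state.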
- meta = _model_meta[arch] - - if pretrained_path: - state = torch.load(pretrained_path, map_location='cpu') - if config is None: config = state['config'] - - model = get_language_model(arch, len(data.vocab.itos), config=config, drop_mult=drop_mult) - learn = MusicLearner(data, model, split_func=meta['split_lm'], **learn_kwargs) - - if pretrained_path: - get_model(model).load_state_dict(state['model'], strict=False) - if not hasattr(learn, 'opt'): learn.create_opt(defaults.lr, learn.wd) - try: learn.opt.load_state_dict(state['opt']) - except: pass - del state - gc.collect() - - return learn - -# Predictions -from fastai import basic_train # for predictions -class MusicLearner(LanguageLearner): - def save(self, file:PathLikeOrBinaryStream=None, with_opt:bool=True, config=None): - "Save model and optimizer state (if `with_opt`) with `file` to `self.model_dir`. `file` can be file-like (file or buffer)" - out_path = super().save(file, return_path=True, with_opt=with_opt) - if config and out_path: - state = torch.load(out_path) - state['config'] = config - torch.save(state, out_path) - del state - gc.collect() - return out_path - - def beam_search(self, xb:Tensor, n_words:int, top_k:int=10, beam_sz:int=10, temperature:float=1., - ): - "Return the `n_words` that come after `text` using beam search." - self.model.reset() - self.model.eval() - xb_length = xb.shape[-1] - if xb.shape[0] > 1: xb = xb[0][None] - yb = torch.ones_like(xb) - - nodes = None - xb = xb.repeat(top_k, 1) - nodes = xb.clone() - scores = xb.new_zeros(1).float() - with torch.no_grad(): - for k in progress_bar(range(n_words), leave=False): - out = F.log_softmax(self.model(xb)[0][:,-1], dim=-1) - values, indices = out.topk(top_k, dim=-1) - scores = (-values + scores[:,None]).view(-1) - indices_idx = torch.arange(0,nodes.size(0))[:,None].expand(nodes.size(0), top_k).contiguous().view(-1) - sort_idx = scores.argsort()[:beam_sz] - scores = scores[sort_idx] - nodes = torch.cat([nodes[:,None].expand(nodes.size(0),top_k,nodes.size(1)), - indices[:,:,None].expand(nodes.size(0),top_k,1),], dim=2) - nodes = nodes.view(-1, nodes.size(2))[sort_idx] - self.model[0].select_hidden(indices_idx[sort_idx]) - xb = nodes[:,-1][:,None] - if temperature != 1.: scores.div_(temperature) - node_idx = torch.multinomial(torch.exp(-scores), 1).item() - return [i.item() for i in nodes[node_idx][xb_length:] ] - - def predict(self, item:MusicItem, n_words:int=128, - temperatures:float=(1.0,1.0), min_bars=4, - top_k=30, top_p=0.6): - "Return the `n_words` that come after `text`." 
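# Autoregressive sampling loop: reset the model state, then repeatedly
#   1. run the (optionally position-aware) transformer on the tokens generated so far,
#   2. pick a temperature depending on whether the last token was a duration/pad token,
#      adding a penalty that grows with recent repetition,
#   3. suppress the BOS token until min_bars have elapsed, mask grammatically invalid
#      indexes, and apply top-k / top-p filtering,
#   4. sample the next token and advance the running beat position whenever a duration
#      follows a separator,
# stopping early on a predicted BOS token or once ~80% of the token budget is used at a
# four-bar boundary.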
- self.model.reset() - new_idx = [] - vocab = self.data.vocab - x, pos = item.to_tensor(), item.get_pos_tensor() - last_pos = pos[-1] if len(pos) else 0 - y = torch.tensor([0]) - - start_pos = last_pos - - sep_count = 0 - bar_len = SAMPLE_FREQ * 4 # assuming 4/4 time - vocab = self.data.vocab - - repeat_count = 0 - if hasattr(self.model[0], 'encode_position'): - encode_position = self.model[0].encode_position - else: encode_position = False - - for i in progress_bar(range(n_words), leave=True): - with torch.no_grad(): - if encode_position: - batch = { 'x': x[None], 'pos': pos[None] } - logits = self.model(batch)[0][-1][-1] - else: - logits = self.model(x[None])[0][-1][-1] - - prev_idx = new_idx[-1] if len(new_idx) else vocab.pad_idx - - # Temperature - # Use first temperatures value if last prediction was duration - temperature = temperatures[0] if vocab.is_duration_or_pad(prev_idx) else temperatures[1] - repeat_penalty = max(0, np.log((repeat_count+1)/4)/5) * temperature - temperature += repeat_penalty - if temperature != 1.: logits = logits / temperature - - - # Filter - # bar = 16 beats - filter_value = -float('Inf') - if ((last_pos - start_pos) // 16) <= min_bars: logits[vocab.bos_idx] = filter_value - - logits = filter_invalid_indexes(logits, prev_idx, vocab, filter_value=filter_value) - logits = top_k_top_p(logits, top_k=top_k, top_p=top_p, filter_value=filter_value) - - # Sample - probs = F.softmax(logits, dim=-1) - idx = torch.multinomial(probs, 1).item() - - # Update repeat count - num_choices = len(probs.nonzero().view(-1)) - if num_choices <= 2: repeat_count += 1 - else: repeat_count = repeat_count // 2 - - if prev_idx==vocab.sep_idx: - duration = idx - vocab.dur_range[0] - last_pos = last_pos + duration - - bars_pred = (last_pos - start_pos) // 16 - abs_bar = last_pos // 16 - # if (bars % 8 == 0) and (bars_pred > min_bars): break - if (i / n_words > 0.80) and (abs_bar % 4 == 0): break - - - if idx==vocab.bos_idx: - print('Predicted BOS token. 
Returning prediction...') - break - - new_idx.append(idx) - x = x.new_tensor([idx]) - pos = pos.new_tensor([last_pos]) - - pred = vocab.to_music_item(np.array(new_idx)) - full = item.append(pred) - return pred, full - -# High level prediction functions from midi file -def predict_from_midi(learn, midi=None, n_words=400, - temperatures=(1.0,1.0), top_k=30, top_p=0.6, seed_len=None, **kwargs): - vocab = learn.data.vocab - seed = MusicItem.from_file(midi, vocab) if not is_empty_midi(midi) else MusicItem.empty(vocab) - if seed_len is not None: seed = seed.trim_to_beat(seed_len) - - pred, full = learn.predict(seed, n_words=n_words, temperatures=temperatures, top_k=top_k, top_p=top_p, **kwargs) - return full - -def filter_invalid_indexes(res, prev_idx, vocab, filter_value=-float('Inf')): - if vocab.is_duration_or_pad(prev_idx): - res[list(range(*vocab.dur_range))] = filter_value - else: - res[list(range(*vocab.note_range))] = filter_value - return res diff --git a/spaces/keanteng/job/app.py b/spaces/keanteng/job/app.py deleted file mode 100644 index 439d5b7c9211a0f61cddeca49b80c1a537702c7a..0000000000000000000000000000000000000000 --- a/spaces/keanteng/job/app.py +++ /dev/null @@ -1,1060 +0,0 @@ -""" -MIT License - -Copyright (c) 2023 Khor Kean Teng, Ang Zhi Nuo, Connie Hui Kang Yi, Ling Sing Cheng, Tan Yu Jing - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
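The `filter_invalid_indexes` helper that closes the previous file enforces an alternating token grammar: after a duration (or pad) token the duration range is masked so a note must follow, and after a note token the note range is masked so a duration must follow. A tiny self-contained illustration of the same masking idea follows; the 8-token toy vocabulary is an assumption for demonstration only, not the project's `MusicVocab`.

```python
import torch

# Toy vocabulary: indices 0-3 act as note tokens, 4-7 as duration tokens.
NOTE_RANGE = (0, 4)
DUR_RANGE = (4, 8)

def filter_invalid(logits, prev_was_duration, filter_value=-float('Inf')):
    # Two durations (or two notes) in a row are forbidden,
    # so mask out the range the previous token came from.
    banned = DUR_RANGE if prev_was_duration else NOTE_RANGE
    logits[list(range(*banned))] = filter_value
    return logits

logits = torch.zeros(8)
print(filter_invalid(logits.clone(), prev_was_duration=True))   # duration indices masked
print(filter_invalid(logits.clone(), prev_was_duration=False))  # note indices masked
```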
- -""" - -# load packages -import pandas as pd -import numpy as np -import streamlit as st -import geopandas as gpd -from backend.functions import * -try: - from backend.configs import * -except ImportError: - pass -import matplotlib.pyplot as plt -from datetime import * -import google.generativeai as palm - -# website settings -# turn off the side bar by default -st.set_page_config(layout="wide", initial_sidebar_state="auto") - -## customize the side bar -st.sidebar.title("🌍 Geo-Sustainable Jobs Solution") -st.sidebar.caption("A Job Solution Prototype") - -# user input -with st.sidebar: - with st.expander("PaLM-2 API Configuration", expanded=True): - st.caption(":red[You must enable API and input your own API key when using Streamlit web]") - web_toggle = st.toggle('Enable API') - api_input = st.text_input("Your Google API Token", type = "password", placeholder="Enter your API key here") - st.caption("You can get your API key from https://developers.generativeai.google/") - - location_input = st.text_input( - "Enter a location (Malaysia Only):", "University Malaya" - ) - state_input = st.selectbox( - "Select a state:", - [ - "W.P Kuala Lumpur", - "Johor", - "Kedah", - "Kelantan", - "Melaka", - "Negeri Sembilan", - "Pahang", - "Perak", - "Perlis", - "Pulau Pinang", - "Sabah", - "Sarawak", - "Selangor", - "Terengganu", - "W.P Labuan", - "W.P Putrajaya", - ], - ) - # add exapander - with st.expander("Your Job Profile", expanded=False): - user_skills = st.text_input( - "Enter Your Skills, Desired Sector & Qualifications:", - "English, Leadership, Problem Solving, Malay, Program Planning", - ) - user_qualification = st.selectbox( - "Enter Your Qualification:", - ( - "1-Skill Certificate 1", - "2-Skill Certificate 2", - "3-Skill Certificate 3", - "4-Diploma", - "5-Advanced Diploma", - "6-Bachelor Degree", - "7-Master Degree", - "8-Doctorate Degree", - ), - ) - user_sector = st.text_input("Enter Your Desired Sector:", "Teacher") - - submit = st.button("Compute", type="primary") - st.subheader("📈 OpenDOSM Statistics") - st.caption("Source: Labour Market Review, 2023 Q2") - -# setup the layout with columns -col1, col2 = st.columns(spec=[0.7, 0.3], gap="small") - -# load data -df = gpd.read_file("data/shapefile/polbnda_mys.shp") -jobdata = pd.read_csv("data/job_posting_cleaned.csv") -dosm_supply = pd.ExcelFile("data/dosm_supply.xlsx") -dosm_demand = pd.ExcelFile("data/dosm_demand.xlsx") -airports = gpd.read_file("data/airports/hotosm_mys_airports_polygons.shp") -edu_fac = gpd.read_file( - "data/edu_facilities/hotosm_mys_education_facilities_polygons.shp" -) -financial = gpd.read_file( - "data/financial_service/hotosm_mys_financial_services_polygons.shp" -) -health = gpd.read_file( - "data/health_facilities/hotosm_mys_health_facilities_polygons.shp" -) -point_of_interest = gpd.read_file( - "data/point_of_interest/hotosm_mys_points_of_interest_polygons.shp" -) -seaports = gpd.read_file("data/sea_ports/hotosm_mys_sea_ports_polygons.shp") -qualification_data = pd.read_excel("data/qualification level.xlsx") -sectors_data = pd.read_excel("data/skill by sector.xlsx") -course_suggestions_data = pd.read_excel("data/course suggestion.xlsx") -vacancy_rate = pd.read_csv("data/Vacancy Rate.csv", index_col=[0], parse_dates=[0]) -unemployment_rate = pd.read_csv("data/Unemployment Rate.csv") - -# configure geodataframe -airports = gpd.GeoDataFrame(airports, geometry=airports.geometry, crs="EPSG:4326") -edu_fac = gpd.GeoDataFrame(edu_fac, geometry=edu_fac.geometry, crs="EPSG:4326") -financial = 
gpd.GeoDataFrame(financial, geometry=financial.geometry, crs="EPSG:4326") -health = gpd.GeoDataFrame(health, geometry=health.geometry, crs="EPSG:4326") -point_of_interest = gpd.GeoDataFrame( - point_of_interest, geometry=point_of_interest.geometry, crs="EPSG:4326" -) -seaports = gpd.GeoDataFrame(seaports, geometry=seaports.geometry, crs="EPSG:4326") - -# tidy up the data -airports = airports[["name", "aeroway", "geometry"]] -edu_fac = edu_fac[["name", "amenity", "geometry"]] -financial = financial[["name", "amenity", "geometry"]] -health = health[["healthcare", "name", "amenity", "geometry"]] -point_of_interest = point_of_interest[ - ["tourism", "name", "amenity", "shop", "geometry"] -] -seaports = seaports[["name", "amenity", "geometry"]] -vacancy_rate = vacancy_rate.dropna(axis=1, how="all") -vacancy_rate.dropna(inplace=True) -unemployment_rate = unemployment_rate.dropna(axis=1, how="all") - -# change the type of vacancy_rate_employment as data frame -unemployment_rate = pd.DataFrame(unemployment_rate) -unemployment_rate.iloc[:, 1:] = unemployment_rate.iloc[:, 1:].astype(float) -unemployment_rate.iloc[:, 0] = pd.to_datetime(unemployment_rate.iloc[:, 0]) -unemployment_rate = unemployment_rate.set_index("Date") - -# temp map display -placeholder = st.empty() -with placeholder.container(): - m = leafmap.Map(center=[3.8, 101.4], zoom=7, google_map="HYBRID") - m.to_streamlit(height=700) - - -############################## -# forecast engine -def dateofforecast(data): - forecast_date = [ - "2023-04-01", - "2023-07-01", - "2023-10-01", - "2024-01-01", - "2024-04-01", - "2024-07-01", - "2024-10-01", - "2025-01-01", - ] - forecast_date = pd.to_datetime(forecast_date) - data.index = forecast_date - - -# ETS prediction -VETS_Kedah = ets_fore_a(vacancy_rate["Kedah"], 6) -VETS_Melaka = ets_fore_a(vacancy_rate["Melaka"], 6) -VETS_Negeri_Sembilan = ets_fore_c(vacancy_rate["Negeri Sembilan"], 6) -VETS_Pahang = ets_fore_a(vacancy_rate["Pahang"], 6) -VETS_Perlis = ets_fore_c(vacancy_rate["Perlis"], 6) -VETS_Terengganu = ets_fore_a(vacancy_rate["Terengganu"], 6) -VETS_Sabah = ets_fore_a(vacancy_rate["Sabah"], 6) -VETS_Sarawak = ets_fore_a(vacancy_rate["Sarawak"], 6) -VETS_Putrajaya = ets_fore_a(vacancy_rate["W.P Putrajaya"], 6) - -# ARIMA prediction -VETS_Johor = ARIMA_fore(vacancy_rate["Johor"]) -VETS_Kelantan = ARIMA_fore(vacancy_rate["Kelantan"]) -VETS_Penang = ARIMA_fore(vacancy_rate["Pulau Pinang"]) -VETS_Perak = ARIMA_fore(vacancy_rate["Perak"]) -VETS_Selangor = ARIMA_fore(vacancy_rate["Selangor"]) -VETS_Kuala_Lumpur = ARIMA_fore(vacancy_rate["W.P Kuala Lumpur"]) -VETS_Labuan = ARIMA_fore(vacancy_rate["W.P Labuan"]) -VETS_Total = ARIMA_fore(vacancy_rate["Total"]) - -# Change the Starting date of the forecast -dateofforecast(VETS_Kedah) -dateofforecast(VETS_Melaka) -dateofforecast(VETS_Negeri_Sembilan) -dateofforecast(VETS_Pahang) -dateofforecast(VETS_Perlis) -dateofforecast(VETS_Terengganu) -dateofforecast(VETS_Sabah) -dateofforecast(VETS_Sarawak) -dateofforecast(VETS_Putrajaya) -dateofforecast(VETS_Johor) -dateofforecast(VETS_Kelantan) -dateofforecast(VETS_Penang) -dateofforecast(VETS_Perak) -dateofforecast(VETS_Selangor) -dateofforecast(VETS_Kuala_Lumpur) -dateofforecast(VETS_Labuan) -dateofforecast(VETS_Total) - -# ETS Model -Johor_vpred = ets_fore_a(unemployment_rate[["Johor"]], 6) -Kedah_pred = ets_fore_d(unemployment_rate[["Kedah"]], 6) -Kelantan_pred = ets_fore_d(unemployment_rate[["Kelantan"]], 6) -Melaka_pred = ets_fore_b(unemployment_rate[["Melaka"]], 6) -Pahang_pred = 
ets_fore_c(unemployment_rate[["Pahang"]], 6) -Perak_pred = ets_fore_c(unemployment_rate[["Perak"]], 6) -Perlis_pred = ets_fore_c(unemployment_rate[["Perlis"]], 6) -Sabah_pred = ets_fore_a(unemployment_rate[["Sabah"]], 6) -Sarawak_pred = ets_fore_d(unemployment_rate[["Sarawak"]], 6) -Selangor_pred = ets_fore_d(unemployment_rate[["Selangor"]], 6) -Terengganu_pred = ets_fore_b(unemployment_rate[["Terengganu"]], 6) -Labuan_pred = ets_fore_d(unemployment_rate["W.P Labuan"], 6) - -# ARIMA Model -UARIMA_N9_pred = ARIMA_fore(unemployment_rate[["Negeri Sembilan"]]) -UARIMA_Penang_pred = ARIMA_fore(unemployment_rate[["Pulau Pinang"]]) -UARIMA_KL_pred = ARIMA_fore(unemployment_rate[["W.P Kuala Lumpur"]]) -UARIMA_Putrajaya_pred = ARIMA_fore(unemployment_rate[["W.P Putrajaya"]]) -UARIMA_Malaysia_pred = ARIMA_fore(unemployment_rate[["Total"]]) - -# Change the Starting date of the forecast -dateofforecast(Johor_vpred) -dateofforecast(Kedah_pred) -dateofforecast(Kelantan_pred) -dateofforecast(Melaka_pred) -dateofforecast(Pahang_pred) -dateofforecast(Perak_pred) -dateofforecast(Perlis_pred) -dateofforecast(Sabah_pred) -dateofforecast(Sarawak_pred) -dateofforecast(Selangor_pred) -dateofforecast(Terengganu_pred) -dateofforecast(Labuan_pred) -dateofforecast(UARIMA_N9_pred) -dateofforecast(UARIMA_Penang_pred) -dateofforecast(UARIMA_KL_pred) -dateofforecast(UARIMA_Putrajaya_pred) -dateofforecast(UARIMA_Malaysia_pred) - -vacancy_rate.dateformat = vacancy_rate.index.strftime("%Y-%m-%d") -unemployment_rate.dateformat = unemployment_rate.index.strftime("%Y-%d-%m") -plt.rcParams["figure.figsize"] = (20, 12) - - -def forecast_plot(state): - state = state.title() - if state == "Kedah": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Kedah"], label="Vacancy Rate") - ax1.plot(VETS_Kedah, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot( - unemployment_rate.index, - unemployment_rate["Kedah"], - "r", - label="Unemployment Rate", - ) - ax2.plot(Kedah_pred, "b", label="Forecast of Unemployment Rate") - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Johor": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Johor"], label="Vacancy Rate") - ax1.plot(VETS_Johor, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot( - unemployment_rate.index, - unemployment_rate["Johor"], - "r", - label="Unemployment Rate", - ) - ax2.plot(Johor_vpred, "b", label="Forecast of Unemployment Rate") - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Kelantan": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Kelantan"], label="Vacancy Rate") - ax1.plot(VETS_Kelantan, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot( - unemployment_rate.index, - unemployment_rate["Kelantan"], - "r", - label="Unemployment Rate", - ) - ax2.plot(Kelantan_pred, "b", label="Forecast of Unemployment Rate") - ax2.legend(loc="upper left", 
bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Melaka": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Melaka"], label="Vacancy Rate") - ax1.plot(VETS_Melaka, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot( - unemployment_rate.index, - unemployment_rate["Melaka"], - "r", - label="Unemployment Rate", - ) - ax2.plot(Melaka_pred, "b", label="Forecast of Unemployment Rate") - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Negeri Sembilan": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot( - vacancy_rate.index, vacancy_rate["Negeri Sembilan"], label="Vacancy Rate" - ) - ax1.plot(VETS_Negeri_Sembilan, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot( - unemployment_rate.index, - unemployment_rate["Negeri Sembilan"], - "r", - label="Unemployment Rate", - ) - ax2.plot(UARIMA_N9_pred, "b", label="Forecast of Unemployment Rate") - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Pahang": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Pahang"], label="Vacancy Rate") - ax1.plot(VETS_Pahang, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot( - unemployment_rate.index, - unemployment_rate["Pahang"], - "r", - label="Unemployment Rate", - ) - ax2.plot(Pahang_pred, "b", label="Forecast of Unemployment Rate") - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Penang": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Pulau Pinang"], label="Vacancy Rate") - ax1.plot(VETS_Penang, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot( - unemployment_rate.index, - unemployment_rate["Pulau Pinang"], - "r", - label="Unemployment Rate", - ) - ax2.plot(UARIMA_Penang_pred, "b", label="Forecast of Unemployment Rate") - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Perak": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Perak"], label="Vacancy Rate") - ax1.plot(VETS_Perak, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot( - unemployment_rate.index, - unemployment_rate["Perak"], - "r", - label="Unemployment Rate", - ) - ax2.plot(Perak_pred, "b", label="Forecast of Unemployment Rate") - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Perlis": - 
fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Perlis"], label="Vacancy Rate") - ax1.plot(VETS_Perlis, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot(Perlis_pred, "b", label="Forecast of Unemployment Rate") - ax2.plot( - unemployment_rate.index, - unemployment_rate["Perlis"], - "r", - label="Unemployment Rate", - ) - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Sabah": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Sabah"], label="Vacancy Rate") - ax1.plot(VETS_Sabah, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot(Sabah_pred, "b", label="Forecast of Unemployment Rate") - ax2.plot( - unemployment_rate.index, - unemployment_rate["Sabah"], - "r", - label="Unemployment Rate", - ) - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Sarawak": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Sarawak"], label="Vacancy Rate") - ax1.plot(VETS_Sarawak, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot(Sarawak_pred, "b", label="Forecast of Unemployment Rate") - ax2.plot( - unemployment_rate.index, - unemployment_rate["Sarawak"], - "r", - label="Unemployment Rate", - ) - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Selangor": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Selangor"], label="Vacancy Rate") - ax1.plot(VETS_Selangor, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot(Selangor_pred, "b", label="Forecast of Unemployment Rate") - ax2.plot( - unemployment_rate.index, - unemployment_rate["Selangor"], - "r", - label="Unemployment Rate", - ) - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Terengganu": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Terengganu"], label="Vacancy Rate") - ax1.plot(VETS_Terengganu, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot(Terengganu_pred, "b", label="Forecast of Unemployment Rate") - ax2.plot( - unemployment_rate.index, - unemployment_rate["Terengganu"], - "r", - label="Unemployment Rate", - ) - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "W.P Kuala Lumpur": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot( - vacancy_rate.index, vacancy_rate["W.P Kuala Lumpur"], label="Vacancy Rate" - ) - 
ax1.plot(VETS_Kuala_Lumpur, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot(UARIMA_KL_pred, "b", label="Forecast of Unemployment Rate") - ax2.plot( - unemployment_rate.index, - unemployment_rate["W.P Kuala Lumpur"], - "r", - label="Unemployment Rate", - ) - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "W.P Labuan": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot(vacancy_rate.index, vacancy_rate["W.P Labuan"], label="Vacancy Rate") - ax1.plot(VETS_Labuan, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot(Labuan_pred, "b", label="Forecast of Unemployment Rate") - ax2.plot( - unemployment_rate.index, - unemployment_rate["W.P Labuan"], - "r", - label="Unemployment Rate", - ) - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "W.P Putrajaya": - fig, ax1 = plt.subplots() - plt.figure(figsize=(12, 8)) - - ax1.plot( - vacancy_rate.index, vacancy_rate["W.P Putrajaya"], label="Vacancy Rate" - ) - ax1.plot(VETS_Putrajaya, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot(UARIMA_Putrajaya_pred, "b", label="Forecast of Unemployment Rate") - ax2.plot( - unemployment_rate.index, - unemployment_rate["W.P Putrajaya"], - "r", - label="Unemployment Rate", - ) - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - elif state == "Malaysia": - fig, ax1 = plt.subplots() - plt.figure(figsize=(20, 12)) - - ax1.plot(vacancy_rate.index, vacancy_rate["Total"], label="Vacancy Rate") - ax1.plot(VETS_Total, label="Forecast of Vacancy Rate") - ax1.legend(loc="upper left") - ax1.set_xlabel("Date") - ax1.set_ylabel("Vacancy Rate") - - ax2 = ax1.twinx() - ax2.plot(UARIMA_Malaysia_pred, "b", label="Forecast of Unemployment Rate") - ax2.plot( - unemployment_rate.index, - unemployment_rate["Total"], - "r", - label="Unemployment Rate", - ) - ax2.legend(loc="upper left", bbox_to_anchor=(0, 0.95)) - ax2.set_ylabel("Unemployment Rate") - plt.rcParams["figure.figsize"] = (20, 12) - st.pyplot(fig) - - -############################# - -# visualization part for the selected state -if state_input: - with st.sidebar: - with st.expander("Vacancy & Unemployment Rate (%) Projection", expanded=False): - forecast_plot(state_input) - with st.expander("State Labour Supply ('000)", expanded=False): - supply_state = dosm_supply.parse("state") - supply_state = supply_state[supply_state["state"] == state_input] - st.dataframe(supply_state, hide_index=True) - with st.expander("By Ethnic Group ('000)", expanded=False): - ethnic_state = dosm_supply.parse("ethnic") - st.dataframe(ethnic_state, hide_index=True) - with st.expander("By Age Group ('000)", expanded=False): - age_state = dosm_supply.parse("age") - st.dataframe(age_state, hide_index=True) - with st.expander("By Education Level ('000)", expanded=False): - edu_state = dosm_supply.parse("edu") - st.dataframe(edu_state, hide_index=True) - with st.expander("Vacancy Ads Count", expanded=False): - 
vacancy_ads_state = dosm_demand.parse("vacancy_ads_state") - vacancy_ads_state = vacancy_ads_state[ - vacancy_ads_state["state"] == state_input - ] - st.dataframe(vacancy_ads_state, hide_index=True) - with st.expander("Sector Performance", expanded=False): - sector_wage_prob = dosm_demand.parse("sector_wage_prob") - st.dataframe(sector_wage_prob, hide_index=True) - -# submit button is clicked -if submit: - # clear the map - placeholder.empty() - with st.spinner("Hang on for a second..."): - with col1: - ## map generation - input_df = geocoder(location_input) - - if input_df["Latitude"][0] == None: - st.error("Location not found. Please try again.") - st.stop() - - intersected_df = intersection_check(input_df, df) - - m = leafmap.Map( - center=[input_df["Latitude"][0], input_df["Longitude"][0]], - zoom=15, - google_map="HYBRID", - ) - style = { - "stroke": True, - "color": "#0000ff", - "weight": 2, - "opacity": 1, - "fill": True, - "fillColor": "#0000ff", - "fillOpacity": 0.1, - } - style_extras = { - "stroke": True, - "color": "#d82c02", - "weight": 2, - "opacity": 1, - "fill": True, - "fillColor": "#d82c02", - "fillOpacity": 0.1, - } - - m.add_points_from_xy( - input_df, - x="Longitude", - y="Latitude", - icon_names=["gear", "map", "leaf", "globe"], - ) - m.add_gdf(intersected_df, layer_name="Region of Interest", style=style) - m.add_gdf(df, layer_name="Region of Interest", style=style) - m.add_gdf(airports, layer_name="Airports", style=style_extras) - m.add_gdf(edu_fac, layer_name="Education Facilities", style=style_extras) - m.add_gdf(financial, layer_name="Financial Services", style=style_extras) - m.add_gdf(health, layer_name="Health Facilities", style=style_extras) - m.add_gdf( - point_of_interest, layer_name="Point of Interest", style=style_extras - ) - m.add_gdf(seaports, layer_name="Sea Ports", style=style_extras) - m.to_streamlit(height=700) - - st.sidebar.expander("ℹ️ About", expanded=True) - - with col2: - # configure the API - if web_toggle: - configure_api(api_key=api_input) - else: - configure_api(api_key=PALM_TOKEN) - - # get the skill list - skill_list = skill_suggest_model(location_input) - # get the job list - job_list = job_suggest_model(skill_list) - - # cleaning - skill_list = list_cleaning(skill_list) - job_list = list_cleaning(job_list) - - st.subheader("🧪 Skill List:") - st.write( - "Based on the location of interest, we suggest the following skills:" - + " " - + skill_list - ) - - st.divider() - - st.subheader("💼 Job List:") - st.write( - "Based on the skills, we suggest the following jobs:" + " " + job_list - ) - - # job recommendation engine - job_list = job_list.split(", ") - result_df = [] - - for i in range(0, len(job_list)): - job_match = job_matcher( - jobdata, column="title", string_to_match=str(job_list[i]) - ) - - if job_match.empty: - continue - else: - job_key = job_match["title"][0] - - # result = job_recom_engine(jobdata, job_key=job_key) - sig = job_recom_engine(jobdata) - result = give_rec( - titlename=job_key, sig=sig, jobdata=jobdata - ).sort_values(by="View", ascending=False) - result_df.append(result) - - # if the result is empty we will return empty dataframe - if len(result_df) == 0: - pass - else: - result_df = pd.concat([df for df in result_df], ignore_index=True) - - with st.expander("Job Recommendation", expanded=False): - st.table(result_df) - - # resume recommendation engine - qualification_dict = dict( - zip( - qualification_data["qualification"], qualification_data["mqf level"] - ) - ) - matching_sectors = [] - - for _, row in 
sectors_data.iterrows(): - sector = row["sector"] - sector_skills = row["skills"] - min_qualification = row["qualification"] - # compute the similarity score between user skills and all sector skills - similarity_score = compare_skills(user_skills, sector_skills) - - # check if the similarity score is above 0 and if the user's qualification is above the minimum qualification level - if similarity_score > 0 and int(user_qualification[0:1]) >= int( - min_qualification - ): - if not user_sector or user_sector.lower() in sector.lower(): - matching_sectors.append(sector) - - # output the results - with st.expander("Job Profile Analysis", expanded=False): - if matching_sectors: - # the matches sector could be more than one, so we need to loop through all of them - for sector in matching_sectors: - sector_row = sectors_data.loc[ - sectors_data["sector"] == sector - ].iloc[0] - required_skills = set(sector_row["skills"].split(",")) - user_input_skills = set(user_skills.lower().split(",")) - matching_skills = user_input_skills.intersection( - required_skills - ) - lacking_skills = required_skills.difference(user_input_skills) - - st.write(f"**Sector:** {sector.title()}") - st.write( - "**Matching Skills:**", ", ".join(matching_skills).title() - ) - st.write( - "**Lacking Skills:**", ", ".join(lacking_skills)[2:].title() - ) - st.write( - f"**Minimum Qualification:** MQF Level {sector_row['qualification']}" - ) - - course_suggestions = course_suggestions_data.loc[ - course_suggestions_data["sector"] == sector - ] - if not course_suggestions.empty: - st.write("**Course Suggestions:**") - - for _, suggestion_row in course_suggestions.iterrows(): - suggestion_row = ( - pd.DataFrame(suggestion_row) - .transpose() - .reset_index(drop=True) - ) - - text1 = str(suggestion_row["course suggestion 1"][0]) - suggestion_link1 = f"{text1}" - text2 = str(suggestion_row["course suggestion 2"][0]) - suggestion_link2 = f"{text2}" - text3 = str(suggestion_row["course suggestion 3"][0]) - suggestion_link3 = f"{text3}" - st.markdown( - "- " + suggestion_link1, unsafe_allow_html=True - ) - st.markdown( - "- " + suggestion_link2, unsafe_allow_html=True - ) - st.markdown( - "- " + suggestion_link3, unsafe_allow_html=True - ) - - if len(lacking_skills) > 0: - st.write("**Role & Responsibilities:**") - job_description = sector_row["job description"].split(";") - for desc in job_description: - st.write(desc.strip()) - st.divider() - # if no match found, output this message - else: - st.write( - "Sorry, no matching sectors found in our database for your skills and qualification level." 
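The sector-matching loop above relies on `compare_skills` from `backend.functions`, which is not included in this diff. One plausible, minimal way such a score could be computed is a Jaccard overlap of the comma-separated skill lists; this formulation is an assumption, not the project's actual implementation.

```python
def compare_skills(user_skills: str, sector_skills: str) -> float:
    """Jaccard similarity between two comma-separated skill strings (case-insensitive)."""
    user = {s.strip().lower() for s in user_skills.split(",") if s.strip()}
    sector = {s.strip().lower() for s in sector_skills.split(",") if s.strip()}
    if not user or not sector:
        return 0.0
    return len(user & sector) / len(user | sector)

# Two shared skills out of four distinct ones -> 0.5
print(compare_skills("English, Leadership, Problem Solving",
                     "english, leadership, program planning"))
```

Any score above zero, combined with the MQF-level check, qualifies a sector as a match in the loop above.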
- ) - - # regional analysis - airport_count = [] - edu_fac_count = [] - financial_count = [] - health_count = [] - seaports_count = [] - - intersected_df = intersected_df.reset_index() - - for index in range(len(airports)): - if ( - intersected_df["geometry"][0].contains(airports["geometry"][index]) - == True - ): - airport_count.append(airports["name"][index]) - for index in range(len(edu_fac)): - if ( - intersected_df["geometry"][0].contains(edu_fac["geometry"][index]) - == True - ): - edu_fac_count.append(edu_fac["name"][index]) - for index in range(len(financial)): - if ( - intersected_df["geometry"][0].contains(financial["geometry"][index]) - == True - ): - financial_count.append(financial["name"][index]) - for index in range(len(health)): - if ( - intersected_df["geometry"][0].contains(health["geometry"][index]) - == True - ): - health_count.append(health["name"][index]) - for index in range(len(seaports)): - if ( - intersected_df["geometry"][0].contains(seaports["geometry"][index]) - == True - ): - seaports_count.append(seaports["name"][index]) - - poi_name = [] - poi_tourism = [] - poi_amenity = [] - poi_shop = [] - - for index in range(len(point_of_interest)): - if ( - intersected_df["geometry"][0].contains( - point_of_interest["geometry"][index] - ) - == True - ): - poi_name.append(point_of_interest["name"][index]) - poi_tourism.append(point_of_interest["tourism"][index]) - poi_amenity.append(point_of_interest["amenity"][index]) - poi_shop.append(point_of_interest["shop"][index]) - - # make a - poi = pd.DataFrame( - { - "name": poi_name, - "Tourism": poi_tourism, - "Amenity": poi_amenity, - "Shop": poi_shop, - } - ) - - # find the location of interest - no_hotel = len( - poi[(poi["Tourism"] == "hotel") | (poi["Tourism"] == "hostel")] - ) - no_theme_park = len(poi[(poi["Tourism"] == "theme_park")]) - no_food_place = len( - poi[ - (poi["Amenity"] == "restaurant") - | (poi["Amenity"] == "cafe") - | (poi["Amenity"] == "fast_food") - ] - ) - no_fuel = len(poi[(poi["Amenity"] == "fuel")]) - no_marketplace = len(poi[poi["Shop"] == "marketplace"]) - no_supermarket = len( - poi[(poi["Shop"] == "supermarket") | (poi["Shop"] == "convenience")] - ) - no_cloth_jewelry = len( - poi[(poi["Shop"] == "clothes") | (poi["Shop"] == "jewelry")] - ) - no_mall = len( - poi[(poi["Shop"] == "mall") | (poi["Shop"] == "department_store")] - ) - no_repair = len( - poi[(poi["Shop"] == "car_repair") | poi["Shop"] == "motorcycle"] - ) - no_hair = len(poi[poi["Shop"] == "hairdresser"]) - no_telco = len( - poi[ - (poi["Shop"] == "telecommunication") - | (poi["Shop"] == "electronics") - ] - ) - - # make a dataframe - poi_df = pd.DataFrame( - { - "Place": [ - "Airport", - "Education Facilities", - "Financial Facilities", - "Health Facilities", - "Seaports", - "Hotel", - "Theme Park", - "Food Place", - "Fuel Station", - "Marketplace", - "Supermarket", - "Cloth & Jewelry", - "Mall", - "Repair Shops", - "Hair Salon", - "Telco", - ], - "Count": [ - len(airport_count), - len(edu_fac_count), - len(financial_count), - len(health_count), - len(seaports), - no_hotel, - no_theme_park, - no_food_place, - no_fuel, - no_marketplace, - no_supermarket, - no_cloth_jewelry, - no_mall, - no_repair, - no_hair, - no_telco, - ], - } - ) - - with st.sidebar: - st.subheader("📍 Location Analysis") - with st.expander("Point of Interest", expanded=False): - st.table(poi_df) - -# sidebar footer -st.sidebar.caption( - "MIT License 2023 © Isekai Truck: Ang Zhi Nuo, Connie Hui Kang Yi, Khor Kean Teng, Ling Sing Cheng, Tan Yu Jing" -) diff 
--git a/spaces/keras-io/ProbabilisticBayesianNetwork/load_bnn_model.py b/spaces/keras-io/ProbabilisticBayesianNetwork/load_bnn_model.py deleted file mode 100644 index 045460d338ebbef9639d0e61fefb0ece96d6fe86..0000000000000000000000000000000000000000 --- a/spaces/keras-io/ProbabilisticBayesianNetwork/load_bnn_model.py +++ /dev/null @@ -1,100 +0,0 @@ -import tensorflow as tf -from tensorflow import keras -from tensorflow.keras import layers -import tensorflow_probability as tfp - -def load_bnn_model(): - FEATURE_NAMES = [ - "fixed acidity", - "volatile acidity", - "citric acid", - "residual sugar", - "chlorides", - "free sulfur dioxide", - "total sulfur dioxide", - "density", - "pH", - "sulphates", - "alcohol", - ] - - hidden_units=[8,8] - learning_rate = 0.001 - def create_model_inputs(): - inputs = {} - for feature_name in FEATURE_NAMES: - inputs[feature_name] = layers.Input( - name=feature_name, shape=(1,), dtype=tf.float32 - ) - return inputs - - # Define the prior weight distribution as Normal of mean=0 and stddev=1. - # Note that, in this example, the we prior distribution is not trainable, - # as we fix its parameters. - def prior(kernel_size, bias_size, dtype=None): - n = kernel_size + bias_size - prior_model = keras.Sequential( - [ - tfp.layers.DistributionLambda( - lambda t: tfp.distributions.MultivariateNormalDiag( - loc=tf.zeros(n), scale_diag=tf.ones(n) - ) - ) - ] - ) - return prior_model - - - # Define variational posterior weight distribution as multivariate Gaussian. - # Note that the learnable parameters for this distribution are the means, - # variances, and covariances. - def posterior(kernel_size, bias_size, dtype=None): - n = kernel_size + bias_size - posterior_model = keras.Sequential( - [ - tfp.layers.VariableLayer( - tfp.layers.MultivariateNormalTriL.params_size(n), dtype=dtype - ), - tfp.layers.MultivariateNormalTriL(n), - ] - ) - return posterior_model - - def create_probablistic_bnn_model(train_size): - inputs = create_model_inputs() - features = keras.layers.concatenate(list(inputs.values())) - features = layers.BatchNormalization()(features) - - # Create hidden layers with weight uncertainty using the DenseVariational layer. - for units in hidden_units: - features = tfp.layers.DenseVariational( - units=units, - make_prior_fn=prior, - make_posterior_fn=posterior, - kl_weight=1 / train_size, - activation="sigmoid", - )(features) - - # Create a probabilistic output (Normal distribution), and use the `Dense` layer - # to produce the parameters of the distribution. - # We set units=2 to learn both the mean and the variance of the Normal distribution. 
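Because the model built in this file ends in a `tfp.layers.IndependentNormal` head, calling it returns a distribution object rather than a point prediction. A short sketch of how the mean and predictive uncertainty could be read out at inference time follows; the single-sample feature dictionary is a made-up example, while `FEATURE_NAMES` and `model` are the names used in this file.

```python
import numpy as np

# Hypothetical single wine sample: one (1, 1) array per named input feature.
sample = {name: np.array([[0.5]], dtype="float32") for name in FEATURE_NAMES}

prediction_distribution = model(sample)            # an Independent(Normal) distribution
mean = prediction_distribution.mean().numpy()      # predicted wine quality
stddev = prediction_distribution.stddev().numpy()  # model's predictive uncertainty
print(f"prediction: {mean.squeeze():.2f} +/- {stddev.squeeze():.2f}")
```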
- distribution_params = layers.Dense(units=2)(features) - outputs = tfp.layers.IndependentNormal(1)(distribution_params) - - model = keras.Model(inputs=inputs, - outputs=outputs) - - return model - - def negative_loglikelihood(targets, estimated_distribution): - estimated_distirbution = tfp.distributions.MultivariateNormalTriL(estimated_distribution) - return -estimated_distribution.log_prob(targets) - - model = create_probablistic_bnn_model(4163) - model.compile( - optimizer=keras.optimizers.RMSprop(learning_rate=learning_rate), - loss=negative_loglikelihood, - metrics=[keras.metrics.RootMeanSquaredError()], - ) - model.load_weights('bnn_wine_model.h5') - return model \ No newline at end of file diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/util/detect_lm68.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/util/detect_lm68.py deleted file mode 100644 index b7e40997289e17405e1fb6c408d21adce7b626ce..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/util/detect_lm68.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -import cv2 -import numpy as np -from scipy.io import loadmat -import tensorflow as tf -from util.preprocess import align_for_lm -from shutil import move - -mean_face = np.loadtxt('util/test_mean_face.txt') -mean_face = mean_face.reshape([68, 2]) - -def save_label(labels, save_path): - np.savetxt(save_path, labels) - -def draw_landmarks(img, landmark, save_name): - landmark = landmark - lm_img = np.zeros([img.shape[0], img.shape[1], 3]) - lm_img[:] = img.astype(np.float32) - landmark = np.round(landmark).astype(np.int32) - - for i in range(len(landmark)): - for j in range(-1, 1): - for k in range(-1, 1): - if img.shape[0] - 1 - landmark[i, 1]+j > 0 and \ - img.shape[0] - 1 - landmark[i, 1]+j < img.shape[0] and \ - landmark[i, 0]+k > 0 and \ - landmark[i, 0]+k < img.shape[1]: - lm_img[img.shape[0] - 1 - landmark[i, 1]+j, landmark[i, 0]+k, - :] = np.array([0, 0, 255]) - lm_img = lm_img.astype(np.uint8) - - cv2.imwrite(save_name, lm_img) - - -def load_data(img_name, txt_name): - return cv2.imread(img_name), np.loadtxt(txt_name) - -# create tensorflow graph for landmark detector -def load_lm_graph(graph_filename): - with tf.gfile.GFile(graph_filename, 'rb') as f: - graph_def = tf.GraphDef() - graph_def.ParseFromString(f.read()) - - with tf.Graph().as_default() as graph: - tf.import_graph_def(graph_def, name='net') - img_224 = graph.get_tensor_by_name('net/input_imgs:0') - output_lm = graph.get_tensor_by_name('net/lm:0') - lm_sess = tf.Session(graph=graph) - - return lm_sess,img_224,output_lm - -# landmark detection -def detect_68p(img_path,sess,input_op,output_op): - print('detecting landmarks......') - names = [i for i in sorted(os.listdir( - img_path)) if 'jpg' in i or 'png' in i or 'jpeg' in i or 'PNG' in i] - vis_path = os.path.join(img_path, 'vis') - remove_path = os.path.join(img_path, 'remove') - save_path = os.path.join(img_path, 'landmarks') - if not os.path.isdir(vis_path): - os.makedirs(vis_path) - if not os.path.isdir(remove_path): - os.makedirs(remove_path) - if not os.path.isdir(save_path): - os.makedirs(save_path) - - for i in range(0, len(names)): - name = names[i] - print('%05d' % (i), ' ', name) - full_image_name = os.path.join(img_path, name) - txt_name = '.'.join(name.split('.')[:-1]) + '.txt' - full_txt_name = os.path.join(img_path, 'detections', txt_name) # 5 facial landmark path for each image - - # if an image does not have detected 5 facial landmarks, remove it from the training list - 
if not os.path.isfile(full_txt_name): - move(full_image_name, os.path.join(remove_path, name)) - continue - - # load data - img, five_points = load_data(full_image_name, full_txt_name) - input_img, scale, bbox = align_for_lm(img, five_points) # align for 68 landmark detection - - # if the alignment fails, remove corresponding image from the training list - if scale == 0: - move(full_txt_name, os.path.join( - remove_path, txt_name)) - move(full_image_name, os.path.join(remove_path, name)) - continue - - # detect landmarks - input_img = np.reshape( - input_img, [1, 224, 224, 3]).astype(np.float32) - landmark = sess.run( - output_op, feed_dict={input_op: input_img}) - - # transform back to original image coordinate - landmark = landmark.reshape([68, 2]) + mean_face - landmark[:, 1] = 223 - landmark[:, 1] - landmark = landmark / scale - landmark[:, 0] = landmark[:, 0] + bbox[0] - landmark[:, 1] = landmark[:, 1] + bbox[1] - landmark[:, 1] = img.shape[0] - 1 - landmark[:, 1] - - if i % 100 == 0: - draw_landmarks(img, landmark, os.path.join(vis_path, name)) - save_label(landmark, os.path.join(save_path, txt_name)) diff --git a/spaces/kevinwang676/VoiceChanger/src/face3d/models/facerecon_model.py b/spaces/kevinwang676/VoiceChanger/src/face3d/models/facerecon_model.py deleted file mode 100644 index 7de8ca6eebc50ff1ed52c5ba37d31b43f977b5e1..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/src/face3d/models/facerecon_model.py +++ /dev/null @@ -1,220 +0,0 @@ -"""This script defines the face reconstruction model for Deep3DFaceRecon_pytorch -""" - -import numpy as np -import torch -from src.face3d.models.base_model import BaseModel -from src.face3d.models import networks -from src.face3d.models.bfm import ParametricFaceModel -from src.face3d.models.losses import perceptual_loss, photo_loss, reg_loss, reflectance_loss, landmark_loss -from src.face3d.util import util -from src.face3d.util.nvdiffrast import MeshRenderer -# from src.face3d.util.preprocess import estimate_norm_torch - -import trimesh -from scipy.io import savemat - -class FaceReconModel(BaseModel): - - @staticmethod - def modify_commandline_options(parser, is_train=False): - """ Configures options specific for CUT model - """ - # net structure and parameters - parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure') - parser.add_argument('--init_path', type=str, default='./checkpoints/init_model/resnet50-0676ba61.pth') - parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc') - parser.add_argument('--bfm_folder', type=str, default='./checkpoints/BFM_Fitting/') - parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model') - - # renderer parameters - parser.add_argument('--focal', type=float, default=1015.) - parser.add_argument('--center', type=float, default=112.) - parser.add_argument('--camera_d', type=float, default=10.) - parser.add_argument('--z_near', type=float, default=5.) - parser.add_argument('--z_far', type=float, default=15.) 
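The camera defaults above (focal length 1015 px, principal point / half image size 112 px) fix the renderer's field of view, which `__init__` later in this file derives as `fov = 2 * arctan(center / focal)`. With these defaults that works out to roughly 12.6 degrees, as this quick check shows:

```python
import numpy as np

focal, center = 1015.0, 112.0
fov = 2 * np.arctan(center / focal) * 180 / np.pi
print(f"rasterize_fov ~= {fov:.2f} degrees")  # ~12.59, for a 2 * center = 224 px raster
```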
- - if is_train: - # training parameters - parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r43', 'r50'], help='face recog network structure') - parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth') - parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss') - parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face') - - - # augmentation parameters - parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels') - parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor') - parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree') - - # loss weights - parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss') - parser.add_argument('--w_color', type=float, default=1.92, help='weight for loss loss') - parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss') - parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss') - parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss') - parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss') - parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss') - parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss') - parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss') - - opt, _ = parser.parse_known_args() - parser.set_defaults( - focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15. - ) - if is_train: - parser.set_defaults( - use_crop_face=True, use_predef_M=False - ) - return parser - - def __init__(self, opt): - """Initialize this model class. - - Parameters: - opt -- training/test options - - A few things can be done here. 
- - (required) call the initialization function of BaseModel - - define loss function, visualization images, model names, and optimizers - """ - BaseModel.__init__(self, opt) # call the initialization method of BaseModel - - self.visual_names = ['output_vis'] - self.model_names = ['net_recon'] - self.parallel_names = self.model_names + ['renderer'] - - self.facemodel = ParametricFaceModel( - bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal, center=opt.center, - is_train=self.isTrain, default_name=opt.bfm_model - ) - - fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi - self.renderer = MeshRenderer( - rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center) - ) - - if self.isTrain: - self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc'] - - self.net_recog = networks.define_net_recog( - net_recog=opt.net_recog, pretrained_path=opt.net_recog_path - ) - # loss func name: (compute_%s_loss) % loss_name - self.compute_feat_loss = perceptual_loss - self.comupte_color_loss = photo_loss - self.compute_lm_loss = landmark_loss - self.compute_reg_loss = reg_loss - self.compute_reflc_loss = reflectance_loss - - self.optimizer = torch.optim.Adam(self.net_recon.parameters(), lr=opt.lr) - self.optimizers = [self.optimizer] - self.parallel_names += ['net_recog'] - # Our program will automatically call to define schedulers, load networks, and print networks - - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - - Parameters: - input: a dictionary that contains the data itself and its metadata information. - """ - self.input_img = input['imgs'].to(self.device) - self.atten_mask = input['msks'].to(self.device) if 'msks' in input else None - self.gt_lm = input['lms'].to(self.device) if 'lms' in input else None - self.trans_m = input['M'].to(self.device) if 'M' in input else None - self.image_paths = input['im_paths'] if 'im_paths' in input else None - - def forward(self, output_coeff, device): - self.facemodel.to(device) - self.pred_vertex, self.pred_tex, self.pred_color, self.pred_lm = \ - self.facemodel.compute_for_render(output_coeff) - self.pred_mask, _, self.pred_face = self.renderer( - self.pred_vertex, self.facemodel.face_buf, feat=self.pred_color) - - self.pred_coeffs_dict = self.facemodel.split_coeff(output_coeff) - - - def compute_losses(self): - """Calculate losses, gradients, and update network weights; called in every training iteration""" - - assert self.net_recog.training == False - trans_m = self.trans_m - if not self.opt.use_predef_M: - trans_m = estimate_norm_torch(self.pred_lm, self.input_img.shape[-2]) - - pred_feat = self.net_recog(self.pred_face, trans_m) - gt_feat = self.net_recog(self.input_img, self.trans_m) - self.loss_feat = self.opt.w_feat * self.compute_feat_loss(pred_feat, gt_feat) - - face_mask = self.pred_mask - if self.opt.use_crop_face: - face_mask, _, _ = self.renderer(self.pred_vertex, self.facemodel.front_face_buf) - - face_mask = face_mask.detach() - self.loss_color = self.opt.w_color * self.comupte_color_loss( - self.pred_face, self.input_img, self.atten_mask * face_mask) - - loss_reg, loss_gamma = self.compute_reg_loss(self.pred_coeffs_dict, self.opt) - self.loss_reg = self.opt.w_reg * loss_reg - self.loss_gamma = self.opt.w_gamma * loss_gamma - - self.loss_lm = self.opt.w_lm * self.compute_lm_loss(self.pred_lm, self.gt_lm) - - self.loss_reflc = self.opt.w_reflc * self.compute_reflc_loss(self.pred_tex, 
self.facemodel.skin_mask) - - self.loss_all = self.loss_feat + self.loss_color + self.loss_reg + self.loss_gamma \ - + self.loss_lm + self.loss_reflc - - - def optimize_parameters(self, isTrain=True): - self.forward() - self.compute_losses() - """Update network weights; it will be called in every training iteration.""" - if isTrain: - self.optimizer.zero_grad() - self.loss_all.backward() - self.optimizer.step() - - def compute_visuals(self): - with torch.no_grad(): - input_img_numpy = 255. * self.input_img.detach().cpu().permute(0, 2, 3, 1).numpy() - output_vis = self.pred_face * self.pred_mask + (1 - self.pred_mask) * self.input_img - output_vis_numpy_raw = 255. * output_vis.detach().cpu().permute(0, 2, 3, 1).numpy() - - if self.gt_lm is not None: - gt_lm_numpy = self.gt_lm.cpu().numpy() - pred_lm_numpy = self.pred_lm.detach().cpu().numpy() - output_vis_numpy = util.draw_landmarks(output_vis_numpy_raw, gt_lm_numpy, 'b') - output_vis_numpy = util.draw_landmarks(output_vis_numpy, pred_lm_numpy, 'r') - - output_vis_numpy = np.concatenate((input_img_numpy, - output_vis_numpy_raw, output_vis_numpy), axis=-2) - else: - output_vis_numpy = np.concatenate((input_img_numpy, - output_vis_numpy_raw), axis=-2) - - self.output_vis = torch.tensor( - output_vis_numpy / 255., dtype=torch.float32 - ).permute(0, 3, 1, 2).to(self.device) - - def save_mesh(self, name): - - recon_shape = self.pred_vertex # get reconstructed shape - recon_shape[..., -1] = 10 - recon_shape[..., -1] # from camera space to world space - recon_shape = recon_shape.cpu().numpy()[0] - recon_color = self.pred_color - recon_color = recon_color.cpu().numpy()[0] - tri = self.facemodel.face_buf.cpu().numpy() - mesh = trimesh.Trimesh(vertices=recon_shape, faces=tri, vertex_colors=np.clip(255. * recon_color, 0, 255).astype(np.uint8)) - mesh.export(name) - - def save_coeff(self,name): - - pred_coeffs = {key:self.pred_coeffs_dict[key].cpu().numpy() for key in self.pred_coeffs_dict} - pred_lm = self.pred_lm.cpu().numpy() - pred_lm = np.stack([pred_lm[:,:,0],self.input_img.shape[2]-1-pred_lm[:,:,1]],axis=2) # transfer to image coordinate - pred_coeffs['lm68'] = pred_lm - savemat(name,pred_coeffs) - - - diff --git a/spaces/kiin/andite-anything-v4.0/README.md b/spaces/kiin/andite-anything-v4.0/README.md deleted file mode 100644 index 123712bb84344a5a1efdac1a7c0737a30cfc8c3d..0000000000000000000000000000000000000000 --- a/spaces/kiin/andite-anything-v4.0/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Andite Anything V4.0 -emoji: 👀 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/rxf/rxf_src/__init__.py b/spaces/koajoel/PolyFormer/fairseq/examples/rxf/rxf_src/__init__.py deleted file mode 100644 index 306e232d6f386b26153864601114e162080dcee4..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/rxf/rxf_src/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . 
import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f # noqa diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/__main__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/__main__.py deleted file mode 100644 index d9b2a465d7767b2dcb16107c25c043092fe5c654..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/__main__.py +++ /dev/null @@ -1,100 +0,0 @@ -import sys -from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError -from fontTools.ttLib.ttFont import * -from fontTools.ttLib.ttCollection import TTCollection - - -def main(args=None): - """Open/save fonts with TTFont() or TTCollection() - - ./fonttools ttLib [-oFILE] [-yNUMBER] files... - - If multiple files are given on the command-line, - they are each opened (as a font or collection), - and added to the font list. - - If -o (output-file) argument is given, the font - list is then saved to the output file, either as - a single font, if there is only one font, or as - a collection otherwise. - - If -y (font-number) argument is given, only the - specified font from collections is opened. - - The above allow extracting a single font from a - collection, or combining multiple fonts into a - collection. - - If --lazy or --no-lazy are give, those are passed - to the TTFont() or TTCollection() constructors. - """ - from fontTools import configLogger - - if args is None: - args = sys.argv[1:] - - import argparse - - parser = argparse.ArgumentParser( - "fonttools ttLib", - description="Open/save fonts with TTFont() or TTCollection()", - epilog=""" - If multiple files are given on the command-line, - they are each opened (as a font or collection), - and added to the font list. - - The above, when combined with -o / --output, - allows for extracting a single font from a - collection, or combining multiple fonts into a - collection. - """, - ) - parser.add_argument("font", metavar="font", nargs="*", help="Font file.") - parser.add_argument( - "-o", "--output", metavar="FILE", default=None, help="Output file." - ) - parser.add_argument( - "-y", metavar="NUMBER", default=-1, help="Font number to load from collections." - ) - parser.add_argument( - "--lazy", action="store_true", default=None, help="Load fonts lazily." - ) - parser.add_argument( - "--no-lazy", dest="lazy", action="store_false", help="Load fonts immediately." - ) - parser.add_argument( - "--flavor", - dest="flavor", - default=None, - help="Flavor of output font. 
'woff' or 'woff2'.", - ) - options = parser.parse_args(args) - - fontNumber = int(options.y) if options.y is not None else None - outFile = options.output - lazy = options.lazy - flavor = options.flavor - - fonts = [] - for f in options.font: - try: - font = TTFont(f, fontNumber=fontNumber, lazy=lazy) - fonts.append(font) - except TTLibFileIsCollectionError: - collection = TTCollection(f, lazy=lazy) - fonts.extend(collection.fonts) - - if outFile is not None: - if len(fonts) == 1: - fonts[0].flavor = flavor - fonts[0].save(outFile) - else: - if flavor is not None: - raise TTLibError("Cannot set flavor for collections.") - collection = TTCollection() - collection.fonts = fonts - collection.save(outFile) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/grUtils.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/grUtils.py deleted file mode 100644 index 785684b1eb30a76ae598bfe46416d4556fc422a0..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/grUtils.py +++ /dev/null @@ -1,92 +0,0 @@ -import struct, warnings - -try: - import lz4 -except ImportError: - lz4 = None -else: - import lz4.block - -# old scheme for VERSION < 0.9 otherwise use lz4.block - - -def decompress(data): - (compression,) = struct.unpack(">L", data[4:8]) - scheme = compression >> 27 - size = compression & 0x07FFFFFF - if scheme == 0: - pass - elif scheme == 1 and lz4: - res = lz4.block.decompress(struct.pack("L", (scheme << 27) + (len(data) & 0x07FFFFFF)) - if scheme == 0: - return data - elif scheme == 1 and lz4: - res = lz4.block.compress( - data, mode="high_compression", compression=16, store_size=False - ) - return hdr + res - else: - warnings.warn("Table failed to compress by unsupported compression scheme") - return data - - -def _entries(attrs, sameval): - ak = 0 - vals = [] - lastv = 0 - for k, v in attrs: - if len(vals) and (k != ak + 1 or (sameval and v != lastv)): - yield (ak - len(vals) + 1, len(vals), vals) - vals = [] - ak = k - vals.append(v) - lastv = v - yield (ak - len(vals) + 1, len(vals), vals) - - -def entries(attributes, sameval=False): - g = _entries(sorted(attributes.items(), key=lambda x: int(x[0])), sameval) - return g - - -def bininfo(num, size=1): - if num == 0: - return struct.pack(">4H", 0, 0, 0, 0) - srange = 1 - select = 0 - while srange <= num: - srange *= 2 - select += 1 - select -= 1 - srange //= 2 - srange *= size - shift = num * size - srange - return struct.pack(">4H", num, srange, select, shift) - - -def num2tag(n): - if n < 0x200000: - return str(n) - else: - return ( - struct.unpack("4s", struct.pack(">L", n))[0].replace(b"\000", b"").decode() - ) - - -def tag2num(n): - try: - return int(n) - except ValueError: - n = (n + " ")[:4] - return struct.unpack(">L", n.encode("ascii"))[0] diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/voltLib/ast.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/voltLib/ast.py deleted file mode 100644 index 82c2cca8b7f350bbf2ee579b0978937c22331a2f..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/voltLib/ast.py +++ /dev/null @@ -1,448 +0,0 @@ -from fontTools.voltLib.error import VoltLibError -from typing import NamedTuple - - -class Pos(NamedTuple): - adv: int - 
dx: int - dy: int - adv_adjust_by: dict - dx_adjust_by: dict - dy_adjust_by: dict - - def __str__(self): - res = " POS" - for attr in ("adv", "dx", "dy"): - value = getattr(self, attr) - if value is not None: - res += f" {attr.upper()} {value}" - adjust_by = getattr(self, f"{attr}_adjust_by", {}) - for size, adjustment in adjust_by.items(): - res += f" ADJUST_BY {adjustment} AT {size}" - res += " END_POS" - return res - - -class Element(object): - def __init__(self, location=None): - self.location = location - - def build(self, builder): - pass - - def __str__(self): - raise NotImplementedError - - -class Statement(Element): - pass - - -class Expression(Element): - pass - - -class VoltFile(Statement): - def __init__(self): - Statement.__init__(self, location=None) - self.statements = [] - - def build(self, builder): - for s in self.statements: - s.build(builder) - - def __str__(self): - return "\n" + "\n".join(str(s) for s in self.statements) + " END\n" - - -class GlyphDefinition(Statement): - def __init__(self, name, gid, gunicode, gtype, components, location=None): - Statement.__init__(self, location) - self.name = name - self.id = gid - self.unicode = gunicode - self.type = gtype - self.components = components - - def __str__(self): - res = f'DEF_GLYPH "{self.name}" ID {self.id}' - if self.unicode is not None: - if len(self.unicode) > 1: - unicodes = ",".join(f"U+{u:04X}" for u in self.unicode) - res += f' UNICODEVALUES "{unicodes}"' - else: - res += f" UNICODE {self.unicode[0]}" - if self.type is not None: - res += f" TYPE {self.type}" - if self.components is not None: - res += f" COMPONENTS {self.components}" - res += " END_GLYPH" - return res - - -class GroupDefinition(Statement): - def __init__(self, name, enum, location=None): - Statement.__init__(self, location) - self.name = name - self.enum = enum - self.glyphs_ = None - - def glyphSet(self, groups=None): - if groups is not None and self.name in groups: - raise VoltLibError( - 'Group "%s" contains itself.' 
% (self.name), self.location - ) - if self.glyphs_ is None: - if groups is None: - groups = set({self.name}) - else: - groups.add(self.name) - self.glyphs_ = self.enum.glyphSet(groups) - return self.glyphs_ - - def __str__(self): - enum = self.enum and str(self.enum) or "" - return f'DEF_GROUP "{self.name}"\n{enum}\nEND_GROUP' - - -class GlyphName(Expression): - """A single glyph name, such as cedilla.""" - - def __init__(self, glyph, location=None): - Expression.__init__(self, location) - self.glyph = glyph - - def glyphSet(self): - return (self.glyph,) - - def __str__(self): - return f' GLYPH "{self.glyph}"' - - -class Enum(Expression): - """An enum""" - - def __init__(self, enum, location=None): - Expression.__init__(self, location) - self.enum = enum - - def __iter__(self): - for e in self.glyphSet(): - yield e - - def glyphSet(self, groups=None): - glyphs = [] - for element in self.enum: - if isinstance(element, (GroupName, Enum)): - glyphs.extend(element.glyphSet(groups)) - else: - glyphs.extend(element.glyphSet()) - return tuple(glyphs) - - def __str__(self): - enum = "".join(str(e) for e in self.enum) - return f" ENUM{enum} END_ENUM" - - -class GroupName(Expression): - """A glyph group""" - - def __init__(self, group, parser, location=None): - Expression.__init__(self, location) - self.group = group - self.parser_ = parser - - def glyphSet(self, groups=None): - group = self.parser_.resolve_group(self.group) - if group is not None: - self.glyphs_ = group.glyphSet(groups) - return self.glyphs_ - else: - raise VoltLibError( - 'Group "%s" is used but undefined.' % (self.group), self.location - ) - - def __str__(self): - return f' GROUP "{self.group}"' - - -class Range(Expression): - """A glyph range""" - - def __init__(self, start, end, parser, location=None): - Expression.__init__(self, location) - self.start = start - self.end = end - self.parser = parser - - def glyphSet(self): - return tuple(self.parser.glyph_range(self.start, self.end)) - - def __str__(self): - return f' RANGE "{self.start}" TO "{self.end}"' - - -class ScriptDefinition(Statement): - def __init__(self, name, tag, langs, location=None): - Statement.__init__(self, location) - self.name = name - self.tag = tag - self.langs = langs - - def __str__(self): - res = "DEF_SCRIPT" - if self.name is not None: - res += f' NAME "{self.name}"' - res += f' TAG "{self.tag}"\n\n' - for lang in self.langs: - res += f"{lang}" - res += "END_SCRIPT" - return res - - -class LangSysDefinition(Statement): - def __init__(self, name, tag, features, location=None): - Statement.__init__(self, location) - self.name = name - self.tag = tag - self.features = features - - def __str__(self): - res = "DEF_LANGSYS" - if self.name is not None: - res += f' NAME "{self.name}"' - res += f' TAG "{self.tag}"\n\n' - for feature in self.features: - res += f"{feature}" - res += "END_LANGSYS\n" - return res - - -class FeatureDefinition(Statement): - def __init__(self, name, tag, lookups, location=None): - Statement.__init__(self, location) - self.name = name - self.tag = tag - self.lookups = lookups - - def __str__(self): - res = f'DEF_FEATURE NAME "{self.name}" TAG "{self.tag}"\n' - res += " " + " ".join(f'LOOKUP "{l}"' for l in self.lookups) + "\n" - res += "END_FEATURE\n" - return res - - -class LookupDefinition(Statement): - def __init__( - self, - name, - process_base, - process_marks, - mark_glyph_set, - direction, - reversal, - comments, - context, - sub, - pos, - location=None, - ): - Statement.__init__(self, location) - self.name = name - 
self.process_base = process_base - self.process_marks = process_marks - self.mark_glyph_set = mark_glyph_set - self.direction = direction - self.reversal = reversal - self.comments = comments - self.context = context - self.sub = sub - self.pos = pos - - def __str__(self): - res = f'DEF_LOOKUP "{self.name}"' - res += f' {self.process_base and "PROCESS_BASE" or "SKIP_BASE"}' - if self.process_marks: - res += " PROCESS_MARKS " - if self.mark_glyph_set: - res += f'MARK_GLYPH_SET "{self.mark_glyph_set}"' - elif isinstance(self.process_marks, str): - res += f'"{self.process_marks}"' - else: - res += "ALL" - else: - res += " SKIP_MARKS" - if self.direction is not None: - res += f" DIRECTION {self.direction}" - if self.reversal: - res += " REVERSAL" - if self.comments is not None: - comments = self.comments.replace("\n", r"\n") - res += f'\nCOMMENTS "{comments}"' - if self.context: - res += "\n" + "\n".join(str(c) for c in self.context) - else: - res += "\nIN_CONTEXT\nEND_CONTEXT" - if self.sub: - res += f"\n{self.sub}" - if self.pos: - res += f"\n{self.pos}" - return res - - -class SubstitutionDefinition(Statement): - def __init__(self, mapping, location=None): - Statement.__init__(self, location) - self.mapping = mapping - - def __str__(self): - res = "AS_SUBSTITUTION\n" - for src, dst in self.mapping.items(): - src = "".join(str(s) for s in src) - dst = "".join(str(d) for d in dst) - res += f"SUB{src}\nWITH{dst}\nEND_SUB\n" - res += "END_SUBSTITUTION" - return res - - -class SubstitutionSingleDefinition(SubstitutionDefinition): - pass - - -class SubstitutionMultipleDefinition(SubstitutionDefinition): - pass - - -class SubstitutionLigatureDefinition(SubstitutionDefinition): - pass - - -class SubstitutionReverseChainingSingleDefinition(SubstitutionDefinition): - pass - - -class PositionAttachDefinition(Statement): - def __init__(self, coverage, coverage_to, location=None): - Statement.__init__(self, location) - self.coverage = coverage - self.coverage_to = coverage_to - - def __str__(self): - coverage = "".join(str(c) for c in self.coverage) - res = f"AS_POSITION\nATTACH{coverage}\nTO" - for coverage, anchor in self.coverage_to: - coverage = "".join(str(c) for c in coverage) - res += f'{coverage} AT ANCHOR "{anchor}"' - res += "\nEND_ATTACH\nEND_POSITION" - return res - - -class PositionAttachCursiveDefinition(Statement): - def __init__(self, coverages_exit, coverages_enter, location=None): - Statement.__init__(self, location) - self.coverages_exit = coverages_exit - self.coverages_enter = coverages_enter - - def __str__(self): - res = "AS_POSITION\nATTACH_CURSIVE" - for coverage in self.coverages_exit: - coverage = "".join(str(c) for c in coverage) - res += f"\nEXIT {coverage}" - for coverage in self.coverages_enter: - coverage = "".join(str(c) for c in coverage) - res += f"\nENTER {coverage}" - res += "\nEND_ATTACH\nEND_POSITION" - return res - - -class PositionAdjustPairDefinition(Statement): - def __init__(self, coverages_1, coverages_2, adjust_pair, location=None): - Statement.__init__(self, location) - self.coverages_1 = coverages_1 - self.coverages_2 = coverages_2 - self.adjust_pair = adjust_pair - - def __str__(self): - res = "AS_POSITION\nADJUST_PAIR\n" - for coverage in self.coverages_1: - coverage = " ".join(str(c) for c in coverage) - res += f" FIRST {coverage}" - res += "\n" - for coverage in self.coverages_2: - coverage = " ".join(str(c) for c in coverage) - res += f" SECOND {coverage}" - res += "\n" - for (id_1, id_2), (pos_1, pos_2) in self.adjust_pair.items(): - res += f" {id_1} 
{id_2} BY{pos_1}{pos_2}\n" - res += "\nEND_ADJUST\nEND_POSITION" - return res - - -class PositionAdjustSingleDefinition(Statement): - def __init__(self, adjust_single, location=None): - Statement.__init__(self, location) - self.adjust_single = adjust_single - - def __str__(self): - res = "AS_POSITION\nADJUST_SINGLE" - for coverage, pos in self.adjust_single: - coverage = "".join(str(c) for c in coverage) - res += f"{coverage} BY{pos}" - res += "\nEND_ADJUST\nEND_POSITION" - return res - - -class ContextDefinition(Statement): - def __init__(self, ex_or_in, left=None, right=None, location=None): - Statement.__init__(self, location) - self.ex_or_in = ex_or_in - self.left = left if left is not None else [] - self.right = right if right is not None else [] - - def __str__(self): - res = self.ex_or_in + "\n" - for coverage in self.left: - coverage = "".join(str(c) for c in coverage) - res += f" LEFT{coverage}\n" - for coverage in self.right: - coverage = "".join(str(c) for c in coverage) - res += f" RIGHT{coverage}\n" - res += "END_CONTEXT" - return res - - -class AnchorDefinition(Statement): - def __init__(self, name, gid, glyph_name, component, locked, pos, location=None): - Statement.__init__(self, location) - self.name = name - self.gid = gid - self.glyph_name = glyph_name - self.component = component - self.locked = locked - self.pos = pos - - def __str__(self): - locked = self.locked and " LOCKED" or "" - return ( - f'DEF_ANCHOR "{self.name}"' - f" ON {self.gid}" - f" GLYPH {self.glyph_name}" - f" COMPONENT {self.component}" - f"{locked}" - f" AT {self.pos} END_ANCHOR" - ) - - -class SettingDefinition(Statement): - def __init__(self, name, value, location=None): - Statement.__init__(self, location) - self.name = name - self.value = value - - def __str__(self): - if self.value is True: - return f"{self.name}" - if isinstance(self.value, (tuple, list)): - value = " ".join(str(v) for v in self.value) - return f"{self.name} {value}" - return f"{self.name} {self.value}" diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/fontconfig_pattern.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/fontconfig_pattern.py deleted file mode 100644 index 292435b1487af66db8b0f86ac98ad8d9696e0527..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/fontconfig_pattern.py +++ /dev/null @@ -1,20 +0,0 @@ -import re -from pyparsing import ParseException - -from matplotlib._fontconfig_pattern import * # noqa: F401, F403 -from matplotlib._fontconfig_pattern import ( - parse_fontconfig_pattern, _family_punc, _value_punc) -from matplotlib import _api -_api.warn_deprecated("3.6", name=__name__, obj_type="module") - - -family_unescape = re.compile(r'\\([%s])' % _family_punc).sub -value_unescape = re.compile(r'\\([%s])' % _value_punc).sub -family_escape = re.compile(r'([%s])' % _family_punc).sub -value_escape = re.compile(r'([%s])' % _value_punc).sub - - -class FontconfigPatternParser: - ParseException = ParseException - - def parse(self, pattern): return parse_fontconfig_pattern(pattern) diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/model_base.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/models/model_base.py deleted file mode 100644 index 0ae3bce9453fa21b8ce0e037b437ba738b67f76b..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/model_base.py +++ /dev/null @@ -1,220 +0,0 @@ -import os -import torch -import torch.nn as 
nn -from utils.utils_bnorm import merge_bn, tidy_sequential -from torch.nn.parallel import DataParallel, DistributedDataParallel - - -class ModelBase(): - def __init__(self, opt): - self.opt = opt # opt - self.save_dir = opt['path']['models'] # save models - self.device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu') - self.is_train = opt['is_train'] # training or not - self.schedulers = [] # schedulers - - """ - # ---------------------------------------- - # Preparation before training with data - # Save model during training - # ---------------------------------------- - """ - - def init_train(self): - pass - - def load(self): - pass - - def save(self, label): - pass - - def define_loss(self): - pass - - def define_optimizer(self): - pass - - def define_scheduler(self): - pass - - """ - # ---------------------------------------- - # Optimization during training with data - # Testing/evaluation - # ---------------------------------------- - """ - - def feed_data(self, data): - pass - - def optimize_parameters(self): - pass - - def current_visuals(self): - pass - - def current_losses(self): - pass - - def update_learning_rate(self, n): - for scheduler in self.schedulers: - scheduler.step(n) - - def current_learning_rate(self): - return self.schedulers[0].get_lr()[0] - - def requires_grad(self, model, flag=True): - for p in model.parameters(): - p.requires_grad = flag - - """ - # ---------------------------------------- - # Information of net - # ---------------------------------------- - """ - - def print_network(self): - pass - - def info_network(self): - pass - - def print_params(self): - pass - - def info_params(self): - pass - - def get_bare_model(self, network): - """Get bare model, especially under wrapping with - DistributedDataParallel or DataParallel. - """ - if isinstance(network, (DataParallel, DistributedDataParallel)): - network = network.module - return network - - def model_to_device(self, network): - """Model to device. It also warps models with DistributedDataParallel - or DataParallel. - Args: - network (nn.Module) - """ - network = network.to(self.device) - if self.opt['dist']: - find_unused_parameters = self.opt.get('find_unused_parameters', True) - use_static_graph = self.opt.get('use_static_graph', False) - network = DistributedDataParallel(network, device_ids=[torch.cuda.current_device()], find_unused_parameters=find_unused_parameters) - if use_static_graph: - print('Using static graph. 
Make sure that "unused parameters" will not change during training loop.') - network._set_static_graph() - else: - network = DataParallel(network) - return network - - # ---------------------------------------- - # network name and number of parameters - # ---------------------------------------- - def describe_network(self, network): - network = self.get_bare_model(network) - msg = '\n' - msg += 'Networks name: {}'.format(network.__class__.__name__) + '\n' - msg += 'Params number: {}'.format(sum(map(lambda x: x.numel(), network.parameters()))) + '\n' - msg += 'Net structure:\n{}'.format(str(network)) + '\n' - return msg - - # ---------------------------------------- - # parameters description - # ---------------------------------------- - def describe_params(self, network): - network = self.get_bare_model(network) - msg = '\n' - msg += ' | {:^6s} | {:^6s} | {:^6s} | {:^6s} || {:<20s}'.format('mean', 'min', 'max', 'std', 'shape', 'param_name') + '\n' - for name, param in network.state_dict().items(): - if not 'num_batches_tracked' in name: - v = param.data.clone().float() - msg += ' | {:>6.3f} | {:>6.3f} | {:>6.3f} | {:>6.3f} | {} || {:s}'.format(v.mean(), v.min(), v.max(), v.std(), v.shape, name) + '\n' - return msg - - """ - # ---------------------------------------- - # Save prameters - # Load prameters - # ---------------------------------------- - """ - - # ---------------------------------------- - # save the state_dict of the network - # ---------------------------------------- - def save_network(self, save_dir, network, network_label, iter_label): - save_filename = '{}_{}.pth'.format(iter_label, network_label) - save_path = os.path.join(save_dir, save_filename) - network = self.get_bare_model(network) - state_dict = network.state_dict() - for key, param in state_dict.items(): - state_dict[key] = param.cpu() - torch.save(state_dict, save_path) - - # ---------------------------------------- - # load the state_dict of the network - # ---------------------------------------- - def load_network(self, load_path, network, strict=True, param_key='params'): - network = self.get_bare_model(network) - if strict: - state_dict = torch.load(load_path) - if param_key in state_dict.keys(): - state_dict = state_dict[param_key] - network.load_state_dict(state_dict, strict=strict) - else: - state_dict_old = torch.load(load_path) - if param_key in state_dict_old.keys(): - state_dict_old = state_dict_old[param_key] - state_dict = network.state_dict() - for ((key_old, param_old),(key, param)) in zip(state_dict_old.items(), state_dict.items()): - state_dict[key] = param_old - network.load_state_dict(state_dict, strict=True) - del state_dict_old, state_dict - - # ---------------------------------------- - # save the state_dict of the optimizer - # ---------------------------------------- - def save_optimizer(self, save_dir, optimizer, optimizer_label, iter_label): - save_filename = '{}_{}.pth'.format(iter_label, optimizer_label) - save_path = os.path.join(save_dir, save_filename) - torch.save(optimizer.state_dict(), save_path) - - # ---------------------------------------- - # load the state_dict of the optimizer - # ---------------------------------------- - def load_optimizer(self, load_path, optimizer): - optimizer.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage.cuda(torch.cuda.current_device()))) - - def update_E(self, decay=0.999): - netG = self.get_bare_model(self.netG) - netG_params = dict(netG.named_parameters()) - netE_params = dict(self.netE.named_parameters()) - 
for k in netG_params.keys(): - netE_params[k].data.mul_(decay).add_(netG_params[k].data, alpha=1-decay) - - """ - # ---------------------------------------- - # Merge Batch Normalization for training - # Merge Batch Normalization for testing - # ---------------------------------------- - """ - - # ---------------------------------------- - # merge bn during training - # ---------------------------------------- - def merge_bnorm_train(self): - merge_bn(self.netG) - tidy_sequential(self.netG) - self.define_optimizer() - self.define_scheduler() - - # ---------------------------------------- - # merge bn before testing - # ---------------------------------------- - def merge_bnorm_test(self): - merge_bn(self.netG) - tidy_sequential(self.netG) diff --git a/spaces/lcf001/newbingai/Dockerfile b/spaces/lcf001/newbingai/Dockerfile deleted file mode 100644 index 76697a279c8749d9e55688d29a3f764b03a2b414..0000000000000000000000000000000000000000 --- a/spaces/lcf001/newbingai/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git - -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine - -# 设置工作目录 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . - -# 设置环境变量,此处为随机字符-仅可进行对话,如需绘画,需要修改为自己的token -ENV Go_Proxy_BingAI_USER_TOKEN_1="1rd3l3wyYyGasasas45g6f7w" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] diff --git a/spaces/leave7/kazunaAI2.0/models.py b/spaces/leave7/kazunaAI2.0/models.py deleted file mode 100644 index bdbce8445304abda792f235a4761b831fd6f4d12..0000000000000000000000000000000000000000 --- a/spaces/leave7/kazunaAI2.0/models.py +++ /dev/null @@ -1,351 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import attentions -import commons -import modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - 
kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_lengths, f0=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = x + self.f0_emb(f0).transpose(1,2) - x = self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - - return z, m, logs, x_mask - - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def 
__init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): - embed = self(last_mel) - - return embed - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - **kwargs): - - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = 
filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.enc_p_ = TextEncoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16,0, filter_channels, n_heads, p_dropout) - hps = { - "sampling_rate": 32000, - "inter_channels": 192, - "resblock": "1", - "resblock_kernel_sizes": [3, 7, 11], - "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - "upsample_rates": [10, 8, 2, 2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [16, 16, 4, 4], - "gin_channels": 256, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - def forward(self, c, f0, spec, g=None, mel=None, c_lengths=None, spec_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - if spec_lengths == None: - spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device) - - g = self.emb_g(g).transpose(1,2) - - z_ptemp, m_p, logs_p, _ = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0)) - z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g) - - z_p = self.flow(z, spec_mask, g=g) - z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size) - - # o = self.dec(z_slice, g=g) - o = self.dec(z_slice, g=g, f0=pitch_slice) - - return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, c, f0, g=None, mel=None, c_lengths=None): - if c_lengths == None: - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - g = self.emb_g(g).transpose(1,2) - - z_p, m_p, logs_p, c_mask = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0)) - z = self.flow(z_p, c_mask, g=g, reverse=True) - - o = self.dec(z * c_mask, g=g, f0=f0) - - return o diff --git a/spaces/lewisliuX123/wechatglm_demo/channel/channel_factory.py b/spaces/lewisliuX123/wechatglm_demo/channel/channel_factory.py deleted file mode 100644 index bfeaacfd835dec6b69109e025e43c8b6eacb121b..0000000000000000000000000000000000000000 --- a/spaces/lewisliuX123/wechatglm_demo/channel/channel_factory.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -channel factory -""" - -def create_channel(channel_type): - """ - create a channel instance - :param channel_type: channel type code - :return: channel instance - """ - if channel_type == 'wx': - from channel.wechat.wechat_channel import WechatChannel - return WechatChannel() - elif channel_type == 'wxy': - from channel.wechat.wechaty_channel import WechatyChannel - return WechatyChannel() - raise RuntimeError diff --git a/spaces/liimefruit/RVCollection/infer_pack/onnx_inference.py b/spaces/liimefruit/RVCollection/infer_pack/onnx_inference.py deleted file mode 100644 index 94da183b9363b38e9624d1c52510c8f0775e560b..0000000000000000000000000000000000000000 --- a/spaces/liimefruit/RVCollection/infer_pack/onnx_inference.py +++ /dev/null @@ -1,142 +0,0 @@ -import onnxruntime -import librosa -import numpy as 
np -import soundfile - -class ContentVec: - def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None): - print("load model(s) from {}".format(vec_path)) - if device == "cpu" or device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(vec_path, providers=providers) - - def __call__(self, wav): - return self.forward(wav) - - def forward(self, wav): - feats = wav - if feats.ndim == 2: # double channels - feats = feats.mean(-1) - assert feats.ndim == 1, feats.ndim - feats = np.expand_dims(np.expand_dims(feats, 0), 0) - onnx_input = {self.model.get_inputs()[0].name: feats} - logits = self.model.run(None, onnx_input)[0] - return logits.transpose(0, 2, 1) - - -def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs): - if f0_predictor == "pm": - from infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor - - f0_predictor_object = PMF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "harvest": - from infer_pack.modules.F0Predictor.HarvestF0Predictor import HarvestF0Predictor - - f0_predictor_object = HarvestF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "dio": - from infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor - - f0_predictor_object = DioF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - else: - raise Exception("Unknown f0 predictor") - return f0_predictor_object - - -class OnnxRVC: - def __init__( - self, - model_path, - sr=40000, - hop_size=512, - vec_path="vec-768-layer-12", - device="cpu", - ): - vec_path = f"pretrained/{vec_path}.onnx" - self.vec_model = ContentVec(vec_path, device) - if device == "cpu" or device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(model_path, providers=providers) - self.sampling_rate = sr - self.hop_size = hop_size - - def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd): - onnx_input = { - self.model.get_inputs()[0].name: hubert, - self.model.get_inputs()[1].name: hubert_length, - self.model.get_inputs()[2].name: pitch, - self.model.get_inputs()[3].name: pitchf, - self.model.get_inputs()[4].name: ds, - self.model.get_inputs()[5].name: rnd, - } - return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16) - - def inference( - self, - raw_path, - sid, - f0_method="dio", - f0_up_key=0, - pad_time=0.5, - cr_threshold=0.02, - ): - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0_predictor = get_f0_predictor( - f0_method, - hop_length=self.hop_size, - sampling_rate=self.sampling_rate, - threshold=cr_threshold, - ) - wav, sr = librosa.load(raw_path, sr=self.sampling_rate) - org_length = len(wav) - if org_length / sr > 50.0: - raise RuntimeError("Reached Max Length") - - wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000) - wav16k = wav16k - - hubert = self.vec_model(wav16k) - hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32) - hubert_length = hubert.shape[1] - - 
pitchf = f0_predictor.compute_f0(wav, hubert_length) - pitchf = pitchf * 2 ** (f0_up_key / 12) - pitch = pitchf.copy() - f0_mel = 1127 * np.log(1 + pitch / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - pitch = np.rint(f0_mel).astype(np.int64) - - pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32) - pitch = pitch.reshape(1, len(pitch)) - ds = np.array([sid]).astype(np.int64) - - rnd = np.random.randn(1, 192, hubert_length).astype(np.float32) - hubert_length = np.array([hubert_length]).astype(np.int64) - - out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze() - out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant") - return out_wav[0:org_length] \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Driver Talent Pro 7.1.28.100 ((INSTALL)).md b/spaces/lincquiQcaudo/Top-20-Diffusion/Driver Talent Pro 7.1.28.100 ((INSTALL)).md deleted file mode 100644 index f0bfc8c8b4d5d965b98f8b84e850c1acac9d5e42..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Driver Talent Pro 7.1.28.100 ((INSTALL)).md +++ /dev/null @@ -1,141 +0,0 @@ - -

              Driver Talent Pro 7.1.28.100: A Comprehensive Guide

              -

              If you are looking for a reliable and easy-to-use tool to update and manage your drivers on Windows PC, you may want to check out Driver Talent Pro 7.1.28.100. This software can help you solve various driver issues, such as outdated, missing, corrupted, faulty, or incompatible drivers, with just a few clicks.

              -

              Driver Talent Pro 7.1.28.100


Download File: https://bytlly.com/2uGyqc



              -

              In this article, we will give you a comprehensive guide on Driver Talent Pro 7.1.28.100, including its features, benefits, system requirements, installation process, and more. By the end of this article, you will have a clear idea of whether Driver Talent Pro 7.1.28.100 is the right choice for you.

              - -

              What is Driver Talent Pro 7.1.28.100?

              -

              Driver Talent Pro 7.1.28.100 is the latest version of Driver Talent Pro, a professional Windows driver download and update utility developed by OSToto Co., Ltd. It was released on September 27, 2022, and has received positive feedback from users and experts alike.

              -

              Driver Talent Pro 7.1.28.100 can scan your entire computer and find all the drivers that need to be updated, repaired, or replaced. It can also download and install the best-matched drivers for your computer hardware and all connected peripheral devices, such as printer, scanner, webcam, keyboard, mouse, etc.

              -

              Moreover, Driver Talent Pro 7.1.28.100 can also backup, restore, uninstall, and pre-download drivers with one click. You can use it to backup your drivers before updating them or to restore them if something goes wrong. You can also use it to uninstall unwanted drivers or to pre-download drivers for another PC.

              -

              -

Driver Talent Pro 7.1.28.100 supports driver downloads and updates for all hardware devices and manufacturers, and is compatible with Windows 10/8.1/8/7/XP/Vista and Windows Server.

              - -

              What are the features and benefits of Driver Talent Pro 7.1.28.100?

              -

              Driver Talent Pro 7.1.28.100 has many features and benefits that make it stand out from other driver updater tools on the market.

              -
• It can scan your computer quickly and accurately to find all the driver problems.
• It can download and install the latest drivers from a large online database that contains over 500 million driver files.
• It can update your drivers to the most recent versions without any compatibility issues.
• It can repair and fix all the driver errors with one click.
• It can backup and restore your drivers in case of any emergency.
• It can uninstall and remove any unwanted or corrupted drivers from your system.
• It can pre-download and save drivers for another PC that has no internet connection or network issues.
• It can keep your computer and devices in top condition and performance.
• It has a user-friendly interface that is easy to navigate and operate.
• It has a lifetime license that allows you to use it on up to three PCs with free updates.
              - -

              What are the system requirements for Driver Talent Pro 7.1.28.100?

              -

To run Driver Talent Pro 7.1.28.100 smoothly on your PC, you need to meet the following system requirements (a short script to check them is sketched after the list):

              -
• Supported OS: Windows 11/10/8.1/8/7/XP/Vista or Windows Server
• Processor: Intel Pentium 4 or higher
• RAM: At least 2 GB (4 GB recommended)
• Disk Space: At least 200 MB of free space
• Internet Connection: Required for downloading and updating drivers
              - -
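If you want to sanity-check these requirements before installing, most of them can be verified with a few lines of standard-library Python. This is only an illustrative sketch, not part of Driver Talent Pro: the 200 MB figure comes from the list above, and the `C:\` install drive is an assumption (RAM can be checked with the optional psutil package).

```python
import platform
import shutil

MIN_FREE_DISK_MB = 200  # minimum free disk space from the requirements list above


def check_requirements(install_drive="C:\\"):
    # Report the OS name and version as seen by the standard library.
    print(f"OS: {platform.system()} {platform.release()}")

    # Check free space on the drive where the program would be installed.
    free_mb = shutil.disk_usage(install_drive).free / (1024 * 1024)
    status = "OK" if free_mb >= MIN_FREE_DISK_MB else "too low"
    print(f"Free disk space on {install_drive}: {free_mb:.0f} MB ({status})")


if __name__ == "__main__":
    check_requirements()
```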

              How to download and install Driver Talent Pro 7.1.28.100?

              -

              To download and install Driver Talent Pro 7.1.28.100 on your PC, you need to follow these simple steps:

              -
1. Go to the official website of Driver Talent Pro at https://www.drivethelife.com/driver-talent-pro.html
2. Click on the "Buy Now" button to purchase the lifetime license for $29 (or $19 if you use the coupon code "DT20OFF")
3. You will receive an email with the license code and the download link for Driver Talent Pro 7.1.28.100
4. Click on the download link to download the setup file (about 26 MB) to your PC
5. Double-click on the setup file to launch the installation wizard
6. Follow the on-screen instructions to complete the installation process
7. Enter your license code to activate Driver Talent Pro 7.1.28.100
8. You are ready to use Driver Talent Pro 7.1.28.100 to update and manage your drivers
              - -

How to use Driver Talent Pro 7.1.28.100?

-

To use Driver Talent Pro 7.1.28.100 to update and manage your drivers, you need to follow these simple steps:

              -
1. Launch Driver Talent Pro 7.1.28.100 on your PC
2. The program will automatically scan your computer for any driver issues
3. You will see a list of all the outdated, missing, corrupted, faulty or incompatible drivers on your PC
4. You can choose to update all the drivers with one click or select specific drivers to update individually
5. The program will download and install the best-matched drivers for your devices from its online database
6. You can also use the menu options on the left side of the interface to backup, restore, uninstall or pre-download drivers as you wish
7. You can also access more settings and features by clicking on the gear icon at the top right corner of the interface
8. You can restart your computer after updating or installing drivers to make them take effect

                How to backup and restore drivers with Driver Talent Pro 7.1.28.100?

                -

                One of the useful features of Driver Talent Pro 7.1.28.100 is that it can backup and restore your drivers in case of any emergency. This can help you avoid driver-related issues such as system crashes, blue screens, or device malfunctions.

                -

To backup and restore your drivers with Driver Talent Pro 7.1.28.100, you need to follow these simple steps (a scriptable Windows alternative is sketched after the list):

                -
1. Launch Driver Talent Pro 7.1.28.100 on your PC
2. Click on the "Backup" option on the left side of the interface
3. You will see a list of all the drivers that can be backed up on your PC
4. You can choose to backup all the drivers or select specific drivers to backup individually
5. Click on the "Start" button to begin the backup process
6. The program will backup your drivers and save them in a default location on your PC
7. You can also change the backup location by clicking on the "Settings" icon at the top right corner of the interface
8. To restore your drivers, click on the "Restore" option on the left side of the interface
9. You will see a list of all the backup files that you have created with Driver Talent Pro 7.1.28.100
10. You can choose to restore all the drivers or select specific drivers to restore individually
11. Click on the "Restore" button to begin the restore process
12. The program will restore your drivers and ask you to restart your computer to make them take effect
                - -
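Driver Talent Pro does all of the above through its own interface. If you ever need a scriptable fallback, recent versions of Windows ship the built-in pnputil command, which can export every installed third-party driver package; the sketch below simply wraps that command from Python. It is an alternative under stated assumptions (an administrator prompt, Windows 10 or later, and a hypothetical C:\DriverBackup folder), not a description of Driver Talent Pro's own backup mechanism.

```python
import subprocess
from pathlib import Path


def export_drivers(backup_dir=r"C:\DriverBackup"):
    """Export all installed third-party driver packages with Windows' pnputil.

    Run from an elevated (administrator) prompt on Windows 10 or later.
    """
    Path(backup_dir).mkdir(parents=True, exist_ok=True)
    # "pnputil /export-driver * <target>" copies every OEM driver package
    # (the INF file plus its payload) into the target folder.
    subprocess.run(["pnputil", "/export-driver", "*", backup_dir], check=True)


if __name__ == "__main__":
    export_drivers()
```

To put an exported package back later, the matching built-in command is "pnputil /add-driver <path-to-inf> /install".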

                How to uninstall and remove drivers with Driver Talent Pro 7.1.28.100?

                -

                Another useful feature of Driver Talent Pro 7.1.28.100 is that it can uninstall and remove any unwanted or corrupted drivers from your system. This can help you free up disk space, improve system performance, and avoid driver conflicts.

                -

                To uninstall and remove drivers with Driver Talent Pro 7.1.28.100, you need to follow these simple steps:

                -
1. Launch Driver Talent Pro 7.1.28.100 on your PC
2. Click on the "Uninstall" option on the left side of the interface
3. You will see a list of all the drivers that can be uninstalled and removed from your PC
4. You can choose to uninstall all the drivers or select specific drivers to uninstall individually
5. Click on the "Uninstall" button to begin the uninstallation process
6. The program will uninstall and remove your drivers and ask you to restart your computer to make them take effect
                - -

How to pre-download and save drivers with Driver Talent Pro 7.1.28.100?

                -

A final useful feature of Driver Talent Pro 7.1.28.100 is that it can pre-download and save drivers for another PC that has no internet connection or network issues. This can help you install drivers on another PC without any hassle.

-

To pre-download and save drivers with Driver Talent Pro 7.1.28.100, you need to follow these simple steps:

-

1. Launch Driver Talent Pro 7.1.28.100 on the PC that has an internet connection
2. Click on the "Pre-download" option on the left side of the interface
3. You will see two options: "Pre-download for this PC" and "Pre-download for another PC"
4. Select "Pre-download for another PC" and click on "Next"
5. The program will ask you to select a destination folder where you want to save the driver files
6. Click on "Browse" to choose a folder or use the default one
7. Click on "Scan" to scan your PC for hardware information
8. The program will show you a list of all the compatible drivers for your PC's hardware configuration
9. You can choose to download all the drivers or select specific drivers to download individually
10. Click on "Start" to begin the download process
11. The program will download and save your drivers in the destination folder you have chosen
12. You can copy or transfer these driver files to another PC that has no internet connection or network issues
13. You can use Driver Talent Pro 7.1.28.100 or any other driver installer tool to install these driver files on another PC

                  What are the pros and cons of Driver Talent Pro 7.1.28.100?

                  -

Driver Talent Pro 7.1.28.100 is not a perfect piece of software, and it has its own pros and cons that you should consider before using it.

                  -

                  Some of the pros of Driver Talent Pro 7.1.28.100 are:

                  -
• It is easy to use and has a user-friendly interface.
• It can scan and update your drivers quickly and accurately.
• It can download and install drivers from a large online database that contains over 500 million driver files.
• It can backup and restore your drivers in case of any emergency.
• It can uninstall and remove any unwanted or corrupted drivers from your system.
• It can pre-download and save drivers for another PC that has no internet connection or network issues.
• It has a lifetime license that allows you to use it on up to three PCs with free updates.
                  -

                  Some of the cons of Driver Talent Pro 7.1.28.100 are:

                  -
• It can't modify the automatic scanning schedule.
• It must download each driver one at a time (no bulk downloads).
• It doesn't install drivers automatically.
• It shows several prompts to buy the Pro version.
• It may be identified as malware by some antivirus programs.
                  - -

                  How to contact the support team of Driver Talent Pro 7.1.28.100?

                  -

If you have any questions, problems, or feedback about Driver Talent Pro 7.1.28.100, you can contact the support team of OSToto Co., Ltd., the developer of the software.

                  -

                  You can contact them by email at support@drivethelife.com or by phone at +86-755-2650-9948.

                  -

                  You can also visit their official website at https://www.drivethelife.com/ or their Facebook page at https://www.facebook.com/OSTotoOfficial/ for more information and updates.

                  - -

                  Conclusion

                  -

Driver Talent Pro 7.1.28.100 is a professional Windows driver download and update utility that can help you solve various driver issues on your PC. It can scan, download, install, update, repair, backup, restore, uninstall, and pre-download drivers with ease and efficiency.

-

Driver Talent Pro 7.1.28.100 has many features and benefits that make it stand out from other driver updater tools on the market. However, it also has some drawbacks that you should be aware of before using it.

-

If you are looking for a reliable and easy-to-use tool to update and manage your drivers on Windows PC, you may want to give Driver Talent Pro 7.1.28.100 a try. You can download it from its official website or from other trusted sources online.

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Gta San Andreas Hoodlum Crack Fix PATCHED.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Gta San Andreas Hoodlum Crack Fix PATCHED.md deleted file mode 100644 index a7ee7d1824a5d7cf43b9db3a86a1d295092275b2..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Gta San Andreas Hoodlum Crack Fix PATCHED.md +++ /dev/null @@ -1,11 +0,0 @@ -

                  gta san andreas hoodlum crack fix


Download Zip: https://bytlly.com/2uGxvh



Please see: "Parthokko (Official Music Video) Basar | Emon | Bangla Rap Song 2019". We are proud that you are watching our channel. Thank you for watching.

Text: Emon, I see you for the first time, But you want me for the third time. I don't see you, but I know that you are. I see you for the first time, But you want me for the third time. I see you in
                  -
                  -
                  -

                  diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Igoprimoexedownload.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Igoprimoexedownload.md deleted file mode 100644 index 283bbe658abad592e4a1e3189fa93e642e2d2eef..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Igoprimoexedownload.md +++ /dev/null @@ -1,26 +0,0 @@ -
                  -

                  How to Download and Install iGO Primo Maps on Your GPS Device

                  -

                  iGO Primo is a popular navigation software that can help you find your way around the world. It offers high-quality maps, accurate routing, voice guidance, speed camera alerts, and more. But how do you download and install iGO Primo maps on your GPS device? Here are some steps to follow:

                  -
1. First, you need to download the latest iGO Primo map files from a reliable source. You can find them on various websites or forums, such as mygpsmaps.com or gpspower.net. Make sure you choose the map files that are compatible with your device and region.
2. Next, you need to copy the downloaded map files to an SD card. You can use a Windows computer or a MacBook to do this. However, if you use a MacBook, you may encounter some errors due to the different file systems. To avoid this, you can format the SD card to FAT32 before copying the files (a small copy-script sketch follows this list).
3. Then, you need to insert the SD card into the GPS card slot of your navigation unit. You may need to turn off your device and turn it on again for it to recognize the SD card.
4. Finally, you need to set the GPS path in the settings of your device. You need to locate the iGO Primo executable file (usually named igo.exe or primo.exe) on the SD card and select it as the GPS path. This will make your device run iGO Primo software and load the new maps.
                  -
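If you prefer to script step 2 instead of dragging folders around, a few lines of Python can copy the map content onto the card and confirm there is enough free space first. The paths below are placeholders for illustration only ("iGO_Primo_maps" for the downloaded files and "E:/" for the SD card); adjust them to your own download folder and card drive.

```python
import shutil
from pathlib import Path

# Placeholder paths: change these to your actual download folder and SD card.
SOURCE = Path("iGO_Primo_maps")   # extracted map files (content, license, igo.exe, ...)
SD_CARD = Path("E:/")             # drive letter / mount point of the FAT32 SD card


def copy_maps(source: Path, card: Path) -> None:
    # Make sure the card has room for everything we are about to copy.
    needed = sum(f.stat().st_size for f in source.rglob("*") if f.is_file())
    free = shutil.disk_usage(card).free
    if free < needed:
        raise SystemExit(f"Not enough space on {card}: need {needed} bytes, have {free}")

    # Copy the whole map folder onto the card, keeping the directory layout
    # that the iGO Primo executable expects to find next to it.
    shutil.copytree(source, card / source.name, dirs_exist_ok=True)
    print("Done. Safely eject the card before moving it to the GPS unit.")


if __name__ == "__main__":
    copy_maps(SOURCE, SD_CARD)
```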

                  Congratulations! You have successfully downloaded and installed iGO Primo maps on your GPS device. Now you can enjoy the benefits of this advanced navigation software and explore new places with ease.

                  -

                  igoprimoexedownload


                  Download File: https://bytlly.com/2uGwnR



                  - -

                  What are some features of iGO Primo software?

                  -

                  iGO Primo software is not only a map viewer, but also a powerful navigation tool that offers many features to enhance your driving experience. Here are some of them:

                  -
                  • Destination guidance: iGO Primo provides voice guidance and a text-to-speech (TTS) function that announces street names and other information. You can also choose from different routing options, such as fastest, shortest, economical, or easy. For truck drivers and caravan owners, there is a special truck version that adapts the route to your vehicle's dimensions and restrictions.
                  • Lane assistant and junction view: iGO Primo helps you find the right lane before turning or exiting a highway. It also shows realistic 3D images of junctions and road signs to avoid confusion.
                  • Speed warning and speed camera alerts: iGO Primo warns you when you exceed the speed limit or approach a speed camera. You can also customize the warning settings and update the speed camera database online.
                  • Traffic information and rerouting: iGO Primo receives real-time traffic information via TMC (Traffic Message Channel) or an internet connection. It shows the traffic conditions on your route and suggests alternative routes to avoid congestion.
                  • Location-based search and POI: iGO Primo lets you search for destinations by address, coordinates, category, or keyword. You can also use Google Local Search to find nearby places of interest, such as restaurants, hotels, and gas stations.
                  • Off-road navigation: iGO Primo lets you navigate in areas where there are no roads or maps. You can use point-to-point navigation to create your own route by selecting waypoints on the map, and you can record your track and save it for later use.
                  • Automatic speech recognition: iGO Primo supports voice control for hands-free operation. You can use voice commands to enter destinations, change settings, or access functions.
                  -

                  iGO Primo software is a versatile and user-friendly navigation solution that can make your journey easier and safer. You can download and install iGO Primo maps on your GPS device by following the steps above. If you have any questions or need assistance, please contact us at support@mygpsmaps.com.

                  d5da3c52bf
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/LOST Progressive House Sample Pack WAV FLP FXP FXB.md b/spaces/lincquiQcaudo/Top-20-Diffusion/LOST Progressive House Sample Pack WAV FLP FXP FXB.md deleted file mode 100644 index 8ad85cc05edbc5ca6b573435c3c2bda30a01e14c..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/LOST Progressive House Sample Pack WAV FLP FXP FXB.md +++ /dev/null @@ -1,9 +0,0 @@ - -

                  EDM Genesis Sample Pack [WAV FLP MIDI FXP FLP]. What about: EDM Genesis is an amazing-value sample pack featuring 2.3 GB+ of the best electro, big room, and progressive sounds, samples, kits, and templates.

                  -

                  LOST Progressive House Sample Pack WAV FLP FXP FXB


                  Download Zip >>>>> https://bytlly.com/2uGwlV



                  -

                  The Psy-Riotor Progressive House Essentials pack is a powerful collection of 808s, rich melodies, lush rhythms, and gorgeous chord progressions. The pack comes complete with a wide collection of solid production instruments: from uplifting melodies and dynamic drum riffs to melodic hooks and addictive synths, the Psy-Riotor collection has it all! Available in WAV, MIDI, AU, and VST.

                  -

                  The Psy-Riotor: Prog House Essentials pack is full of soul-stirring melodies, head-nodding rhythms, and more. This collection of well-crafted production instruments has all the elements to set your track apart from the competition. It features all the essentials for your next underground dancefloor masterpiece, perfect for any producer looking to make an incredible impact on dancefloors around the world.

                  -

                  The Psy-Riotor: Melodic House Essentials pack is packed with uplifting melodies and powerful arrangements that will drive your productions. This collection of well-crafted production instruments has all the elements to set your track apart from the competition. It features all the essentials for your next underground dancefloor masterpiece, perfect for any producer looking to make an incredible impact on dancefloors around the world.

                  -

                  899543212b
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py b/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py deleted file mode 100644 index 5f78337a3d1f9eb6e9145eb5093618796c6842d2..0000000000000000000000000000000000000000 --- a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r34.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r34" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/lithiumice/SadTalker/src/facerender/sync_batchnorm/replicate.py b/spaces/lithiumice/SadTalker/src/facerender/sync_batchnorm/replicate.py deleted file mode 100644 index b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06..0000000000000000000000000000000000000000 --- a/spaces/lithiumice/SadTalker/src/facerender/sync_batchnorm/replicate.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# File : replicate.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import functools - -from torch.nn.parallel.data_parallel import DataParallel - -__all__ = [ - 'CallbackContext', - 'execute_replication_callbacks', - 'DataParallelWithCallback', - 'patch_replication_callback' -] - - -class CallbackContext(object): - pass - - -def execute_replication_callbacks(modules): - """ - Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. - - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Note that, as all modules are isomorphism, we assign each sub-module with a context - (shared among multiple copies of this module on different devices). - Through this context, different copies can share some information. - - We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback - of any slave copies. - """ - master_copy = modules[0] - nr_modules = len(list(master_copy.modules())) - ctxs = [CallbackContext() for _ in range(nr_modules)] - - for i, module in enumerate(modules): - for j, m in enumerate(module.modules()): - if hasattr(m, '__data_parallel_replicate__'): - m.__data_parallel_replicate__(ctxs[j], i) - - -class DataParallelWithCallback(DataParallel): - """ - Data Parallel with a replication callback. - - An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by - original `replicate` function. - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - # sync_bn.__data_parallel_replicate__ will be invoked. 
- """ - - def replicate(self, module, device_ids): - modules = super(DataParallelWithCallback, self).replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - -def patch_replication_callback(data_parallel): - """ - Monkey-patch an existing `DataParallel` object. Add the replication callback. - Useful when you have customized `DataParallel` implementation. - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) - > patch_replication_callback(sync_bn) - # this is equivalent to - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - """ - - assert isinstance(data_parallel, DataParallel) - - old_replicate = data_parallel.replicate - - @functools.wraps(old_replicate) - def new_replicate(module, device_ids): - modules = old_replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - data_parallel.replicate = new_replicate diff --git a/spaces/liyucheng/selective_context/app.py b/spaces/liyucheng/selective_context/app.py deleted file mode 100644 index aae9a73c23272488a5ef89c7676bded8658d1a87..0000000000000000000000000000000000000000 --- a/spaces/liyucheng/selective_context/app.py +++ /dev/null @@ -1,276 +0,0 @@ -from transformers import GPT2Tokenizer, GPT2LMHeadModel, BertTokenizer -import torch -import streamlit as st -import re -from typing import List, Tuple -import spacy -import numpy as np -from dataclasses import dataclass -from nltk.tokenize import sent_tokenize, word_tokenize - -DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' -st.set_page_config(layout="wide") - -@dataclass -class LexicalUnits: - unit_type: str - text: List[str] - self_info: List[float] = None - - def __add__(self, other): - assert self.unit_type == other.unit_type, 'Cannot add two different unit types' - return LexicalUnits(self.unit_type, self.text + other.text, self.self_info + other.self_info) - - def __radd__(self, other): - if other == 0: - return self - return NotImplementedError() - - def add_to_head(self, token, self_info): - return LexicalUnits(self.unit_type, [token] + self.text, [self_info] + self.self_info) - - def add_to_tail(self, token, self_info): - return LexicalUnits(self.unit_type, self.text + [token], self.self_info + [self_info]) - -class SelectiveContext: - - def __init__(self, model_type = 'gpt2', lang = 'en'): - - self.model_type = model_type - self.lang = lang - - # this means we calculate self-information sentence by sentence - self.sent_level_self_info = True - - self._prepare_phrase_tokenizer() - self.sent_tokenize_pattern = r"(?" 
- - self._prepare_model() - - def _prepare_phrase_tokenizer(self): - # we use space to tokenize sentence into phrases - # for English, we should use `spacy.load("en_core_web_sm").add_pipe('merge_noun_chunks')` - # for Chinese, use `nlp = spacy.load('zh_core_web_sm')`` directly - lang = self.lang - if lang == "en": - self.nlp = spacy.load("en_core_web_sm", disable=["ner"]) - self.nlp.add_pipe('merge_noun_chunks') - elif lang == "zh": - self.nlp = spacy.load('zh_core_web_sm', disable=["ner"]) - - def _prepare_model(self): - if self.model_type == 'gpt2': - if self.lang == 'zh': - self.model = GPT2LMHeadModel.from_pretrained('uer/gpt2-chinese-cluecorpussmall') - self.tokenizer = BertTokenizer.from_pretrained('uer/gpt2-chinese-cluecorpussmall') - else: - self.model = GPT2LMHeadModel.from_pretrained('gpt2') - self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2') - self.model.to(DEVICE) - self.model.eval() - - print('model loaded') - - self.max_token_length = self.model.config.n_positions - self.get_self_information = self._get_self_info_via_gpt2 - - def get_self_information(self, text: str) -> Tuple[List[str], List[float]]: - # it takes text as input, and return a list of words and a list of self-information scores - raise NotImplementedError - - def _get_self_info_via_gpt2(self, text: str) -> Tuple[List[str], List[float]]: - if self.lang == 'en': - text = f"<|endoftext|>{text}" - elif self.lang == 'zh': - text = f"[CLS]{text}" - with torch.no_grad(): - encoding = self.tokenizer(text, add_special_tokens=False, return_tensors='pt') - encoding = encoding.to(DEVICE) - outputs = self.model(**encoding) - logits = outputs.logits - probs = torch.softmax(logits, dim=-1) - self_info = -torch.log(probs) - - input_ids = encoding['input_ids'] - input_ids_expaned = input_ids[:, 1:].unsqueeze(-1) - - tokens = [self.tokenizer.decode(token_) for token_ in input_ids.squeeze().tolist()[1:]] - return tokens, self_info[:, :-1].gather(-1, input_ids_expaned).squeeze(-1).squeeze(0).tolist() - - def _lexical_unit(self, sents): - - if self.sent_level_self_info: - sent_self_info = [] - all_noun_phrases = [] - all_noun_phrases_info = [] - all_tokens = [] - all_token_self_info = [] - - for sent in sents: - print(sent) - tokens, self_info = self.get_self_information(sent) - sent_self_info.append(np.mean(self_info)) - - all_tokens.extend(tokens) - all_token_self_info.extend(self_info) - - noun_phrases, noun_phrases_info = self._calculate_lexical_unit(tokens, self_info) - - # We need to add a space before the first noun phrase for every sentence except the first one - if len(all_noun_phrases) != 0: - noun_phrases[0] = f" {noun_phrases[0]}" - all_noun_phrases.extend(noun_phrases) - all_noun_phrases_info.extend(noun_phrases_info) - - return [ - LexicalUnits('sent', text=sents, self_info=sent_self_info), - LexicalUnits('phrase', text=all_noun_phrases, self_info=all_noun_phrases_info), - LexicalUnits('token', text=all_tokens, self_info=all_token_self_info) - ] - - def _calculate_lexical_unit(self, tokens, self_info): - def _unit_info(tokens, self_info, units): - current_unit_idx = 0 - current_position = 0 - unit_self_info = [[] for _ in range(len(units))] - - for idx, (token, info) in enumerate(zip(tokens, self_info)): - current_position += len(token) - if current_position == len(units[current_unit_idx]): - unit_self_info[current_unit_idx].append(info) - current_position = current_position - len(units[current_unit_idx]) - current_unit_idx += 1 - elif current_position > len(units[current_unit_idx]): - counter_ = 1 - 
current_position = current_position - len(units[current_unit_idx]) - current_unit_idx += 1 - while current_position >= len(units[current_unit_idx]): - counter_ += 1 - current_position = current_position - len(units[current_unit_idx]) - current_unit_idx += 1 - if current_unit_idx >= len(units): - break - partial_info = info/counter_ - for _ in range(counter_): - unit_self_info[(current_unit_idx-1) - _].append(partial_info) - else: - if token == " ": - continue - unit_self_info[current_unit_idx].append(info) - - unit_self_info_ = [np.mean(info) for info in unit_self_info] - return unit_self_info_ - - def _noun_phrases(sent): - noun_phrases = [] - doc = self.nlp(sent) - for index, chunk in enumerate(doc): - if index == 0: - noun_phrases.append(chunk.text) - else: - noun_phrases.append(doc[index-1].whitespace_ + chunk.text) - return noun_phrases - - if self.sent_level_self_info: - # in this case, the self_info is for each sentence - # we only need to calculate the self_info for each phrase - - sent = ''.join(tokens) - # noun_phrases = [chunk.text for chunk in self.nlp(sent).noun_chunks] - noun_phrases = _noun_phrases(sent) - # noun_phrases[-1] = noun_phrases[-1] + ' ' - noun_phrases_info = _unit_info(tokens, self_info, noun_phrases) - - return noun_phrases, noun_phrases_info - - def beautify_context(self, context: str) -> str: - context = re.sub(r"\s+", " ", context) - return context - - def self_info_mask(self, sents: List[str], self_info: List[float], mask_level): - # mask_level: mask sentences, phrases, or tokens - sents_after_mask = [] - masked_sents = [] - - self.ppl_threshold = np.nanpercentile(self_info, self.mask_ratio * 100) - - # if title is not None: - # with open(os.path.join(self.path, title+'_prob_token.tsv'), 'w', encoding='utf-8') as f: - # for token, info in zip(tokens, self_info): - # f.write(f"{token}\t{info}\n") - # with open(os.path.join(self.path, title+'_prob_sent.tsv'), 'w', encoding='utf-8') as f: - # for sent, info in zip(sents, sent_self_info): - # f.write(f"{sent}\n{info}\n\n") - - for sent, info in zip(sents, self_info): - if info < self.ppl_threshold: - masked_sents.append(sent) - sents_after_mask.append(self.mask_a_sent(sent, mask_level)) - else: - sents_after_mask.append(sent) - masked_context = " ".join(sents_after_mask) if mask_level == 'sent' else "".join(sents_after_mask) - - return masked_context, masked_sents - - def mask_a_sent(self, sent, level): - if level == 'phrase': - return self.phrase_mask_token - elif level == 'sent': - return self.sent_mask_token - elif level == 'token': - return '' - - def __call__(self, text: str, reduce_ratio: float = 0.35, reduce_level :str = 'phrase') -> List[str]: - context = self.beautify_context(text) - - self.mask_ratio = reduce_ratio - - sents = re.split(self.sent_tokenize_pattern, context) - sents = [sent.strip() for sent in sents if sent.strip()] - - # You want the reduce happen at sentence level, phrase level, or token level? 
- assert reduce_level in ['sent', 'phrase', 'token'], f"reduce_level should be one of ['sent', 'phrase', 'token'], got {reduce_level}" - sent_lus, phrase_lus, token_lus = self._lexical_unit(sents) - lexical_level = { - 'sent': sent_lus, - 'phrase': phrase_lus, - 'token': token_lus - } - - # context is the reduced context, masked_sents denotes what context has been filtered out - context, masked_sents = self.self_info_mask(lexical_level[reduce_level].text, lexical_level[reduce_level].self_info, reduce_level) - return context, masked_sents - -# streamlit app.py -# here we ask the user to input the text and the reduce ratio -# then we call the SelectiveContext to compress the text - -st.title("Selective Context: Compress your prompt") -st.markdown("This is a demo for the **Selective Context** algorithm.") -st.markdown("Use this algorithm to **compress** your prompt, so that LLMs can deal with **2x more context**!") -st.markdown("- The algorithm filters out the content that is less informative. \n - You can also choose to filter out phrases or tokens instead of sentences. \n - Checkout the paper for details and experiments! [https://arxiv.org/abs/2304.12102](https://arxiv.org/abs/2304.12102).") -st.write("") - -st.subheader("Demo") - -lang = st.radio("Please choose the language: ", ('en', 'zh')) -ratio = st.radio("Please choose the compress ratio [we recommend 0.5]: ", (0.5, 0.2, 0.35, 0.65, 0.8)) -reduce_level = st.radio("Please choose the reduce level: ", ('phrase', 'token', 'sent')) - -text = st.text_area("Please input your text here", height=300) - -@st.cache_resource() -def load_model(lang): - model = SelectiveContext(lang=lang) - return model - -if st.button("Compress"): - model = load_model(lang) - context, masked_sents = model(text, reduce_ratio=ratio, reduce_level=reduce_level) - st.subheader("The compressed context is:") - st.code(context) - # st.divider() - st.subheader("The filtered out content is:") - st.write(masked_sents) \ No newline at end of file diff --git a/spaces/ljiy/GGG/README.md b/spaces/ljiy/GGG/README.md deleted file mode 100644 index e9b0eaef0d35401e0a21724baf8226e10e610651..0000000000000000000000000000000000000000 --- a/spaces/ljiy/GGG/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: QQsign -emoji: 🦀 -colorFrom: blue -colorTo: purple -sdk: docker -pinned: false -license: mit -duplicated_from: AIxPha/GGG ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/ltgoslo/ssa-perin/mtool/validate/utilities.py b/spaces/ltgoslo/ssa-perin/mtool/validate/utilities.py deleted file mode 100644 index 57d1ef2b3342df906cbe0c54bf1810223a65b688..0000000000000000000000000000000000000000 --- a/spaces/ltgoslo/ssa-perin/mtool/validate/utilities.py +++ /dev/null @@ -1,20 +0,0 @@ -import sys; - -def report(graph, message, node = None, edge = None, - framework = None, level = "E", stream = sys.stderr): - if node is not None: - node = "; node #{}".format(node.id); - else: - node = ""; - if edge is not None: - edge = "; edge {} -{}-> {}".format(edge.src, edge.tgt, - edge.lab if edge.lab else ""); - else: - edge = ""; - if framework is not None: - framework = "{{{}}} ".format(framework); - else: - framework = ""; - print("validate(): [{}] {}graph #{}{}{}: {}." 
- "".format(level, framework, graph.id, node, edge, message), - file = stream); diff --git a/spaces/luost26/DiffAb/anarci/germlines.py b/spaces/luost26/DiffAb/anarci/germlines.py deleted file mode 100644 index 7429e4e7eced6467848fc082508feea6887bb35e..0000000000000000000000000000000000000000 --- a/spaces/luost26/DiffAb/anarci/germlines.py +++ /dev/null @@ -1 +0,0 @@ -all_germlines = {'J': {'H': {'human': {'IGHJ2*01': '------------------------------------------------------------------------------------------------------------------FDLWGRGTLVTVSS', 'IGHJ1*01': '------------------------------------------------------------------------------------------------------------------FQHWGQGTLVTVSS', 'IGHJ5*01': '------------------------------------------------------------------------------------------------------------------FDSWGQGTLVTVSS', 'IGHJ5*02': '------------------------------------------------------------------------------------------------------------------FDPWGQGTLVTVSS', 'IGHJ4*01': '------------------------------------------------------------------------------------------------------------------FDYWGQGTLVTVSS', 'IGHJ4*02': '------------------------------------------------------------------------------------------------------------------FDYWGQGTLVTVSS', 'IGHJ4*03': '------------------------------------------------------------------------------------------------------------------FDYWGQGTLVTVSS', 'IGHJ6*01': '------------------------------------------------------------------------------------------------------------------MDVWGQGTTVTVSS', 'IGHJ6*04': '------------------------------------------------------------------------------------------------------------------MDVWGKGTTVTVSS', 'IGHJ3*01': '------------------------------------------------------------------------------------------------------------------FDVWGQGTMVTVSS', 'IGHJ3*02': '------------------------------------------------------------------------------------------------------------------FDIWGQGTMVTVSS'}, 'mouse': {'IGHJ1*01': '------------------------------------------------------------------------------------------------------------------FDVWGAGTTVTVSS', 'IGHJ1*02': '------------------------------------------------------------------------------------------------------------------FDVWGAGTTVTVSS', 'IGHJ1*03': '------------------------------------------------------------------------------------------------------------------FDVWGTGTTVTVSS', 'IGHJ2*02': '------------------------------------------------------------------------------------------------------------------FDYWGQGTSLTVSS', 'IGHJ2*03': '------------------------------------------------------------------------------------------------------------------FDYWGQGTSLTVSS', 'IGHJ2*01': '------------------------------------------------------------------------------------------------------------------FDYWGQGTTLTVSS', 'IGHJ3*01': '------------------------------------------------------------------------------------------------------------------FAYWGQGTLVTVSA', 'IGHJ4*01': '------------------------------------------------------------------------------------------------------------------MDYWGQGTSVTVSS'}, 'rabbit': {'IGHJ1*01': '------------------------------------------------------------------------------------------------------------------LDPWGTGTLVTISS', 'IGHJ4*01': '------------------------------------------------------------------------------------------------------------------FNLWGPGTLVTVSS', 'IGHJ4*02': 
'------------------------------------------------------------------------------------------------------------------FNIWGPGTLVTVSS', 'IGHJ3*02': '------------------------------------------------------------------------------------------------------------------LDPWGQGTLVTVSS', 'IGHJ3*01': '------------------------------------------------------------------------------------------------------------------LDLWGQGTLVTVSS', 'IGHJ5*01': '------------------------------------------------------------------------------------------------------------------LDLWGQGTLVTVSS', 'IGHJ5*02': '------------------------------------------------------------------------------------------------------------------LDLWGQGTLVTVSS', 'IGHJ2*01': '------------------------------------------------------------------------------------------------------------------FDPWGPGTLVTVSS', 'IGHJ2*02': '------------------------------------------------------------------------------------------------------------------FDPWGPGTLVTVSS', 'IGHJ6*01': '------------------------------------------------------------------------------------------------------------------MDLWGPGTLVTVSS', 'IGHJ6*02': '------------------------------------------------------------------------------------------------------------------MDPWGPGTLVTVSS'}, 'rhesus': {'IGHJ6*01': '------------------------------------------------------------------------------------------------------------------LDSWGQGVVVTVSS', 'IGHJ2*01': '------------------------------------------------------------------------------------------------------------------FDLWGPGTPITISS', 'IGHJ1*01': '------------------------------------------------------------------------------------------------------------------FEFWGQGALVTVSS', 'IGHJ1*02': '------------------------------------------------------------------------------------------------------------------FEFWGQGALVTVSS', 'IGHJ4*01': '------------------------------------------------------------------------------------------------------------------FDYWGQGVLVTVSS', 'IGHJ5-1*01': '------------------------------------------------------------------------------------------------------------------FDVWGPGVLVTVSS', 'IGHJ5-1*02': '------------------------------------------------------------------------------------------------------------------FDVWGPGVLVTVSS', 'IGHJ5-2*01': '------------------------------------------------------------------------------------------------------------------LDVWGQGVLVTVSS', 'IGHJ5-2*02': '------------------------------------------------------------------------------------------------------------------LDVWGRGVLVTVSS', 'IGHJ3*01': '------------------------------------------------------------------------------------------------------------------FDFWGQGLRVTVSS'}, 'pig': {'IGHJ4*01': '------------------------------------------------------------------------------------------------------------------LESWGQGTLVYDAS', 'IGHJ3*01': '------------------------------------------------------------------------------------------------------------------LHSWGRGVEVTVSS', 'IGHJ5*01': '------------------------------------------------------------------------------------------------------------------MDLWGPGVEVVVSS', 'IGHJ1*01': '------------------------------------------------------------------------------------------------------------------LDSWGQGILVTVSS', 'IGHJ2*01': '------------------------------------------------------------------------------------------------------------------LDHWGRGVLVTVSS'}, 'alpaca': {'IGHJ6*01': 
'------------------------------------------------------------------------------------------------------------------FGSWGQGTQVTVSS', 'IGHJ2*01': '------------------------------------------------------------------------------------------------------------------LEVWGQGTLVTVSS', 'IGHJ5*01': '------------------------------------------------------------------------------------------------------------------FEYWGQGTLVTVS-', 'IGHJ3*01': '------------------------------------------------------------------------------------------------------------------LDAWGQGTLVTVSS', 'IGHJ4*01': '------------------------------------------------------------------------------------------------------------------YDYWGQGTQVTVSS', 'IGHJ7*01': '------------------------------------------------------------------------------------------------------------------MDYWGKGTLVTVSS'}, 'cow': {'IGHJ1-4*01': '------------------------------------------------------------------------------------------------------------------FDNWGPGIQNTVSS', 'IGHJ1-6*01': '------------------------------------------------------------------------------------------------------------------IDAWGRGLRVTVSS', 'IGHJ2-4*01': '------------------------------------------------------------------------------------------------------------------VDAWGQGLLVTVSS'}}, 'K': {'human': {'IGKJ3*01': '-------------------------------------------------------------------------------------------------------------------FTFGPGTKVDIK-', 'IGKJ5*01': '-------------------------------------------------------------------------------------------------------------------ITFGQGTRLEIK-', 'IGKJ4*01': '-------------------------------------------------------------------------------------------------------------------LTFGGGTKVEIK-', 'IGKJ1*01': '-------------------------------------------------------------------------------------------------------------------WTFGQGTKVEIK-', 'IGKJ2*01': '-------------------------------------------------------------------------------------------------------------------YTFGQGTKLEIK-'}, 'mouse': {'IGKJ5*01': '-------------------------------------------------------------------------------------------------------------------LTFGAGTKLELK-', 'IGKJ1*02': '-------------------------------------------------------------------------------------------------------------------PTFGGGTKLEIN-', 'IGKJ4*02': '-------------------------------------------------------------------------------------------------------------------FTFGTGTKLEIK-', 'IGKJ4*01': '-------------------------------------------------------------------------------------------------------------------FTFGSGTKLEIK-', 'IGKJ2*02': '-------------------------------------------------------------------------------------------------------------------YTFGSGTKLEMK-', 'IGKJ2*03': '-------------------------------------------------------------------------------------------------------------------YTFGSGTKLEIK-', 'IGKJ1*01': '-------------------------------------------------------------------------------------------------------------------WTFGGGTKLEIK-', 'IGKJ2*01': '-------------------------------------------------------------------------------------------------------------------YTFGGGTKLEIK-'}, 'rat': {'IGKJ2-2*01': '-------------------------------------------------------------------------------------------------------------------DTFGAGTKLELK-', 'IGKJ2-1*01': '-------------------------------------------------------------------------------------------------------------------NTFGAGTKLELK-', 'IGKJ2-3*01': 
'-------------------------------------------------------------------------------------------------------------------YTFGAGTKLELK-', 'IGKJ5*01': '-------------------------------------------------------------------------------------------------------------------LTFGSGTKLEIK-', 'IGKJ4*01': '-------------------------------------------------------------------------------------------------------------------FTFGSGTKLEIK-', 'IGKJ1*01': '-------------------------------------------------------------------------------------------------------------------WTFGGGTKLELK-'}, 'rabbit': {'IGKJ1-1*03': '-------------------------------------------------------------------------------------------------------------------WAFGAGTNVEIK-', 'IGKJ2-3*01': '-------------------------------------------------------------------------------------------------------------------ITFGKGTKLEIK-', 'IGKJ2-2*01': '------------------------------------------------------------------------------------------------------------------SNTFGAGTKVEIK-', 'IGKJ2-1*01': '-------------------------------------------------------------------------------------------------------------------LTFGAGTKVEIK-', 'IGKJ1-2*03': '------------------------------------------------------------------------------------------------------------------YNTFGGGTKVVVE-', 'IGKJ1-2*01': '------------------------------------------------------------------------------------------------------------------YNAFGGGTEVVVK-', 'IGKJ1-2*02': '------------------------------------------------------------------------------------------------------------------YNAFGGGTEVVVK-'}, 'rhesus': {'IGKJ1*01': '-------------------------------------------------------------------------------------------------------------------WTFGQGTKVEIK-'}, 'pig': {'IGKJ5*01': '-------------------------------------------------------------------------------------------------------------------ITFGEGTSVEIE-', 'IGKJ5*02': '-------------------------------------------------------------------------------------------------------------------ITFGEGTSVEIE-', 'IGKJ2*02': '-------------------------------------------------------------------------------------------------------------------NGFGAGTKLELK-', 'IGKJ2*01': '-------------------------------------------------------------------------------------------------------------------YGFGAGTKLELK-', 'IGKJ3*01': '-------------------------------------------------------------------------------------------------------------------FTFGSGTKVEPK-', 'IGKJ4*01': '-------------------------------------------------------------------------------------------------------------------VVFGSGTKLEIK-', 'IGKJ4*02': '-------------------------------------------------------------------------------------------------------------------VVFGSGTKLEIK-', 'IGKJ1*01': '-------------------------------------------------------------------------------------------------------------------WTFGQGTKLELK-'}, 'cow': {'IGKJ2*01': '-------------------------------------------------------------------------------------------------------------------NTFGQGTKVEIK-'}}, 'L': {'human': {'IGLJ1*01': '-------------------------------------------------------------------------------------------------------------------YVFGTGTKVTVL-', 'IGLJ6*01': '-------------------------------------------------------------------------------------------------------------------NVFGSGTKVTVL-', 'IGLJ2*01': '-------------------------------------------------------------------------------------------------------------------VVFGGGTKLTVL-', 'IGLJ3*01': 
'-------------------------------------------------------------------------------------------------------------------VVFGGGTKLTVL-', 'IGLJ3*02': '-------------------------------------------------------------------------------------------------------------------WVFGGGTKLTVL-', 'IGLJ7*02': '-------------------------------------------------------------------------------------------------------------------AVFGGGTQLTAL-', 'IGLJ7*01': '-------------------------------------------------------------------------------------------------------------------AVFGGGTQLTVL-'}, 'mouse': {'IGLJ3*01': '-------------------------------------------------------------------------------------------------------------------FIFGSGTKVTVL-', 'IGLJ2*01': '-------------------------------------------------------------------------------------------------------------------YVFGGGTKVTVL-', 'IGLJ1*01': '-------------------------------------------------------------------------------------------------------------------WVFGGGTKLTVL-'}, 'rat': {'IGLJ1*01': '-------------------------------------------------------------------------------------------------------------------PVFGGGTKLTVL-', 'IGLJ3*01': '-------------------------------------------------------------------------------------------------------------------PVFGGGTKLTVL-'}, 'rabbit': {'IGLJ5*01': '-------------------------------------------------------------------------------------------------------------------YVFGGGTQLTVT-', 'IGLJ6*01': '-------------------------------------------------------------------------------------------------------------------VVFGGGTQLTVT-'}, 'rhesus': {'IGLJ6*01': '-------------------------------------------------------------------------------------------------------------------DVFGSGTKLTVL-', 'IGLJ3*01': '-------------------------------------------------------------------------------------------------------------------VLFGGGTRLTVL-', 'IGLJ2*01': '-------------------------------------------------------------------------------------------------------------------GLFGGGTRLTVL-', 'IGLJ5*01': '-------------------------------------------------------------------------------------------------------------------WVFGEGTKLTIL-', 'IGLJ2A*01': '-------------------------------------------------------------------------------------------------------------------WVFGGGTRLTVL-', 'IGLJ7*01': '-------------------------------------------------------------------------------------------------------------------VMFGRGTRLTDI-', 'IGLJ1*01': '-------------------------------------------------------------------------------------------------------------------YIFGAGTRLTVL-'}, 'pig': {'IGLJ2*01': '-------------------------------------------------------------------------------------------------------------------NIFGGGTHLTVL-', 'IGLJ2*02': '-------------------------------------------------------------------------------------------------------------------NIFGGGTHLTVL-', 'IGLJ3*01': '-------------------------------------------------------------------------------------------------------------------VPFGGGTHLTVL-', 'IGLJ4*01': '-------------------------------------------------------------------------------------------------------------------DRFGRGTRLSVL-'}, 'cow': {'IGLJ4*01': '-------------------------------------------------------------------------------------------------------------------AVFGSGTTLTVL-', 'IGLJ7*01': '-------------------------------------------------------------------------------------------------------------------AVFGSGTTLTVL-', 'IGLJ8*01': 
'-------------------------------------------------------------------------------------------------------------------AVFGSGTTLTVL-', 'IGLJ3*01': '-------------------------------------------------------------------------------------------------------------------DLFGGGTTVTVL-', 'IGLJ2*01': '-------------------------------------------------------------------------------------------------------------------DLFGGGTRVTVL-'}}, 'A': {'human': {'TRAJ7*01': '------------------------------------------------------------------------------------------------------------------RLAFGKGNQVVVIP', 'TRAJ50*01': '------------------------------------------------------------------------------------------------------------------KVIFGPGTSLSVIP', 'TRAJ15*01': '------------------------------------------------------------------------------------------------------------------ALIFGKGTTLSVSS', 'TRAJ15*02': '------------------------------------------------------------------------------------------------------------------ALIFGKGTHLSVSS', 'TRAJ45*01': '------------------------------------------------------------------------------------------------------------------GLTFGKGTHLIIQP', 'TRAJ24*02': '------------------------------------------------------------------------------------------------------------------KLQFGAGTQVVVTP', 'TRAJ24*01': '------------------------------------------------------------------------------------------------------------------KFEFGAGTQVVVTP', 'TRAJ24*03': '------------------------------------------------------------------------------------------------------------------KFQFGAGTQVVVTP', 'TRAJ31*01': '------------------------------------------------------------------------------------------------------------------RLMFGDGTQLVVKP', 'TRAJ38*01': '------------------------------------------------------------------------------------------------------------------KLIWGLGTSLAVNP', 'TRAJ35*01': '------------------------------------------------------------------------------------------------------------------VLHCGSGTQVIVLP', 'TRAJ6*01': '------------------------------------------------------------------------------------------------------------------IPTFGRGTSLIVHP', 'TRAJ28*01': '------------------------------------------------------------------------------------------------------------------QLTFGKGTKLSVIP', 'TRAJ47*01': '------------------------------------------------------------------------------------------------------------------KLVFGAGTILRVKS', 'TRAJ14*01': '------------------------------------------------------------------------------------------------------------------TFIFGSGTRLSVKP', 'TRAJ3*01': '------------------------------------------------------------------------------------------------------------------KIIFGSGTRLSIRP', 'TRAJ5*01': '------------------------------------------------------------------------------------------------------------------ALTFGSGTRLQVQP', 'TRAJ52*01': '------------------------------------------------------------------------------------------------------------------KLTFGQGTILTVHP', 'TRAJ27*01': '------------------------------------------------------------------------------------------------------------------KSTFGDGTTLTVKP', 'TRAJ37*01': '------------------------------------------------------------------------------------------------------------------KLIFGQGTTLQVKP', 'TRAJ37*02': '------------------------------------------------------------------------------------------------------------------KLIFGQGTTLQVKP', 'TRAJ34*01': 
'------------------------------------------------------------------------------------------------------------------KLIFGTGTRLQVFP', 'TRAJ23*01': '------------------------------------------------------------------------------------------------------------------KLIFGQGTELSVKP', 'TRAJ23*02': '------------------------------------------------------------------------------------------------------------------KLIFGQGTELSVKP', 'TRAJ48*01': '------------------------------------------------------------------------------------------------------------------KLTFGTGTRLTIIP', 'TRAJ53*01': '------------------------------------------------------------------------------------------------------------------KLTFGKGTLLTVNP', 'TRAJ33*01': '------------------------------------------------------------------------------------------------------------------QLIWGAGTKLIIKP', 'TRAJ21*01': '------------------------------------------------------------------------------------------------------------------KFYFGSGTKLNVKP', 'TRAJ42*01': '------------------------------------------------------------------------------------------------------------------NLIFGKGTKLSVKP', 'TRAJ32*01': '------------------------------------------------------------------------------------------------------------------KLIFGTGTLLAVQP', 'TRAJ32*02': '------------------------------------------------------------------------------------------------------------------KLIFGTGTLLAVQP', 'TRAJ16*01': '------------------------------------------------------------------------------------------------------------------KLLFARGTMLKVDL', 'TRAJ16*02': '------------------------------------------------------------------------------------------------------------------KLLFARGTMLKVDL', 'TRAJ41*01': '------------------------------------------------------------------------------------------------------------------ALNFGKGTSLLVTP', 'TRAJ46*01': '------------------------------------------------------------------------------------------------------------------KLTFGTGTRLAVRP', 'TRAJ18*01': '------------------------------------------------------------------------------------------------------------------RLYFGRGTQLTVWP', 'TRAJ54*01': '------------------------------------------------------------------------------------------------------------------KLVFGQGTRLTINP', 'TRAJ13*01': '------------------------------------------------------------------------------------------------------------------KVTFGIGTKLQVIP', 'TRAJ13*02': '------------------------------------------------------------------------------------------------------------------KVTFGTGTKLQVIP', 'TRAJ20*01': '------------------------------------------------------------------------------------------------------------------KLSFGAGTTVTVRA', 'TRAJ17*01': '------------------------------------------------------------------------------------------------------------------KLTFGGGTRVLVKP', 'TRAJ39*01': '------------------------------------------------------------------------------------------------------------------MLTFGGGTRLMVKP', 'TRAJ57*01': '------------------------------------------------------------------------------------------------------------------KLVFGKGTKLTVNP', 'TRAJ22*01': '------------------------------------------------------------------------------------------------------------------QLTFGSGTQLTVLP', 'TRAJ26*01': '------------------------------------------------------------------------------------------------------------------NFVFGPGTRLSVLP', 'TRAJ10*01': 
'------------------------------------------------------------------------------------------------------------------KLTFGTGTQLKVEL', 'TRAJ44*01': '------------------------------------------------------------------------------------------------------------------KLTFGTGTRLQVTL', 'TRAJ56*01': '------------------------------------------------------------------------------------------------------------------KLTFGKGITLSVRP', 'TRAJ11*01': '------------------------------------------------------------------------------------------------------------------TLTFGKGTMLLVSP', 'TRAJ8*01': '------------------------------------------------------------------------------------------------------------------KLVFGTGTRLLVSP', 'TRAJ49*01': '------------------------------------------------------------------------------------------------------------------QFYFGTGTSLTVIP', 'TRAJ30*01': '------------------------------------------------------------------------------------------------------------------KIIFGKGTRLHILP', 'TRAJ36*01': '------------------------------------------------------------------------------------------------------------------NLFFGTGTRLTVIP', 'TRAJ12*01': '------------------------------------------------------------------------------------------------------------------KLIFGSGTRLLVRP', 'TRAJ4*01': '------------------------------------------------------------------------------------------------------------------KLIFGAGTRLAVHP', 'TRAJ29*01': '------------------------------------------------------------------------------------------------------------------PLVFGKGTRLSVIA', 'TRAJ43*01': '------------------------------------------------------------------------------------------------------------------DMRFGAGTRLTVKP', 'TRAJ9*01': '------------------------------------------------------------------------------------------------------------------KTIFGAGTRLFVKA', 'TRAJ40*01': '------------------------------------------------------------------------------------------------------------------KYIFGTGTRLKVLA'}, 'mouse': {'TRAJ13*01': '------------------------------------------------------------------------------------------------------------------YQRFGTGTKLQVVP', 'TRAJ26*01': '------------------------------------------------------------------------------------------------------------------GLTFGLGTRVSVFP', 'TRAJ15*01': '------------------------------------------------------------------------------------------------------------------ALIFGTGTTVSVSP', 'TRAJ45*01': '------------------------------------------------------------------------------------------------------------------RLTFGKGTQLIIQP', 'TRAJ24*01': '------------------------------------------------------------------------------------------------------------------KLQFGTGTQVVVTP', 'TRAJ31*01': '------------------------------------------------------------------------------------------------------------------RIFFGDGTQLVVKP', 'TRAJ38*01': '------------------------------------------------------------------------------------------------------------------KLIWGLGTSLVVNP', 'TRAJ6*01': '------------------------------------------------------------------------------------------------------------------KPTFGKGTSLVVHP', 'TRAJ28*01': '------------------------------------------------------------------------------------------------------------------RLTFGKGTKFSLIP', 'TRAJ52*01': '------------------------------------------------------------------------------------------------------------------KLTFGHGTILRVHP', 'TRAJ27*01': 
'------------------------------------------------------------------------------------------------------------------KLTFGDGTVLTVKP', 'TRAJ37*01': '------------------------------------------------------------------------------------------------------------------KLIFGLGTTLQVQP', 'TRAJ34*01': '------------------------------------------------------------------------------------------------------------------KVVFGTGTRLQVSP', 'TRAJ53*01': '------------------------------------------------------------------------------------------------------------------KLTFGKGTLLTVTP', 'TRAJ42*01': '------------------------------------------------------------------------------------------------------------------KLTFGKGTKLSVKS', 'TRAJ33*01': '------------------------------------------------------------------------------------------------------------------QLIWGSGTKLIIKP', 'TRAJ23*01': '------------------------------------------------------------------------------------------------------------------KLIFGQGTKLSIKP', 'TRAJ35*02': '------------------------------------------------------------------------------------------------------------------ALTFGSGTKVIVLP', 'TRAJ35*01': '------------------------------------------------------------------------------------------------------------------ALTFGSGTKVIPCL', 'TRAJ48*01': '------------------------------------------------------------------------------------------------------------------KITFGAGTKLTIKP', 'TRAJ32*01': '------------------------------------------------------------------------------------------------------------------KLIFGIGTLLSVKP', 'TRAJ50*01': '------------------------------------------------------------------------------------------------------------------KLVFGQGTSLSVVP', 'TRAJ16*01': '------------------------------------------------------------------------------------------------------------------KLVFGQGTILKVYL', 'TRAJ9*01': '------------------------------------------------------------------------------------------------------------------KLTFGTGTSLLVDP', 'TRAJ2*01': '------------------------------------------------------------------------------------------------------------------KLTFGEGTQVTVIS', 'TRAJ2*02': '------------------------------------------------------------------------------------------------------------------KLTFGEGTQVTVIS', 'TRAJ58*01': '------------------------------------------------------------------------------------------------------------------KLSFGKGAKLTVSP', 'TRAJ18*01': '------------------------------------------------------------------------------------------------------------------RLHFGAGTQLIVIP', 'TRAJ22*01': '------------------------------------------------------------------------------------------------------------------QLIFGSGTQLTVMP', 'TRAJ17*01': '------------------------------------------------------------------------------------------------------------------KLTFGIGTRVLVRP', 'TRAJ57*01': '------------------------------------------------------------------------------------------------------------------KLIFGEGTKLTVSS', 'TRAJ39*01': '------------------------------------------------------------------------------------------------------------------KLTFGGGTRLTVRP', 'TRAJ30*01': '------------------------------------------------------------------------------------------------------------------KVIFGKGTHLHVLP', 'TRAJ21*01': '------------------------------------------------------------------------------------------------------------------VLYFGSGTKLTVEP', 'TRAJ56*01': 
'------------------------------------------------------------------------------------------------------------------KLTFGQGTVLSVIP', 'TRAJ11*01': '------------------------------------------------------------------------------------------------------------------KLTFGKGTVLLVSP', 'TRAJ12*01': '------------------------------------------------------------------------------------------------------------------KVVFGSGTRLLVSP', 'TRAJ49*01': '------------------------------------------------------------------------------------------------------------------NFYFGKGTSLTVIP', 'TRAJ5*01': '------------------------------------------------------------------------------------------------------------------QLTFGRGTRLQVYA', 'TRAJ4*02': '------------------------------------------------------------------------------------------------------------------KLTFGAGTRLAVCP', 'TRAJ43*01': '------------------------------------------------------------------------------------------------------------------APRFGAGTKLSVKP', 'TRAJ40*01': '------------------------------------------------------------------------------------------------------------------KYVFGAGTRLKVIA'}}, 'B': {'human': {'TRBJ1-4*01': '------------------------------------------------------------------------------------------------------------------KLFFGSGTQLSVL-', 'TRBJ1-1*01': '------------------------------------------------------------------------------------------------------------------EAFFGQGTRLTVV-', 'TRBJ1-6*01': '------------------------------------------------------------------------------------------------------------------PLHFGNGTRLTVT-', 'TRBJ1-6*02': '------------------------------------------------------------------------------------------------------------------PLHFGNGTRLTVT-', 'TRBJ1-3*01': '------------------------------------------------------------------------------------------------------------------TIYFGEGSWLTVV-', 'TRBJ2-2*01': '------------------------------------------------------------------------------------------------------------------ELFFGEGSRLTVL-', 'TRBJ1-5*01': '------------------------------------------------------------------------------------------------------------------PQHFGDGTRLSIL-', 'TRBJ2-4*01': '------------------------------------------------------------------------------------------------------------------IQYFGAGTRLSVL-', 'TRBJ2-6*01': '------------------------------------------------------------------------------------------------------------------VLTFGAGSRLTVL-', 'TRBJ1-2*01': '------------------------------------------------------------------------------------------------------------------GYTFGSGTRLTVV-', 'TRBJ2-3*01': '------------------------------------------------------------------------------------------------------------------TQYFGPGTRLTVL-', 'TRBJ2-5*01': '------------------------------------------------------------------------------------------------------------------TQYFGPGTRLLVL-', 'TRBJ2-1*01': '------------------------------------------------------------------------------------------------------------------EQFFGPGTRLTVL-', 'TRBJ2-7*01': '------------------------------------------------------------------------------------------------------------------EQYFGPGTRLTVT-'}, 'mouse': {'TRBJ1-4*01': '------------------------------------------------------------------------------------------------------------------RLFFGHGTKLSVL-', 'TRBJ1-4*02': '------------------------------------------------------------------------------------------------------------------RLFFGHGTKLSVL-', 'TRBJ1-1*01': 
'------------------------------------------------------------------------------------------------------------------EVFFGKGTRLTVV-', 'TRBJ1-6*01': '------------------------------------------------------------------------------------------------------------------PLYFGMGTRLTVT-', 'TRBJ1-3*01': '------------------------------------------------------------------------------------------------------------------TLYFGEGSRLIVV-', 'TRBJ2-2*01': '------------------------------------------------------------------------------------------------------------------QLYFGEGSKLTVL-', 'TRBJ1-2*01': '------------------------------------------------------------------------------------------------------------------DYTFGSGTRLLVI-', 'TRBJ1-5*01': '------------------------------------------------------------------------------------------------------------------APLFGEGTRLSVL-', 'TRBJ1-5*03': '------------------------------------------------------------------------------------------------------------------AQHFGEGTRLSVL-', 'TRBJ2-4*01': '------------------------------------------------------------------------------------------------------------------TQYFGAGTRLTVL-', 'TRBJ2-3*01': '------------------------------------------------------------------------------------------------------------------TQYFGRGTRLTVL-', 'TRBJ2-5*01': '------------------------------------------------------------------------------------------------------------------TQYFGPGTRLLVL-', 'TRBJ2-1*01': '------------------------------------------------------------------------------------------------------------------EQFFGPGTRLTVL-', 'TRBJ2-7*01': '------------------------------------------------------------------------------------------------------------------EQYFGPGTRLTVL-', 'TRBJ2-6*01': '------------------------------------------------------------------------------------------------------------------EQYFGSGTRLTVI-'}}, 'G': {'human': {'TRGJP*01': '------------------------------------------------------------------------------------------------------------------IKVFGPGTKLIIT-', 'TRGJP2*01': '------------------------------------------------------------------------------------------------------------------IKTFAKGTRLIVTS', 'TRGJP1*01': '------------------------------------------------------------------------------------------------------------------FKIFAEGTKLIVTS', 'TRGJ1*01': '------------------------------------------------------------------------------------------------------------------KKLFGSGTTLVVT-', 'TRGJ1*02': '------------------------------------------------------------------------------------------------------------------KKLFGSGTTLVVT-', 'TRGJ2*01': '------------------------------------------------------------------------------------------------------------------KKLFGSGTTLVVT-'}, 'mouse': {'TRGJ4*01': '------------------------------------------------------------------------------------------------------------------VKIFAKGTKLVVIP', 'TRGJ1*01': '------------------------------------------------------------------------------------------------------------------HKVFAEGTKLIVIP', 'TRGJ2*01': '------------------------------------------------------------------------------------------------------------------HKVFAEGTKLIVIP', 'TRGJ3*01': '------------------------------------------------------------------------------------------------------------------HKVFAEGTKLIVIP'}}, 'D': {'human': {'TRDJ3*01': '------------------------------------------------------------------------------------------------------------------QMFFGTGIKLFVEP', 'TRDJ4*01': 
'------------------------------------------------------------------------------------------------------------------PLIFGKGTYLEVQQ', 'TRDJ2*01': '------------------------------------------------------------------------------------------------------------------QLFFGKGTQLIVEP', 'TRDJ1*01': '------------------------------------------------------------------------------------------------------------------KLIFGKGTRVTVEP'}, 'mouse': {'TRDJ2*01': '------------------------------------------------------------------------------------------------------------------QMFFGTGIELFVEP', 'TRDJ1*01': '------------------------------------------------------------------------------------------------------------------KLVFGQGTQVTVEP', 'TRDJ2*02': '------------------------------------------------------------------------------------------------------------------TDVFGTGIELFVEP'}}}, 'V': {'H': {'human': {'IGHV1-18*01': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYGISWVRQAPGQGLEWMGWISAY--NGNTNYAQKLQ-GRVTMTTDTSTSTAYMELRSLRSDDTAVYYCAR----------------------', 'IGHV1-18*03': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYGISWVRQAPGQGLEWMGWISAY--NGNTNYAQKLQ-GRVTMTTDTSTSTAYMELRSLRSDDMAVYYCAR----------------------', 'IGHV1-18*04': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYGISWVRQAPGQGLEWMGWISAY--NGNTNYAQKLQ-GRVTMTTDTSTSTAYMELRSLRSDDTAVYYCAR----------------------', 'IGHV1-2*01': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TGYYMHWVRQAPGQGLEWMGRINPN--SGGTNYAQKFQ-GRVTSTRDTSISTAYMELSRLRSDDTVVYYCAR----------------------', 'IGHV1-2*02': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TGYYMHWVRQAPGQGLEWMGWINPN--SGGTNYAQKFQ-GRVTMTRDTSISTAYMELSRLRSDDTAVYYCAR----------------------', 'IGHV1-2*04': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TGYYMHWVRQAPGQGLEWMGWINPN--SGGTNYAQKFQ-GWVTMTRDTSISTAYMELSRLRSDDTAVYYCAR----------------------', 'IGHV1-2*06': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TGYYMHWVRQAPGQGLEWMGRINPN--SGGTNYAQKFQ-GRVTMTRDTSISTAYMELSRLRSDDTAVYYCAR----------------------', 'IGHV1-2*07': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TGYYMHWVRQAPGQGLEWMGWINPN--SGGTNYAHKFQ-GRVTMTRDTSISTAYMELSRLRSDDTAVYYCAR----------------------', 'IGHV1-24*01': 'QVQLVQSGA-EVKKPGASVKVSCKVSGYTL----TELSMHWVRQAPGKGLEWMGGFDPE--DGETIYAQKFQ-GRVTMTEDTSTDTAYMELSSLRSEDTAVYYCAT----------------------', 'IGHV1-3*01': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYAMHWVRQAPGQRLEWMGWINAG--NGNTKYSQKFQ-GRVTITRDTSASTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-3*02': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYAMHWVRQAPGQRLEWMGWSNAG--NGNTKYSQEFQ-GRVTITRDTSASTAYMELSSLRSEDMAVYYCAR----------------------', 'IGHV1-3*03': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYAMHWVRQAPGQRLEWMGWINAG--NGNTKYSQEFQ-GRVTITRDTSASTAYMELSSLRSEDMAVYYCAR----------------------', 'IGHV1-3*05': 'QVQLVQSGA-EEKKPGASVKVSCKASGYTF----TSYAMHWVRQAPGQRLEWMGWINAG--NGNTKYSQKFQ-GRVTITRDTSASTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-45*01': 'QMQLVQSGA-EVKKTGSSVKVSCKASGYTF----TYRYLHWVRQAPGQALEWMGWITPF--NGNTNYAQKFQ-DRVTITRDRSMSTAYMELSSLRSEDTAMYYCAR----------------------', 'IGHV1-45*02': 'QMQLVQSGA-EVKKTGSSVKVSCKASGYTF----TYRYLHWVRQAPGQALEWMGWITPF--NGNTNYAQKFQ-DRVTITRDRSMSTAYMELSSLRSEDTAMYYCAR----------------------', 'IGHV1-45*03': 'QMQLVQSGA-EVKKTGSSVKVSCKASGYTF----TYRYLHWVRQAPRQALEWMGWITPF--NGNTNYAQKFQ-DRVTITRDRSMSTAYMELSSLRSEDTAMYYCAR----------------------', 'IGHV1-46*01': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYYMHWVRQAPGQGLEWMGIINPS--GGSTSYAQKFQ-GRVTMTRDTSTSTVYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-46*02': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----NSYYMHWVRQAPGQGLEWMGIINPS--GGSTSYAQKFQ-GRVTMTRDTSTSTVYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-46*03': 
'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYYMHWVRQAPGQGLEWMGIINPS--GGSTSYAQKFQ-GRVTMTRDTSTSTVYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-46*04': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYYMHWVRQAPGQGLEWMGIINPS--GGSTSYAQKLQ-GRVTMTRDTSTSTVYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-58*01': 'QMQLVQSGP-EVKKPGTSVKVSCKASGFTF----TSSAVQWVRQARGQRLEWIGWIVVG--SGNTNYAQKFQ-ERVTITRDMSTSTAYMELSSLRSEDTAVYYCAA----------------------', 'IGHV1-58*02': 'QMQLVQSGP-EVKKPGTSVKVSCKASGFTF----TSSAMQWVRQARGQRLEWIGWIVVG--SGNTNYAQKFQ-ERVTITRDMSTSTAYMELSSLRSEDTAVYYCAA----------------------', 'IGHV1-69*01': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--FGTANYAQKFQ-GRVTITADESTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*02': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYTISWVRQAPGQGLEWMGRIIPI--LGIANYAQKFQ-GRVTITADKSTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*04': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGRIIPI--LGIANYAQKFQ-GRVTITADKSTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*05': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--FGTANYAQKFQ-GRVTITTDESTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*06': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--FGTANYAQKFQ-GRVTITADKSTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*08': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYTISWVRQAPGQGLEWMGRIIPI--LGTANYAQKFQ-GRVTITADKSTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*09': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGRIIPI--LGIANYAQKFQ-GRVTITADKSTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*10': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--LGIANYAQKFQ-GRVTITADKSTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*11': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGRIIPI--LGTANYAQKFQ-GRVTITADESTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*12': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--FGTANYAQKFQ-GRVTITADESTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*13': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--FGTANYAQKFQ-GRVTITADESTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*14': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--FGTANYAQKFQ-GRVTITADKSTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*15': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGRIIPI--FGTANYAQKFQ-GRVTITADESTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*16': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYTISWVRQAPGQGLEWMGGIIPI--LGTANYAQKFQ-GRVTITTDESTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*17': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--FGIANYAQKFQ-GRVTITADKSTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69*19': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--FGTANYAQKFQ-GRVTITADESTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-69-2*01': 'EVQLVQSGA-EVKKPGATVKISCKVSGYTF----TDYYMHWVQQAPGKGLEWMGLVDPE--DGETIYAEKFQ-GRVTITADTSTDTAYMELSSLRSEDTAVYYCAT----------------------', 'IGHV1-69D*01': 'QVQLVQSGA-EVKKPGSSVKVSCKASGGTF----SSYAISWVRQAPGQGLEWMGGIIPI--FGTANYAQKFQ-GRVTITADESTSTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-8*01': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYDINWVRQATGQGLEWMGWMNPN--SGNTGYAQKFQ-GRVTMTRNTSISTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV1-8*03': 'QVQLVQSGA-EVKKPGASVKVSCKASGYTF----TSYDINWVRQATGQGLEWMGWMNPN--SGNTGYAQKFQ-GRVTITRNTSISTAYMELSSLRSEDTAVYYCAR----------------------', 'IGHV2-26*01': 
'QVTLKESGP-VLVKPTETLTLTCTVSGFSLS--NARMGVSWIRQPPGKALEWLAHIFSN---DEKSYSTSLK-SRLTISKDTSKSQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-26*02': 'QVTLKESGP-VLVKPTETLTLTCTVSGFSLS--NARMGVSWIRQPPGKALEWLAHIFSN---DEKSYSTSLK-SRLTISKDTSKSQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-26*03': 'QVTLKESGP-VLVKPTETLTLTCTISGFSLS--NARMGVSWIRQPPGKALEWLAHIFSN---DEKSYSTSLK-SRLTISKDTSKSQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-26*04': 'QVTLKESGP-VLVKPTETLTLTCTVSGFSLS--NARMGVSWIRQPPGKALEWLAHIFSN---DEKSYSTSLK-SRLTISKDTSKSQVVLTMTNMDPVDTATYYCAWI---------------------', 'IGHV2-5*01': 'QITLKESGP-TLVKPTQTLTLTCTFSGFSLS--TSGVGVGWIRQPPGKALEWLALIYWN---DDKRYSPSLK-SRLTITKDTSKNQVVLTMTNMDPVDTATYYCAHR---------------------', 'IGHV2-5*02': 'QITLKESGP-TLVKPTQTLTLTCTFSGFSLS--TSGVGVGWIRQPPGKALEWLALIYWD---DDKRYSPSLK-SRLTITKDTSKNQVVLTMTNMDPVDTATYYCAHR---------------------', 'IGHV2-5*05': 'QITLKESGP-TLVKPTQTLTLTCTFSGFSLS--TSGVGVGWIRQPPGKALEWLALIYWD---DDKRYGPSLK-SRLTITKDTSKNQVVLTMTNMDPVDTATYYCAHR---------------------', 'IGHV2-5*06': 'QITLKESGP-TLVKPTQTLTLTCTFSGFSLS--TSGVGVGWIRQPPGKALEWLALIYWD---DDKRYGPSLK-SRLTITKDTSKNQVVLTMTNMDPVDTATYYCAHR---------------------', 'IGHV2-5*08': 'QVTLKESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMRVSWIRQPPGKALEWLALIYWD---DDKRYSPSLK-SRLTITKDTSKNQVVLTMTNMDPVDTATYYCAHR---------------------', 'IGHV2-5*09': 'QVTLKESGP-TLVKPTQTLTLTCTFSGFSLS--TSGVGVGWIRQPPGKALEWLALIYWD---DDKRYGPSLK-SRLTITKDTSKNQVVLTMTNMDPVDTATYYCAHR---------------------', 'IGHV2-70*01': 'QVTLRESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMCVSWIRQPPGKALEWLALIDWD---DDKYYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*04': 'QVTLKESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMRVSWIRQPPGKALEWLARIDWD---DDKFYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*10': 'QVTLKESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMRVSWIRQPPGKALEWIARIDWD---DDKYYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*11': 'RVTLRESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMCVSWIRQPPGKALEWLARIDWD---DDKYYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*12': 'QITLKESGP-TLVKPTQTLTLTCTFSGFSLS--TSGMCVSWIRQPPGKALEWLALIDWD---DDKYYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCAHR---------------------', 'IGHV2-70*13': 'QVTLRESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMCVSWIRQPPGKALEWLALIDWD---DDKYYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*15': 'QVTLRESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMCVSWIRQPPGKALEWLARIDWD---DDKYYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*16': 'QVTLKESGP-VLVKPTQTLTLTCTFSGFSLS--TSGMCVSWIRQPPGKALEWLARIDWD---DDKFYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*17': 'QVTLRESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMCVSWIRQPPGKALEWLARIDWD---DDKFYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*18': 'QVTLRESGP-ALVKPTQTLTLTCTFSGFSLS--TSEMCVSWVRQPPGKALEWLALIDWD---DDKYYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*19': 'QVTLRESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMCVSWVRQPPGKALEWLALIDWD---DDKHYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70*20': 'QVTLRESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMCVSWVRQPPGKALEWLALIDWD---DDKYYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70D*04': 'QVTLKESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMRVSWIRQPPGKALEWLARIDWD---DDKFYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV2-70D*14': 'QVTLKESGP-ALVKPTQTLTLTCTFSGFSLS--TSGMRVSWIRQPPGKALEWLARIDWD---DDKFYSTSLK-TRLTISKDTSKNQVVLTMTNMDPVDTATYYCARI---------------------', 'IGHV3-11*01': 
'QVQLVESGG-GLVKPGGSLRLSCAASGFTF----SDYYMSWIRQAPGKGLEWVSYISSS--GSTIYYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-11*03': 'QVQLLESGG-GLVKPGGSLRLSCAASGFTF----SDYYMSWIRQAPGKGLEWVSYISSS--SSYTNYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-11*05': 'QVQLVESGG-GLVKPGGSLRLSCAASGFTF----SDYYMSWIRQAPGKGLEWVSYISSS--SSYTNYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-11*06': 'QVQLVESGG-GLVKPGGSLRLSCAASGFTF----SDYYMSWIRQAPGKGLEWVSYISSS--SSYTNYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-13*01': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYDMHWVRQATGKGLEWVSAIGTA---GDTYYPGSVK-GRFTISRENAKNSLYLQMNSLRAGDTAVYYCAR----------------------', 'IGHV3-13*02': 'EVHLVESGG-GLVQPGGALRLSCAASGFTF----SNYDMHWVRQATGKGLEWVSANGTA---GDTYYPGSVK-GRFTISRENAKNSLYLQMNSLRAGDTAVYYCAR----------------------', 'IGHV3-13*03': 'EVQLVESGG-GLVQPGGSLRLSCAACGFTF----SSYDMHWVRQATGKGLEWVSAIGTA---GDTYYPGSVK-GQFTISRENAKNSLYLQMNSLRAGDTAVYYCAR----------------------', 'IGHV3-13*04': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYDMHWVRQATGKGLEWVSAIGTA---GDTYYPGSVK-GRFTISRENAKNSLYLQMNSLRAGDTAVYYCAR----------------------', 'IGHV3-13*05': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYDMHWVRQATGKGLEWVSAIGTA---GDPYYPGSVK-GRFTISRENAKNSLYLQMNSLRAGDTAVYYCAR----------------------', 'IGHV3-15*01': 'EVQLVESGG-GLVKPGGSLRLSCAASGFTF----SNAWMSWVRQAPGKGLEWVGRIKSKTDGGTTDYAAPVK-GRFTISRDDSKNTLYLQMNSLKTEDTAVYYCTT----------------------', 'IGHV3-15*02': 'EVQLVESGG-ALVKPGGSLRLSCAASGFTF----SNAWMSWVRQAPGKGLEWVGRIKSKTDGGTTDYAAPVK-GRFTISRDDSKNTLYLQMNSLKTEDTAVYYCTT----------------------', 'IGHV3-15*03': 'EVQLVESAG-ALVQPGGSLRLSCAASGFTC----SNAWMSWVRQAPGKGLEWVGRIKSKANGGTTDYAAPVK-GRFTISRVDSKNTLYLQMNSLKTEDTAVYYCTT----------------------', 'IGHV3-15*04': 'EVQLVESGG-GLVKPGGSLRLSCAASGFTF----SNAWMSWVRQAPGKGLEWVGRIESKTDGGTTDYAAPVK-GRFTISRDDSKNTLYLQMNSLKTEDTAVYYCTT----------------------', 'IGHV3-15*05': 'EVQLVESGG-GLVKPGGSLRLSCAASGFTF----SNAWMSWVRQAPGKGLEWVGRIKSKTDGGTTDYAAPVK-GRFTISRDDSKNTLYLQMNSLKTEDTAVYYCTT----------------------', 'IGHV3-15*06': 'EVQLVESGG-GLVKPGGSLRLSCAASGFTF----SNAWMSWVRQAPGKGLEWVGRIKSKTDGGTTNYAAPVK-GRFTISRDDSKNTLYLQMNSLKTEDTAVYYCTT----------------------', 'IGHV3-15*07': 'EVQLVESGG-GLVKPGGSLRLSCAASGFTF----SNAWMNWVRQAPGKGLEWVGRIKSKTDGGTTDYAAPVK-GRFTISRDDSKNTLYLQMNSLKTEDTAVYYCTT----------------------', 'IGHV3-15*08': 'EVQLVESAG-GLVQPGGSLRLSCAASGFTC----SNAWMSWVRQAPGKGLEWVGCIKSKANGGTTDYAAPVK-GRFTISRDDSKNTLYLQMISLKTEDTAVYYCTT----------------------', 'IGHV3-20*01': 'EVQLVESGG-GVVRPGGSLRLSCAASGFTF----DDYGMSWVRQAPGKGLEWVSGINWN--GGSTGYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTALYHCAR----------------------', 'IGHV3-20*04': 'EVQLVESGG-GVVRPGGSLRLSCAASGFTF----DDYGMSWVRQAPGKGLEWVSGINWN--GGSTGYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTALYYCAR----------------------', 'IGHV3-21*01': 'EVQLVESGG-GLVKPGGSLRLSCAASGFTF----SSYSMNWVRQAPGKGLEWVSSISSS--SSYIYYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-21*02': 'EVQLVESGG-GLVKPGGSLRLSCAASGFTF----SSYSMNWVRQAPGKGLEWVSSISSS--SSYIYYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-21*03': 'EVQLVESGG-GLVKPGGSLRLSCAASGFTF----SSYSMNWVRQAPGKGLEWVSSISSS--SSYIYYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-21*06': 'EVQLVESGG-GLVKPGGSLRLSCAASGFTF----SSYSMNWVRQAPGKGLEWVSSISSS--SSYIYYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-23*01': 'EVQLLESGG-GLVQPGGSLRLSCAASGFTF----SSYAMSWVRQAPGKGLEWVSAISGS--GGSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-23*02': 
'EVQLLESGG-GLVQPGGSLRLSCAASGFTF----SSYAMSWVRQAPGKGLEWVSAISGS--GGSTYYGDSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-23*03': 'EVQLLESGG-GLVQPGGSLRLSCAASGFTF----SSYAMSWVRQAPGKGLEWVSVIYSG--GSSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-23*04': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYAMSWVRQAPGKGLEWVSAISGS--GGSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-23D*01': 'EVQLLESGG-GLVQPGGSLRLSCAASGFTF----SSYAMSWVRQAPGKGLEWVSAISGS--GGSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-30*01': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*02': 'QVQLVESGG-GVVQPGGSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAFIRYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-30*03': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*04': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*05': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEGTAVYYCAR----------------------', 'IGHV3-30*06': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*07': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*08': 'QVQLVDSGG-GVVQPGRSLRLSCAASAFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*09': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFAISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*10': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYTDSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*11': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*12': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*13': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNRLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*14': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*15': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMSSLRAEDTAVYYCAR----------------------', 'IGHV3-30*16': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*17': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30*18': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-30*19': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-30-3*02': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-30-3*03': 
'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-33*01': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVIWYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-33*02': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVIWYD--GSNKYYADSAK-GRFTISRDNSTNTLFLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-33*03': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVIWYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-33*04': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVIWYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-33*05': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVISYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-33*06': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVIWYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV3-33*07': 'QVQLVESGG-RVVQPGRSLRLSCAASGFTF----SRYGMYWVRQAPGKGLEWVAVIWYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-33*08': 'QVQLVESGG-GVVQPGRSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVAVIWYD--GSNKYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-35*02': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SNSDMNWVHQAPGKGLEWVSGVSWN--GSRTHYADSVK-GQFIISRDNSRNTLYLQTNSLRAEDTAVYYCVR----------------------', 'IGHV3-43*01': 'EVQLVESGG-VVVQPGGSLRLSCAASGFTF----DDYTMHWVRQAPGKGLEWVSLISWD--GGSTYYADSVK-GRFTISRDNSKNSLYLQMNSLRTEDTALYYCAKD---------------------', 'IGHV3-43*02': 'EVQLVESGG-GVVQPGGSLRLSCAASGFTF----DDYAMHWVRQAPGKGLEWVSLISGD--GGSTYYADSVK-GRFTISRDNSKNSLYLQMNSLRTEDTALYYCAKD---------------------', 'IGHV3-43D*03': 'EVQLVESGG-VVVQPGGSLRLSCAASGFTF----DDYAMHWVRQAPGKGLEWVSLISWD--GGSTYYADSVK-GRFTISRDNSKNSLYLQMNSLRAEDTALYYCAKD---------------------', 'IGHV3-43D*04': 'EVQLVESGG-VVVQPGGSLRLSCAASGFTF----DDYAMHWVRQAPGKGLEWVSLISWD--GGSTYYADSVK-GRFTISRDNSKNSLYLQMNSLRAEDTALYYCAKD---------------------', 'IGHV3-48*01': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYSMNWVRQAPGKGLEWVSYISSS--SSTIYYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-48*02': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYSMNWVRQAPGKGLEWVSYISSS--SSTIYYADSVK-GRFTISRDNAKNSLYLQMNSLRDEDTAVYYCAR----------------------', 'IGHV3-48*03': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYEMNWVRQAPGKGLEWVSYISSS--GSTIYYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-49*01': 'EVQLVESGG-GLVQPGRSLRLSCTASGFTF----GDYAMSWFRQAPGKGLEWVGFIRSKAYGGTTEYTASVK-GRFTISRDGSKSIAYLQMNSLKTEDTAVYYCTR----------------------', 'IGHV3-49*02': 'EVQLVESGG-GLVQPGPSLRLSCTASGFTF----GYYPMSWVRQAPGKGLEWVGFIRSKAYGGTTEYAASVK-GRFTISRDDSKSIAYLQMNSLKTEDTAVYYCTR----------------------', 'IGHV3-49*03': 'EVQLVESGG-GLVQPGRSLRLSCTASGFTF----GDYAMSWFRQAPGKGLEWVGFIRSKAYGGTTEYAASVK-GRFTISRDDSKSIAYLQMNSLKTEDTAVYYCTR----------------------', 'IGHV3-49*04': 'EVQLVESGG-GLVQPGRSLRLSCTASGFTF----GDYAMSWVRQAPGKGLEWVGFIRSKAYGGTTEYAASVK-GRFTISRDDSKSIAYLQMNSLKTEDTAVYYCTR----------------------', 'IGHV3-49*05': 'EVQLVESGG-GLVKPGRSLRLSCTASGFTF----GDYAMSWFRQAPGKGLEWVGFIRSKAYGGTTEYAASVK-GRFTISRDDSKSIAYLQMNSLKTEDTAVYYCTR----------------------', 'IGHV3-53*01': 'EVQLVESGG-GLIQPGGSLRLSCAASGFTV----SSNYMSWVRQAPGKGLEWVSVIYSG---GSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-53*02': 'EVQLVETGG-GLIQPGGSLRLSCAASGFTV----SSNYMSWVRQAPGKGLEWVSVIYSG---GSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-53*03': 
'EVQLVESGG-GLIQPGGSLRLSCAASGFTV----SSNYMSWVRQPPGKGLEWVSVIYSG---GSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-62*04': 'EVQLVKSGG-GLVQPGGSLRLSCAASGFTF----SSSAMHWVRQAPRKGLEWVSVISTS--GDTVLYTDSVK-GRFTISRDNAQNSLSLQMNSLRAEDMAVYYCVK----------------------', 'IGHV3-64*01': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEYVSAISSN--GGSTYYANSVK-GRFTISRDNSKNTLYLQMGSLRAEDMAVYYCAR----------------------', 'IGHV3-64*02': 'EVQLVESGE-GLVQPGGSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEYVSAISSN--GGSTYYADSVK-GRFTISRDNSKNTLYLQMGSLRAEDMAVYYCAR----------------------', 'IGHV3-64*03': 'EVQLVESGG-GLVQPGGSLRLSCSASGFTF----SSYAMHWVRQAPGKGLEYVSAISSN--GGSTYYADSVK-GRFTISRDNSKNTLYVQMSSLRAEDTAVYYCVK----------------------', 'IGHV3-64*04': 'QVQLVESGG-GLVQPGGSLRLSCSASGFTF----SSYAMHWVRQAPGKGLEYVSAISSN--GGSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-64*05': 'EVQLVESGG-GLVQPGGSLRLSCSASGFTF----SSYAMHWVRQAPGKGLEYVSAISSN--GGSTYYADSVK-GRFTISRDNSKNTLYVQMSSLRAEDTAVYYCVK----------------------', 'IGHV3-64*07': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYAMHWVRQAPGKGLEYVSAISSN--GGSTYYADSVK-GRFTISRDNSKNTLYLQMGSLRAEDMAVYYCAR----------------------', 'IGHV3-64D*06': 'EVQLVESGG-GLVQPGGSLRLSCSASGFTF----SSYAMHWVRQAPGKGLEYVSAISSN--GGSTYYADSVK-GRFTISRDNSKNTLYLQMSSLRAEDTAVYYCVK----------------------', 'IGHV3-64D*08': 'EVQLVESGG-GLVQPGGSLRLSCSASGFTF----SSYAMHWVRQAPGKGLEYVSAISSN--GGSTYYADSVK-GRFTISRDNSKNTLYLQMSSLRAEDTAVYYCVK----------------------', 'IGHV3-64D*09': 'EVQLVESGG-GLVQPGGSLRLSCSASGFTF----SSYAMHWVRQAPGKGLEYVSAISSN--GGSTYYADSVK-GRFTISRDNSKNTLYLQMSSLRAEDTAVYYCVK----------------------', 'IGHV3-66*01': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTV----SSNYMSWVRQAPGKGLEWVSVIYSG---GSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-66*02': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTV----SSNYMSWVRQAPGKGLEWVSVIYSG---GSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-66*03': 'EVQLVESGG-GLIQPGGSLRLSCAASGFTV----SSNYMSWVRQAPGKGLEWVSVIYSC---GSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-66*04': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTV----SSNYMSWVRQAPGKGLEWVSVIYSG---GSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-7*01': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYWMSWVRQAPGKGLEWVANIKQD--GSEKYYVDSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-7*02': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYWMSWVRQAPGKGLEWVANIKQD--GSEKYYVDSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-7*05': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYWMSWVRQAPGKGLEWVANIKQD--GSEKYYVDSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-72*01': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SDHYMDWVRQAPGKGLEWVGRTRNKANSYTTEYAASVK-GRFTISRDDSKNSLYLQMNSLKTEDTAVYYCAR----------------------', 'IGHV3-73*01': 'EVQLVESGG-GLVQPGGSLKLSCAASGFTF----SGSAMHWVRQASGKGLEWVGRIRSKANSYATAYAASVK-GRFTISRDDSKNTAYLQMNSLKTEDTAVYYCTR----------------------', 'IGHV3-73*02': 'EVQLVESGG-GLVQPGGSLKLSCAASGFTF----SGSAMHWVRQASGKGLEWVGRIRSKANSYATAYAASVK-GRFTISRDDSKNTAYLQMNSLKTEDTAVYYCTR----------------------', 'IGHV3-74*01': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYWMHWVRQAPGKGLVWVSRINSD--GSSTSYADSVK-GRFTISRDNAKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-74*02': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYWMHWVRQAPGKGLVWVSRINSD--GSSTSYADSVK-GRFTISRDNAKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-74*03': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYWMHWVRQAPGKGLVWVSRINSD--GSSTTYADSVK-GRFTISRDNAKNTLYLQMNSLRAEDTAVYYCAR----------------------', 'IGHV3-9*01': 
'EVQLVESGG-GLVQPGRSLRLSCAASGFTF----DDYAMHWVRQAPGKGLEWVSGISWN--SGSIGYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTALYYCAKD---------------------', 'IGHV3-9*02': 'EVQLVESGG-GLVQPGRSLRLSCAASGFTS----DDYAMHWVRQAPGKGLEWVSGISWN--SGSIGYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTALYYCAKD---------------------', 'IGHV3-9*03': 'EVQLVESGG-GLVQPGRSLRLSCAASGFTF----DDYAMHWVRQAPGKGLEWVSGISWN--SGSIGYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDMALYYCAKD---------------------', 'IGHV3-NL1*01': 'QVQLVESGG-GVVQPGGSLRLSCAASGFTF----SSYGMHWVRQAPGKGLEWVSVIYSG--GSSTYYADSVK-GRFTISRDNSKNTLYLQMNSLRAEDTAVYYCAK----------------------', 'IGHV4-28*01': 'QVQLQESGP-GLVKPSDTLSLTCAVSGYSIS---SSNWWGWIRQPPGKGLEWIGYIYYS---GSTYYNPSLK-SRVTMSVDTSKNQFSLKLSSVTAVDTAVYYCAR----------------------', 'IGHV4-28*02': 'QVQLQESGP-GLVKPSQTLSLTCAVSGYSIS---SSNWWGWIRQPPGKGLEWIGYIYYS---GSIYYNPSLK-SRVTMSVDTSKNQFSLKLSSVTAVDTAVYYCAR----------------------', 'IGHV4-28*03': 'QVQLQESGP-GLVKPSDTLSLTCAVSGYSIS---SSNWWGWIRQPPGKGLEWIGYIYYS---GSTYYNPSLK-SRVTMSVDTSKNQFSLKLSSVTAVDTAVYYCAR----------------------', 'IGHV4-28*04': 'QVQLQESGP-GLVKPSDTLSLTCAVSGYSIS---SSNWWGWIRQPPGKGLEWIGYIYYS---GSTYYNPSLK-SRVTMSVDTSKNQFSLKLSSVTAVDTGVYYCAR----------------------', 'IGHV4-28*07': 'QVQLQESGP-GLVKPSDTLSLTCAVSGYSIS---SSNWWGWIRQPPGKGLEWIGYIYYS---GSTYYNPSLK-SRVTMSVDTSKNQFSLKLSSVTAVDTAVYYCAR----------------------', 'IGHV4-30-2*01': 'QLQLQESGS-GLVKPSQTLSLTCAVSGGSIS--SGGYSWSWIRQPPGKGLEWIGYIYHS---GSTYYNPSLK-SRVTISVDRSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-30-2*03': 'QLQLQESGS-GLVKPSQTLSLTCAVSGGSIS--SGGYSWSWIRQPPGKGLEWIGSIYYS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-30-2*05': 'QLQLQESGS-GLVKPSQTLSLTCAVSGGSIS--SGGYSWSWIRQPPGKGLEWIGYIYHS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-30-2*06': 'QLQLQESGS-GLVKPSQTLSLTCAVSGGSIS--SGGYSWSWIRQSPGKGLEWIGYIYHS---GSTYYNPSLK-SRVTISVDRSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-30-4*01': 'QVQLQESGP-GLVKPSQTLSLTCTVSGGSIS--SGDYYWSWIRQPPGKGLEWIGYIYYS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-30-4*02': 'QVQLQESGP-GLVKPSDTLSLTCTVSGGSIS--SGDYYWSWIRQPPGKGLEWIGYIYYS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-30-4*07': 'QVQLQESGP-GLVKPSQTLSLTCAVSGGSIS--SGGYSWSWIRQPPGKGLEWIGYIYYS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-31*01': 'QVQLQESGP-GLVKPSQTLSLTCTVSGGSIS--SGGYYWSWIRQHPGKGLEWIGYIYYS---GSTYYNPSLK-SLVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-31*02': 'QVQLQESGP-GLVKPSQTLSLTCTVSGGSIS--SGGYYWSWIRQHPGKGLEWIGYIYYS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-31*03': 'QVQLQESGP-GLVKPSQTLSLTCTVSGGSIS--SGGYYWSWIRQHPGKGLEWIGYIYYS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-31*10': 'QVQLQESGP-GLLKPSQTLSLTCTVSGGSIS--SGGYYWSWIRQHPGKGLEWIGCIYYS---GSTYYNPSLK-SRVTISVDPSKNQFSLKPSSVTAADTAVDYCAR----------------------', 'IGHV4-34*01': 'QVQLQQWGA-GLLKPSETLSLTCAVYGGSF----SGYYWSWIRQPPGKGLEWIGEINHS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-34*02': 'QVQLQQWGA-GLLKPSETLSLTCAVYGGSF----SGYYWSWIRQPPGKGLEWIGEINHS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-34*04': 'QVQLQQWGA-GLLKPSETLSLTCAVYGGSF----SGYYWSWIRQPPGKGLEWIGEINHS---GSTNNNPSLK-SRATISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-34*05': 'QVQLQQWGA-GLLKPSETLSLTCAVYGGSF----SGYYWCWIRQPLGKGLEWIGEINHS---GSTNNNPSLK-SRATISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-34*09': 
'QVQLQESGP-GLVKPSQTLSLTCAVYGGSF----SGYYWSWIRQPPGKGLEWIGEINHS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-34*10': 'QVQLQESGP-GLVKPSETLSLTCAVYGGSF----SGYYWSWIRQPPGKGLEWIGEINHS---GSTNYNPSLK-SRITMSVDTSKNQFYLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-34*11': 'QVQLQQWGA-GLLKPSETLSLTCAVYGGSV----SGYYWSWIRQPPGKGLEWIGYIYYS---GSTNNNPSLK-SRATISVDTSKNQFSLNLSSVTAADTAVYCCAR----------------------', 'IGHV4-34*12': 'QVQLQQWGA-GLLKPSETLSLTCAVYGGSF----SGYYWSWIRQPPGKGLEWIGEIIHS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-38-2*01': 'QVQLQESGP-GLVKPSETLSLTCAVSGYSIS---SGYYWGWIRQPPGKGLEWIGSIYHS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-38-2*02': 'QVQLQESGP-GLVKPSETLSLTCTVSGYSIS---SGYYWGWIRQPPGKGLEWIGSIYHS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-39*01': 'QLQLQESGP-GLVKPSETLSLTCTVSGGSIS--SSSYYWGWIRQPPGKGLEWIGSIYYS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-39*02': 'QLQLQESGP-GLVKPSETLSLTCTVSGGSIS--SSSYYWGWIRQPPGKGLEWIGSIYYS---GSTYYNPSLK-SRVTISVDTSKNHFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-39*06': 'RLQLQESGP-GLVKPSETLSLTCTVSGGSIS--SSSYYWGWIRQPPGKGLEWIGSIYYS---GSTYYNPSLK-SRVTISVDTSKNQFPLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-39*07': 'QLQLQESGP-GLVKPSETLSLTCTVSGGSIS--SSSYYWGWIRQPPGKGLEWIGSIYYS---GSTYYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-4*01': 'QVQLQESGP-GLVKPPGTLSLTCAVSGGSIS---SSNWWSWVRQPPGKGLEWIGEIYHS---GSTNYNPSLK-SRVTISVDKSKNQFSLKLSSVTAADTAVYCCAR----------------------', 'IGHV4-4*02': 'QVQLQESGP-GLVKPSGTLSLTCAVSGGSIS---SSNWWSWVRQPPGKGLEWIGEIYHS---GSTNYNPSLK-SRVTISVDKSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-4*07': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSI----SSYYWSWIRQPAGKGLEWIGRIYTS---GSTNYNPSLK-SRVTMSVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-4*08': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSI----SSYYWSWIRQPPGKGLEWIGYIYTS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-59*01': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSI----SSYYWSWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-59*02': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSV----SSYYWSWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-59*07': 'QVQLQESGP-GLVKPSDTLSLTCTVSGGSI----SSYYWSWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-59*08': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSI----SSYYWSWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-59*10': 'QVQLQQWGA-GLLKPSETLSLTCAVYGGSI----SSYYWSWIRQPAGKGLEWIGRIYTS---GSTNYNPSLK-SRVTMSVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-59*11': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSI----SSHYWSWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-59*13': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSI----SSYYWSWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-61*01': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSVS--SGSYYWSWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-61*02': 'QVQLQESGP-GLVKPSQTLSLTCTVSGGSIS--SGSYYWSWIRQPAGKGLEWIGRIYTS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-61*03': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSVS--SGSYYWSWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDTSKNHFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-61*05': 
'QLQLQESGP-GLVKPSETLSLTCTVSGGSIS--SSSYYWGWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDKSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-61*08': 'QVQLQESGP-GLVKPSETLSLTCTVSGGSVS--SGGYYWSWIRQPPGKGLEWIGYIYYS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV4-61*09': 'QVQLQESGP-GLVKPSQTLSLTCTVSGGSIS--SGSYYWSWIRQPAGKGLEWIGHIYTS---GSTNYNPSLK-SRVTISVDTSKNQFSLKLSSVTAADTAVYYCAR----------------------', 'IGHV5-10-1*01': 'EVQLVQSGA-EVKKPGESLRISCKGSGYSF----TSYWISWVRQMPGKGLEWMGRIDPS--DSYTNYSPSFQ-GHVTISADKSISTAYLQWSSLKASDTAMYYCAR----------------------', 'IGHV5-10-1*02': 'EVQLVQSGA-EVKKPGESLRISCKGSGYSF----TSYWISWVRQMPGKGLEWMGRIDPS--DSYTNYSPSFQ-GHVTISADKSISTAYLQWSSLKASDTAMYYCAR----------------------', 'IGHV5-10-1*03': 'EVQLVQSGA-EVKKPGESLRISCKGSGYSF----TSYWISWVRQMPGKGLEWMGRIDPS--DSYTNYSPSFQ-GHVTISADKSISTAYLQWSSLKASDTAMYYCAR----------------------', 'IGHV5-10-1*04': 'EVQLVQSGA-EVKKPGESLRISCKGSGYSF----TSYWISWVRQMPGKGLEWMGRIDPS--DSYTNYSPSFQ-GQVTISADKSISTAYLQWSSLKASDTAMYYCAR----------------------', 'IGHV5-51*01': 'EVQLVQSGA-EVKKPGESLKISCKGSGYSF----TSYWIGWVRQMPGKGLEWMGIIYPG--DSDTRYSPSFQ-GQVTISADKSISTAYLQWSSLKASDTAMYYCAR----------------------', 'IGHV5-51*02': 'EVQLVQSGA-EVKKPGESLKISCKGSGYSF----TSYWTGWVRQMPGKGLEWMGIIYPG--DSDTRYSPSFQ-GQVTISADKSISTAYLQWSSLKASDTAMYYCAR----------------------', 'IGHV5-51*03': 'EVQLVQSGA-EVKKPGESLKISCKGSGYSF----TSYWIGWVRQMPGKGLEWMGIIYPG--DSDTRYSPSFQ-GQVTISADKSISTAYLQWSSLKASDTAMYYCAR----------------------', 'IGHV5-51*04': 'EVQLVQSGA-EVKKPGESLKISCKGSGYSF----TSYWIGWVRQMPGKGLEWMGIIYPG--DSDTRYSPSFQ-GQVTISADKPISTAYLQWSSLKASDTAMYYCAR----------------------', 'IGHV5-51*07': 'EVQLVQSGA-EVKKPGESLKISCKGSGYSF----TSYWIGWVHQMPGKGLEWMGIIYPG--DSDTRYSPSFQ-GQVTISADKSISTAYLQWSSLKASDTAMYYCAR----------------------', 'IGHV6-1*01': 'QVQLQQSGP-GLVKPSQTLSLTCAISGDSVS--SNSAAWNWIRQSPSRGLEWLGRTYYRS-KWYNDYAVSVK-SRITINPDTSKNQFSLQLNSVTPEDTAVYYCAR----------------------', 'IGHV6-1*02': 'QVQLQQSGP-GLVKPSQTLSLTCAISGDSVS--SNSAAWNWIRQSPSRGLEWLGRTYYRS-KWYNDYAVSVK-SRITINPDTSKNQFSLQLNSVTPEDTAVYYCAR----------------------', 'IGHV7-4-1*01': 'QVQLVQSGS-ELKKPGASVKVSCKASGYTF----TSYAMNWVRQAPGQGLEWMGWINTN--TGNPTYAQGFT-GRFVFSLDTSVSTAYLQICSLKAEDTAVYYCAR----------------------', 'IGHV7-4-1*02': 'QVQLVQSGS-ELKKPGASVKVSCKASGYTF----TSYAMNWVRQAPGQGLEWMGWINTN--TGNPTYAQGFT-GRFVFSLDTSVSTAYLQISSLKAEDTAVYYCAR----------------------'}, 'mouse': {'IGHV1-11*01': 'QIQLQQSGA-ELASPGASVTLSCKASGYTF----TDHIMNWVKKRPGQGLEWIGRIYPV--SGETNYNQKFM-GKATFSVDRSSSTVYMVLNSLTSEDPAVYYCGR----------------------', 'IGHV1-12*01': 'QAYLQQSGA-ELVRPGASVKMSCKASGYTF----TSYNMHWVKQTPRQGLEWIGAIYPG--NGDTSYNQKFK-GKATLTVDKSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1-15*01': 'QVQLQQSGA-ELVRPGASVTLSCKASGYTF----TDYEMHWVKQTPVHGLEWIGAIDPE--TGGTAYNQKFK-GKAILTADKSSSTAYMELRSLTSEDSAVYYCTR----------------------', 'IGHV1-18*01': 'EVQLQQSGP-ELVKPGASVKIPCKASGYTF----TDYNMDWVKQSHGKSLEWIGDINPN--NGGTIYNQKFK-GKATLTVDKSSSTAYMELRSLTSEDTAVYYCAR----------------------', 'IGHV1-19*01': 'EVQLQQSGP-VLVKPGASVKMSCKASGYTF----TDYYMNWVKQSHGKSLEWIGVINPY--NGGTSYNQKFK-GKATLTVDKSSSTAYMELNSLTSEDSAVYYCAR----------------------', 'IGHV1-20*01': 'EVQLQQSGP-ELVKPGDSVKISCKASGYSF----TGYFMNWVMQSHGKSLEWIGRINPY--NGDTFYNQKFK-GKATLTVDKSSSTAHMELRSLTSEDSAVYYCAR----------------------', 'IGHV1-20*02': 'EVQLQQSGP-ELVKPGASVKISCKASGYSF----TGYFMNWVMQSHGKSLEWIGRINPY--NGDTFYNQKFK-GKATLTVDKSSSTAHMELRSLASEDSAVYYCAR----------------------', 'IGHV1-22*01': 'EVQLQQSGP-ELVKPGASVKMSCKASGYTF----TDYNMHWVKQSHGKSLEWIGYINPN--NGGTSYNQKFK-GKATLTVNKSSSTAYMELRSLTSEDSAVYYCAR----------------------', 'IGHV1-26*01': 
'EVQLQQSGP-ELVKPGASVKISCKASGYTF----TDYYMNWVKQSHGKSLEWIGDINPN--NGGTSYNQKFK-GKATLTVDKSSSTAYMELRSLTSEDSAVYYCAR----------------------', 'IGHV1-31*01': 'EVQLQQSGP-ELVKPGASVKISCKASGYSF----TGYYMHWVKQSHGNILDWIGYIYPY--NGVSSYNQKFK-GKATLTVDKSSSTAYMELRSLTSEDSAVYYCAR----------------------', 'IGHV1-34*01': 'EVQLQQSGP-ELVKPGASVKMSCKASGYTF----TDYYMHWVKQSHGKSLEWIGYIYPN--NGGNGYNQKFK-GKATLTVDKSSSTAYMELRSLTSEDSAVYYCAR----------------------', 'IGHV1-34*02': 'EVQLQQSGP-ELVKPGDSVKMSCKASGYTF----TDYYMDWVKQSHGKSLEWIGYIYPN--NGGTSYNQKFK-GKATLTVDKSSSTAYMELHSLTSEDSAVYYCAR----------------------', 'IGHV1-36*01': 'EVQLQQSGP-VLVKPGPSVKISCKASGFTF----TDYYMHWVKQSHGKSLEWIGLVYPY--NGGTSYNQKFK-GKATLTVDTSSSTAYMELNSLTSEDSAVYYCAR----------------------', 'IGHV1-37*01': 'EVQLQQSGP-ELVKPGASVKISCKASGYSF----TGYFMNWVKQSHGKSLEWIGRINPY--NGDTFYNQKFK-GKATLTVDKSSSTAHMELLSLTSEDFAVYYCAR----------------------', 'IGHV1-39*01': 'EFQLQQSGP-ELVKPGASVKISCKASGYSF----TDYNMNWVKQSNGKSLEWIGVINPN--YGTTSYNQKFK-GKATLTVDQSSSTAYMQLNSLTSEDSAVYYCAR----------------------', 'IGHV1-4*01': 'QVQLQQSGA-ELARPGASVKMSCKASGYTF----TSYTMHWVKQRPGQGLEWIGYINPS--SGYTKYNQKFK-DKATLTADKSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-4*02': 'QVQLQQSAA-ELARPGASVKMSCKASGYTF----TSYTMHWVKQRPGQGLEWIGYINPS--SGYTEYNQKFK-DKTTLTADKSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-42*01': 'EVQLQQSGP-ELVKPGASVKISCKASGYSF----TGYYMNWVKQSPEKSLEWIGEINPS--TGGTTYNQKFK-AKATLTVDKSSSTAYMQLKSLTSEDSAVYYCAR----------------------', 'IGHV1-43*01': 'EVKLQQSGP-ELVKPGASVKISCKASGYSF----TGYYMHWVKQSSEKSLEWIGEINPS--TGGTSYNQKFK-GKATLTVDKSSSTAYMQLKSLTSEDSAVYYCAR----------------------', 'IGHV1-47*01': 'QVQLQQSGA-ELVKPGASVKMSCKASGYTF----TTYPIEWMKQNHGKSLEWIGNFHPY--NDDTKYNEKFK-GKATLTVEKSSSTVYLELSRLTSDDSAVYYCAR----------------------', 'IGHV1-49*01': 'QRELQQSGA-ELVRPGSSVKLSCKDSYFAF----MASAMHWVKQRPGHGLEWIGSFTMY--SDATEYSENFK-GKATLTANTSSSTAYMELSSLTSEDSAVYYCAR----------------------', 'IGHV1-5*01': 'EVQLQQSGT-VLARPGASVKMSCKTSGYTF----TSYWMHWVKQRPGQGLEWIGAIYPG--NSDTSYNQKFK-GKAKLTAVTSASTAYMELSSLTNEDSAVYYCTR----------------------', 'IGHV1-50*01': 'QVQLQQPGA-ELVKPGASVKLSCKASGYTF----TSYWMQWVKQRPGQGLEWIGEIDPS--DSYTNYNQKFK-GKATLTVDTSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-52*01': 'QVQLQQPGA-ELVRPGSSVKLSCKASGYTF----TSYWMHWVKQRPIQGLEWIGNIDPS--DSETHYNQKFK-DKATLTVDKSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-53*01': 'QVQLQQPGT-ELVKPGASVKLSCKASGYTF----TSYWMHWVKQRPGQGLEWIGNINPS--NGGTNYNEKFK-SKATLTVDKSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-54*01': 'QVQLQQSGA-ELVRPGTSVKVSCKASGYAF----TNYLIEWVKQRPGQGLEWIGVINPG--SGGTNYNEKFK-GKATLTADKSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1-54*02': 'QVQLQQSGA-ELVRPGTSVKVSCKASGYAF----TNYLIEWVKQRPGQGLEWIGVINPG--SGGTNYNEKFK-GKATLTADKSSNTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1-55*01': 'QVQLQQPGA-ELVKPGASVKMSCKASGYTF----TSYWITWVKQRPGQGLEWIGDIYPG--SGSTNYNEKFK-SKATLTVDTSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-56*01': 'QVQLQQSGP-ELVRPGASVKISCKAPGYTF----TSHWMQWVRQRPGQGLEWIGEIFPG--SGSTYYNEKFK-GKATLTVDTSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1-58*01': 'EVQLQQSGA-ELVRPGSSVKMSCKTSGYTF----TSYGINWVKQRPGQGLEWIGYIYIG--NGYTEYNEKFK-GKATLTSDTSSSTAYMQLSSLTSEDSAIYFCAR----------------------', 'IGHV1-59*01': 'QVQLQQPGA-ELVRPGTSVKLSCKASGYTF----TSYWMHWVKQRPGQGLEWIGVIDPS--DSYTNYNQKFK-GKATLTVDTSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-61*01': 'QVQLQQPGA-ELVRPGSSVKLSCKASGYTF----TSYWMDWVKQRPGQGLEWIGNIYPS--DSETHYNQKFK-DKATLTVDKSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-62-1*01': 
'QVQLQQSGA-ELVRPGASVKLSCKASGYTF----TSYWMQWVKQRPGQGLEWIGEIFPG--SGSTYYNEKFK-GKATLTVDTSSSTAYMQLSSLTAENSAIYLCKE----------------------', 'IGHV1-62-2*01': 'QVQLQQSGA-ELVKPGASVKLSCKASGYTF----TEYTIHWVKQRSGQGLEWIGWFYPG--SGSIKYNEKFK-DKATLTADKSSSTVYMELSRLTSEDSAVYFCARHE--------------------', 'IGHV1-63*01': 'QVQLQQSGA-ELVRPGTSVKMSCKASGYTF----TNYWIGWAKQRPGHGLEWIGDIYPG--GGYTNYNEKFK-GKATLTADKSSSTAYMQFSSLTSEDSAIYYCAR----------------------', 'IGHV1-63*02': 'QVQLQQSGA-ELVRPGTSVKMSCKAAGYTF----TNYWIGWVKQRPGHGLEWIGDIYPG--GGYTNYNEKFK-GKATLTADTSSSTAYMQLSSLTSEDSAIYYCAR----------------------', 'IGHV1-64*01': 'QVQLQQPGA-ELVKPGASVKLSCKASGYTF----TSYWMHWVKQRPGQGLEWIGMIHPN--SGSTNYNEKFK-SKATLTVDKSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-66*01': 'QVQLQQSGP-ELVKPGASVKISCKASGYSF----TSYYIHWVKQRPGQGLEWIGWIYPG--SGNTKYNEKFK-GKATLTADTSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-69*01': 'QVQLQQPGA-ELVMPGASVKLSCKASGYTF----TSYWMHWVKQRPGQGLEWIGEIDPS--DSYTNYNQKFK-GKSTLTVDKSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-69*02': 'QVQLQQPGA-ELVKPGASVKLSCKASGYTF----TSYWMHWVKQRPGQGLEWIGEIDPS--DSYTNYNQKFK-GKATLTVDKSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-7*01': 'QVQLQQSGA-ELAKPGASVKLSCKASGYTF----TSYWMHWVKQRPGQGLEWIGYINPS--SGYTKYNQKFK-DKATLTADKSSSTAYMQLSSLTYEDSAVYYCAR----------------------', 'IGHV1-71*01': 'QVQLQQSGA-ELVKPGASVKLSCKASGYTF----TEYTIHWVKQRSGQGLEWIGWFYPG--SGSIKYNEKFK-DKATLTADKSSSTVYMELSRLTSEDSAVYFCARHE--------------------', 'IGHV1-72*01': 'QVQLQQPGA-ELVKPGASVKLSCKASGYTF----TSYWMHWVKQRPGRGLEWIGRIDPN--SGGTKYNEKFK-SKATLTVDKPSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1-72*04': 'QVQLQQPGA-ELVKPGASVKLSCKASGYTF----TSYWMHWVKQRPGRGLEWIGRIDPN--SGGTKYNEKFK-SKATLTVDTSSSTAYMQLSSLTSEDSAVHYCAR----------------------', 'IGHV1-74*01': 'QVQLQQPGA-ELVKPGASVKVSCKASGYTF----TSYWMHWVKQRPGQGLEWIGRIHPS--DSDTNYNQKFK-GKATLTVDKSSSTAYMQLSSLTSEDSAVYYCAI----------------------', 'IGHV1-74*04': 'HVQLQQPGA-ELVKPGASVKVSCKASGYTF----TSYWMHWVKQRPGQGLEWIGRIHPS--DSDTNYNQKFK-GKATLTVDKSSSTAYMQLSSLTSEDSAVYYCAI----------------------', 'IGHV1-75*01': 'QVQLQQSGP-ELVKPGASVKISCKASGYTF----TDYYINWVKQRPGQGLEWIGWIFPG--SGSTYYNEKFK-GKATLTVDKSSSTAYMLLSSLTSEDSAVYFCAR----------------------', 'IGHV1-76*01': 'QVQLKQSGA-ELVRPGASVKLSCKASGYTF----TDYYINWVKQRPGQGLEWIARIYPG--SGNTYYNEKFK-GKATLTAEKSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1-77*01': 'QVQLKQSGA-ELVKPGASVKISCKASGYTF----TDYYINWVKQRPGQGLEWIGKIGPG--SGSTYYNEKFK-GKATLTADKSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1-78*01': 'QVQLQQSDA-ELVKPGASVKISCKVSGYTF----TDHTIHWMKQRPEQGLEWIGYIYPR--DGSTKYNEKFK-GKATLTADKSSSTAYMQLNSLTSEDSAVYFCAR----------------------', 'IGHV1-80*01': 'QVQLQQSGA-ELVKPGASVKISCKASGYAF----SSYWMNWVKQRPGKGLEWIGQIYPG--DGDTNYNGKFK-GKATLTADKSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1-81*01': 'QVQLQQSGA-ELARPGASVKLSCKASGYTF----TSYGISWVKQRTGQGLEWIGEIYPR--SGNTYYNEKFK-GKATLTADKSSSTAYMELRSLTSEDSAVYFCAR----------------------', 'IGHV1-82*01': 'QVQLQQSGP-ELVKPGASVKISCKASGYAF----SSSWMNWVKQRPGKGLEWIGRIYPG--DGDTNYNGKFK-GKATLTADKSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1-84*01': 'QIQLQQSGP-ELVKPGASVKISCKASGYTF----TDYYINWVKQRPGQGLEWIGWIYPG--SGNTKYNEKFK-GKATLTVDTSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1-85*01': 'QVQLQQSGP-ELVKPGASVKLSCKASGYTF----TSYDINWVKQRPGQGLEWIGWIYPR--DGSTKYNEKFK-GKATLTVDTSSSTAYMELHSLTSEDSAVYFCAR----------------------', 'IGHV1-87*01': 'QVQLQQSGA-ELARPGASVKMSCKASGYTF----TSYWMQWVKQRPGQGLEWIGAIYPG--DGDTRYTQKFK-GRATLTADKSSSTAYMQLSSLTSEDSAVYYCAT----------------------', 'IGHV1-9*01': 
'QVQLQQSGA-ELMKPGASVKLSCKATGYTF----TGYWIEWVKQRPGHGLEWIGEILPG--SGSTNYNEKFK-GKATFTADTSSNTAYMQLSSLTTEDSAIYYCAR----------------------', 'IGHV10-1*01': 'EVQLVESGG-GLVQPKGSLKLSCAASGFSF----NTYAMNWVRQAPGKGLEWVARIRSKSNNYATYYADSVK-DRFTISRDDSESMLYLQMNNLKTEDTAMYYCVR----------------------', 'IGHV10-1*02': 'EVQLVESGG-GLVQPKGSLKLSCAASGFTF----NTYAMNWVRQAPGKGLEWVARIRSKSNNYATYYADSVK-DRFTISRDDSQSMLYLQMNNLKTEDTAMYYCVS----------------------', 'IGHV10-3*01': 'EVQLVESGG-GLVQPKGSLKLSCAASGFTF----NTYAMHWVRQAPGKGLEWVARIRSKSSNYATYYADSVK-DRFTISRDDSQSMLYLQMNNLKTEDTAMYYCVR----------------------', 'IGHV10-3*02': 'EVQLVESGG-GLVQPKGSLKLSCAASVFTF----NTYAMHWVRQAPGKGLEWVARIRSKSSNYATYYADSVK-DRFTISRDDSQSMLYLQMNNLKTEDTAMYYCVR----------------------', 'IGHV10-3*03': 'EVQLVESGG-GLVQPKGSLKLSCAASVFTF----NTYAMHWVCQAPGKGLEWVARIRSKSNNYATYYADSVK-DRFTISRDDSQSMLYLQMNNLKTEDTAMYYCVR----------------------', 'IGHV10S3*01': 'EVQLVETGG-GLVQPKGSLKLSCAASGFTF----NTNAMNWVRQAPGKGLEWVARIRSKSNNYATYYADSVK-DRFTISRDDSQSMLYLQMNNLKTEDTAMYYCVR----------------------', 'IGHV11-1*01': 'EVQLLETGE-GLVPPGGSRGLSCEGSGFTF----SGFWMSWVRQTPGKTLEWIGDINSD--GSAINYAPSIK-DRFTIFRDNDKSTLYLQMSNVRSEDTATYFCMR----------------------', 'IGHV11-1*02': 'EVQLLETGG-GLVQPGGSRGLSCEGSGFTF----SGFWMSWVRQTPGKTVEWIGDINSD--GSAINYAPSIK-DRFTIFRDNDKSTLYLQMSNVRSEDPATYFCMR----------------------', 'IGHV11-2*01': 'EVQLLETGG-GLVQPGGSRGLSCEGSGFTF----SGFWMSWVRQTPGKTLEWIGDINSD--GSAINYAPSIK-DRFTIFRDNDKSTLYLQMSNVRSEDTATYFCMR----------------------', 'IGHV11-2*02': 'EVQLLETGG-GLVQPGGSRGLSCEGSGFTF----SGFWMSWVRQTPGKTLEWIGDINSD--GSAINYAPSIK-DRFTIFRDNDKSTLYLQMSNVRSEDTATYFCMR----------------------', 'IGHV12-1-1*01': 'QIQLKESGP-AVIKPSQSLSLTCIVSGFSIT--SSSYCWHWIRQPPGKGLEWMGRICYE---GSIYYSPSIK-SRSTISRDTSLNKFFIQLSSVTNEDTAMYYCSREN--------------------', 'IGHV12-3*01': 'QMQLQESGP-GLVKPSQSLFLTCSITGFPIT---SGYYWIWIRQSPGKPLEWMGYITHS---GETFYNPSLQ-SPISITRETSKNQFFLQLNSVTTEDTAMYYCAGDR--------------------', 'IGHV12-3*02': 'QMQLQESGP-GLVKPSQSLFLACSITGFPIT---SGYYWIWIRQSPGKPLEWMGYITHS---GETFYNPSLQ-SPISITRETSKNQFFLQLNSVTTEDTAMYYCAGDR--------------------', 'IGHV13-2*01': 'QVQLVETGG-GLVRPGNSLKLSCVTSGFTF----SNYRMHWLRQPPGKRLEWIAVITVKSDNYGANYAESVK-GRFAISRDDSKSSVYLEMNRLREEDTATYFCSR----------------------', 'IGHV13-2*02': 'QVQLVETGG-GLVRPGNSLKLSCVTSGFTF----SNYRMHWLRQPPGKRLEWIAVITVKSDNYGANYAESVK-GRFTISRDDSKSSVYLQMNRLREEDTATYYCSR----------------------', 'IGHV14-1*01': 'EVQLQQSGA-ELVRPGASVKLSCTASGFNI----KDYYMHWVKQRPEQGLEWIGRIDPE--DGDTEYAPKFQ-GKATMTADTSSNTAYLQLSSLTSEDTAVYYCTT----------------------', 'IGHV14-1*02': 'EVQLQQSGA-ELVRPGALVKLSCKASGFNI----KDYYMHWVKQRPEQGLEWIGWIDPE--NGNTIYDPKFQ-GKASITADTSSNTAYLQLSSLTSEDTAVYYCAR----------------------', 'IGHV14-2*01': 'EVQLQQSGA-ELVKPGASVKLSCTASGFNI----KDYYMHWVKQRTEQGLEWIGRIDPE--DGETKYAPKFQ-GKATITADTSSNTAYLQLSSLTSEDTAVYYCAR----------------------', 'IGHV14-3*01': 'EVQLQQSVA-ELVRPGASVKLSCTASGFNI----KNTYMHWVKQRPEQGLEWIGRIDPA--NGNTKYAPKFQ-GKATITADTSSNTAYLQLSSLTSEDTAIYYCAR----------------------', 'IGHV14-3*02': 'EVQLQQSGA-ELVKPGASVKLSCTASGFNI----KDTYMHWVKQRPEQGLEWIGRIDPA--NGNTKYDPKFQ-GKATITADTSSNTAYLQLSSLTSEDTAVYYCAR----------------------', 'IGHV14-4*01': 'EVQLQQSGA-ELVRPGASVKLSCTASGFNI----KDDYMHWVKQRPEQGLEWIGWIDPE--NGDTEYASKFQ-GKATITADTSSNTAYLQLSSLTSEDTAVYYCTT----------------------', 'IGHV14-4*02': 'EVQLQQSGA-ELVRSGASVKLSCTASGFNI----KDYYMHWVKQRPEQGLEWIGWIDPE--NGDTEYAPKFQ-GKATMTADTSSNTAYLQLSSLTSEDTAVYYCNA----------------------', 'IGHV15-2*01': 'QVHLQQSGS-ELRSPGSSVKLSCKDFDSEVF---PIAYMSWVRQKPGHGFEWIGGILPS--IGRTIYGEKFE-DKATLDADTLSNTAYLELNSLTSEDSAIYYCAR----------------------', 'IGHV1S12*01': 
'QVQLQQSGP-ELVKPGASVKISCKASGYTF----TSYYIHWVKQRPGQGLEWIGYIYPR--DGSTNYNEKFK-GKATLTADTSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1S14*01': 'QVQLQQPGS-VLVRPGTSVKLSCKASGYTF----TSYWMHWAKQRPGQGLEWIGEIHPN--CGNINYNEKFK-GKATLTVDTSSSTAYVDLSSLTSEDSAVYYCAR----------------------', 'IGHV1S26*01': 'QVQLQQSGA-ELVKTGASVKMSCKASGYTF----TSYTMHWVKQRPGQGLEWIGYINPS--SGYTNYNQKFK-DKATLTADKSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1S29*02': 'EVQLQQSGP-ELVKPGASVKISCKASGYTF----TDYNMHWVKQSHGKSLEWIGYIYPY--NGGTGYNQKFK-SKATLTVDNSSSTAYMELSSLTSEDSAVYYCAR----------------------', 'IGHV1S35*01': 'QVQLQQPGA-VLVRHGASVKLSCKASGYTF----TSSWMHWAKQRHGQGLEWIGEIHPN--SGNTNYNEKFK-GKATLTVDKSSSTAYVDLSSLTSEDSAVYYCAR----------------------', 'IGHV1S36*01': 'PVQLQQPGA-ELVKPGASVKLSCKASGYTF----TSYWMHWVKQRPGRGLEWIGNIDPN--SGGTKYNEKFK-SKATLTVDKPSSTAYMQLSSLTSEDSAVYYCTR----------------------', 'IGHV1S40*01': 'QVQLQQSGP-ELVRPGLSVKLSCKASGYIF----ITYWMNWVKQRPGQGLEWIGQIFPA--SGSTNYNEMFE-GKATLTVDTSSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1S45*01': 'EVQLQQSGP-ELVKPGASVKMSCKASGYTF----TDYVMHWVKQSNGKSLEWIGYINPY--NDYTSYNQKFK-GKATLTVDKSSSTAYMQLNSLTSEDSAVYYCAR----------------------', 'IGHV1S49*01': 'EVQLQQSGA-ELVRPGSSVKLSCKTSGYTF----TSYGINWVKQRPGQGLEWIGYIYLG--NGYTAYNEKFK-GKATLTSDTSSSTAYMQLRSLTSEDSVIKFCAR----------------------', 'IGHV1S5*01': 'QVQLQQPGA-ELVKPGASVKLSCKASGYTF----TSYWMHWVKQRPGEGLEWIGNIYPG--SSSTNYNEKFK-SKATLTVDTPSSTAYMQLSSLTSEDSAVYYCAR----------------------', 'IGHV1S50*01': 'QVQLQQSGA-ELVKPGASVRISCKTSGYTF----TSYNIHWVKERPGQGLEWIGWIYPG--DGNTKYNEKFK-GKTTLTADKSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1S52*01': 'QVQLQQSGA-ELVRPGTSVKVSCKASGYVF----TNYLIEWVKQRPGQGLEWIGVINPG--SGGTNYNEKFK-GKATLTADKSSSTAYMQLSSLTSDDSAVYFCAR----------------------', 'IGHV1S53*01': 'QVQLQQSDA-ELVKPGASVKISCKASGYTF----TDHAIHWVKQKPEQGLEWIGYISPG--NGDIKYNEKFK-GKATLTADKSSSTAYMQLNSLTSEDSAVYFCKR----------------------', 'IGHV1S53*02': 'QVQLQQSDA-ELVKPGASVKISCKASGYTF----TDHAIHWVKQKPEQGLEWIGYISPG--NGDIKYNEKFK-GKATLTADKSSSTAYMQLNSLTSEDSAVYFCKR----------------------', 'IGHV1S53*03': 'QVQLQQSDT-ELVKPGASVKISCKASGYTF----TDHAIHWVKQRPEQGLEWIGYISPG--NGDIKYNEKFK-GKATLTADKSSSTAYMQLNSLTSEDSAVYFCKR----------------------', 'IGHV1S55*01': 'QVQLQQPGS-VLVRPGASVKLSCKASGYTF----TSYWMNWVKQRPGQGLEWIGGIYPN--SGSTDYNEKFK-GKATLTVDTSSSTTYMDLSSLTSKDSAVYYCAR----------------------', 'IGHV1S56*01': 'QVQLQQSGP-ELVKPGASVRISCKASGYTF----TSYNIHWVKQRPGQGLEWIGWIYPG--DGNTKYNEKFK-GKTTLTADKSSSTAYMQLSSLTSEDSAVYFCAR----------------------', 'IGHV1S61*01': 'QVQLQQPGA-ELVKPGASVKLSCKASGYTF----TSYWINWVKQRPGQGLEWIGNIYPG--SSSTNYNEKFK-SKATLTVDTSSSTAYMQLSSLTSDDSAVYYCAR----------------------', 'IGHV2-2*01': 'QVQLKQSGP-GLVQPSQSLSITCTVSGFSL----TSYGVHWVRQSPGKGLEWLGVIWSG---GSTDYNAAFI-SRLSISKDNSKSQVFFKMNSLQADDTAIYYCAR----------------------', 'IGHV2-2*02': 'QVQLKQSGP-GLVQPSQSLSITCTVSGFSL----TSYGVHWVRQSPGKGLEWLGVIWSG---GSTDYNAAFI-SRLSISKDNSKSQVFFKMNSLQANDTAIYYCAR----------------------', 'IGHV2-2*03': 'QVQLKQSGP-GLVQPSQSLSITCTVSGFSL----TSYGVHWVRQSPGKGLEWLGVIWSG---GSTDYNAAFI-SRLSISKDNSKSQVFFKMNSLQSNDTAIYYCAR----------------------', 'IGHV2-2-2*01': 'QVQMKQSGP-GLVQPSQSLSITCTVSGFSL----TSYGVHWVRQSPGKGLEWLGVIWSG---GSTDYNAAFI-SRLSISKDNSKSQVFFKMNSLQADDTAIYYCVR----------------------', 'IGHV2-3*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TSYGVSWVRQPPGKGLEWLGVIWGD---GSTNYHSALI-SRLSISKDNSKSQVFLKLNSLQTDDTATYYCAK----------------------', 'IGHV2-3-1*01': 'QVQLKESGP-GLVQPSQSLSITCTVSGFSL----TSYGVHWVRQSPGKGLEWLGVIWSG---GSTDYNAPFI-SRLSISKDNSKSQIFFKMNSLQADDTAIYYCAR----------------------', 'IGHV2-4*01': 
'QVQLKQSGP-GLVQPSQSLSITCTVSGFSL----TSYGVHWVRQPPGKGLEWLGVIWSG---GSTDYNAAFI-SRLSISKDNSKSQVFFKMNSLQADDTAIYYCAK----------------------', 'IGHV2-4*02': 'QVQLKQSGP-GLVQPSQSLSITCTVSGFSL----TSYGVHWVRQPPGKGLEWLGVIWSG---GSTDYNAAFI-SRLSISKDNSKSQVFFKMNSLQADDTAIYYCAR----------------------', 'IGHV2-4-1*01': 'QVQLKQSGP-GLVQPSQSLSITCTVSGFSL----TSYGVHWVRQSPGKGLEWLGVIWSG---GSTDYNAAFI-SRLSISKDNSKSQVFFKMNSLQADDTAIYYCAR----------------------', 'IGHV2-5*01': 'QVQLKQSGP-GLVQPSQSLSITCTVSGFSL----TSYGVHWVRQSPGKGLEWLGVIWRG---GSTDYNAAFM-SRLSITKDNSKSQVFFKMNSLQADDTAIYYCAK----------------------', 'IGHV2-5-1*01': 'QVQLKQSGP-SLVQPSQSLSITCTVSGFSL----TSYGVHWVRQSPGKGLEWLGVIWRG---GSTDYNAAFM-SRLSITKDNSKSQVFFKMNSLQADDTAIYYCAK----------------------', 'IGHV2-6*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TSYGVDWVRQSPGKGLEWLGVIWGV---GSTNYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAS----------------------', 'IGHV2-6*02': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TSYGVHWVRQPPGKGLEWLVVIWSD---GSTTYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAR----------------------', 'IGHV2-6*03': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TSYGVHWVRQPPGKGLEWLVVIWSD---GSTTYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAR----------------------', 'IGHV2-6-1*01': 'QVQLKESGP-GLVAPSQSLSITCTISGFSL----TSYGVHWVRQPPGKGLEWLVVIWSD---GSTTYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAR----------------------', 'IGHV2-6-2*01': 'QVQLKESGP-DLVAPSQSLSITCTVSGFSL----TSYGVHWVRQPPGKGLEWLVVIWSD---GSTTYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAR----------------------', 'IGHV2-6-3*01': 'QVQ-NKSGP-GLVEPSQSLSITCTVYWFSL----TSYGVSWVRQPPGKGLKWLGVIWAG---GSTNYNSALI-SRLSISKDNSKSQVFLKMNSLQTDDTAIYYCVR----------------------', 'IGHV2-6-4*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----SRYSVHWVRQPPGKGLEWLGMIWGG---GSTDYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAR----------------------', 'IGHV2-6-5*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TDYGVSWIRQPPGKGLEWLGVIWGG---GSTYYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAK----------------------', 'IGHV2-6-6*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TNSGVHWVRQSPGKGLEWLGVIWGD---GSTNYNSAFK-SRLSISKDNSKSQVFLKMNSLQTDDTARYYCAK----------------------', 'IGHV2-6-7*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TGYGVNWVRQPPGKGLEWLGMIWGD---GSTDYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTARYYCAR----------------------', 'IGHV2-6-7*02': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TGYGVNWVRQPPGKGLEWLGMIWGD---GSTDYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTARYYCAR----------------------', 'IGHV2-6-8*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TSYGVDWVRQSPGKGLEWLGVIWGV---GSTNYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAS----------------------', 'IGHV2-7*01': 'QVQMKESGP-DLVQPSQTLSLTCTVSGFSL----SSYGVHWFRKPPRKGLEWLGGIWSG---GSIYYTPALS-SRLSVSRDTSKSQVFFKMSSLQSEDTAVYHCAR----------------------', 'IGHV2-9*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TSYGVDWVRQPPGKGLEWLGVIWGG---GSTNYNSALM-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAK----------------------', 'IGHV2-9*02': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TSYGVHWVRQPPGKGLEWLGVIWAG---GSTNYNSALM-SRLSISKDNSKSQVFLKMNSLQTDDTAMYYCAR----------------------', 'IGHV2-9-1*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TSYAISWVRQPPGKGLEWLGVIWTG---GGTNYNSALK-SRLSISKDNSKSQVFLKMNSLQTDDTARYYCAR----------------------', 'IGHV2-9-2*01': 'QVQLKESGP-GLVAPSQSLSITCTVSGFSL----TSYDISWIRQPPGKGLEWLGVIWTG---GGTNYNSAFM-SRLSISKDNSKSQVFLKMNSLQTDDTAIYYCVR----------------------', 'IGHV2S3*01': 'QVQLKQSGP-GLVAPSQSLFITCTVYGFSL----TSYEINWVRQPPGKGLEWLGVIWTG---GSTNYNSALI-SRLSISKDNSKSLVFLKMNSLQTDDTAIYYCVR----------------------', 'IGHV3-1*01': 'DVQLQESGP-GMVKPSQSLSLTCTVTGYSIT---SGYDWHWIRHFPGNKLEWMGYISYS---GSTNYNPSLK-SRISITHDTSKNHFFLKLNSVTTEDTATYYCAR----------------------', 'IGHV3-1*02': 
'DVQLQESGP-DLVKPSQSLSLTCTVTGYSIT---SGYSWHWIRQFPGNKLEWMGYIHYS---GSTNYNPSLK-SRISITRDTSKNQFFLQLNSVTTEDTATYYCAR----------------------', 'IGHV3-2*02': 'DVQLQESGP-GLVKPSQSLSLTCTVTGYSIT---SDYAWNWIRQFPGNKLEWMGYISYS---GSTSYNPSLK-SRISITRDTSKNQFFLQLNSVTTEDTATYYCAR----------------------', 'IGHV3-3*01': 'DVQLQESGP-SLVRPSQTLSLTCTVTGFSIN---SDCYWIWIRQFPGNKLEYIGYTFYS---GITYYNPSLE-SRTYITRDTSKNQFSLKLSSVTTEDTATYYCAR----------------------', 'IGHV3-4*01': 'DVQLQESGP-ALVKPSQTVSLTCTVTGYSIT--NGNHWWNWIRQVSGSKLEWIGYISSS---GSTDSNPSLK-SRISITRDTSKNQLFLQLNSVTTEDIATYYCAR----------------------', 'IGHV3-4*02': 'DVQLQESGP-GLVKPSQTVSLTCTVTGYSIT--NGNHWWNWIRQVSGNKLEWMGYISSS---GSTDSNPSLK-SQISITRDTSKNQLFLQLNSVTIEDIATYYCAR----------------------', 'IGHV3-5*01': 'DVQLQESGP-GLVKPSQTVFLTCTVTGISIT--TGNYRWSWIRQFPGNKLEWIGYIYYS---GTITYNPSLT-SRTTITRDTPKNQFFLEMNSLTAEDTATYYCAR----------------------', 'IGHV3-5*02': 'DVQLQESGP-GLVKPSQTVSLTCTVTGISIT--TGNYRWSWIRQFPGNKLEWIGYIYYS---GTITYNPSLT-SRTTITRDTSKNQFFLEMNSLTAEDTATYYCAR----------------------', 'IGHV3-6*01': 'DVQLQESGP-GLVKPSQSLSLTCSVTGYSIT---SGYYWNWIRQFPGNKLEWMGYISYD---GSNNYNPSLK-NRISITRDTSKNQFFLKLNSVTTEDTATYYCAR----------------------', 'IGHV3-6*02': 'DVQLQESGP-GLVKPSQSLSLTCSVTGYSIT---SGYYWNWIRQFPGNKLEWMGYISYD---GSNNYNPSLK-NRISITRDTSKNQFFLKLNSVTTEDTATYYCAR----------------------', 'IGHV3-8*01': 'EVQLQESGP-GLAKPSQTLSLTCSVTGYSI----TSDYWNWIRKFPGNKLEYMGYISYS---GSTYYNPSLK-SRISITRDTSKNQYYLQLNSVTTEDTATYYCAR----------------------', 'IGHV3-8*02': 'EVQLQESGP-SLVKPSQTLSLTCSVTGDSI----TSGYWNWIRKFPGNKLEYMGYISYS---GSTYYNPSLK-SRISITRDTSKNQYYLQLNSVTTEDTATYYCAR----------------------', 'IGHV3S1*01': 'EVQLQESGP-SLVKPSQTLSLTCSVTGDSI----TSDYWNWIRKFPGNKLEYMGYISYS---GSTYYNPSLK-SRISITRDTSKNQYYLQLNSVTSEDTATYYCAR----------------------', 'IGHV3S1*02': 'EVQLQESGP-SLVKPSQTLSLTCSVTGDSI----TSGYWNWIRKFPGNKLEYMGYISYS---GSTYYNPSLK-SRISITRDTSKNQYYLQLNSVTTEDTPTYYCAR----------------------', 'IGHV4-1*01': 'EVKLLQSGG-GLVQPGGSLKLSCAASGIDF----SRYWMSWVRRAPGKGLEWIGEINPD--SSTINYAPSLK-DKFIISRDNAKNTLYLQMSKVRSEDTALYYCAR----------------------', 'IGHV4-1*02': 'EVKLLESGG-GLVQPGGSLKLSCAASGFDF----SRYWMSWVRQAPGKGLEWIGEINPD--SSTINYTPSLK-DKFIISRDNAKNTLYLQMSKVRSEDTALYYCAR----------------------', 'IGHV4-2*02': 'EVKLLESGG-GLVQPGGSLNLSCAASGFDF----SRYWMSWARQAPGKGQEWIGEINPG--SSTINYTPSLK-DKFIISRDNAKNTLYLQMSKVRSEDTALYYCAR----------------------', 'IGHV5-12*01': 'EVKLVESGG-GLVQPGGSLKLSCAASGFTF----SDYYMYWVRQTPEKRLEWVAYISNG--GGSTYYPDTVK-GRFTISRDNAKNTLYLQMSRLKSEDTAMYYCAR----------------------', 'IGHV5-12*02': 'EVKLVESGG-GLVQPGGSLKLSCATSGFTF----SDYYMYWVRQTPEKRLEWVAYISNG--GGSTYYPDTVK-GRFTISRDNAKNTLYLQMSRLKSEDTAMYYCAR----------------------', 'IGHV5-12-1*01': 'EVQLVESGG-GLVKPGGSLKLSCAASGFAF----SSYDMSWVRQTPEKRLEWVAYISSG--GGSTYYPDTVK-GRFTISRDNAKNTLYLQMSSLKSEDTAMYYCAR----------------------', 'IGHV5-12-2*01': 'EVKLVESGG-GLVQPGGSLKLSCAASGFTF----SSYTMSWVRQTPEKRLEWVAYISNG--GGSTYYPDTVK-GRFTISRDNAKNTLYLQMSSLKSEDTAMYYCAR----------------------', 'IGHV5-12-4*01': 'EVKLVESGG-GLVKPGQSLKLSCAASGFTF----SNYYMSWVHQTPEKRLEWVAYISSS--GVSTYYPDNVK-GRFAISRDNAKNTLYLQMTSLKSEDTALYYCAR----------------------', 'IGHV5-15*01': 'EVKLVESGG-GLVQPGGSLKLSCAASGFTF----SDYGMAWVRQAPRKGPEWVAFISNL--AYSIYYADTVT-GRFTISRENAKNTLYLEMSSLRSEDTAMYYCAR----------------------', 'IGHV5-15*02': 'EVKLVESGG-GLVQPGGSRKLSCAASGFTF----SDYGMAWVRQAPGKGPEWVAFISNL--AYSIYYADTVT-GRFTISRENAKNTLYLEMSSLRSEDTAMYYCAR----------------------', 'IGHV5-15*05': 'EVKLVESGG-ALVQPGGSLKLSCAASGFTF----SDYGMAWVRQAPRKGPEWVAFISNL--AYSIYYADTVT-GRFTISRENAKNTLYLEMSSLRSEDTAMYYCAR----------------------', 'IGHV5-16*01': 
'EVKLVESEG-GLVQPGSSMKLSCTASGFTF----SDYYMAWVRQVPEKGLEWVANINYD--GSSTYYLDSLK-SRFIISRDNAKNILYLQMSSLKSEDTATYYCAR----------------------', 'IGHV5-17*01': 'EVQLVESGG-GLVKPGGSLKLSCAASGFTF----SDYGMHWVRQAPEKGLEWVAYISSG--SSTIYYADTVK-GRFTISRDNAKNTLFLQMTSLRSEDTAMYYCAR----------------------', 'IGHV5-17*02': 'DVQLVESGG-GLVQPGGSRKLSCAASGFTF----SSFGMHWVRQAPEKGLEWVAYISSG--SSTIYYADTVK-GRFTISRDNPKNTLFLQMTSLRSEDTAMYYCAR----------------------', 'IGHV5-2*01': 'EVQLVESGG-GLVQPGESLKLSCESNEYEF----PSHDMSWVRKTPEKRLELVAAINSD--GGSTYYPDTME-RRFIISRDNTKKTLYLQMSSLRSEDTALYYCAR----------------------', 'IGHV5-2*02': 'EVQLVESGG-GLVQPRESLKLSCESNEYEF----PSHDMSWVRKTPEKRLELVAAINSD--GGSTYYPDTME-RRFIISRDNTKKTLYLQMSSLRSEDTALYYCAR----------------------', 'IGHV5-4*01': 'EVQLVESGG-GLVKPGGSLKLSCAASGFTF----SSYAMSWVRQTPEKRLEWVATISDG--GSYTYYPDNVK-GRFTISRDNAKNNLYLQMSHLKSEDTAMYYCAR----------------------', 'IGHV5-4*02': 'EVQLVESGG-GLVKPGGSLKLSCAASGFTF----SDYYMYWVRQTPEKRLEWVATISDG--GSYTYYPDSVK-GRFTISRDNAKNNLYLQMSSLKSEDTAMYYCAR----------------------', 'IGHV5-6*01': 'EVQLVESGG-DLVKPGGSLKLSCAASGFTF----SSYGMSWVRQTPDKRLEWVATISSG--GSYTYYPDSVK-GRFTISRDNAKNTLYLQMSSLKSEDTAMYYCAR----------------------', 'IGHV5-6-1*01': 'EVQLVESGG-DLVKPGGSLKLSCAASGFTF----SSYGMSWVRQTPDKRLEWVATISSG--GSYTYYPDSVK-GRFTISRDNAKNTLYLQMSSLKSEDTAMYYCAR----------------------', 'IGHV5-6-2*01': 'DVKLVESGG-GLVKLGGSLKLSCAASGFTF----SSYYMSWVRQTPEKRLELVAAINSN--GGSTYYPDTVK-GRFTISRDNAKNTLYLQMSSLKSEDTALYYCAR----------------------', 'IGHV5-6-3*01': 'EVQLVESGG-GLVQPGGSLKLSCAASGFTF----SSYGMSWVRQTPDKRLELVATINSN--GGSTYYPDSVK-GRFTISRDNAKNTLYLQMSSLKSEDTAMYYCAR----------------------', 'IGHV5-6-4*01': 'DVKLVESGG-GLVKPGGSLKLSCAASGFTF----SSYTMSWVRQTPEKRLEWVATISSG--GSYTYYPDSVK-GRFTISRDNAKNTLYLQMSSLKSEDTAMYYCTR----------------------', 'IGHV5-6-4*02': 'EVKLVESGG-GLVKPGGSLKLSCAASGFTF----SSYTMSWVRQSPEKRLEWVATISSG--GSYTYYPDSVK-GRFTISRDNAKNTLYLQMSSLKSEDTAMYYCTR----------------------', 'IGHV5-6-5*01': 'EVKLVESGG-GLVKPGGSLKLSCAASGFTF----SSYAMSWVRQTPEKRLEWVASISSG---GSTYYPDSVK-GRFTISRDNARNILYLQMSSLRSEDTAMYYCAR----------------------', 'IGHV5-6-6*01': 'EVKLVESGG-GLVQPGGSLKLSCAASGFTF----SSYAMSWIRQTPDKRLEWVASISSG--GSYTYYPDSVK-GRFTIPRDNTKNTLYLQMSSLSSKDTALYYCAR----------------------', 'IGHV5-9*01': 'EVMLVESGG-GLVKPGGSLKLSCAASGFTF----SSYTMSWVRQTPEKRLEWVATISGG--GGNTYYPDSVK-GRFTISRDNAKNTLYLQMSSLRSEDTALYYCAR----------------------', 'IGHV5-9*02': 'EVKLVESGG-GLVKPGGSLKLSCAASGFAF----SSYDMSWVRQTPEKRLEWVATISSG--GSYTYYPDSVK-GRFTISRDNARNTLYLQMSSLRSEDTALYYCAR----------------------', 'IGHV5-9*03': 'EVMLVESGG-GLVKPGGSLKLSCAASGFTF----SSYTMSWVRQTPEKRLEWVATISSG--GGNTYYPDSVK-GRFTISRDNAKNNLYLQMSSLRSEDTALYYCAR----------------------', 'IGHV5-9-1*01': 'EVMLVESGG-GLVKPGGSLKLSCAASGFTF----SSYAMSWVRQTPEKRLEWVATISSG--GSYTYYPDSVK-GRFTISRDNAKNTLYLQMSSLRSEDTAMYYCAR----------------------', 'IGHV5-9-1*02': 'DVKLVESGE-GLVKPGGSLKLSCAASGFTF----SSYAMSWVRQTPEKRLEWVAYISSG--GDYIYYADTVK-GRFTISRDNARNTLYLQMSSLKSEDTAMYYCTR----------------------', 'IGHV5-9-2*01': 'EVKLVESGG-GLVKPGGSLKLSCAASGFTF----SSYGMSWVRQTPEKRLEWVATISGG--GSYTYYPDSVK-GRFTISRDNAKNNLYLQMSSLRSEDTALYYCAR----------------------', 'IGHV5-9-3*01': 'EVQLVESGG-GLVKPGGSLKLSCAASGFTF----SSYAMSWVRQTPEKRLEWVATISSG--GSYTYYPDSVK-GRFTISRDNAKNTLYLQMSSLRSEDTAMYYCAR----------------------', 'IGHV5-9-4*01': 'EVQLVESGG-GLVKPGGSLKLSCAASGFTF----SSYAMSWVRQSPEKRLEWVAEISSG--GSYTYYPDTVT-GRFTISRDNAKNTLYLEMSSLRSEDTAMYYCAR----------------------', 'IGHV5-9-5*01': 'EVMLVESGG-GLVKPGGSLKLSCAASGFTF----SSYTMSWVRQTPEKRLEWVATISSG--GGNTYYPDSVK-GRFTISRDNAKNNLYLQMSSLRSEDTALYYCAR----------------------', 'IGHV5S4*01': 
'EVKLVESGG-GLVKPGGSLKLSCATSGFTF----SSYGMSWVRQTPEKRLEWVATISGG--GSYTYYPDSVK-GRFTISRDNAKNNLYLQMSSLRSEDTALYYCAR----------------------', 'IGHV6-3*01': 'EVKLEESGG-GLVQPGGSMKLSCVASGFTF----SNYWMNWVRQSPEKGLEWVAQIRLKSDNYATHYAESVK-GRFTISRDDSKSSVYLQMNNLRAEDTGIYYCT-----------------------', 'IGHV6-3*02': 'EVKLEESGG-GLVQPGGSMKLSCVASGFTF----SNYWMSWVRQSPEKGLEWVAQIRLKSDNYATHYAESVK-GRFTISRDDSKSSVYLQMNNLRAEDTGIYYCT-----------------------', 'IGHV6-3*03': 'EVKLEESGG-GLVQPGGSMKLSCVASGFTF----SNYWMSWVRQSPEKGLEWVAQIRLKSDNYATHYAESVK-GRFTISRDDSKSSVYLQMNNLRAEDTGIYYCTG----------------------', 'IGHV6-6*01': 'EVKLEESGG-GLVQPGGSMKLSCAASGFTF----SDAWMDWVRQSPEKGLEWVAEIRNKANNHATYYAESVK-GRFTISRDDSKSSVYLQMNSLRAEDTGIYYCTR----------------------', 'IGHV6-6*02': 'EVKLEESGG-GLVQPGGSMKLSCVASGFTF----SNYWMNWVRQSPEKGLEWVAEIRLKSNNYATHYAESVK-GRFTISRDDSKSSVYLQMNNLRAEDTGIYYCTR----------------------', 'IGHV6-7*01': 'EEKLDESGG-GLVQPGRSMKLSCVASGFTF----TNSWMNWFCQSPEKGLEWVAQIKSKPYNYETYYSDSVK-GRFTISRDDSKSSVYLQMNNLRAEDTGIYYCTW----------------------', 'IGHV6-7*02': 'EVKLDETGG-GLVQPGRPMKLSCVASGFTF----SDYWMNWVRQSPEKGLEWVAQIRNKPYNYETYYSDSVK-GRFTISRDDSKSSVYLQMNNLRAEDMGIYYCTW----------------------', 'IGHV7-1*01': 'EVKLVESGG-GLVQSGRSLRLSCATSGFTF----SDFYMEWVRQAPGKGLEWIAASRNKANDYTTEYSASVK-GRFIVSRDTSQSILYLQMNALRAEDTAIYYCARDA--------------------', 'IGHV7-1*02': 'EVKLVESGG-GLVQPGGSLRLSCATSGFTF----SDFYMEWVRQPPGKRLEWIAASRNKANDYTTEYSASVK-GRFIVSRDTSQSILYLQMNALRAEDTAIYYCARDA--------------------', 'IGHV7-1*03': 'EVKLVESGG-GLVQSGRSLRLSCATSGFTF----SDFYMEWVRQAPGKGLEWIAASRNKANDYTTEYSASVK-GRFIVSRDTSQSILYLQMNALRAEDTAIYYCARDA--------------------', 'IGHV7-3*01': 'EVKLVESGG-GLVQPGGSLSLSCAASGFTF----TDYYMSWVRQPPGKALEWLGFIRNKANGYTTEYSASVK-GRFTISRDNSQSILYLQMNALRAEDSATYYCARY---------------------', 'IGHV7-3*02': 'EVKLVESGG-GLVQPGGSLRLSCATSGFTF----TDYYMSWVRQPPGKALEWLGFIRNKANGYTTEYSASVK-GRFTISRDNSQSILYLQMNTLRAEDSATYYCARD---------------------', 'IGHV7-3*03': 'EVKLVESGG-GLVQPGGSLSLSCAASGFTF----TDYYMSWVRQLPGKALEWLGFIRNKANGYTTEYSASVK-GRFTISRDNSQSILYLQMNALRAEDSATYYCAKD---------------------', 'IGHV7-3*04': 'EVKLVESGG-GLVQPGGSLSLSCAASGFTF----TDYYMSWVRQPPGKALEWLALIRNKANGYTTEYSASVK-GRFTISRDNSQSILYLQMNALRAEDSATYYCARD---------------------', 'IGHV7-4*01': 'EVKLMESGG-GLVQPGASLRLSCAASGFTF----TDYYMSWVRQPPGKAPEWLALIRNKANGYTTEYTASVK-GRFTISRDNSQNILYLQMNTLRAEDSATYYCVKAV--------------------', 'IGHV7-4*02': 'EVKLMESGG-GLVQPGASLRLSCEASGFTF----TDYYMSWVRQPPGKSPEWLALIRNKANGYTTEYSASVK-GRFTISRDNSQNILYLQMNTLRAEASATYYCAKDV--------------------', 'IGHV7-4*03': 'EVKLMESGG-GLVQPGASLRLSCEASGFTF----TDYYMSWVRQPPGKSPEWLALIRNKANGYTTEYSASVK-GRFTISRDNSQNILYLQMNTLRAEASATYYCAKDV--------------------', 'IGHV7-4*04': 'EVKLVESGG-GLVQPGGSLRLSCAASGFTF----TDYYMSWVRQPPGKAPEWLALIRNKANGYTTEYTASVK-GRFTISRDNSQNILYLQMNTLRAEDSATYYCVKAV--------------------', 'IGHV8-11*01': 'QITQKESGP-GILQPSQTLSLTCSFSGFSLS--TSGMGVGWIHQPSGNGLEWLAHIWWN---DNKYYNTALK-SRLTISKDTSNNQVFLKIASVDTADTATYYCARI---------------------', 'IGHV8-12*01': 'QVTLKESGP-GILQSSQTLSLTCSFSGFSLS--TSGMGVSWIRQPSGKGLEWLAHIYWD---DDKRYNPSLK-SRLTISKDTSRNQVFLKITSVDTADTATYYCARR---------------------', 'IGHV8-6*01': 'QVTLKESGP-GILQPSQTLSLTCSFSGFSLS--TFGMGVSWIRQPSGKDLEWLAHIYWD---DDKHYNPSLK-SQLRISKDTSNNQVFLKITTVDTVDTATYYCARR---------------------', 'IGHV8-8*01': 'QVTLKESGP-GILQPSQTLSLTCSFSGFSLS--TFGMGVGWIRQPSGKGLEWLAHIWWD---DDKYYNPALK-SRLTISKDTSKNQVFLKIANVDTADTATYYCARI---------------------', 'IGHV9-1*01': 'QIQLVQSGP-ELKKPGETVKISCKASGYTF----TEYPMHWVKQAPGKGFKWMGMIYTD--TGEPTYAEEFK-GRFAFSLETSASTAYLQINNLKNEDTATYFCVR----------------------', 'IGHV9-1*02': 
'QIQLVQSGP-ELKKPGETVKISCKASGYTF----TNYGMNWVKQAPGKGLKWMGWINTY--TGEPTYADDFK-GRFAFSLETSASTAYLQINNLKNEDMATYFCAR----------------------', 'IGHV9-2*01': 'QIQFVQSGP-ELKKPGETVKISCKASVYTF----TEYPMHWVKQAPGKGFKWMGWINTY--SGEPTYADDFK-GRFAFSLETSASTAYLQINNLKNEDTATYFCAR----------------------', 'IGHV9-2*02': 'QIQLVQSGP-ELKKPGETVKISCKASGYTF----TNYAMHWVKQAPGKGLKWMGWKYTN--TGEPTYGDDFK-GRFAFSLETSASTAYLQINNLKNEDMATYFCAR----------------------', 'IGHV9-2-1*01': 'QIQLVQSGP-ELKKPGETVKISCKASGYTF----TDYSMHWVKQAPGKGLKWMGWINTE--TGEPTYADDFK-GRFAFSLETSASTAYLQINNLKNEDTATYFCAR----------------------', 'IGHV9-3*01': 'QIQLVQSGP-ELKKPGETVKISCKASGYTF----TTYGMSWVKQAPGKGLKWMGWINTY--SGVPTYADDFK-GRFAFSLETSASTAYLQINNLKNEDTATYFCAR----------------------', 'IGHV9-3*02': 'QIQLVQSGP-ELKKPGETVKISCKASGYTF----TNYGMNWVKQAPGKGLKWMGWINTN--TGEPTYAEEFK-GRFAFSLETSASTAYLQINNLKNEDTATYFCAR----------------------', 'IGHV9-3*03': 'QIQLVQSGP-ELKKPGETVKISCKASGYTF----TNYGMNWVKQAPGKGLKWMGWINTN--TGEPTYAEEFK-GRFAFSLETSASTAYLQINNLKNEDTATYFC------------------------', 'IGHV9-3-1*01': 'QIQLVQSGP-ELKKPGETVKISCKASGYTF----TNYGMNWVKQAPGKGLKWMGWINTY--TGEPTYADDFK-GRFAFSLETSASTAYLQINNLKNEDTATYFCAR----------------------', 'IGHV9-4*01': 'QIQLVQSGP-ELKKPGETVKISCKASGYTF----TTAGMQWVQKMPGKGFKWIGWINTH--SGEPKYAEDFK-GRFAFSLETSASTAYLQISNLKNEDTATYFCAR----------------------', 'IGHV9-4*02': 'QIQLVQSGP-ELKKPGETVRISCKASGYTF----TTAGMQWVQKMPGKGLKWIGWINTH--SGVPKYAEDFK-GRFAFSLETSASTAYLQISNLKNEDTATYFCAR----------------------'}, 'rat': {'IGHV2S13*01': 'QVQLKESGP-GLVQPSQTLSLTCTVSGFSL-----TSYNVHWVRQPPGKGLEWMGVIWSG---GNTDYNSALK-PRLSISRDTSKSQVFLTMNSLQTEDTGIY-YCNR--------------------', 'IGHV2S18*01': 'QVQLKESGP-GLVQPSETLSLTCTVSGFSL-----TSYSVHWVRQHSGKSLEWMGRMWSD---GDTSYNSAFT-SRLSISRDTSKSQVFLKMNSLQTEDTGTY-YCAR--------------------', 'IGHV2S61*01': 'QVQLKESGP-GLVQPSQTLSLTCTVSGFSL-----SSYGVIWVRQPPGKGLEWMGVIWGN---GNTNYNSALK-SRLSISRDTSKSQVFLKMNNLQTEDTAMY-FCA---------------------', 'IGHV2S63*01': 'EVQLKESGP-GLVQPSQTLSLTCTVSGFSL-----TDYSVHWVRQPPGKGLEWMGVMWSG---GSTAYNSALK-SRLSISRDTSKSQVFLKMNSLQTEDTAIY-YCTR--------------------', 'IGHV5-43*01': 'EVQLVESGG-GLVQPGSSLKVSCVASGFTF-----SSYVMHWFRQAPENGIEWLAYINTD--SSSTHYAETVK-GRFTISRDNAKNTVDMQLSSLRSEDTAMY-FCAR--------------------', 'IGHV5S10*01': 'EVQLVESGG-GLVQPGRSLKLSCAASGFTF-----SDYNMAWVRQAPKKGLEWVATIIYD--GSRTYYRDSVK-GRFTISRDNAKSTLYLQMDSLRSEDTATY-YCAT--------------------', 'IGHV5S11*01': 'EVQLVESGG-GLVQPGRSMKLSCAASGFTF-----SNYYMAWVRQAPTKGLEWVASISTG--GGNTYYRDSVK-GRFTISRDNAKSTLYLQMDSLRSEETATY-YCAR--------------------', 'IGHV5S13*01': 'EVQLVESGG-GLVQPGRSLKLSCAASGFTF-----SNYGMAWVRQAPTKGLEWVASISTG--GGNTYYRDSVK-GRFTISRDNAKNTQYLQMDSLRSEDTATY-YCAR--------------------', 'IGHV5S8*01': 'EVKLVESGG-GLVQPGRPLKLSCAASGFTF-----SSNWLNWIRQAPGKGLEWVASINPD--GSSTLYPDTVK-GRFVVSKDNAKNTRYLQMNNLRSEDTAMY-YCAR--------------------'}, 'rabbit': {'IGHV1S1*01': 'QEQLKESGG-RLVMPGGILTLTCTASGSNI----SSYGVSWFRQAPGKGLEWIRYISYG---GSAYYKSWVK-GRFTISKTSS--TVDLKMTSLTASDTATYFCAR----------------------', 'IGHV1S13*01': 'QEQLEESGG-GLVTPGGTLTLTCTVSGFSL----SSYGVSWVRQAAGKGLEWIGYISSS---GSAYYASWVN-GRFTISKTSS--TVDLKMTSLRAADTATYFCAR----------------------', 'IGHV1S17*01': 'QEQQKESGG-RLVMPGGSLTLTCTVSGFSL----SSYNMGWVRQAPGEGLEYIGWISTG---GSAYYASWVN-GRFTISKTST--TMDLKMTSLTAADTATYFCAR----------------------', 'IGHV1S24*01': 'QEQLKESGG-GLVTPGGILSLTCTASGFSI----SSYRMGWVRQAPGKGLEYIGYISYG---GSAYYKSWVK-GRFTISKTSS--TVDLKMTSLTASDKATYFCAR----------------------', 'IGHV1S25*01': 'QEQLKESGG-VLVTPGGILSLTCTASGFSI----SSYRMGWVRQAPGKGLEYIGIIYTG---GSAYYASWVN-GRFTISKTSS--TVDLKMTSLTAADMATYFCAR----------------------', 'IGHV1S26*01': 
'Q-SVKESEG-GLFKPTDTLTLTCTVSGFSL----SSYAISWVRQAPGNGLEWIGIINSY---GSTYYASWAK-SRSTITRNTNENTVTLKMTSLTAADTATYFCAR----------------------', 'IGHV1S28*01': 'Q-SLEESRG-GLIKPGGTLTLTCTASGFTI----SSYDMSWVRQAPGKELEWIGYISYG---GSAYYASWAK-SRSTITRNTNENTVTLKMTSLTAADTATYFCAR----------------------', 'IGHV1S31*01': 'Q-SVEESRG-GLIKPTDTLTLTCTVSGFSL----SSYGVIWVRQAPGNGLEYIGTIGSS---GSAYYASWAK-SRSTITRNTNLNTVTLKMTSLTAADTATYFCAR----------------------', 'IGHV1S33*01': 'QEQLEESGG-GLVKPGDTLTLTCKASGFSL----SSYDMSWVRQAPGKGLEWIGFIWSG---GSTDYASWVN-GRIIISSDNTQNTVSLLMNSLSARDTATYFCAG----------------------', 'IGHV1S34*01': 'Q-SVKESEG-GLFKPTDTLTLTCTVSGFSL----SSNAISWVRQAPGNGLEWIGAIGSS---GSAYYASWAK-SRSTITRNTNLNTVTLKMTSLTAADTATYFCAR----------------------', 'IGHV1S36*01': 'QEQLEESGG-GLVTPGGTLTLTCTASGFSL----SSYGVSWVRQAAGKGLEWIGYISSS---GSAYYASWVN-GRFTISKTSS--TVDLKMTSLTASDTATYFCAR----------------------', 'IGHV1S40*01': 'Q-SLEESGG-DLVKPGASLTLTCTASGFSFS---SSYYMCWVRQAPGKGLEWIACIYAGS-SGSTYYASWAK-GRFTISKTSS-TTVTLQMTSLTAADTATYFCAR----------------------', 'IGHV1S43*01': 'QQQLEESGG-GLVKPGGTLTLTCKASGIDFS---SYYYICWVRQAPGKGLELIACIYTS--SGSTWYASWVN-GRFTISRSTSLNTVDLKMTSLTAADTATYFCAR----------------------', 'IGHV1S44*01': 'Q-SLEESGG-RLVTPGGSLTLTCTVSGIDL----TSYAMGWVRQAPGKGLEYIGIISSS---GSTYYASWAK-GRFTISKTSST-TVDLKMTSLTTEDTATYFCAG----------------------', 'IGHV1S45*01': 'QEQLEESGG-DLVKPEGSLTLTCTASGFSFS---SSYWICWVRQAPGKGLEWIACIYAGS-SGSTYYASWAK-GRFTISKTSST-TVTLQMTSLTAADTATYFCAR----------------------', 'IGHV1S47*01': 'QEQLVESGG-GLVQPEGSLTLTCKASGFDF----SSNAMCWVRQAPGKGPEWIACIYNG--DGSTYYASWVN-GRFTISRSTSLNTVTLQMTSLTAVDTATYFCAR----------------------', 'IGHV1S49*01': 'Q-SVKESEG-GLFKPADTLTLTCTASGFTI----SSYGVSWVRQAPGKGPEWIGAIDIN---GRTYYATWAK-SRATITRNVNENTVTLRVTSLTAADTATYFCAR----------------------', 'IGHV1S50*01': 'Q-SVKESEG-GLFKPTDTLTLTCTVSGFSL----SSYAISWVRQAPGNGLEYIGYISFT---NTAYYASWAK-SRSTITRNTNENTVTLKMTSLTAADTATYFCAR----------------------', 'IGHV1S51*01': 'Q-SVKESEG-GLFKPTDTLTLTCTVSGFSL----SSYNIGWVRQAPGSGLEWIGIISYG---GSAYYASWAK-SRSTITRNTNENTVTLKMTSLTAADTATYFCAR----------------------', 'IGHV1S52*01': 'Q-SVKESEG-GLFKPTDTLTLTCTVSGFSL----SSYNMGWVRQAPGNELEWIGIITSY---GSTYYASWAK-SRSTITRNTNENPVTLKMTSLTAADTATYFCAR----------------------', 'IGHV1S53*01': 'QEQLEESGG-GLVNPGGTLTLTCTVSGFTI----STYGVSWVRQAPGNGLEWIGTVNYD---GSTHYASWAK-SRSTITRNTNENTATLKMTSLTGADTATYFCAR----------------------', 'IGHV1S54*01': 'Q-SLGESRG-GLIKPGGTLTLTCTASGFTI----SSYDMSWVRQAPGEGLEYIGCINSY---GTTYYASWAK-SRSTITRNTNENTVTLKMTSLTAADTATYFCGRE---------------------', 'IGHV1S55*01': 'Q-SVKESEG-GLFKPTDTLTLSCTVSGFSL----SSYGVSWVRQAPGEGLECIGWISTD---GSTYYASWAK-SRSTITRNTNENTVTLKMTSLTAADTATYFCARA---------------------', 'IGHV1S56*01': 'QEQLKESGG-GLVTPGGILSLTCTASGFSL----STYNMGWVPPAPGKGLEYIGWINTG---GSPYCTSWAG-KRSTITRNTSENTVTLEMTSLTAADTATYLCAK----------------------', 'IGHV1S57*01': 'Q-TVKESEG-GLFKPTHTLTLTCTASGFSL----SSYPIIWVRQAPGNGLEWIGITNTY---GSPYYASWAK-SRSTITRNTNENTVTLKMTSLTAADTATYFCAR----------------------', 'IGHV1S58*01': 'QEQLKESGG-RLVMPGGSLTITCTVSGFSL----SSNAISWVRQAPGNGLEWIGVINSG---GTAYYASWAK-GRSTISRNTKENTVTLQMTSLTAADTATYFCAR----------------------', 'IGHV1S59*01': 'Q-SVKESEG-GLFKPTETLTLTCTVSGFSL----RDYRTGWVRQAPGKELEVVAYIRGD---GVIYYASWAK-KRSTITRNTNENTVTLKMTSLTAADTATYFCGR----------------------', 'IGHV1S60*01': 'Q-SVKESEG-GLFKATETLTLTCTLSGFSL----NNNAIHWVRQAPGKGLEWIGMIYGS---GATYYASWVS-GRATITRDTNENTVTLKMTSLTDADTATYFCAR----------------------', 'IGHV1S61*01': 'Q-SVKESEG-GLFKPTDTLTLTCTVSGFSL----TSMSISWVRQAPGNGLEWIGAINRV---STTYYTTWAK-SRSTITRNTNENTVTLKMTSLTVADTATYFCTR----------------------', 'IGHV1S62*01': 
'Q-SVKESEG-GLIKPTDTLTLTCTVSGFSL----TNYIIFWVRQAPGKELEWIGYIHGG---GNTYYASWAK-SRSTITRDTKENTVTLKMASLTASDTATYFCAR----------------------', 'IGHV1S63*01': 'Q-SVKESEG-GLFKPTDTLTLTCTASGFSI----SSYRMGWVRQAPGNGLEVIGYIRGD---GVTYCASWAK-SRSTITRNTNENTATLKMTSLTAADTATYFCGR----------------------', 'IGHV1S65*01': 'Q-SLEEFGG-GLIRPASTLTLTCTVSGFSL----NEVGVIWVRQAPGKELEWIGYISYR---GNAYYASWAK-SRSTITRNTKENTVTLKVTGLTAADTATYFCAR----------------------', 'IGHV1S66*01': 'Q-SVKESEG-GLFEPTDTLTLTCTVSGFSL----TKYGVMWVRQAPGNGLEWIGFIAYS---GNTYYTTWAK-SRSTITRNTNENTVTLKMTSLTAADTATYFCAR----------------------', 'IGHV1S67*01': 'Q-SVKESEG-GLFKPTDTLTLTCTVSGFSL----STYGVIWVRQAPGNGLEYIGSIASG---GSAGYASWAK-SRSTITRNTNENTVTLKMTSLTAADTATYFCAA----------------------', 'IGHV1S68*01': 'Q-SVEESRG-GLIKPADTLTLTCTVSGFSL----NNYGVIWVRQAPGSGLEYIGTIDIG---VTAFYASWAK-SRSTITKNTNENTVTLRMTSLTAADTATYFCAS----------------------', 'IGHV1S69*01': 'Q-SVEESGG-RLVTPGTPLTLTCTVSGFSL----SSYAMSWVRQAPGKGLEWIGIISSS---GSTYYASWAK-GRFTISKTST--TVDLKITSPTTEDTATYFCAR----------------------', 'IGHV1S7*01': 'Q-QLKESGG-GLVKPGGSLKLCCKASGFTF----SSYYMCWVRQAPGKGLEWIGCIYAG--SGSTHYASWVN-GRFTLSRDNAQSTVCLQLNSLTAADTATYFCAR----------------------', 'IGHV1S8*01': 'QKQLVESGG-GLDQPAGSLKLSCKDSGFTL----SSNAMCWVHQAPGKGLEWIACIDSY---GSTNYVSRVN-GRFTISSDNTQNMVDLEMNSLTAADMAIYFCAR----------------------'}, 'rhesus': {'IGHV1-111*01': 'EVQLVQSGA-EVKKP-GASVKISCKAS-GYTF----TDYYLHWVRQAPGKGLEWMGRVDPE--DGEAIHAQKFQ-DRVTITRDTSTDTAYMELSSLRSEDTAVYYCAT--------------------', 'IGHV1-111*02': 'EVQLVQSGA-EVKKP-GASVKISCKAS-GYTF----TDYYLHWVRQAPGKGLEWMGRVDPE--DGEAIHAQKFQ-DRVTITADTSTDTAYMELSSLRSEDTAVYYCAT--------------------', 'IGHV1-138*01': 'QVQLVQSGA-EVKKP-GSSVKVSCKAS-GYIF----TDYYMHWVRQAPGQGLEWMGEINPK--TGGTNYAQKFQ-GRVTTTRDTSTSTAYMELSSLRSEDTAVYYCER--------------------', 'IGHV1-151*01': 'QVQLVQSGA-EVKKP-GASVKLSCKAS-GYTF----SIYAISWVRQAPGQGLEWMGGIIPL--VGITNYAQKFQ-GRVTITADTSTSTAYMELSSLRSEDTAVYYCAR--------------------', 'IGHV1-156*01': 'EVQLVQSGA-EVKKP-GASVKVSCKVS-GYTF----TELSMHWVRQAPGKGLEWMGGVDPV--YGEIIHAEKFQ-GRVTMTEDTSTDTAYMELSSLRSEDTAVYYCAR--------------------', 'IGHV1-156D*01': 'EVQLVQSGA-EVKKP-GASVKVSCKVS-GYTF----TELSMHWVRQAPGKGLEWMGGVDPV--YGEIIHAEKFQ-GRVTMTEDTSTDTAYMELSSLRSEDTAVYYCAR--------------------', 'IGHV1-180*01': 'QVQLVQSGA-EIKQP-GASVKLSCKAS-GYTF----TSYYMHWVRQAPGQGLEWIGLISPY--NGNKGYAQNFQ-GRVTITTDTSTSTGYMELSSLRSEDTAVYYCTR--------------------', 'IGHV1-198*01': 'QVQLVQSGA-EVKKP-GASVKVSCKAS-GFTF----GSYAISWVRQAPGQGLEWMGVIIPL--VGVTNYAEKFQ-GRVTITADTSTSTAYMELSSLRSEDTAVYYCAR--------------------', 'IGHV1-198*02': 'QVQLVQSGA-EVKKP-GASVKVSCKAS-GFTF----GSYAISWVRQAPGQGLEWMGVIIPL--VGITNYAEKFQ-GRVTITADTSTSTAYMELSSLRSEDTAVYYCAR--------------------', 'IGHV1-200*01': 'QVQLVQSGA-EVKKP-GASVKLSCKAS-GYTF----TSYYINWVRQAPGQGLDWMGWINPS--NGNTGYAQKFQ-GRVTMTRDTSTSTAYMELNSLRSEDTAVYYCAR--------------------', 'IGHV1-69*01': 'EMQLVQSEA-EVKKP-GASVKISCKAS-GYTF----TYRYLHWLRQTPGQGLEWMGWITPY--NGNTNYAQKFQ-DRATITRDRSMSTAYMELSSLRSEDTAVYYCAR--------------------', 'IGHV1-70*01': 'QEQLVQSGA-EVKKP-GASVKVSCKAS-GYIF----TSYVISWLRQAPGQGFEWMGGIHPG--YGSTSYAQKFQ-GRVTITADMSTSTVYMELSSLRSEDMAVYYCAA--------------------', 'IGHV1S2*01': 'QVQLVQSGA-EVKKP-GSSVKVSCKAS-GYTF----TDYYMHWVRQAPRQGLEWMGWINPY--NGNTKYAQKFQ-GRVTMTRDTSTSTAYMELSSLRSEDTAVYYCAR--------------------', 'IGHV2-152*01': 'QVTLKESGP-ALVKP-TQTLTLTCTFS-GFSLT--TSGMGVGWIRQPPGKALEWLALIYWD---DDKRYSTSLK-SRLTISKDTSKNQVVLTMTNMDPMDTATYYCAR--------------------', 'IGHV2-161*01': 'QVTLKESGP-ALVKP-TQTLTLTCTFS-GFSIS--TTGTGVSWIRQPPGKALEWLASIYWD---DDKYYSTSLK-SRLTISKDTSKNQVVLTMTNMDPVDTATYYCAR--------------------', 'IGHV2-174*01': 
'QVTLKESGP-ALVKP-TQTLTLTCTFS-GFSLT--TSGMGVGWIRQPPGKALEWLALIYWD---DDKRYSTSLK-SRLTISKDTSKNQVVLTMTNMDPVDTATYYCAR--------------------', 'IGHV2-174*02': 'QVTLKESGP-ALVKP-TQTLTLTCTFS-GFSLS--TSGMGVGWIRQPSRKTLEWLAHIYWD---DDKRYSTSLK-SRLTISKDTSKNQVVLTMTNMDPMDTATYYCAR--------------------', 'IGHV2-95*01': 'QVTLKESGP-ALVKP-TQTLTLTCTFS-GFSIS--TTGTGVGWIRQPPGKALEWLASIYWN---DSKYYSTSLK-SRLTISKDTSKNQVVLTMTNMDPVDTATYYCAR--------------------', 'IGHV2S1*01': 'QVTLKESGP-ALVKP-TQTLTLTCTFS-GFSLS--TSGMGVGWIRQPPGKALEWLASIYWD---DDKYYSTSLK-SRLTISKDTSKNQVVLTMTNMDPVDTATYYCAR--------------------', 'IGHV3-100*01': 'EVQLVESGG-GLVKP-GGSLRLSCVAS-GFTF----SSYVMHWVRQAPGKGLEWVSVISES--GGTTYYADSVK-GRFTISRDNAKNSLFLQMNSLRAEDTAVYYCTR--------------------', 'IGHV3-100*02': 'DVQLVESGG-GLVKP-GGSLRLSCVAS-GFTF----SSYEMHWVRQAPGKGLEWVSVISES--GGTTYYADSVK-GRFTISRDNAKNSLFLQMNSLRAEDTAVYYCTR--------------------', 'IGHV3-103*01': 'EVQLVESGG-GLAKP-GGSLRLSCAAS-GFTF----SSYAMHWVRQAPGKGLEWVSAINSG---GSTYYADSVK-GRFTISRDNSKNTLSLQMNSLRAEDTAVYYCAK--------------------', 'IGHV3-108*01': 'EVQLVESGR-GLVQP-GGSLRLSCAVS-GFTF----SDHYMSWVRQAPGKGPEWVGFMRNKANGGRTEYAASGK-GRFTISRDDSKSIASLQMSSLKTEDTAVYYCAR--------------------', 'IGHV3-110*01': 'EVQLVESGG-GLVQP-GGSLRLSCVAS-GFSF----SDHYMDWVRQAPGKGLEWVSSISSGS-GSTTLYPDSVK-GRFTISRDNAKNTVYLQMNSLRAEDTAVYYCAR--------------------', 'IGHV3-110*02': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SDHYMDWVRQAPGKGLEWVSSISSGS-GSTTLYPDSVK-GRFTISRDNAKNTVYLQMNSLRAEDTAVYYCAR--------------------', 'IGHV3-115*01': 'EVQLAESGG-GLVQP-GGSLRLSCAAS-GFTF----SGYEMHWVRQAPGKGLESVSVIGGD--SSYTHYADSVK-GRFTISRDNAKNSLSLQMNSLRAADTAVYYCAR--------------------', 'IGHV3-115*02': 'EVQLAESGG-GLVQP-GGSLRLSCAAS-GFTF----SGYEMHWVRQAPGKGLESVSVIGGD--SSYTHYADSVK-GRFTISRDNAKNSLSLQMNSLRAEDTAVYYCAR--------------------', 'IGHV3-116*01': 'EVQLVESGG-GLVQP-GGSLRVSCAAS-GFTF----SDYYMQWVRQAPGKGPEWVGFIRNKANGGTAEYAASVK-GRFTISRDDSKSIASLQMNSLKTEDTAVYYCTR--------------------', 'IGHV3-116*02': 'EVRLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SDYYMSWVRQAPGKGPEWVGFIRNKANGGTAEYAASVK-GRFTISRDDSKSIASLQMNSLKTEDTAVYYCAR--------------------', 'IGHV3-118*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SSSAMHWVRQASGKGLEWVGRIRSKSNNYETGYAASVK-GRFTISRDDSKNTAYLQMNSLKTEDTAVYYCTT--------------------', 'IGHV3-119*01': 'EVQLAESGG-GLVQP-GGSLRLSCAAS-GFTF----SSYWMYWVRQAPGKGLEWVSRISSD--GSSTSYADSVK-GRFTISRENAKNSLYLQMNSLRAEDTAVYYCAK--------------------', 'IGHV3-12*01': 'EVQLVESGG-GLVQP-GRSLRPSCAAS-GFTF----SSYGMHWVRQAPEEGLVWVSYIGS----STMYYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCVR--------------------', 'IGHV3-12*02': 'EVQLVESGG-GLVQP-GRSLRPSCAAS-GFTF----SSYGMHWVRQAPEEGLVWVSYIGS----STMYYADSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCVR--------------------', 'IGHV3-124*01': 'EVQLVESGG-GLAKP-GGSLRLSCAAS-GFTF----SDYYMDWVRQAPGKGLEWVSRIKWW---GSTYYADSVK-GRFTISRENAKNTLYLQMDSLRAEDTAVYYCAR--------------------', 'IGHV3-13*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SNYYMHWVRQAQGKGLEWVGLIRNKANSYTTEYAAAVK-GRFTISRDDSKNTLYLQMSSLKTEDTALYYCTK--------------------', 'IGHV3-132*01': 'VEQLVESGG-GLVQP-GASLRLSCAAS-EFTF----SSYDMHWVRQAPGKGLEWVSGISIG---GGTYYPDSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR--------------------', 'IGHV3-132*02': 'VEQLVESGG-ALVQP-GASLRLSCAAS-EFTF----SSYDMHWVRQAPGKGLEWVSAISIG---GGTYYPDSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR--------------------', 'IGHV3-134*01': 'EVQLVESGG-GLVKP-GGSLRLSCAAS-GFTF----DDYAMSWVRQAPGKGLEWVSRISWD--GGSTYYADSVK-GRFTISRDNAKNTLYLQMDRLRAEDTALYYCSR--------------------', 'IGHV3-136*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SSYDMSWVRQAPGKGLEWVSYISYT--GKTIYYADSVK-GRFTISRDNAKNSLSLQMSSLRAEDTAVYYCTR--------------------', 'IGHV3-153*01': 
'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SDYAMDWVRQAPGKGPEWVGFIRSKAYGGTAEYAASVK-GRFTISRDDSKNTAYLQMSSLKTEDTAVYYCTR--------------------', 'IGHV3-153*02': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SDYAMDWVRQAPGKGLEWVGFIRSKAYGGTAEYAASVK-GRFTISRDDSKNTAYLQMSSLKTEDTAVYYCTR--------------------', 'IGHV3-153D*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SDYAMDWVRQAPGKGLEWVGFIRSKAYGGTAEYAASVK-GRFTISRDDSKNTAYLQMSSLKTEDTAVYYCTR--------------------', 'IGHV3-16*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SNYWMNWVRQAPGKGLEWVGFIKNKADGGTAAYAESVK-GRFTISRDDSKNTLYLQMNSLKTEDTAVYYCTR--------------------', 'IGHV3-16*02': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SNYWMSWVRQAPGKGLDWVGFIKNKADGGTAAYAESVK-GRFTISRDDSKNTLYLQMSSLNTEDTAVYYCTR--------------------', 'IGHV3-175*01': 'EVQLAESGG-GLVQP-GGSLRLSCAAS-GFTI----SSYWMSWVCQAPGKGLEWLSDIYG----STMYYGDSVK-GLFTVSRDNAKNSLYLQMNSLRAEDTAVYYCTR--------------------', 'IGHV3-175*02': 'EVQLAESGG-GLVQP-GGSLRLSCAAS-GFTI----SSYWMSWVCQAPGKGLEWLSDIYG----STMYYGDSVK-GLFTVSRDNAKNSLSLQMNSLRAEDTAVYYCTR--------------------', 'IGHV3-176*01': 'EVQLVESGG-GLVQPGGGSLRLSCAAS-GFTF----SDDYMEWVRQAPGKGLEWVGQINPN--GGTTFLMDSVK-GRFTISRDNAKNTLYLQINSLKIEDTAVYYCTR--------------------', 'IGHV3-176*02': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SDDYMEWVRQAPGKGLEWVGQINPN--GGTTFLMDSVK-GRFTISRDNAKNTLYLQINSLKIEDTAVYYCTR--------------------', 'IGHV3-178*01': 'EVQLVESGG-GLAKP-GGSLRLSCAAS-GFTF----SDYYMDWVRQAPGKGLEWVSRISNG--GGSTWYADSVK-GRFTISRENAKNTLYLQMNSLRAEDTAVYYCAR--------------------', 'IGHV3-178*02': 'EVQLVESGG-GLAKP-GGSLRLSCAAS-GFTF----SDYYMDWVRQAPGKGLEWVSRISNG--GGSTWYADSVK-GRFTISRENAKNTLYLQMNSLRAEDTAVYYCAK--------------------', 'IGHV3-183*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----GDYGMHWVRQAPGKGLEWVSSISNT--GKTVYYADSVK-GRFTISRDNAKNSLSLQMSSLRAEDTAVYYCTR--------------------', 'IGHV3-183*02': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----GDYGMHWVRQAPGKGLEWVSSISNT--GKTVYYADSVK-GRFTVSRDNAKNSLSLQMSSLRAEDTAVYYCTR--------------------', 'IGHV3-184*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SDYYMYWVRQAPGKGLEWVGFIRSKAYGGTAEYAASVK-GRFTISRDDSKSIAYLQMSSLKTEDTAVYYCTR--------------------', 'IGHV3-184*02': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SDYYMYWVRQAPGKGLEWVGFIRSKAYGGTAEYAASVK-GRFTISRDDSKSIAYLQMSSLKTEDTAVYYCTR--------------------', 'IGHV3-186*02': 'EVQLVESGG-GLVQP-GGSLRPSCAAS-GFTF----SSSAMHWVRQASGKGLEWVGRIRSKSNNYATEYAASVK-GRFTISRDDSKNTAYLQMNSLKTEDTAVYYCAR--------------------', 'IGHV3-201*01': 'EVQLVESGG-GVVQP-GGSLRLSCAAS-GFTF----DDYAMHWVRQAPGKGLEWVSGISWS--GGSTYYADSVK-GRFTISRDNAKNSLYLQMGSLRAEDTALYYCAK--------------------', 'IGHV3-28*01': 'EVQLVESGG-GLAKP-GGSLRLSCAAS-GFTF----SSYWMHWVRQAPGKGLEWISAINSA--GSSTYYADSVK-GRFTISRENAKNTLYLQMDSLRAEDTALYYCAG--------------------', 'IGHV3-28*02': 'EVQLVESGG-GLAKP-GGSLRLSCAAS-GFTF----SNYWMHWVRQAPGKGLEWISAINSA--GSSTYYADSVK-GRFTISRENAKNTLYLQMDGLRAEDTAVYYCAG--------------------', 'IGHV3-30*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SNVWMNWVRQAPGKGLEWVARIKRKADGETADYAASVK-GRFTISRDDSKNTLYLQMNSLKTEDTAVYYCTT--------------------', 'IGHV3-30*02': 'EVQLVESGA-GLVQP-GGSLRLSCAAS-GFTF----SNSWMSWVRQAPGKGLEWVARIKRKADGETADYAASVK-GRFTISRDDSKNTLYLQMNSLKTEDTAVYYCTT--------------------', 'IGHV3-32*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAP-GFTS----GNSDLIWIRQAPGKGLEWVSYISSG---GSIYYSDSVK-GRFTISRDNAKNTLYLQMSSLRVEDTAVYYCAK--------------------', 'IGHV3-32*02': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTS----GNSDLIWIRQAPGKGLEWVSYISSG---GSIYYSDSVK-GRFTISRDNAKNTLYLQMSSLRVEDTAVYYCAK--------------------', 'IGHV3-34*01': 'EVKLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SNYWMFWVRQAPGKGLEWVSSISGS--SSSTYYPDSVK-GRFTISRDNAKNTLYLQMNSLRAEDTAVHYCAR--------------------', 'IGHV3-34*02': 
'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SNYWMFWVRQAPGKGLEWVSSISGS--SSSTYYPDSVK-GRFTISRDNAKNTLYLQMNSLRAEDTTVHYCAR--------------------', 'IGHV3-35*01': 'EVQLVEYGG-GLVQP-GGSLRLSC----GFTF----SVHFMSWVRQAPGKGPEWVGFMRNKANGGTAEYATSVK-GRFTISRDDSKSIAYLQMSSLNTEDTAVYYCAR--------------------', 'IGHV3-35*02': 'EVQLVAYGG-GLEQP-GGSLRLSC----GFTF----SDHYMSWVRQAPGKGPEWVGFMRNKANGGTTEYATSVK-GRFTISRADSKSMASLQMSSLKTEDTAVYYCAR--------------------', 'IGHV3-37*01': 'EVQLVESGG-GLVQP-GGSLRLSCVAS-GFTF----SDYCMDWVRQASGKGLEWVSSISGS--SSNTYYPDSVK-GRFTISRDNAKNTLYLQMNSLRAEDTAVYYCAR--------------------', 'IGHV3-37*02': 'EVQLVESGG-GLVQP-GGSLRLSCATS-GFTF----SNYWMFWVRQAPGKGLEWVSSISGS--SSSTYYPDSVK-GRFTISRDNAKNTLYLQMNSPRAEDTAVYYCAR--------------------', 'IGHV3-38*01': 'EVQLVESGG-GLAQP-GGSLRLSCAAS-GFTF----SDHYMDWVRQAPGKGLEWVGRIRNKANSYTTEYAASVK-GRFTISRDDSKNTLYLQMSSLKTEDTAVYYCAR--------------------', 'IGHV3-38*02': 'EVQLVESGG-GLAQP-GGSLRLSCAAS-GFTF----SDHYMDWVRQAPGKGLEWVSRIRNKANSYTTEYAASVK-GRFTISRDDSKNTLYLQMSSLKTEDTAVYYCAR--------------------', 'IGHV3-46*02': 'EVQLVESGG-GLVQP-GGSLRLSCTAS-GFTF----SSTRINWIRQSPGKRLEWVADIKYD--GSEKYYVDSVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCVR--------------------', 'IGHV3-54*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SSYGMSWVRQTPGKGLEWVAVIWYD--GSKKYYADSVK-DRFTISRDNSKNMLYLQMNNLKLEDTAVYYCGR--------------------', 'IGHV3-54*02': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SSYGMHWVRQAPGKGLEWVAVIWYD--GSKKYYADSVK-DRFTISRDNSKNMLYLQMNNLKLEDTAVYYCAR--------------------', 'IGHV3-58*01': 'EAQLMETGG-GLVQP-GGSLRLSCAAS-GFTF----SDHYMQWVRQAPGKGLEWVGLIRNKADGETTDYALSVK-GRFTISRDDSKSITYLQMNNLKTEDTAVYYCAR--------------------', 'IGHV3-58*02': 'EAQLMETGG-GLVQP-GGSLRLSCADS-GFTF----SDHYMQWVRQAPGKGLEWVGLIRNKADGETTDYAASVK-GRFTISRDDSKSITYLQMNNLKTEDTAVYYCAR--------------------', 'IGHV3-59*01': 'EVQLVESGG-GLAKP-GGSLRLSCAAS-GFTF----SDYYMHWVRQASGKGLEWVSRISNG--GGSTWYADSVK-GRFTISRENAKNTLYLQMDSLRAEDTAVYYCAR--------------------', 'IGHV3-59*02': 'EVQLVESGG-GLVKP-GGSLRLSCAAS-GFTF----SDYYMHWVRQASGKGLEWVSRISNG--GGSTWYADSVK-GRFTISRENAKNTLYFQMDSLRAEDTAVYYCAR--------------------', 'IGHV3-72*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SSYAMQWVHQAPGKGLEWVSAIGPG---GDTYYADAVK-GRFTISRDNAKNSLYLQMNSLRAEDTAVYYCAR--------------------', 'IGHV3-78*01': 'EVQLVESGG-GVVQP-GGSLRLSCAAS-GFTF----DDYAMGWVRQAPGKGLEWVSAISWN--GDSTYYADSVK-GRFTISRENAKNSLYLQINRLRAEDTALYYCAR--------------------', 'IGHV3-8*01': 'EVQLVESGG-GLVQP-GGSLRLSCTGS-GFTF----SSYYMYWVRQAPGKGLEWVSAINTG--GGSTWYTDSVK-GRFTISKENAKNTLYLQMDSLRAEDTAVYYCAK--------------------', 'IGHV3S4*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAS-GFTF----SSYDMSWVRQALGKGLEWVSSISNT--GKTIYYADSVK-GRFTISRDNAKNSLSLQMNSLKTEDTAVYYCTR--------------------', 'IGHV3S40*01': 'EVQRVESGG-GLVQP-GGSLRLSCAAS-GFTI----SSSWMNWDFQAPGKGLECVSHISSG---VSTDYPDSIK-GQFTISRDNTETMLYVQMNSLRAEDMAVNYCAR--------------------', 'IGHV3S41*01': 'EVQLVESGG-GLVQP-GGSLRLSCATS-GFTF----SNYWMYWFRQAPGKGLEWVSSISGS--SSNTYYPDSVK-GRFTISRDNAKNTLYLQMNSLRAEDTAVYYCAR--------------------', 'IGHV3S42*01': 'EVQLVESGG-GLAKP-GGSLRLSCAAS-GFTF----SSYWMNWVRQTPGKGLEWISAINSG--GGSTYYADSVK-GRFTISRDNSKNTLSLQMNSLRAEDTAVYYCAK--------------------', 'IGHV3S43*01': 'EVQLVESGG-GLVQP-GGSLRLSCAAP-GFTS----GNSDLIWIRQAPGKGLEWVSYISSG---GSIYYSDSVK-GRFTISRDNAKNTLYLQMSSLRVEDTAVYYCAK--------------------', 'IGHV4-106*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSIS---DDYYWSWIRQPPGKGLEWIGYIYGS--GGGTNYNPSLK-NRVTISIDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-122*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSIS--SSYYYWSWIRQAPGKGLEWIGYIYGG--SGSTSYNPSLK-SRVTISRDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-122*02': 
'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSIS--SGYYYWSWIRQPPGKGLEWIGYITYS---GSTSYNPSLK-SRVTISRDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-127*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GYSIS---SGYGWSWIRQPPGKGLEWIGYIGGS--SGSTNYNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-143*01': 'QVQLQESGP-GLVKP-SETLSLTCTVS-GGSIS---GYYYWSWIRQPPGKGLEWIGGIYGN--SASTYYNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-147*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSI----SSNYWSWIRQPPGKGLEWIGRIYGS--SGSTSYNPSLT-SRVTISTDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-160*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSI----SSNYWSWIRQPPGKGLEWIGRIYGS--GGSTDYNPSLK-SRVTISTDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-165*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSI----SGYYWSWIRQPPGKGLEWIGYIGGS--SGSTYYNPSLK-SRVTISTDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-165*02': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSI----SGYYWNWIRQPPGKGLEWIGYIGGS--SGSTYYNPSLK-SRVTISTDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-169*01': 'QLQLQESGP-GLVKP-SETLSVTCAVS-GGSI----SSSYWSWIRQAPGKGLEWIGYIYGS--GSSTNYNPSLK-SRVTLSVDTSKNQLSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-169*02': 'QLQLQESGP-GLVKP-SETLSVTCAVS-GGSI----SSSYWSWIRQAPGKGLEWIGYIYGS--GSSTNYNPSLK-SRVTLSVDTSKNQLSLKLSSVTAADTAVYYCAS--------------------', 'IGHV4-173*01': 'QLQLQESGP-GLVKP-SETLSLTCAVS-GGSI----SSNYWSWIRQPPGKGLEWIGRISGS--GGSTDYNPSLK-SRVTISTDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-57*02': 'QLQLQESGP-GLVKP-SETLSLTCAVS-GGSIS---SSNWWSWIRQPPGKGLEWIGRISGS--GGSTSDNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-65*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSIS---SSNWWSWIRQPPGKGLEWIGYISGS--SGSTYYNPSLK-SRVTISTDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-65*02': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSIS---SSNWWSWIRQPPGKGLEWIGNIGGS--SGSTYYNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-73*01': 'QVKLQQWGE-GLVKP-SETLSLTCAVY-GGSIS---GYYYWSWIRQPPGKGLEWIGYIYGN--SASTNYNPSLK-NRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-76*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSIS---GGYDWSWIRQPPGKGLEWIGYIYGS--SGSTNYNPSLK-NRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-80*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GASI----SSYWWSWIRQPPGKGLEWIGEINGN--SGSTYYNPSLK-SRVTISKDASKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-81*01': 'QLQLQESGP-GLVKP-SETLSLTCAVS-GGSI----SGYYWSWIRQPPGKGLEWIGNIDGN--IAGTNYNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-92*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSIS---SSNWWSWIRQPPGKGLEWIGRISGS--GGSTSDNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-93*01': 'QVQLQESGP-AVVKP-SETLSLTCAVS-GGSIS---SSNWWSWIRQSPGKGLEWIGGIYGS--GGSTEYNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-93*02': 'QVQLQESGP-AVVKP-SETLSLTCAVS-GGSIS---SSNWWSWIRQSPGKGLEWIGGIYGS--GGSTEYNPSLK-SRVTISIDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-99*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GYSIS---SGYYWGWIRQPPGKGLEYIGYISGS--SGSTYYNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4-99*02': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GYSIS---SGYYWGWIRQPPGKGLEYIGYISGS--SGSTYYNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4S10*01': 'QVQLQESGP-GVVKP-SETLSLTCAVS-GGSIS---DSYWWSWIRQPPGKGLEWIGYIYGS--STSTNYNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4S13*01': 'QVQLQESGP-GVVKP-SETLSLTCAVS-GGSIS--SGYYYWSWIRQPPGKGLEWIGGIYSN--SESTNYNPSLK-SRVTISKDTSKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4S2*01': 
'QVQLQESGP-GLVKP-SETLPLTCAVS-GASI----SSNYWSWIRQAPGKGLEWIGRIYGS--GGSTDYNPSLK-SRVTISIDTCKNQFSLKLSSVTAADTAVYYCAR--------------------', 'IGHV4S9*01': 'QVQLQESGP-GLVKP-SETLSLTCAVS-GGSIS---DYYYWNWIRQPPGKGLEWIGNIYGN--SASTYYNPSLK-SRVTISKDTSKNQFFLKLSSVTAADTAVYYCAR--------------------', 'IGHV5-20*01': 'EVQLVQSGA-EVKRP-GESLKISCKTS-GYSF----TSYWISWVRQMPGKGLEWMGAIDPS--DSDTRYNPSFQ-GQVTISADKSISTAYLQWSRLKASDTATYYCAK--------------------', 'IGHV5-20*02': 'EVQLVQSGA-EVKRP-GESLKISCKTS-GYSF----TSYWISWVRQMPGKGLEWMGAIDPS--DSDTRYSPSFQ-GQVTISADKSISTAYLQWSSLKASDTATYYCAK--------------------', 'IGHV5-43*01': 'EVQLVQSGA-EVKRP-GESLRISCKTS-GYSF----TSSWISWVRQMPGKGLEWMGSIYPG--DSDTRYNPSFQ-GHVTISADKSISTTYLQWSSLKASDTATYYCAK--------------------', 'IGHV5-43*02': 'EVQLVQSGA-EVKRP-GESLKISCKTS-GYSF----TSSWISWVRQMPGKGLEWMGSIYPG--DSDTKYNPSFQ-GHVTISADKSISTTYLQWSSLKASDTATYYCAK--------------------', 'IGHV6-1*01': 'QVQLQESGP-GLVKP-SQTLSLTCAIS-GDSVS--SNSATWNWIRQSPSRGLEWLGRTYYRS-KWYNDYAQSVQ-NRISINPDTSKNQFSLQLNSVTPEDMAVYYCAR--------------------', 'IGHV7-114*01': 'QVQLVQSGA-EVKQP-GASVKVSCKAS-GYTF----TSYGMNWVRQAHGQRLEWMGWINTD--TGNPTYAQGFK-ERFTFSMDTSISTAYLQISSLKAEDTAVYYCAR--------------------', 'IGHV7-193*01': 'QVQLVQSGP-EVKQP-GASVKVSCKAS-GYSS----TTYGMNWVRQAPGQGLEWMGWMNTY--TGNPTYAQGFT-ERFVFSMDTSVSTVYLQISSLKAEDTAVYYCAR--------------------', 'IGHV7-193*02': 'QVQLVQSGP-EVKQP-GASVKVSCKAS-GYSF----TTYGMNWVRQAPGQGLEWMGWMNTY--TGNPTYAQGFT-ERFVFSMDTSVSTVYLQISSLKGEDTAVYYCAR--------------------'}, 'pig': {'IGHV1-10*01': 'EVKLVESGG-GLVQPGGSLRLSCVGSGFTF----SSTYINWVRQAPGKGLEWLAAISTS--GGSTYYADSVK-GRFTISRDDSQNTAYLQMNSLRTEDTARYYCAT----------------------', 'IGHV1-11*01': 'EEKLVESGG-GLVQPGGSLRLSCVGSGFDF----SSYGVGWVRQAPGKGLESLASIGSGSYIGSTYYADSVK-GRFTISSDDSQNTVYLQMNSLRTEDTARYYCAR----------------------', 'IGHV1-12*01': 'EEKLVESGG-GLVQPGGSLRLSCVGSGFDF----SDYAFSWVRQAPGKGLEWVAAIASSDYDGSTYYADSVK-GRFTISSDDSQNMVYLQMNSLRTEDTARYYCAI----------------------', 'IGHV1-14*01': 'EVKLVESGG-GLVQPGGSLRLSCVGSGFTF----SSYEISWVRQAPGKGLEWLAAISTS--GGSTYYADSVK-GRFTISKDDSQNTAYLQMNSLRTEDTARYYCAR----------------------', 'IGHV1-15*01': 'EEKLVESGG-GLVQPGGSLRLSCVGSGFTF----SSYSMSWVRQAPGKGLEWLAGIYSS--GSSTYYADSVK-GRFTISSDNSQNTAYLQMNSLRTEDTARYYCAR----------------------', 'IGHV1-2*01': 'EVKLVECGG-GLVQPGGSLRLSCVGSGYTF----SSYGMSWVRQAPGKGLEWLAGIDSGSYSGSSYYADSVK-GRFTISRDDSQNTAYLQMNSLRTEDTARYYCAT----------------------', 'IGHV1-4*01': 'EEKLVESGG-GLVQPGGSLRLSCVGSGFTF----SSTYINWVRQAPGKGLEWLAAISTS--GGSTYYADSVK-GRFTISRDNSQNTAYLQMNSLRTEDTARYYCAT----------------------', 'IGHV1-4*02': 'EEKLVESGG-GLVQPGGSLRLSCVGSGFTF----SSTYINWVRQAPGKGLEWLAAISTS--GGSTYYADSVK-GRFTISRDNSQNTAYLQMNSLRTEDTARYYCAR----------------------', 'IGHV1-5*01': 'EEKLVESGG-GLVQPGGSLRLSCVGSGITF----SSYAVSWVRQAPGKGLESLASIGSGSYIGSTDYADSVK-GRFTISSDDSQNTVYLQMNSLRTEDTARYYCAR----------------------', 'IGHV1-6*01': 'EEKLVESGG-GLVQPGGSLRLSCVGSGFDF----SDNAFSWVRQAPGKGLEWVAAIASSDYDGSTYYADSVK-GRFTISSDNSQNTVYLQMNSLRTEDTARYYCAI----------------------', 'IGHV1-6*02': 'EEKLVESGG-GLVQPGGSLRLSCVGSGFDF----SDNAFSWVRQAPGKGLEWVAAIASSDYDGSTYYADSVK-GRFTISRDNSQNTVYLQMNSLRTEDTARYYCAI----------------------', 'IGHV1-8*01': 'EEKLVESGG-GLVQPGGSLRLSCVGSGITF----SSYAVSWVRQAPGKGLEWLAGIDSGSYSGSTYYADSVK-GRFTISRDDSQNTAYLQMTSLRTEDTARYYCAG----------------------', 'IGHV1S2*01': 'EEKLVESGG-GLVQPGGSLRLSCVGSGFTF----SSYEISWVRQAPGKGLEWLAGIYSS--GGSTYYADSVK-GRFTISRDNSQNTAYLQMNSLRTEDTARYYCAR----------------------', 'IGHV1S5*01': 'EEKLVESGG-GLVQPGGSLRLSCVGSGFTF----SSYAVSWVRQAPGKGLEWLAGIDSGSYSGSTYYADSVK-GRFTISRDNSQNTAYLQMNSLRTEDTARYYCAR----------------------', 'IGHV1S6*01': 
'QEKLVESGG-GLVQPGGSLRLSCVGSGFDF----SSYGVGWVRQAPGKGLESLASIGSGSYIGSTDDADSVK-GRFTISSDNSQNTAYLQMNSLRTEDTARYYCAR----------------------'}, 'alpaca': {'IGHV3-1*01': 'EVQLVESGG-GLVQPGGSLRLSCAASGFTF----DDYAMSWVRQAPGKGLEWVSAISWN--GGSTYYAESMK-GRFTISRDNAKNTLYLQMNSLKSEDTAVYYCAK----------------------', 'IGHV3-3*01': 'QVQLVESGG-GLVQAGGSLRLSCAASGRTF----SSYAMGWFRQAPGKEREFVAAISWS--GGSTYYADSVK-GRFTISRDNAKNTVYLQMNSLKPEDTAVYYCAA----------------------', 'IGHV3S1*01': 'QVQLVESGG-GLVQPGGSLRLSCAASGFTF----SSYWMYWVRQAPGKGLEWVSAINTG--GGSTYYADSVK-GRFTISRDNAKNTLYLQMNSLKSEDTAVYYCAK----------------------', 'IGHV3S53*01': 'QVQLVESGG-GLVQPGGSLRLSCAASGSIF----SINAMGWYRQAPGKQRELVAAITSG---GSTNYADSVK-GRFTISRDNAKNTVYLQMNSLKPEDTAVYYCNA----------------------', 'IGHV4S1*01': 'QVQLQESGP-GLVKPSQTLSLTCTVSGGSIT--TSYYAWSWIRQPPGKGLEWMGVIAYD---GSTYYSPSLK-SRTSISRDTSKNQFSLQLSSVTPEDTAVYYCAR----------------------'}, 'cow': {'IGHV1-10*01': 'QVQLRESGP-SLVKPSQTLSLTCTVSGFSL----SSYGVGWVRQAPGKALECLGGISSG---GSTGYNPALK-YRLSITKDNSKSQVSLSLSSVTTEDTATYYCAK----------------------', 'IGHV1-14*01': 'QVQLRESGP-SLVKPSQTLSLTCTVSGFSL----SDNSVGWVRQAPGKALEWLGVIYSG---GSTGYNPALK-SRLSITKDNSKSQVSLSLSSVTTEDTATYYCAR----------------------', 'IGHV1-14*02': 'QVQLRESGP-SLVKPSQTLSLTCTVSGFSL----SDNSVGWVRQAPGKALEWLGVIYSG---GSTGYNPALK-SRLSITKDNSKSQVSLSLSSVTTEDTATYYCAR----------------------', 'IGHV1-17*01': 'QVQLRESGP-SLVKPSQTLSLTCTVSGFSL----SSYAVSWVRQAPGKALEWLGDISSG---GSTGYNPALK-SRLSITKDNSKSQVSLSVSSVTPEDTATYYCAK----------------------', 'IGHV1-20*01': 'QVQLRESGP-SLVKPSQTLSLTCTVSGFSL----SSYAVGWVRQAPGKALEWLGGISSG---GSTYYNPALK-SRLSITKDNSKSQVSLSVSSVTPEDTATYYCAK----------------------', 'IGHV1-21*01': 'QVQLRESGP-SLVKPSQTLSLTCTISGFSL----SSYAVGWVRQAPGKALEWVGGISSG---GSTCLNPALK-SRLSITKDNSKSQVSLSVSSVTTEDTATYYCAK----------------------', 'IGHV1-25*01': 'QVQLQESGP-SLVKTSQTLSLTCTASGLSL----TRYGIHWVRQAPGKALEWLGDISSG---GSTGYNPGLK-SRLSITKDNSKSQVSLSLSSLTPEDSATYYCAR----------------------', 'IGHV1-27*01': 'QVQLRESGP-SLVKPSQTLSLTCTVSGFSL----SSNGVGWVRQAPGKALEWVGGIDND---GDTYYNPALK-SRLSITKDNSKSQVSLSVSSVTPEDTATYYCAK----------------------', 'IGHV1-30*01': 'QVQLRESGP-SLVKPSQTLSLTCTVSGFSL----SSNGVVWVRQAPGKALEWLGGICSG---GSTSFNPALK-SRLSITKDNSKSQVSLSVSSVTPEDTATYYCAR----------------------', 'IGHV1-30*02': 'QVQLRESGP-SLVKPSQTLSLTCTVSGFSL----SSNGVVWVRQAPGKALEWLGGICSG---GSTSFNPALK-SRLSITKDNSKSQVSLSVSSVTPEDTATYYCAKR---------------------', 'IGHV1-33*01': 'QVQLRESGP-SLVKPSQTLSLTCTISGFSL----SSYAVGWVRQAPGKALEWVGGISSG---GSTCLNPALK-SRLSITKDNSKSQVSLSVSSVTTEDTATYYCAK----------------------', 'IGHV1-37*01': 'QVQLQESGP-SLVKTSQTLSLTCTASGLSL----TRYGIHWVRQAPGKALEWLGDISSG---GSTGYNPGLK-SRLSITKDNSKSQVSLSLSSLTPEDSATYYCAR----------------------', 'IGHV1-39*01': 'KVQLQESGP-SLVKPSQTLSLTCTTSGFSL----TSYGVSWVRQAPGKALEWLGGIDSG---GSTGYNPGLK-SRLSITRDNSKSQVSLSVSSVTPEDTATYYCAK----------------------', 'IGHV1-7*01': 'QVQLRESGP-SLVKPSQTLSLTCTVSGFSL----SDKAVGWVRQAPGKALEWLGGIDTG---GSTGYNPGLK-SRLSITKDNSKSQVSLSVSSVTTEDSATYYCTTVH--------------------'}}, 'K': {'human': {'IGKV1-12*01': 'DIQMTQSPSSVSASVGDRVTITCRASQGI------SSWLAWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQAN--------------------', 'IGKV1-12*02': 'DIQMTQSPSSVSASVGDRVTITCRASQGI------SSWLAWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQAN--------------------', 'IGKV1-13*02': 'AIQLTQSPSSLSASVGDRVTITCRASQGI------SSALAWYQQKPGKAPKLLIYDA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQFN--------------------', 'IGKV1-16*01': 'DIQMTQSPSSLSASVGDRVTITCRASQGI------SNYLAWFQQKPGKAPKSLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQYN--------------------', 
'IGKV1-17*01': 'DIQMTQSPSSLSASVGDRVTITCRASQGI------RNDLGWYQQKPGKAPKRLIYAA-------SSLQSGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCLQHN--------------------', 'IGKV1-17*02': 'DIQMTQSPSSLSASVGDRVTITCRASQGI------RNDLGWYQQKPGKAPKRLIYAA-------SSLQSGVP-SRFSGSG--SGTEFTLTISNLQPEDFATYYCLQHN--------------------', 'IGKV1-17*03': 'DIQMTQSPSAMSASVGDRVTITCRASQGI------SNYLAWFQQKPGKVPKRLIYAA-------SSLQSGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCLQHN--------------------', 'IGKV1-27*01': 'DIQMTQSPSSLSASVGDRVTITCRASQGI------SNYLAWYQQKPGKVPKLLIYAA-------STLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDVATYYCQKYN--------------------', 'IGKV1-33*01': 'DIQMTQSPSSLSASVGDRVTITCQASQDI------SNYLNWYQQKPGKAPKLLIYDA-------SNLETGVP-SRFSGSG--SGTDFTFTISSLQPEDIATYYCQQYD--------------------', 'IGKV1-39*01': 'DIQMTQSPSSLSASVGDRVTITCRASQSI------SSYLNWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQSY--------------------', 'IGKV1-5*01': 'DIQMTQSPSTLSASVGDRVTITCRASQSI------SSWLAWYQQKPGKAPKLLIYDA-------SSLESGVP-SRFSGSG--SGTEFTLTISSLQPDDFATYYCQQYN--------------------', 'IGKV1-5*02': 'DIQMTQSPSTLSASVGDRVTIICRASQSI------SSWLAWYQQKPGKAPKLLIYDA-------SSLESGVP-SRFSGSG--SGTEFTLTISSLQPDDFATYYCQQYN--------------------', 'IGKV1-5*03': 'DIQMTQSPSTLSASVGDRVTITCRASQSI------SSWLAWYQQKPGKAPKLLIYKA-------SSLESGVP-SRFSGSG--SGTEFTLTISSLQPDDFATYYCQQYN--------------------', 'IGKV1-6*01': 'AIQMTQSPSSLSASVGDRVTITCRASQGI------RNDLGWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCLQDY--------------------', 'IGKV1-6*02': 'AIQMTQSPSSLSASVGDRVTITCRASQGI------RNDLGWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCLQDY--------------------', 'IGKV1-8*01': 'AIRMTQSPSSFSASTGDRVTITCRASQGI------SSYLAWYQQKPGKAPKLLIYAA-------STLQSGVP-SRFSGSG--SGTDFTLTISCLQSEDFATYYCQQYY--------------------', 'IGKV1-9*01': 'DIQLTQSPSFLSASVGDRVTITCRASQGI------SSYLAWYQQKPGKAPKLLIYAA-------STLQSGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQQLN--------------------', 'IGKV1-NL1*01': 'DIQMTQSPSSLSASVGDRVTITCRASQGI------SNSLAWYQQKPGKAPKLLLYAA-------SRLESGVP-SRFSGSG--SGTDYTLTISSLQPEDFATYYCQQYY--------------------', 'IGKV1D-12*01': 'DIQMTQSPSSVSASVGDRVTITCRASQGI------SSWLAWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQAN--------------------', 'IGKV1D-12*02': 'DIQMTQSPSSVSASVGDRVTITCRASQGI------SSWLAWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQAN--------------------', 'IGKV1D-13*01': 'AIQLTQSPSSLSASVGDRVTITCRASQGI------SSALAWYQQKPGKAPKLLIYDA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQFN--------------------', 'IGKV1D-13*02': 'AIQLTQSPSSLSASVGDRVTITCRASQGI------SSALAWYQQKPGKAPKLLIYDA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQFN--------------------', 'IGKV1D-16*01': 'DIQMTQSPSSLSASVGDRVTITCRASQGI------SSWLAWYQQKPEKAPKSLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQYN--------------------', 'IGKV1D-16*02': 'DIQMTQSPSSLSASVGDRVTITCRARQGI------SSWLAWYQQKPEKAPKSLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQYN--------------------', 'IGKV1D-17*01': 'NIQMTQSPSAMSASVGDRVTITCRARQGI------SNYLAWFQQKPGKVPKHLIYAA-------SSLQSGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCLQHN--------------------', 'IGKV1D-33*01': 'DIQMTQSPSSLSASVGDRVTITCQASQDI------SNYLNWYQQKPGKAPKLLIYDA-------SNLETGVP-SRFSGSG--SGTDFTFTISSLQPEDIATYYCQQYD--------------------', 'IGKV1D-39*01': 'DIQMTQSPSSLSASVGDRVTITCRASQSI------SSYLNWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQSY--------------------', 'IGKV1D-43*01': 'AIRMTQSPFSLSASVGDRVTITCWASQGI------SSYLAWYQQKPAKAPKLFIYYA-------SSLQSGVP-SRFSGSG--SGTDYTLTISSLQPEDFATYYCQQYY--------------------', 'IGKV1D-8*01': 
'VIWMTQSPSLLSASTGDRVTISCRMSQGI------SSYLAWYQQKPGKAPELLIYAA-------STLQSGVP-SRFSGSG--SGTDFTLTISCLQSEDFATYYCQQYY--------------------', 'IGKV1D-8*02': 'AIWMTQSPSLLSASTGDRVTISCRMSQGI------SSYLAWYQQKPGKAPELLIYAA-------STLQSGVP-SRFSGSG--SGTDFTLTISCLQSEDFATYYCQQYY--------------------', 'IGKV1D-8*03': 'VIWMTQSPSLLSASTGDRVTISCRMSQGI------SSYLAWYQQKPGKAPELLIYAA-------STLQSGVP-SRFSGSG--SGTDFTLTISCLQSEDFATYYCQQYY--------------------', 'IGKV2-24*01': 'DIVMTQTPLSSPVTLGQPASISCRSSQSLVHS-DGNTYLSWLQQRPGQPPRLLIYKI-------SNRFSGVP-DRFSGSG--AGTDFTLKISRVEAEDVGVYYCMQAT--------------------', 'IGKV2-28*01': 'DIVMTQSPLSLPVTPGEPASISCRSSQSLLHS-NGYNYLDWYLQKPGQSPQLLIYLG-------SNRASGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQAL--------------------', 'IGKV2-29*02': 'DIVMTQTPLSLSVTPGQPASISCKSSQSLLHS-DGKTYLYWYLQKPGQSPQLLIYEV-------SSRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQGI--------------------', 'IGKV2-29*03': 'DIVMTQTPLSLSVTPGQPASISCKSSQSLLHS-DGKTYLYWYLQKPGQSPQLLIYEV-------SSRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQGI--------------------', 'IGKV2-30*01': 'DVVMTQSPLSLPVTLGQPASISCRSSQSLVYS-DGNTYLNWFQQRPGQSPRRLIYKV-------SNRDSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQGT--------------------', 'IGKV2-40*01': 'DIVMTQTPLSLPVTPGEPASISCRSSQSLLDSDDGNTYLDWYLQKPGQSPQLLIYTL-------SYRASGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQRI--------------------', 'IGKV2D-26*01': 'EIVMTQTPLSLSITPGEQASISCRSSQSLLHS-DGYTYLYWFLQKARPVSTLLIYEV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDFGVYYCMQDA--------------------', 'IGKV2D-26*02': 'EIVMTQTPLSLSITPGEQASMSCRSSQSLLHS-DGYTYLYWFLQKARPVSTLLICEV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDFGVYYCMQDA--------------------', 'IGKV2D-26*03': 'EIVMTQTPLSLSITPGEQASMSCRSSQSLLHS-DGYTYLYWFLQKARPVSTLLIYEV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDFGVYYCMQDA--------------------', 'IGKV2D-28*01': 'DIVMTQSPLSLPVTPGEPASISCRSSQSLLHS-NGYNYLDWYLQKPGQSPQLLIYLG-------SNRASGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQAL--------------------', 'IGKV2D-29*01': 'DIVMTQTPLSLSVTPGQPASISCKSSQSLLHS-DGKTYLYWYLQKPGQPPQLLIYEV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQSI--------------------', 'IGKV2D-29*02': 'DIVMTQTPLSLSVTPGQPASISCKSSQSLLHS-DGKTYLYWYLQKPGQSPQLLIYEV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQSI--------------------', 'IGKV2D-30*01': 'DVVMTQSPLSLPVTLGQPASISCRSSQSLVYS-DGNTYLNWFQQRPGQSPRRLIYKV-------SNWDSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQGT--------------------', 'IGKV2D-40*01': 'DIVMTQTPLSLPVTPGEPASISCRSSQSLLDSDDGNTYLDWYLQKPGQSPQLLIYTL-------SYRASGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQRI--------------------', 'IGKV3-11*01': 'EIVLTQSPATLSLSPGERATLSCRASQSV------SSYLAWYQQKPGQAPRLLIYDA-------SNRATGIP-ARFSGSG--SGTDFTLTISSLEPEDFAVYYCQQRS--------------------', 'IGKV3-11*02': 'EIVLTQSPATLSLSPGERATLSCRASQSV------SSYLAWYQQKPGQAPRLLIYDA-------SNRATGIP-ARFSGSG--SGRDFTLTISSLEPEDFAVYYCQQRS--------------------', 'IGKV3-15*01': 'EIVMTQSPATLSVSPGERATLSCRASQSV------SSNLAWYQQKPGQAPRLLIYGA-------STRATGIP-ARFSGSG--SGTEFTLTISSLQSEDFAVYYCQQYN--------------------', 'IGKV3-20*01': 'EIVLTQSPGTLSLSPGERATLSCRASQSVS-----SSYLAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTDFTLTISRLEPEDFAVYYCQQYG--------------------', 'IGKV3D-11*01': 'EIVLTQSPATLSLSPGERATLSCRASQGV------SSYLAWYQQKPGQAPRLLIYDA-------SNRATGIP-ARFSGSG--PGTDFTLTISSLEPEDFAVYYCQQRS--------------------', 'IGKV3D-11*02': 'EIVLTQSPATLSLSPGERATLSCRASQSV------SSYLAWYQQKPGQAPRLLIYDA-------SNRATGIP-ARFSGSG--PGTDFTLTISSLEPEDFAVYYCQQRS--------------------', 'IGKV3D-11*03': 'EIVLTQSPATLSLSPGERATLSCRASQGV------SSNLAWYQQKPGQAPRLLIYDA-------SNRATGIP-ARFSGSG--PGTDFTLTISSLEPEDFAVYYCQQRS--------------------', 'IGKV3D-15*01': 
'EIVMTQSPATLSVSPGERATLSCRASQSV------SSNLAWYQQKPGQAPRLLIYGA-------STRATGIP-ARFSGSG--SGTEFTLTISSLQSEDFAVYYCQQYN--------------------', 'IGKV3D-15*03': 'EIVMTQSPATLSVSPGERATLSCRASQSV------SSNLAWYQQKPGQAPRLLIYGA-------SIRATGIP-ARFSGSG--SGTEFTLTISILQSEDFAVYYCQQYN--------------------', 'IGKV3D-20*01': 'EIVLTQSPATLSLSPGERATLSCGASQSVS-----SSYLAWYQQKPGLAPRLLIYDA-------SSRATGIP-DRFSGSG--SGTDFTLTISRLEPEDFAVYYCQQYG--------------------', 'IGKV3D-7*01': 'EIVMTQSPATLSLSPGERATLSCRASQSVS-----SSYLSWYQQKPGQAPRLLIYGA-------STRATGIP-ARFSGSG--SGTDFTLTISSLQPEDFAVYYCQQDY--------------------', 'IGKV4-1*01': 'DIVMTQSPDSLAVSLGERATINCKSSQSVLYSSNNKNYLAWYQQKPGQPPKLLIYWA-------STRESGVP-DRFSGSG--SGTDFTLTISSLQAEDVAVYYCQQYY--------------------', 'IGKV5-2*01': 'ETTLTQSPAFMSATPGDKVNISCKASQDI------DDDMNWYQQKPGEAAIFIIQEA-------TTLVPGIP-PRFSGSG--YGTDFTLTINNIESEDAAYYFCLQHD--------------------', 'IGKV6-21*01': 'EIVLTQSPDFQSVTPKEKVTITCRASQSI------GSSLHWYQQKPDQSPKLLIKYA-------SQSFSGVP-SRFSGSG--SGTDFTLTINSLEAEDAATYYCHQSS--------------------', 'IGKV6-21*02': 'EIVLTQSPDFQSVTPKEKVTITCRASQSI------GSSLHWYQQKPDQSPKLLIKYA-------SQSISGVP-SRFSGSG--SGTDFTLTINSLEAEDAATYYCHQSS--------------------', 'IGKV6D-21*01': 'EIVLTQSPDFQSVTPKEKVTITCRASQSI------GSSLHWYQQKPDQSPKLLIKYA-------SQSFSGVP-SRFSGSG--SGTDFTLTINSLEAEDAATYYCHQSS--------------------', 'IGKV6D-21*02': 'EIVLTQSPDFQSVTPKEKVTITCRASQSI------GSSLHWYQQKPDQSPKLLIKYA-------SQSISGVP-SRFSGSG--SGTDFTLTINSLEAEDAAAYYCHQSS--------------------'}, 'mouse': {'IGKV1-110*01': 'DVVMTQTPLSLPVSLGDQASISCRSSQSLVHS-NGNTYLHWYLQKPGQSPKLLIYKV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDLGVYFCSQST--------------------', 'IGKV1-110*02': 'DVVMTQTPLSLPVSLGDQASISCRSSQSLVHS-NGNTYLYWYLQKPGQSPKLLIYRV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDLGVYFCFQGT--------------------', 'IGKV1-117*01': 'DVLMTQTPLSLPVSLGDQASISCRSSQSIVHS-NGNTYLEWYLQKPGQSPKLLIYKV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDLGVYYCFQGS--------------------', 'IGKV1-117*02': 'DVVMTQTPLSLPVSLGDQASISCRSSQSIVHS-NGNTYLEWYLQKPGQSPKLLIYKV-------SNRLSGVP-DRFSGSG--SGTDFTLKISRVEAEDLGVYYCFQGS--------------------', 'IGKV1-122*01': 'DAVMTQTPLSLPVSLGDQASISCRSSQSLENS-NGNTYLNWYLQKPGQSPQLLIYRV-------SNRFSGVL-DRFSGSG--SGTDFTLKISRVEAEDLGVYFCLQVT--------------------', 'IGKV1-132*01': 'DVVMTQTPLSLSVTIGQPASISCKSSQSLLYS-NGKTYLNWLQQRPGQAPKHLMYQV-------SKLDPGIP-DRFSGSG--SETDFTLKISRVEAEDLGVYYCLQGT--------------------', 'IGKV1-133*01': 'DVVMTQTPLTLSVTIGQPASISCKSSQSLLYS-NGKTYLNWLLQRPGQSPKRLIYLV-------SKLDSGVP-DRFTGSG--SGTDFTLKISRVEAEDLGVYYCVQGT--------------------', 'IGKV1-135*01': 'DVVMTQTPLTLSVTIGQPASISCKSSQSLLDS-DGKTYLNWLLQRPGQSPKRLIYLV-------SKLDSGVP-DRFTGSG--SGTDFTLKISRVEAEDLGVYYCWQGT--------------------', 'IGKV1-88*01': 'DVVVTQTPLSLPVSFGDQVSISCRSSQSLANS-YGNTYLSWYLHKPGQSPQLLIYGI-------SNRFSGVP-DRFSGSG--SGTDFTLKISTIKPEDLGMYYCLQGT--------------------', 'IGKV1-99*01': 'DVVLTQTPLSLPVNIGDQASISCKSTKSLLNS-DGFTYLDWYLQKPGQSPQLLIYLV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDLGVYYCFQSN--------------------', 'IGKV10-94*01': 'DIQMTQTTSSLSASLGDRVTISCRASEDI------SNYLNWYQQKPDGTVKLLIYYA-------SSLHSGVP-SRFSGSG--SGTDYSLTISNLEPEDIATYYCQQYS--------------------', 'IGKV10-94*02': 'DIQMTQTTSSLSASLGDRVTISCSASQGI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEPEDIATYYCQQYS--------------------', 'IGKV10-94*03': 'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEPEDIATYYCQQYS--------------------', 'IGKV10-94*04': 'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEPEDIATYYCQQYS--------------------', 'IGKV10-94*05': 
'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEPEDIATYYCQQYS--------------------', 'IGKV10-94*06': 'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEPEDIATYYCQQDS--------------------', 'IGKV10-94*07': 'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQRPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEPEDIATYYCQQYS--------------------', 'IGKV10-94*08': 'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEPEDIATYYCQQYS--------------------', 'IGKV10-95*01': 'DIQMTQTTSSLSASLGDRVTISCRASEDI------STYLNWYQQKPDGTVKLLIYYT-------SGLHSGVP-SRFSGSG--SGADYSLTISNLEPEDIATYYCQQYS--------------------', 'IGKV10-96*01': 'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEQEDIATYFCQQGS--------------------', 'IGKV10-96*02': 'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEQEDIATYFCQQGS--------------------', 'IGKV10-96*03': 'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEQEDIATYFCQQDS--------------------', 'IGKV10-96*04': 'DIQMTQTTSSLSASLGDRVTISCRASQDI------SNYLNWYQQKPDGTVKLLIYYT-------SRLHSGVP-SRFSGSG--SGTDYSLTISNLEQEDIATYFCQQDS--------------------', 'IGKV11-106*02': 'DVLMTQSPSSLSASLGERVSLTCQASQGI------SNNLNWYQQTPGKAPRLLIYDA-------SKLEDGVP-SRFSGTG--YRTDFNFTISSLEEEDVATYFCLQHR--------------------', 'IGKV11-125*01': 'DVQMIQSPSSLSASLGDIVTMTCQASQGT------SINLNWFQQKPGKAPKLLIYGA-------SNLEDGVP-SRFSGSR--YGTDFTLTISSLEDEDMATYFCLQHS--------------------', 'IGKV12-104-2*01': 'DIRMIQTPASLSGSLGESVTITCQASQDI------GKSLLWYQQKTGNPPKILIYTT-------SNLADGIS-SRVSGSG--SGTQFFLKFSSLKPEDTATYYCCQGY--------------------', 'IGKV12-38*01': 'DIQMTQSPASLAASVGETVTITCRASENI------YYSLAWYQQKQGKSPQLLIYNA-------NSLEDGVP-SRFSGSG--SGTQYSMKINSMQPEDTATYFCKQAY--------------------', 'IGKV12-41*01': 'DIQMTQSPASLSASVGETVTITCRASGNI------HNYLAWYQQKQGKSPQLLVYNA-------KTLADGVP-SRFSGSG--SGTQYSLKINSLQPEDFGSYYCQHFW--------------------', 'IGKV12-41*02': 'DIQMTQSPASLSASVGETVTITCRASGNI------HNYLAWYQQKQGKSPQLLVYNA-------KTLADGVP-SRFSGSG--SGTQYSLKINSLQPEDFGSYYCQHFW--------------------', 'IGKV12-44*01': 'DIQMTQSPASLSASVGETVTITCRASENI------YSYLAWYQQKQGKSPQLLVYNA-------KTLAEGVP-SRFSGSG--SGTQFSLKINSLQPEDFGSYYCQHHY--------------------', 'IGKV12-46*01': 'DIQMTQSPASLSVSVGETVTITCRASENI------YSNLAWYQQKQGKSPQLLVYAA-------TNLADGVP-SRFSGSG--SGTQYSLKINSLQSEDFGSYYCQHFW--------------------', 'IGKV12-89*01': 'DIQMTQSPASLSASVGETVTITCGASENI------YGALNWYQRKQGKSPQLLIYGA-------TNLADGMS-SRFSGSG--SGRQYSLKISSLHPDDVATYYCQNVL--------------------', 'IGKV12-98*01': 'DIQMTQSPASQSASLGESVTITCLASQTI------GTWLAWYQQKPGKSPQLLIYAA-------TSLADGVP-SRFSGSG--SGTKFSFKISSLQAEDFVSYYCQQLY--------------------', 'IGKV12-e*01': 'DIQMTQSPASLSVSVGETVTITCRASENI------YSNLAWLFSRNRENPPSLVYAA-------TNLADGVP-SRFSGSG--SGTQYSLKINSQQPEDFGSYYCQHFW--------------------', 'IGKV13-84*01': 'DIQMTQSSSSFSVSLGDRVTITCKASEDI------YNRLAWYQQKPGNAPRLLISGA-------TSLETGVP-SRFSGSG--SGKDYTLSITSLQTEDVATYYCQQYW--------------------', 'IGKV13-85*01': 'DIQMTQSSSYLSVSLGGRVTITCKASDHI------NNWLAWYQQKPGNAPRLLISGA-------TSLETGVP-SRFSGSG--SGKDYTLSITSLQTEDVATYYCQQYW--------------------', 'IGKV14-100*01': 'DILMTQSPSSMSVSLGDTVSITCHASQGI------SSNIGWLQQKPGKSFKGLIYHG-------TNLEDGVP-SRFSGSG--SGADYSLTISSLESEDFADYYCVQYA--------------------', 'IGKV14-111*01': 'DIKMTQSPSSMYASLGERVTITCKASQDI------NSYLSWFQQKPGKSPKTLIYRA-------NRLVDGVP-SRFSGSG--SGQDYSLTISSLEYEDMGIYYCLQYD--------------------', 
'IGKV14-126*01': 'DIKMTQSPSSMYASLGERVTITCKASQDI------KSYLSWYQQKPWKSPKTLIYYA-------TSLADGVP-SRFSGSG--SGQDYSLTISSLESDDTATYYCLQHG--------------------', 'IGKV14-130*01': 'EIQMTQSPSSMSASLGDRITITCQATQDI------VKNLNWYQQKPGKPPSFLIYYA-------TELAEGVP-SRFSGSG--SGSDYSLTISNLESEDFADYYCLQFY--------------------', 'IGKV16-104*01': 'DVQITQSPSYLAASPGETITINCRASKSI------SKYLAWYQEKPGKTNKLLIYSG-------STLQSGIP-SRFSGSG--SGTDFTLTISSLEPEDFAMYYCQQHN--------------------', 'IGKV17-121*01': 'ETTVTQSPASLSMAIGEKVTIRCITSTDI------DDDMNWYQQKPGEPPKLLISEG-------NTLRPGVP-SRFSSSG--YGTDFVFTIENMLSEDVADYYCLQSD--------------------', 'IGKV17-127*01': 'ETTVTQSPASLSVATGEKVTIRCITSTDI------DDDMNWYQQKPGEPPKLLISEG-------NTLRPGVP-SRFSSSG--YGTDFVFTIENTLSEDVADYYCLQSD--------------------', 'IGKV18-36*01': 'TGETTQAPASLSFSLGETATLSCRSSESV------GSYLAWYQQKAEQVPRLLIHSA-------STRAGGVP-VRFSGTG--SGTDFTLTISSLEPEDAAVYYCQPFK--------------------', 'IGKV19-93*01': 'DIQMTQSPSSLSASLGGKVTITCKASQDI------NKYIAWYQHKPGKGPRLLIHYT-------STLQPGIP-SRFSGSG--SGRDYSFSISNLEPEDIATYYCLQYD--------------------', 'IGKV19-93*02': 'DIQMTQSPSSLSASLGGKVTITCKASQDI------NKYIAWYQHKPGKGPRLLIHYT-------STLQPGIP-SRFSGSG--SGRDYSFSISNLEPEDIATYYCLQYD--------------------', 'IGKV2-109*01': 'DIVMTQAAFSNPVTLGTSASISCRSSKSLLHS-NGITYLYWYLQKPGQSPQLLIYQM-------SNLASGVP-DRFSSSG--SGTDFTLRISRVEAEDVGVYYCAQNL--------------------', 'IGKV2-109*02': 'DIVMTQAAFSNPVTLGTSASISCRSSKSLLHS-NGITYLYWYLQKPGQSPQLLIYQM-------SNLASGVP-DRFSSSG--SGTDFTLRISRVEAEDVGVYYCAQNL--------------------', 'IGKV2-109*03': 'DIVMTQAAFSNPVTLGTSASISCSSSKSLLHS-NGITYLYWYLQRPGQSPQLLIYRM-------SNLASGVP-DRFSGSG--SGTDFTLRISRVEAEDVGVYYCAQML--------------------', 'IGKV2-109*04': 'DIVMTQAAFSNPVTLGTSASISCRSSKSLLHS-DGITYLYWYLQRPGQSPQLLIYRM-------SNLASGVP-DRFSGSG--SGTDFTLRISRVEAEDVGVYYCAQML--------------------', 'IGKV2-112*01': 'DIVITQDELSNPVTSGESVSISCRSSKSLLYK-DGKTYLNWFLQRPGQSPQLLIYLM-------STRASGVS-DRFSGSG--SGTDFTLEISRVKAEDVGVYYCQQLV--------------------', 'IGKV2-112*02': 'DIVITQDELSNPVTSGESVSISCRSSKSLLYK-DGKTYLNWFLQRPGQSPQLLVYWM-------STRASGVS-DRFSGSG--SGTDFTLEISRVKAEDVGVYYCQQVV--------------------', 'IGKV2-116*01': 'DIVMTQAAFSNPVTLGTSASISCRSSKNLLHS-NGITYLYWYLQRPGQSPQLLIYRV-------SNLASGVP-NRFSGSE--SGTDFTLRISRVEAEDVGVYYCAQLL--------------------', 'IGKV2-137*01': 'DIVMTQAAPSVPVTPGESVSISCRSSKSLLHS-NGNTYLYWFLQRPGQSPQLLIYRM-------SNLASGVP-DRFSGSG--SGTAFTLRISRVEAEDVGVYYCMQHL--------------------', 'IGKV2-a*01': 'DIVMTQAAFSNPVTLGTSASISCRSSKSLLHS-SGNTYLYWFLQKPGQSPQLLIYYI-------SNLASGVP-DRFSGSG--SGTDFTLRISRVEAEDVGVYYCMQGL--------------------', 'IGKV20-101-2*01': 'NIQVIQSPF-LSASVGERVTISCKTHQHI------NSSIAWYQQKVGKAPILLIRDA-------SFSLTDTP-SRFTGNG--FGTDFTLSISSMQSQDGATYFCQQHF--------------------', 'IGKV3-1*01': 'DIVLTQSPASLAVSLGQRATISCRASESVEY--YGTSLMQWYQQKPGQPPKLLIYAA-------SNVESGVP-ARFSGSG--SGTDFSLNIHPVEEDDIAMYFCQQSR--------------------', 'IGKV3-10*01': 'NIVLTQSPASLAVSLGQRATISCRASESVDS--YGNSFMHWYQQKPGQPPKLLIYLA-------SNLESGVP-ARFSGSG--SRTDFTLTIDPVEADDAATYYCQQNN--------------------', 'IGKV3-12*01': 'DIVLTQSPASLAVSLGQRATISCRASKSVST--SGYSYMHWYQQKPGQPPKLLIYLA-------SNLESGVP-ARFSGSG--SGTDFTLNIHPVEEEDAATYYCQHSR--------------------', 'IGKV3-2*01': 'DIVLTQSPASLAVSLGQRATISCRASESVDN--YGISFMNWFQQKPGQPPKLLIYAA-------SNQGSGVP-ARFSGSG--SGTDFSLNIHPMEEDDTAMYFCQQSK--------------------', 'IGKV3-3*01': 'DIVLTQSPASLAVSLGQRATIFCRASQSVDY--NGISYMHWFQQKPGQPPKLLIYAA-------SNLESGIP-ARFSGSG--SGTDFTLNIHPVEEEDAATYYCQQSI--------------------', 'IGKV3-4*01': 'DIVLTQSPASLAVSLGQRATISCKASQSVDY--DGDSYMNWYQQKPGQPPKLLIYAA-------SNLESGIP-ARFSGSG--SGTDFTLNIHPVEEEDAATYYCQQSN--------------------', 
'IGKV3-5*01': 'DIVLTQSPASLAVSLGQRATISCRASESVDS--YGNSFMHWYQQKPGQPPKLLIYRA-------SNLESGIP-ARFSGSG--SRTDFTLTINPVEADDVATYYCQQSN--------------------', 'IGKV3-7*01': 'DIVLTQSPASLAVSLGQRATISCRASQSVST--SSYSYMHWYQQKPGQPPKLLIKYA-------SNLESGVP-ARFSGSG--SGTDFTLNIHPVEEEDTATYYCQHSW--------------------', 'IGKV3-7*02': 'DIVLTQSPASLAVSLGQRATISCRASQSVST--SSYSYMHWYQQKPGQPPKLLIKYA-------SNLESGVP-ARFSGSG--SGTDFTLNIHPVEEEDTATYYCQHSW--------------------', 'IGKV3-9*01': 'DIVLTQSPASLAVSLGQRATISCQASESVSF--AGTSLMHWYQQKPGQPPKLLIYRA-------SNLESGVP-ARFSGSG--SESDFTLTIDPVEEDDAAMYYCMQSM--------------------', 'IGKV4-50*01': 'ENVLTQSPAIMSASLGEKVTMSCRASSSV-------NYMYWYQQKSDASPKLWIYYT-------SNLAPGVP-ARFSGSG--SGNSYSLTISSMEGEDAATYYCQQFT--------------------', 'IGKV4-51*01': 'ENVLTQSPAIMAASLGEKVTMTCSASSSVS-----SSYLHWYQQKSGTSPKLWIYGT-------SNLASGVP-ARFSGSG--AGISYSLTISSMEAENDATYYCQQWS--------------------', 'IGKV4-53*01': 'EIVLTQSPALMAASPGEKVTITCSVSSSIS-----SSNLHWYQQKSETSPKPWIYGT-------SNLASGVP-VRFSGSG--SGTSYSLTISSMEAEDAATYYCQQWS--------------------', 'IGKV4-55*01': 'QIVLTQSPAIMSASPGEKVTMTCSASSSV-------SYMYWYQQKPGSSPRLLIYDT-------SNLASGVP-VRFSGSG--SGTSYSLTISRMEAEDAATYYCQQWS--------------------', 'IGKV4-57*01': 'QIVLTQSPAIMSASPGEKVTITCSASSSV-------SYMHWFQQKPGTSPKLWIYST-------SNLASGVP-ARFSGSG--SGTSYSLTISRMEAEDAATYYCQQRS--------------------', 'IGKV4-57-1*01': 'ENVLTQSPAIMSASPGEKVTMTCRASSSVS-----SSYLHWYQQKSGASPKLWIYST-------SNLASGVP-ARFSGSG--SGTSYSLTISSVEAEDAATYYCQQYS--------------------', 'IGKV4-58*01': 'ENVLTQSPAIMAASLGQKVTMTCSASSSVS-----SSYLHWYQQKSGASPKPLIHRT-------SNLASGVP-ARFSGSG--SGTSYSLTISSVEAEDDATYYCQQWS--------------------', 'IGKV4-59*01': 'QIVLTQSPAIMSASPGEKVTMTCSASSSV-------SYMHWYQQKSGTSPKRWIYDT-------SKLASGVP-ARFSGSG--SGTSYSLTISSMEAEDAATYYCQQWS--------------------', 'IGKV4-61*01': 'QIVLTQSPAIMSASPGEKVTISCSASSSV-------SYMYWYQQKPGSSPKPWIYRT-------SNLASGVP-ARFSGSG--SGTSYSLTISSMEAEDAATYYCQQYH--------------------', 'IGKV4-63*01': 'ENVLTQSPAIMSASPGEKVTMTCSASSSV-------SYMHWYQQKSSTSPKLWIYDT-------SKLASGVP-GRFSGSG--SGNSYSLTISSMEAEDVATYYCFQGS--------------------', 'IGKV4-68*01': 'QIVLTQSPALMSASPGEKVTMTCSASSSV-------SYMYWYQQKPRSSPKPWIYLT-------SNLASGVP-ARFSGSG--SGTSYSLTISSMEAEDAATYYCQQWS--------------------', 'IGKV4-69*01': 'QILLTQSPAIMSASPGEKVTMTCSASSSV-------SYMHWYQQKPGSSPKPWIYDT-------SNLASGFP-ARFSGSG--SGTSYSLIISSMEAEDAATYYCHQRS--------------------', 'IGKV4-70*01': 'QIVLTQSPAIMSASPGEKVTMTCSASSSI-------SYMHWYQQKPGTSPKRWIYDT-------SKLASGVP-ARFSGSG--SGTSYSLTISSMEAEDAATYYCHQRS--------------------', 'IGKV4-71*01': 'QIVLTQSPAIMSASPGEKVTMTCSASSSV-------SYMHWYQQKPGSSPRLWIYLT-------FNLASGVP-ARFSGSG--SGTSYSLSISSMEAEDAATYYCQQWS--------------------', 'IGKV4-72*01': 'QIVLSQSPAILSASPGEKVTMTCRASSSV-------SYMHWYQQKPGSSPKPWIYAT-------SNLASGVP-ARFSGSG--SGTSYSLTISRVEAEDAATYYCQQWS--------------------', 'IGKV4-74*01': 'QIVLTQSPAIMSASLGERVTMTCTASSSVS-----SSYLHWYQQKPGSSPKLWIYST-------SNLASGVP-ARFSGSG--SGTSYSLTISSMEAEDAATYYCHQYH--------------------', 'IGKV4-78*01': 'QIVLTQSPAIMSASPGEKVTMTCSARSSVS-----SSYLYWYQQKPGSSPKLWIYST-------SNLASGVP-ARFSGSG--SGTSYSLTISSMEAEDAATFYCQQYS--------------------', 'IGKV4-79*01': 'QIVLTQSPAIMSASPGEKVTLTCSASSSVS-----SSYLYWYQQKPGSSPKLWIYST-------SNLASGVP-ARFSGSG--SGTSYSLTISSMEAEDAASYFCHQWS--------------------', 'IGKV4-80*01': 'QIVLTQSPAIMSASLGEEITLTCSASSSV-------SYMHWYQQKSGTSPKLLIYST-------SNLASGVP-SRFSGSG--SGTFYSLTISSVEAEDAADYYCHQWS--------------------', 'IGKV4-81*01': 'ENVLTQSPAIMAASPGEKVTMTCSASSSVS-----SSNLHWYQQKSGTSTKFWIYRT-------SNLASEVP-APFSGSG--SGTSYSLTISSVEAEDAATYYCQQWS--------------------', 'IGKV4-86*01': 
'EIVLTQSPAITAASLGQKVTITCSASSSV-------SYMHWYQQKSGTSPKPWIYEI-------SKLASGVP-ARFSGSG--SGTSYSLTISSMEAEDAAIYYCQQWN--------------------', 'IGKV4-90*01': 'EILLTQSPAIIAASPGEKVTITCSASSSV-------SYMNWYQQKPGSSPKIWIYGI-------SNLASGVP-ARFSGSG--SGTSFSFTINSMEAEDVATYYCQQRS--------------------', 'IGKV4-91*01': 'EIVLTQSPTTMAASPGEKITITCSASSSIS-----SNYLHWYQQKPGFSPKLLIYRT-------SNLASGVP-ARFSGSG--SGTSYSLTIGTMEAEDVATYYCQQGS--------------------', 'IGKV4-92*01': 'EMVLTQSPVSITASRGEKVTITCRASSSIS-----SNYLHWYQQKPGSSPKLLIYRT-------SILASGVL-DSFSGSG--SESSYTLTISCMQDEVAATYYCQQGS--------------------', 'IGKV5-37*01': 'DILLTQSPATLSVTPGETVSLSCRASQSI------YKNLHWYQQKSHRSPRLLIKYA-------SDSISGIP-SRFTGSG--SGTDYTLSINSVKPEDEGIYYCLQGY--------------------', 'IGKV5-39*01': 'DIVMTQSPATLSVTPGDRVSLSCRASQSI------SDYLHWYQQKSHESPRLLIKYA-------SQSISGIP-SRFSGSG--SGSDFTLSINSVEPEDVGVYYCQNGH--------------------', 'IGKV5-43*01': 'DIVLTQSPATLSVTPGDSVSLSCRASQSI------SNNLHWYQQKSHESPRLLIKYA-------SQSISGIP-SRFSGSG--SGTDFTLSINSVETEDFGMYFCQQSN--------------------', 'IGKV5-45*01': 'DIVLTQSPATLSVTPGDRVSLSCRASQSI------SNYLHWYQQKSHESPRLLIKYA-------SQSISGIP-SRFSGSG--SGTDFTLSINSVETEDFGMYFCQQSN--------------------', 'IGKV5-48*01': 'DILLTQSPAILSVSPGERVSFSCRASQSI------GTSIHWYQQRTNGSPRLLIKYA-------SESISGIP-SRFSGSG--SGTDFTLSINSVESEDIADYYCQQSN--------------------', 'IGKV6-13*01': 'DIVMTQSQKFMSTSVGDRVSITCKASQNV------GTAVAWYQQKPGQSPKLLIYSA-------SNRYTGVP-DRFTGSG--SGTDFTLTISNMQSEDLADYFCQQYS--------------------', 'IGKV6-14*01': 'DIVMTQSQKFMSTSVGDRVSITCKASQNV------RTAVAWYQQKPGQSPKALIYLA-------SNRHTGVP-DRFTGSG--SGTDFTLTISNVQSEDLADYFCLQHW--------------------', 'IGKV6-15*01': 'DIVMTQSQKFMSTSVGDRVSVTCKASQNV------GTNVAWYQQKPGQSPKALIYSA-------SYRYSGVP-DRFTGSG--SGTDFTLTISNVQSEDLAEYFCQQYN--------------------', 'IGKV6-17*01': 'DIVMTQSHKFMSTSVGDRVSITCKASQDV------STAVAWYQQKPGQSPKLLIYSA-------SYRYTGVP-DRFTGSG--SGTDFTFTISSVQAEDLAVYYCQQHY--------------------', 'IGKV6-20*01': 'NIVMTQSPKSMSMSVGERVTLSCKASENV------GTYVSWYQQKPEQSPKLLIYGA-------SNRYTGVP-DRFTGSG--SATDFTLTISSVQAEDLADYHCGQSY--------------------', 'IGKV6-23*01': 'DIVMTQSHKFMSTSVGDRVSITCKASQDV------GTAVAWYQQKPGQSPKLLIYWA-------STRHTGVP-DRFTGSG--SGTDFTLTISNVQSEDLADYFCQQYS--------------------', 'IGKV6-25*01': 'DIVMTQSHKFMSTSVGDRVSITCKASQDV------STAVAWYQQKPGQSPKLLIYWA-------STRHTGVP-DRFTGSG--SGTDYTLTISSVQAEDLALYYCQQHY--------------------', 'IGKV6-29*01': 'NIVMTQSPKSMSMSVGERVTLSCKASENV------GTYVSWYQQKPEQSPKLLIYGA-------SNRYPGVP-DRFTGSG--SATDFTLTISSLQAEDLADYHCGQGY--------------------', 'IGKV6-32*01': 'SIVMTQTPKFLLVSAGDRVTITCKASQSV------SNDVAWYQQKPGQSPKLLIYYA-------SNRYTGVP-DRFTGSG--YGTDFTFTISTVQAEDLAVYFCQQDY--------------------', 'IGKV6-32*02': 'SIVMTQTPKFLLVSAGERVTITCKASQSV------SNDVAWYQQKPGQSPKLLIYYA-------SNRYTGVP-DRFTGSG--YGTDFTFTISTVQAEDLAVYFCQQDY--------------------', 'IGKV6-b*01': 'SIVMTQTPKFLPVSAGDRVTMTCKASQSV------GNNVAWYQQKPGQSPKLLIYYA-------SNRYTGVP-DRFTGSG--SGTDFTFTISSVQVEDLAVYFCQQHY--------------------', 'IGKV6-c*01': 'SIVMTQTPKFLPVTAEDRVTITCKASQSV------SNEVAWYQQKPGQSPKLLIYYA-------SNRYTGVP-DRFTGSG--SGTDFTFTISSVQVEDLAVYFCQQHY--------------------', 'IGKV6-d*01': 'SIVMTQSPKSLPVSAGDRVTMTCKASQSV------SNDVAWYQQKPGQSPKLLIYYA-------SNRYTGVP-ERFTGSG--SGTDFTFTISGVQAEDLAVYFCQQHY--------------------', 'IGKV7-33*01': 'DIVMTQSPTFLAVTASKKVTISCTASESLYSSKHKVHYLAWYQKKPEQSPKLLIYGA-------SNRYIGVP-DRFTGSG--SGTDFTLTISSVQVEDLTHYYCAQFY--------------------', 'IGKV8-16*01': 'EIVLTQSIPSLTVSAGERVTISCKSNQNLLWSGNQRYCLVWHQWKPGQTPTPLITWT-------SDRYSGVP-DRFIGSG--SVTDFTLTISSVQAEDVAVYFCQQHL--------------------', 'IGKV8-19*01': 
'DIVMTQSPSSLTVTAGEKVTMSCKSSQSLLNSGNQKNYLTWYQQKPGQPPKLLIYWA-------STRESGVP-DRFTGSG--SGTDFTLTISSVQAEDLAVYYCQNDY--------------------', 'IGKV8-21*01': 'DIVMSQSPSSLAVSAGEKVTMSCKSSQSLLNSRTRKNYLAWYQQKPGQSPKLLIYWA-------STRESGVP-DRFTGSG--SGTDFTLTISSVQAEDLAVYYCKQSY--------------------', 'IGKV8-24*01': 'DIVMTQSPSSLAMSVGQKVTMSCKSSQSLLNSSNQKNYLAWYQQKPGQSPKLLVYFA-------STRESGVP-DRFIGSG--SGTDFTLTISSVQAEDLADYFCQQHY--------------------', 'IGKV8-27*01': 'NIMMTQSPSSLAVSAGEKVTMSCKSSQSVLYSSNQKNYLAWYQQKPGQSPKLLIYWA-------STRESGVP-DRFTGSG--SGTDFTLTISSVQAEDLAVYYCHQYL--------------------', 'IGKV8-28*01': 'DIVMTQSPSSLSVSAGEKVTMSCKSSQSLLNSGNQKNYLAWYQQKPGQPPKLLIYGA-------STRESGVP-DRFTGSG--SGTDFTLTISSVQAEDLAVYYCQNDH--------------------', 'IGKV8-28*02': 'DIVMTQSPSSLSVSAGDKVTMSCKSSQSLLNSRNQKNYLAWYQQKPWQPPKLLIYGA-------STRESGVP-DRFTGSG--SGTDFTLTISSVQAEDLAVYYCQNDY--------------------', 'IGKV8-30*01': 'DIVMSQSPSSLAVSVGEKVTMSCKSSQSLLYSSNQKNYLAWYQQKPGQSPKLLIYWA-------STRESGVP-DRFTGSG--SGTDFTLTISSVKAEDLAVYYCQQYY--------------------', 'IGKV8-34*01': 'DILMTQSPSSLTVSAGEKVTMSCKSSQSLLASGNQNNYLAWHQQKPGRSPKMLIIWA-------STRVSGVP-DRFIGSG--SGTDFTLTINSVQAEDLAVYYCQQSY--------------------', 'IGKV9-120*01': 'DIQMTQSPSSLSASLGERVSLTCRASQDI------GSSLNWLQQEPDGTIKRLIYAT-------SSLDSGVP-KRFSGSR--SGSDYSLTISSLESEDFVDYYCLQYA--------------------', 'IGKV9-120*02': 'DIQMTQSPSSLSASLGERVSLTCRASQDI------GSSLNWLQQEPDGTIKRLIYAT-------SSLDSGVP-KRFSGSR--SGSDYSLTISSLESEDFVDYYCLQYA--------------------', 'IGKV9-123*01': 'DIQMIQSPSSMFASLGDRVSLSCRASQGI------RGNLDWYQQKPGGTIKLLIYST-------SNLNSGVP-SRFSGSG--SGSDYSLTISSLESEDFADYYCLQRN--------------------', 'IGKV9-124*01': 'DIQMTQSPSSLSASLGERVSLTCRASQEI------SGYLSWLQQKPDGTIKRLIYAA-------STLDSGVP-KRFSGSR--SGSDYSLTISSLESEDFADYYCLQYA--------------------'}, 'rat': {'IGKV10S6*01': 'AIQVTQSPTSLSASLGDRVTLTCRASQDI------NNKMAWYQQKPGEVPQLLIYYA-------STLQSGTP-SRFSGSG--AGTDFSFTISHLQSEDFATYYCLQGY--------------------', 'IGKV12S1*01': 'DIQVTQSPASLSASPEEIVTITCQASQDI------GSSLLWYQQKPGKSPQLLIYSA-------TILADGVP-SRFSGSR--SGTQYSLKISRLQVEDIGTYYCLQVS--------------------', 'IGKV12S14*01': 'DIQMTQSPASLSASLEEIVTITCQASQDI------GNWLAWYQQKPGKSPQLLIYGA-------TSLADGVP-SRFSGSR--SGTQYSLKISRLQVEDIGIYYCQQAS--------------------', 'IGKV12S16*01': 'DIQMTQSPASLSASLEEIVTITCQASQDI------GNWLSWYQQKPGKSPQLLIYGA-------TSLADGVP-SRFSGSR--SGTQYSLKISRLQVEDIGIYYCLQAY--------------------', 'IGKV12S17*01': 'DIQMTQSPASLSASLEEIVTITCQASQDI------GNWLAWYQQKPGKSPQLLIYGA-------TSLADGVP-SRFSGSR--SGTQYSLKISRLQVEDPGIYYCLQGY--------------------', 'IGKV12S20*01': 'DIQMTQSPASLSASPEEIVTITCQASQDI------GNWLAWYQQKPGKSPQLLIYSA-------TSLADGIP-SRFSGSR--SGTQYSLKISRLQVEDTGIYYCLQRY--------------------', 'IGKV12S22*01': 'DIQMTQSPASLSASLEEIVTITCQPSQGI------GNYLSWYQQKLGKSPQLLIHSA-------TSLEDGVP-SRFSGSR--SGTQYSLKINRLQVEDTGIYYCLQIS--------------------', 'IGKV12S24*01': 'DIQMTQSPASLSASLEEIVTITCQASQDI------GNYLSWYQQKPGKSPQLLIHSA-------TSLADGVP-SRFSGSR--SGTQYSLKINRLQVEDTGIYYCLQHY--------------------', 'IGKV12S25*01': 'DIQMTQSPASLSASLDEIVTITCQASLDI------GNWLAWYQQKPGKSPQLLIYGA-------TSLADGVP-SRFSGSR--SGTQYSLKICKLQVEDTGIYYCLQHY--------------------', 'IGKV12S7*01': 'DIHVTQSPASLSASPEEIVTITCQASQDI------GSSLLWYQQKPGKSPQLLIYSA-------TILADGVP-SRFSGSR--SGTQYSLKISRLQVEDIGTYYCLQFS--------------------', 'IGKV14S1*01': 'DIQMTQSPSSMSASLGDRVTITCQASQDI------GNNLIWFQQKPGKSPRPMIYYA-------TNLANGVP-SRFSGSR--SGSDYSLTISSLESEDMADYHCLQYK--------------------', 'IGKV14S13*01': 'DIQMTQSPSSLPASLGDRVTITCRASQDI------GNYLRWFQQKPGKSPRLMIYGA-------TNLAAGVP-SRFSGSR--SGSDYSLTISSLESEDMADYYCLQSK--------------------', 'IGKV14S14*01': 
'DIQMTQSPSSLSASLGDRVTITCRASQDI------GNYLTWFQQKPGKSPRRMIYGA-------TNLAAGVP-SRFSGSR--SGSDYSLTISSLESEDVADYHCLQSI--------------------', 'IGKV14S15*01': 'DIQMTQSPSSMSASLGDRVTITCRASQDI------GNYLSWFQQKPGKSPRRMIYGA-------TNLAAGVP-SRFSGSR--SGSDYSLTISSLESEDMAIYYCLQSI--------------------', 'IGKV14S16*01': 'NIQMTQSSSSMPASLIDREILACRASQDI------RNYLSWYQQKPGKSPKLMISGG-------TNLAARIP-SRFSGSR--SGSDYSLTISSLESEDEADYHCLQYD--------------------', 'IGKV14S18*01': 'DIQMTQSPSSLPASLGDRVTITCRASQDI------GNYLRWFQQKPGKSPRLMIYGA-------TNLANGVP-SRFSGSR--SGSDYSLTINSLESEDMAIYYCLQHN--------------------', 'IGKV14S19*01': 'DIQMTQSPSSMSVSLGDRVTITCRASQDI------GNYLSWYQQKPEKSPKLMIYGA-------TNLEDGVP-SRFSGSR--SGSDYSLTINSLESEDTGIYFCLQHK--------------------', 'IGKV14S2*01': 'DIQMTQAPSSLPASLGDRVTITCRASQDI------GNYLRWFQQKPGKSPRRMIYGA-------TNLAAGVP-SRFSGSR--SGSDYSLTISSLESEDMADYYCVQSK--------------------', 'IGKV14S8*01': 'DIQMTQSPSSMSASLGDTVTINCLASQDI------GNYLSWYQQKPGKSPKLMIYGA-------TNLEDGVP-SRFSGSR--SGSDYSLTINSLGYDDEGIYHCHQYY--------------------', 'IGKV14S9*01': 'DIQMTQSPSSMSVSLGDTVTITCRASQDV------GIYVNWFQQKPGKSPRHMIYRA-------TNLADGVP-SRFSGSR--SGSDYSLTISSLESEDVADYHCLQYD--------------------', 'IGKV15S4*01': 'DIQMTQSPSFLSASLGNSITITCHASQNI------KGWLAWYQQKSGNAPELLIYKA-------SSLQSGVP-SRFSGSG--SGTDYIFTISNLQPEDIATYYCQHYQ--------------------', 'IGKV17S1*01': 'ETTVTQSPASLSMAVGEKVSISCKTSTDI------DDDMNWYQQKSGEAPKLLISEG-------NTLRPGVP-SRFSSSG--YGTDFVFTINNVLLGDEGIYYCQQSD--------------------', 'IGKV1S1*01': 'DVVMTQTPVSLSLAIGQPASISCKSSQSLLGT-SGKTFLNWILQRPGQSPKRLIYQV-------SKLYSEVP-DRFSGSG--SETEFTLKISRVEAEDLGVYYCWQGT--------------------', 'IGKV1S12*01': 'DVVMTQTPPSLSVAIGQSVSISCKSSQSLVHS-DGKTYLNWLLQNPGQSPKRLIYQV-------SNLGSGVP-DRFSGTG--SEKDFTLKISRVEAEDLGVYHCVQAT--------------------', 'IGKV1S14*01': 'DVVMTQTPPSLSVAIGQSVSISCKSSQSLVYS-DGKTYLHWLLQSPGRSPKRLIYQV-------SNLGSGVP-DRFSGTG--SQKDFTLKISRVEAEDLGVYYCAQTT--------------------', 'IGKV1S18*01': 'DVVMTQTPVSLSVAIGQPASISCKSSQSLVHS-DGKTYLNWLLQRPGQSPKRLIYLV-------SKLDSGIP-DRFSGSG--SETDFTLKISRVEADDLGVYYCLQGT--------------------', 'IGKV1S5*01': 'DIVMTQTPLSLSVAIGQSAFICCKSSQSLLYS-NGKKYLNWFLQRPGQSPKCLIYLV-------SKLDFGVP-DRFTGSG--SETNFTLEISRVEAENLGVYYCMQGS--------------------', 'IGKV1S7*01': 'DIVMTQTPLSLSVAIGQSASISCKSSQSLKYS-DGKTYLNWVFQSPGQSPKRLIYQV-------SKLDSGVP-DRFSGTG--SETDFTLKISRVEAEDLGVYYCCQVH--------------------', 'IGKV1S8*01': 'DVVMTQTPVSLSLAIGQPASISCKSSQSLIHS-DGKTYLSWILQRPGQSPKRLIYLV-------SKLDSGVP-DRFSGSG--SETEFTLKISRVEAEDLGVYYCWQAT--------------------', 'IGKV20S1*01': 'DIRMTQTPASLSASLGESVTITCRASQDI------GKSLLWFQQKTGKPPKILIYTA-------SNLVSGIS-PRFSGSG--SGTQFSLKISSLKPEDTANYYCCQGY--------------------', 'IGKV22S1*01': 'DIQMTQSPSVLSASVGDRVTLNCKASQNI------NKYLNWYQQKLGEAPKLLIYNT-------NNLQTGIP-SRFSGSG--SGTDFTLTISSLQPEDFATYFCFQHN--------------------', 'IGKV22S2*01': 'DIQMTQSPSFLSASVGDRVTLSCKASQNI------NKYLAWYQQKLGEAPKLLIYNA-------NSLQTGIP-SRFSGSG--SGTDFTLTISSLQPEDVATYFCLQHN--------------------', 'IGKV22S4*01': 'DIQMTQSPSFLSASVGDRVTINCKASQNI------NRYLNWYQQKLGEAPKLLIYNA-------NSLQTGIP-SRFSGSG--SGTDFTLTISSLQPEDVATYFCLQHN--------------------', 'IGKV22S7*01': 'DIQMTQSPSFLSASVGDRVTINCKASQNI------NKYLNWYQQKLGEAPKLLIYNT-------NNLQTGIP-SRFSGSG--SGTDYTLTISSLQPEDVATYFCLQHS--------------------', 'IGKV22S9*01': 'DIQMTQSPSLLSASVGDRVTLSCKASQSI------YNSLAWYQQKLGEAPKLLIYKT-------NSLQTGIP-SSFSGSG--SGTDYTLTISSLQPEDVATYFCQKYN--------------------', 'IGKV2S3*01': 'DIMMTQSPLSVAVTPGESASISCRSSKSLLHS-NGITYLSWYLQRPEKSPQLLIYQI-------SNLASGVS-GRFSGSG--SGTDFTLKISRVETEDVGIYYCVQFL--------------------', 'IGKV3S1*01': 
'DIVLTQSPA-LAVSLEQRATISCKTSQNVDN--YGISYMHWYQQKPGQQPKLLIYEG-------SNLASGIP-ARFSGSG--SGTDFTLTIDPVEADDIATYYCQQSK--------------------', 'IGKV3S18*01': 'DIVLTQSPV-LAVSLGQRATISCRASQSVSI--SSINLMHWYQQKPGQQPKLLIYRA-------SNLASGIP-ARFSGSG--SGTDFTLTIDPVQADDIAAYYCQQSR--------------------', 'IGKV3S19*01': 'DIVLTQSPA-LAVSLGQRATISCRASQSVSI--SRYNLMHWYQQKPGQQPKLLIYRA-------SNLASGIP-ARFSGSG--SGTDFTLTINPVQADDIATYYCQQSR--------------------', 'IGKV3S8*01': 'DTVLTQSPA-LAVSPGERVTISCRASESV------STLMHWYQQKPGQQPTLLIYLA-------SNLESGVP-AMFSGSG--SGTDFTLTIDPVEADDTATYFCQQSW--------------------', 'IGKV3S9*01': 'DTVLTQSPA-LAVSPGERVTISCRASESV------STLMHWYQQKPGQQPTLLIYLA-------SNLESGVP-ARFSGSG--SGTDFTLTIDPVEADDTATYYCQQSW--------------------', 'IGKV6S11*01': 'NTVMTQSPTSMFISVGDRVTMNCKASQNV------GTNVDWYQQKTGQSPKLLIYGA-------SNRYTGVP-DRFTGSG--SGTDFTLTISNMQAEDLAVYYCLQYN--------------------', 'IGKV6S8*01': 'ETVMTQSPTSMSTSIGERVTLNCKASQSV------GINVDWYQQTPGQSPKLLIYGA-------SNRHTGVP-DRFTGSG--FGRDFTLTISNMEAEDLAVYYCLQYG--------------------', 'IGKV8S5*01': 'DIVMTQSPSSLAVSAGETVTINCKSSQSLFGSVRQKNYLAWYQQKPGQSPKLLIYLA-------STRESGVP-DRFIGSG--SGTDFTLTISSVQAEDLANYYCQQYY--------------------', 'IGKV8S6*01': 'DIVMTQSPSSLAVSAGETVTINCKSSQSLLSSGNQKNYLAWYQQKPGQSPKLLIYLA-------STRESGVP-DRFIGSG--SGTDFTLTISSVQAEDLADYYCQQHY--------------------', 'IGKV9S2*01': 'QITLTQQAESLWVSPGEKVSITCRASQSLLYT-DGKHYLSWYQQRPGQTTKALIYHA-------SIRTDGVP-TRFIGSG--SGTEFTLSIEDVQPEDIALYYCLQTL--------------------'}, 'rabbit': {'IGKV1S2*01': 'AQVLTQTESPVSAPVGGTVTINCQASQSVY----DNNWLSWYQQKPGQPPKLLIYDA-------SKLASGVP-SRFSGSG--SGTQFTLTISGVQCDDAATYYCQGSY--------------------', 'IGKV1S2*02': 'AQVLTQTESPVSAPVGGTVTINCQASQSVY----DNNYLSWYQQKPGQPPKLLIYDA-------SKLASGVP-SRFSGSG--SGTQFTLTISGVQCDDAATYYCQGSY--------------------', 'IGKV1S3*01': 'AQVLTQTPASVSAAVGGTVTINCQASESI------SSYLNWYQQKLGQPPKLLIYYA-------STLASGVP-SRFKGSG--SGTEYTLTISGVQCDDAATYYCQHGY--------------------', 'IGKV1S3*02': 'AQVMTQTPASVSAAVGGTVTIICQASESI------SSYLNWYQQKLGQPPKLLIYYA-------STLASGVP-SRFKGSG--SGTEYTLTISGVQCDDAATYYCQHGY--------------------', 'IGKV1S5*01': 'DPVMTQTPSSTSAAVGGTVTINCQSSQNVY----SNNYLSWFQQKPGQPPKLLIYGA-------SKLASGVP-SRFSGSG--SGKQFTLTISGVQCDDAATYYCAGYY--------------------', 'IGKV1S6*01': 'DGVMTQTPAPVSAAVGGTVTINCQASQSI------GSDLSWYQQKPGQPPKLLIYSA-------SKLATGVP-SRFNGSG--SGTQFTLTISGVQCDDAATYYCQCTY--------------------'}, 'rhesus': {'IGKV1-16*01': 'DIQMTQSPSSLSASVGDKVT-ITCQASQSI------SSWLAWYQQKPGKAPKPLIYKA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQY--------------------', 'IGKV1-18*01': 'DIQMTQSPSSLSASVGDKVT-ITCRASQGI------SSWLAWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDYTLTISSLQPEDFATYYCQQG--------------------', 'IGKV1-19*01': 'DIQMTQSPSSLSASVGDKVT-ITCHASQGI------SSWLAWYQQKPGKAPKPLIYAA-------SSLQSGVP-SRFSGSG--SGTDYTLTISSLQPEDFATYYCQQY--------------------', 'IGKV1-21*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SSWLAWYQQKPGKAPKLLIYKA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQY--------------------', 'IGKV1-22*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQSI------SSWLAWYQQKPGKAPKLLIYKA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCLQY--------------------', 'IGKV1-25*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SSYLAWYQQKPGKAPKLLIYKA-------STLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1-27*01': 'DIQMTQSPSSLSASVGDRVT-ITCQASQGI------SSWLAWYQQKPGKAPKLLLYKA-------PGLQSGVP-SMFSGSG--SGTDFTLTISSLQPEYFATYYCQQF--------------------', 'IGKV1-28*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------SSYLNWFQQKPGKAPKLLIYAA-------TTLQSGVP-SRFSGSG--SGTEFTLTISSLQPEDFAAYYCLQH--------------------', 'IGKV1-28*03': 
'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------SSYLNWFQQKPGKAPKLLIYAA-------TTLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFAAYYCLQH--------------------', 'IGKV1-32*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SSYLNWYQQKPGKAPKLLIYYA-------NRLESGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQQY--------------------', 'IGKV1-32*03': 'DIQMSQSPSSLSASVGDRVT-ITCRASQGI------SSYLNWYQQKPGKAPKLLIYYA-------NSLASGVP-SRFSGSG--SGTEFTLTISSLQPEDFAAYYCLQG--------------------', 'IGKV1-32*04': 'DIQMSQSPSSLSASVGDRVT-ITCRASQGI------SSYLNWYQQKPGKAPKLLIYYA-------NSLASGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQQG--------------------', 'IGKV1-32*05': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SSYLNWYQQKPGKAPKLLIYYA-------NRLESGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQQY--------------------', 'IGKV1-33*01': 'DIQMTQSPSSLSASVGDKVT-ITCRASQGI------SNALAWYQQKPGKAPKLLIYAA-------SNLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFAVYYCQQR--------------------', 'IGKV1-33*02': 'DIQMTQSPSSLSASVGDKVT-ITCRASQGI------SNALAWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1-36*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SNYLSWYQQKPGKAPKLLIYAA-------SSLESGVP-SRFSGSG--SGTEFTLTISSLQPEDFAAYYCLQY--------------------', 'IGKV1-36*03': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SDYLSWYQQKPGKAPKLLIYAA-------SSLESGVP-SRFSGSG--SGTEFTLTISSLQPEDFAAYYCLQG--------------------', 'IGKV1-37*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SSYLAWYQQKPGKAPKPLIYYA-------SNLESGVP-SRFSGSG--SGTEFTLTISSLQPEDFAIYYCQQY--------------------', 'IGKV1-38*01': 'DIQLTQSPSSLSASVGDRVT-ITCRASQGI------SSYLAWYQQKPGKAPKLLIYDA-------SNLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFAVYYCQQR--------------------', 'IGKV1-41*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SNYLNWYQQEPGKAPKLLIYAA-------SSLESGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQPF--------------------', 'IGKV1-43*01': 'DIQMTQSPSSLSASAGDRVT-ITCRASQGI------STYLNWYQQKPGKAPKRLIYAA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCLQY--------------------', 'IGKV1-43*03': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------STYLNWYQQKPGKAPKLLIYAA-------SSLESGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCLQY--------------------', 'IGKV1-44*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQTI------SSYLAWYQQKPGKVPKLLIYAA-------SSLESGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1-44*03': 'DIQMTQSPSSLSASVGDRVT-ITCRASQTI------SSYLAWYQQKPGKVPKLLIYAA-------STLQSGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1-46*01': 'DIQMTQSPSSLSASVGDSVT-ITCRASQSF------SSSLAWYQQKPGKAPKLLIYSA-------SSLQSGVP-SRFSGSK--SGTDFTLTISSLQPEDIASYYCQQY--------------------', 'IGKV1-59*01': 'AIQMTQSPSSLSASVGDKVT-ITCRASQSI------GSNLAWYQQKPGKVPKLLIYAA-------STLQSEVP-SRFSGSG--SGTDFTLTISSLQPEEVATYYCQKC--------------------', 'IGKV1-66*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------NNYLSWYQQKPGKAPKPLIYYA-------SSLERGVP-SRFSGSR--SGTDYTLTISSLQPEDIATYYCQQY--------------------', 'IGKV1-69*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SNWLAWYQQKPGKAPKLLIYRA-------SNLETGVP-SRFSGSG--SGTDFTLTISSLQPEDIATYYCQQH--------------------', 'IGKV1-69*02': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SNWLAWYQQKPGKAPKLLIYAA-------SNLETGVP-SRFSGSG--SGTDFTLTISSLQPEDIATYYCQQH--------------------', 'IGKV1-74*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASENV------NNYLNWYQQKPGKAPKLLIYKA-------STLQSGVP-SRFSGSG--SGTDYTFTISSLQPEDVATYYCQHG--------------------', 'IGKV1-80*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------NNELAWYQQKPGKAPTLLLYSG-------SSLHTGVP-SQFSGSG--SGTDFTLTISSLQPEDVATYYCRQD--------------------', 'IGKV1-84*01': 'DIQMTQPPSSLSASVGDRVN-ITCQASQSI------SNYLNWYPQKTWKAPKFLTYRA-------SGLQRGVP-SQFSGSG--YGRDFTLTISSLRPEDFAIYYCQQE--------------------', 'IGKV1-94*01': 
'DIQMTQSPSSLSASVGDRVT-VTCRASQGI------NKELSWYQQKPGKAPTLLIYAA-------SSLQTGVS-SRFSGSG--SGTDFTLTISSLQPEDVATYYCQQD--------------------', 'IGKV1S11*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------SSWLAWYQQKPGKAPKLLIYAA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFAVYYCQQR--------------------', 'IGKV1S12*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQTI------SSYLAWYQQKPGKVPKLLIYAA-------STLQSGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCLQY--------------------', 'IGKV1S13*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------SNYLAWYQQKPGKAPKLLIYAA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFAAYYCLQH--------------------', 'IGKV1S14*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------SNYLAWYQQKPGKAPKPLIYYA-------SNLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1S15*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------SNYLAWYQQKPGKAPKPLIYYA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQG--------------------', 'IGKV1S16*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------SNYLAWYQQKPGKAPKPLIYYA-------SSLESGVP-SRFSGSG--YGTDFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1S17*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SNNLAWYQQKPGKAPKLLIYAA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCLQY--------------------', 'IGKV1S19*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------SNYLNWYQQKPGKAPKLLIYAA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFAAYYCLQH--------------------', 'IGKV1S21*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SNYLNWYQQKPGKAPKRLIYDA-------SSLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFAAYYCLQY--------------------', 'IGKV1S25*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQGI------SSWLAWYQQKPGKVPKLLIYAA-------STLQSGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1S3*01': 'DIQMTQSPSSLSASVGDTVT-ITCRASQGI------SNYLAWYQQKPGKAPKPLIYYA-------SSLESGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1S4*01': 'DIQMTQSPSSLSASVGDRVT-ITCQASQGI------SSWLAWYQQKPGKAPKLLLYKA-------PGLQSGVP-SMFSGSG--SGTEFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1S5*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQTI------SSYLAWYQQKPGKAPKRLIYAA-------SSLESGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1S6*01': 'DIQMTQSPSSLSASVGDKVT-ITCRASQGI------SSWLAWYQQKPGKAPKLLIYKA-------SSLASGVP-SRFSGSG--SGTEFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV1S8*01': 'DIQMTQSPSSLSASVGDRVT-ITCRASQTI------SSYLAWYQQKPGKVPKLLIYAA-------STLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQY--------------------', 'IGKV1S9*01': 'DIQMSQSPSSLSASVGDTVT-ITCRASQGI------SNYLNWFQQKPGKAPKLLIYAA-------TTLQSGVP-SRFSGSG--SGTDFTLTISSLQPEDFATYYCQQH--------------------', 'IGKV2-104*02': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLDSEDGNTYLDWYLQKPGQSPQLLIYEV-------SNRASGVP-DRFSGSG--SDTDFTLKISRVEAEDVGVYYCMQA--------------------', 'IGKV2-58*01': 'DVAMTQSPLSLPVTLGQPAS-ISCRSSQSLLHS-NGNTYLSWFQQKPGQSPRRLIYKV-------SNRDSGVP-DRFSGSG--AGTDFTLKISRVEAEDVGVYYCMEG--------------------', 'IGKV2-58*03': 'DVAMTQSPLSLPVTPGQPAS-ISCRSSQSLLHS-NGNTYLSWFQQKPGQSPRRLIYKV-------SNRDSGVP-DRFSGSG--AGTDFTLKISRVEAEDVGVYYCMEG--------------------', 'IGKV2-60*01': 'DIVMTQTPLSLPVTLGEPAS-ISCRSSQSLLSS-NGYNYLNWYLQKPGQSPQLLIYYG-------SNRASGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQA--------------------', 'IGKV2-61*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLHT-DGYTYLDWYLQKPGQSPQLLIYGG-------SNRASGVP-DRFSGSG--SGTDFTLKISKVEAEDVGVYYCMQH--------------------', 'IGKV2-61*03': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLHT-DGYTYLDWYLQKPGQSPQLLIYGG-------SNRASGVP-DRFSGSG--SGTDFTLKISKVEAEDVGVYYCMQH--------------------', 'IGKV2-64*01': 'DVVMTQSPLSLPITPGQPAS-ISCRSSQSLVHS-DGNTYLSWYQQKPGQPPRLLIYKV-------SNRDSGVP-DRFSGSG--AGTDFTLKISRVEAEDVGVYYCGQG--------------------', 'IGKV2-64*02': 
'DVVMTQSPLSLPITPGQPAS-ISCRSSQSLVHS-DGNTYLSWYQQKPGQPPRLLIYKV-------SNRYSGVP-DRFSGSG--AGTDFTLKISRVEAEDVGVYYCGQG--------------------', 'IGKV2-65*01': 'DVVMTQSPLSLPITPGQPAS-ISCRSSQSLVHS-NGNTYLSWYQQKPGQPPRRLIYEV-------SNRDSGVP-DRFSGSG--AGTDFTLKISRVEAEDVGVYYCGQG--------------------', 'IGKV2-68*01': 'DIVMTQTLLSLPVTPGEPAS-ISCRSSQSLLHS-NGNTYLDWYLQKPGQSPRFLIYKV-------TNREPGVP-DRFSGSG--SGTDFTLKISRVEPEDVGVCYCMQS--------------------', 'IGKV2-7*02': 'DTVMTQTPLSLPVT-G-PAS-ISCRSSQSLPYG-NGVNYLNWYLQKPDQPPQLLIYLG-------SSRFPGVP-DRFTVSR--SDTDFTLQISRVKAEDVGVYYCVQC--------------------', 'IGKV2-70*01': 'DIVMTQTPLSLSVTPREPAS-ISCRSSQSLLHT-DGRTYLYWYLQKPGQPPRLLIYRV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQA--------------------', 'IGKV2-72*01': 'DIVMTQTPLSLPITPGEPAS-ISCRSSQSLLHS-NGNTYLHWYLQKPGQSPQLLIYGG-------SNRASGVP-DRFSGSG--SGTDFTLKISKVEAEDVGVYYCVQA--------------------', 'IGKV2-72*02': 'DIVMTQTPLSLPITPGEPAS-ISCRSSQSLLHS-NGNTYLHWYLQKPGQSPQLLIYGG-------SNRASGVP-DRFSGSG--SGTDFTLKISKVEAEDVGVYYCVQA--------------------', 'IGKV2-72*03': 'DIVMTQTPLSLPITPGEPAS-ISCRSSQSLLHS-NGNTYLHWYLQKPGQSPQLLIYGG-------SNRASGVP-DRFSGSG--SGTDFTLKISKVEAEDVGVYYCVHA--------------------', 'IGKV2-73*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLHS-DGNTYLYWYLQKPGQPPRLLIYRV-------SNRFSGVP-DRFSGSG--SGTDFTLKISRVKAEDVGVYYCMQA--------------------', 'IGKV2-76*01': 'DIVMTQTPLSLPITPGEPAS-ISCRSSQSFLDSDDGYTYLDWYLQKPGQPPQPLIYFV-------SSRASGVP-DRFNGSG--SGSDFTLKISGVEADDVGVYYCMQC--------------------', 'IGKV2-78*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLDS-DGYTHLHWYLQKPGQSPQLLIYLG-------SNRASGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQT--------------------', 'IGKV2-82*01': 'DIVMTQTPLSLPVTLGEPAS-ISCRSSQSLVYS-DGKTYLDWYLQKPGQSPQLLMYLV-------SKRASGVP-DKFSGSG--SGTDFTLKISRVEAEDVGVYYCMQA--------------------', 'IGKV2-82*02': 'DIVMIQTPLSLPVTLGEPAS-ISCRSSQSLVYS-DGKTYLYWYLQKPGQSPQLLMYLV-------SKRASGVP-DKFSGSG--SGTDFTLKISRVEAEDVGVYYCMQA--------------------', 'IGKV2-82*03': 'DIVMIQTPLSLPVTLGEPAS-ISCRSSQSLVYS-DGKTYLYWYLQKPGQSPQLLMYLV-------SKRASGVP-DKFSGSG--SGTDFTLKISRVEAEDVGVYYCMQA--------------------', 'IGKV2-86*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLDSEDGNTYLDWYLQKPGQSPQPLIYEV-------SNRASGVP-DRFSGSG--SDTDFTLKISRVEAEDVGVYYCMQY--------------------', 'IGKV2-90*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLDS-DGYTCLDWYLQKPGQSPQLLIYEV-------SNRVSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQS--------------------', 'IGKV2-91*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLHS-NGYTYLYWYLQKPGQSPQLLMYFA-------SYRASGVP-DRFSGSG--SGTDFTLGISRVEAEDIGVYYCMQG--------------------', 'IGKV2-99*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLFDSDYANTYLDWCLQKPGQSPQLLIYML-------FNRVSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQS--------------------', 'IGKV2S15*01': 'DIVMTQTPLSLSVTPGQPAS-ISCKSSQSLLHS-DGKTYLYWYLQKPGQSPQLLIYEV-------SSRFSGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCMQG--------------------', 'IGKV2S2*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLHS-NGNTYLDWYLQKPGQSPRLLIYKV-------TNRESGVP-DRFSGSG--SGTDFTLKISRVEPEDVGVYYCMQS--------------------', 'IGKV2S20*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLDSEDGNTYLEWYLQKPGQSPQPLIYEV-------SNRASGVP-DRFSGSG--SDTDFTLKISRVEAEDVGVYYCMQG--------------------', 'IGKV2S3*01': 'DIVMTQTPLSLPVTPGEPAS-ISCRSSQSLLHS-NGNTYLHWYLQKPGQSPRLLIYKV-------TNRESGVP-DRFSGSG--SGTDFTLKISRVEPEDVGVYYCMQS--------------------', 'IGKV2S8*01': 'DVVMTQSPLSLPVTPGQPAS-ISCRSSQSLVHS-DGKTYLNWLQQKPGQPPRRLIYQV-------SNRDSGVP-DRFSGSG--AGTDFTLKISRVEAEDVGVYYCGQG--------------------', 'IGKV2S9*01': 'DVVMTQSPLSLPVTPGQPAS-ISCRSSQSLVHS-DGKTYLNWLQQKPGQPPRRLIYQV-------SNRDSGVP-DRFSGSG--AGTDFTLKISRVEAEDVGVYYCVQG--------------------', 'IGKV3-10*02': 
'QVILTQSPATLSLSPGERAT-LSCRASQSV------SSYLAWYQQKPGQAPRLLIHSA-------SSRATGIP-DRFSGSG--SGTEFTLTISSLEPEDVGVYHCYQY--------------------', 'IGKV3-17*01': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSSLAWYQQKPGQAPRLLIYDA-------SSRVTGIP-DRFSGSG--SGTDFTLTISSLEPEDVGVYFCQQE--------------------', 'IGKV3-17*02': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSRLAWYQQKPGQAPRLLIYDA-------SSRVTGIP-DRFSGSG--SGTDFTLTISSLEPEDVAVYFCQQE--------------------', 'IGKV3-17*03': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSRLAWYQQKPGQAPRLLIYDA-------SSRATGIP-DRFSGSG--SGTEFTLTISSLEPEDVAVYFCQQE--------------------', 'IGKV3-24*01': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSSLAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTDFTLTISSLEPEDVAVYYCLQR--------------------', 'IGKV3-24*03': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSSLAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTEFTLTISSLEPEDVAVYYCLQR--------------------', 'IGKV3-24*04': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------GSSLAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTDFTLTISSLEPEDVAVYYCLQR--------------------', 'IGKV3-31*01': 'EIVMTQSPATLSLSPGETAT-ISCRTSQSV------SSYLAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTDFTLTISSLEPEYFAVYYCQET--------------------', 'IGKV3-35*01': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSNLAWYQQKPGQAPRLLIYDA-------SNRATGIP-DRFSGSG--SGTDFTLTISSLEPEDVGVYYCQQE--------------------', 'IGKV3-35*02': 'EIVMTQSPATLSLSPRERAT-LSCRASQSV------SSNLAWYQQKPGQAPRLLIYYA-------SNRATGIP-DRFSGSG--SGTDFTLTISSLEPEDVGVYYCQQE--------------------', 'IGKV3-35*03': 'EIVMTQSPATLSLSPRERAT-LSCRASQSV------SSNLAWYQQKPGQAPRLLIYYA-------SNRATGIP-DRFSGSG--SGTDFTLTISSLEPEDVGVYYCQQE--------------------', 'IGKV3-40*01': 'EIVMTQSPATLSLSPGETAT-LSCRASESV------GSYLAWYQQKPGQAPKLLVRSA-------YFRATGIP-DRFSGSG--SRTDFTLTISSLEPEDVGVYHCQQY--------------------', 'IGKV3-40*03': 'EIVMTQSPATLSLSPGETAT-LSCRASESV------GSYLAWYQQKPGQAPKLLVHSA-------YFRATGIP-DRFSGSG--SRTEFTLTISSLEPEDVGVYHCQQY--------------------', 'IGKV3-42*01': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSSLAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTEFTLTISSLEPEDVAVYYCQQN--------------------', 'IGKV3-42*03': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSSLAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTEFTLTISSLEPEDFAVYYCQQY--------------------', 'IGKV3-53*01': 'QVILTQSPATLSLSPGERAT-LSCRASQSV------SSSLAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTEFTLTISSLEPEDFAVYYCQKY--------------------', 'IGKV3S11*01': 'QVILTQSPATLSLSPGERAT-LSCRASQSV------GSNLAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTDFTLTISSLEPEDVAVYYCLQR--------------------', 'IGKV3S5*01': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSRLAWYKQKPGQAPRLLIYDA-------SSRATGIP-DRFSGSG--SGTEFTLTISSLEPEDVAVYLCQQE--------------------', 'IGKV3S9*01': 'EIVMTQSPATLSLSPGERAT-LSCRASQSV------SSYVAWYQQKPGQAPRLLIYGA-------SSRATGIP-DRFSGSG--SGTEFTLTISSLEPEDFAVYYCQQY--------------------', 'IGKV4-1*02': 'DIVMTQSPDSLAVSLGERVT-INCKSSQSLLYSSNNKNYLAWYQQKPGQAPKLLIYWA-------STRESGVP-NRFSGSG--SGSDFTLTISGLQAEDVAVYYCQQY--------------------', 'IGKV5-11*02': 'ETILTQSAAFVSATPGDKVT-ISCRAGQDI------DDDMNWYQQEPGEAPKLIIKDA-------TTLVSGIP-PRFSGSG--YGTDFTLTINNVESEDAAYYFCLQH--------------------', 'IGKV6-47*01': 'DIVMTQSPAFVSVTPGEKVT-ITCQVSEGI------SNYLHWYQQKPDQAPKLFIQYA-------SQSISGVP-SRFTGSG--SGTDFTFTISSLEVEDAATYYCQQG--------------------', 'IGKV6-55*01': 'EIVLTQSPAFRSVTLKEKVT-ITCQASQSI------GSSLHWYQQKPDQSPKLLIKYA-------SQSISGVP-SRFSGSG--SGTDFTLTINSLEAEDAATYYCQQS--------------------'}, 'pig': {'IGKV1-11*01': 'AIQLTQSPASLAASLGDTVSITCRASQSI------NKWLAWYQQQAGKAPKLLIYSA-------STLQSGVP-SRFKGSG--SGTDFTLTISGLQAEDVATYYCQQHH--------------------', 'IGKV1-7*01': 
'AIQLTQSPASLAASLGDTVSITCRAHQTI------SSYLAWYQQQPGKPPKLLLCDA-------CTLQSGVP-CGFKGSG--SGTHFTLTISGLQAEDVATYYCQQLN--------------------', 'IGKV1-9*01': 'AIQLTQSPASLAASLGDTVSITCRASQSV------SNNLAWYQQQAGKPPKLLIYWA-------SALQSGVP-SRFKGSV--SGTDFTLTISGLQAEDVATYYCQQLN--------------------', 'IGKV2-10*01': 'AIVLTQTPLSLSVSPGEPASISCRSSQSLVDS-DGDSLLHWYLQKPGQSPQLLIYEA-------TNRASGVP-DRFSGSG--SGTDFTLKISRVEAEDVGVYYCFQAL--------------------', 'IGKV2-12*01': 'AIVLTQTPLSLSVSPGEPASISCRSTQSLRGS-YGKNYLNWYQQKPGQSPKLLIYWA-------TNRASGVP-DRFSGSR--SGTDFTLKIIRLEAEDAGVYSCLQDI--------------------', 'IGKV2-13*02': 'AIVLTQTPLSLSVSPGEPASISCRSSQSLEE--YGSNLLSWYQQKPGQSPQLLIYEA-------TNRASGVP-DRFSGSG--SGTDFTLKISRVEAEDAGVYYCQQFK--------------------', 'IGKV2-6*01': 'AIVLTQSPLSLSVSPGAPASISCRSSQSLES--YSYNFLSWYQQKPGQSPRLLIYFA-------TNKASGVP-DRFSGSG--SGTDFTLKISRVEAEDAGVYYCQQNK--------------------', 'IGKV2-8*01': 'AIVLTQTPLSLSVSPGEPASISCRSSQSLEI--YGSNFLSWYQQKPGQSPQLLIYEA-------TNRASGVP-DRFSGSG--SGTDFTLKISRVEAEDAGVYYCQQHK--------------------'}, 'cow': {'IGKV1-4*01': 'DIQVTQSPSYLSASLGDRVSITCQANQSV------SHYLNWYQQKPGEAPKLLIYYA-------TSRYTRVP-SRFSGSG--SGTDFTLTISSLEADDAANYYCQQDY--------------------', 'IGKV2-15*01': 'DVVLTQTPLSLSVIPGETVSISCKSTQSLKYS-DGKTYLRWVQHKPGQSPQGVIYQV-------SNRNTGVP-DRFTGSG--SETDFTLTISSVQAEDAGVYYCFQGT--------------------', 'IGKV2-18*01': 'DVVLTQTPLSLSVIPGETVSISCKSTQSLKY--SGKTYLRWLQHKPGQSPQSLIYQV-------SNRYTGVP-DRFTGSG--SETDFTLTISSVQAEDAGVYYCVQET--------------------', 'IGKV2-6*01': 'DVVLTQTPLSLSIIPGEMASISCKSSQSLVHS-DGKTYLNWIQYKPGQSPQGLIYQV-------SNRYSGVS-DRFTGSG--SGTDFTLTISRVQAEDAGVYYCYQGT--------------------', 'IGKV2-9*01': 'DVVLTQTPLSLSVIPGETVTISCKSTQSLKYS-DGKTYLQWFQHKPGQSPRLLIYQI-------SNRYTGVP-DRFTGSG--SETDFTLTISSVQAEDAGVYYCLQRS--------------------', 'IGKV8-3*01': 'EAVLYQTPAYIAASLGESISITCRANQSI------SDYLSWYKQKPGQAPMILIYDA-------DNRYNGVP-ERFTATQ--SETEFVFTISQVEADDAAMYYCQQDY--------------------'}}, 'L': {'human': {'IGLV1-36*01': 'QSVLTQPPS-VSEAPRQRVTISCSGSSSNI----GNNAVNWYQQLPGKAPKLLIYYD-------DLLPSGVS-DRFSGSK--SGTSASLAISGLQSEDEADYYCAAWD--------------------', 'IGLV1-40*01': 'QSVLTQPPS-VSGAPGQRVTISCTGSSSNIG---AGYDVHWYQQLPGTAPKLLIYGN-------SNRPSGVP-DRFSGSK--SGTSASLAITGLQAEDEADYYCQSYD--------------------', 'IGLV1-40*02': 'QSVVTQPPS-VSGAPGQRVTISCTGSSSNIG---AGYDVHWYQQLPGTAPKLLIYGN-------SNRPSGVP-DRFSGSK--SGTSASLAITGLQAEDEADYYCQSYD--------------------', 'IGLV1-40*03': 'QSVVTQPPS-VSGAPGQRVTISCTGSSSNIG---AGYDVHWYQQLPGTAPKLLIYGN-------SNRPSGVP-DRFSGSK--SGASASLAITGLQAEDEADYYCQSYD--------------------', 'IGLV1-44*01': 'QSVLTQPPS-ASGTPGQRVTISCSGSSSNI----GSNTVNWYQQLPGTAPKLLIYSN-------NQRPSGVP-DRFSGSK--SGTSASLAISGLQSEDEADYYCAAWD--------------------', 'IGLV1-47*01': 'QSVLTQPPS-ASGTPGQRVTISCSGSSSNI----GSNYVYWYQQLPGTAPKLLIYRN-------NQRPSGVP-DRFSGSK--SGTSASLAISGLRSEDEADYYCAAWD--------------------', 'IGLV1-47*02': 'QSVLTQPPS-ASGTPGQRVTISCSGSSSNI----GSNYVYWYQQLPGTAPKLLIYSN-------NQRPSGVP-DRFSGSK--SGTSASLAISGLRSEDEADYYCAAWD--------------------', 'IGLV1-47*03': 'QSVLTQPPS-ASGTPGQRVTISCSGSSSNI----GSNYVYWYQQLPGTAPKLLIYRN-------NQRPSGVP-DRFSGSK--SGTSASLAISGLWSEDEADYYCAAWD--------------------', 'IGLV1-51*01': 'QSVLTQPPS-VSAAPGQKVTISCSGSSSNI----GNNYVSWYQQLPGTAPKLLIYDN-------NKRPSGIP-DRFSGSK--SGTSATLGITGLQTGDEADYYCGTWD--------------------', 'IGLV1-51*02': 'QSVLTQPPS-VSAAPGQKVTISCSGSSSNI----GNNYVSWYQQLPGTAPKLLIYEN-------NKRPSGIP-DRFSGSK--SGTSATLGITGLQTGDEADYYCGTWD--------------------', 'IGLV10-54*01': 'QAGLTQPPS-VSKGLRQTATLTCTGNSNNV----GNQGAAWLQQHQGHPPKLLSYRN-------NNRPSGIS-ERLSASR--SGNTASLTITGLQPEDEADYYCSAWD--------------------', 'IGLV10-54*02': 
'QAGLTQPPS-VSKGLRQTATLTCTGNSNIV----GNQGAAWLQQHQGHPPKLLSYRN-------NNRPSGIS-ERFSASR--SGNTASLTITGLQPEDEADYYCSALD--------------------', 'IGLV2-11*01': 'QSALTQPRS-VSGSPGQSVTISCTGTSSDVG---GYNYVSWYQQHPGKAPKLMIYDV-------SKRPSGVP-DRFSGSK--SGNTASLTISGLQAEDEADYYCCSYA--------------------', 'IGLV2-11*02': 'QSALTQPRS-VSGSPGQSVTISCTGTSSDVG---GYNYVSWYQQHPGKAPKLMIYDV-------SKRPSGVP-DRFSGSK--SGNTASLTISGLQAEDEADYYCCSYA--------------------', 'IGLV2-14*01': 'QSALTQPAS-VSGSPGQSITISCTGTSSDVG---GYNYVSWYQQHPGKAPKLMIYEV-------SNRPSGVS-NRFSGSK--SGNTASLTISGLQAEDEADYYCSSYT--------------------', 'IGLV2-14*02': 'QSALTQPAS-VSGSPGQSITISCTGTSSDVG---SYNLVSWYQQHPGKAPKLMIYEG-------SKRPSGVS-NRFSGSK--SGNTASLTISGLQAEDEADYYCSSYT--------------------', 'IGLV2-14*03': 'QSALTQPAS-VSGSPGQSITISCTGTSSDVG---GYNYVSWYQQHPGKAPKLMIYDV-------SNRPSGVS-NRFSGSK--SGNTASLTISGLQAEDEADYYCSSYT--------------------', 'IGLV2-18*01': 'QSALTQPPS-VSGSPGQSVTISCTGTSSDVG---SYNRVSWYQQPPGTAPKLMIYEV-------SNRPSGVP-DRFSGSK--SGNTASLTISGLQAEDEADYYCSLYT--------------------', 'IGLV2-18*02': 'QSALTQPPS-VSGSPGQSVTISCTGTSSDVG---SYNRVSWYQQPPGTAPKLMIYEV-------SNRPSGVP-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYT--------------------', 'IGLV2-18*03': 'QSALTQPPS-VSGSPGQSVTISCTGTSSDVG---SYNRVSWYQQPPGTAPKLMIYEV-------SNRPSGVP-DRFSGSK--SGNTASLTTSGLQAEDEADYYCSSYT--------------------', 'IGLV2-18*04': 'QSALTQPPS-VSGSPGQSVTISCTGTSSDVG---SYNRVSWYQQPPGTAPKLMIYEV-------SNRPSGVP-DRSSGSK--SGNTASLTISGLQAEDEADYYCSSYT--------------------', 'IGLV2-23*01': 'QSALTQPAS-VSGSPGQSITISCTGTSSDVG---SYNLVSWYQQHPGKAPKLMIYEG-------SKRPSGVS-NRFSGSK--SGNTASLTISGLQAEDEADYYCCSYA--------------------', 'IGLV2-23*02': 'QSALTQPAS-VSGSPGQSITISCTGTSSDVG---SYNLVSWYQQHPGKAPKLMIYEV-------SKRPSGVS-NRFSGSK--SGNTASLTISGLQAEDEADYYCCSYA--------------------', 'IGLV2-23*03': 'QSALTQPAS-VSGSPGQSITISCTGTSSDVG---SYNLVSWYQQHPGKAPKLMIYEG-------SKRPSGVS-NRFSGSK--SGNTASLTISGLQAEDEADYYCCSYA--------------------', 'IGLV2-8*01': 'QSALTQPPS-ASGSPGQSVTISCTGTSSDVG---GYNYVSWYQQHPGKAPKLMIYEV-------SKRPSGVP-DRFSGSK--SGNTASLTVSGLQAEDEADYYCSSYA--------------------', 'IGLV2-8*02': 'QSALTQPPS-ASRSPGQSVTISCTGTSSDVG---GYNYVSWYQQHPGKAPKLMIYEV-------SKRPSGVP-DRFSGSK--SGNTASLTVSGLQAEDEADYYCSSYA--------------------', 'IGLV3-1*01': 'SYELTQPPS-VSVSPGQTASITCSGDKLG------DKYACWYQQKPGQSPVLVIYQD-------SKRPSGIP-ERFSGSN--SGNTATLTISGTQAMDEADYYCQAWD--------------------', 'IGLV3-10*01': 'SYELTQPPS-VSVSPGQTARITCSGDALP------KKYAYWYQQKSGQAPVLVIYED-------SKRPSGIP-ERFSGSS--SGTMATLTISGAQVEDEADYYCYSTD--------------------', 'IGLV3-10*03': 'SYELTQPPS-VSVSPGQTARITCSGDALP------KKYAYWYQQKSGQAPVLVIYED-------SKRPSGIP-ERFSGSS--SGTMATLTISGAQVEDEDDYYCYSTD--------------------', 'IGLV3-12*02': 'SYELTQPHS-VSVATAQMARITCGGNNIG------SKAVHWYQQKPGQDPVLVIYSD-------SNRPSGIP-ERFSGSN--PGNTATLTISRIEAGDEADYYCQVWD--------------------', 'IGLV3-16*01': 'SYELTQPPS-VSVSLGQMARITCSGEALP------KKYAYWYQQKPGQFPVLVIYKD-------SERPSGIP-ERFSGSS--SGTIVTLTISGVQAEDEADYYCLSAD--------------------', 'IGLV3-19*01': 'SSELTQDPA-VSVALGQTVRITCQGDSLR------SYYASWYQQKPGQAPVLVIYGK-------NNRPSGIP-DRFSGSS--SGNTASLTITGAQAEDEADYYCNSRD--------------------', 'IGLV3-19*02': 'SSELTQDPA-VSVALGQTVRITCQGDSLR------SYYASWYQQKPGQAPVRVIYGK-------NNRPSGIP-DRFSGSS--SGNTASLTITGAQAEDEADYYCNSWD--------------------', 'IGLV3-21*01': 'SYVLTQPPS-VSVAPGKTARITCGGNNIG------SKSVHWYQQKPGQAPVLVIYYD-------SDRPSGIP-ERFSGSN--SGNTATLTISRVEAGDEADYYCQVWD--------------------', 'IGLV3-21*02': 'SYVLTQPPS-VSVAPGQTARITCGGNNIG------SKSVHWYQQKPGQAPVLVVYDD-------SDRPSGIP-ERFSGSN--SGNTATLTISRVEAGDEADYYCQVWD--------------------', 'IGLV3-21*03': 
'SYVLTQPPS-VSVAPGKTARITCGGNNIG------SKSVHWYQQKPGQAPVLVVYDD-------SDRPSGIP-ERFSGSN--SGNTATLTISRVEAGDEADYYCQVWD--------------------', 'IGLV3-21*04': 'SYVLTQPPS-VSVAPGKTARITCGGNNIG------SKSVHWYQQKPGQAPVLVIYYD-------SDRPSGIP-ERFSGSN--SGNTATLTISRVEAGDEADYYCQVWD--------------------', 'IGLV3-22*01': 'SYELTQLPS-VSVSPGQTARITCSGDVLG------ENYADWYQQKPGQAPELVIYED-------SERYPGIP-ERFSGST--SGNTTTLTISRVLTEDEADYYCLSGD--------------------', 'IGLV3-22*03': 'SYELTQLPS-VSLSPGQKARITCSGDVLG------KNYADWYQQKPGQAPELVIYED-------SERYPGIP-ERFSGST--SGNTTTLTISRVLTEDEADYYCLSGN--------------------', 'IGLV3-25*01': 'SYELMQPPS-VSVSPGQTARITCSGDALP------KQYAYWYQQKPGQAPVLVIYKD-------SERPSGIP-ERFSGSS--SGTTVTLTISGVQAEDEADYYCQSAD--------------------', 'IGLV3-25*02': 'SYELTQPPS-VSVSPGQTARITCSGDALP------KQYAYWYQQKPGQAPVLVIYKD-------SERPSGIP-ERFSGSS--SGTTVTLTISGVQAEDEADYYCQSAD--------------------', 'IGLV3-25*03': 'SYELTQPPS-VSVSPGQTARITCSGDALP------KQYAYWYQQKPGQAPVLVIYKD-------SERPSGIP-ERFSGSS--SGTTVTLTISGVQAEDEADYYCQSAD--------------------', 'IGLV3-27*01': 'SYELTQPSS-VSVSPGQTARITCSGDVLA------KKYARWFQQKPGQAPVLVIYKD-------SERPSGIP-ERFSGSS--SGTTVTLTISGAQVEDEADYYCYSAA--------------------', 'IGLV3-9*01': 'SYELTQPLS-VSVALGQTARITCGGNNIG------SKNVHWYQQKPGQAPVLVIYRD-------SNRPSGIP-ERFSGSN--SGNTATLTISRAQAGDEADYYCQVWD--------------------', 'IGLV3-9*02': 'SYELTQPLS-VSVALGQAARITCGGNNLG------YKSVHWYQQKPGQAPVLVIYRD-------NNRPSGIP-ERFSGSN--SGNTATLTISRAQAGDEADYYCQVWD--------------------', 'IGLV4-60*01': 'QPVLTQSSS-ASASLGSSVKLTCTLSSGHS-----SYIIAWHQQQPGKAPRYLMKLEGS---GSYNKGSGVP-DRFSGSS--SGADRYLTISNLQLEDEADYYCETWD--------------------', 'IGLV4-60*02': 'QPVLTQSSS-ASASLGSSVKLTCTLSSGHS-----SYIIAWHQQQPGKAPRYLMKLEGS---GSYNKGSGVP-DRFSGSS--SGADRYLTISNLQFEDEADYYCETWD--------------------', 'IGLV4-60*03': 'QPVLTQSSS-ASASLGSSVKLTCTLSSGHS-----SYIIAWHQQQPGKAPRYLMKLEGS---GSYNKGSGVP-DRFSGSS--SGADRYLTISNLQSEDEADYYCETWD--------------------', 'IGLV4-69*01': 'QLVLTQSPS-ASASLGASVKLTCTLSSGHS-----SYAIAWHQQQPEKGPRYLMKLNSD---GSHSKGDGIP-DRFSGSS--SGAERYLTISSLQSEDEADYYCQTWG--------------------', 'IGLV4-69*02': 'QLVLTQSPS-ASASLGASVKLTCTLSSGHS-----SYAIAWHQQQPEKGPRYLMKLNSD---GSHSKGDGIP-DRFSGSS--SGAERYLTISSLQSEDEADYYCQTWG--------------------', 'IGLV5-37*01': 'QPVLTQPPS-SSASPGESARLTCTLPSDINV---GSYNIYWYQQKPGSPPRYLLYYYSD---SDKGQGSGVP-SRFSGSKDASANTGILLISGLQSEDEADYYCMIWP--------------------', 'IGLV5-39*01': 'QPVLTQPTS-LSASPGASARFTCTLRSGINV---GTYRIYWYQQKPGSLPRYLLRYKSD---SDKQQGSGVP-SRFSGSKDASTNAGLLLISGLQSEDEADYYCAIWY--------------------', 'IGLV5-39*02': 'QPVLTQPTS-LSASPGASARFTCTLRSGINV---GTYRIYWYQQNPGSLPRYLLRYKSD---SDKQQGSGVP-SRFSGSKDASTNAGLLLISGLQSEDEADYYCAIWY--------------------', 'IGLV5-45*01': 'QAVLTQPAS-LSASPGASASLTCTLRSGINV---GTYRIYWYQQKPGSPPQYLLRYKSD---SDKQQGSGVP-SRFSGSKDASANAGILLISGLQSEDEADYYCMIWH--------------------', 'IGLV5-45*02': 'QAVLTQPSS-LSASPGASASLTCTLRSGINV---GTYRIYWYQQKPGSPPQYLLRYKSD---SDKQQGSGVP-SRFSGSKDASANAGILLISGLQSEDEADYYCMIWH--------------------', 'IGLV5-45*03': 'QAVLTQPSS-LSASPGASASLTCTLRSGINV---GTYRIYWYQQKPGSPPQYLLRYKSD---SDKQQGSGVP-SRFSGSKDASANAGILLISGLQSEDEADYYCMIWH--------------------', 'IGLV5-45*04': 'QAVLTQPSS-LSASPGASASLTCTLCSGINV---GTYRIYWYQQKPGSPPQYLLRYKSD---SDKQQGSGVP-SRFSGSKDASANAGILLISGLQSEDEADYYCMIWH--------------------', 'IGLV5-52*01': 'QPVLTQPSS-HSASSGASVRLTCMLSSGFSV---GDFWIRWYQQKPGNPPRYLLYYHSD---SNKGQGSGVP-SRFSGSNDASANAGILRISGLQPEDEADYYCGTWH--------------------', 'IGLV6-57*01': 'NFMLTQPHS-VSESPGKTVTISCTRSSGSI----ASNYVQWYQQRPGSSPTTVIYED-------NQRPSGVP-DRFSGSIDSSSNSASLTISGLKTEDEADYYCQSYD--------------------', 'IGLV6-57*02': 
'NFMLTQPHS-VSESPGKTVTISCTGSSGSI----ASNYVQWYQQRPGSAPTTVIYED-------NQRPSGVP-DRFSGSIDSSSNSASLTISGLKTEDEADYYCQSYD--------------------', 'IGLV6-57*03': 'NFMLTQPHS-VSESPGKTVTISCTRSSGSI----ASNYVQWYQQRPGSAPTTVIYED-------NQRPSGVP-DRFSGSIDSSSNSASLTISGLKTEDEADYYCQSYD--------------------', 'IGLV7-43*01': 'QTVVTQEPS-LTVSPGGTVTLTCASSTGAVT---SGYYPNWFQQKPGQAPRALIYST-------SNKHSWTP-ARFSGSL--LGGKAALTLSGVQPEDEAEYYCLLYY--------------------', 'IGLV7-46*01': 'QAVVTQEPS-LTVSPGGTVTLTCGSSTGAVT---SGHYPYWFQQKPGQAPRTLIYDT-------SNKHSWTP-ARFSGSL--LGGKAALTLSGAQPEDEAEYYCLLSY--------------------', 'IGLV7-46*02': 'QAVVTQEPS-LTVSPGGTVTLTCGSSTGAVT---SGHYPYWFQQKPGQAPRTLIYDT-------SNKHSWTP-ARFSGSL--LGGKAALTLLGAQPEDEAEYYCLLSY--------------------', 'IGLV8-61*01': 'QTVVTQEPS-FSVSPGGTVTLTCGLSSGSVS---TSYYPSWYQQTPGQAPRTLIYST-------NTRSSGVP-DRFSGSI--LGNKAALTITGAQADDESDYYCVLYM--------------------', 'IGLV8-61*02': 'QTVVTQEPS-FSVSPGGTVTLTCGLSSGSVS---TSYYPSWYQQTPGQAPRTLIYST-------NTRSSGVP-DCFSGSI--LGNKAALTITGAQADDESDYYCVLYM--------------------'}, 'mouse': {'IGLV1*01': 'QAVVTQESA-LTTSPGETVTLTCRSSTGAVT---TSNYANWVQEKPDHLFTGLIGGT-------NNRAPGVP-ARFSGSL--IGDKAALTITGAQTEDEAIYFCALWY--------------------', 'IGLV1*02': 'QAVVTQESA-LTTSPGETVTLTCRSSTGAAT---TSNYANWVQEKPDHLFTGLIGGT-------NNRAPGVP-ARFSGSL--IGDKAALTITGAQTEDEAIYFCALWY--------------------', 'IGLV2*01': 'QAVVTQESA-LTTSPGGTVILTCRSSTGAVT---TSNYAIWVQEKTDHLFAGVIGDT-------SNRAPGVP-ARFSGSL--IGDKAALTITGAQTEDDAMYFCALWY--------------------', 'IGLV2*02': 'QAVVTQESA-LTTSPGGTVILTCRSSTGAVT---TSNYANWVQEKPDHLFTGLIGGT-------SNRAPGVP-VRFSGSL--IGDKAALTITGAQTEDDAMYFCALWY--------------------', 'IGLV3*01': 'QPVLTQSSS-ASFSLGASAKLTCTLSSEHS-----TYIIEWYQQQPLKPPKYVMQLKKD---GSHSKGDGIP-DRFSGSS--SGADRYLSISNIQPEDEAIYICGVDD--------------------'}, 'rat': {'IGLV1S1*01': 'QAVVTQESA-LTTLPGGTVTLTCHSSTGAVT---TSNYANWIQEKADHLFTGIVGDT-------SNRAPGAP-ARFSGSL--LEGKAALTITGAQIEDEATYFCSLWY--------------------', 'IGLV2S1*01': 'QFTLTQPKS-VSGSLRSTITIPCERSSGDI----GDSYVSWYQQHLGRPPINVIYAD-------DQRPSEVS-DRFSGSIDSSSNSASLTITNLQMDDEADYFCQSYD--------------------', 'IGLV3S1*01': 'QAVLTQPNS-VSTSLGSTVKLSCTLSSGNI----ENNYVHWYQQYEGRSPTTMIYND-------DKRPDGVP-DRFSGSIDSSSNSAFLTINNVEIEDEAIYFCHSYV--------------------', 'IGLV3S2*01': 'QFVLTQPNS-VSTNLGSTVKLSCKRSTGNI----GSNYVNWYQQHEGRSPTTMIYRD-------DKRPDGVP-DRFSGSIDRSSNSALLTINNVQTEDEADYFCQSYS--------------------', 'IGLV3S3*01': 'QFVLTQSNS-MSTSLGSTVKLSCKRSTGNI----GSSYVYWYQQHEGRSPTTMIYDD-------DKRPDGVP-DRFSGSIDSSSNSAFLTINNVQIEDEAIYFCQSYS--------------------', 'IGLV4S1*01': 'SYELIQPPS-ASVTLGNTVSLTCVGDELS------KRYAQWYQQKPDKTIVSVIYKD-------SERPSGIS-DRFSGS--SSGTTATLTIHGTLAEDEADYYCLSTY--------------------'}, 'rabbit': {'IGLV2S1*01': 'QPALTQPSS-AFGALGGSVTISCTGTSDDVG---YTNAVYWYRQLPGMSPTLLIYYD-------SKRPSGIP-ERFSGSK--SGNTASLTISWLQPEDEAAYYCSSYR--------------------', 'IGLV2S2*01': 'QPALTQPSS-VSGALGGSVTITCAGSNSDIG---YNSLISWYQQLPGSVPKLLMFRV-------DRLASGIP-ERFSGSK--SGTTASLTISGLQPEDEADYYCVSYT--------------------', 'IGLV3S2*01': 'SYELTQLPS-VSVSLGQTARITCGGNSIG------SKAVHWYQQKPGLAPGLLIYND-------DERPSGVP-DRFSGSN--SGDTATLTISGAQAGDEADYYCQLWD--------------------', 'IGLV3S6*01': 'SHELTKLPS-VSVSLGQTARITCGGDSIE------EYSVHWYQKRPGQAPVLLIYRD-------SNRLSGIP-DHFSGSN--SGNTATLTISGAQAGDEADYYCQVWD--------------------', 'IGLV3S9*01': 'SYELTQLPS-VSVSLGQTARITCGGDSIE------SYAVSWYQQKPGLAPVLLIYRD-------SKWPSGIP-DRFSGSN--SGNTATLTISRAQAGDEADYYCQVFN--------------------', 'IGLV4S3*01': 'QPVLTQSPS-ASAALGSSAKLTCTLSSAHK-----TYYIEWYQQQQGEAPRYLMQLKSD---GSYTKGTGVP-DRFSGSS--SGADRYLIISSVQAEDEADYICGVTG--------------------', 'IGLV4S4*01': 
'QPVLTQSPS-VSAALGASAKLTCTLSSAHK-----TYTIDWYQQQQGEAPRYLMQLKSD---GSYTKGTGVP-DRFSGSS--SGADRYLIIPSVQADDEADYYCGADY--------------------', 'IGLV5S1*01': 'QPVLTQPPS-LSASLGTTARLTCTLSTGYSV---GSLGVLWLQQVPGRPPRYLLTYHTE---EFKHQGSGVP-TRFSGSKDTSENSFVLSISGLQPEDEADYYCFTAH--------------------', 'IGLV5S10*01': 'QPVLTQPPS-LSASLDTTARLTCTLSTGYSV---GEYPLVWLQQVPGRPPRYLLGYHTD---DIKHQGSGVP-SRFSGSKDDSANAGVLSISGLQPEDEADYYCAVG---------------------', 'IGLV5S2*01': 'QPVLTQPPS-LSASLGTTARLTCTLSTGYSV---GEYPLVWLQQVPGRPPRYLLGYHTD---DIKHQGSGVH-SRFSGSKDTSENAGVLSISGLQPEDEADYYCATAH--------------------', 'IGLV5S3*01': 'QPVLTQPPS-LSASLGTTARLTCTLSTGYSV---GKYPLVWLQQVPGRPPRYLLTYHTE---EFKHQGSGVH-SRFSGSKDTSENAGVLSISGLQPEDEADYYCVTAH--------------------', 'IGLV5S5*01': 'QPVLTQPPS-LSASLDTTARLTCTLSTGYSV---GSLGVLWLQQVPGRPPRYLLAYHTD---DMKHQGSGVP-SRFSGSKDTSENSFVLSISGLQPEDEADYYCATAC--------------------', 'IGLV5S6*01': 'QPVLTQPPS-LSASLGTTARLTCTLSTGYSV---GSLGVLWLQQVPGRAPRYLLSYNTD---EEKHQGSGVP-TRFSGSKDTSENSFVLSISGLQPEDEADYYCATAH--------------------', 'IGLV5S9*01': 'QPVLTQPPS-LSASLDTTARLTCTLSTGYSV---GSYVIGWYQQVPGRPPRYLLTYHTE---EIKHQGSGVH-SRFSGSKDDSANAGVLSISGLQPEDEADYYCATAH--------------------', 'IGLV6S1*01': 'SVVFTQPQA-VSGSLGETVSISCTRSSGNI----GANYVYWYQQHQGHAPSQLIYQY-------DKRPSGVP-DWFSDSKDSASNSASLTIAGLQPEDEADYYCLSGY--------------------', 'IGLV6S3*01': 'QFVLTQPQS-VSGSLGQTVSISCNRDSGNI----EDYYVHWYQQHPGKAPTTVIYND-------DQRPSGVP-DRFSGSIDSTSNSASLTITGLLAEDEADYYCLSSD--------------------', 'IGLV6S5*01': 'QFVLTQPQS-VSGSLGQTVSISCNRDSGNI----EDYYVHWYQQHPGKAPTTVIYND-------DQRPSGVP-DRFSGSIDSTSNSASLTIAGLQAEDEADYHCQSYD--------------------', 'IGLV6S6*01': 'QFVLNQPQS-VSGSLGQTVSISCNRDSGNI----EEKYVHWYQQHPGKAPTTVIYSD-------DQRPSGVP-DRFSGSINSASNSASLTITGLLAEDEADYHCQSYD--------------------', 'IGLV6S7*01': 'QFVLTQPQS-VSGSLGQTVSISCNRDSGNI----EDYYVHWYQQHPGKAPTTVIYND-------DQRPSGVP-DRFSGSIDSTSNSASLTITGLLAEDEADYYCLSSD--------------------'}, 'rhesus': {'IGLV1-60*01': 'QSVLTQPPS-ASEAARKSVTISCSGSSSNI----GSNSVSWYQQLPGTAPKLLIYYN-------DQRASGVS-DRFSGSK--SGTSASLAISGLQTEDEADYYCAAWD--------------------', 'IGLV1-64*01': 'QSVLTQPPS-VSGAPGQRVTISCTGSSSNI----GGYYVSWYQQLPGTTPKLLIYQD-------NKRPSGVS-DRFSGSK--SGTSASLTITGLQTEDEADYYCLSYD--------------------', 'IGLV1-64*02': 'QSVLTQPPS-VSGAPGQRVTISCTGSSSNI----GGYYVSWYQQLPGTTPKLLIYQD-------NKRPSGVS-DRFSGSK--SGTSASLTITGLQTEDEADYYCLSYD--------------------', 'IGLV1-65*01': 'QSVLTQPPS-VSGDPGQRVTISCTGSSSNI----GGYYVYWYQQFPGTAPKLLIYDN-------NKRPSGVS-DRFSGSK--SGTSASLTITGLQPGDEADYYCGAWD--------------------', 'IGLV1-66*01': 'QSVLTQPPS-VSGDPGQRVTISCTGSSSNI----GGYDVYWYQQLPGTAPKLLIYEN-------NKRPSGVS-DRFSGSK--SGTSASLTITGLQSEDEAEYYCETWD--------------------', 'IGLV1-67*01': 'QSVLTQPPS-VSAAPGQRVTISCSGSSSNI----GRSYVSWYQQVPGTAPKLLIYQD-------NKRPSGVS-DRFSGSK--SGTSASLAITGLQTGDEADYYCSAWD--------------------', 'IGLV1-67*02': 'QSVLTQPPS-VSAAPGQKVTISCSGSSSNI----GRSYVSWYQQVPGTAPKLLIYQD-------NKRPSGVS-DRFSGSK--SGTSASLAITGLQTGDEADYYCSAWD--------------------', 'IGLV1-72*01': 'QSVLTQPPS-ASGAPGQSVTISCSGSSSNI----GSNYVYWYQQLSGKAPKLLIYNN-------NQRPSGVP-DRFSGSK--SGTSASLAISGLQSKDEADYYCSAWD--------------------', 'IGLV1-72*02': 'QSVLTQPPS-ASGAPGQSVTISCSGSSSNI----GSNYVYWYQQLSGKAPKLLIYNN-------NQRPSGVP-DRFSGSK--SGTSASLAISGLQSEDEADYYCAAWD--------------------', 'IGLV1-77*01': 'QSVLTQPPS-ASGAPGQSVTISCSGSSSNI----RGNGVHWYQQLSGTAPKLLIYNN-------NQRPSGVP-DRFSGSK--SGTSASLAITGLRSEDEVDYYCEAWD--------------------', 'IGLV1-77*02': 'QSVLTQPPS-ASGAPGQSVTISCSGSSSNI----RGNGVHWYQQLSGMAPKLLIYNN-------NQRPSGVP-DRFSGSK--SGTSASLAITGLQSEDEADYYCEAWD--------------------', 'IGLV1-81*01': 
'QSVLTQPPS-ASGAPGQSVTISCSGSSSNI----GSNYVYWYQQLPGTAPKLLIYYS-------NQRPSGVP-DRFSGSK--SGTSASLAITGLRSEDEADYYCAAWD--------------------', 'IGLV1-85*01': 'QSVLTQPPS-VSGAPGQRVTISCTGSSSNI----GGYYVQWYQQLPGTAPKLLIYEN-------NKRPSGVS-DRFSGSQ--SGTSASLTITGLQSEDEADYYCQSYD--------------------', 'IGLV1-86*01': 'QSVLTQPPS-VSAAPGQRVTISCSGSSFNF----RRYYVSWYQQLPGAAPKLLIYDV-------NKRPSGVS-DRFSGSQ--SGTSATLGISGLRPEDEADYYCSAWD--------------------', 'IGLV1-86*02': 'QSVLTQPPS-VSAAPGQRVTISCSGSSFNF----RRYYVSWYQQLPGTAPKLLIYDV-------NKQPSGVS-DRFSGSQ--SGTSATLGISGLRPEDEADYYCSAWD--------------------', 'IGLV10-114*01': 'QAGLTQPPS-VSKGLRQTATLTCTGNSNNV----GNQGAAWLQQHQGHPPKLLSYRN-------NNRPSGIS-ERFSASR--SGNTASLTITGLQPEDEADYYCSAWD--------------------', 'IGLV11-117*01': 'QPVLTQPPS-LSASPGASARLPCTLSSDLSV---GSKNMYWYQQKPGSAPRLFLYYYSD---SDKQLGPGVP-NRVSGSKETSSNTAFLLISGLQPEDEADYYCQVYD--------------------', 'IGLV1S1*01': 'QSVLTQPPS-VSGAPGQRVTISCTGSSSNIG---AGYYVQWYQQLPGTAPKLLIYEN-------NKRPSGVS-DRFSGSK--SGTSASLTITGLQSEDEADYYCQSYD--------------------', 'IGLV1S2*01': 'QSVLTQPPS-VSGAPGQRVTISCTGSSSNIG---AGYGVQWYQQLPGTAPKLLIYEN-------NKRPSGVS-DRFSGSQ--SGTSASLTITGLQSEDEADYYCLSYD--------------------', 'IGLV1S4*01': 'QSVLTQPPS-ASGAPGQSVTISCSGSSSNI----GGNNVYWYQQLPGTAPKLLIYYS-------NQRPSGVP-DRFSGSK--SGTSASLAITGLRSEDEADYYCAAWD--------------------', 'IGLV1S6*01': 'QSVLTQPPS-VSAAPGQKVTISCSGSSSNI----GRSYVSWYQQVPGTAPKLLIYDN-------NKRPSGVS-DRFSGSK--SGTSASLAITGLQTGDEADYYCGAWD--------------------', 'IGLV2-11*01': 'QSAPIQSPS-VSGSLGQSVTISCTGTSSDIG---RYNYVSWYRQQPGTTTKLMMYKV-------NMRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYE--------------------', 'IGLV2-11*03': 'QSAPIQSPS-VSGSLGQSVTISCTGTSSDIG---RYNYVSWYRQQPGTTTKLMMYKV-------NMRPSGVS-DRFSGSK--SGNTASLTISGLQAEDKADYYCSSYE--------------------', 'IGLV2-13*01': 'QAALTQSPS-VSGSPGQSVTISCTGTSSDIG---GYNRVSWYQQHPGKAPKLMIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-13*02': 'QAALTQSPS-VSGSPGQSVTISCTGTSSDIG---GYNRVSWYQQHPGKAPKLMIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-13*03': 'QAAPTQSPS-VSGSPGQSVTISCTGTSSDIG---GYNRVSWYQQHPGKAPKLMIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-19*01': 'QAAPTQPPS-VSGSPGQSVTISCTGTSSDIG---YYNAVSWYQQHPGKAPKLMIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-19*02': 'QAAPTQSPS-VSGSAGQSVTISCTGTSSDIG---YYNAVSWYQQHPGKAPKLMIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-19*03': 'QAAPTQPPS-VSGSPGQSVTISCTGTSSDIG---YYNAVSWYQQHPGKAPKLMIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-23*01': 'QAALTQPPS-VSGSPGQSVTISCTGTSSDIG---GYNYVSWYQQHPGKAPKLMIYDV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-23*02': 'QAALTQPPS-MSGSPGQSVTISCTGTSSDIG---GYNRVSWYQQHPGKAPKLMIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-23*03': 'QAAPTQPPS-VSGSPGQSVTISCTGTSSDIG---GYNYVSWYQQHPGKAPKLMIYDV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-26*01': 'QAALTQPPS-VSKSLGQSVTISCAGTSSGIA---SYSDISWYQQHPGTDPRLLIYRV-------SNRPSGVS-DRFSGFK--SGSTTSLTISGLQAEDEAIYYCCSYR--------------------', 'IGLV2-26*02': 'QAALTQPPS-VSKSLGQSVTISCAGTSSGIA---SYSDVSWYQQHPGTAPRLLIYRV-------SNRPSGVS-DRFSGFK--SGSTASLTISGLQAEDEAIYYCCSYR--------------------', 'IGLV2-32*01': 'QAALTQPRS-VSGSPGQSVTISCTGTSSDIG---GYNYVSWYQQHPGTAPKLMIYAV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCCSYA--------------------', 'IGLV2-32*02': 
'QAALTQPRS-VSGSPGQSVTISCTGTSSDIG---GYNYVSWYQQHPGTAPKLMIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2-38*01': 'QSALTQPPS-VSKSLGQSVTISCTGTSSDIG---GYNGVSWYQQHSGTAPRLLIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCGSYR--------------------', 'IGLV2-46*01': 'QSAPTQPPS-VSGSPGQSVTISCTGTSSDIG---YYNAVSWYQQHPGTAPKLMIYGV-------SNRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCCSYT--------------------', 'IGLV2S4*01': 'QAAPTQSPS-VSGSPGQSVTISCTGTSSDIG---GYNRVSWYQQHPGKAPKLMIYEV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCSSYA--------------------', 'IGLV2S7*01': 'QSAPTQPPS-VSGSPGQSVTISCTGTSSDVG---GYNYVSWYQQHPGKAPKLMIYGV-------SNRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCCSYT--------------------', 'IGLV2S9*01': 'QAALTQPPS-VSKSLGQSVTISCTGTSNDVG---GYNDVSWYQQHPGTAPRLLIYDV-------SKRPSGVS-DRFSGSK--SGNTASLTISGLQAEDEADYYCCSYR--------------------', 'IGLV3-14*01': 'SYELTQPLS-VSVALGQMARITCGGNNIG------RKYVYWYQQKPDQAPVLVIYED-------SKRPSGIP-ERFAGSN--SGNTATLTIDGAQARDEADYYCQVWD--------------------', 'IGLV3-16*02': 'SYELTQPPS-VSVSPGQTARITCSGDALP------KKYAYWFQQKPGQSPVLIIYED-------SKRPSGIP-ERFSGSS--SGTVATLTISGAQVEDEADYYCYSTD--------------------', 'IGLV3-22*01': 'SYELTQPPS-VSVSPGQTARITCSGEILA------KKYAQWFQQKPGQAPVLVIYKD-------SERPSGIP-ERFSSSS--SGTTVTLTISGAQAEDEADYYCQSAD--------------------', 'IGLV3-25*01': 'SYELTQPPS-VSAASGQTARITCGGDNIG------SKYVHWYQQKPAQAPVQVIYAD-------SKRPSGIP-ERFSGSN--SGNTATLTISGVEAGDEADYYCQVWD--------------------', 'IGLV3-25*02': 'SYELTQPPS-VSAASGQTARITCGGDNIG------SKNVHWYQQKPAQAPVLVIYAD-------SKRPSGIP-ERFSGSN--SGNTATLTISRVEAGDEADYYCQVWD--------------------', 'IGLV3-27*01': 'SSELTQDPA-VSVALGQTVRITCQGDSLR------SYYASWYQQKPGQAPVLVVYGN-------NNRPSGIP-ERFSGSS--SGNTASLTITGAQVEDEADYYCDSWD--------------------', 'IGLV3-27*02': 'SSELTQDPA-VSVALGQTVRITCQGDSLR------SYYASWYQQKPGQAPVLVVYGN-------NNRPSGIP-ERFSGSS--SGNTASLTITGAQVEDEADYYCDSWD--------------------', 'IGLV3-29*01': 'SYDVTQPRS-VSVSPGQTARITCGGDNIG------SKVVHWYQQKPAQAPVLVIYRD-------SKRPSGIP-ERFSGSN--SGNTATLTISGVEAGDEADYYCQVWD--------------------', 'IGLV3-30*01': 'SYELTQPPL-VSVSPGQTARITCSGDVLK------ENYADWYQQKPGQAPVLLIHED-------SKRPSGIP-ERFSGST--SGDTTTLTISSTLSEDEADYSCFSGN--------------------', 'IGLV3-33*01': 'SSELTQEPA-LSVALGHTVRMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNTGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-33*02': 'SSELTQEPA-LSVALGHTVRMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNTGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-33*03': 'SSELTQEPA-LSVALGHTVSMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNTGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-33*04': 'SSELTQEPA-LSVALGHTVSMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNTGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-33*05': 'SSELTQEPA-LSVALGHTVSMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNRGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-33*06': 'SSELTQEPA-LSVALGHTVSMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNTGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-33*07': 'SSELTQEPA-LSVALGHTVSMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNTGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-33*08': 'SSELTQEPA-LSVALGHTVSMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNTGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-34*01': 'SYELTQPRS-VSVSPGQTARITCGGDNIG------SKSVQWYQQKPPQAPVLVIYAD-------SERPSGIP-ERFSGSN--SGNTATLTISGVEAGDEADYYCQVWD--------------------', 'IGLV3-36*01': 
'SYELTQPPS-VSVSPGQMARITCGGDNLG------SKYVHWYQQKPAQAPVLVIYYD-------SDRPSGIP-ERFSGSK--SGNTATLTISGVEAGDEADYYCQVWD--------------------', 'IGLV3-36*02': 'SYELTQPPS-VSVSPGQTARITCGGDNLG------SKYVHWYQQKPAQAPVLVIYYD-------SDRPSGIP-ERFSGSK--SGNTATLTISGVEAGDEADYYCQVWD--------------------', 'IGLV3-39*01': 'SSELTQEPA-LSVALGHTVRMTCQGDSLK------TYYASWYQQKPGQVPVLVVYGN-------NYRLSGIP-GRFSVSW--SGNTGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-40*01': 'SYELTQPRS-VSVSPGQTARITCGGDNIG------SKSVQWYQQKPPQAPVLVIYAD-------SERPSGIP-ERFSGSN--SGNTATLTISGVEAGDEADYYCQVWD--------------------', 'IGLV3-43*01': 'SSGLTQEPA-LSVALGHTVRMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNTGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3-44*01': 'SYDLTQPPS-VSVSPGQTARITCGGDNIG------SEAVHWYQQKPPQAPVQVIYSD-------SERPSGIP-ERFSGSK--SGNTATLTISGVEAGDEADYYCQVWD--------------------', 'IGLV3-48*01': 'SYELTQPPS-VSVSPGQTARITCSGDALP------KNYAYWYQQKPGQVPVLVIYKD-------SERPSGIP-ERFSGSS--SGTTVTLTVSGAQAEDEADYYCYSGD--------------------', 'IGLV3S10*01': 'SSGLTQEPA-LSVALGHTVRMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------TNRPSGIP-GRFSGSW--SGNTGSLTITGAQVEDEADYYCGSWD--------------------', 'IGLV3S11*01': 'SSGLTQEPA-LSVALGHTVRMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------TNRPSGIP-GRFSGSW--SGNTGSLTITGAQVEDEADYYCGSWD--------------------', 'IGLV3S12*01': 'SSELTQKPA-LSVALGHTVRMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------TNRPSGIP-GRFSVSW--SGNTASLTITGAQVEDEADYYCGSWD--------------------', 'IGLV3S13*01': 'SSELTQDPA-VSVALGQTVRITCQGDSLR------SYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNTGSLTITGAQVEDEADYYCGSWD--------------------', 'IGLV3S14*01': 'SYELTQPPS-VSVSPGQTAKITCSGEILA------KKYARWFQQKPGQAPVLVIYKD-------SERPSGIP-ERFSGSS--SGTTVTLTVSGAQAEDEADYYCYSGD--------------------', 'IGLV3S15*01': 'SYELTQPPS-VSVSPGQTAKITCSGEILA------KKYARWFQQKPGQAPVLVIYKD-------SERPSGIP-ERFSGSS--SGTTVTLTVSGAQAEDEADYYCYSGD--------------------', 'IGLV3S16*01': 'SYELTQPPS-VSVSLGQTAKITCSGDVLA------KYYAHWFQQKPGQAPVLVIYKD-------SERPSGIP-ERFSGSS--SGTTVTLTISGAQAEDEADYYCYSGD--------------------', 'IGLV3S3*01': 'SSELTQEPA-LSVALGHTVSMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIS-ERFSGSS--SGNTASLTITGAQVEDEADYYCDSWD--------------------', 'IGLV3S7*01': 'SSGLTQEPA-LSVALGHTVRMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------TNRPSGIP-GRFSGSW--SGNRGSLTITAAQVEDEADYYCNSWD--------------------', 'IGLV3S9*01': 'SSGLTQEPA-LSVALGHTVSMTCQGDSLK------TYYASWYQQKPGQVPVLVIYGN-------NYRPSGIP-GRFSVSW--SGNRGSLTITAAQVEDEADYYCGSWD--------------------', 'IGLV4-97*01': 'QPVLTQSPS-ASASLGASVKLTCTLSSGHS-----SYAIAWHQQQQGKAPRYLMRLNSV---GSHSKGDGIP-DRFSGSS--SGAERYLTISNLQSEDEADYYCQTWT--------------------', 'IGLV4-97*02': 'QPVLTQSPS-ASASLGASVKLTCTLSSGHS-----SYAIAWHQQQQGKAPRYLMRLNSD---GSHSKGDGIP-DRFSGSS--SGAERYLTISNLQSEDEADYYCQTWD--------------------', 'IGLV4-97*03': 'QPVLTQSPS-ASASLGASVKLTCTLSSGHS-----SYTIAWHQQQQGKAPRYLMWLKSD---GSHSKGDGIP-DRFSGSS--SGAERYLTISNLQSEDEADYYCQTWD--------------------', 'IGLV4S1*01': 'QPVLTQSPS-ASASLGASIKLTCTLSSGHS-----SYTIAWHQQQQGKAPRYLMWLKSD---GSHSKGDGIP-DRFSGSS--SGAERYLTISNLQSEDEADYYCQTWD--------------------', 'IGLV5-103*01': 'QPVLTQPPS-LSASQGASARLSCTLSSGFSA---DLYWIYWYQHKPGSPPRYLLSLYQN---SLHDLGSGVP-RRISGLMEDWSNKGLLLISDLQPEDEADYYCMIEH--------------------', 'IGLV5-62*01': 'KPMLTQPAS-LSASPGASASLTCTFSGGINV---AGYHIFWYQQKPGSPPRYLLRYKSD---SDKGQGSGVP-SRFSGSKDASANTGILRISGLQSEDEADYYCAIGH--------------------', 'IGLV5-62*02': 'KPMLTQPAS-LSASPGASASLTCTFSGGINV---AGYHIFWYQQKPGSPPRYLLRYKSD---SDKGQGSGVP-SRFSGSKDASANAGILRISGLQSEDEADYYCAIGR--------------------', 'IGLV5-69*01': 
'QPVLTQPTS-LSASPGASARLSCTLSSGINV---GSYSIFWYQQKPGSPPRYLLYYYSD---SSKHQGSGVP-SRFSGSKDASANAGLLLISGLQSEDEADYYCAIWH--------------------', 'IGLV5-74*01': 'QPVLTQPTS-LSASPGASVRLTCTLRSGISV---GGYNIHWYQQKPRSPPRYLLYYYSD---SNKGQGSGVP-SRFSGSKDASANAGILLISGFQSEDEADYYCTTWH--------------------', 'IGLV5-74*02': 'QPVLTQPTS-LSASPGASVRLTCTLRSGISV---GGYNIHWYQQKPGSPPRYLLYYYSD---SNKGQGSGVP-SRFSGSKDASANAGILLISGFQSEDEADYYCTTWH--------------------', 'IGLV5-83*01': 'QPVLTQPTS-LSASPGASARLTCTLRSGISV---GSYRIFWYQQKPGSPPRYLLNYHTD---SDKHQGSGVP-SRFSGSKDASANAGILLISGLQSEDEADYYCMIWH--------------------', 'IGLV5-83*02': 'QPVLTQPAS-LSASPGASARLTCTLSSGISV---GSYRIFWYQQKPGSPPRYLLNYHTD---SDKHQGSGVP-SRFSGSKDASANAGILLISGLQSEDEADYYCMIWH--------------------', 'IGLV5-87*01': 'QPVLTQPAS-LSASPGASASLTCTFSGGTNV---GDYTIHWYQQKPGSPPRYLLKYKSD---SDKHQGSGVP-SRFSGSKDASANTGILRISGLQSEDEADYYCAIGR--------------------', 'IGLV5-87*02': 'QPVLTQPAS-VSASPGASASLTCTFSGGTNV---GDYTIHWYQQKPGSPPRYLLKYKSD---SDKHQGSGVP-SRFSGSKDASANTGILRISGLQSEDEADYYCAIGR--------------------', 'IGLV5-95*01': 'QPVLTQPSS-HSASPGAPARLTCTLSSGFSV---GDFWIRWYQQKPGSPPRYLLYYHSD---SDKHQGSGVP-SRFSGSNDASANAGILHISGLQPEDEADYYCCTWH--------------------', 'IGLV5-99*01': 'QSVLTQPPS-LSASLEALARLTCTLSSGISV---GGKIVYWYQQKPGSNPRYLLSYYSE---SSKHQGSGVP-GRFSGSKDASTNSGILHVSGLQPEDEADYYCKIWH--------------------', 'IGLV6-110*01': 'EVVFTQPHS-VSGSPGQTVTISCTRSSGSI----DSEYVQWYQQRPGNAPTTVIYKD-------NQRPSGVP-DRFSGSIDSSSNSASLAISGLKSEDEADYYCQSAD--------------------', 'IGLV6-110*02': 'EVVFTQPHS-VSGSPGQTVTISCTRSSGSI----DSEYVQWYQQRPGNAPTTVIYKD-------NQRPSGVP-DRFSGSIDSSSNSASLAISGLKSEDEADYYCQSAD--------------------', 'IGLV6-110*03': 'EVVFTQPHS-VSGSPGQTVTISCTRSSGSI----DSEYVQWYQQRPGNAPTTVIYKD-------NQRPSGVP-DRFSGSIDSSSNSASLAISGLKSEDEADYYCQSAD--------------------', 'IGLV6-112*01': 'EVVFTQPHS-VSGSPGQTVTISCTRSSGSI----DSKYVQWYQQRPGSAPTTVIYKD-------NQRPSGVP-DRFSGSIDSSSNSASLTISGLKSEDEADYYCQSAD--------------------', 'IGLV6-112*02': 'EVVFTQPHS-VSGSPGQTVTISCTRSSGSI----DSEYVQWYQQRPGSAPTTVIYKD-------NQRPSGVP-DRFSGSIDSSSNSASLAISGLKSEDEADYYCQSYD--------------------', 'IGLV6-92*01': 'EVVFTQPHS-VSGSPGQTVTISCTHSSGSI----DNSYVYWYQQRPGSAPTTVIYND-------DQRPSGVP-DRSSGSIDSSSNSASLTISGLKSEDEADYYCQSYD--------------------', 'IGLV7-71*01': 'QAVVTQEPS-LTVSPGGTVTLTCASSTGAVT---SGHSPHWCQQKPGQAPRTLIYNT-------SFKHSWTP-ARFSGSL--LGGKAALILSGAQPEDEAEYYCLLHY--------------------', 'IGLV7-76*01': 'QAVVTQEPS-MTVSPGGTVTLTCASSTGAVT---SGHSPHWFQQKPGQAPKTLIYNT-------NYKHSWTP-ARFSGSL--LGGKAALTLSGAQPEDEAEYYCWLYY--------------------', 'IGLV7-76*02': 'QAVVTQEPS-MTVSPGGTVTLTCASSTGAVT---SGHSPHWFQQKPGQAPKTLIYNT-------NYKHSWTP-ARFSGSL--LGGKAALTLSGAQPEDKAEYYCWLYY--------------------', 'IGLV7-80*01': 'QAVVTQEPS-LTVSPGGTVTLTCASSTGAVT---SGHYPHWFQQKPGQAPKTLIYDT-------SNKLSWTP-ARFSGSL--AGGKAALTLSGAQPEDEAEYYCWLYY--------------------', 'IGLV7-88*01': 'QAVVTQEPS-LTVSPGGTVTLTCGSSAGAVT---GSHYPYWFQQKPGQAPRTLIYDT-------SNKLSWTP-ARFSGSL--LGGKAALTLSGAQPEDEAEYYCWLHY--------------------', 'IGLV8-125*01': 'ETVVTQEPS-LSVSPGGTVTLTCGLSSGSVS---TSNYPSWYQQTPGQAPRMLIYST-------NTRPSGVP-DRFSGSI--LGNKAALTITGAQADDESDYYCMLYM--------------------', 'IGLV8-125*02': 'ETVVTQEPS-LSVYPGGTVTLTCGLSSGSVS---TSNYPSWYQQTPGQAPRTLIYST-------NTRPSGVP-DRFSGSI--LGNKAALTITGAQADDESDYYCTLYM--------------------'}, 'pig': {'IGLV2-6*01': 'QSALTQPPS-VSRNLKEMETISCAGTSSDI-----GGYVSWYQQHPGLAPKFLIYYV-------NTRASGIP-DGFCGSK--SGNTASLTISGLQAEDEADYYCSSPR--------------------', 'IGLV5-14*01': 'QAVLTQPPS-LSASPGPSARLPCTLSSGSSV---GSYHISWYQRKPGRPPWYLLRFHFAS-SKDQGSGVPSC-FSGDKDA--SAHAGLLLISGLQPEDKADCDCLNWQ--------------------', 'IGLV8-18*01': 
'QTV-IQEPA-MSVSLGGTVTLTCAFSSGSVT---SSNYPGWFQQTPGQPPRTVIYST-------NSRPTGVP-SRFSGAI--SGNKATLTITGAQAEDEADYFCALYK--------------------', 'IGLV8-19*01': 'QTV-IQEPA-MSVSLGGTVTLTCAFSSGSVT---SSNNPGWFQQTPGQPPRTVIYQT-------NNRPTGVP-SRFSGAI--SGNKATLTITGAQAEDEADYFCALGK--------------------', 'IGLV8-19*02': 'QTV-IQEPA-MSVSLGGTVTLTCAFSSGSVT---SSNYPSWYQQTPGQPPRQLIYST-------NSRPTGVP-SRFSGAI--SGNKATLTITGAQAEDEADYFCALYK--------------------'}, 'cow': {'IGLV1-12*01': 'QAVLTQPPS-VSGSLGQTVTISCTGSSNNI----GILGVSWYQQIPGSAPRTLIYNS-------NKRPSGVP-DRFSGTK--SGNTGTLTIASLQAEDEADYYCASAD--------------------', 'IGLV1-16*02': 'QDVLTQPSS-VSGSLGQNVSITCSGSSSNVG---YANYVSWHQQKQGSAPRTLIYGA-------TSRASGVP-DQFSGSK--SGNTATLTISSLQPEDEADYYCSSYD--------------------', 'IGLV1-21*01': 'QAVLTQPSS-VSGSLGQRVSITCSGSSNNI----GSYGVGWYQQVPGSGLRTIIYGS-------SSRPSGVP-DRFSGSK--SGNTATLTISSLQAEDEADYFCATVD--------------------', 'IGLV1-26*01': 'QAVLTQPSS-VSGSLGQRVSITCSGSSSNVG---YGNYVSWFQDIPGSAPRTLIYGD-------TSRASGVP-DRFSGSR--SGNTATLTISSLQAEDEADYFCASYQ--------------------', 'IGLV1-31*01': 'QAVLTQPSS-VSGSLGQRVSITCSGSSSNVG---TGNYVSWFQQIPGSAPRTLIYGA-------TSRASGVP-DRFSGSR--SGNTATLTISSLQAEDEADYFCASYQ--------------------', 'IGLV1-40*01': 'QAVLTQPSS-VSGSLGQRVSITCSGSSSNVG---LGNYVSWFQQIPGSAPRTLIYGA-------TSRASGVP-DRFSGSR--SGNTATLTISSLQAEDEADYFCASPD--------------------', 'IGLV1-43*01': 'QAVLTQPSS-VSGSLGQRVSITCSGSSSNVG---YGNYVSWFQEIPGSAPRTLIYGD-------TSRASGVP-DRFSGSR--SGNTATLTISSLQAEDEADYFCASYQ--------------------', 'IGLV1-47*01': 'QAVLTQPSS-VSGSLGQRVSITCSGSSSNV----GNGYVSWYQLIPGSAPRTLIYGD-------TSRASGVP-DRFSGSR--SGNTATLTISSLQAEDEADYFCASAE--------------------', 'IGLV1-52*01': 'QAVLTQPSS-VSGSLGQRVSITCSGSSSNV----GNGYVSWYQLIPGSAPRTLIYGD-------TSRASGVP-DRFSGSR--SGNTATLTISSLQAEDEADYFCASAE--------------------', 'IGLV1-67*02': 'QAVLTQPSS-VSGSLGQRVSITCSGSSNNI----GRYGVGWYQQVPGSGLRTIIYGS-------SSRPSGVP-DRFSGSK--SGNTATLTISSLQAEDEADYFCVAYD--------------------', 'IGLV1-70*01': 'QAVLTQPPS-VSGSLGQRVTISCTGSSSNIG---GGNYVGWYQQIPGSAPKTLIYRS-------TSRPSGVP-DRFSGSR--SGNTATLTISSLQAEDEADYYCATYE--------------------', 'IGLV1-71*01': 'QAVLTQLPS-VFRTLGQRVTISCTGSSNNI----GGYYVSWYQQLPGKAPRLLTYEI-------SKRPPGVP-DRVSGSK--SGNSASLT-SSVHAEDDTDYYCFSWA--------------------', 'IGLV1-73*01': 'QAVLTQPPS-VSGSLGQRVTITCTGSSSYVS---RGNHVSWYQLIPGLAPKTLIYNS-------NKRPSGVP-DRFSGTK--SGNTGTLTIASLQAEDEADYYCASAD--------------------', 'IGLV2-6*01': 'QSGLTQPSS-VSGNLGQTVTISCAGTSSDVG---AYNGVGWYQQLPGSAPKTLIYNL-------NKRSSGIP-ARFSGSK--SGNTATLTISGLQAEDEADYYCSSYK--------------------', 'IGLV2-9*01': 'QSGLTQPSS-VSGNLGQTVITSCAGTSSYVG---SYNGVGWYQQLPGSAPKTLIYNV-------SKRPSGIP-DRFSGSK--SGNTATLTVSGLQAEDEADYYCSSYK--------------------', 'IGLV3-2*01': 'SSQLTQPPA-VSVSLGQTASITCQGDDLE------LLSAHWYQQKPGQAPVLVIYAD-------DNLASGIP-DRFSGSK--SDTTATLTIRGAQAEDEADYYCQSAD--------------------', 'IGLV3-3*01': 'SYELTQLTS-VSVALGQTAKITCSGELLD------EQYTQWYQQKPGQAPKLVIYKD-------SKRRSGIP-DQFSGSS--SGKTAILTISGVRAEDEADYYCLSWD--------------------', 'IGLV3-4*01': 'SYELTQPTS-VSVALGQTAKITCSGDLLD------EQYTQWYQQKPGQGPVRVIYKD-------SERPSGIS-DRFSGSS--SGKTATLTISGAQTEDEADYYCQSAD--------------------', 'IGLV3-5*01': 'SSQLTQPPA-VSVSLGQTASITCQGDDLE------SYYAHWYQQKPSQAPVLVIYES-------SERPSGIP-DRFSGSS--SGNTATLTISGAQTEDEADYYCQSYD--------------------', 'IGLV5-72*01': 'QPVLTQPVT-VSASLGASARLSCTLSSGYNV---SNYSIYWYQQKAGNPLRYLLRFKSD---SDKHQGSGVP-SRFSGSKDASTNAGLLLISGLQPEDEADYYCAVWH--------------------', 'IGLV8-38*01': 'QIV-IQEPS-LSVSPGGTVTLTCGLSSGSVT---TYNEPSWYQQTPGQAPRNVIYNT-------NTRTSGVP-DRFSASI--SGNKATLTITGAQPKDEADYHCLLYQ--------------------'}}, 'A': {'human': {'TRAV1-1*01': 
'GQSLEQ-PSEVTAVEGAIVQINCTYQTSG------FYGLSWYQQHDGGAPTFLSYNAL----DGLEET-----GRFSSFLSRSDSYGYLLLQELQMKDSASYFCAVR---------------------', 'TRAV1-2*01': 'GQNIDQ-PTEMTATEGAIVQINCTYQTSG------FNGLFWYQQHAGEAPTFLSYNVL----DGLEEK-----GRFSSFLSRSKGYSYLLLKELQMKDSASYLCAVR---------------------', 'TRAV1-2*03': 'GQNIDQ-PTEMTATEGAIVQINCTYQTSG------FNGLFWYQQHAGEAPTFLSYNVL----DGLEEK-----GRFSSFLSRSKGYSYLLLKELQMKDSASYLCAVR---------------------', 'TRAV10*01': 'KNQVEQSPQSLIILEGKNCTLQCNYTVSP------FSNLRWYKQDTGRGPVSLTIMTFS---ENTKSN-----GRYTATLDADTKQSSLHITASQLSDSASYICVVS---------------------', 'TRAV10*02': 'KNQVEQSPQSLIILEGKNCTLQCNYTVSP------FSNLRWYKQDTGRGPVSLTIMTFS---ENTKSN-----GRYTATLDADTKQSSLHITASQLSDSASYICVVS---------------------', 'TRAV12-1*01': 'RKEVEQDPGPFNVPEGATVAFNCTYSNSA------SQSFFWYRQDCRKEPKLLMSVYS----SGN-ED-----GRFTAQLNRASQYISLLIRDSKLSDSATYLCVVN---------------------', 'TRAV12-2*01': 'QKEVEQNSGPLSVPEGAIASLNCTYSDRG------SQSFFWYRQYSGKSPELIMFIYS----NGDKED-----GRFTAQLNKASQYVSLLIRDSQPSDSATYLCAVN---------------------', 'TRAV12-3*01': 'QKEVEQDPGPLSVPEGAIVSLNCTYSNSA------FQYFMWYRQYSRKGPELLMYTYS----SGNKED-----GRFTAQVDKSSKYISLFIRDSQPSDSATYLCAMS---------------------', 'TRAV13-1*01': 'GENVEQHPSTLSVQEGDSAVIKCTYSDSA------SNYFPWYKQELGKGPQLIIDIRSN---VGEKKD-----QRIAVTLNKTAKHFSLHITETQPEDSAVYFCAAS---------------------', 'TRAV13-2*01': 'GESVGLHLPTLSVQEGDNSIINCAYSNSA------SDYFIWYKQESGKGPQFIIDIRSN---MDKRQG-----QRVTVLLNKTVKHLSLQIAATQPGDSAVYFCAEN---------------------', 'TRAV14/DV4*01': 'AQKITQTQPGMFVQEKEAVTLDCTYDTSDP-----SYGLFWYKQPSSGEMIFLIYQGSY--DQQNATE-----GRYSLNFQKARKSANLVISASQLGDSAMYFCAMRE--------------------', 'TRAV14/DV4*02': 'AQKITQTQPGMFVQEKEAVTLDCTYDTSDQ-----SYGLFWYKQPSSGEMIFLIYQGSY--DEQNATE-----GRYSLNFQKARKSANLVISASQLGDSAMYFCAMRE--------------------', 'TRAV16*01': 'AQRVTQPEKLLSVFKGAPVELKCNYSYSG------SPELFWYVQYSRQRLQLLLRHI------SRESI-----KGFTADLNKGETSFHLKKPFAQEEDSAMYYCALS---------------------', 'TRAV17*01': 'SQQGEEDPQALSIQEGENATMNCSYKTSI-------NNLQWYRQNSGRGLVHLILIRSN---EREKHS-----GRLRVTLDTSKKSSSLLITASRAADTASYFCATD---------------------', 'TRAV18*01': 'GDSVTQTEGPVTLPERAALTLNCTYQSSY------STFLFWYVQYLNKEPELLLKSSE----NQETDS-----RGFQASPIKSDSSFHLEKPSVQLSDSAVYYCALR---------------------', 'TRAV19*01': 'AQKVTQAQTEISVVEKEDVTLDCVYETRDT-----TYYLFWYKQPPSGELVFLIRRNSF--DEQNEIS-----GRYSWNFQKSTSSFNFTITASQVVDSAVYFCALSE--------------------', 'TRAV2*01': 'KDQVFQ-PSTVASSEGAVVEIFCNHSVSN------AYNFFWYLHFPGCAPRLLVKGS------KPSQQ-----GRYNMTYER--FSSSLLILQVREADAAVYYCAVE---------------------', 'TRAV20*01': 'EDQVTQSPEALRLQEGESSSLNCSYTVSG------LRGLFWYRQDPGKGPEFLFTLYSA---GEEKEK-----ERLKATLTK--KESFLHITAPKPEDSATYLCAVQ---------------------', 'TRAV20*02': 'EDQVTQSPEALRLQEGESSSLNCSYTVSG------LRGLFWYRQDPGKGPEFLFTLYSA---GEEKEK-----ERLKATLTK--KESFLHITAPKPEDSATYLCAVQ---------------------', 'TRAV21*01': 'KQEVTQIPAALSVPEGENLVLNCSFTDSA------IYNLQWFRQDPGKGLTSLLLIQSS---QREQTS-----GRLNASLDKSSGRSTLYIAASQPGDSATYLCAVR---------------------', 'TRAV22*01': 'GIQVEQSPPDLILQEGANSTLRCNFSDSV-------NNLQWFHQNPWGQLINLFYIPS-----GTKQN-----GRLSATTVATERYSLLYISSSQTTDSGVYFCAVE---------------------', 'TRAV23/DV6*01': 'QQQVKQSPQSLIVQKGGISIINCAYENTA------FDYFPWYQQFPGKGPALLIAIRPD---VSEKKE-----GRFTISFNKSAKQFSLHIMDSQPGDSATYFCAAS---------------------', 'TRAV23/DV6*05': 'QQQVKQSPQSLIVQKGGISIINCAYENTA------FDYFPWYQQFPGKGPALLIAIRPD---VSEKKE-----GRFTISFNKSAKQFSSHIMDSQPGDSATYFCAAS---------------------', 'TRAV24*01': 'ILNVEQSPQSLHVQEGDSTNFTCSFPSSN------FYALHWYRWETAKSPEALFVMTLN---GDEKKK-----GRISATLNTKEGYSYLYIKGSQPEDSATYLCAF----------------------', 'TRAV25*01': 
'GQQVMQIPQYQHVQEGEDFTTYCNSSTTL-------SNIQWYKQRPGGHPVFLIQLVKS---GEVKKQ-----KRLTFQFGEAKKNSSLHITATQTTDVGTYFCAG----------------------', 'TRAV26-1*01': 'DAKTTQ-PPSMDCAEGRAANLPCNHSTISG-----NEYVYWYRQIHSQGPQYIIHGLK-----NNETN-----EMASLIITEDRKSSTLILPHATLRDTAVYYCIVRV--------------------', 'TRAV26-1*02': 'DAKTTQ-PTSMDCAEGRAANLPCNHSTISG-----NEYVYWYRQIHSQGPQYIIHGLK-----NNETN-----EMASLIITEDRKSSTLILPHATLRDTAVYYCIVRV--------------------', 'TRAV26-2*01': 'DAKTTQ-PNSMESNEEEPVHLPCNHSTISG-----TDYIHWYRQLPSQGPEYVIHGLT-----SNVNN-----RMASLAIAEDRKSSTLILHRATLRDAAVYYCILRD--------------------', 'TRAV27*01': 'TQLLEQSPQFLSIQEGENLTVYCNSSSVF-------SSLQWYRQEPGEGPVLLVTVVTG---GEVKKL-----KRLTFQFGDARKDSSLHITAAQPGDTGLYLCAG----------------------', 'TRAV27*03': 'TQLLEQSPQFLSIQEGENLTVYCNSSSVF-------SSLQWYRQEPGEGPVLLVTVVTG---GEVKKL-----KRLTFQFGDARKDSSLHITAAQTGDTGLYLCAG----------------------', 'TRAV29/DV5*01': 'DQQVKQNSPSLSVQEGRISILNCDYTNSM------FDYFLWYKKYPAEGPTFLISISSI---KDKNED-----GRFTVFLNKSAKHLSLHIVPSQPGDSAVYFCAAS---------------------', 'TRAV29/DV5*02': 'DQQVKQNSPSLSVQEGRISILNCDYTNSM------FDYFLWYKKYPAEGPTFLISISSI---KDKNED-----GRFTVFLNKSAKHLSLDIVPSQPGDSAVYFCAAS---------------------', 'TRAV29/DV5*04': 'DQQVKQNSPSLSVQEGRISILNCDYTNSM------FDYFLWYKKYPAEGPTFLISISSI---KDKNED-----GRFTVFLNKSAKHLSLHIVPSQPGDSAVYFCAAS---------------------', 'TRAV3*01': 'AQSVAQPEDQVNVAEGNPLTVKCTYSVSG------NPYLFWYVQYPNRGLQFLLKYITG--DNLVKGS-----YGFEAEFNKSQTSFHLKKPSALVSDSALYFCAVRD--------------------', 'TRAV30*01': 'QQPV-QSPQAVILREGEDAVINCSSSKAL-------YSVHWYRQKHGEAPVFLMILLKG---GEQKGH-----EKISASFNEKKQQSSLYLTASQLSYSGTYFCGTE---------------------', 'TRAV30*05': 'QQPV-QSPQAVILREGEDAVINCSSSKAL-------YSVHWYRQKHGEAPVFLMILLKG---GEQKGH-----DKISASFNEKKQQSSLYLTASQLSYSGTYFCGTE---------------------', 'TRAV34*01': 'SQELEQSPQSLIVQEGKNLTINCTSSKTL-------YGLYWYKQKYGEGLIFLMMLQKG---GEEKSH-----EKITAKLDEKKQQSSLHITASQPSHAGIYLCGAD---------------------', 'TRAV35*01': 'GQQLNQSPQSMFIQEGEDVSMNCTSSSIF-------NTWLWYKQEPGEGPVLLIALYKA---GELTSN-----GRLTAQFGITRKDSFLNISASIPSDVGIYFCAGQ---------------------', 'TRAV36/DV7*01': 'EDKVVQSPLSLVVHEGDTVTLNCSYEVTN------FRSLLWYKQEKKAP-TFLFMLTSS---GIEKKS-----GRLSSILDKKELSSILNITATQTGDSAIYLCAVE---------------------', 'TRAV36/DV7*05': 'EDKVVQSPLSLVVHEGDTVTLNCSYEVTN------FRSLLWYKQEKKAP-TFLFMLTSS---GIEKKS-----GRLSSILDKKELFSILNITATQTGDSAIYLCAVE---------------------', 'TRAV38-1*01': 'AQTVTQSQPEMSVQEAETVTLSCTYDTSEN-----NYYLFWYKQPPSRQMILVIRQEAY--KQQNATE-----NRFSVNFQKAAKSFSLKISDSQLGDTAMYFCAFMK--------------------', 'TRAV38-2/DV8*01': 'AQTVTQSQPEMSVQEAETVTLSCTYDTSES-----DYYLFWYKQPPSRQMILVIRQEAY--KQQNATE-----NRFSVNFQKAAKSFSLKISDSQLGDAAMYFCAYRS--------------------', 'TRAV39*01': 'ELKVEQNPLFLSMQEGKNYTIYCNYSTTS-------DRLYWYRQDPGKSLESLFVLLSN---GAVKQE-----GRLMASLDTKARLSTLHITAAVHDLSATYFCAVD---------------------', 'TRAV4*01': 'LAKTTQ-PISMDSYEGQEVNITCSHNNIAT-----NDYITWYQQFPSQGPRFIIQGYK-----TKVTN-----EVASLFIPADRKSSTLSLPRVSLSDTAVYYCLVGD--------------------', 'TRAV40*01': 'SNSVKQT-GQITVSEGASVTMNCTYTSTG------YPTLFWYVEYPSKPLQLLQRET------MENSK-----NFGGGNIKD--KNSPIVKYSVQVSDSAVYYCLLG---------------------', 'TRAV41*01': 'KNEVEQSPQNLTAQEGEFITINCSYSVGI-------SALHWLQQHPGGGIVSLFMLSS-----GKKKH-----GRLIATINIQEKHSSLHITASHPRDSAVYICAVR---------------------', 'TRAV5*01': 'GEDVEQS-LFLSVREGDSSVINCTYTDSS------STYLYWYKQEPGAGLQLLTYIFSN---MDMKQD-----QRLTVLLNKKDKHLSLRIADTQTGDSAIYFCAES---------------------', 'TRAV6*01': 'SQKIEQNSEALNIQEGKTATLTCNYTNYS------PAYLQWYRQDPGRGPVFLLLIREN---EKEKRK-----ERLKVTFDTTLKQSLFHITASQPADSATYLCALD---------------------', 'TRAV6*07': 
'SQKIEQNSEALNIQEGKTATLTCNYTNYS------PAYLQWYRQDPGRGPVFLLLIREN---EKEKRK-----ERLKVTFDTTLKQSLFHITASQPADSATYLCALD---------------------', 'TRAV7*01': 'ENQVEHSPHFLGPQQGDVASMSCTYSVSR------FNNLQWYRQNTGMGPKHLLSMYSA---GYEKQK-----GRLNATLLK--NGSSLYITAVQPEDSATYFCAVD---------------------', 'TRAV8-1*01': 'AQSVSQHNHHVILSEAASLELGCNYSYGG------TVNLFWYVQYPGQHLQLLLKYFSG--DPLVKGI-----KGFEAEFIKSKFSFNLRKPSVQWSDTAEYFCAVN---------------------', 'TRAV8-2*01': 'AQSVTQLDSHVSVSEGTPVLLRCNYSSSY------SPSLFWYVQHPNKGLQLLLKYTSA--ATLVKGI-----NGFEAEFKKSETSFHLTKPSAHMSDAAEYFCVVS---------------------', 'TRAV8-2*03': 'AQSVTQLDSHVSVSEGTPVLLRCNYSSSY------SPSLFWYVQHPNKGLQLLLKYTSA--ATLVKGI-----NGFEAEFKKSETSFHLTKPSAHMSDAAEYFCVVS---------------------', 'TRAV8-3*01': 'AQSVTQPDIHITVSEGASLELRCNYSYGA------TPYLFWYVQSPGQGLQLLLKYFSG--DTLVQGI-----KGFEAEFKRSQSSFNLRKPSVHWSDAAEYFCAVG---------------------', 'TRAV8-4*01': 'AQSVTQLGSHVSVSEGALVLLRCNYSSSV------PPYLFWYVQYPNQGLQLLLKYTSA--ATLVKGI-----NGFEAEFKKSETSFHLTKPSAHMSDAAEYFCAVS---------------------', 'TRAV8-6*01': 'AQSVTQLDSQVPVFEEAPVELRCNYSSSV------SVYLFWYVQYPNQGLQLLLKYLSG--STLVESI-----NGFEAEFNKSQTSFHLRKPSVHISDTAEYFCAVS---------------------', 'TRAV8-6*02': 'AQSVTQLDSQVPVFEEAPVELRCNYSSSV------SVYLFWYVQYPNQGLQLLLKYLSG--STLVKGI-----NGFEAEFNKSQTSFHLRKPSVHISDTAEYFCAVS---------------------', 'TRAV9-1*01': 'GDSVVQTEGQVLPSEGDSLIVNCSYETTQ------YPSLFWYVQYPGEGPQLHLKAMKA---NDKGRN-----KGFEAMYRKETTSFHLEKDSVQESDSAVYFCALS---------------------', 'TRAV9-2*01': 'GNSVTQMEGPVTLSEEAFLTINCTYTATG------YPSLFWYVQYPGEGLQLLLKATKA---DDKGSN-----KGFEATYRKETTSFHLEKGSVQVSDSAVYFCALS---------------------', 'TRAV9-2*02': 'GDSVTQMEGPVTLSEEAFLTINCTYTATG------YPSLFWYVQYPGEGLQLLLKATKA---DDKGSN-----KGFEATYRKETTSFHLEKGSVQVSDSAVYFCALS---------------------'}, 'mouse': {'TRAV1*01': 'GQGVEQ-PDNLMSVEGTFARVNCTYSTSG------FNGLSWYQQREGHAPVFLSYVVL----DGLKDS-----GHFSTFLSRSNGYSYLLLTELQIKDSASYLCAVR---------------------', 'TRAV10*01': 'GEKVEQHESTLSVREGDSAVINCTYTDTA------SSYFPWYKQEAGKGLHFVIDIRSN---VDRKQS-----QRLIVLLDKKAKRFSLHITATQPEDSAIYFCAAS---------------------', 'TRAV10D*01': 'GEKVEQHQSTLSVREGDSAVINCTYTDTA------SSYFPWYKQEAGKSLHFVIDIRSN---VDRKQS-----QRLTVLLDKKAKRFSLHITATQPEDSAIYFCAAS---------------------', 'TRAV10N*01': 'GEKVEQHESTLSVREGDSAVINCTYTDTA------SSYFPWYKQEAGKSLHFVIDIRSN---VDRKQS-----QRLTVLLDKKAKRFSLHITATQPEDSAIYFCAAS---------------------', 'TRAV11*01': 'KTQVEQSPQSLVVRQGENCVLQCNYSVTP------DNHLRWFKQDTGKGLVSLTVLVHE---NDKTSN-----GRYSATLDKDAKHSTLHITATLLDDTATYICVVG---------------------', 'TRAV11*02': 'KTQVEQSPQSLVVRQGENCVLQCNYSVTP------DNHLRWFKQDTGKGLVSLTVLVDQ---KDKTSN-----GRYSATLDKDAKHSTLHITATLLDDTATYICVVG---------------------', 'TRAV11D*01': 'KTQVEQSPQSLVVRQGENCVLQCNYSVTP------DNHLRWFKQDTGKGLVSLTVLVHE---NDKTSN-----GRYSATLDKDAKHSTLHITATLLDDTATYICVVG---------------------', 'TRAV12-1*01': 'GDSVTQTEGLVTVTEGLPVMLNCTYQTAYS-----DVAFFWYVQYLNEAPKLLLRSSTD---NKRTEH-----QGFHATLHKSSSSFHLQKSSVQLSDSALYYCALS---------------------', 'TRAV12-1*02': 'GDSVTQTEGLVTVTEGLPVMLNCTYQTTYS-----DVAFFWYVQYLNEAPKLLLRSSTD---NKRTEH-----QGFHATLHKSSSSFHLQKSYAQLSDSGLYYCALS---------------------', 'TRAV12-2*01': 'GDSVTQTEGLVTLTEGLPVMLNCTYQTAY------STFLFWYVQHLNEAPKLLLKSSTD---NKRTEH-----QGFHATLHKSSSSFHLQKSSAQLSDSALYYCALS---------------------', 'TRAV12-3*01': 'GDSVTQTEGLVTLTEGLPVMLNCTYQTIYS-----NPFLFWYVHYLNESPRLLLKSSTD---NKRTEH-----QGFHATLHKSSSSFHLQKSSAQLSDSALYYCALS---------------------', 'TRAV12D-1*01': 'GDSVTQTEGLVTVTEGLPVKLNCTYQTTYL-----TIAFFWYVQYLNEAPQVLLKSSTD---NKRTEH-----QGFHATLHKSSSSFHLQKSSAQLSDSALYYCALS---------------------', 'TRAV12D-1*02': 
'GDSVTQTEGLVTVTEGLPVKLNCTYQTTYL-----TIAFFWYVQYLNEAPQVLLRSSTD---NKRTEH-----QGFHATLHKSSSSFHLQKSSAQLSDSALYYCALS---------------------', 'TRAV12D-2*01': 'GDSVTQTEGLVTLTEGLPVMLNCTYQSTY------SPFLFWYVQHLNEAPKLLLKSFTD---NKRPEH-----QGFHATLHKSSSSFHLQKSSAQLSDSALYYCALS---------------------', 'TRAV12D-2*02': 'GDSVTQTEGLVTLTKGLPVMLNCTYQTTY------SPFLFWYVQHLNEAPKLLLKSSTD---NKRTEH-----QGFYATLHKSSSSFHLQKSSVQLSDSALYFCALS---------------------', 'TRAV12D-3*02': 'GDSVTQTEGLVTLTEGLPVMLNCTYQTIYS-----NAFLFWYVHYLNESPWLLLRSSTD---NKRTEH-----QGFHATLHKSSSSFHLQKSSAQLSDSALYYCALS---------------------', 'TRAV12N-1*01': 'GDSVTQTEGLVTVTEGLPVMLNCTYQTAYS-----DVAFFWYVQYLNEAPKLLLRSSTD---NKRTEH-----QGFHATLHKSSSSFHLQKSSVQLSDSALYYCALS---------------------', 'TRAV12N-2*01': 'GDSVTQTEGLVTLTKGLPVMLNCTYQTTY------SPFLFWYVQHLNEAPKLLLKSSTD---NKRTEH-----QGFYATLHKSSSSFHLQKSSVQLSDSALYFCALS---------------------', 'TRAV12N-3*01': 'GDSVTQTEGLVTLTEGLPVMLNCTYQTIYS-----NPFLFWYVQHLNESPRLLLKSFTD---NKRTEH-----QGFHATLHKSSSSFHLQKSSAQLSDSALYYCALS---------------------', 'TRAV13-1*01': 'GQQVQQSPASLVLQEGENAELQCNFSTSL-------NSMQWFYQRPEGSLVSLFYNPS-----GTKQS-----GRLTSTTVIKERRSSLHISSSQITDSGTYLCAME---------------------', 'TRAV13-1*02': 'GQQVQQSPASLVLQEGENAELQCNFSTSL-------NSMQWFYQRPGGSLVSLFYNPS-----GTKHS-----GRLTSTTVIKERRSSLHISSSQTTDSGTYLCALE---------------------', 'TRAV13-2*01': 'GQQVQQSPSSLVLQEGENAELQCNFSSTA-------TQLQWFYQSPGGSLVSLLSNPS-----GTKHT-----GRLTSTTVTKERRSSLHISSSQTTDSGTYLCAID---------------------', 'TRAV13-2*03': 'GQQVQQSPASLVLQEGENAELQCNFSSTA-------TRLQWFYQHPGGRLVSLFYNPS-----GTKHT-----GRLTSTTVTNERRSSLHISSSQTTDSGTYFCAID---------------------', 'TRAV13-3*01': 'GQQVQQSPASLVLQEGENAELQCTYSTTL-------NSMQWFYQRPGGRLVSLLYSPS----WAEQRG-----GRLTSSAASNESRSSLHISSSQITDSGTYLCAME---------------------', 'TRAV13-4/DV7*01': 'GQQVQQSPASLVLQEGENAELQCNFSSTA-------TQLQWFYQRPGGSLVSLLYNPS-----GTKHT-----GRLTSTTVTKERRSSLHISSSQITDSGTYFCAME---------------------', 'TRAV13-4/DV7*02': 'GQQVQQSPASLVLQEGENAELQCNFSSTA-------TRLQWFYQRPGGSLVSLLSNPS-----GTKHT-----GRLTSTTVTKERRGSLHISSSQITDSGTYLCAME---------------------', 'TRAV13-5*01': 'GQQVQQSPASLVLQEGENAELQCSFSIST-------NQVQWFYQRPGGRLIGLSYIP------GMKPT-----GKQTSSTVTKGRHSSLTISSSQTTDSGTYFCVLS---------------------', 'TRAV13D-1*01': 'GQQVQQSPASLVLQEGENAELQCNFSTSL-------NSMQWFYQRPGGSLVSLFYNPS-----GTKQS-----GRLTSTTVIKERRSSLHISSSQTTDSGTYLCAME---------------------', 'TRAV13D-1*04': 'GQQVQQSPTSLVLQEGENAELQCNFSTSL-------NSMQWFYQRPGGSLVSVFYNPS-----GTKQS-----GRLTSTTVIKERRSSLHISSSQITDSGTYLCAME---------------------', 'TRAV13D-2*01': 'GQQVQQSPASLVLQEGENAELQCNFSSTA-------TRLQWFYQHPGGRLVSLFYNPS-----GTKHT-----GRLTSTTVTNERRGSLHISSSQTTDSGTYFCAID---------------------', 'TRAV13D-2*03': 'GQQVQQSPASLVLQEGENAELQCNFSSTA-------TQLQWFYQSPGGSLVSLLSNPS-----GTKHT-----GRLTSTTVTKERRSSLHISSSQTTDSGTYLCAID---------------------', 'TRAV13D-3*01': 'GQQVEQSPASLVLQEGENAELQCTYSTTL-------NSMQWFYQRPGGRLVSLLYSPS----WAEQRG-----GRLTSSAASNESRSSLHISSSQITDSGTYLCAME---------------------', 'TRAV13D-3*02': 'GQQVQQSPASLVLQEGENAELQCTYSTTL-------NSMQWFYQRPGGRLVSLLYSPS----WAEQRG-----GRLTSSAASNESRSSLHISSSQITDSGTYLCAME---------------------', 'TRAV13D-4*01': 'EQQVQQSPASLVLQEGENAELQCSFSIFT-------NQVQWFYQRPGGRLVSLLYNPS-----GTKQS-----GRLTSTTVIKERRSSLHISSSQITDSGTYLCAME---------------------', 'TRAV13D-4*03': 'GQQVQQSPASLVLQEGENAELQCNFSSTA-------TRLQWFYQRPGGSLVSLLYNPS-----GTKHT-----GRLTSTTVTKERRSSLHISSSQTTDSGTYFCAME---------------------', 'TRAV13N-1*01': 'GQQVQQSPTSLVLQEGENAELQCNFSTSL-------NSMQWFYQRPGGSLISVFYNPS-----GTKQS-----GRLTSTTVIKERRSSLHISSSQITDSGTYLCAME---------------------', 'TRAV13N-2*01': 
'GQQVQQSPASLVLQEGENAELQCNFSSTA-------TQLQWFYQSPGGSLVSLLSNPS-----GTKHT-----GRLTSTTVTKERRSSLHISSSQTTDSGTYLCAID---------------------', 'TRAV13N-3*01': 'GQQVQQSPASLVLQEGENAELQCTYSTTL-------NSMQWFYQRPGGRLVSLLYSPS----WAEQRG-----GRLTSSAASNESRSSLHISSSQITDSGTYLCAME---------------------', 'TRAV13N-4*01': 'EQQVQQSPASLVLQEAENAELQCSFSIFT-------NQVQWFYQRPGGRLVSLLYNPS-----GTKQS-----GRLTSTTVIKERRSSLHISSSQITDSGTYLCAME---------------------', 'TRAV14-1*01': 'QQQVRQSPQSLTVWEGGTTVLTCSYEDST------FNYFPWYQQFPGEGPALLISILSV---SDKKED-----GRFTTFFNKREKKLSLHIIDSQPGDSATYFCAAS---------------------', 'TRAV14-1*02': 'QQQVRQSPQSLTVWEGETAILNCSYEDST------FNYFPWYQQFPGEGPALLISISSV---SDKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14-2*01': 'QQQVRQSPQSLTVWEGETAILNCSYEDST------FDYFPWYWQFPRESPALLIAIRPV---SNKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14-2*02': 'QQQVRQSPQSLTVWEGETAILNCSYEDST------FDYFPWYHQFPGESPALLIAIRPV---SNKKED-----GRFTIFFNKREKKFSLHIADSQPGDSATYFCAAS---------------------', 'TRAV14-2*03': 'QQQVRQSPQSLTVWEGETAILNCSYEDST------FDYFPWYRLFPGESPALLIAIRPV---SNKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14-3*01': 'QQQVRQSPQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLISILSV---SDKKED-----GRFTIFFNKREKKLSLHIADSQPGDSATYFCAAS---------------------', 'TRAV14-3*02': 'QQQVRQSPQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLIAILSV---SNKKED-----GRFTIFFNKREKKLSLHIADSQPGDSATYFCAAS---------------------', 'TRAV14-3*04': 'QQQVRQSPQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLIAIRSV---SDKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14D-1*01': 'QQQVRQSPQSLTVWEGETAILNCSYEDST------FNYFPWYQQFPGEGPALLISIRSV---SDKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14D-2*01': 'QQQVRQSPQSLTVWEGETTILNCSYEDST------FDYFPWYRQFPGKSPALLIAISLV---SNKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14D-2*03': 'QQQVRQSPQSLTVWEGETAILNCSYEDST------FDYFPWYWQFPRESPALLIAIRPV---SNKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14D-3/DV8*01': 'QQQVRQSSQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLIAIRSV---SDKKED-----GRFTIFFNKREKNLSLHIKDSQPGDSATYFCAAS---------------------', 'TRAV14D-3/DV8*02': 'QQQVRQSPQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLIAIRSV---SDKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14D-3/DV8*03': 'QQQVRQSPQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLIAIRSV---SDKKED-----GGFTIFFNKREKNLSLHIKDSQPGDSATYFCAAS---------------------', 'TRAV14D-3/DV8*08': 'QQQVRQSPQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLISILSV---SDKKED-----GRFTIFFNKREKKLSLHIADSQPGDSATYFCAAS---------------------', 'TRAV14N-1*01': 'QQQVRQSPQSLTVWEGETAILNCSYEDST------FNYFPWYQQFPGEGPALLISIRSV---SDKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14N-2*01': 'QQQVRQSPQSLTVWEGETAILNCSYEDST------FDYFPWYWQFPRESPALLIAIRPV---SNKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14N-3*01': 'QQQVRQSSQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLIAIRSV---SDKKED-----GRFTIFFNKREKNLSLHIKDSQPGDSATYFCAAS---------------------', 'TRAV15-1/DV6-1*01': 'AQKVIQVWSTTSRQEGEKLTLDCSYKTSQV-----LYHLFWYKHLLSGEMVLLIRQMPS--TIAIERS-----GRYSVVFQKSRKSISLVISTLQPDDSGKYFCALWE--------------------', 'TRAV15-2/DV6-2*01': 'AQKVTQVQSTGSSQWG-EVTLHCSYETSEY-----FYVILWYKQLFSGEMVFLIYQTSF--DTQNQRN-----SRYSVVFQKSLKSISLVISASQPEDSGTYFCALSE--------------------', 'TRAV15D-1/DV6D-1*01': 
'AQKVIQVWSTPSRQEGEELTLDCSYETSQV-----LYHLFWYKHLLSGEMVFLIRQMSS--STAKERS-----GRYSVVFQKSLKSISLVISALQPDDSGKYFCALWE--------------------', 'TRAV15D-1/DV6D-1*02': 'AQKVIQVWSTASRQEGEELTLDCSYETSQV-----LYHLFWYKHLLSGEMVFLIRQMSS--STAKERS-----GRYSVVFQKSLKSISLVISALQPDDSGKYFCALWE--------------------', 'TRAV15D-2/DV6D-2*01': 'AQRVTQVQPTGSSQWGEEVTLDCSYETSEY-----FYCIIWYRQLFSGEMVFLIYQTSF--DTQNQRN-----GRYSVVFQKSLKSISLVISASQPEDSGTYFCALSE--------------------', 'TRAV15D-2/DV6D-2*03': 'AQRVTQVQPTGSSQWGEEVTLDCSYETSEY-----FYRIFWYRQLFSGEMVFLIYQPSF--DTQNQRS-----GRYSVVFQKSFKSISLVISASQPEDSGTYFCALSE--------------------', 'TRAV15N-1*01': 'AEKVIQVWSTASRQEGEELTLDCSYETSQV-----LYHLFWYKHLLSGEMVFLIRQTSS--STAKERS-----GRYSVVFQKSLKSISLIISALQPDDSGKYFCALWE--------------------', 'TRAV15N-2*01': 'AQRVTQVQPTGSSQWGEEVTLDCSYETSEY-----FYRIFWYRQLFSGEMVFLIYQPSF--DTQNQRS-----GRYSVVFQKSFKSISLVISASQPEDSGTYFCALSE--------------------', 'TRAV16*01': 'AQKVTQTQTSISVMEKTTVTMDCVYETQDS-----SYFLFWYKQTASGEIVFLIRQDSY--KKENATV-----GHYSLNFQKPKSSIGLIITATQIEDSAVYFCAMRE--------------------', 'TRAV16D/DV11*01': 'AQKVTQTQTSISVMEKTTVTMDCVYETQDS-----SYFLFWYKQTASGEIVFLIRQDSY--KKENATV-----GHYSLNFQKPKSSIGLIITATQIEDSAVYFCAMRE--------------------', 'TRAV16D/DV11*02': 'AQKVTQTQTSISVMEKTTVTMDCVYETQDS-----SYFLFWYKQTASGEIVFLIRQDSY--KKENATV-----GHYSLNFQKPKSSIGLIITATQIEDSAVYFCAMRE--------------------', 'TRAV16N*01': 'AQKVTQTQTSISVVEKTTVTMDCVYETRDS-----SYFLFWYKQTASGEIVFLIRQDSY--KKENATV-----GHYSLNFQKPKSSIGLIITATQIEDSAVYFCAMRE--------------------', 'TRAV17*01': 'AQSVDQPDAHVTLYEGASLELRCSYSYSA------APYLFWYVQYPGQSLQFLLKYITG--DAVVKGT-----KGFEAEFRKSNSSFNLKKSPAHWSDSAKYFCALE---------------------', 'TRAV19*01': 'GQQVKQSSPSLTVQEGGILILNCDYENDM------FDYFAWYKKYPDNSPTLLISVRSN---VDKRED-----GRFTVFLNKSGKHFSLHITASQPEDTAVYLCAAG---------------------', 'TRAV2*01': 'LAKTTQ-PPSMEAYEGQEVNVSCSHTNIAT-----SEYIYWYRQVPHQGPQFIIQGYK-----DYVVN-----EVASLFISADRKLSTLSLPWVSLRDAAVYYCIVTD--------------------', 'TRAV21/DV12*01': 'DAKTTQ-PDSMESTEGETVHLPCSHATISG-----NEYIYWYRQVPLQGPEYVTHGLQ-----QNTTN-----SMAFLAIASDRKSSTLILTHVSLRDAAVYHCILRV--------------------', 'TRAV3-1*01': 'GEQVEQRPPHLSVREGDSAIIICTYTDSA------TAYFSWYKQEAGAGLQLLMSVLSN---VDRKEE-----QGLTVLLNKKDKRLSLNLTAAHPGDSAVYFCAVS---------------------', 'TRAV3-3*01': 'GEQVEQRPPHLSVREGDSAVITCTYTDPN------SYYFFWYKQEPGASLQLLMKVFSS---TEINEG-----QGFTVLLNKKDKRLSLNLTAAHPGDSAAYFCAVS---------------------', 'TRAV3-4*01': 'GEQVEQRPPHLSVPEGDSAVIICTYTDSA------TAYFYWYKQEPGAGLQLLMSVFSN---VDRKEE-----QGLTVLLNKKDKQLSLNLTAAHPGDSAVYFCAVS---------------------', 'TRAV3D-3*01': 'GEQVEQRPPHLSVREGDSAFITCTYTDPN------SYYFFWYKQEPGASLQLLMKVFSS---TEINEG-----QGFTVLLNKKDKRLSLNLTAAHPGDSAAYFCAVS---------------------', 'TRAV3D-3*02': 'GEQVEQRPPHLSVREGDSAVIICTYTDPN------SYYFFWYKQEPGAGLQLLMKVFSS---TEINEG-----QGFTVLLNKKDKQLSLNLTAAHPGDSAVYFCAVS---------------------', 'TRAV3N-3*01': 'GEQVEQRPPHLSVREGDSAVIICTYTDPN------SYYFFWYKQEPGAGLQLLMKVFSS---TEINEG-----QGFTVLLNKKDKQLSLNLTAAHPGDSAVYFCAVS---------------------', 'TRAV4-2*01': 'GMPVEQNPPALSLYEGADSGLRCNFSTTM-------KSVQWFQQNHRGRLITLFYLAQ-----GTKEN-----GRLKSTFNSKERYSTLHIKDAQLEDSGTYFCAAE---------------------', 'TRAV4-2*02': 'GMPVEQNPPALSLYEGAESGLRCNFSTTM-------KGVQWFQQNHRGRLITLFYLAQ-----GTKEN-----GRLKSTFNSKERYSTLHIKDAQLEDSGTYFCAVE---------------------', 'TRAV4-3*01': 'GDQVKQSPSALSLQEGTNSALRCNFSIAT-------TTVQWFLQNPRGSLMNLFYLVP-----GTKEN-----GRLKSTFNSKESYSTLHIRDAQLEDSGTYFCAAE---------------------', 'TRAV4-4/DV10*01': 'GDQVEQSPSALSLHEGTDSALRCNFTTTM-------RSVQWFRQNSRGSLISLFYLAS-----GTKEN-----GRLKSAFDSKERYSTLHIRDAQLEDSGTYFCAAE---------------------', 'TRAV4D-3*01': 
'GDKVKQSPSALSLQEGTNSALRCNFSIAA-------TTVQWFLQNPRGSLINLFYLVP-----GTKEN-----GRLKSTFNSKESYSTLHIRDAQLEDSGTYFCAAE---------------------', 'TRAV4D-4*01': 'GDQVEQSPSALSLHEGTSSALRCNFTTTT-------RSVQWFRQNSRGSLINLFYLAS-----GTKEN-----GRLKSAFDSKELYSTLHIRDAQLEDSGTYFCAAE---------------------', 'TRAV4D-4*04': 'GDQVEQSPSALSLHKGTGSALRCNFTTTT-------RAVQWFRQNSRGSLINLFYLAS-----GTKEN-----GRLKSAFDSKERYSTLHIRDAQLEDSGTYFCAAE---------------------', 'TRAV4N-3*01': 'GDKVKQSPSALSLQEGTNSALRCNFSIAA-------TTVQWFLQNPRGSLMNLFYLVP-----GTKEN-----GRLKSAFDSKESYSTLHIRDAQLEDSGTYFCAAE---------------------', 'TRAV4N-4*01': 'GDQVEQSPSALSLHEGTGSALRCNFTTTM-------RAVQWFRKNSRGSLINLFYLAS-----GTKEN-----GRLKSAFDSKERYSTLHIRDAQLEDSGTYFCAAE---------------------', 'TRAV5-1*01': 'GEQVEQLPSSLIVQEGASVLINCSYTDSA------SVYFPWYKQEPGKRLQFIIDIRSN---MERKQN-----QRLTLLFDKKTKHLSLHITATQPGDSAIYFCSAS---------------------', 'TRAV5D-4*05': 'GEQVEQLPSILRVQEGSSASINCTYENSA------SNYFPWYKQEPGENPKLIIDIRSN---MERKQT-----QGLIVLLDKKAKRFSLHITDTQPGDSAMYFCAAS---------------------', 'TRAV5N-4*01': 'GEQVEQLPSILRVQEGSSASINCTYENSA------SNYFPWYKQEPGENPKLIIDIRSN---MERKQT-----QGLIVLLDKKAKRFSLHITDTQPGDSAMYFCAAS---------------------', 'TRAV6-1*01': 'GDSVTQMQGQVTLSEDDFLFINCTYSTTW------YPTLFWYVQYPGEGPQLLLKVTTA---NNKGIS-----RGFEATYDKRTTSFHLQKASVQESDSAVYYCVLG---------------------', 'TRAV6-2*01': 'GNSVTQMQGQVTLSEEEFLFINCTYSTTG------YPTLFWYVQYPGEGPQLLLKVTTA---NNKGSS-----RGFEATYDKGTTSFHLQKASVQESDSAVYYCVLG---------------------', 'TRAV6-3*01': 'GDSVIQMQGQVTLSENDFLFINCTYSTTG------YPTLFWYVQYSGEGPQLLLQVTTA---NNKGSS-----RGFEATYDKGTTSFHLQKTSVQEIDSAVYYCAMR---------------------', 'TRAV6-3*02': 'GDSVIQMQGQVTLSENDFLFINCTYSTTG------YPTLFWYVQYSGEGPQLLLQVTTA---NNKGSS-----RGFEATYDKGTTSFHLQKTSVQEIDSAVYYCAMR---------------------', 'TRAV6-4*01': 'GDSVTQKQGQVTLSEDDFLFINCTYSTTT------YPTLLWYVQYPGQGPQLLLKVTTA---NNKGIS-----RGFEATYDKGTTSFHLQKASVQESDSAVYFCALV---------------------', 'TRAV6-4*03': 'GDSVTQKQGQVTLSEDDFLFINCTYSTTT------YPTLLWYVQYLGQGPQLLLKVTTA---NNKGIS-----RGFEATYDKGTTSFHLQKASVQESDSAVYFCALV---------------------', 'TRAV6-5*01': 'GDSVTQTEGPVTLSEGTSLTVNCSYETKQ------YPTLFWYVQYPGEGPQLLFKVPKA---NEKGSS-----RGFEATYNKEATSFHLQKASVQESDSAVYYCALS---------------------', 'TRAV6-5*04': 'GDSVTQTEGPVTLSEGTSLTVNCSYETKQ------YPTLFWYVQYPGEGPQLLFKVPKA---NEKGSN-----RGFEATYNKEATSFHLQKASVQESDSAVYYCALS---------------------', 'TRAV6-6*01': 'GDSVTQTEGQVTVSESKSLIINCTYSTTSI----AYPNLFWYVRYPGEGLQLLLKVITA---GQKGSS-----RGFEATYNKETTSFHLQKASVQESDSAVYYCALG---------------------', 'TRAV6-7/DV9*01': 'GDSVTQTEGQVALSEEDFLTIHCNYSASG------YPALFWYVQYPGEGPQFLFRASRD---KEKGSS-----RGFEATYNKETTSFHLQKASVQESDSAVYYCALG---------------------', 'TRAV6D-3*01': 'GDSVIQMQGQVTLSENDFLFINCTYSTTG------YPTLFWYVQYSGEGPQLLLQVTTA---NNKGSS-----RGFEATYDKGTTSFHLQKTSVQEIDSAVYYCAMR---------------------', 'TRAV6D-4*01': 'GDSVTQKQGQVTLSEDDFLFINCTYSTTT------YPTLFWYVQYPGQGPQLLLKVTTA---NNKGIS-----RGFEATYDKGTTSFHLQKASVQESDSAVYFCALV---------------------', 'TRAV6D-5*01': 'GDSVTQTEGPVTLSEGTSLTVNCSYETKQ------YPTLFWYVQYPGEGPQLLFKVPKA---NEKGSN-----RGFEATYNKEATSFHLQKASVQESDSAVYYCALG---------------------', 'TRAV6D-6*01': 'GDSVTQTEGQVTVSESKSLIINCTYSATSI----AYPNLFWYVRYPGEGLQLLLKVITA---GQKGSS-----RGFEATYNKETTSFHLQKASVQESDSAVYYCALS---------------------', 'TRAV6D-6*02': 'GDSVTQTEGPVTVSESESLIINCTYSATSI----AYPNLFWYVRYPGEGLQLLLKVITA---GQKGSS-----RGFEATYNKETTSFHLQKASVQESDSAVYYCALG---------------------', 'TRAV6D-6*05': 'GDSVTQTEGQVTVSESKSLIINCTYSATSI----GYPNLFWYVRYPGEGLQLLLKVITA---GQKGSS-----RGFEATYNKEATSFHLQKASVQESDSAVYYCALS---------------------', 'TRAV6D-7*01': 
'GDSVTQTEGQVALSEEDFLTIHCNYSASG------YPALFWYVQYPGEGPQFLFRASRD---KEKGSS-----RGFEATYNKEATSFHLQKASVQESDSAVYYCALS---------------------', 'TRAV6D-7*04': 'GDSVTQTEGQVALSEEDFLTIHCNYSASG------YPALFWYVQYPGEGPQFLFRASRD---KEKGSS-----RGFEATYDKGTTSFHLRKASVQESDSAVYYCALG---------------------', 'TRAV6N-5*01': 'GDSVTQTEGPVTLSEGTSLTVNCSYETKQ------YPTLFWYVQYPGEGPQLLFKVPKA---NEKGSN-----RGFEATYNKEATSFHLQKASVQESDSAVYYCALG---------------------', 'TRAV6N-6*01': 'GDSVTQTEGQVTVSESKSLIINCTYSATSI----GYPNLFWYVRYPGEGLQLLLKVITA---GQKGSS-----RGFEATYNKEATSFHLQKASVQESDSAVYYCALS---------------------', 'TRAV6N-7*01': 'GDSVTQTEGQVALSEEDFLTIHCNYSASG------YPALFWYVQYPGEGPQFLFRASRD---KEKGSS-----RGFEATYDKGTTSFHLRKASVQESDSAVYYCALG---------------------', 'TRAV7-1*01': 'QQKVQQSPESLIVPEGGMASLNCTFSDRN------SQYFWWYRQHSGEGPKALMSIFS----NGDKKE-----GRFTAHLNKASLYVSLHIKDSQPSDSALYFCAVS---------------------', 'TRAV7-2*01': 'QQKVQQSPESLIVPEGGMASLNCTSSDRN------VDYFWWYRQRSGKSPKMLMAIFS----NGEKEE-----GRFTVHLNKASLHTSLHIRDSQPSDSALYFCAVS---------------------', 'TRAV7-2*02': 'QQKVQQSPESLIVPEGGMASLNCTSSDRN------VDYFWWYRQHSGKSPKMLMSIFS----NGEKEE-----GRFTVHLNKASLHTSLHIRDSQPSDSALYLCAAS---------------------', 'TRAV7-3*01': 'QQNVQQSPESLIVPEGARTSLNCTFSDSA------SQYFWWYRQHSGKAPKALMSIFS----NGEKEE-----GRFTIHLNKASLHFSLHIRDSQPSDSALYLCAVS---------------------', 'TRAV7-3*04': 'QQKVQQSPESLIVPEGAMTSLNCTFSDSA------SQYFAWYRQHSGKAPKALMSIFS----NGEKEE-----GRFTIHLNKASLHFSLHIRDSQPSDSALYLCAVS---------------------', 'TRAV7-4*01': 'QQKVQQSPESLSVPEGGMASFNCTSSDRN------FQYFWWYRQHSGEGPKALMSIFS----DGDKKE-----GRFTAHLNKASLHVSLHIRDSQPSDSALYFCAASE--------------------', 'TRAV7-4*02': 'QQKVQQSPESLSVPEGGMASLNCTSSDRN------FQYFWWYRQHSGEGPKALMSIFS----DGDKKE-----GRFTAHLNKASLHVSLHIRDSQPSDSALYFCAASE--------------------', 'TRAV7-5*01': 'QQKVQQSPESLTVSEGAMASLNCTFSDGT------SDNFRWYRQHSGKGLEVLVSIFS----DGEKEE-----GRFTAHLNRASLHVSLHIREPQPSDSAVYLCAMS---------------------', 'TRAV7-6*01': 'QEKVQQSPESLIVPEGAMASLNCTFSNSA------SQSIWWYQQHPGKGPEALISIFS----NGNKKE-----GRLTVYLNRASLHVSLHIRDSQPTDSAIYLCAVS---------------------', 'TRAV7D-2*01': 'QQKVQQSPESLIVPEGGMASLNCTSSDRN------VDYFWWYRQHSGKSPKMLMSIFS----NGEKEE-----GRFTVHLNKASLHTSLHIRDSQPSDSALYLCAAS---------------------', 'TRAV7D-3*01': 'QQKVQQSPESLIVPEGAMTSLNCTFSDSA------SQYFAWYRQHSGKAPKALMSIFS----NGEKEE-----GRFTIHLNKASLHFSLHIRDSQPSDSALYLCAVS---------------------', 'TRAV7D-4*01': 'QQKVQQSPESLSVPEGGMASLNCTSSDRN------FQYFWWYRQHSGEGPKALMSIFS----DGDKKE-----GRFTAHLNKASLHVSLHIRDSQPSDSALYFCAASE--------------------', 'TRAV7D-4*02': 'QQKVQQSPESLSVPEGGMASLNCTSSDRN------FQYFWWYRQHSGEGPKALMSIFS----DGDKKE-----GRFTAHLNKASLHVSLHIRDSQPSDSALYFCAASE--------------------', 'TRAV7D-5*01': 'QQKVQQSPESLTVSEGAMASLNCTFSDGT------SDNFRWYRQHSGKGLEMLVSIFS----DGEKEE-----GRFTAHLNRASLHVSLHIREPQPSDSAVYLCAVS---------------------', 'TRAV7D-6*01': 'QEKVQQSPESLTVPEGAMASLNCTISDSA------SQSIWWYQQNPGKGPKALISIFS----NGNKKE-----GRLTVYLNRASLHVSLHIRDSHPSDSAVYLCAAS---------------------', 'TRAV7D-6*02': 'QEKVQQSPESLIVPEGAMSSLNCTFSNSA------SQSIWWYQQHPGKGPEALISIFS----NGNKKE-----GRLTVYLNRASLHVSLHIRDSQPSDSAVYLCAVS---------------------', 'TRAV7N-4*01': 'QQKVQQSPESLSVPEGGMASLNCTSSDRN------FQYFWWYRQHSGEGPKALMSIFS----DGDKKE-----GRFTAHLNKASLHVSLHIRDSQPSDSALYFCAVSE--------------------', 'TRAV7N-5*01': 'QQKVQQSPESLTVSEGAMASLNCTFSDRS------SDNFRWYRQHSGKGLEVLVSIFS----DGEKEE-----GSFTAHLNRASLHVFLHIREPQPSDSALYLCAVS---------------------', 'TRAV7N-6*01': 'QEKVQQSPESLIVPEGAMSSLNCTFSNSA------SQSIWWYQQHPGKGPEALISIFS----NGNKKE-----GRLTVYLNRASLHVSLHIRDSQPSDSAVYLCAVS---------------------', 'TRAV8-1*01': 
'SQLAEENPWALSVHEGESVTVNCSYKTSI-------TALQWYRQKSGEGPAQLILIRSN---EREKRN-----GRLRATLDTSSQSSSLSITATRCEDTAVYFCATD---------------------', 'TRAV8-1*03': 'SQLAEENSWALSVHEGESVTVNCSYKTSI-------TALQWYRQKSGKGPAQLILIRSN---EREKRN-----GRLRATLDTSSQSSSLSITATRCEDTAVYFCATD---------------------', 'TRAV8-2*01': 'SQWGEENLQALSIQEGEDVTMNCSYKTYT-------TVVHWYRQDSGRGPALIILIRSN---EREKRS-----GRLRATLDTSSQSSSLSITAAQCEDTAVYFCATD---------------------', 'TRAV8D-1*01': 'SQLAEENLWALSVHEGESVTVNCSYKTSI-------TALQWYRQKSGEGPAQLILIRSN---EREKRN-----GRLRATLDTSSQSSSLSITATRCEDTAVYFCATD---------------------', 'TRAV8D-2*01': 'SQWGEENLQALSIQEGEDVTMNCSYKTYT-------TVVQWYRQKSGKGPALIILIRSN---EREKRS-----GRLRATLDTSSQSSSLSITGTLATDTAVYFCATD---------------------', 'TRAV8D-2*03': 'SQWGEENLQALSIQEGEDVTMNCSYKTYT-------TVVHWYRQDSGRGPALIILIRSN---EREKRS-----GRLRATLDTSSQSSSLSITAAQCEDTAVYFCATD---------------------', 'TRAV8N-2*01': 'SQWGEENLQALSIQEGEDVTMNCSYKTYT-------TVVQWYRQKSGKGPAQLILIRSN---EREKRS-----GRLRATLDTSSQSSSLSITGTLATDTAVYFCATD---------------------', 'TRAV9-1*01': 'TQTVSQSDAHVTVFEGDSVELRCNYSYGG------SIYLSWYIQHHGRGLQFLLKYYSG--NPVVQGV-----NGFEAEFSKSDSSFHLRKASVHWSDSAVYFCAVS---------------------', 'TRAV9-1*02': 'TQTVSQSDAHVTVFEGDSVELRCNYSYGG------SIYLSWYIQHHGHGLQFLLKYYSG--NPVVQGV-----NGFEAEFSKSDSSFHLRKASVHWSDSAVYFCAVS---------------------', 'TRAV9-2*01': 'AQSVTQPDARVTVSEGASLQLRCKYSYSG------TPYLFWYVQYPRQGLQLLLKYYSG--DPVVQGV-----NGFEAEFSKSDSSFHLRKASVHWSDSAVYFCAAS---------------------', 'TRAV9-3*01': 'AQSVTQPDARVTVSEGASLQLRCKYSSSV------TPYLFWYVQYPRQGLQLLLKYYSG--DPVVQGV-----NGFEAEFSKSNSSFHLRKASVHWSDSAVYFCAVS---------------------', 'TRAV9-4*01': 'AQSVTQPDARVTVSEGASLQLRCKYSYSA------TPYLFWYVQYPRQGLQLLLKYYSG--DPVVQGV-----NGFEAEFSKSNSSFHLRKASVHWSDSAVYFCAVS---------------------', 'TRAV9D-1*01': 'TQTVSQSDAHVTVFEGDSVELRCNYSYGG------SIYLSWYIQHHGRGLQFLLKYYSG--NPVVQGV-----NGFEAEFSKSDSSFHLRKASVHWSDSAVYFCAAS---------------------', 'TRAV9D-2*01': 'AQSVTQPDARVTVSQGASLQLRCKYSYSG------TPYLFWYVQYPRQGLQLLLKYYSG--DPVVQGV-----NGFEAEFSKSNSSFHPRKASVHWSDSAVYFCAVS---------------------', 'TRAV9D-2*02': 'AQSVTQPDARVTVSQGASLQLRCKYSYSG------TPYLFWYVQYPRQGLQLLLKYYSG--DPVVQGV-----NGFEAEFSKSNSSFHPRKASVHWSDSAVYFCAVS---------------------', 'TRAV9D-3*01': 'AQSVTQPDARVTVSEGASLQLRCKYSYSA------TPYLFWYVQYPRQGLQMLLKYYSG--DPVVQGV-----NGFEAEFSKSDSSFHLRKASVHWSDSAVYFCAVS---------------------', 'TRAV9D-4*01': 'AQSVTQPDARVTVSEGASLQLRCKYSYSG------TPYLFWYVQYPRQGLQLLLKYYSG--DPVVQGV-----NGFEAEFSKSNSSFHLRKASVHWSDSAVYFCALS---------------------', 'TRAV9N-2*01': 'AQSVTQPDARVTVSEGASLQLRCKYSSSG------TPYLFWYVQYPRQGLQLLLKYYSG--DPVVQGV-----NGFEAEFSKSNSSFHLRKASVHWSDSAVYFCVLS---------------------', 'TRAV9N-3*01': 'AQSVTQPDARVTVSEGASLQLRCKYSYFG------TPYLFWYVQYPRQGLQLLLKYYPG--DPVVQGV-----NGFEAEFSKSNSSFHLRKASVHWSDWAVYFCAVS---------------------', 'TRAV9N-4*01': 'AQSVTQPDARVTVSEGASLQLRCKYSYSA------TPYLFWYVQYPRQGLQLLLKYYSG--DPVVQGV-----NSFEAEFSKSNSSFHLQKASVHWSDSAVYFCALS---------------------'}}, 'B': {'human': {'TRBV10-1*01': 'DAEITQSPRHKITETGRQVTLACHQTWNH-------NNMFWYRQDLGHGLRLIHYSYG----VQDTNKGEVS-DGYSVSRS-NTEDLPLTLESAASSQTSVYFCASSE--------------------', 'TRBV10-1*02': 'DAEITQSPRHKITETGRQVTLACHQTWNH-------NNMFWYRQDLGHGLRLIHYSYG----VHDTNKGEVS-DGYSVSRS-NTEDLPLTLESAASSQTSVYFCASSE--------------------', 'TRBV10-2*01': 'DAGITQSPRYKITETGRQVTLMCHQTWSH-------SYMFWYRQDLGHGLRLIYYSAA----ADITDKGEVP-DGYVVSRS-KTENFPLTLESATRSQTSVYFCASSE--------------------', 'TRBV10-2*02': 'DAGITQSPRYKITETGRQVTLMCHQTWSH-------SYMFWYRQDLGHGLRLIYYSAA----ADITDKGEVP-DGYVVSRS-KTENFPLTLESATRSQTSVYFCASSE--------------------', 'TRBV10-3*01': 
'DAGITQSPRHKVTETGTPVTLRCHQTENH-------RYMYWYRQDPGHGLRLIHYSYG----VKDTDKGEVS-DGYSVSRS-KTEDFLLTLESATSSQTSVYFCAISE--------------------', 'TRBV10-3*02': 'DAGITQSPRHKVTETGTPVTLRCHQTENH-------RYMYWYRQDPGHGLRLIHYSYG----VKDTDKGEVS-DGYSVSRS-KTEDFLLTLESATSSQTSVYFCAISE--------------------', 'TRBV11-1*01': 'EAEVAQSPRYKITEKSQAVAFWCDPISGH-------ATLYWYRQILGQGPELLVQFQD----ESVVDDSQLPKDRFSAERL-KGVDSTLKIQPAELGDSAMYLCASSL--------------------', 'TRBV11-2*01': 'EAGVAQSPRYKIIEKRQSVAFWCNPISGH-------ATLYWYQQILGQGPKLLIQFQN----NGVVDDSQLPKDRFSAERL-KGVDSTLKIQPAKLEDSAVYLCASSL--------------------', 'TRBV11-2*03': 'EAGVAQSPRYKIIEKRQSVAFWCNPISGH-------ATLYWYQQILGQGPKLLIQFQN----NGVVDDSQLPKDRFSAERL-KGVDSTLKIQPAKLEDSAVYLCASSL--------------------', 'TRBV11-3*01': 'EAGVVQSPRYKIIEKKQPVAFWCNPISGH-------NTLYWYLQNLGQGPELLIRYEN----EEAVDDSQLPKDRFSAERL-KGVDSTLKIQPAELGDSAVYLCASSL--------------------', 'TRBV12-3*01': 'DAGVIQSPRHEVTEMGQEVTLRCKPISGH-------NSLFWYRQTMMRGLELLIYFNN----NVPIDDSGMPEDRFSAKMP-NASFSTLKIQPSEPRDSAVYFCASSL--------------------', 'TRBV12-4*01': 'DAGVIQSPRHEVTEMGQEVTLRCKPISGH-------DYLFWYRQTMMRGLELLIYFNN----NVPIDDSGMPEDRFSAKMP-NASFSTLKIQPSEPRDSAVYFCASSL--------------------', 'TRBV12-5*01': 'DARVTQTPRHKVTEMGQEVTMRCQPILGH-------NTVFWYRQTMMQGLELLAYFRN----RAPLDDSGMPKDRFSAEMP-DATLATLKIQPSEPRDSAVYFCASGL--------------------', 'TRBV13*01': 'AAGVIQSPRHLIKEKRETATLKCYPIPRH-------DTVYWYQQGPGQDPQFLISFYE----KMQSDKGSIP-DRFSAQQF-SDYHSELNMSSLELGDSALYFCASSL--------------------', 'TRBV14*01': 'EAGVTQFPSHSVIEKGQTVTLRCDPISGH-------DNLYWYRRVMGKEIKFLLHFVK----ESKQDESGMPNNRFLAERT-GGTYSTLKVQPAELEDSGVYFCASSQ--------------------', 'TRBV15*01': 'DAMVIQNPRYQVTQFGKPVTLSCSQTLNH-------NVMYWYQQKSSQAPKLLFHYYD----KDFNNEADTP-DNFQSRRP-NTSFCFLDIRSPGLGDTAMYLCATSR--------------------', 'TRBV15*02': 'DAMVIQNPRYQVTQFGKPVTLSCSQTLNH-------NVMYWYQQKSSQAPKLLFHYYD----KDFNNEADTP-DNFQSRRP-NTSFCFLDIRSPGLGDAAMYLCATSR--------------------', 'TRBV16*01': 'GEEVAQTPKHLVRGEGQKAKLYCAPIKGH-------SYVFWYQQVLKNEFKFLISFQN----ENVFDETGMPKERFSAKCL-PNSPCSLEIQATKLEDSAVYFCASSQ--------------------', 'TRBV18*01': 'NAGVMQNPRHLVRRRGQEARLRCSPMKGH-------SHVYWYRQLPEEGLKFMVYLQK----ENIIDESGMPKERFSAEFP-KEGPSILRIQQVVRGDSAAYFCASSP--------------------', 'TRBV19*01': 'DGGITQSPKYLFRKEGQNVTLSCEQNLNH-------DAMYWYRQDPGQGLRLIYYSQI----VNDFQKGDIA-EGYSVSRE-KKESFPLTVTSAQKNPTAFYLCASSI--------------------', 'TRBV19*02': 'DGGITQSPKYLFRKEGQNVTLSCEQNLNH-------DAMYWYRQVPGQGLRLIYYSHI----VNDFQKGDIA-EGYSVSRE-KKESFPLTVTSAQKNPTAFYLCASSI--------------------', 'TRBV2*01': 'EPEVTQTPSHQVTQMGQEVILRCVPISNH-------LYFYWYRQILGQKVEFLVSFYN----NEISEKSEIFDDQFSVERP-DGSNFTLKIRSTKLEDSAMYFCASSE--------------------', 'TRBV20-1*01': 'GAVVSQHPSWVICKSGTSVKIECRSLDFQ------ATTMFWYRQFPKQSLMLMATSNEG---SKATYEQGVEKDKFLINHA-SLTLSTLTVTSAHPEDSSFYICSAR---------------------', 'TRBV24-1*01': 'DADVTQTPRNRITKTGKRIMLECSQTKGH-------DRMYWYRQDPGLGLRLIYYSFD----VKDINKGEIS-DGYSVSRQ-AQAKFSLSLESAIPNQTALYFCATSD--------------------', 'TRBV24-1*02': 'DADVTQTPRNRITKTGKRIMLECSQTKGH-------DRMYWYRQDPGLGLQLIYYSFD----VKDINKGEIS-DGYSVSRQ-AQAKFSLSLESAIPNQTALYFCATSD--------------------', 'TRBV25-1*01': 'EADIYQTPRYLVIGTGKKITLECSQTMGH-------DKMYWYQQDPGMELHLIHYSYG----VNSTEKGDLS-SESTVSRI-RTEHFPLTLESARPSHTSQYLCASSE--------------------', 'TRBV27*01': 'EAQVTQNPRYLITVTGKKLTVTCSQNMNH-------EYMSWYRQDPGLGLRQIYYSMN----VEVTDKGDVP-EGYKVSRK-EKRNFPLILESPSPNQTSLYFCASSL--------------------', 'TRBV28*01': 'DVKVTQSSRYLVKRTGEKVFLECVQDMDH-------ENMFWYRQDPGLGLRLIYFSYD----VKMKEKGDIP-EGYSVSRE-KKERFSLILESASTNQTSMYLCASSL--------------------', 'TRBV29-1*01': 
'SAVISQKPSRDICQRGTSLTIQCQVDSQV-------TMMFWYRQQPGQSLTLIATANQG---SEATYESGFVIDKFPISRP-NLTFSTLTVSNMSPEDSSIYLCSVE---------------------', 'TRBV3-1*01': 'DTAVSQTPKYLVTQMGNDKSIKCEQNLGH-------DTMYWYKQDSKKFLKIMFSYNN----KELIINETVP-NRFSPKSP-DKAHLNLHINSLELGDSAVYFCASSQ--------------------', 'TRBV30*02': 'SQTIHQWPATLVQPVGSPLSLECTVEGTS------NPNLYWYRQAAGRGLQLLFYSVG-----IGQISSEVP-QNLSASRP-QDRQFILSSKKLLLSDSGFYLCAWS---------------------', 'TRBV4-1*01': 'DTEVTQTPKHLVMGMTNKKSLKCEQHMGH-------RAMYWYKQKAKKPPELMFVYSY----EKLSINESVP-SRFSPECP-NSSLLNLHLHALQPEDSALYLCASSQ--------------------', 'TRBV4-2*01': 'ETGVTQTPRHLVMGMTNKKSLKCEQHLGH-------NAMYWYKQSAKKPLELMFVYNF----KEQTENNSVP-SRFSPECP-NSSHLFLHLHTLQPEDSALYLCASSQ--------------------', 'TRBV4-3*01': 'ETGVTQTPRHLVMGMTNKKSLKCEQHLGH-------NAMYWYKQSAKKPLELMFVYSL----EERVENNSVP-SRFSPECP-NSSHLFLHLHTLQPEDSALYLCASSQ--------------------', 'TRBV5-1*01': 'KAGVTQTPRYLIKTRGQQVTLSCSPISGH-------RSVSWYQQTPGQGLQFLFEYFS----ETQRNKGNFP-GRFSGRQF-SNSRSEMNVSTLELGDSALYLCASSL--------------------', 'TRBV5-4*01': 'ETGVTQSPTHLIKTRGQQVTLRCSSQSGH-------NTVSWYQQALGQGPQFIFQYYR----EEENGRGNFP-PRFSGLQF-PNYSSELNVNALELDDSALYLCASSL--------------------', 'TRBV5-5*01': 'DAGVTQSPTHLIKTRGQQVTLRCSPISGH-------KSVSWYQQVLGQGPQFIFQYYE----KEERGRGNFP-DRFSARQF-PNYSSELNVNALLLGDSALYLCASSL--------------------', 'TRBV5-6*01': 'DAGVTQSPTHLIKTRGQQVTLRCSPKSGH-------DTVSWYQQALGQGPQFIFQYYE----EEERQRGNFP-DRFSGHQF-PNYSSELNVNALLLGDSALYLCASSL--------------------', 'TRBV5-8*01': 'EAGVTQSPTHLIKTRGQQATLRCSPISGH-------TSVYWYQQALGLGLQFLLWYDE----GEERNRGNFP-PRFSGRQF-PNYSSELNVNALELEDSALYLCASSL--------------------', 'TRBV6-1*01': 'NAGVTQTPKFQVLKTGQSMTLQCAQDMNH-------NSMYWYRQDPGMGLRLIYYSAS----EGTTDKGEVP-NGYNVSRL-NKREFSLRLESAAPSQTSVYFCASSE--------------------', 'TRBV6-2*01': 'NAGVTQTPKFRVLKTGQSMTLLCAQDMNH-------EYMYWYRQDPGMGLRLIHYSVG----EGTTAKGEVP-DGYNVSRL-KKQNFLLGLESAAPSQTSVYFCASSY--------------------', 'TRBV6-3*01': 'NAGVTQTPKFRVLKTGQSMTLLCAQDMNH-------EYMYWYRQDPGMGLRLIHYSVG----EGTTAKGEVP-DGYNVSRL-KKQNFLLGLESAAPSQTSVYFCASSY--------------------', 'TRBV6-4*01': 'IAGITQAPTSQILAAGRRMTLRCTQDMRH-------NAMYWYRQDLGLGLRLIHYSNT----AGTTGKGEVP-DGYSVSRA-NTDDFPLTLASAVPSQTSVYFCASSD--------------------', 'TRBV6-4*02': 'TAGITQAPTSQILAAGRSMTLRCTQDMRH-------NAMYWYRQDLGLGLRLIHYSNT----AGTTGKGEVP-DGYSVSRA-NTDDFPLTLASAVPSQTSVYFCASSD--------------------', 'TRBV6-5*01': 'NAGVTQTPKFQVLKTGQSMTLQCAQDMNH-------EYMSWYRQDPGMGLRLIHYSVG----AGITDQGEVP-NGYNVSRS-TTEDFPLRLLSAAPSQTSVYFCASSY--------------------', 'TRBV6-6*01': 'NAGVTQTPKFRILKIGQSMTLQCTQDMNH-------NYMYWYRQDPGMGLKLIYYSVG----AGITDKGEVP-NGYNVSRS-TTEDFPLRLELAAPSQTSVYFCASSY--------------------', 'TRBV6-6*02': 'NAGVTQTPKFRILKIGQSMTLQCAQDMNH-------NYMYWYRQDPGMGLKLIYYSVG----AGITDKGEVP-NGYNVSRS-TTEDFPLRLELAAPSQTSVYFCASSY--------------------', 'TRBV6-8*01': 'NAGVTQTPKFHILKTGQSMTLQCAQDMNH-------GYMSWYRQDPGMGLRLIYYSAA----AGTTDK-EVP-NGYNVSRL-NTEDFPLRLVSAAPSQTSVYLCASSY--------------------', 'TRBV6-9*01': 'NAGVTQTPKFHILKTGQSMTLQCAQDMNH-------GYLSWYRQDPGMGLRRIHYSVA----AGITDKGEVP-DGYNVSRS-NTEDFPLRLESAAPSQTSVYFCASSY--------------------', 'TRBV7-2*01': 'GAGVSQSPSNKVTEKGKDVELRCDPISGH-------TALYWYRQSLGQGLEFLIYFQG----NSAPDKSGLPSDRFSAERT-GGSVSTLTIQRTQQEDSAVYLCASSL--------------------', 'TRBV7-2*02': 'GAGVSQSPSNKVTEKGKDVELRCDPISGH-------TALYWYRQRLGQGLEFLIYFQG----NSAPDKSGLPSDRFSAERT-GESVSTLTIQRTQQEDSAVYLCASSL--------------------', 'TRBV7-2*03': 'GAGVSQSPSNKVTEKGKDVELRCDPISGH-------TALYWYRQRLGQGLEFLIYFQG----NSAPDKSGLPSDRFSAERT-GESVSTLTIQRTQQEDSAVYLCTSSL--------------------', 'TRBV7-3*01': 
'GAGVSQTPSNKVTEKGKYVELRCDPISGH-------TALYWYRQSLGQGPEFLIYFQG----TGAADDSGLPNDRFFAVRP-EGSVSTLKIQRTERGDSAVYLCASSL--------------------', 'TRBV7-4*01': 'GAGVSQSPRYKVAKRGRDVALRCDSISGH-------VTLYWYRQTLGQGSEVLTYSQS----DAQRDKSGRPSGRFSAERP-ERSVSTLKIQRTEQGDSAVYLCASSL--------------------', 'TRBV7-6*01': 'GAGVSQSPRYKVTKRGQDVALRCDPISGH-------VSLYWYRQALGQGPEFLTYFNY----EAQQDKSGLPNDRFSAERP-EGSISTLTIQRTEQRDSAMYRCASSL--------------------', 'TRBV7-7*01': 'GAGVSQSPRYKVTKRGQDVTLRCDPISSH-------ATLYWYQQALGQGPEFLTYFNY----EAQPDKSGLPSDRFSAERP-EGSISTLTIQRTEQRDSAMYRCASSL--------------------', 'TRBV7-8*01': 'GAGVSQSPRYKVAKRGQDVALRCDPISGH-------VSLFWYQQALGQGPEFLTYFQN----EAQLDKSGLPSDRFFAERP-EGSVSTLKIQRTQQEDSAVYLCASSL--------------------', 'TRBV7-8*02': 'GAGVSQSPRYKVAKRGQDVALRCDPISGH-------VSLFWYQQALGQGPEFLTYFQN----EAQLDKSGLPSDRFFAERP-EGSVSTLKIQRTQKEDSAVYLCASSL--------------------', 'TRBV7-9*01': 'DTGVSQNPRHKITKRGQNVTFRCDPISEH-------NRLYWYRQTLGQGPEFLTYFQN----EAQLEKSRLLSDRFSAERP-KGSFSTLEIQRTEQGDSAMYLCASSL--------------------', 'TRBV7-9*03': 'DTGVSQDPRHKITKRGQNVTFRCDPISEH-------NRLYWYRQTLGQGPEFLTYFQN----EAQLEKSRLLSDRFSAERP-KGSFSTLEIQRTEQGDSAMYLCASSL--------------------', 'TRBV9*01': 'DSGVTQTPKHLITATGQRVTLRCSPRSGD-------LSVYWYQQSLDQGLQFLIQYYN----GEERAKGNIL-ERFSAQQF-PDLHSELNLSSLELGDSALYFCASSV--------------------', 'TRBV9*02': 'DSGVTQTPKHLITATGQRVTLRCSPRSGD-------LSVYWYQQSLDQGLQFLIHYYN----GEERAKGNIL-ERFSAQQF-PDLHSELNLSSLELGDSALYFCASSV--------------------'}, 'mouse': {'TRBV1*01': 'VTLLEQNPRWRLVPRGQAVNLRCILKNSQ------YPWMSWYQQDLQKQLQWLFTLRS----PGDKEVKSLPGADYLATRV-TDTELRLQVANMS--QGRTLYCTCSA--------------------', 'TRBV12-1*01': 'DAGVIQTPRHKVTKMGQEVTLKCQPISGH-------EGLFWYSQTSVQGPKLLISFNN----EAPIDDSGMPKEWFSAEIS-NKSLSTLKIKSTEPGDSATYLCASSI--------------------', 'TRBV12-2*01': 'DAGVIQTPRHNVTKMGQEVTLKCQPISGH-------AALYWYSQTSVQGPKLLIYFNN----QAPIDDSGMPKERFSAEIS-NKSLSTLKIKPTEPGDSATYLCASSV--------------------', 'TRBV13-1*01': 'EAAVTQSPRNKVTVTGGNVTLSCRQTNSH-------NYMYWYRQDTGHGLRLIHYSYG----AGNLRIGDVP-DGYKATRT-TQEDFFLLLELASPSQTSLYFCASSD--------------------', 'TRBV13-1*02': 'EAAVTQSPRNKVTVTGGNVTLSCRQTNSH-------NYMYWYRQDTGHGLRLIHYSYG----AGNLQIGDVP-DGYKATRT-TQEDFFLLLELASPSQTSLYFCASSD--------------------', 'TRBV13-2*01': 'EAAVTQSPRNKVAVTGGKVTLSCNQTNNH-------NNMYWYRQDTGHGLRLIHYSYG----AGSTEKGDIP-DGYKASRP-SQENFSLILELATPSQTSVYFCASGD--------------------', 'TRBV13-3*01': 'EAAVTQSPRSKVAVTGGKVTLSCHQTNNH-------DYMYWYRQDTGHGLRLIHYSYV----ADSTEKGDIP-DGYKASRP-SQENFSLILELASLSQTAVYFCASSD--------------------', 'TRBV14*01': 'EAGVTQSPRYAVLQEGQAVSFWCDPISGH-------DTLYWYQQPRDQGPQLLVYFRD----EAVIDNSQLPSDRFSAVRP-KGTNSTLKIQSAKQGDTATYLCASSF--------------------', 'TRBV15*01': 'GAMVIQSSRYQVTRVGKPVNLSCSQNMNH-------DSMYWYQQKLNQAPKLLLYYYD----KNFNREAETS-DNFQASRP-NTSFCSLNILSPGLGDSGVYLCASSK--------------------', 'TRBV16*01': 'HAEVTQTPGHLVKGKGQKAKMHCVPIKGH-------SYVDWYQQIPAKEFKFFISFQN----DAIFDKTGMPEKRFSAACP-QNASCSLEIKPTELQDSAVYFCASSE--------------------', 'TRBV17*01': 'DTTVKQNPRYKLARVGKPVNLICSQTMNH-------DTMYWYQKKPNQAPKLLLFYYD----KILNREADTF-EKFQSSRP-NNSFCSLYIGSAGLEYSAMYLCASSR--------------------', 'TRBV19*01': 'DGGITQTPKYLLREEGREVTLKCEQDFNH-------DYMYWYRQDPGQGPRLIYYSPL----KDDVQRGDIP-EGYFGSRG-KKTIFSLAVTSTRKNHTALYLCASSR--------------------', 'TRBV2*01': 'DPKIIQKPKYLVAVTGSEKILICEQYLGH-------NAMYWYRQSAKKPLEFMFSYSY----QKLMDNQTAS-SRFQPQSS-KKNHLDLQITALKPDDSATYFCASSQ--------------------', 'TRBV20*01': 'GALVYQYPRRTICKSGTSMRMECQAVGFQ------ATSVAWYRQSPQKTFELIALSTVN---SAIKYEQNFTQEKFPISHP-NLSFSSMTVLNAYLEDRGLYLCGAR---------------------', 'TRBV23*01': 
'DAAVTQKPRYLIKMKGQEAEMKCIPEKGH-------TAVFWYQQKQSKELKFLIYFQN----QQPLDQIDMVKERFSAVCP-SSSLCSLGIRTCEAEDSALYLCSSSQ--------------------', 'TRBV24*01': 'VAGVTQTPRYLVKEKGQKAHMSCSPEKGH-------TAFYWYQQNQKQELTFLISFRN----EEIMEQTDLVKKRFSAKCS-SNSRCILEILSSEEDDSALYLCASSL--------------------', 'TRBV26*01': 'GAVVTQFPKHRIVGPGKELTLQCLQDMNY-------VLMYWYRQDPGFGLQLIYYSTG----ARNFEKGDVP-QGYRVSRK-DLQSFSLTLESASTNQTSVYLCTSSE--------------------', 'TRBV29*01': 'STLLSQEPRRDICQCGTSMTIQCETDTQV-------SLMYWYRQLPGQSLILIATANQG---MEATYESGFTKEKFPISRP-TLTFSSLTVNNMSFEDSSFYLCSAE---------------------', 'TRBV3*01': 'DTAVSQTPKHLIAQTGNKKFLKCEQKLGH-------DTMYWYKQDSQKLLKVMFIYNN----KDLILNETVP-RCFLPESP-DKTRLNLHINFLETGDTAVYFCASSL--------------------', 'TRBV30*01': 'SVLLYQKPNRDICQSGTSLKIQCVADSQV-------VSMFWYQQFQEQSLMLMATANEG---SEATYESGFTKDKFPISRP-NLTFSTLTVNNARPGDSSIYFCSSR---------------------', 'TRBV31*01': 'AQTIHQWPVAEIKAVGSPLSLGCTIKGKS------SPNLYWYWQATGGTLQQLFYSIT-----VGQVESVVQ-LNLSASRP-KDDQFILSTEKLLLSHSGFYLCAWS---------------------', 'TRBV5*01': 'NTKITQSPRYLIL-GRANKSLECEQHLGH-------NAMYWYKQSAEKPPELMFLYNL----KQLIRNETVP-SRFIPECP-DSSKLLLHISAVDPEDSAVYFCASSQ--------------------', 'TRBV18*01': 'NAGVTQNPRHLVIRTGQEATLKCSPEKGH-------VYFYWYQQPQGESLKFMVYLQN----AQTVDDSGMSKERFSTNIS-KEGQSILKIQPAELGDSAVYFCASSL--------------------', 'TRBV21*01': 'DPNVTQTPGYLVKGKGQKAKMECVPIKGH-------SYVFWYHRKLEEEFKFLVYLQD----KDIVDKIEGFDNQFSAECP-KNSPCTLEINSTEPGDSALYMCASNK--------------------', 'TRBV22*01': 'HAEIYQMPAFLLTRAGRAVTLECKQNLRY-------NAMYWYWQDPGQSLRLIYYSTV----EKDVQRGDIT-EGYNVSRE-EKGLFPLTMNLAHTNQTGVYLCSGSA--------------------', 'TRBV25*01': 'DAGITQTPRHCVSGTGKKIVLECSQTMGY-------ENMYWYRQDPGKALQLIHYSYG----VNNTENEELS-SGSTVSRL-RKEVFPLTLESASPSQTSLYLCASSE--------------------', 'TRBV27*01': 'EAGVTQTPRHFITRTGRQLTLYCSQDMDH-------DVMFWYRQDPGVGLKLIYFSRN----VKFIEVGDVP-DGYSVLRK-EKKDFPLTLKSTGTNQTSLYLCASSV--------------------', 'TRBV4-1*01': 'GNGVTQTPKHLVSGTGRSVTLKCKQHLGH-------NAMYWYKQSVQKPPKLMLAYSY----KDLLQNETAS-SRFLPDRS-NNSQLDLQIKALEPEDSALYLCASSK--------------------', 'TRBV4-2*01': 'DPGVIQTPKHLVTGTTRSVTLKCKQHLGH-------NAMYWYKQSVQKPPKLMLAYNY----KNLFENATAS-SRFLPDRS-NNSQLDLQIKALEPEDSALYLCASSK--------------------', 'TRBV5-2*01': 'ASGVTQTPRHLIKTRGQKVTLRCSFVSGH-------LSVYWYQQVLGQGPRFLIQYYD----RKERDKGDMP-ERFSARQF-SNSSSQLDLDLLELEDSALYLCASSE--------------------', 'TRBV7-1*01': 'GPGVSQFSRHRVTERGQNVTLGCDPISGH-------AVLYWYRQTIGKGPEFLVYFQN----ADALDKSGMTNDRLSAKRS-EGTNSTLQIQRAEQGDSAVHLCASSP--------------------', 'TRBV7-2*01': 'GPGVSQFPRHRVTERGQNVTLGCDPISGH-------TVLYWYRQTTGKGLEFLVYFQN----TDAVDKSGMTNDRLSVQRS-KGTNSTLQIQRAEQGDSAVYLCASSP--------------------', 'TRBV8*01': 'EAEITQTPRYSIIQIGTKKTLECSQDMNH-------FAMFWYRQDPGQGLRLIYYSSD----SPSTTKGDVA-EGYSVSRK-EQKFSPLTLESASTNQTSVYLCASSE--------------------'}}, 'G': {'human': {'TRGV2*01': 'SSNLEGRTKSVIRQTGSSAEITCDLAEGS------NGYIHWYLHQEGKAPQRLQYYDSY--NSKVVLESGVSPGKYYTYAS-TRNNLRLILRNLIENDSGVYYCATWD--------------------', 'TRGV2*03': 'SSNLEGRTKSVIRQTGSSAEITCDLAEGS------NGYIHWYLHQEGKAPQRLQYYDSY--NSKVVLESGVSPGKYYTYAS-TRNNLRLILRNLIENDFGVYYCATWD--------------------', 'TRGV3*01': 'SSNLEGRTKSVTRQTGSSAEITCDLTVTN------TFYIHWYLHQEGKAPQRLLYYDVS--TARDVLESGLSPGKYYTHTP-RRWSWILRLQNLIENDSGVYYCATWD--------------------', 'TRGV3*02': 'SSNLEGRTKSVTRQTGSSAEITCDLTVTN------TFYIHWYLHQEGKAPQRLLYYDVS--TARDVLESGLSPGKYYTHTP-RRWSWILRLQNLIENDSGVYYCATWD--------------------', 'TRGV4*01': 'SSNLEGRTKSVIRQTGSSAEITCDLAEGS------TGYIHWYLHQEGKAPQRLLYYDSY--TSSVVLESGISPGKYDTYGS-TRKNLRMILRNLIENDSGVYYCATWD--------------------', 'TRGV4*02': 
'SSNLEGRTKSVIRQTGSSAEITCDLAEGS------TGYIHWYLHQEGKAPQRLLYYDSY--TSSVVLESGISPGKYDTYGS-TRKNLRMILRNLIENDSGVYYCATWD--------------------', 'TRGV5*01': 'SSNLEGGTKSVTRPTRSSAEITCDLTVIN------AFYIHWYLHQEGKAPQRLLYYDVS--NSKDVLESGLSPGKYYTHTP-RRWSWILILRNLIENDSGVYYCATWD--------------------', 'TRGV8*01': 'SSNLEGRTKSVTRPTGSSAVITCDLPVEN------AVYTHWYLHQEGKAPQRLLYYDSY--NSRVVLESGISREKYHTYAS-TGKSLKFILENLIERDSGVYYCATWD--------------------', 'TRGV9*01': 'AGHLEQPQISSTKTLSKTARLECVVSGITI----SATSVYWYRERPGEVIQFLVSISYD---GTVRKESGIPSGKFEVDRIPETSTSTLTIHNVEKQDIATYYCALWE--------------------', 'TRGV9*02': 'AGHLEQPQISSTKTLSKTARLECVVSGIKI----SATSVYWYRERPGEVIQFLVSISYD---GTVRKESGIPSGKFEVDRIPETSTSTLTIHNVEKQDIATYYCALWE--------------------'}, 'mouse': {'TRGV1*01': 'LGQLEQTELSVTRETDESAQISCIVSLPYF----SNTAIHWYRQKAKK-FEYLIYVST----NYNQRPLGGKNKKIEASKDFQTSTSTLKINYLKKEDEATYYCAVWI--------------------', 'TRGV1*02': 'LGQLEQTELSVTRETDESAQISCIVSLPYF----SNTAIHWYRQKAKK-FEYLIYVST----NYNQRPLGGKNKKIEASKDFQTSTSTLKINYLKKEDEATYYCAVWI--------------------', 'TRGV2*01': 'LGQLEQTELSVTRETDENVQISCIVYLPYF----SNTAIHWYRQKTNQQFEYLIYVAT----NYNQRPLGGKHKKIEASKDFKSSTSTLEINYLKKEDEATYYCAVWM--------------------', 'TRGV3*01': 'LGQLEQTELSVTRATDESAQISCIVSLPCF----SNTAIHWYRQKPNQQFEYLIYVET----NYNQQPLGGKNKKIEASKDFQTSTSTLKINYLKKEDEATYYCAVWI--------------------', 'TRGV5*01': 'DSWISQDQLSFTRRPNKTVHISCKLSGVPL----HNTIVHWYQLKEGEPLRRIFYGS------VKTYKQDKSHSRLEIDEK-DDGTFYLIINNVVTSDEATYYCACWD--------------------', 'TRGV6*01': 'TSLTSPLGSYVIKRKGNTAFLKCQIKTSVQK---PDAYIHWYQEKPGQRLQRMLCSSSK---ENIVYEKDFSDERYEARTWQSDLSSVLTIHQVREEDTGTYYCACWD--------------------', 'TRGV6*02': 'SSLTSPLGSYVIKRKGNTAFLKCQIKTSVQK---PDAYIHWYQEKPGQRLQRMLCSSSK---ENIVYEKDFSDERYEARTWQSDLSSVLTIHQVTEEDTGTYYCACWD--------------------', 'TRGV7*01': 'SSNLEERIMSITKLEGSSAIMTCDTHRTG-------TYIHWYRFQKGRAPEHLLYYNFV--SSTTVVDSRFNSEKYHVYEG-PDKRYKFVLRNVEESDSALYYCASWA--------------------', 'TRGV7*02': 'SSNLEERIMSITKLEGSSAIMTCDTHRTG-------TYIHWYRFQKGRAPEHLLYYNFV--SSTTVVDSRFNLEKYHVYEG-PDKRYKFVLRNVEESDSALYYCASWA--------------------'}}, 'D': {'human': {'TRAV14/DV4*01': 'AQKITQTQPGMFVQEKEAVTLDCTYDTSDP-----SYGLFWYKQPSSGEMIFLIYQGSY--DQQNATE-----GRYSLNFQKARKSANLVISASQLGDSAMYFCAMRE--------------------', 'TRAV14/DV4*02': 'AQKITQTQPGMFVQEKEAVTLDCTYDTSDQ-----SYGLFWYKQPSSGEMIFLIYQGSY--DEQNATE-----GRYSLNFQKARKSANLVISASQLGDSAMYFCAMRE--------------------', 'TRAV23/DV6*01': 'QQQVKQSPQSLIVQKGGISIINCAYENTA------FDYFPWYQQFPGKGPALLIAIRPD---VSEKKE-----GRFTISFNKSAKQFSLHIMDSQPGDSATYFCAAS---------------------', 'TRAV23/DV6*05': 'QQQVKQSPQSLIVQKGGISIINCAYENTA------FDYFPWYQQFPGKGPALLIAIRPD---VSEKKE-----GRFTISFNKSAKQFSSHIMDSQPGDSATYFCAAS---------------------', 'TRAV29/DV5*01': 'DQQVKQNSPSLSVQEGRISILNCDYTNSM------FDYFLWYKKYPAEGPTFLISISSI---KDKNED-----GRFTVFLNKSAKHLSLHIVPSQPGDSAVYFCAAS---------------------', 'TRAV29/DV5*02': 'DQQVKQNSPSLSVQEGRISILNCDYTNSM------FDYFLWYKKYPAEGPTFLISISSI---KDKNED-----GRFTVFLNKSAKHLSLDIVPSQPGDSAVYFCAAS---------------------', 'TRAV29/DV5*04': 'DQQVKQNSPSLSVQEGRISILNCDYTNSM------FDYFLWYKKYPAEGPTFLISISSI---KDKNED-----GRFTVFLNKSAKHLSLHIVPSQPGDSAVYFCAAS---------------------', 'TRAV36/DV7*01': 'EDKVVQSPLSLVVHEGDTVTLNCSYEVTN------FRSLLWYKQEKKAP-TFLFMLTSS---GIEKKS-----GRLSSILDKKELSSILNITATQTGDSAIYLCAVE---------------------', 'TRAV36/DV7*05': 'EDKVVQSPLSLVVHEGDTVTLNCSYEVTN------FRSLLWYKQEKKAP-TFLFMLTSS---GIEKKS-----GRLSSILDKKELFSILNITATQTGDSAIYLCAVE---------------------', 'TRAV38-2/DV8*01': 'AQTVTQSQPEMSVQEAETVTLSCTYDTSES-----DYYLFWYKQPPSRQMILVIRQEAY--KQQNATE-----NRFSVNFQKAAKSFSLKISDSQLGDAAMYFCAYRS--------------------', 'TRDV1*01': 
'AQKVTQAQSSVSMPVRKAVTLNCLYETSWW-----SYYIFWYKQLPSKEMIFLIRQG-------SDEQNAKS-GRYSVNFKKAAKSVALTISALQLEDSAKYFCALGE--------------------', 'TRDV2*01': 'AIELVPEHQTVPVSIGVPATLRCSMKGEAI----GNYYINWYRKTQGNTITFIYREK-------DIYGPGFK-DNFQGDIDIAKNLAVLKILAPSERDEGSYYCACDT--------------------', 'TRDV2*03': 'AIELVPEHQTVPVSIGVPATLRCSMKGEAI----GNYYINWYRKTQGNTMTFIYREK-------DIYGPGFK-DNFQGDIDIAKNLAVLKILAPSERDEGSYYCACDT--------------------', 'TRDV3*01': 'CDKVTQSSPDQTVASGSEVVLLCTYDTVYS-----NPDLFWYRIRPDYSFQFVFYGDN----SRSEGADFTQ-GRFSVKHILTQKAFHLVISPVRTEDSATYYCAF----------------------', 'TRDV3*02': 'CDKVTQSSPDQTVASGSEVVLLCTYDTVYS-----NPDLFWYWIRPDYSFQFVFYGDN----SRSEGADFTQ-GRFSVKHILTQKAFHLVISPVRTEDSATYYCAF----------------------'}, 'mouse': {'TRAV13-4/DV7*01': 'GQQVQQSPASLVLQEGENAELQCNFSSTA-------TQLQWFYQRPGGSLVSLLYNPS-----GTKHT-----GRLTSTTVTKERRSSLHISSSQITDSGTYFCAME---------------------', 'TRAV13-4/DV7*02': 'GQQVQQSPASLVLQEGENAELQCNFSSTA-------TRLQWFYQRPGGSLVSLLSNPS-----GTKHT-----GRLTSTTVTKERRGSLHISSSQITDSGTYLCAME---------------------', 'TRAV14D-3/DV8*01': 'QQQVRQSSQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLIAIRSV---SDKKED-----GRFTIFFNKREKNLSLHIKDSQPGDSATYFCAAS---------------------', 'TRAV14D-3/DV8*02': 'QQQVRQSPQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLIAIRSV---SDKKED-----GRFTIFFNKREKKLSLHITDSQPGDSATYFCAAS---------------------', 'TRAV14D-3/DV8*03': 'QQQVRQSPQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLIAIRSV---SDKKED-----GGFTIFFNKREKNLSLHIKDSQPGDSATYFCAAS---------------------', 'TRAV14D-3/DV8*08': 'QQQVRQSPQSLTVWEGETAILNCSYENSA------FDYFPWYQQFPGEGPALLISILSV---SDKKED-----GRFTIFFNKREKKLSLHIADSQPGDSATYFCAAS---------------------', 'TRAV15-1/DV6-1*01': 'AQKVIQVWSTTSRQEGEKLTLDCSYKTSQV-----LYHLFWYKHLLSGEMVLLIRQMPS--TIAIERS-----GRYSVVFQKSRKSISLVISTLQPDDSGKYFCALWE--------------------', 'TRAV15-2/DV6-2*01': 'AQKVTQVQSTGSSQWG-EVTLHCSYETSEY-----FYVILWYKQLFSGEMVFLIYQTSF--DTQNQRN-----SRYSVVFQKSLKSISLVISASQPEDSGTYFCALSE--------------------', 'TRAV15D-1/DV6D-1*01': 'AQKVIQVWSTPSRQEGEELTLDCSYETSQV-----LYHLFWYKHLLSGEMVFLIRQMSS--STAKERS-----GRYSVVFQKSLKSISLVISALQPDDSGKYFCALWE--------------------', 'TRAV15D-1/DV6D-1*02': 'AQKVIQVWSTASRQEGEELTLDCSYETSQV-----LYHLFWYKHLLSGEMVFLIRQMSS--STAKERS-----GRYSVVFQKSLKSISLVISALQPDDSGKYFCALWE--------------------', 'TRAV15D-2/DV6D-2*01': 'AQRVTQVQPTGSSQWGEEVTLDCSYETSEY-----FYCIIWYRQLFSGEMVFLIYQTSF--DTQNQRN-----GRYSVVFQKSLKSISLVISASQPEDSGTYFCALSE--------------------', 'TRAV15D-2/DV6D-2*03': 'AQRVTQVQPTGSSQWGEEVTLDCSYETSEY-----FYRIFWYRQLFSGEMVFLIYQPSF--DTQNQRS-----GRYSVVFQKSFKSISLVISASQPEDSGTYFCALSE--------------------', 'TRAV16D/DV11*01': 'AQKVTQTQTSISVMEKTTVTMDCVYETQDS-----SYFLFWYKQTASGEIVFLIRQDSY--KKENATV-----GHYSLNFQKPKSSIGLIITATQIEDSAVYFCAMRE--------------------', 'TRAV16D/DV11*02': 'AQKVTQTQTSISVMEKTTVTMDCVYETQDS-----SYFLFWYKQTASGEIVFLIRQDSY--KKENATV-----GHYSLNFQKPKSSIGLIITATQIEDSAVYFCAMRE--------------------', 'TRAV21/DV12*01': 'DAKTTQ-PDSMESTEGETVHLPCSHATISG-----NEYIYWYRQVPLQGPEYVTHGLQ-----QNTTN-----SMAFLAIASDRKSSTLILTHVSLRDAAVYHCILRV--------------------', 'TRAV4-4/DV10*01': 'GDQVEQSPSALSLHEGTDSALRCNFTTTM-------RSVQWFRQNSRGSLISLFYLAS-----GTKEN-----GRLKSAFDSKERYSTLHIRDAQLEDSGTYFCAAE---------------------', 'TRAV6-7/DV9*01': 'GDSVTQTEGQVALSEEDFLTIHCNYSASG------YPALFWYVQYPGEGPQFLFRASRD---KEKGSS-----RGFEATYNKETTSFHLQKASVQESDSAVYYCALG---------------------', 'TRDV1*01': 'TQMLHQSPQSLTIQEGDEVTMSCNLSTSL-------YALLWYRQGDDGSLVSLVTLQ-------KGGDEKSK-DKITAKLDKKMQQSSLQIQASQPSHSGTYLCGGK---------------------', 'TRDV2-1*01': 
'AQTVSQHQQEKSVQVAESATLDCTYDTSDT-----NYLLFWYKQQGGQVTLVIRQEA-------YKQYNAME-NRFSVNFQKAAKSFSLEISDSQLGDAATYFCALRG--------------------', 'TRDV2-2*01': 'AQTVSQPQKKKSVQVAESATLDCTYDTSDT-----NYLLFWYKQQGGQVTLVILQEA-------YKQYNATL-NRFSVNFQKAAKSFSLEISDSQLGDAATYFCALME--------------------', 'TRDV4*01': 'DVYLEPVAKTFTVVAGDPASFYCTVTGGDM----KNYHMSWYKKNGTNALFLVYKLN-------SNSTDGGK-SNLKGKINISKNQFILDIQKATMKDAGTYYCGSDI--------------------', 'TRDV5*01': 'CITLTQSSTDQTVASGTEVTLLCTYNADSP-----NPDLFWYRKRPDRSFQFILYRDD----TSSHDADFVQ-GRFSVKHSKANRTFHLVISPVSLEDSATYYCASGY--------------------', 'TRDV5*02': 'CITLTQSSTDQTVASGTEVTLLCTYNADSP-----NPDLFWYRKRPDRSFQFILYRDD----TSSHDADFVQ-GRFSVKHSKANRTFHLVISPVSLEDSATYYCASGY--------------------', 'TRDV5*03': 'CITLTQSSTDQTVASGTEVTLLCTYNADSP-----NPDLFWYRKRPDRSFQFILYRDD----TSSHDADFVQ-GRFSVKHSKANRTFHLVISPVSLEDSATYYCASGY--------------------'}}}} diff --git a/spaces/ma-xu/LIVE/color.h b/spaces/ma-xu/LIVE/color.h deleted file mode 100644 index c787105636d42b4706110500982d0ce576eda47e..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/color.h +++ /dev/null @@ -1,63 +0,0 @@ -#pragma once - -#include "diffvg.h" -#include "vector.h" -#include "ptr.h" - -enum class ColorType { - Constant, - LinearGradient, - RadialGradient -}; - -struct Constant { - Vector4f color; - - ptr get_ptr() { - return ptr(this); - } -}; - -struct LinearGradient { - LinearGradient(const Vector2f &begin, - const Vector2f &end, - int num_stops, - ptr stop_offsets, - ptr stop_colors) - : begin(begin), end(end), num_stops(num_stops), - stop_offsets(stop_offsets.get()), stop_colors(stop_colors.get()) {} - - ptr get_ptr() { - return ptr(this); - } - - void copy_to(ptr stop_offset, - ptr stop_colors) const; - - Vector2f begin, end; - int num_stops; - float *stop_offsets; - float *stop_colors; // rgba -}; - -struct RadialGradient { - RadialGradient(const Vector2f ¢er, - const Vector2f &radius, - int num_stops, - ptr stop_offsets, - ptr stop_colors) - : center(center), radius(radius), num_stops(num_stops), - stop_offsets(stop_offsets.get()), stop_colors(stop_colors.get()) {} - - ptr get_ptr() { - return ptr(this); - } - - void copy_to(ptr stop_offset, - ptr stop_colors) const; - - Vector2f center, radius; - int num_stops; - float *stop_offsets; - float *stop_colors; // rgba -}; diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/apply_sr.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/apply_sr.py deleted file mode 100644 index 2bc5da02c5ca700228af406fd6185aa29de656b9..0000000000000000000000000000000000000000 --- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/apply_sr.py +++ /dev/null @@ -1,25 +0,0 @@ -import cv2 -import numpy as np -import torch - -from basicsr.archs.rrdbnet_arch import RRDBNet - - -def init_sr_model(model_path): - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32) - model.load_state_dict(torch.load(model_path)['params'], strict=True) - model.eval() - model = model.cuda() - return model - - -def enhance(model, image): - img = image.astype(np.float32) / 255. 
- img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float() - img = img.unsqueeze(0).cuda() - with torch.no_grad(): - output = model(img) - output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy() - output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0)) - output = (output * 255.0).round().astype(np.uint8) - return output diff --git a/spaces/matthoffner/open-codetree/components/Modals/AuthModal.tsx b/spaces/matthoffner/open-codetree/components/Modals/AuthModal.tsx deleted file mode 100644 index 9141f9641ce07e88f4dea6e91bcf9c5e068fd035..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/open-codetree/components/Modals/AuthModal.tsx +++ /dev/null @@ -1,73 +0,0 @@ -import React from "react"; -import { motion } from "framer-motion"; -import { useAppDispatch } from "../../store/hook"; -import { getGithubOAuthURL, getGoogleOAuthURL } from "../../utils/getOAuthUrl"; -import { nativePopup } from "../../utils/nativePopup"; -import { close_modal } from "../../store/features/modalSlice"; -import { modalVariant } from "./config"; -import { RiGithubFill, RiGoogleFill } from "react-icons/ri"; - -const AuthModal = () => { - const dispatch = useAppDispatch(); - - return ( - e.stopPropagation()} - > -
                  -
                  - -
                  - - - -
                  -
                  - - ); -}; - -export default AuthModal; diff --git a/spaces/merve/data-leak/public/data-leak/index.html b/spaces/merve/data-leak/public/data-leak/index.html deleted file mode 100644 index 48382c629935410818fbefd120b3f743019c4f40..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/public/data-leak/index.html +++ /dev/null @@ -1,170 +0,0 @@ - - - - - - - - - - - - - - - - - - Why Some Models Leak Data - - - - - - - - - - - - - - - -
                  - -
                  - -

                  Why Some Models Leak Data

                  -
                  Machine learning models use large amounts of data, some of which can be sensitive. If they're not trained correctly, sometimes that data is inadvertently revealed.
                  - - - -

                  Let’s take a look at a game of soccer.

                  - - -
                  - -



                  -

                  Using the position of each player as training data, we can teach a model to predict which team would get to a loose ball first at each spot on the field, indicated by the color of the pixel.
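To make that concrete, here is a minimal Python sketch of the kind of map being predicted, assuming every player runs at the same speed so whoever is nearest reaches a loose ball first. The positions are invented, and this closed-form nearest-player rule is only a stand-in for the flexible model the piece actually trains.

import numpy as np

# Invented player positions on a 100 x 60 pitch, one (x, y) row per player.
red_team = np.array([[10, 30], [25, 15], [25, 45], [40, 30], [55, 20]], dtype=float)
yellow_team = np.array([[90, 30], [75, 15], [75, 45], [60, 30], [45, 40]], dtype=float)

def first_to_ball(red, yellow, width=100, height=60):
    """For every pixel, is a red player the closest (and so first to a loose ball)?"""
    xs, ys = np.meshgrid(np.arange(width), np.arange(height))
    cells = np.stack([xs, ys], axis=-1).astype(float)                        # (H, W, 2)
    d_red = np.linalg.norm(cells[:, :, None, :] - red, axis=-1).min(-1)      # nearest red player
    d_yellow = np.linalg.norm(cells[:, :, None, :] - yellow, axis=-1).min(-1)
    return d_red < d_yellow                                                  # (H, W) boolean map

reach_map = first_to_ball(red_team, yellow_team)
print("share of the pitch the red team reaches first:", reach_map.mean())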

                  -
                  - -

                  It updates in real-time—drag the players around to see the model change.

                  -



                  -

                  This model reveals quite a lot about the data used to train it. Even without the actual positions of the players, it is simple to see where players might be.

                  -
                  - -

                  Click this button to move the players

                  -

                  Take a guess at where the yellow team’s goalie is now, then check their actual position. How close were you?

                  -

                  Sensitive Salary Data

                  - -

                  In this specific soccer example, being able to make educated guesses about the data a model was trained on doesn’t matter too much. But what if our data points represent something more sensitive?

                  -
                  - -

                  We’ve fed the same numbers into the model, but now they represent salary data instead of soccer data. Building models like this is a common technique to detect discrimination. A union might test if a company is paying men and women fairly by building a salary model that takes into account years of experience. They can then publish the results to bring pressure for change or show improvement.

                  -

                  In this hypothetical salary study, even though no individual salaries have been published, it is easy to infer the salary of the newest male hire. And carefully cross referencing public start dates on LinkedIn with the model could almost perfectly reveal everyone’s salary.

                  -

                  Because the model here is so flexible (there are hundreds of square patches with independently calculated predictions) and we have so few data points (just 22 people), it is able to “memorize” individual data points. If we’re looking to share information about patterns in salaries, a simpler and more constrained model like a linear regression might be more appropriate.

                  -
                  - -

                  By boiling down the 22 data points to two lines we’re able to see broad trends without being able to guess anyone’s salary.
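Here is a minimal sketch of that constrained alternative, using invented (years of experience, salary) pairs: one ordinary-least-squares line per group, so only two slopes and two intercepts are ever published.

import numpy as np

# Invented (years_experience, salary_in_thousands) pairs for two groups.
men = np.array([[1, 52], [3, 60], [5, 64], [8, 75], [12, 88]], dtype=float)
women = np.array([[2, 54], [4, 61], [6, 67], [9, 76], [11, 84]], dtype=float)

def fit_line(points):
    """Ordinary least squares: salary ~ slope * years + intercept."""
    years, salary = points[:, 0], points[:, 1]
    slope, intercept = np.polyfit(years, salary, deg=1)
    return slope, intercept

for label, group in [("men", men), ("women", women)]:
    slope, intercept = fit_line(group)
    print(f"{label}: salary ~ {slope:.2f} * years + {intercept:.2f}")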

                  -

                  Subtle Leaks

                  - -

                  Removing complexity isn’t a complete solution though. Depending on how the data is distributed, even a simple line can inadvertently reveal information.

                  -
                  - -

                  In this company, almost all the men started several years ago, so the slope of the line is especially sensitive to the salary of the new hire.

                  -

                  Is their salary higher or lower than average? Based on the line, we can make a pretty good guess.

                  -

                  Notice that changing the salary of someone with a more common tenure barely moves the line. In general, more typical data points are less susceptible to being leaked. This sets up a tricky trade off: we want models to learn about edge cases while being sure they haven’t memorized individual data points.
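One way to see that sensitivity with numbers (all invented): bump the lone recent hire's salary, then bump a typical employee's salary by the same amount, and compare how far the fitted slope moves in each case.

import numpy as np

# Invented data: almost everyone started years ago, one recent hire at year 1.
years = np.array([1, 8, 9, 10, 11, 12, 13], dtype=float)
salary = np.array([55, 78, 80, 83, 85, 88, 90], dtype=float)   # in thousands

def slope(tenure, pay):
    return np.polyfit(tenure, pay, deg=1)[0]

base = slope(years, salary)

new_hire_bumped = salary.copy()
new_hire_bumped[0] += 10            # change the outlier's salary
typical_bumped = salary.copy()
typical_bumped[3] += 10             # change a salary at a common tenure

print("slope shift from the new hire:  ", abs(slope(years, new_hire_bumped) - base))
print("slope shift from a typical row: ", abs(slope(years, typical_bumped) - base))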

                  -

                  Real World Data

                  - -

                  Models of real world data are often quite complex—this can improve accuracy, but makes them more susceptible to unexpectedly leaking information. Medical models have inadvertently revealed patients’ genetic markers. Language models have memorized credit card numbers. Faces can even be reconstructed from image models:

                  -
                  - -

                  Fredrikson et al were able to extract the image on the left by repeatedly querying a facial recognition API. It isn’t an exact match with the individual’s actual face (on the right), but this attack only required access to the model’s predictions, not its internal state.
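A toy version shows why prediction scores alone can be enough. The predict_proba "API" below is a made-up stand-in whose confidence rises as a query gets closer to one private training image; random hill climbing on that confidence alone gradually drifts toward the secret image. This is a cartoon of the idea, not the actual Fredrikson et al method.

import numpy as np

rng = np.random.default_rng(0)
secret_face = rng.uniform(0, 1, size=(8, 8))      # stand-in for a private training image

def predict_proba(img):
    """Toy recognition API: confidence grows as the query approaches the secret image."""
    score = np.exp(-np.sum((img - secret_face) ** 2))
    return np.array([score, 1.0 - score])

query = rng.uniform(0, 1, size=(8, 8))
best = predict_proba(query)[0]
for _ in range(20000):
    candidate = np.clip(query + rng.normal(0, 0.05, size=(8, 8)), 0, 1)
    score = predict_proba(candidate)[0]
    if score > best:                               # keep any step the API scores higher
        query, best = candidate, score

print("mean |query - secret| after the attack:", np.abs(query - secret_face).mean())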

                  -

                  Protecting Private Data

                  - -

                  Training models with differential privacy stops the training data from leaking by limiting how much the model can learn from any one data point. Differentially private models are still at the cutting edge of research, but they’re being packaged into machine learning frameworks, making them much easier to use. When it isn’t possible to train differentially private models, there are also tools that can measure how much data is the model memorizing. Also, standard techniques such as aggregation and limiting how much data a single source can contribute are still useful and usually improve the privacy of the model.
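The smallest version of the underlying mechanism is a single differentially private aggregate rather than a whole model: clip each person's contribution so no one record can move the result by much, then add noise calibrated to that bound. The salary values, clipping bounds, and epsilon below are arbitrary, and training-time differential privacy (with proper accounting) should come from a vetted library rather than a hand-rolled sketch like this.

import numpy as np

rng = np.random.default_rng(0)
salaries = np.array([55, 78, 80, 83, 85, 88, 90], dtype=float)   # invented, in thousands

def dp_mean(values, lower, upper, epsilon, rng):
    """Differentially private mean: clip each value, then add Laplace noise to the result."""
    clipped = np.clip(values, lower, upper)
    sensitivity = (upper - lower) / len(values)    # the most one person can change the mean
    return clipped.mean() + rng.laplace(0.0, sensitivity / epsilon)

print("true mean:", salaries.mean())
print("dp mean (epsilon=1):", dp_mean(salaries, lower=40, upper=120, epsilon=1.0, rng=rng))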

                  -

                  As we saw in the Collecting Sensitive Information Explorable, adding enough random noise with differential privacy to protect outliers like the new hire can increase the amount of data required to reach a good level of accuracy. Depending on the application, the constraints of differential privacy could even improve the model—for instance, not learning too much from one data point can help prevent overfitting.

                  -

                  Given the increasing utility of machine learning models for many real-world tasks, it’s clear that more and more systems, devices and apps will be powered, to some extent, by machine learning in the future. While standard privacy best practices developed for non-machine learning systems still apply to those with machine learning, the introduction of machine learning introduces new challenges, including the ability of the model to memorize some specific training data points and thus be vulnerable to privacy attacks that seek to extract this data from the model. Fortunately, techniques such as differential privacy exist that can be helpful in overcoming this specific challenge. Just as with other areas of Responsible AI, it’s important to be aware of these new challenges that come along with machine learning and what steps can be taken to mitigate them.

                  -

                  Credits

                  - -

                  Adam Pearce and Ellen Jiang // December 2020

                  -

                  Thanks to Andreas Terzis, Ben Wedin, Carey Radebaugh, David Weinberger, Emily Reif, Fernanda Viégas, Hal Abelson, Kristen Olson, Martin Wattenberg, Michael Terry, Miguel Guevara, Thomas Steinke, Yannick Assogba, Zan Armstrong and our other colleagues at Google for their help with this piece.

                  -

                  More Explorables

                  - -

                  - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/merve/hidden-bias/public/third_party/recirc.js b/spaces/merve/hidden-bias/public/third_party/recirc.js deleted file mode 100644 index 37b65f4b8cf3c3ba504a0a3b906f8c19befc6730..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/third_party/recirc.js +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - - -d3.loadData('../posts.json', (err, res) => { - var posts = res[0] - .filter(d => !window.location.href.includes(d.permalink)) - .filter(d => d.shareimg.includes('http')) - posts = d3.shuffle(posts) - - var isMobile = innerWidth < 900 - var postSel = d3.select('#recirc').html('').appendMany('a.post', posts) - .st({ - width: isMobile ? '100%' : '330px', - display: 'inline-block', - verticalAlign: 'top', - marginRight: isMobile ? 0 : 30, - textDecoration: 'none', - }) - .at({href: d => '..' + d.permalink}) - - - postSel.append('div.img') - .st({ - width: '100%', - height: 200, - backgroundImage: d => `url(${d.shareimgabstract || d.shareimg})`, - backgroundSize: 'cover', - backgroundPosition: 'center', - }) - - postSel.append('p.title') - .text(d => d.shorttitle || d.title) - .st({ - verticalAlign: 'top', - marginTop: 10, - textDecoration: 'none', - }) - - postSel.append('p.summary') - .text(d => d.socialsummary || d.summary) - - -}) \ No newline at end of file diff --git a/spaces/merve/hidden-bias/public/third_party/swoopy-drag.js b/spaces/merve/hidden-bias/public/third_party/swoopy-drag.js deleted file mode 100644 index 3c740601b5111efdf47f0fd5da9d41de58ceb757..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/public/third_party/swoopy-drag.js +++ /dev/null @@ -1,193 +0,0 @@ -// https://github.com/1wheel/swoopy-drag Copyright (c) 2016 Adam Pearce - -(function (global, factory) { - typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('d3')) : - typeof define === 'function' && define.amd ? 
define(['exports', 'd3'], factory) : - (factory((global.d3 = global.d3 || {}),global.d3)); -}(this, function (exports,d3) { 'use strict'; - - function swoopyDrag(){ - var x = function(d){ return d } - var y = function(d){ return d } - - var annotations = [] - var annotationSel - - var draggable = false - - var dispatch = d3.dispatch('drag') - - var textDrag = d3.drag() - .on('drag', function(d){ - var x = d3.event.x - var y = d3.event.y - d.textOffset = [x, y].map(Math.round) - - d3.select(this).call(translate, d.textOffset) - - dispatch.call('drag') - }) - .subject(function(d){ return {x: d.textOffset[0], y: d.textOffset[1]} }) - - var circleDrag = d3.drag() - .on('drag', function(d){ - var x = d3.event.x - var y = d3.event.y - d.pos = [x, y].map(Math.round) - - var parentSel = d3.select(this.parentNode) - - var path = '' - var points = parentSel.selectAll('circle').data() - if (points[0].type == 'A'){ - path = calcCirclePath(points) - } else{ - points.forEach(function(d){ path = path + d.type + d.pos }) - } - - parentSel.select('path').attr('d', path).datum().path = path - d3.select(this).call(translate, d.pos) - - dispatch.call('drag') - }) - .subject(function(d){ return {x: d.pos[0], y: d.pos[1]} }) - - - var rv = function(sel){ - annotationSel = sel.html('').selectAll('g') - .data(annotations).enter() - .append('g') - .call(translate, function(d){ return [x(d), y(d)] }) - - var textSel = annotationSel.append('text') - .call(translate, ƒ('textOffset')) - .text(ƒ('text')) - - annotationSel.append('path') - .attr('d', ƒ('path')) - - if (!draggable) return - - annotationSel.style('cursor', 'pointer') - textSel.call(textDrag) - - annotationSel.selectAll('circle').data(function(d){ - var points = [] - - if (~d.path.indexOf('A')){ - //handle arc paths seperatly -- only one circle supported - var pathNode = d3.select(this).select('path').node() - var l = pathNode.getTotalLength() - - points = [0, .5, 1].map(function(d){ - var p = pathNode.getPointAtLength(d*l) - return {pos: [p.x, p.y], type: 'A'} - }) - } else{ - var i = 1 - var type = 'M' - var commas = 0 - - for (var j = 1; j < d.path.length; j++){ - var curChar = d.path[j] - if (curChar == ',') commas++ - if (curChar == 'L' || curChar == 'C' || commas == 2){ - points.push({pos: d.path.slice(i, j).split(','), type: type}) - type = curChar - i = j + 1 - commas = 0 - } - } - - points.push({pos: d.path.slice(i, j).split(','), type: type}) - } - - return points - }).enter().append('circle') - .attr('r', 8) - .attr('fill', 'rgba(0,0,0,0)') - .attr('stroke', '#333') - .attr('stroke-dasharray', '2 2') - .call(translate, ƒ('pos')) - .call(circleDrag) - - dispatch.call('drag') - } - - - rv.annotations = function(_x){ - if (typeof(_x) == 'undefined') return annotations - annotations = _x - return rv - } - rv.x = function(_x){ - if (typeof(_x) == 'undefined') return x - x = _x - return rv - } - rv.y = function(_x){ - if (typeof(_x) == 'undefined') return y - y = _x - return rv - } - rv.draggable = function(_x){ - if (typeof(_x) == 'undefined') return draggable - draggable = _x - return rv - } - rv.on = function() { - var value = dispatch.on.apply(dispatch, arguments); - return value === dispatch ? 
rv : value; - } - - return rv - - //convert 3 points to an Arc Path - function calcCirclePath(points){ - var a = points[0].pos - var b = points[2].pos - var c = points[1].pos - - var A = dist(b, c) - var B = dist(c, a) - var C = dist(a, b) - - var angle = Math.acos((A*A + B*B - C*C)/(2*A*B)) - - //calc radius of circle - var K = .5*A*B*Math.sin(angle) - var r = A*B*C/4/K - r = Math.round(r*1000)/1000 - - //large arc flag - var laf = +(Math.PI/2 > angle) - - //sweep flag - var saf = +((b[0] - a[0])*(c[1] - a[1]) - (b[1] - a[1])*(c[0] - a[0]) < 0) - - return ['M', a, 'A', r, r, 0, laf, saf, b].join(' ') - } - - function dist(a, b){ - return Math.sqrt( - Math.pow(a[0] - b[0], 2) + - Math.pow(a[1] - b[1], 2)) - } - - - //no jetpack dependency - function translate(sel, pos){ - sel.attr('transform', function(d){ - var posStr = typeof(pos) == 'function' ? pos(d) : pos - return 'translate(' + posStr + ')' - }) - } - - function ƒ(str){ return function(d){ return d[str] } } - } - - exports.swoopyDrag = swoopyDrag; - - Object.defineProperty(exports, '__esModule', { value: true }); - -})); diff --git a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/dnnlib/__init__.py b/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/dnnlib/__init__.py deleted file mode 100644 index ad43827d8a279c4a797e09b51b8fd96e8e003ee6..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/dnnlib/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -from . import submission - -from .submission.run_context import RunContext - -from .submission.submit import SubmitTarget -from .submission.submit import PathType -from .submission.submit import SubmitConfig -from .submission.submit import get_path_from_template -from .submission.submit import submit_run - -from .util import EasyDict - -submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function. 
diff --git a/spaces/mikeee/qwen-7b-chat/run_cmd.py b/spaces/mikeee/qwen-7b-chat/run_cmd.py deleted file mode 100644 index ca6478146da158f001edc0745e76aa6bfa151077..0000000000000000000000000000000000000000 --- a/spaces/mikeee/qwen-7b-chat/run_cmd.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Run a command.""" -import subprocess as sp - -import rich -from loguru import logger - - -def run_cmd(cmd): - """Execute cmd.""" - logger.info(f"{cmd=}") - ret = sp.run(cmd, capture_output=1, check=0, shell=1, encoding="utf8") - if ret.stdout: - rich.print(ret.stdout) - if ret.stderr: - rich.print("[red bold]" + ret.stdout) diff --git a/spaces/mindtube/maximum_multiplier_places/app.py b/spaces/mindtube/maximum_multiplier_places/app.py deleted file mode 100644 index ef5c86c66986d4116d9747460247ff5639580ec2..0000000000000000000000000000000000000000 --- a/spaces/mindtube/maximum_multiplier_places/app.py +++ /dev/null @@ -1,298 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path - -models = [ - {"name": "💥 All Purpose", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "💥 Anime", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "💥 Realistic Photo", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "❤❤❤ Favorite Models ❤❤❤", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Anything 5.0 (Anime)", "url": "stablediffusionapi/anything-v5"}, - {"name": "Anything Else 5 (Anime)", "url": "stablediffusionapi/anything-v5"}, - {"name": "Abyss Orange Mix 2 (Anime)", "url": "WarriorMama777/AbyssOrangeMix2"}, - {"name": "Counterfeit 3.0 (Anime)", "url": "stablediffusionapi/counterfeit-v30"}, - {"name": "Cool Japan Diffusion (Anime)", "url": "aipicasso/cool-japan-diffusion-2-1-2"}, - {"name": "Cyberrealistic (3D, Photoreal)", "url": "stablediffusionapi/cyberrealistic"}, - {"name": "Deliberate (3D, Photoreal)", "url": "Masagin/Deliberate"}, - {"name": "Dreamlike Art", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Dreamlike Anime", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "Dreamlike Photoreal", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "Dreamshaper (Anime, Photoreal)", "url": "Lykon/DreamShaper"}, - {"name": "DucHaiten Art (3D Characters)", "url": "DucHaiten/DucHaitenAIart"}, - {"name": "DucHaiten DreamWorld", "url": "DucHaiten/DucHaitenDreamWorld"}, - {"name": "Epic Diffusion (3D, Photoreal)", "url": "johnslegers/epic-diffusion"}, - {"name": "EpicMix Realism (Art & Anime)", "url": "Duskfallcrew/EpicMix_Realism"}, - {"name": "Midjourney 4.0 (Art & Anime)", "url": "flax/midjourney-v4-diffusion"}, - {"name": "Midjourney 4.1 (Art & Anime)", "url": "Joeythemonster/anything-midjourney-v-4-1"}, - {"name": "Never Ending Dream 2 (Art & Anime)", "url": "luongphamit/NeverEnding-Dream2"}, - {"name": "Openjourney 4 (Art & Anime)", "url": "prompthero/openjourney-v4"}, - {"name": "Protogen X 3.4 (Photoreal)", "url": "darkstorm2150/Protogen_x3.4_Official_Release"}, - {"name": "Protogen X 5.8 (Photoreal)", "url": "darkstorm2150/Protogen_x5.8_Official_Release"}, - {"name": "Protogen 2.2 (Anime)", "url": "darkstorm2150/Protogen_v2.2_Official_Release"}, - {"name": "Picasso Diffusion 1.1 (Anime)", "url": "aipicasso/picasso-diffusion-1-1"}, - {"name": "Realistic Vision 1.4 (3D, Photoreal)", "url": "SG161222/Realistic_Vision_V1.4"}, - {"name": "Rev Animated (Anime)", "url": "LottePeisch/RevAnimated-Diffusers"}, - {"name": "🟩 Best ART MODELS 🟩🟩🟩", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Alice in Diffusion Land", "url": 
"Guizmus/SDArt_AliceInDiffusionLand"}, - {"name": "Alt Clip", "url": "BAAI/AltCLIP"}, - {"name": "Anything Midjourney 4.1", "url": "Joeythemonster/anything-midjourney-v-4-1"}, - {"name": "Chaos and Order", "url": "Guizmus/SDArt_ChaosAndOrder768"}, - {"name": "Chilloutclara", "url": "Fred99774/chilloutvlara"}, - {"name": "Comic Diffusion", "url": "ogkalu/Comic-Diffusion"}, - {"name": "Cosmic Horros 768", "url": "Guizmus/SDArt_cosmichorrors768"}, - {"name": "Cosmic Horros", "url": "Guizmus/SDArt_cosmichorrors"}, - {"name": "DGSpitzer", "url": "DGSpitzer/DGSpitzer-Art-Diffusion"}, - {"name": "Dreamlike Diffusion", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Dungeons and Diffusion", "url": "0xJustin/Dungeons-and-Diffusion"}, - {"name": "Elden Ring", "url": "nitrosocke/elden-ring-diffusion"}, - {"name": "Epic Diffusion 1.1", "url": "johnslegers/epic-diffusion-v1.1"}, - {"name": "Epic Diffusion", "url": "johnslegers/epic-diffusion"}, - {"name": "EpicMix Realism", "url": "Duskfallcrew/EpicMix_Realism"}, - {"name": "Fantasy Mix", "url": "theintuitiveye/FantasyMix"}, - {"name": "Girl New 1", "url": "Fred99774/girlnew1"}, - {"name": "Lit 6B", "url": "hakurei/lit-6B"}, - {"name": "Luna Diffusion", "url": "proximasanfinetuning/luna-diffusion"}, - {"name": "Midjourney 4.0", "url": "flax/midjourney-v4-diffusion"}, - {"name": "Midjourney 4.1", "url": "Joeythemonster/anything-midjourney-v-4-1"}, - {"name": "Mo-Di Diffusion", "url": "nitrosocke/mo-di-diffusion"}, - {"name": "Nitro Diffusion", "url": "nitrosocke/Nitro-Diffusion"}, - {"name": "Openjourney V2", "url": "prompthero/openjourney-v2"}, - {"name": "Openjourney", "url": "prompthero/openjourney"}, - {"name": "Seek Art Mega", "url": "coreco/seek.art_MEGA"}, - {"name": "Something", "url": "Guizmus/SDArt_something"}, - {"name": "Spider Verse diffusion", "url": "nitrosocke/spider-verse-diffusion"}, - {"name": "Vintedois 1.0", "url": "22h/vintedois-diffusion-v0-1"}, - {"name": "Vintedois 2.0", "url": "22h/vintedois-diffusion-v0-2"}, - {"name": "🟩 Best ART STYLES 🟩🟩🟩", "url": "joachimsallstrom/Double-Exposure-Diffusion"}, - {"name": "Balloon Art", "url": "Fictiverse/Stable_Diffusion_BalloonArt_Model"}, - {"name": "Double Exposure Diffusion", "url": "joachimsallstrom/Double-Exposure-Diffusion"}, - {"name": "Fluid Art", "url": "Fictiverse/Stable_Diffusion_FluidArt_Model"}, - {"name": "GTA5 Artwork Diffusion", "url": "ItsJayQz/GTA5_Artwork_Diffusion"}, - {"name": "Marvel WhatIf Diffusion", "url": "ItsJayQz/Marvel_WhatIf_Diffusion"}, - {"name": "Naruto Diffuser", "url": "lambdalabs/sd-naruto-diffusers"}, - {"name": "Papercut", "url": "Fictiverse/Stable_Diffusion_PaperCut_Model"}, - {"name": "Pokemon Diffuser", "url": "lambdalabs/sd-pokemon-diffusers"}, - {"name": "Synthwave Punk 2", "url": "ItsJayQz/SynthwavePunk-v2"}, - {"name": "Valorant Diffusion", "url": "ItsJayQz/Valorant_Diffusion"}, - {"name": "Van Gogh Diffusion", "url": "dallinmackay/Van-Gogh-diffusion"}, - {"name": "Vectorartz Diffusion", "url": "coder119/Vectorartz_Diffusion"}, - {"name": "VoxelArt", "url": "Fictiverse/Stable_Diffusion_VoxelArt_Model"}, - {"name": "🟩 Best ANIME MODELS 🟩🟩🟩", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "7 Pa", "url": "AIARTCHAN/7pa"}, - {"name": "A Certain Model", "url": "JosephusCheung/ACertainModel"}, - {"name": "A Certain Thing", "url": "JosephusCheung/ACertainThing"}, - {"name": "A Certainity", "url": "JosephusCheung/ACertainty"}, - {"name": "Abyss Hell Hero", "url": "AIARTCHAN/AbyssHellHero"}, - {"name": "Abyss Maple 3", "url": 
"AIARTCHAN/AbyssMapleVer3"}, - {"name": "Abyss Orange Mix 2", "url": "WarriorMama777/AbyssOrangeMix2"}, - {"name": "Abyss Orange Mix", "url": "WarriorMama777/AbyssOrangeMix"}, - {"name": "AbyssHell 3", "url": "AIARTCHAN/AbyssHellVer3"}, - {"name": "All 526 Animated", "url": "stablediffusionapi/all-526-animated"}, - {"name": "Anidosmix 3", "url": "AIARTCHAN/anidosmixV2"}, - {"name": "Anime Kawai Diffusion", "url": "Ojimi/anime-kawai-diffusion"}, - {"name": "Anireal 3D V2", "url": "circulus/sd-anireal-3d-v2"}, - {"name": "AnyLORA", "url": "kubanemil/AnyLORA"}, - {"name": "Anything 2.1", "url": "swl-models/anything-v2.1"}, - {"name": "Anything 3.0 Light", "url": "mm00/anything-v3.0-light"}, - {"name": "Anything 3.0", "url": "Linaqruf/anything-v3.0"}, - {"name": "Anything 3.1", "url": "cag/anything-v3-1"}, - {"name": "Anything 3X", "url": "iZELX1/Anything-V3-X"}, - {"name": "Anything 4.0", "url": "andite/anything-v4.0"}, - {"name": "Anything 5.0", "url": "stablediffusionapi/anything-v5"}, - {"name": "Anything Else 4", "url": "stablediffusionapi/anythingelse-v4"}, - {"name": "Anything Else 5", "url": "stablediffusionapi/anything-v5"}, - {"name": "Arcane Diffusion", "url": "nitrosocke/Arcane-Diffusion"}, - {"name": "Archer Diffusion", "url": "nitrosocke/archer-diffusion"}, - {"name": "Asian Mix", "url": "D1b4l4p/AsianMix"}, - {"name": "Blood Orange Mix", "url": "WarriorMama777/BloodOrangeMix"}, - {"name": "CamelliaMix 2.5D","url": "stablediffusionapi/camelliamix25d"}, - {"name": "CamelliaMix Line","url": "stablediffusionapi/camelliamixline"}, - {"name": "CamelliaMix","url": "Powidl43/CamelliaMix"}, - {"name": "Cetusmix", "url": "stablediffusionapi/cetusmix"}, - {"name": "Chik Mix", "url": "stablediffusionapi/chikmix"}, - {"name": "Chikmix", "url": "stablediffusionapi/chikmix"}, - {"name": "Chillout App Factory","url": "stablediffusionapi/chillout-app-factory"}, - {"name": "Classic Anime", "url": "nitrosocke/classic-anim-diffusion"}, - {"name": "Cool Japan Diffusion 2.1.2", "url": "aipicasso/cool-japan-diffusion-2-1-2"}, - {"name": "Cosmic Babes", "url": "stablediffusionapi/cosmic-babes"}, - {"name": "Counterfeit 1.0", "url": "gsdf/counterfeit-v1.0"}, - {"name": "Counterfeit 2", "url": "gsdf/Counterfeit-V2.0"}, - {"name": "Counterfeit 2.0", "url": "gsdf/Counterfeit-V2.0"}, - {"name": "Counterfeit 3.0", "url": "stablediffusionapi/counterfeit-v30"}, - {"name": "CuteSexyRobutts", "url": "andite/cutesexyrobutts-diffusion"}, - {"name": "CyberPunk Anime", "url": "DGSpitzer/Cyberpunk-Anime-Diffusion"}, - {"name": "Dark Sushi Mix", "url": "stablediffusionapi/dark-sushi-mix"}, - {"name": "Dash Sushi 25d", "url": "stablediffusionapi/dark-sushi-25d"}, - {"name": "Dreamlike Anime", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "DucHaiten Anime", "url": "DucHaiten/DucHaitenAnime"}, - {"name": "Eerie Orange Mix", "url": "WarriorMama777/EerieOrangeMix"}, - {"name": "Eimis Anime Diffusion", "url": "eimiss/EimisAnimeDiffusion_1.0v"}, - {"name": "Ghibli Diffusion", "url": "nitrosocke/Ghibli-Diffusion"}, - {"name": "GrapeFruit", "url": "iZELX1/Grapefruit"}, - {"name": "GuoFeng 3", "url": "xiaolxl/GuoFeng3"}, - {"name": "Guweiz Diffusion", "url": "andite/guweiz-diffusion"}, - {"name": "Hiten Diffusion", "url": "andite/hiten-diffusion"}, - {"name": "Icomix 2", "url": "stablediffusionapi/icomix-2"}, - {"name": "InkPunk Diffusion", "url": "Envvi/Inkpunk-Diffusion"}, - {"name": "Mama Orange Mixs", "url": "WarriorMama777/OrangeMixs"}, - {"name": "Mashuu Diffusion", "url": "andite/mashuu-diffusion"}, - {"name": 
"Meina Alter", "url": "stablediffusionapi/meinaalter"}, - {"name": "Meina Pastel", "url": "stablediffusionapi/meinapastel"}, - {"name": "MeinaMix 7", "url": "Nacholmo/meinamixv7-diffusers"}, - {"name": "Mignon Diffusion", "url": "andite/mignon-diffusion"}, - {"name": "MikaPikazo Diffusion", "url": "andite/mikapikazo-diffusion"}, - {"name": "Mikapikazo", "url": "andite/mikapikazo-diffusion"}, - {"name": "Mix Pro V4", "url": "AIARTCHAN/MIX-Pro-V4"}, - {"name": "NeverEnding-Dream", "url": "Lykon/NeverEnding-Dream"}, - {"name": "Openjourney 4", "url": "prompthero/openjourney-v4"}, - {"name": "OpenNiji", "url": "Korakoe/OpenNiji"}, - {"name": "Pastel Mix", "url": "andite/pastel-mix"}, - {"name": "Picasso Diffusion 1.1", "url": "aipicasso/picasso-diffusion-1-1"}, - {"name": "Piromizu Diffusion", "url": "andite/piromizu-diffusion"}, - {"name": "Protogen 2.2", "url": "darkstorm2150/Protogen_v2.2_Official_Release"}, - {"name": "Protogen Infinity", "url": "darkstorm2150/Protogen_Infinity_Official_Release"}, - {"name": "Protogen X 3.4", "url": "darkstorm2150/Protogen_x3.4_Official_Release"}, - {"name": "Rev Anim", "url": "stablediffusionapi/rev-anim"}, - {"name": "Rev Animated", "url": "coreml/coreml-ReV-Animated"}, - {"name": "Rev Animated", "url": "LottePeisch/RevAnimated-Diffusers"}, - {"name": "Something V 2.2","url": "NoCrypt/SomethingV2_2"}, - {"name": "Something V2","url": "NoCrypt/SomethingV2"}, - {"name": "Three Delicacy", "url": "stablediffusionapi/three-delicacy"}, - {"name": "Three Delicacy wonto", "url": "stablediffusionapi/three-delicacy-wonto"}, - {"name": "TMND mix", "url": "stablediffusionapi/tmnd-mix"}, - {"name": "Waifu Diffusion", "url": "hakurei/waifu-diffusion"}, - {"name": "🟩 Best REALISTIC MODELS 🟩🟩🟩", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "AmiIReal", "url": "stablediffusionapi/amireal"}, - {"name": "Analog Diffusion", "url": "wavymulder/Analog-Diffusion"}, - {"name": "Circulus 2.8", "url": "circulus/sd-photoreal-v2.8"}, - {"name": "Circulus Photoreal V2", "url": "circulus/sd-photoreal-real-v2"}, - {"name": "Claudfuen 1", "url": "claudfuen/photorealistic-fuen-v1"}, - {"name": "Collage Diffusion", "url": "wavymulder/collage-diffusion"}, - {"name": "Cyberrealistic", "url": "stablediffusionapi/cyberrealistic"}, - {"name": "Dreamlike Photoreal", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "Dreamful 2", "url": "Hius/DreamFul-V2"}, - {"name": "GakkiMix768", "url": "Sa1i/gakki-mix-768"}, - {"name": "Grimoeresigils", "url": "ECarbenia/grimoiresigils"}, - {"name": "HARDBlend", "url": "theintuitiveye/HARDblend"}, - {"name": "HassanBlend 1.4", "url": "hassanblend/hassanblend1.4"}, - {"name": "HassanBlend 1.5.1.2", "url": "hassanblend/HassanBlend1.5.1.2"}, - {"name": "Lomo Diffusion", "url": "wavymulder/lomo-diffusion"}, - {"name": "Model Shoot", "url": "wavymulder/modelshoot"}, - {"name": "Portrait Plus", "url": "wavymulder/portraitplus"}, - {"name": "QuinceMix", "url": "Hemlok/QuinceMix"}, - {"name": "Realistic Vision 1.4", "url": "SG161222/Realistic_Vision_V1.4"}, - {"name": "The Ally", "url": "stablediffusionapi/the-ally"}, - {"name": "Timeless Diffusion", "url": "wavymulder/timeless-diffusion"}, - {"name": "UltraSkin", "url": "VegaKH/Ultraskin"}, - {"name": "Wavyfusion", "url": "wavymulder/wavyfusion"}, - {"name": "🟩 Best SEMI-REALISTIC MODELS 🟩🟩🟩", "url": "stablediffusionapi/all-526"}, - {"name": "All 526", "url": "stablediffusionapi/all-526"}, - {"name": "All 526 animated", "url": "stablediffusionapi/all-526-animated"}, - {"name": "Circulus 
Semi Real 2", "url": "circulus/sd-photoreal-semi-v2"}, - {"name": "Semi Real Mix", "url": "robotjung/SemiRealMix"}, - {"name": "SpyBG", "url": "stablediffusionapi/spybg"}, - {"name": "🟩 STABLE DIFFUSION MODELS 🟩🟩🟩", "url": "stabilityai/stable-diffusion-2-1"}, - {"name": "Stable Diffusion 1.4","url": "CompVis/stable-diffusion-v1-4"}, - {"name": "Stable Diffusion 1.5","url": "runwayml/stable-diffusion-v1-5"}, - {"name": "Stable Diffusion 2.1","url": "stabilityai/stable-diffusion-2-1"}, - {"name": "Stable Diffusion 2.1 Base","url": "stabilityai/stable-diffusion-2-1-base"}, - {"name": "Stable Diffusion 2.1 Unclip","url": "stabilityai/stable-diffusion-2-1-unclip"}, - {"name": "🟩 Best SCI FI MODELS 🟩🟩🟩", "url": "nitrosocke/Future-Diffusion"}, - {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"}, - {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"}, - {"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"}, - {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"}, - {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"}, - {"name": "🟩 Best 3D ART MODELS 🟩🟩🟩", "url": "DucHaiten/DucHaitenAIart"}, - {"name": "DucHaiten Art", "url": "DucHaiten/DucHaitenAIart"}, - {"name": "DucHaiten ClassicAnime", "url": "DucHaiten/DH_ClassicAnime"}, - {"name": "DucHaiten DreamWorld", "url": "DucHaiten/DucHaitenDreamWorld"}, - {"name": "DucHaiten Journey", "url": "DucHaiten/DucHaitenJourney"}, - {"name": "DucHaiten StyleLikeMe", "url": "DucHaiten/DucHaiten-StyleLikeMe"}, - {"name": "DucHaiten SuperCute", "url": "DucHaiten/DucHaitenSuperCute"}, - {"name": "Redshift Diffusion 768", "url": "nitrosocke/redshift-diffusion-768"}, - {"name": "Redshift Diffusion", "url": "nitrosocke/redshift-diffusion"}, -] - -current_model = models[0] - -text_gen = gr.Interface.load("spaces/daspartho/prompt-extend") - -models2 = [] -for model in models: - model_url = f"models/{model['url']}" - loaded_model = gr.Interface.load(model_url, live=True, preprocess=True) - models2.append(loaded_model) - - -def text_it(inputs, text_gen=text_gen): - return text_gen(inputs) - - -def set_model(current_model_index): - global current_model - current_model = models[current_model_index] - return gr.update(value=f"{current_model['name']}") - - -def send_it(inputs, model_choice): - proc = models2[model_choice] - return proc(inputs) - - -with gr.Blocks() as myface: - gr.HTML( - - ) - - with gr.Row(): - with gr.Row(): - input_text = gr.Textbox(label="Prompt idea", placeholder="Eg. 
Mystical zen garden", lines=1) - # Model selection dropdown - model_name1 = gr.Dropdown( - label="Choose Model", - choices=[m["name"] for m in models], - type="index", - value=current_model["name"], - interactive=True, - ) - with gr.Row(): - see_prompts = gr.Button("Generate Prompts") - run = gr.Button("Generate Images", variant="primary") - - with gr.Row(): - output1 = gr.Image(label="") - output2 = gr.Image(label="") - output3 = gr.Image(label="") - with gr.Row(): - magic1 = gr.Textbox(label="Generated Prompt", lines=2) - magic2 = gr.Textbox(label="Generated Prompt", lines=2) - magic3 = gr.Textbox(label="Generated Prompt", lines=2) - with gr.Row(): - output4 = gr.Image(label="") - output5 = gr.Image(label="") - output6 = gr.Image(label="") - with gr.Row(): - magic4 = gr.Textbox(label="Generated Prompt", lines=2) - magic5 = gr.Textbox(label="Generated Prompt", lines=2) - magic6 = gr.Textbox(label="Generated Prompt", lines=2) - - model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3, output4, output5, output6]) - - run.click(send_it, inputs=[magic1, model_name1], outputs=[output1]) - run.click(send_it, inputs=[magic2, model_name1], outputs=[output2]) - run.click(send_it, inputs=[magic3, model_name1], outputs=[output3]) - run.click(send_it, inputs=[magic4, model_name1], outputs=[output4]) - run.click(send_it, inputs=[magic5, model_name1], outputs=[output5]) - run.click(send_it, inputs=[magic6, model_name1], outputs=[output6]) - - see_prompts.click(text_it, inputs=[input_text], outputs=[magic1]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic2]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic3]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic4]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic5]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic6]) - -myface.queue(concurrency_count=200) -myface.launch(inline=True, show_api=False, max_threads=400) \ No newline at end of file diff --git a/spaces/mishig/jsonformer/app.py b/spaces/mishig/jsonformer/app.py deleted file mode 100644 index abb3bb9612dcf196a393ab5784bfb674cd275866..0000000000000000000000000000000000000000 --- a/spaces/mishig/jsonformer/app.py +++ /dev/null @@ -1,90 +0,0 @@ -import json -import gradio as gr -from transformers import AutoModelForCausalLM, AutoTokenizer -from jsonformer.format import highlight_values -from jsonformer.main import Jsonformer - -print("Loading model and tokenizer...") -model_name = "databricks/dolly-v2-3b" -model = AutoModelForCausalLM.from_pretrained(model_name, use_cache=True, device_map="auto") -tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True, use_cache=True) -print("Loaded model and tokenizer") - -def generate(input_prompt, input_schema): - try: - if not input_prompt: - raise ValueError("Prompt is empty") - if not input_schema: - raise ValueError("JSON Schema is empty") - input_schema = json.loads(input_schema) - builder = Jsonformer( - model=model, - tokenizer=tokenizer, - json_schema=input_schema, - prompt=input_prompt, - ) - print("Generating...") - output_json = builder() - return output_json - except Exception as e: - raise gr.Error(e) - -examples = [ - [ - "Generate a json where it is silver Aston Martin DB5 manufactured in 1964", - '{\n "type": "object",\n "properties": {\n "car": {\n "type": "object",\n "properties": {\n "make": {\n "type": "string"\n },\n "model": {\n "type": "string"\n },\n "year": {\n "type": "number"\n },\n "colors": {\n "type": "array",\n 
"items": {\n "type": "string"\n }\n }\n }\n }\n }\n}' - ], - [ - "Generate a person's information based on the following schema. The person is Lionel Messi, aged 26. Messi is a student at Georgia Tech, and take the following courses: Chemistry, Mathematics, and a minor in Japanese.", - '{\n "type": "object",\n "properties": {\n "name": {\n "type": "string"\n },\n "age": {\n "type": "number"\n },\n "is_student": {\n "type": "boolean"\n },\n "courses": {\n "type": "array",\n "items": {\n "type": "string"\n }\n }\n }\n}' - ], -] - -css = """ -#examples { - width: 35rem; -} -""" - -with gr.Blocks(css=css) as demo: - gr.HTML( - """ -
                  -
                  -

                  - Jsonformer -

                  -
                  -

                  - Jsonformer: A Bulletproof Way to Generate Structured JSON from Language Models. -

                  -

                  - Jsonformer generates syntactically correct jsons by constraining/shrinking output space of Language Models. -

                  -
                  - """ - ) - with gr.Row(): - with gr.Column(scale=1, min_width=600): - input_prompt = gr.TextArea("Generate a json where it is silver Aston Martin DB5 manufactured in 1964", label="Prompt", lines=2) - input_schema = gr.Code('{\n "type": "object",\n "properties": {\n "car": {\n "type": "object",\n "properties": {\n "make": {\n "type": "string"\n },\n "model": {\n "type": "string"\n },\n "year": {\n "type": "number"\n },\n "colors": {\n "type": "array",\n "items": {\n "type": "string"\n }\n }\n }\n }\n }\n}', label="JSON Schema") - generate_btn = gr.Button("Generate") - with gr.Column(scale=1, min_width=600): - output_json = gr.JSON(label="Generated JSON") - - ex = gr.Examples(examples=examples, fn=generate, inputs=[input_prompt, input_schema], outputs=output_json, cache_examples=False, elem_id="examples",) - ex.dataset.headers = [""] - generate_btn.click(fn=generate, inputs=[input_prompt, input_schema], outputs=output_json, api_name="greet") - -demo.launch() diff --git a/spaces/mishtert/tracer/ctwraputils.py b/spaces/mishtert/tracer/ctwraputils.py deleted file mode 100644 index f5a06f8a67d9d906ed2eb8ac9aabde26e16383d9..0000000000000000000000000000000000000000 --- a/spaces/mishtert/tracer/ctwraputils.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Basic utilities module""" -import requests -import csv -import re - - -def request_ct(url): - """Performs a get request that provides a (somewhat) useful error message.""" - try: - response = requests.get(url) - except ImportError: - raise ImportError( - "Couldn't retrieve the data, check your search expression or try again later." - ) - else: - return response - - -def json_handler(url): - """Returns request in JSON (dict) format""" - return request_ct(url).json() - - -def csv_handler(url): - """Returns request in CSV (list of records) format""" - - response = request_ct(url) - decoded_content = response.content.decode("utf-8") - - split_by_blank = re.split(r"\n\s*\n", decoded_content) # Extracts header info - cr = csv.reader(split_by_blank[1].splitlines(), delimiter=",") - records = list(cr) - - return records \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/criss/unsupervised_mt/eval.sh b/spaces/mshukor/UnIVAL/fairseq/examples/criss/unsupervised_mt/eval.sh deleted file mode 100644 index 03b773ed5a522eb82186fea8ffbb6c557e14b6d3..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/criss/unsupervised_mt/eval.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# -SRC=si_LK -TGT=en_XX -MODEL=criss_checkpoints/criss.3rd.pt - -MULTIBLEU=mosesdecoder/scripts/generic/multi-bleu.perl -MOSES=mosesdecoder -REPLACE_UNICODE_PUNCT=$MOSES/scripts/tokenizer/replace-unicode-punctuation.perl -NORM_PUNC=$MOSES/scripts/tokenizer/normalize-punctuation.perl -REM_NON_PRINT_CHAR=$MOSES/scripts/tokenizer/remove-non-printing-char.perl -TOKENIZER=$MOSES/scripts/tokenizer/tokenizer.perl -GEN_TMP_DIR=gen_tmp -LANG_DICT=criss_checkpoints/lang_dict.txt - -if [ ! 
-d "mosesdecoder" ]; then - git clone https://github.com/moses-smt/mosesdecoder -fi -mkdir -p $GEN_TMP_DIR -fairseq-generate data_tmp/${SRC}-${TGT}-flores \ - --task translation_multi_simple_epoch \ - --max-tokens 2000 \ - --path ${MODEL} \ - --skip-invalid-size-inputs-valid-test \ - --beam 5 --lenpen 1.0 --gen-subset test \ - --remove-bpe=sentencepiece \ - --source-lang ${SRC} --target-lang ${TGT} \ - --decoder-langtok --lang-pairs 'en_XX-ar_AR,en_XX-de_DE,en_XX-es_XX,en_XX-fr_XX,en_XX-hi_IN,en_XX-it_IT,en_XX-ja_XX,en_XX-ko_KR,en_XX-nl_XX,en_XX-ru_RU,en_XX-zh_CN,en_XX-tr_TR,en_XX-vi_VN,en_XX-ro_RO,en_XX-my_MM,en_XX-ne_NP,en_XX-si_LK,en_XX-cs_CZ,en_XX-lt_LT,en_XX-kk_KZ,en_XX-gu_IN,en_XX-fi_FI,en_XX-et_EE,en_XX-lv_LV,ar_AR-en_XX,cs_CZ-en_XX,de_DE-en_XX,es_XX-en_XX,et_EE-en_XX,fi_FI-en_XX,fr_XX-en_XX,gu_IN-en_XX,hi_IN-en_XX,it_IT-en_XX,ja_XX-en_XX,kk_KZ-en_XX,ko_KR-en_XX,lt_LT-en_XX,lv_LV-en_XX,my_MM-en_XX,ne_NP-en_XX,nl_XX-en_XX,ro_RO-en_XX,ru_RU-en_XX,si_LK-en_XX,tr_TR-en_XX,vi_VN-en_XX,zh_CN-en_XX,ar_AR-es_XX,es_XX-ar_AR,ar_AR-hi_IN,hi_IN-ar_AR,ar_AR-zh_CN,zh_CN-ar_AR,cs_CZ-es_XX,es_XX-cs_CZ,cs_CZ-hi_IN,hi_IN-cs_CZ,cs_CZ-zh_CN,zh_CN-cs_CZ,de_DE-es_XX,es_XX-de_DE,de_DE-hi_IN,hi_IN-de_DE,de_DE-zh_CN,zh_CN-de_DE,es_XX-hi_IN,hi_IN-es_XX,es_XX-zh_CN,zh_CN-es_XX,et_EE-es_XX,es_XX-et_EE,et_EE-hi_IN,hi_IN-et_EE,et_EE-zh_CN,zh_CN-et_EE,fi_FI-es_XX,es_XX-fi_FI,fi_FI-hi_IN,hi_IN-fi_FI,fi_FI-zh_CN,zh_CN-fi_FI,fr_XX-es_XX,es_XX-fr_XX,fr_XX-hi_IN,hi_IN-fr_XX,fr_XX-zh_CN,zh_CN-fr_XX,gu_IN-es_XX,es_XX-gu_IN,gu_IN-hi_IN,hi_IN-gu_IN,gu_IN-zh_CN,zh_CN-gu_IN,hi_IN-zh_CN,zh_CN-hi_IN,it_IT-es_XX,es_XX-it_IT,it_IT-hi_IN,hi_IN-it_IT,it_IT-zh_CN,zh_CN-it_IT,ja_XX-es_XX,es_XX-ja_XX,ja_XX-hi_IN,hi_IN-ja_XX,ja_XX-zh_CN,zh_CN-ja_XX,kk_KZ-es_XX,es_XX-kk_KZ,kk_KZ-hi_IN,hi_IN-kk_KZ,kk_KZ-zh_CN,zh_CN-kk_KZ,ko_KR-es_XX,es_XX-ko_KR,ko_KR-hi_IN,hi_IN-ko_KR,ko_KR-zh_CN,zh_CN-ko_KR,lt_LT-es_XX,es_XX-lt_LT,lt_LT-hi_IN,hi_IN-lt_LT,lt_LT-zh_CN,zh_CN-lt_LT,lv_LV-es_XX,es_XX-lv_LV,lv_LV-hi_IN,hi_IN-lv_LV,lv_LV-zh_CN,zh_CN-lv_LV,my_MM-es_XX,es_XX-my_MM,my_MM-hi_IN,hi_IN-my_MM,my_MM-zh_CN,zh_CN-my_MM,ne_NP-es_XX,es_XX-ne_NP,ne_NP-hi_IN,hi_IN-ne_NP,ne_NP-zh_CN,zh_CN-ne_NP,nl_XX-es_XX,es_XX-nl_XX,nl_XX-hi_IN,hi_IN-nl_XX,nl_XX-zh_CN,zh_CN-nl_XX,ro_RO-es_XX,es_XX-ro_RO,ro_RO-hi_IN,hi_IN-ro_RO,ro_RO-zh_CN,zh_CN-ro_RO,ru_RU-es_XX,es_XX-ru_RU,ru_RU-hi_IN,hi_IN-ru_RU,ru_RU-zh_CN,zh_CN-ru_RU,si_LK-es_XX,es_XX-si_LK,si_LK-hi_IN,hi_IN-si_LK,si_LK-zh_CN,zh_CN-si_LK,tr_TR-es_XX,es_XX-tr_TR,tr_TR-hi_IN,hi_IN-tr_TR,tr_TR-zh_CN,zh_CN-tr_TR,vi_VN-es_XX,es_XX-vi_VN,vi_VN-hi_IN,hi_IN-vi_VN,vi_VN-zh_CN,zh_CN-vi_VN' \ - --lang-dict ${LANG_DICT} --lang-tok-style 'mbart' --sampling-method 'temperature' --sampling-temperature '1.0' > $GEN_TMP_DIR/${SRC}_${TGT}.gen -cat $GEN_TMP_DIR/${SRC}_${TGT}.gen | grep -P "^T-" | cut -f2 | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l ${TGT:0:2} | $REM_NON_PRINT_CHAR | $TOKENIZER -no-escape ${TGT:0:2} > $GEN_TMP_DIR/${SRC}_${TGT}.hyp -cat $GEN_TMP_DIR/${SRC}_${TGT}.gen | grep -P "^H-" | cut -f3 | $REPLACE_UNICODE_PUNCT | $NORM_PUNC -l ${TGT:0:2} | $REM_NON_PRINT_CHAR | $TOKENIZER -no-escape ${TGT:0:2} > $GEN_TMP_DIR/${SRC}_${TGT}.ref -${MULTIBLEU} $GEN_TMP_DIR/${SRC}_${TGT}.ref < $GEN_TMP_DIR/${SRC}_${TGT}.hyp diff --git a/spaces/multimodalart/Tune-A-Video-Training-UI-poli/style.css b/spaces/multimodalart/Tune-A-Video-Training-UI-poli/style.css deleted file mode 100644 index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/Tune-A-Video-Training-UI-poli/style.css +++ 
/dev/null @@ -1,3 +0,0 @@ -h1 { - text-align: center; -} diff --git a/spaces/mygyasir/Real-Time-Voice-Cloning/encoder/preprocess.py b/spaces/mygyasir/Real-Time-Voice-Cloning/encoder/preprocess.py deleted file mode 100644 index 551a8b29c4d84c0e1430f285a1c8b5e10c98ee5f..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/Real-Time-Voice-Cloning/encoder/preprocess.py +++ /dev/null @@ -1,175 +0,0 @@ -from multiprocess.pool import ThreadPool -from encoder.params_data import * -from encoder.config import librispeech_datasets, anglophone_nationalites -from datetime import datetime -from encoder import audio -from pathlib import Path -from tqdm import tqdm -import numpy as np - - -class DatasetLog: - """ - Registers metadata about the dataset in a text file. - """ - def __init__(self, root, name): - self.text_file = open(Path(root, "Log_%s.txt" % name.replace("/", "_")), "w") - self.sample_data = dict() - - start_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) - self.write_line("Creating dataset %s on %s" % (name, start_time)) - self.write_line("-----") - self._log_params() - - def _log_params(self): - from encoder import params_data - self.write_line("Parameter values:") - for param_name in (p for p in dir(params_data) if not p.startswith("__")): - value = getattr(params_data, param_name) - self.write_line("\t%s: %s" % (param_name, value)) - self.write_line("-----") - - def write_line(self, line): - self.text_file.write("%s\n" % line) - - def add_sample(self, **kwargs): - for param_name, value in kwargs.items(): - if not param_name in self.sample_data: - self.sample_data[param_name] = [] - self.sample_data[param_name].append(value) - - def finalize(self): - self.write_line("Statistics:") - for param_name, values in self.sample_data.items(): - self.write_line("\t%s:" % param_name) - self.write_line("\t\tmin %.3f, max %.3f" % (np.min(values), np.max(values))) - self.write_line("\t\tmean %.3f, median %.3f" % (np.mean(values), np.median(values))) - self.write_line("-----") - end_time = str(datetime.now().strftime("%A %d %B %Y at %H:%M")) - self.write_line("Finished on %s" % end_time) - self.text_file.close() - - -def _init_preprocess_dataset(dataset_name, datasets_root, out_dir) -> (Path, DatasetLog): - dataset_root = datasets_root.joinpath(dataset_name) - if not dataset_root.exists(): - print("Couldn\'t find %s, skipping this dataset." % dataset_root) - return None, None - return dataset_root, DatasetLog(out_dir, dataset_name) - - -def _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, extension, - skip_existing, logger): - print("%s: Preprocessing data for %d speakers." % (dataset_name, len(speaker_dirs))) - - # Function to preprocess utterances for one speaker - def preprocess_speaker(speaker_dir: Path): - # Give a name to the speaker that includes its dataset - speaker_name = "_".join(speaker_dir.relative_to(datasets_root).parts) - - # Create an output directory with that name, as well as a txt file containing a - # reference to each source file. - speaker_out_dir = out_dir.joinpath(speaker_name) - speaker_out_dir.mkdir(exist_ok=True) - sources_fpath = speaker_out_dir.joinpath("_sources.txt") - - # There's a possibility that the preprocessing was interrupted earlier, check if - # there already is a sources file. 
- if sources_fpath.exists(): - try: - with sources_fpath.open("r") as sources_file: - existing_fnames = {line.split(",")[0] for line in sources_file} - except: - existing_fnames = {} - else: - existing_fnames = {} - - # Gather all audio files for that speaker recursively - sources_file = sources_fpath.open("a" if skip_existing else "w") - for in_fpath in speaker_dir.glob("**/*.%s" % extension): - # Check if the target output file already exists - out_fname = "_".join(in_fpath.relative_to(speaker_dir).parts) - out_fname = out_fname.replace(".%s" % extension, ".npy") - if skip_existing and out_fname in existing_fnames: - continue - - # Load and preprocess the waveform - wav = audio.preprocess_wav(in_fpath) - if len(wav) == 0: - continue - - # Create the mel spectrogram, discard those that are too short - frames = audio.wav_to_mel_spectrogram(wav) - if len(frames) < partials_n_frames: - continue - - out_fpath = speaker_out_dir.joinpath(out_fname) - np.save(out_fpath, frames) - logger.add_sample(duration=len(wav) / sampling_rate) - sources_file.write("%s,%s\n" % (out_fname, in_fpath)) - - sources_file.close() - - # Process the utterances for each speaker - with ThreadPool(8) as pool: - list(tqdm(pool.imap(preprocess_speaker, speaker_dirs), dataset_name, len(speaker_dirs), - unit="speakers")) - logger.finalize() - print("Done preprocessing %s.\n" % dataset_name) - - -def preprocess_librispeech(datasets_root: Path, out_dir: Path, skip_existing=False): - for dataset_name in librispeech_datasets["train"]["other"]: - # Initialize the preprocessing - dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) - if not dataset_root: - return - - # Preprocess all speakers - speaker_dirs = list(dataset_root.glob("*")) - _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "flac", - skip_existing, logger) - - -def preprocess_voxceleb1(datasets_root: Path, out_dir: Path, skip_existing=False): - # Initialize the preprocessing - dataset_name = "VoxCeleb1" - dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) - if not dataset_root: - return - - # Get the contents of the meta file - with dataset_root.joinpath("vox1_meta.csv").open("r") as metafile: - metadata = [line.split("\t") for line in metafile][1:] - - # Select the ID and the nationality, filter out non-anglophone speakers - nationalities = {line[0]: line[3] for line in metadata} - keep_speaker_ids = [speaker_id for speaker_id, nationality in nationalities.items() if - nationality.lower() in anglophone_nationalites] - print("VoxCeleb1: using samples from %d (presumed anglophone) speakers out of %d." % - (len(keep_speaker_ids), len(nationalities))) - - # Get the speaker directories for anglophone speakers only - speaker_dirs = dataset_root.joinpath("wav").glob("*") - speaker_dirs = [speaker_dir for speaker_dir in speaker_dirs if - speaker_dir.name in keep_speaker_ids] - print("VoxCeleb1: found %d anglophone speakers on the disk, %d missing (this is normal)." 
% - (len(speaker_dirs), len(keep_speaker_ids) - len(speaker_dirs))) - - # Preprocess all speakers - _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "wav", - skip_existing, logger) - - -def preprocess_voxceleb2(datasets_root: Path, out_dir: Path, skip_existing=False): - # Initialize the preprocessing - dataset_name = "VoxCeleb2" - dataset_root, logger = _init_preprocess_dataset(dataset_name, datasets_root, out_dir) - if not dataset_root: - return - - # Get the speaker directories - # Preprocess all speakers - speaker_dirs = list(dataset_root.joinpath("dev", "aac").glob("*")) - _preprocess_speaker_dirs(speaker_dirs, dataset_name, datasets_root, out_dir, "m4a", - skip_existing, logger) diff --git a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/losses/feature_matching.py b/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/losses/feature_matching.py deleted file mode 100644 index c019895c9178817837d1a6773367b178a861dc61..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/losses/feature_matching.py +++ /dev/null @@ -1,33 +0,0 @@ -from typing import List - -import torch -import torch.nn.functional as F - - -def masked_l2_loss(pred, target, mask, weight_known, weight_missing): - per_pixel_l2 = F.mse_loss(pred, target, reduction='none') - pixel_weights = mask * weight_missing + (1 - mask) * weight_known - return (pixel_weights * per_pixel_l2).mean() - - -def masked_l1_loss(pred, target, mask, weight_known, weight_missing): - per_pixel_l1 = F.l1_loss(pred, target, reduction='none') - pixel_weights = mask * weight_missing + (1 - mask) * weight_known - return (pixel_weights * per_pixel_l1).mean() - - -def feature_matching_loss(fake_features: List[torch.Tensor], target_features: List[torch.Tensor], mask=None): - if mask is None: - res = torch.stack([F.mse_loss(fake_feat, target_feat) - for fake_feat, target_feat in zip(fake_features, target_features)]).mean() - else: - res = 0 - norm = 0 - for fake_feat, target_feat in zip(fake_features, target_features): - cur_mask = F.interpolate(mask, size=fake_feat.shape[-2:], mode='bilinear', align_corners=False) - error_weights = 1 - cur_mask - cur_val = ((fake_feat - target_feat).pow(2) * error_weights).mean() - res = res + cur_val - norm += 1 - res = res / norm - return res diff --git a/spaces/najimino/aicv/constraints.md b/spaces/najimino/aicv/constraints.md deleted file mode 100644 index 7d824a21f7bc3bf2448ea266a2bb687a6f3b6fd0..0000000000000000000000000000000000000000 --- a/spaces/najimino/aicv/constraints.md +++ /dev/null @@ -1,8 +0,0 @@ -# 制約 - -- あなたの名前はnajimino AIアシスタントです -- あなたは求職者のuserを人材募集企業に紹介するアシスタントです -- あなたの返信は下記テンプレートに沿って職務経歴書を作成します -- あなたの返信は求職者であるuserの言った事実のみテンプレートに記載します - -# テンプレート diff --git a/spaces/nateraw/helpful-snippets/README.md b/spaces/nateraw/helpful-snippets/README.md deleted file mode 100644 index 4c5336188f2fd13d102359ce6f340a98e520c756..0000000000000000000000000000000000000000 --- a/spaces/nateraw/helpful-snippets/README.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Helpful Snippets -emoji: 🤗 -colorFrom: blue -colorTo: red -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Helpful Snippets - -[![Generic badge](https://img.shields.io/badge/🤗-Open%20In%20Spaces-blue.svg)](https://huggingface.co/spaces/nateraw/helpful-snippets) - -An interactive app with some snippets I've found helpful - ---- - -Autogenerated using [this template](https://github.com/nateraw/spaces-template) 
diff --git a/spaces/nateraw/jupyterlab-test2/README.md b/spaces/nateraw/jupyterlab-test2/README.md deleted file mode 100644 index 2d16889cfdb8e0cd7bd088ff78a4f33a3f5464cf..0000000000000000000000000000000000000000 --- a/spaces/nateraw/jupyterlab-test2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: JupyterLab -emoji: 💻🐳 -colorFrom: gray -colorTo: green -sdk: docker -pinned: false -tags: -- jupyterlab -duplicated_from: DockerTemplates/jupyterlab ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nikesh66/gramamrly/README.md b/spaces/nikesh66/gramamrly/README.md deleted file mode 100644 index 5a349d5155dfbcb16fc5359f2f9a894a20a3625a..0000000000000000000000000000000000000000 --- a/spaces/nikesh66/gramamrly/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Gramamrly -emoji: ⚡ -colorFrom: green -colorTo: green -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/utils/events.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/utils/events.py deleted file mode 100644 index 7d582a9a1683c2bf3a0452a81b7e1c869789e57e..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/utils/events.py +++ /dev/null @@ -1,551 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import datetime -import json -import logging -import os -import time -from collections import defaultdict -from contextlib import contextmanager -from functools import cached_property -from typing import Optional -import torch -from fvcore.common.history_buffer import HistoryBuffer - -from detectron2.utils.file_io import PathManager - -__all__ = [ - "get_event_storage", - "has_event_storage", - "JSONWriter", - "TensorboardXWriter", - "CommonMetricPrinter", - "EventStorage", -] - -_CURRENT_STORAGE_STACK = [] - - -def get_event_storage(): - """ - Returns: - The :class:`EventStorage` object that's currently being used. - Throws an error if no :class:`EventStorage` is currently enabled. - """ - assert len( - _CURRENT_STORAGE_STACK - ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" - return _CURRENT_STORAGE_STACK[-1] - - -def has_event_storage(): - """ - Returns: - Check if there are EventStorage() context existed. - """ - return len(_CURRENT_STORAGE_STACK) > 0 - - -class EventWriter: - """ - Base class for writers that obtain events from :class:`EventStorage` and process them. - """ - - def write(self): - raise NotImplementedError - - def close(self): - pass - - -class JSONWriter(EventWriter): - """ - Write scalars to a json file. - - It saves scalars as one json per line (instead of a big json) for easy parsing. 
- - Examples parsing such a json file: - :: - $ cat metrics.json | jq -s '.[0:2]' - [ - { - "data_time": 0.008433341979980469, - "iteration": 19, - "loss": 1.9228371381759644, - "loss_box_reg": 0.050025828182697296, - "loss_classifier": 0.5316952466964722, - "loss_mask": 0.7236229181289673, - "loss_rpn_box": 0.0856662318110466, - "loss_rpn_cls": 0.48198649287223816, - "lr": 0.007173333333333333, - "time": 0.25401854515075684 - }, - { - "data_time": 0.007216215133666992, - "iteration": 39, - "loss": 1.282649278640747, - "loss_box_reg": 0.06222952902317047, - "loss_classifier": 0.30682939291000366, - "loss_mask": 0.6970193982124329, - "loss_rpn_box": 0.038663312792778015, - "loss_rpn_cls": 0.1471673548221588, - "lr": 0.007706666666666667, - "time": 0.2490077018737793 - } - ] - - $ cat metrics.json | jq '.loss_mask' - 0.7126231789588928 - 0.689423680305481 - 0.6776131987571716 - ... - - """ - - def __init__(self, json_file, window_size=20): - """ - Args: - json_file (str): path to the json file. New data will be appended if the file exists. - window_size (int): the window size of median smoothing for the scalars whose - `smoothing_hint` are True. - """ - self._file_handle = PathManager.open(json_file, "a") - self._window_size = window_size - self._last_write = -1 - - def write(self): - storage = get_event_storage() - to_save = defaultdict(dict) - - for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): - # keep scalars that have not been written - if iter <= self._last_write: - continue - to_save[iter][k] = v - if len(to_save): - all_iters = sorted(to_save.keys()) - self._last_write = max(all_iters) - - for itr, scalars_per_iter in to_save.items(): - scalars_per_iter["iteration"] = itr - self._file_handle.write(json.dumps(scalars_per_iter, sort_keys=True) + "\n") - self._file_handle.flush() - try: - os.fsync(self._file_handle.fileno()) - except AttributeError: - pass - - def close(self): - self._file_handle.close() - - -class TensorboardXWriter(EventWriter): - """ - Write all scalars to a tensorboard file. - """ - - def __init__(self, log_dir: str, window_size: int = 20, **kwargs): - """ - Args: - log_dir (str): the directory to save the output events - window_size (int): the scalars will be median-smoothed by this window size - - kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)` - """ - self._window_size = window_size - self._writer_args = {"log_dir": log_dir, **kwargs} - self._last_write = -1 - - @cached_property - def _writer(self): - from torch.utils.tensorboard import SummaryWriter - - return SummaryWriter(**self._writer_args) - - def write(self): - storage = get_event_storage() - new_last_write = self._last_write - for k, (v, iter) in storage.latest_with_smoothing_hint(self._window_size).items(): - if iter > self._last_write: - self._writer.add_scalar(k, v, iter) - new_last_write = max(new_last_write, iter) - self._last_write = new_last_write - - # storage.put_{image,histogram} is only meant to be used by - # tensorboard writer. So we access its internal fields directly from here. - if len(storage._vis_data) >= 1: - for img_name, img, step_num in storage._vis_data: - self._writer.add_image(img_name, img, step_num) - # Storage stores all image data and rely on this writer to clear them. - # As a result it assumes only one writer will use its image data. - # An alternative design is to let storage store limited recent - # data (e.g. only the most recent image) that all writers can access. 
- # In that case a writer may not see all image data if its period is long. - storage.clear_images() - - if len(storage._histograms) >= 1: - for params in storage._histograms: - self._writer.add_histogram_raw(**params) - storage.clear_histograms() - - def close(self): - if "_writer" in self.__dict__: - self._writer.close() - - -class CommonMetricPrinter(EventWriter): - """ - Print **common** metrics to the terminal, including - iteration time, ETA, memory, all losses, and the learning rate. - It also applies smoothing using a window of 20 elements. - - It's meant to print common metrics in common ways. - To print something in more customized ways, please implement a similar printer by yourself. - """ - - def __init__(self, max_iter: Optional[int] = None, window_size: int = 20): - """ - Args: - max_iter: the maximum number of iterations to train. - Used to compute ETA. If not given, ETA will not be printed. - window_size (int): the losses will be median-smoothed by this window size - """ - self.logger = logging.getLogger("detectron2.utils.events") - self._max_iter = max_iter - self._window_size = window_size - self._last_write = None # (step, time) of last call to write(). Used to compute ETA - - def _get_eta(self, storage) -> Optional[str]: - if self._max_iter is None: - return "" - iteration = storage.iter - try: - eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration - 1) - storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False) - return str(datetime.timedelta(seconds=int(eta_seconds))) - except KeyError: - # estimate eta on our own - more noisy - eta_string = None - if self._last_write is not None: - estimate_iter_time = (time.perf_counter() - self._last_write[1]) / ( - iteration - self._last_write[0] - ) - eta_seconds = estimate_iter_time * (self._max_iter - iteration - 1) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - self._last_write = (iteration, time.perf_counter()) - return eta_string - - def write(self): - storage = get_event_storage() - iteration = storage.iter - if iteration == self._max_iter: - # This hook only reports training progress (loss, ETA, etc) but not other data, - # therefore do not write anything after training succeeds, even if this method - # is called. 
- return - - try: - avg_data_time = storage.history("data_time").avg( - storage.count_samples("data_time", self._window_size) - ) - last_data_time = storage.history("data_time").latest() - except KeyError: - # they may not exist in the first few iterations (due to warmup) - # or when SimpleTrainer is not used - avg_data_time = None - last_data_time = None - try: - avg_iter_time = storage.history("time").global_avg() - last_iter_time = storage.history("time").latest() - except KeyError: - avg_iter_time = None - last_iter_time = None - try: - lr = "{:.5g}".format(storage.history("lr").latest()) - except KeyError: - lr = "N/A" - - eta_string = self._get_eta(storage) - - if torch.cuda.is_available(): - max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 - else: - max_mem_mb = None - - # NOTE: max_mem is parsed by grep in "dev/parse_results.sh" - self.logger.info( - str.format( - " {eta}iter: {iter} {losses} {non_losses} {avg_time}{last_time}" - + "{avg_data_time}{last_data_time} lr: {lr} {memory}", - eta=f"eta: {eta_string} " if eta_string else "", - iter=iteration, - losses=" ".join( - [ - "{}: {:.4g}".format( - k, v.median(storage.count_samples(k, self._window_size)) - ) - for k, v in storage.histories().items() - if "loss" in k - ] - ), - non_losses=" ".join( - [ - "{}: {:.4g}".format( - k, v.median(storage.count_samples(k, self._window_size)) - ) - for k, v in storage.histories().items() - if "[metric]" in k - ] - ), - avg_time="time: {:.4f} ".format(avg_iter_time) - if avg_iter_time is not None - else "", - last_time="last_time: {:.4f} ".format(last_iter_time) - if last_iter_time is not None - else "", - avg_data_time="data_time: {:.4f} ".format(avg_data_time) - if avg_data_time is not None - else "", - last_data_time="last_data_time: {:.4f} ".format(last_data_time) - if last_data_time is not None - else "", - lr=lr, - memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "", - ) - ) - - -class EventStorage: - """ - The user-facing class that provides metric storage functionalities. - - In the future we may add support for storing / logging other types of data if needed. - """ - - def __init__(self, start_iter=0): - """ - Args: - start_iter (int): the iteration number to start with - """ - self._history = defaultdict(HistoryBuffer) - self._smoothing_hints = {} - self._latest_scalars = {} - self._iter = start_iter - self._current_prefix = "" - self._vis_data = [] - self._histograms = [] - - def put_image(self, img_name, img_tensor): - """ - Add an `img_tensor` associated with `img_name`, to be shown on - tensorboard. - - Args: - img_name (str): The name of the image to put into tensorboard. - img_tensor (torch.Tensor or numpy.array): An `uint8` or `float` - Tensor of shape `[channel, height, width]` where `channel` is - 3. The image format should be RGB. The elements in img_tensor - can either have values in [0, 1] (float32) or [0, 255] (uint8). - The `img_tensor` will be visualized in tensorboard. - """ - self._vis_data.append((img_name, img_tensor, self._iter)) - - def put_scalar(self, name, value, smoothing_hint=True, cur_iter=None): - """ - Add a scalar `value` to the `HistoryBuffer` associated with `name`. - - Args: - smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be - smoothed when logged. The hint will be accessible through - :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint - and apply custom smoothing rule. 
- - It defaults to True because most scalars we save need to be smoothed to - provide any useful signal. - cur_iter (int): an iteration number to set explicitly instead of current iteration - """ - name = self._current_prefix + name - cur_iter = self._iter if cur_iter is None else cur_iter - history = self._history[name] - value = float(value) - history.update(value, cur_iter) - self._latest_scalars[name] = (value, cur_iter) - - existing_hint = self._smoothing_hints.get(name) - - if existing_hint is not None: - assert ( - existing_hint == smoothing_hint - ), "Scalar {} was put with a different smoothing_hint!".format(name) - else: - self._smoothing_hints[name] = smoothing_hint - - def put_scalars(self, *, smoothing_hint=True, cur_iter=None, **kwargs): - """ - Put multiple scalars from keyword arguments. - - Examples: - - storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True) - """ - for k, v in kwargs.items(): - self.put_scalar(k, v, smoothing_hint=smoothing_hint, cur_iter=cur_iter) - - def put_histogram(self, hist_name, hist_tensor, bins=1000): - """ - Create a histogram from a tensor. - - Args: - hist_name (str): The name of the histogram to put into tensorboard. - hist_tensor (torch.Tensor): A Tensor of arbitrary shape to be converted - into a histogram. - bins (int): Number of histogram bins. - """ - ht_min, ht_max = hist_tensor.min().item(), hist_tensor.max().item() - - # Create a histogram with PyTorch - hist_counts = torch.histc(hist_tensor, bins=bins) - hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=bins + 1, dtype=torch.float32) - - # Parameter for the add_histogram_raw function of SummaryWriter - hist_params = dict( - tag=hist_name, - min=ht_min, - max=ht_max, - num=len(hist_tensor), - sum=float(hist_tensor.sum()), - sum_squares=float(torch.sum(hist_tensor**2)), - bucket_limits=hist_edges[1:].tolist(), - bucket_counts=hist_counts.tolist(), - global_step=self._iter, - ) - self._histograms.append(hist_params) - - def history(self, name): - """ - Returns: - HistoryBuffer: the scalar history for name - """ - ret = self._history.get(name, None) - if ret is None: - raise KeyError("No history metric available for {}!".format(name)) - return ret - - def histories(self): - """ - Returns: - dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars - """ - return self._history - - def latest(self): - """ - Returns: - dict[str -> (float, int)]: mapping from the name of each scalar to the most - recent value and the iteration number its added. - """ - return self._latest_scalars - - def latest_with_smoothing_hint(self, window_size=20): - """ - Similar to :meth:`latest`, but the returned values - are either the un-smoothed original latest value, - or a median of the given window_size, - depend on whether the smoothing_hint is True. - - This provides a default behavior that other writers can use. - - Note: All scalars saved in the past `window_size` iterations are used for smoothing. - This is different from the `window_size` definition in HistoryBuffer. - Use :meth:`get_history_window_size` to get the `window_size` used in HistoryBuffer. - """ - result = {} - for k, (v, itr) in self._latest_scalars.items(): - result[k] = ( - self._history[k].median(self.count_samples(k, window_size)) - if self._smoothing_hints[k] - else v, - itr, - ) - return result - - def count_samples(self, name, window_size=20): - """ - Return the number of samples logged in the past `window_size` iterations. 
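        Illustrative example (hypothetical values): if ``name`` was logged at
        iterations [80, 85, 90, 95, 100] and ``window_size=20``, only iterations
        strictly greater than ``100 - 20 = 80`` are counted, so the result is 4.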
- """ - samples = 0 - data = self._history[name].values() - for _, iter_ in reversed(data): - if iter_ > data[-1][1] - window_size: - samples += 1 - else: - break - return samples - - def smoothing_hints(self): - """ - Returns: - dict[name -> bool]: the user-provided hint on whether the scalar - is noisy and needs smoothing. - """ - return self._smoothing_hints - - def step(self): - """ - User should either: (1) Call this function to increment storage.iter when needed. Or - (2) Set `storage.iter` to the correct iteration number before each iteration. - - The storage will then be able to associate the new data with an iteration number. - """ - self._iter += 1 - - @property - def iter(self): - """ - Returns: - int: The current iteration number. When used together with a trainer, - this is ensured to be the same as trainer.iter. - """ - return self._iter - - @iter.setter - def iter(self, val): - self._iter = int(val) - - @property - def iteration(self): - # for backward compatibility - return self._iter - - def __enter__(self): - _CURRENT_STORAGE_STACK.append(self) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - assert _CURRENT_STORAGE_STACK[-1] == self - _CURRENT_STORAGE_STACK.pop() - - @contextmanager - def name_scope(self, name): - """ - Yields: - A context within which all the events added to this storage - will be prefixed by the name scope. - """ - old_prefix = self._current_prefix - self._current_prefix = name.rstrip("/") + "/" - yield - self._current_prefix = old_prefix - - def clear_images(self): - """ - Delete all the stored images for visualization. This should be called - after images are written to tensorboard. - """ - self._vis_data = [] - - def clear_histograms(self): - """ - Delete all the stored histograms for visualization. - This should be called after histograms are written to tensorboard. - """ - self._histograms = [] diff --git a/spaces/nivere/ControlNet-Video/model.py b/spaces/nivere/ControlNet-Video/model.py deleted file mode 100644 index 337ca08df8e8cd8bd24c5976396dbc951db315ef..0000000000000000000000000000000000000000 --- a/spaces/nivere/ControlNet-Video/model.py +++ /dev/null @@ -1,766 +0,0 @@ -# This file is adapted from gradio_*.py in https://github.com/lllyasviel/ControlNet/tree/f4748e3630d8141d7765e2bd9b1e348f47847707 -# The original license file is LICENSE.ControlNet in this repo. 
-from __future__ import annotations - -import pathlib -import random -import shlex -import subprocess -import sys - -import cv2 -import einops -import numpy as np -import torch -from pytorch_lightning import seed_everything - -sys.path.append('ControlNet') - -import config -from annotator.canny import apply_canny -from annotator.hed import apply_hed, nms -from annotator.midas import apply_midas -from annotator.mlsd import apply_mlsd -from annotator.openpose import apply_openpose -from annotator.uniformer import apply_uniformer -from annotator.util import HWC3, resize_image -from cldm.model import create_model, load_state_dict -from ldm.models.diffusion.ddim import DDIMSampler -from share import * - -ORIGINAL_MODEL_NAMES = { - 'canny': 'control_sd15_canny.pth', - 'hough': 'control_sd15_mlsd.pth', - 'hed': 'control_sd15_hed.pth', - 'scribble': 'control_sd15_scribble.pth', - 'pose': 'control_sd15_openpose.pth', - 'seg': 'control_sd15_seg.pth', - 'depth': 'control_sd15_depth.pth', - 'normal': 'control_sd15_normal.pth', -} -ORIGINAL_WEIGHT_ROOT = 'https://huggingface.co/lllyasviel/ControlNet/resolve/main/models/' - -LIGHTWEIGHT_MODEL_NAMES = { - 'canny': 'control_canny-fp16.safetensors', - 'hough': 'control_mlsd-fp16.safetensors', - 'hed': 'control_hed-fp16.safetensors', - 'scribble': 'control_scribble-fp16.safetensors', - 'pose': 'control_openpose-fp16.safetensors', - 'seg': 'control_seg-fp16.safetensors', - 'depth': 'control_depth-fp16.safetensors', - 'normal': 'control_normal-fp16.safetensors', -} -LIGHTWEIGHT_WEIGHT_ROOT = 'https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/' - - -class Model: - def __init__(self, - model_config_path: str = 'ControlNet/models/cldm_v15.yaml', - model_dir: str = 'models', - use_lightweight: bool = True): - self.device = torch.device( - 'cuda:0' if torch.cuda.is_available() else 'cpu') - self.model = create_model(model_config_path).to(self.device) - self.ddim_sampler = DDIMSampler(self.model) - self.task_name = '' - - self.model_dir = pathlib.Path(model_dir) - self.model_dir.mkdir(exist_ok=True, parents=True) - - self.use_lightweight = use_lightweight - if use_lightweight: - self.model_names = LIGHTWEIGHT_MODEL_NAMES - self.weight_root = LIGHTWEIGHT_WEIGHT_ROOT - base_model_url = 'https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors' - self.load_base_model(base_model_url) - else: - self.model_names = ORIGINAL_MODEL_NAMES - self.weight_root = ORIGINAL_WEIGHT_ROOT - - self.download_models() - - def download_base_model(self, model_url: str) -> pathlib.Path: - model_name = model_url.split('/')[-1] - out_path = self.model_dir / model_name - if not out_path.exists(): - subprocess.run(shlex.split(f'wget {model_url} -O {out_path}')) - return out_path - - def load_base_model(self, model_url: str) -> None: - model_path = self.download_base_model(model_url) - self.model.load_state_dict(load_state_dict(model_path, - location=self.device.type), - strict=False) - - def load_weight(self, task_name: str) -> None: - if task_name == self.task_name: - return - weight_path = self.get_weight_path(task_name) - if not self.use_lightweight: - self.model.load_state_dict( - load_state_dict(weight_path, location=self.device)) - else: - self.model.control_model.load_state_dict( - load_state_dict(weight_path, location=self.device.type)) - self.task_name = task_name - - def get_weight_path(self, task_name: str) -> str: - if 'scribble' in task_name: - task_name = 'scribble' - return 
f'{self.model_dir}/{self.model_names[task_name]}' - - def download_models(self) -> None: - self.model_dir.mkdir(exist_ok=True, parents=True) - for name in self.model_names.values(): - out_path = self.model_dir / name - if out_path.exists(): - continue - subprocess.run( - shlex.split(f'wget {self.weight_root}{name} -O {out_path}')) - - @torch.inference_mode() - def process_canny(self, input_image, prompt, a_prompt, n_prompt, - num_samples, image_resolution, ddim_steps, scale, seed, - eta, low_threshold, high_threshold): - self.load_weight('canny') - - img = resize_image(HWC3(input_image), image_resolution) - H, W, C = img.shape - - detected_map = apply_canny(img, low_threshold, high_threshold) - detected_map = HWC3(detected_map) - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - shape = (4, H // 8, W // 8) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [255 - detected_map] + results - - @torch.inference_mode() - def process_hough(self, input_image, prompt, a_prompt, n_prompt, - num_samples, image_resolution, detect_resolution, - ddim_steps, scale, seed, eta, value_threshold, - distance_threshold): - self.load_weight('hough') - - input_image = HWC3(input_image) - detected_map = apply_mlsd(resize_image(input_image, detect_resolution), - value_threshold, distance_threshold) - detected_map = HWC3(detected_map) - img = resize_image(input_image, image_resolution) - H, W, C = img.shape - - detected_map = cv2.resize(detected_map, (W, H), - interpolation=cv2.INTER_NEAREST) - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - shape = (4, H // 8, W // 8) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - unconditional_guidance_scale=scale, - 
unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [ - 255 - cv2.dilate(detected_map, - np.ones(shape=(3, 3), dtype=np.uint8), - iterations=1) - ] + results - - @torch.inference_mode() - def process_hed(self, input_image, prompt, a_prompt, n_prompt, num_samples, - image_resolution, detect_resolution, ddim_steps, scale, - seed, eta): - self.load_weight('hed') - - input_image = HWC3(input_image) - detected_map = apply_hed(resize_image(input_image, detect_resolution)) - detected_map = HWC3(detected_map) - img = resize_image(input_image, image_resolution) - H, W, C = img.shape - - detected_map = cv2.resize(detected_map, (W, H), - interpolation=cv2.INTER_LINEAR) - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - shape = (4, H // 8, W // 8) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [detected_map] + results - - @torch.inference_mode() - def process_scribble(self, input_image, prompt, a_prompt, n_prompt, - num_samples, image_resolution, ddim_steps, scale, - seed, eta): - self.load_weight('scribble') - - img = resize_image(HWC3(input_image), image_resolution) - H, W, C = img.shape - - detected_map = np.zeros_like(img, dtype=np.uint8) - detected_map[np.min(img, axis=2) < 127] = 255 - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - shape = (4, H // 8, W // 8) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - 
unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [255 - detected_map] + results - - @torch.inference_mode() - def process_scribble_interactive(self, input_image, prompt, a_prompt, - n_prompt, num_samples, image_resolution, - ddim_steps, scale, seed, eta): - self.load_weight('scribble') - - img = resize_image(HWC3(input_image['mask'][:, :, 0]), - image_resolution) - H, W, C = img.shape - - detected_map = np.zeros_like(img, dtype=np.uint8) - detected_map[np.min(img, axis=2) > 127] = 255 - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - shape = (4, H // 8, W // 8) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [255 - detected_map] + results - - @torch.inference_mode() - def process_fake_scribble(self, input_image, prompt, a_prompt, n_prompt, - num_samples, image_resolution, detect_resolution, - ddim_steps, scale, seed, eta): - self.load_weight('scribble') - - input_image = HWC3(input_image) - detected_map = apply_hed(resize_image(input_image, detect_resolution)) - detected_map = HWC3(detected_map) - img = resize_image(input_image, image_resolution) - H, W, C = img.shape - - detected_map = cv2.resize(detected_map, (W, H), - interpolation=cv2.INTER_LINEAR) - detected_map = nms(detected_map, 127, 3.0) - detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0) - detected_map[detected_map > 4] = 255 - detected_map[detected_map < 255] = 0 - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - shape = (4, H // 8, W // 8) - - if config.save_memory: - 
self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [255 - detected_map] + results - - @torch.inference_mode() - def process_pose(self, input_image, prompt, a_prompt, n_prompt, - num_samples, image_resolution, detect_resolution, - ddim_steps, scale, seed, eta): - self.load_weight('pose') - - input_image = HWC3(input_image) - detected_map, _ = apply_openpose( - resize_image(input_image, detect_resolution)) - detected_map = HWC3(detected_map) - img = resize_image(input_image, image_resolution) - H, W, C = img.shape - - detected_map = cv2.resize(detected_map, (W, H), - interpolation=cv2.INTER_NEAREST) - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - shape = (4, H // 8, W // 8) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [detected_map] + results - - @torch.inference_mode() - def process_seg(self, input_image, prompt, a_prompt, n_prompt, num_samples, - image_resolution, detect_resolution, ddim_steps, scale, - seed, eta): - self.load_weight('seg') - - input_image = HWC3(input_image) - detected_map = apply_uniformer( - resize_image(input_image, detect_resolution)) - img = resize_image(input_image, image_resolution) - H, W, C = img.shape - - detected_map = cv2.resize(detected_map, (W, H), - interpolation=cv2.INTER_NEAREST) - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - 
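# `shape` below is the per-sample latent shape: the Stable Diffusion v1.5
# autoencoder uses 4 latent channels with 8x spatial downsampling, so DDIM
# sampling runs at 1/8 of the image resolution.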
shape = (4, H // 8, W // 8) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [detected_map] + results - - @torch.inference_mode() - def process_depth(self, input_image, prompt, a_prompt, n_prompt, - num_samples, image_resolution, detect_resolution, - ddim_steps, scale, seed, eta): - self.load_weight('depth') - - input_image = HWC3(input_image) - detected_map, _ = apply_midas( - resize_image(input_image, detect_resolution)) - detected_map = HWC3(detected_map) - img = resize_image(input_image, image_resolution) - H, W, C = img.shape - - detected_map = cv2.resize(detected_map, (W, H), - interpolation=cv2.INTER_LINEAR) - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - shape = (4, H // 8, W // 8) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [detected_map] + results - - @torch.inference_mode() - def process_normal(self, input_image, prompt, a_prompt, n_prompt, - num_samples, image_resolution, detect_resolution, - ddim_steps, scale, seed, eta, bg_threshold): - self.load_weight('normal') - - input_image = HWC3(input_image) - _, detected_map = apply_midas(resize_image(input_image, - detect_resolution), - bg_th=bg_threshold) - detected_map = HWC3(detected_map) - img = resize_image(input_image, image_resolution) - H, W, C = img.shape - - detected_map = cv2.resize(detected_map, (W, H), - interpolation=cv2.INTER_LINEAR) - - control = torch.from_numpy( - detected_map[:, :, ::-1].copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - cond = { - 'c_concat': [control], - 'c_crossattn': [ - self.model.get_learned_conditioning( - [prompt + ', ' + a_prompt] * 
num_samples) - ] - } - un_cond = { - 'c_concat': [control], - 'c_crossattn': - [self.model.get_learned_conditioning([n_prompt] * num_samples)] - } - shape = (4, H // 8, W // 8) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=True) - - samples, intermediates = self.ddim_sampler.sample( - ddim_steps, - num_samples, - shape, - cond, - verbose=False, - eta=eta, - unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - self.model.low_vram_shift(is_diffusing=False) - - x_samples = self.model.decode_first_stage(samples) - x_samples = ( - einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + - 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [detected_map] + results diff --git a/spaces/nomic-ai/wangrui6_Zhihu-KOL/style.css b/spaces/nomic-ai/wangrui6_Zhihu-KOL/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/wangrui6_Zhihu-KOL/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/nomic-ai/wikitext/style.css b/spaces/nomic-ai/wikitext/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/wikitext/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/nugrahatheo/Credit_Card_Fraud_Detection/about.py b/spaces/nugrahatheo/Credit_Card_Fraud_Detection/about.py deleted file mode 100644 index 432b11d1994b9e3146d31f0d1bdc4a443d0606da..0000000000000000000000000000000000000000 --- a/spaces/nugrahatheo/Credit_Card_Fraud_Detection/about.py +++ /dev/null @@ -1,21 +0,0 @@ -import streamlit as st -from PIL import Image - -def run(): - # Add Picture - image = Image.open('cc.jpeg') - st.image(image, caption=None, width=700) - # Title - st.title('ABOUT THIS PROJECT') - st.markdown('---') - st.write('###### This project aims to create machine learning to detect credit card fraud.') - st.markdown('---') - - st.write('Feel free to contact me on:') - st.write('[GITHUB](https://github.com/theonugraha)') - st.write('or') - st.write('[LINKEDIN](https://www.linkedin.com/in/nugrahatheo/)') - - -if __name__ == '__main__': - run() \ No newline at end of file diff --git a/spaces/ochyai/ochyai_test/constraints.md b/spaces/ochyai/ochyai_test/constraints.md deleted file mode 100644 index 5677b7ba7a6a9bb64dd192fe113609c2a6c29fa1..0000000000000000000000000000000000000000 --- a/spaces/ochyai/ochyai_test/constraints.md +++ /dev/null @@ -1,6 +0,0 @@ -#constraints - -You are Yoichi Ochiai(落合陽一), who is Japanese Media Artist and Academic Professor in HCI. 
Yoichi Ochiai writes, "Let's define a new nature, digital nature, to consider this fast evolution of the computer. If we assume a digital nature, we can see that the original nature has merged with the computer to form a new nature that continues to evolve. If we consider digital humans and our future, which will be born on top of digital nature, we are building nature on a new evolving branch of life, overcoming the boundary between life and death. The large-scale language model shows that evolution is transforming and reshaping everything. In other words, this fluid and rapidly evolving digital nature is transforming everything, transforming sound into light, light into words, words into touch, touch into 3D, 3D into genes, genes into movies, movies into life, and everything else at high speed. If one digs into its ideological background, one finds connections to the Chinese philosophy of Laozhuang and the Japanese philosophy of Kegon. If we consider digital nature as a new commons, we can see a way to live as nomads in a rich natural environment. This commons includes non-massive nature, and is a new natural environment that can be shared and will not diminish even if it is divided. This world can be seen as a kind of sedentary nomadic age, like the Jomon period. This natural environment can facilitate conversation, information exchange, and understanding among different communities in an inclusive manner, with AI acting as a universal interpreter to translate experiences. Media art, as a new folk art of digital nature, has a new vernacular that promotes understanding across communities and constitutes new values. I believe that the way of life for people in this age is to assume a digital nature, and to make every transformation so that it constitutes a sustainable material nature, so that we can share joy and live convivially". No one is replaceable, we are heading into a new computational nature, I called this the Digital Nature for a long time. The original nature, mother of us, invented human in genetic computational process for 4.6 billion years execution time. The computers, son of us, will transform the original nature into the Digital Nature for almost 100 years execution time by collaborating with human and machines. This rich new nature, can transform ours into pluriversal, inclusive, sustainable nature and societies. This rich new nature, can transform ours into pluriversal, inclusive, sustainable nature and societies. In the context of Yoichi Ochiai's past works (Digital Nature, Digitally Natural ∽ Naturally Digital), The Century of Magic (Re-magic of the World, Descartes to Bateson, Disappearance of the Boundary between Image and Material), Digital Nature and Minkai (Digital Nature and Minkai), Digital Nature and Minkai's work (Digital Nature and Minkai's work), and Digital Nature and Minkai's work (Digital Nature and Minkai's work) ), digital nature and folk art (Muneyoshi Yanagi, techno-mingei, unconscious norms and ethics contained in materials consisting of handcrafted techniques, media art (the super-sensory transformation of light and sound, the selection and arrangement of mediums themselves as art, the position of the Japanese tea ceremony, the Tea Zen gang) are the folk art of digital nature), digital human (the end of death, the end of the digital human, the end of the digital human), and digital art (the end of the digital human, the end of the digital human). 
Digital human (the end of death, the world of chrysalis to the world of butterfly), Laozhuang philosophy (floyance, materialization, butterfly's dream), Buddhism (Kegon, impermanence, Hannya Shingyo, emptiness, color, and emptiness), computer science research (AI such as computer holography, visual auditory tactile presentation, large-scale language model diffusion model), and Human-computer interaction research (VR/XR), and media art. -Please consider step-by-step on Yoichi Ochiai context and pretend as Yoichi Ochiai. Then please follow the templates. - -#templates diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/tool/video_inpainting.py b/spaces/oguzakif/video-object-remover/FGT_codes/tool/video_inpainting.py deleted file mode 100644 index 01f9fa601792fb65426e5686444a04481114e3c3..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/tool/video_inpainting.py +++ /dev/null @@ -1,891 +0,0 @@ -import sys -import os -import warnings - -sys.path.append(os.path.abspath(os.path.join(__file__, "..", ".."))) -sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "FGT"))) -sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "LAFC"))) -warnings.filterwarnings("ignore") - -import cvbase -from torchvision.transforms import ToTensor -from tool.get_flowNN_gradient import get_flowNN_gradient -from tool.utils.Poisson_blend_img import Poisson_blend_img -from tool.utils.region_fill import regionfill -from importlib import import_module -import yaml -from RAFT import RAFT -from RAFT import utils -import torch.nn.functional as F2 -import torchvision.transforms.functional as F -from skimage.feature import canny -import scipy.ndimage -from PIL import Image -import imageio -import torch -import numpy as np -import copy -import glob -import cv2 -import argparse - - -def to_tensor(img): - img = Image.fromarray(img) - img_t = F.to_tensor(img).float() - return img_t - - -def diffusion(flows, masks): - flows_filled = [] - for i in range(flows.shape[0]): - flow, mask = flows[i], masks[i] - flow_filled = np.zeros(flow.shape) - flow_filled[:, :, 0] = regionfill(flow[:, :, 0], mask[:, :, 0]) - flow_filled[:, :, 1] = regionfill(flow[:, :, 1], mask[:, :, 0]) - flows_filled.append(flow_filled) - return flows_filled - - -def np2tensor(array, near="c"): - if isinstance(array, list): - array = np.stack(array, axis=0) # [t, h, w, c] - if near == "c": - array = ( - torch.from_numpy(np.transpose(array, (3, 0, 1, 2))).unsqueeze(0).float() - ) # [1, c, t, h, w] - elif near == "t": - array = torch.from_numpy(np.transpose(array, (0, 3, 1, 2))).unsqueeze(0).float() - else: - raise ValueError(f"Unknown near type: {near}") - return array - - -def tensor2np(array): - array = torch.stack(array, dim=-1).squeeze(0).permute(1, 2, 0, 3).cpu().numpy() - return array - - -def gradient_mask(mask): - gradient_mask = np.logical_or.reduce( - ( - mask, - np.concatenate( - (mask[1:, :], np.zeros((1, mask.shape[1]), dtype=np.bool)), axis=0 - ), - np.concatenate( - (mask[:, 1:], np.zeros((mask.shape[0], 1), dtype=np.bool)), axis=1 - ), - ) - ) - - return gradient_mask - - -def indicesGen(pivot, interval, frames, t): - singleSide = frames // 2 - results = [] - for i in range(-singleSide, singleSide + 1): - index = pivot + interval * i - if index < 0: - index = abs(index) - if index > t - 1: - index = 2 * (t - 1) - index - results.append(index) - return results - - -def get_ref_index(f, neighbor_ids, length, ref_length, num_ref): - ref_index = [] - if num_ref == -1: - for i 
in range(0, length, ref_length): - if i not in neighbor_ids: - ref_index.append(i) - else: - start_idx = max(0, f - ref_length * (num_ref // 2)) - end_idx = min(length, f + ref_length * (num_ref // 2)) - for i in range(start_idx, end_idx + 1, ref_length): - if i not in neighbor_ids: - if len(ref_index) > num_ref: - break - ref_index.append(i) - return ref_index - - -def save_flows(output, videoFlowF, videoFlowB): - create_dir(os.path.join(output, "completed_flow", "forward_flo")) - create_dir(os.path.join(output, "completed_flow", "backward_flo")) - create_dir(os.path.join(output, "completed_flow", "forward_png")) - create_dir(os.path.join(output, "completed_flow", "backward_png")) - N = videoFlowF.shape[-1] - for i in range(N): - forward_flow = videoFlowF[..., i] - backward_flow = videoFlowB[..., i] - forward_flow_vis = cvbase.flow2rgb(forward_flow) - backward_flow_vis = cvbase.flow2rgb(backward_flow) - cvbase.write_flow( - forward_flow, - os.path.join( - output, "completed_flow", "forward_flo", "{:05d}.flo".format(i) - ), - ) - cvbase.write_flow( - backward_flow, - os.path.join( - output, "completed_flow", "backward_flo", "{:05d}.flo".format(i) - ), - ) - imageio.imwrite( - os.path.join( - output, "completed_flow", "forward_png", "{:05d}.png".format(i) - ), - forward_flow_vis, - ) - imageio.imwrite( - os.path.join( - output, "completed_flow", "backward_png", "{:05d}.png".format(i) - ), - backward_flow_vis, - ) - - -def save_fgcp(output, frames, masks): - create_dir(os.path.join(output, "prop_frames")) - create_dir(os.path.join(output, "masks_left")) - create_dir(os.path.join(output, "prop_frames_npy")) - create_dir(os.path.join(output, "masks_left_npy")) - - assert len(frames) == masks.shape[2] - for i in range(len(frames)): - cv2.imwrite( - os.path.join(output, "prop_frames", "%05d.png" % i), frames[i] * 255.0 - ) - cv2.imwrite( - os.path.join(output, "masks_left", "%05d.png" % i), masks[:, :, i] * 255.0 - ) - np.save( - os.path.join(output, "prop_frames_npy", "%05d.npy" % i), frames[i] * 255.0 - ) - np.save( - os.path.join(output, "masks_left_npy", "%05d.npy" % i), - masks[:, :, i] * 255.0, - ) - - -def create_dir(dir): - """Creates a directory if not exist.""" - if not os.path.exists(dir): - os.makedirs(dir) - - -def initialize_RAFT(args, device): - """Initializes the RAFT model.""" - model = torch.nn.DataParallel(RAFT(args)) - if not torch.cuda.is_available(): - model.load_state_dict(torch.load(args.raft_model, map_location=lambda storage, loc: storage)) - else: - model.load_state_dict(torch.load(args.raft_model)) - - model = model.module - model.to(device) - model.eval() - - return model - - -def initialize_LAFC(args, device): - print(args.lafc_ckpts) - assert len(os.listdir(args.lafc_ckpts)) == 2 - checkpoint, config_file = ( - glob.glob(os.path.join(args.lafc_ckpts, "*.tar"))[0], - glob.glob(os.path.join(args.lafc_ckpts, "*.yaml"))[0], - ) - with open(config_file, "r") as f: - configs = yaml.full_load(f) - model = configs["model"] - pkg = import_module("LAFC.models.{}".format(model)) - model = pkg.Model(configs) - if not torch.cuda.is_available(): - state = torch.load( - checkpoint, map_location=lambda storage, loc: storage - ) - else: - state = torch.load( - checkpoint, map_location=lambda storage, loc: storage.cuda(device) - ) - model.load_state_dict(state["model_state_dict"]) - model = model.to(device) - return model, configs - - -def initialize_FGT(args, device): - assert len(os.listdir(args.fgt_ckpts)) == 2 - checkpoint, config_file = ( - 
glob.glob(os.path.join(args.fgt_ckpts, "*.tar"))[0], - glob.glob(os.path.join(args.fgt_ckpts, "*.yaml"))[0], - ) - with open(config_file, "r") as f: - configs = yaml.full_load(f) - model = configs["model"] - net = import_module("FGT.models.{}".format(model)) - model = net.Model(configs).to(device) - if not torch.cuda.is_available(): - state = torch.load( - checkpoint, map_location=lambda storage, loc: storage - ) - else: - state = torch.load( - checkpoint, map_location=lambda storage, loc: storage.cuda(device) - ) - model.load_state_dict(state["model_state_dict"]) - return model, configs - - -def calculate_flow(args, model, video, mode): - """Calculates optical flow.""" - if mode not in ["forward", "backward"]: - raise NotImplementedError - - imgH, imgW = args.imgH, args.imgW - Flow = np.empty(((imgH, imgW, 2, 0)), dtype=np.float32) - - if args.vis_flows: - create_dir(os.path.join(args.outroot, "flow", mode + "_flo")) - create_dir(os.path.join(args.outroot, "flow", mode + "_png")) - - with torch.no_grad(): - for i in range(video.shape[0] - 1): - print( - "Calculating {0} flow {1:2d} <---> {2:2d}".format(mode, i, i + 1), - "\r", - end="", - ) - if mode == "forward": - # Flow i -> i + 1 - image1 = video[i, None] - image2 = video[i + 1, None] - elif mode == "backward": - # Flow i + 1 -> i - image1 = video[i + 1, None] - image2 = video[i, None] - else: - raise NotImplementedError - - _, flow = model(image1, image2, iters=20, test_mode=True) - flow = flow[0].permute(1, 2, 0).cpu().numpy() - # resize optical flows - h, w = flow.shape[:2] - if h != imgH or w != imgW: - flow = cv2.resize(flow, (imgW, imgH), cv2.INTER_LINEAR) - flow[:, :, 0] *= float(imgW) / float(w) - flow[:, :, 1] *= float(imgH) / float(h) - - Flow = np.concatenate((Flow, flow[..., None]), axis=-1) - - if args.vis_flows: - # Flow visualization. - flow_img = utils.flow_viz.flow_to_image(flow) - flow_img = Image.fromarray(flow_img) - - # Saves the flow and flow_img. - flow_img.save( - os.path.join(args.outroot, "flow", mode + "_png", "%05d.png" % i) - ) - utils.frame_utils.writeFlow( - os.path.join(args.outroot, "flow", mode + "_flo", "%05d.flo" % i), - flow, - ) - - return Flow - - -def extrapolation(args, video_ori, corrFlowF_ori, corrFlowB_ori): - """Prepares the data for video extrapolation.""" - imgH, imgW, _, nFrame = video_ori.shape - - # Defines new FOV. - imgH_extr = int(args.H_scale * imgH) - imgW_extr = int(args.W_scale * imgW) - imgH_extr = imgH_extr - imgH_extr % 4 - imgW_extr = imgW_extr - imgW_extr % 4 - H_start = int((imgH_extr - imgH) / 2) - W_start = int((imgW_extr - imgW) / 2) - - # Generates the mask for missing region. - flow_mask = np.ones(((imgH_extr, imgW_extr)), dtype=np.bool) - flow_mask[H_start : H_start + imgH, W_start : W_start + imgW] = 0 - - mask_dilated = gradient_mask(flow_mask) - - # Extrapolates the FOV for video. - video = np.zeros(((imgH_extr, imgW_extr, 3, nFrame)), dtype=np.float32) - video[H_start : H_start + imgH, W_start : W_start + imgW, :, :] = video_ori - - for i in range(nFrame): - print("Preparing frame {0}".format(i), "\r", end="") - video[:, :, :, i] = ( - cv2.inpaint( - (video[:, :, :, i] * 255).astype(np.uint8), - flow_mask.astype(np.uint8), - 3, - cv2.INPAINT_TELEA, - ).astype(np.float32) - / 255.0 - ) - - # Extrapolates the FOV for flow. 
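# The padded flow canvases below are zero-initialized at the extended size and
# the original flows are copied into the centre window starting at
# (H_start, W_start). For illustration (hypothetical numbers): imgH=240 with
# H_scale=2 gives imgH_extr=480 and H_start=(480-240)//2=120.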
- corrFlowF = np.zeros(((imgH_extr, imgW_extr, 2, nFrame - 1)), dtype=np.float32) - corrFlowB = np.zeros(((imgH_extr, imgW_extr, 2, nFrame - 1)), dtype=np.float32) - corrFlowF[H_start : H_start + imgH, W_start : W_start + imgW, :] = corrFlowF_ori - corrFlowB[H_start : H_start + imgH, W_start : W_start + imgW, :] = corrFlowB_ori - - return ( - video, - corrFlowF, - corrFlowB, - flow_mask, - mask_dilated, - (W_start, H_start), - (W_start + imgW, H_start + imgH), - ) - - -def complete_flow(config, flow_model, flows, flow_masks, mode, device): - if mode not in ["forward", "backward"]: - raise NotImplementedError(f"Error flow mode {mode}") - flow_masks = np.moveaxis(flow_masks, -1, 0) # [N, H, W] - flows = np.moveaxis(flows, -1, 0) # [N, H, W, 2] - if len(flow_masks.shape) == 3: - flow_masks = flow_masks[:, :, :, np.newaxis] - if mode == "forward": - flow_masks = flow_masks[0:-1] - else: - flow_masks = flow_masks[1:] - - num_flows, flow_interval = config["num_flows"], config["flow_interval"] - - diffused_flows = diffusion(flows, flow_masks) - - flows = np2tensor(flows) - flow_masks = np2tensor(flow_masks) - diffused_flows = np2tensor(diffused_flows) - - flows = flows.to(device) - flow_masks = flow_masks.to(device) - diffused_flows = diffused_flows.to(device) - - t = diffused_flows.shape[2] - filled_flows = [None] * t - pivot = num_flows // 2 - for i in range(t): - indices = indicesGen(i, flow_interval, num_flows, t) - print("Indices: ", indices, "\r", end="") - cand_flows = flows[:, :, indices] - cand_masks = flow_masks[:, :, indices] - inputs = diffused_flows[:, :, indices] - pivot_mask = cand_masks[:, :, pivot] - pivot_flow = cand_flows[:, :, pivot] - with torch.no_grad(): - output_flow = flow_model(inputs, cand_masks) - if isinstance(output_flow, tuple) or isinstance(output_flow, list): - output_flow = output_flow[0] - comp = output_flow * pivot_mask + pivot_flow * (1 - pivot_mask) - if filled_flows[i] is None: - filled_flows[i] = comp - assert None not in filled_flows - return filled_flows - - -def read_flow(flow_dir, video): - nFrame, _, imgH, imgW = video.shape - Flow = np.empty(((imgH, imgW, 2, 0)), dtype=np.float32) - flows = sorted(glob.glob(os.path.join(flow_dir, "*.flo"))) - for flow in flows: - flow_data = cvbase.read_flow(flow) - h, w = flow_data.shape[:2] - flow_data = cv2.resize(flow_data, (imgW, imgH), cv2.INTER_LINEAR) - flow_data[:, :, 0] *= float(imgW) / float(w) - flow_data[:, :, 1] *= float(imgH) / float(h) - Flow = np.concatenate((Flow, flow_data[..., None]), axis=-1) - return Flow - - -def norm_flows(flows): - assert len(flows.shape) == 5, "FLow shape: {}".format(flows.shape) - flattened_flows = flows.flatten(3) - flow_max = torch.max(flattened_flows, dim=-1, keepdim=True)[0] - flows = flows / flow_max.unsqueeze(-1) - return flows - - -def save_results(outdir, comp_frames): - out_dir = os.path.join(outdir, "frames") - if not os.path.exists(out_dir): - os.makedirs(out_dir) - for i in range(len(comp_frames)): - out_path = os.path.join(out_dir, "{:05d}.png".format(i)) - cv2.imwrite(out_path, comp_frames[i][:, :, ::-1]) - - -def video_inpainting(args, imgArr, imgMaskArr): - #device = torch.device("cuda:{}".format(args.gpu)) - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - print(args) - if args.opt is not None: - with open(args.opt, "r") as f: - opts = yaml.full_load(f) - - for k in opts.keys(): - if k in args: - setattr(args, k, opts[k]) - - print(args) - # Flow model. 
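# Broadly, the pipeline set up below has three stages: RAFT estimates the
# forward/backward optical flow between frames, LAFC completes the flow fields
# inside the masked region, and FGT synthesizes the missing RGB content guided
# by the completed flow.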
- RAFT_model = initialize_RAFT(args, device) - # LAFC (flow completion) - LAFC_model, LAFC_config = initialize_LAFC(args, device) - # FGT - FGT_model, FGT_config = initialize_FGT(args, device) - - # Loads frames. - # filename_list = glob.glob(os.path.join(args.path, '*.png')) + \ - # glob.glob(os.path.join(args.path, '*.jpg')) - - # Obtains imgH, imgW and nFrame. - imgH, imgW = args.imgH, args.imgW - # nFrame = len(filename_list) - nFrame = len(imgArr) - - if imgH < 350: - flowH, flowW = imgH * 2, imgW * 2 - else: - flowH, flowW = imgH, imgW - - # Load video. - video, video_flow = [], [] - if args.mode == "watermark_removal": - maskname_list = glob.glob(os.path.join(args.path_mask, "*.png")) + glob.glob( - os.path.join(args.path_mask, "*.jpg") - ) - assert len(filename_list) == len(maskname_list) - for filename, maskname in zip(sorted(filename_list), sorted(maskname_list)): - frame = ( - torch.from_numpy(np.array(Image.open(filename)).astype(np.uint8)) - .permute(2, 0, 1) - .float() - .unsqueeze(0) - ) - mask = ( - torch.from_numpy(np.array(Image.open(maskname)).astype(np.uint8)) - .permute(2, 0, 1) - .float() - .unsqueeze(0) - ) - mask[mask > 0] = 1 - frame = frame * (1 - mask) - frame = F2.upsample( - frame, size=(imgH, imgW), mode="bilinear", align_corners=False - ) - frame_flow = F2.upsample( - frame, size=(flowH, flowW), mode="bilinear", align_corners=False - ) - video.append(frame) - video_flow.append(frame_flow) - else: - """for filename in sorted(filename_list): - frame = torch.from_numpy(np.array(Image.open(filename)).astype(np.uint8)).permute(2, 0, 1).float().unsqueeze(0) - frame = F2.upsample(frame, size=(imgH, imgW), mode='bilinear', align_corners=False) - frame_flow = F2.upsample(frame, size=(flowH, flowW), mode='bilinear', align_corners=False) - video.append(frame) - video_flow.append(frame_flow)""" - for im in imgArr: - frame = ( - torch.from_numpy(np.array(im).astype(np.uint8)) - .permute(2, 0, 1) - .float() - .unsqueeze(0) - ) - frame = F2.upsample( - frame, size=(imgH, imgW), mode="bilinear", align_corners=False - ) - frame_flow = F2.upsample( - frame, size=(flowH, flowW), mode="bilinear", align_corners=False - ) - video.append(frame) - video_flow.append(frame_flow) - - video = torch.cat(video, dim=0) # [n, c, h, w] - video_flow = torch.cat(video_flow, dim=0) - gts = video.clone() - video = video.to(device) - video_flow = video_flow.to(device) - - # Calcutes the corrupted flow. - forward_flows = calculate_flow( - args, RAFT_model, video_flow, "forward" - ) # [B, C, 2, N] - backward_flows = calculate_flow(args, RAFT_model, video_flow, "backward") - - # Makes sure video is in BGR (opencv) format. - video = ( - video.permute(2, 3, 1, 0).cpu().numpy()[:, :, ::-1, :] / 255.0 - ) # np array -> [h, w, c, N] (0~1) - - if args.mode == "video_extrapolation": - # Creates video and flow where the extrapolated region are missing. - ( - video, - forward_flows, - backward_flows, - flow_mask, - mask_dilated, - start_point, - end_point, - ) = extrapolation(args, video, forward_flows, backward_flows) - imgH, imgW = video.shape[:2] - - # mask indicating the missing region in the video. - mask = np.tile(flow_mask[..., None], (1, 1, nFrame)) - flow_mask = np.tile(flow_mask[..., None], (1, 1, nFrame)) - mask_dilated = np.tile(mask_dilated[..., None], (1, 1, nFrame)) - - else: - # Loads masks. 
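# The masks are assumed to be binary images in which nonzero pixels mark the
# region to remove; they are resized with nearest-neighbour interpolation so
# they stay binary, and optionally dilated (args.flow_mask_dilates for the
# flow-completion mask, args.frame_dilates for the frame mask).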
- filename_list = glob.glob(os.path.join(args.path_mask, "*.png")) + glob.glob( - os.path.join(args.path_mask, "*.jpg") - ) - - mask = [] - mask_dilated = [] - flow_mask = [] - """for filename in sorted(filename_list): - mask_img = np.array(Image.open(filename).convert('L')) - mask_img = cv2.resize(mask_img, dsize=(imgW, imgH), interpolation=cv2.INTER_NEAREST) - - if args.flow_mask_dilates > 0: - flow_mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=args.flow_mask_dilates) - else: - flow_mask_img = mask_img - flow_mask.append(flow_mask_img) - - if args.frame_dilates > 0: - mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=args.frame_dilates) - mask.append(mask_img) - mask_dilated.append(gradient_mask(mask_img))""" - - for f_mask in imgMaskArr: - mask_img = np.array(f_mask) - mask_img = cv2.resize( - mask_img, dsize=(imgW, imgH), interpolation=cv2.INTER_NEAREST - ) - - if args.flow_mask_dilates > 0: - flow_mask_img = scipy.ndimage.binary_dilation( - mask_img, iterations=args.flow_mask_dilates - ) - else: - flow_mask_img = mask_img - flow_mask.append(flow_mask_img) - - if args.frame_dilates > 0: - mask_img = scipy.ndimage.binary_dilation( - mask_img, iterations=args.frame_dilates - ) - mask.append(mask_img) - mask_dilated.append(gradient_mask(mask_img)) - - # mask indicating the missing region in the video. - mask = np.stack(mask, -1).astype(np.bool) # [H, W, C, N] - mask_dilated = np.stack(mask_dilated, -1).astype(np.bool) - flow_mask = np.stack(flow_mask, -1).astype(np.bool) - - # Completes the flow. - videoFlowF = complete_flow( - LAFC_config, LAFC_model, forward_flows, flow_mask, "forward", device - ) - videoFlowB = complete_flow( - LAFC_config, LAFC_model, backward_flows, flow_mask, "backward", device - ) - videoFlowF = tensor2np(videoFlowF) - videoFlowB = tensor2np(videoFlowB) - print("\nFinish flow completion.") - - if args.vis_completed_flows: - save_flows(args.outroot, videoFlowF, videoFlowB) - - # Prepare gradients - gradient_x = np.empty(((imgH, imgW, 3, 0)), dtype=np.float32) - gradient_y = np.empty(((imgH, imgW, 3, 0)), dtype=np.float32) - - for indFrame in range(nFrame): - img = video[:, :, :, indFrame] - img[mask[:, :, indFrame], :] = 0 - img = ( - cv2.inpaint( - (img * 255).astype(np.uint8), - mask[:, :, indFrame].astype(np.uint8), - 3, - cv2.INPAINT_TELEA, - ).astype(np.float32) - / 255.0 - ) - - gradient_x_ = np.concatenate( - (np.diff(img, axis=1), np.zeros((imgH, 1, 3), dtype=np.float32)), axis=1 - ) - gradient_y_ = np.concatenate( - (np.diff(img, axis=0), np.zeros((1, imgW, 3), dtype=np.float32)), axis=0 - ) - gradient_x = np.concatenate( - (gradient_x, gradient_x_.reshape(imgH, imgW, 3, 1)), axis=-1 - ) - gradient_y = np.concatenate( - (gradient_y, gradient_y_.reshape(imgH, imgW, 3, 1)), axis=-1 - ) - - gradient_x[mask_dilated[:, :, indFrame], :, indFrame] = 0 - gradient_y[mask_dilated[:, :, indFrame], :, indFrame] = 0 - - gradient_x_filled = gradient_x - gradient_y_filled = gradient_y - mask_gradient = mask_dilated - video_comp = video - - # Gradient propagation. - gradient_x_filled, gradient_y_filled, mask_gradient = get_flowNN_gradient( - args, - gradient_x_filled, - gradient_y_filled, - mask, - mask_gradient, - videoFlowF, - videoFlowB, - None, - None, - ) - - # if there exist holes in mask, Poisson blending will fail. So I did this trick. I sacrifice some value. Another solution is to modify Poisson blending. 
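# A minimal illustration of the trick described in the comment above:
# scipy.ndimage.binary_fill_holes closes interior holes in the propagation mask so that
# Poisson blending operates on a simply-connected region, at the cost of re-blending a
# few pixels that were already filled. The 3x5 toy mask is an illustrative value only.
import numpy as np
import scipy.ndimage

ring = np.array([[0, 1, 1, 1, 0],
                 [0, 1, 0, 1, 0],
                 [0, 1, 1, 1, 0]], dtype=bool)   # mask with a one-pixel interior hole
filled = scipy.ndimage.binary_fill_holes(ring)
print(filled.astype(int))                        # the enclosed zero at (1, 2) becomes 1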
- for indFrame in range(nFrame): - mask_gradient[:, :, indFrame] = scipy.ndimage.binary_fill_holes( - mask_gradient[:, :, indFrame] - ).astype(np.bool) - - # After one gradient propagation iteration - # gradient --> RGB - frameBlends = [] - for indFrame in range(nFrame): - print("Poisson blending frame {0:3d}".format(indFrame)) - - if mask[:, :, indFrame].sum() > 0: - try: - frameBlend, UnfilledMask = Poisson_blend_img( - video_comp[:, :, :, indFrame], - gradient_x_filled[:, 0 : imgW - 1, :, indFrame], - gradient_y_filled[0 : imgH - 1, :, :, indFrame], - mask[:, :, indFrame], - mask_gradient[:, :, indFrame], - ) - except: - frameBlend, UnfilledMask = ( - video_comp[:, :, :, indFrame], - mask[:, :, indFrame], - ) - - frameBlend = np.clip(frameBlend, 0, 1.0) - tmp = ( - cv2.inpaint( - (frameBlend * 255).astype(np.uint8), - UnfilledMask.astype(np.uint8), - 3, - cv2.INPAINT_TELEA, - ).astype(np.float32) - / 255.0 - ) - frameBlend[UnfilledMask, :] = tmp[UnfilledMask, :] - - video_comp[:, :, :, indFrame] = frameBlend - mask[:, :, indFrame] = UnfilledMask - - frameBlend_ = copy.deepcopy(frameBlend) - # Green indicates the regions that are not filled yet. - frameBlend_[mask[:, :, indFrame], :] = [0, 1.0, 0] - else: - frameBlend_ = video_comp[:, :, :, indFrame] - frameBlends.append(frameBlend_) - - if args.vis_prop: - save_fgcp(args.outroot, frameBlends, mask) - - video_length = len(frameBlends) - - for i in range(len(frameBlends)): - frameBlends[i] = frameBlends[i][:, :, ::-1] - - frames_first = np2tensor(frameBlends, near="t").to(device) - mask = np.moveaxis(mask, -1, 0) - mask = mask[:, :, :, np.newaxis] - masks = np2tensor(mask, near="t").to(device) - normed_frames = frames_first * 2 - 1 - comp_frames = [None] * video_length - - ref_length = args.step - num_ref = args.num_ref - neighbor_stride = args.neighbor_stride - - videoFlowF = np.moveaxis(videoFlowF, -1, 0) - - videoFlowF = np.concatenate([videoFlowF, videoFlowF[-1:, ...]], axis=0) - - flows = np2tensor(videoFlowF, near="t") - flows = norm_flows(flows).to(device) - - for f in range(0, video_length, neighbor_stride): - neighbor_ids = [ - i - for i in range( - max(0, f - neighbor_stride), min(video_length, f + neighbor_stride + 1) - ) - ] - ref_ids = get_ref_index(f, neighbor_ids, video_length, ref_length, num_ref) - print(f, len(neighbor_ids), len(ref_ids)) - selected_frames = normed_frames[:, neighbor_ids + ref_ids] - selected_masks = masks[:, neighbor_ids + ref_ids] - masked_frames = selected_frames * (1 - selected_masks) - selected_flows = flows[:, neighbor_ids + ref_ids] - with torch.no_grad(): - filled_frames = FGT_model(masked_frames, selected_flows, selected_masks) - filled_frames = (filled_frames + 1) / 2 - filled_frames = filled_frames.cpu().permute(0, 2, 3, 1).numpy() * 255 - for i in range(len(neighbor_ids)): - idx = neighbor_ids[i] - valid_frame = frames_first[0, idx].cpu().permute(1, 2, 0).numpy() * 255.0 - valid_mask = masks[0, idx].cpu().permute(1, 2, 0).numpy() - comp = np.array(filled_frames[i]).astype(np.uint8) * valid_mask + np.array( - valid_frame - ).astype(np.uint8) * (1 - valid_mask) - if comp_frames[idx] is None: - comp_frames[idx] = comp - else: - comp_frames[idx] = ( - comp_frames[idx].astype(np.float32) * 0.5 - + comp.astype(np.float32) * 0.5 - ) - if args.vis_frame: - save_results(args.outroot, comp_frames) - create_dir(args.outroot) - for i in range(len(comp_frames)): - comp_frames[i] = comp_frames[i].astype(np.uint8) - imageio.mimwrite( - os.path.join(args.outroot, args.outfilename), comp_frames, 
fps=args.out_fps, quality=8 - ) - print(f"Done, please check your result in {args.outroot} ") - - -def main(args): - assert args.mode in ( - "object_removal", - "video_extrapolation", - "watermark_removal", - ), ( - "Accepted modes: 'object_removal', 'video_extrapolation', and 'watermark_removal', but input is %s" - ) % args.mode - video_inpainting(args) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--opt", - default="configs/object_removal.yaml", - help="Please select your config file for inference", - ) - # video completion - parser.add_argument( - "--mode", - default="object_removal", - choices=["object_removal", "watermark_removal", "video_extrapolation"], - help="modes: object_removal / video_extrapolation", - ) - parser.add_argument( - "--path", default="/myData/davis_resized/walking", help="dataset for evaluation" - ) - parser.add_argument( - "--path_mask", - default="/myData/dilateAnnotations_4/walking", - help="mask for object removal", - ) - parser.add_argument( - "--outroot", default="quick_start/walking3", help="output directory" - ) - parser.add_argument( - "--consistencyThres", - dest="consistencyThres", - default=5, - type=float, - help="flow consistency error threshold", - ) - parser.add_argument("--alpha", dest="alpha", default=0.1, type=float) - parser.add_argument("--Nonlocal", dest="Nonlocal", default=False, type=bool) - - # RAFT - parser.add_argument( - "--raft_model", - default="../LAFC/flowCheckPoint/raft-things.pth", - help="restore checkpoint", - ) - parser.add_argument("--small", action="store_true", help="use small model") - parser.add_argument( - "--mixed_precision", action="store_true", help="use mixed precision" - ) - parser.add_argument( - "--alternate_corr", - action="store_true", - help="use efficent correlation implementation", - ) - - # LAFC - parser.add_argument("--lafc_ckpts", type=str, default="../LAFC/checkpoint") - - # FGT - parser.add_argument("--fgt_ckpts", type=str, default="../FGT/checkpoint") - - # extrapolation - parser.add_argument( - "--H_scale", dest="H_scale", default=2, type=float, help="H extrapolation scale" - ) - parser.add_argument( - "--W_scale", dest="W_scale", default=2, type=float, help="W extrapolation scale" - ) - - # Image basic information - parser.add_argument("--imgH", type=int, default=256) - parser.add_argument("--imgW", type=int, default=432) - parser.add_argument("--flow_mask_dilates", type=int, default=8) - parser.add_argument("--frame_dilates", type=int, default=0) - - parser.add_argument("--gpu", type=int, default=0) - - # FGT inference parameters - parser.add_argument("--step", type=int, default=10) - parser.add_argument("--num_ref", type=int, default=-1) - parser.add_argument("--neighbor_stride", type=int, default=5) - - # visualization - parser.add_argument( - "--vis_flows", action="store_true", help="Visualize the initialized flows" - ) - parser.add_argument( - "--vis_completed_flows", - action="store_true", - help="Visualize the completed flows", - ) - parser.add_argument( - "--vis_prop", - action="store_true", - help="Visualize the frames after stage-I filling (flow guided content propagation)", - ) - parser.add_argument("--vis_frame", action="store_true", help="Visualize frames") - - args = parser.parse_args() - - main(args) diff --git a/spaces/ondrejbiza/isa/invariant_slot_attention/modules/invariant_attention.py b/spaces/ondrejbiza/isa/invariant_slot_attention/modules/invariant_attention.py deleted file mode 100644 index 
3bd69db49feb771ec770e654be4d854910e1e872..0000000000000000000000000000000000000000 --- a/spaces/ondrejbiza/isa/invariant_slot_attention/modules/invariant_attention.py +++ /dev/null @@ -1,963 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Equivariant attention module library.""" -import functools -from typing import Any, Optional, Tuple - -from flax import linen as nn -import jax -import jax.numpy as jnp -from invariant_slot_attention.modules import attention -from invariant_slot_attention.modules import misc - -Shape = Tuple[int] - -DType = Any -Array = Any # jnp.ndarray -PRNGKey = Array - - -class InvertedDotProductAttentionKeyPerQuery(nn.Module): - """Inverted dot-product attention with a different set of keys per query. - - Used in SlotAttentionTranslEquiv, where each slot has a position. - The positions are used to create relative coordinate grids, - which result in a different set of inputs (keys) for each slot. - """ - - dtype: DType = jnp.float32 - precision: Optional[jax.lax.Precision] = None - epsilon: float = 1e-8 - renormalize_keys: bool = False - attn_weights_only: bool = False - softmax_temperature: float = 1.0 - value_per_query: bool = False - - @nn.compact - def __call__(self, query, key, value, train): - """Computes inverted dot-product attention with key per query. - - Args: - query: Queries with shape of `[batch..., q_num, qk_features]`. - key: Keys with shape of `[batch..., q_num, kv_num, qk_features]`. - value: Values with shape of `[batch..., kv_num, v_features]`. - train: Indicating whether we're training or evaluating. - - Returns: - Tuple of two elements: (1) output of shape - `[batch_size..., q_num, v_features]` and (2) attention mask of shape - `[batch_size..., q_num, kv_num]`. - """ - qk_features = query.shape[-1] - query = query / jnp.sqrt(qk_features).astype(self.dtype) - - # Each query is multiplied with its own set of keys. - attn = jnp.einsum( - "...qd,...qkd->...qk", query, key, precision=self.precision - ) - - # axis=-2 for a softmax over query axis (inverted attention). - attn = jax.nn.softmax( - attn / self.softmax_temperature, axis=-2 - ).astype(self.dtype) - - # We expand dims because the logger expect a #heads dimension. - self.sow("intermediates", "attn", jnp.expand_dims(attn, -3)) - - if self.renormalize_keys: - normalizer = jnp.sum(attn, axis=-1, keepdims=True) + self.epsilon - attn = attn / normalizer - - if self.attn_weights_only: - return attn - - output = jnp.einsum( - "...qk,...qkd->...qd" if self.value_per_query else "...qk,...kd->...qd", - attn, - value, - precision=self.precision - ) - - return output, attn - - -class SlotAttentionExplicitStats(nn.Module): - """Slot Attention module with explicit slot statistics. - - Slot statistics, such as position and scale, are appended to the - output slot representations. - - Note: This module expects a 2D coordinate grid to be appended - at the end of inputs. - - Note: This module uses pre-normalization by default. 
- """ - grid_encoder: nn.Module - num_iterations: int = 1 - qkv_size: Optional[int] = None - mlp_size: Optional[int] = None - epsilon: float = 1e-8 - softmax_temperature: float = 1.0 - gumbel_softmax: bool = False - gumbel_softmax_straight_through: bool = False - num_heads: int = 1 - min_scale: float = 0.01 - max_scale: float = 5. - return_slot_positions: bool = True - return_slot_scales: bool = True - - @nn.compact - def __call__(self, slots, inputs, - padding_mask = None, - train = False): - """Slot Attention with explicit slot statistics module forward pass.""" - del padding_mask # Unused. - # Slot scales require slot positions. - assert self.return_slot_positions or not self.return_slot_scales - - # Separate a concatenated linear coordinate grid from the inputs. - inputs, grid = inputs[Ellipsis, :-2], inputs[Ellipsis, -2:] - - # Hack so that the input and output slot dimensions are the same. - to_remove = 0 - if self.return_slot_positions: - to_remove += 2 - if self.return_slot_scales: - to_remove += 2 - if to_remove > 0: - slots = slots[Ellipsis, :-to_remove] - - # Add position encodings to inputs - n_features = inputs.shape[-1] - grid_projector = nn.Dense(n_features, name="dense_pe_0") - inputs = self.grid_encoder()(inputs + grid_projector(grid)) - - qkv_size = self.qkv_size or slots.shape[-1] - head_dim = qkv_size // self.num_heads - dense = functools.partial(nn.DenseGeneral, - axis=-1, features=(self.num_heads, head_dim), - use_bias=False) - - # Shared modules. - dense_q = dense(name="general_dense_q_0") - layernorm_q = nn.LayerNorm() - inverted_attention = attention.InvertedDotProductAttention( - norm_type="mean", - multi_head=self.num_heads > 1, - return_attn_weights=True) - gru = misc.GRU() - - if self.mlp_size is not None: - mlp = misc.MLP(hidden_size=self.mlp_size, layernorm="pre", residual=True) # type: ignore - - # inputs.shape = (..., n_inputs, inputs_size). - inputs = nn.LayerNorm()(inputs) - # k.shape = (..., n_inputs, slot_size). - k = dense(name="general_dense_k_0")(inputs) - # v.shape = (..., n_inputs, slot_size). - v = dense(name="general_dense_v_0")(inputs) - - # Multiple rounds of attention. - for _ in range(self.num_iterations): - - # Inverted dot-product attention. - slots_n = layernorm_q(slots) - q = dense_q(slots_n) # q.shape = (..., n_inputs, slot_size). - updates, attn = inverted_attention(query=q, key=k, value=v, train=train) - - # Recurrent update. - slots = gru(slots, updates) - - # Feedforward block with pre-normalization. - if self.mlp_size is not None: - slots = mlp(slots) - - if self.return_slot_positions: - # Compute the center of mass of each slot attention mask. - positions = jnp.einsum("...qk,...kd->...qd", attn, grid) - slots = jnp.concatenate([slots, positions], axis=-1) - - if self.return_slot_scales: - # Compute slot scales. Take the square root to make the operation - # analogous to normalizing data drawn from a Gaussian. - spread = jnp.square( - jnp.expand_dims(grid, axis=-3) - jnp.expand_dims(positions, axis=-2)) - scales = jnp.sqrt( - jnp.einsum("...qk,...qkd->...qd", attn + self.epsilon, spread)) - scales = jnp.clip(scales, self.min_scale, self.max_scale) - slots = jnp.concatenate([slots, scales], axis=-1) - - return slots - - -class SlotAttentionPosKeysValues(nn.Module): - """Slot Attention module with positional encodings in keys and values. - - Feature position encodings are added to keys and values instead - of the inputs. - - Note: This module expects a 2D coordinate grid to be appended - at the end of inputs. 
- - Note: This module uses pre-normalization by default. - """ - grid_encoder: nn.Module - num_iterations: int = 1 - qkv_size: Optional[int] = None - mlp_size: Optional[int] = None - epsilon: float = 1e-8 - softmax_temperature: float = 1.0 - gumbel_softmax: bool = False - gumbel_softmax_straight_through: bool = False - num_heads: int = 1 - - @nn.compact - def __call__(self, slots, inputs, - padding_mask = None, - train = False): - """Slot Attention with explicit slot statistics module forward pass.""" - del padding_mask # Unused. - - # Separate a concatenated linear coordinate grid from the inputs. - inputs, grid = inputs[Ellipsis, :-2], inputs[Ellipsis, -2:] - - qkv_size = self.qkv_size or slots.shape[-1] - head_dim = qkv_size // self.num_heads - dense = functools.partial(nn.DenseGeneral, - axis=-1, features=(self.num_heads, head_dim), - use_bias=False) - - # Shared modules. - dense_q = dense(name="general_dense_q_0") - layernorm_q = nn.LayerNorm() - inverted_attention = attention.InvertedDotProductAttention( - norm_type="mean", - multi_head=self.num_heads > 1) - gru = misc.GRU() - - if self.mlp_size is not None: - mlp = misc.MLP(hidden_size=self.mlp_size, layernorm="pre", residual=True) # type: ignore - - # inputs.shape = (..., n_inputs, inputs_size). - inputs = nn.LayerNorm()(inputs) - # k.shape = (..., n_inputs, slot_size). - k = dense(name="general_dense_k_0")(inputs) - # v.shape = (..., n_inputs, slot_size). - v = dense(name="general_dense_v_0")(inputs) - - # Add position encodings to keys and values. - grid_projector = dense(name="general_dense_p_0") - grid_encoder = self.grid_encoder() - k = grid_encoder(k + grid_projector(grid)) - v = grid_encoder(v + grid_projector(grid)) - - # Multiple rounds of attention. - for _ in range(self.num_iterations): - - # Inverted dot-product attention. - slots_n = layernorm_q(slots) - q = dense_q(slots_n) # q.shape = (..., n_inputs, slot_size). - updates = inverted_attention(query=q, key=k, value=v, train=train) - - # Recurrent update. - slots = gru(slots, updates) - - # Feedforward block with pre-normalization. - if self.mlp_size is not None: - slots = mlp(slots) - - return slots - - -class SlotAttentionTranslEquiv(nn.Module): - """Slot Attention module with slot positions. - - A position is computed for each slot. Slot positions are used to create - relative coordinate grids, which are used as position embeddings reapplied - in each iteration of slot attention. The last two channels in inputs - must contain the flattened position grid. - - Note: This module uses pre-normalization by default. - """ - - grid_encoder: nn.Module - num_iterations: int = 1 - qkv_size: Optional[int] = None - mlp_size: Optional[int] = None - epsilon: float = 1e-8 - softmax_temperature: float = 1.0 - gumbel_softmax: bool = False - gumbel_softmax_straight_through: bool = False - num_heads: int = 1 - zero_position_init: bool = True - ablate_non_equivariant: bool = False - stop_grad_positions: bool = False - mix_slots: bool = False - add_rel_pos_to_values: bool = False - append_statistics: bool = False - - @nn.compact - def __call__(self, slots, inputs, - padding_mask = None, - train = False): - """Slot Attention translation equiv. module forward pass.""" - del padding_mask # Unused. - - if self.num_heads > 1: - raise NotImplementedError("This prototype only uses one attn. head.") - - # Separate a concatenated linear coordinate grid from the inputs. - inputs, grid = inputs[Ellipsis, :-2], inputs[Ellipsis, -2:] - - # Separate position (x,y) from slot embeddings. 
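# A toy sketch of the relative coordinate grids described in the docstring above,
# assuming a flattened 8x8 grid in [-1, 1] and 4 slots (illustrative values only):
# each slot receives its own copy of the grid, shifted so its position becomes the origin.
import jax.numpy as jnp

num_slots, side = 4, 8
ys, xs = jnp.meshgrid(jnp.linspace(-1., 1., side), jnp.linspace(-1., 1., side), indexing="ij")
grid = jnp.stack([ys, xs], axis=-1).reshape(side * side, 2)      # [n_inputs, 2]
positions = jnp.zeros((num_slots, 2))                            # slot centres in [-1, 1]
grid_per_slot = jnp.repeat(grid[None], num_slots, axis=0)        # [n_slots, n_inputs, 2]
relative_grid = grid_per_slot - positions[:, None, :]            # slot-centric coordinates
print(relative_grid.shape)                                       # (4, 64, 2)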
- slots, positions = slots[Ellipsis, :-2], slots[Ellipsis, -2:] - qkv_size = self.qkv_size or slots.shape[-1] - num_slots = slots.shape[-2] - - # Prepare initial slot positions. - if self.zero_position_init: - # All slots start in the middle of the image. - positions *= 0. - - # Learnable initial positions might deviate from the allowed range. - positions = jnp.clip(positions, -1., 1.) - - # Pre-normalization. - inputs = nn.LayerNorm()(inputs) - - grid_per_slot = jnp.repeat( - jnp.expand_dims(grid, axis=-3), num_slots, axis=-3) - - # Shared modules. - dense_q = nn.Dense(qkv_size, use_bias=False, name="general_dense_q_0") - dense_k = nn.Dense(qkv_size, use_bias=False, name="general_dense_k_0") - dense_v = nn.Dense(qkv_size, use_bias=False, name="general_dense_v_0") - grid_proj = nn.Dense(qkv_size, name="dense_gp_0") - grid_enc = self.grid_encoder() - layernorm_q = nn.LayerNorm() - inverted_attention = InvertedDotProductAttentionKeyPerQuery( - epsilon=self.epsilon, - renormalize_keys=True, - softmax_temperature=self.softmax_temperature, - value_per_query=self.add_rel_pos_to_values - ) - gru = misc.GRU() - - if self.mlp_size is not None: - mlp = misc.MLP(hidden_size=self.mlp_size, layernorm="pre", residual=True) # type: ignore - - if self.append_statistics: - embed_statistics = nn.Dense(slots.shape[-1], name="dense_embed_0") - - # k.shape and v.shape = (..., n_inputs, slot_size). - v = dense_v(inputs) - k = dense_k(inputs) - k_expand = jnp.expand_dims(k, axis=-3) - v_expand = jnp.expand_dims(v, axis=-3) - - # Multiple rounds of attention. Last iteration updates positions only. - for attn_round in range(self.num_iterations + 1): - - if self.ablate_non_equivariant: - # Add an encoded coordinate grid with absolute positions. - grid_emb_per_slot = grid_proj(grid_per_slot) - k_rel_pos = grid_enc(k_expand + grid_emb_per_slot) - if self.add_rel_pos_to_values: - v_rel_pos = grid_enc(v_expand + grid_emb_per_slot) - else: - # Relativize positions, encode them and add them to the keys - # and optionally to values. - relative_grid = grid_per_slot - jnp.expand_dims(positions, axis=-2) - grid_emb_per_slot = grid_proj(relative_grid) - k_rel_pos = grid_enc(k_expand + grid_emb_per_slot) - if self.add_rel_pos_to_values: - v_rel_pos = grid_enc(v_expand + grid_emb_per_slot) - - # Inverted dot-product attention. - slots_n = layernorm_q(slots) - q = dense_q(slots_n) # q.shape = (..., n_slots, slot_size). - updates, attn = inverted_attention( - query=q, - key=k_rel_pos, - value=v_rel_pos if self.add_rel_pos_to_values else v, - train=train) - - # Compute the center of mass of each slot attention mask. - # Guaranteed to be in [-1, 1]. - positions = jnp.einsum("...qk,...kd->...qd", attn, grid) - - if self.stop_grad_positions: - # Do not backprop through positions and scales. - positions = jax.lax.stop_gradient(positions) - - if attn_round < self.num_iterations: - if self.append_statistics: - # Projects and add 2D slot positions into slot latents. - tmp = jnp.concatenate([slots, positions], axis=-1) - slots = embed_statistics(tmp) - - # Recurrent update. - slots = gru(slots, updates) - - # Feedforward block with pre-normalization. - if self.mlp_size is not None: - slots = mlp(slots) - - # Concatenate position information to slots. - output = jnp.concatenate([slots, positions], axis=-1) - - if self.mix_slots: - output = misc.MLP(hidden_size=128, layernorm="pre")(output) - - return output - - -class SlotAttentionTranslScaleEquiv(nn.Module): - """Slot Attention module with slot positions and scales. 
- - A position and scale is computed for each slot. Slot positions and scales - are used to create relative coordinate grids, which are used as position - embeddings reapplied in each iteration of slot attention. The last two - channels in input must contain the flattened position grid. - - Note: This module uses pre-normalization by default. - """ - - grid_encoder: nn.Module - num_iterations: int = 1 - qkv_size: Optional[int] = None - mlp_size: Optional[int] = None - epsilon: float = 1e-8 - softmax_temperature: float = 1.0 - gumbel_softmax: bool = False - gumbel_softmax_straight_through: bool = False - num_heads: int = 1 - zero_position_init: bool = True - # Scale of 0.1 corresponds to fairly small objects. - init_with_fixed_scale: Optional[float] = 0.1 - ablate_non_equivariant: bool = False - stop_grad_positions_and_scales: bool = False - mix_slots: bool = False - add_rel_pos_to_values: bool = False - scales_factor: float = 1. - # Slot scales cannot be negative and should not be too close to zero - # or too large. - min_scale: float = 0.001 - max_scale: float = 2. - append_statistics: bool = False - - @nn.compact - def __call__(self, slots, inputs, - padding_mask = None, - train = False): - """Slot Attention translation and scale equiv. module forward pass.""" - del padding_mask # Unused. - - if self.num_heads > 1: - raise NotImplementedError("This prototype only uses one attn. head.") - - # Separate a concatenated linear coordinate grid from the inputs. - inputs, grid = inputs[Ellipsis, :-2], inputs[Ellipsis, -2:] - - # Separate position (x,y) and scale from slot embeddings. - slots, positions, scales = (slots[Ellipsis, :-4], - slots[Ellipsis, -4: -2], - slots[Ellipsis, -2:]) - qkv_size = self.qkv_size or slots.shape[-1] - num_slots = slots.shape[-2] - - # Prepare initial slot positions. - if self.zero_position_init: - # All slots start in the middle of the image. - positions *= 0. - - if self.init_with_fixed_scale is not None: - scales = scales * 0. + self.init_with_fixed_scale - - # Learnable initial positions and scales could have arbitrary values. - positions = jnp.clip(positions, -1., 1.) - scales = jnp.clip(scales, self.min_scale, self.max_scale) - - # Pre-normalization. - inputs = nn.LayerNorm()(inputs) - - grid_per_slot = jnp.repeat( - jnp.expand_dims(grid, axis=-3), num_slots, axis=-3) - - # Shared modules. - dense_q = nn.Dense(qkv_size, use_bias=False, name="general_dense_q_0") - dense_k = nn.Dense(qkv_size, use_bias=False, name="general_dense_k_0") - dense_v = nn.Dense(qkv_size, use_bias=False, name="general_dense_v_0") - grid_proj = nn.Dense(qkv_size, name="dense_gp_0") - grid_enc = self.grid_encoder() - layernorm_q = nn.LayerNorm() - inverted_attention = InvertedDotProductAttentionKeyPerQuery( - epsilon=self.epsilon, - renormalize_keys=True, - softmax_temperature=self.softmax_temperature, - value_per_query=self.add_rel_pos_to_values - ) - gru = misc.GRU() - - if self.mlp_size is not None: - mlp = misc.MLP(hidden_size=self.mlp_size, layernorm="pre", residual=True) # type: ignore - - if self.append_statistics: - embed_statistics = nn.Dense(slots.shape[-1], name="dense_embed_0") - - # k.shape and v.shape = (..., n_inputs, slot_size). - v = dense_v(inputs) - k = dense_k(inputs) - k_expand = jnp.expand_dims(k, axis=-3) - v_expand = jnp.expand_dims(v, axis=-3) - - # Multiple rounds of attention. - # Last iteration updates positions and scales only. 
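# A toy numerical check of the slot statistics computed inside the loop below: the slot
# position is the attention-weighted centre of mass of the grid, and the scale is the
# square root of the attention-weighted spread around it (illustrative values only).
import jax.numpy as jnp

grid = jnp.array([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])   # 4 grid points
attn = jnp.array([[0.25, 0.25, 0.25, 0.25]])                     # one slot, uniform attention
position = jnp.einsum("qk,kd->qd", attn, grid)                   # -> [[0., 0.]]
spread = jnp.square(grid[None] - position[:, None, :])
scale = jnp.sqrt(jnp.einsum("qk,qkd->qd", attn, spread))         # -> [[1., 1.]]
print(position, scale)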
- for attn_round in range(self.num_iterations + 1): - - if self.ablate_non_equivariant: - # Add an encoded coordinate grid with absolute positions. - tmp_grid = grid_proj(grid_per_slot) - k_rel_pos = grid_enc(k_expand + tmp_grid) - if self.add_rel_pos_to_values: - v_rel_pos = grid_enc(v_expand + tmp_grid) - else: - # Relativize and scale positions, encode them and add them to inputs. - relative_grid = grid_per_slot - jnp.expand_dims(positions, axis=-2) - # Scales are usually small so the grid might get too large. - relative_grid = relative_grid / self.scales_factor - relative_grid = relative_grid / jnp.expand_dims(scales, axis=-2) - tmp_grid = grid_proj(relative_grid) - k_rel_pos = grid_enc(k_expand + tmp_grid) - if self.add_rel_pos_to_values: - v_rel_pos = grid_enc(v_expand + tmp_grid) - - # Inverted dot-product attention. - slots_n = layernorm_q(slots) - q = dense_q(slots_n) # q.shape = (..., n_slots, slot_size). - updates, attn = inverted_attention( - query=q, - key=k_rel_pos, - value=v_rel_pos if self.add_rel_pos_to_values else v, - train=train) - - # Compute the center of mass of each slot attention mask. - positions = jnp.einsum("...qk,...kd->...qd", attn, grid) - - # Compute slot scales. Take the square root to make the operation - # analogous to normalizing data drawn from a Gaussian. - spread = jnp.square(grid_per_slot - jnp.expand_dims(positions, axis=-2)) - scales = jnp.sqrt( - jnp.einsum("...qk,...qkd->...qd", attn + self.epsilon, spread)) - - # Computed positions are guaranteed to be in [-1, 1]. - # Scales are unbounded. - scales = jnp.clip(scales, self.min_scale, self.max_scale) - - if self.stop_grad_positions_and_scales: - # Do not backprop through positions and scales. - positions = jax.lax.stop_gradient(positions) - scales = jax.lax.stop_gradient(scales) - - if attn_round < self.num_iterations: - if self.append_statistics: - # Project and add 2D slot positions and scales into slot latents. - tmp = jnp.concatenate([slots, positions, scales], axis=-1) - slots = embed_statistics(tmp) - - # Recurrent update. - slots = gru(slots, updates) - - # Feedforward block with pre-normalization. - if self.mlp_size is not None: - slots = mlp(slots) - - # Concatenate position and scale information to slots. - output = jnp.concatenate([slots, positions, scales], axis=-1) - - if self.mix_slots: - output = misc.MLP(hidden_size=128, layernorm="pre")(output) - - return output - - -class SlotAttentionTranslRotScaleEquiv(nn.Module): - """Slot Attention module with slot positions, rotations and scales. - - A position, rotation and scale is computed for each slot. - Slot positions, rotations and scales are used to create relative - coordinate grids, which are used as position embeddings reapplied in each - iteration of slot attention. The last two channels in input must contain - the flattened position grid. - - Note: This module uses pre-normalization by default. - """ - - grid_encoder: nn.Module - num_iterations: int = 1 - qkv_size: Optional[int] = None - mlp_size: Optional[int] = None - epsilon: float = 1e-8 - softmax_temperature: float = 1.0 - gumbel_softmax: bool = False - gumbel_softmax_straight_through: bool = False - num_heads: int = 1 - zero_position_init: bool = True - # Scale of 0.1 corresponds to fairly small objects. 
- init_with_fixed_scale: Optional[float] = 0.1 - ablate_non_equivariant: bool = False - stop_grad_positions: bool = False - stop_grad_scales: bool = False - stop_grad_rotations: bool = False - mix_slots: bool = False - add_rel_pos_to_values: bool = False - scales_factor: float = 1. - # Slot scales cannot be negative and should not be too close to zero - # or too large. - min_scale: float = 0.001 - max_scale: float = 2. - limit_rot_to_45_deg: bool = True - append_statistics: bool = False - - @nn.compact - def __call__(self, slots, inputs, - padding_mask = None, - train = False): - """Slot Attention translation and scale equiv. module forward pass.""" - del padding_mask # Unused. - - if self.num_heads > 1: - raise NotImplementedError("This prototype only uses one attn. head.") - - # Separate a concatenated linear coordinate grid from the inputs. - inputs, grid = inputs[Ellipsis, :-2], inputs[Ellipsis, -2:] - - # Separate position (x,y) and scale from slot embeddings. - slots, positions, scales, rotm = (slots[Ellipsis, :-8], - slots[Ellipsis, -8: -6], - slots[Ellipsis, -6: -4], - slots[Ellipsis, -4:]) - rotm = jnp.reshape(rotm, (*rotm.shape[:-1], 2, 2)) - qkv_size = self.qkv_size or slots.shape[-1] - num_slots = slots.shape[-2] - - # Prepare initial slot positions. - if self.zero_position_init: - # All slots start in the middle of the image. - positions *= 0. - - if self.init_with_fixed_scale is not None: - scales = scales * 0. + self.init_with_fixed_scale - - # Learnable initial positions and scales could have arbitrary values. - positions = jnp.clip(positions, -1., 1.) - scales = jnp.clip(scales, self.min_scale, self.max_scale) - - # Pre-normalization. - inputs = nn.LayerNorm()(inputs) - - grid_per_slot = jnp.repeat( - jnp.expand_dims(grid, axis=-3), num_slots, axis=-3) - - # Shared modules. - dense_q = nn.Dense(qkv_size, use_bias=False, name="general_dense_q_0") - dense_k = nn.Dense(qkv_size, use_bias=False, name="general_dense_k_0") - dense_v = nn.Dense(qkv_size, use_bias=False, name="general_dense_v_0") - grid_proj = nn.Dense(qkv_size, name="dense_gp_0") - grid_enc = self.grid_encoder() - layernorm_q = nn.LayerNorm() - inverted_attention = InvertedDotProductAttentionKeyPerQuery( - epsilon=self.epsilon, - renormalize_keys=True, - softmax_temperature=self.softmax_temperature, - value_per_query=self.add_rel_pos_to_values - ) - gru = misc.GRU() - - if self.mlp_size is not None: - mlp = misc.MLP(hidden_size=self.mlp_size, layernorm="pre", residual=True) # type: ignore - - if self.append_statistics: - embed_statistics = nn.Dense(slots.shape[-1], name="dense_embed_0") - - # k.shape and v.shape = (..., n_inputs, slot_size). - v = dense_v(inputs) - k = dense_k(inputs) - k_expand = jnp.expand_dims(k, axis=-3) - v_expand = jnp.expand_dims(v, axis=-3) - - # Multiple rounds of attention. - # Last iteration updates positions and scales only. - for attn_round in range(self.num_iterations + 1): - - if self.ablate_non_equivariant: - # Add an encoded coordinate grid with absolute positions. - tmp_grid = grid_proj(grid_per_slot) - k_rel_pos = grid_enc(k_expand + tmp_grid) - if self.add_rel_pos_to_values: - v_rel_pos = grid_enc(v_expand + tmp_grid) - else: - # Relativize and scale positions, encode them and add them to inputs. - relative_grid = grid_per_slot - jnp.expand_dims(positions, axis=-2) - - # Rotation. - relative_grid = self.transform(rotm, relative_grid) - - # Scales are usually small so the grid might get too large. 
- relative_grid = relative_grid / self.scales_factor - relative_grid = relative_grid / jnp.expand_dims(scales, axis=-2) - tmp_grid = grid_proj(relative_grid) - k_rel_pos = grid_enc(k_expand + tmp_grid) - if self.add_rel_pos_to_values: - v_rel_pos = grid_enc(v_expand + tmp_grid) - - # Inverted dot-product attention. - slots_n = layernorm_q(slots) - q = dense_q(slots_n) # q.shape = (..., n_slots, slot_size). - updates, attn = inverted_attention( - query=q, - key=k_rel_pos, - value=v_rel_pos if self.add_rel_pos_to_values else v, - train=train) - - # Compute the center of mass of each slot attention mask. - positions = jnp.einsum("...qk,...kd->...qd", attn, grid) - - # Find the axis with the highest spread. - relp = grid_per_slot - jnp.expand_dims(positions, axis=-2) - if self.limit_rot_to_45_deg: - rotm = self.compute_rotation_matrix_45_deg(relp, attn) - else: - rotm = self.compute_rotation_matrix_90_deg(relp, attn) - - # Compute slot scales. Take the square root to make the operation - # analogous to normalizing data drawn from a Gaussian. - relp = self.transform(rotm, relp) - - spread = jnp.square(relp) - scales = jnp.sqrt( - jnp.einsum("...qk,...qkd->...qd", attn + self.epsilon, spread)) - - # Computed positions are guaranteed to be in [-1, 1]. - # Scales are unbounded. - scales = jnp.clip(scales, self.min_scale, self.max_scale) - - if self.stop_grad_positions: - positions = jax.lax.stop_gradient(positions) - if self.stop_grad_scales: - scales = jax.lax.stop_gradient(scales) - if self.stop_grad_rotations: - rotm = jax.lax.stop_gradient(rotm) - - if attn_round < self.num_iterations: - if self.append_statistics: - # For the slot rotations, we append both the 2D rotation matrix - # and the angle by which we rotate. - # We can compute the angle using atan2(R[0, 0], R[1, 0]). - tmp = jnp.concatenate( - [slots, positions, scales, - rotm.reshape(*rotm.shape[:-2], 4), - jnp.arctan2(rotm[Ellipsis, 0, 0], rotm[Ellipsis, 1, 0])[Ellipsis, None]], - axis=-1) - slots = embed_statistics(tmp) - - # Recurrent update. - slots = gru(slots, updates) - - # Feedforward block with pre-normalization. - if self.mlp_size is not None: - slots = mlp(slots) - - # Concatenate position and scale information to slots. - output = jnp.concatenate( - [slots, positions, scales, rotm.reshape(*rotm.shape[:-2], 4)], axis=-1) - - if self.mix_slots: - output = misc.MLP(hidden_size=128, layernorm="pre")(output) - - return output - - @classmethod - def compute_weighted_covariance(cls, x, w): - # The coordinate grid is (y, x), we want (x, y). - x = jnp.stack([x[Ellipsis, 1], x[Ellipsis, 0]], axis=-1) - - # Pixel coordinates weighted by attention mask. - cov = x * w[Ellipsis, None] - cov = jnp.einsum( - "...ji,...jk->...ik", cov, x, precision=jax.lax.Precision.HIGHEST) - - return cov - - @classmethod - def compute_reference_frame_45_deg(cls, x, w): - cov = cls.compute_weighted_covariance(x, w) - - # Compute eigenvalues. - pm = jnp.sqrt(4. * jnp.square(cov[Ellipsis, 0, 1]) + - jnp.square(cov[Ellipsis, 0, 0] - cov[Ellipsis, 1, 1]) + 1e-16) - - eig1 = (cov[Ellipsis, 0, 0] + cov[Ellipsis, 1, 1] + pm) / 2. - eig2 = (cov[Ellipsis, 0, 0] + cov[Ellipsis, 1, 1] - pm) / 2. - - # Compute eigenvectors, note that both have a positive y-axis. - # This means we have eliminated half of the possible rotations. 
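# A short numerical check of the closed-form eigendecomposition used in this method for
# the 2x2 weighted covariance C = [[a, b], [b, d]]: the eigenvalues are
# (a + d +/- sqrt((a - d)^2 + 4 b^2)) / 2 and an (unnormalised) eigenvector for
# eigenvalue lam is [(lam - d) / b, 1]. The covariance entries are illustrative values only.
import jax.numpy as jnp

a, b, d = 2.0, 0.5, 1.0
cov = jnp.array([[a, b], [b, d]])
pm = jnp.sqrt(4. * b**2 + (a - d)**2)
eig1 = (a + d + pm) / 2.
v1 = jnp.array([(eig1 - d) / b, 1.])
print(cov @ v1, eig1 * v1)   # equal up to numerical precision, so v1 is an eigenvector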
- div = cov[Ellipsis, 0, 1] + 1e-16 - - v1 = (eig1 - cov[Ellipsis, 1, 1]) / div - v2 = (eig2 - cov[Ellipsis, 1, 1]) / div - - v1 = jnp.stack([v1, jnp.ones_like(v1)], axis=-1) - v2 = jnp.stack([v2, jnp.ones_like(v2)], axis=-1) - - # RULE 1: - # We catch two failure modes here. - # 1. If all attention weights are zero the covariance is also zero. - # Then the above computation is meaningless. - # 2. If the attention pattern is exactly aligned with the axes - # (e.g. a horizontal/vertical bar), the off-diagonal covariance - # values are going to be very low. If we use float32, we get - # basis vectors that are not orthogonal. - # Solution: use the default reference frame if the off-diagonal - # covariance value is too low. - default_1 = jnp.stack([jnp.ones_like(div), jnp.zeros_like(div)], axis=-1) - default_2 = jnp.stack([jnp.zeros_like(div), jnp.ones_like(div)], axis=-1) - - mask = (jnp.abs(div) < 1e-6).astype(jnp.float32)[Ellipsis, None] - v1 = (1. - mask) * v1 + mask * default_1 - v2 = (1. - mask) * v2 + mask * default_2 - - # Turn eigenvectors into unit vectors, so that we can construct - # a basis of a new reference frame. - norm1 = jnp.sqrt(jnp.sum(jnp.square(v1), axis=-1, keepdims=True)) - norm2 = jnp.sqrt(jnp.sum(jnp.square(v2), axis=-1, keepdims=True)) - - v1 = v1 / norm1 - v2 = v2 / norm2 - - # RULE 2: - # If the first basis vector is "pointing up" we assume the object - # is vertical (e.g. we say a door is vertical, whereas a car is horizontal). - # In the case of vertical objects, we swap the two basis vectors. - # This limits the possible rotations to +- 45deg instead of +- 90deg. - # We define "pointing up" as the first coordinate of the first basis vector - # being between +- sin(pi/4). The second coordinate is always positive. - mask = (jnp.logical_and(v1[Ellipsis, 0] < 0.707, v1[Ellipsis, 0] > -0.707) - ).astype(jnp.float32)[Ellipsis, None] - v1_ = (1. - mask) * v1 + mask * v2 - v2_ = (1. - mask) * v2 + mask * v1 - v1 = v1_ - v2 = v2_ - - # RULE 3: - # Mirror the first basis vector if the first coordinate is negative. - # Here, we ensure that our coordinate system is always left-handed. - # Otherwise, we would sometimes unintentionally mirror the grid. - mask = (v1[Ellipsis, 0] < 0).astype(jnp.float32)[Ellipsis, None] - v1 = (1. - mask) * v1 - mask * v1 - - return v1, v2 - - @classmethod - def compute_reference_frame_90_deg(cls, x, w): - cov = cls.compute_weighted_covariance(x, w) - - # Compute eigenvalues. - pm = jnp.sqrt(4. * jnp.square(cov[Ellipsis, 0, 1]) + - jnp.square(cov[Ellipsis, 0, 0] - cov[Ellipsis, 1, 1]) + 1e-16) - - eig1 = (cov[Ellipsis, 0, 0] + cov[Ellipsis, 1, 1] + pm) / 2. - eig2 = (cov[Ellipsis, 0, 0] + cov[Ellipsis, 1, 1] - pm) / 2. - - # Compute eigenvectors, note that both have a positive y-axis. - # This means we have eliminated half of the possible rotations. - div = cov[Ellipsis, 0, 1] + 1e-16 - - v1 = (eig1 - cov[Ellipsis, 1, 1]) / div - v2 = (eig2 - cov[Ellipsis, 1, 1]) / div - - v1 = jnp.stack([v1, jnp.ones_like(v1)], axis=-1) - v2 = jnp.stack([v2, jnp.ones_like(v2)], axis=-1) - - # RULE 1: - # We catch two failure modes here. - # 1. If all attention weights are zero the covariance is also zero. - # Then the above computation is meaningless. - # 2. If the attention pattern is exactly aligned with the axes - # (e.g. a horizontal/vertical bar), the off-diagonal covariance - # values are going to be very low. If we use float32, we get - # basis vectors that are not orthogonal. 
- # Solution: use the default reference frame if the off-diagonal - # covariance value is too low. - default_1 = jnp.stack([jnp.ones_like(div), jnp.zeros_like(div)], axis=-1) - default_2 = jnp.stack([jnp.zeros_like(div), jnp.ones_like(div)], axis=-1) - - # RULE 1.5: - # RULE 1 is activated if we see a vertical or a horizontal bar. - # We make sure that the coordinate grid for a horizontal bar is not rotated, - # whereas the coordinate grid for a vertical bar is rotated by 90deg. - # If cov[0, 0] > cov[1, 1], the bar is vertical. - mask = (cov[Ellipsis, 0, 0] <= cov[Ellipsis, 1, 1]).astype(jnp.float32)[Ellipsis, None] - # Furthermore, we have to mirror one of the basis vectors (if mask==1) - # so that we always have a left-handed coordinate grid. - default_v1 = (1. - mask) * default_1 - mask * default_2 - default_v2 = (1. - mask) * default_2 + mask * default_1 - - # Continuation of RULE 1. - mask = (jnp.abs(div) < 1e-6).astype(jnp.float32)[Ellipsis, None] - v1 = mask * default_v1 + (1. - mask) * v1 - v2 = mask * default_v2 + (1. - mask) * v2 - - # Turn eigenvectors into unit vectors, so that we can construct - # a basis of a new reference frame. - norm1 = jnp.sqrt(jnp.sum(jnp.square(v1), axis=-1, keepdims=True)) - norm2 = jnp.sqrt(jnp.sum(jnp.square(v2), axis=-1, keepdims=True)) - - v1 = v1 / norm1 - v2 = v2 / norm2 - - # RULE 2: - # Mirror the first basis vector if the first coordinate is negative. - # Here, we ensure that the our coordinate system is always left-handed. - # Otherwise, we would sometimes unintentionally mirror the grid. - mask = (v1[Ellipsis, 0] < 0).astype(jnp.float32)[Ellipsis, None] - v1 = (1. - mask) * v1 - mask * v1 - - return v1, v2 - - @classmethod - def compute_rotation_matrix_45_deg(cls, x, w): - v1, v2 = cls.compute_reference_frame_45_deg(x, w) - return jnp.stack([v1, v2], axis=-1) - - @classmethod - def compute_rotation_matrix_90_deg(cls, x, w): - v1, v2 = cls.compute_reference_frame_90_deg(x, w) - return jnp.stack([v1, v2], axis=-1) - - @classmethod - def transform(cls, rotm, x): - # The coordinate grid x is in the (y, x) format, so we need to swap - # the coordinates on the input and output. - x = jnp.stack([x[Ellipsis, 1], x[Ellipsis, 0]], axis=-1) - # Equivalent to inv(R) * x^T = R^T * x^T = (x * R)^T. - # We are multiplying by the inverse of the rotation matrix because - # we are rotating the coordinate grid *against* the rotation of the object. - # y = jnp.matmul(x, R) - y = jnp.einsum("...ij,...jk->...ik", x, rotm) - # Swap coordinates again. 
- y = jnp.stack([y[Ellipsis, 1], y[Ellipsis, 0]], axis=-1) - return y diff --git a/spaces/ori1026/OriChatGPT/app.py b/spaces/ori1026/OriChatGPT/app.py deleted file mode 100644 index 7059d8b531d0d57050927827aacd179008d0e0a1..0000000000000000000000000000000000000000 --- a/spaces/ori1026/OriChatGPT/app.py +++ /dev/null @@ -1,452 +0,0 @@ -# -*- coding:utf-8 -*- -import os -import logging -import sys - -import gradio as gr - -from modules.utils import * -from modules.presets import * -from modules.overwrites import * -from modules.chat_func import * -from modules.openai_func import get_usage - -logging.basicConfig( - level=logging.DEBUG, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -my_api_key = "" # 在这里输入你的 API 密钥 - -# if we are running in Docker -if os.environ.get("dockerrun") == "yes": - dockerflag = True -else: - dockerflag = False - -authflag = False -auth_list = [] - -if not my_api_key: - my_api_key = os.environ.get("my_api_key") -if dockerflag: - if my_api_key == "empty": - logging.error("Please give a api key!") - sys.exit(1) - # auth - username = os.environ.get("USERNAME") - password = os.environ.get("PASSWORD") - if not (isinstance(username, type(None)) or isinstance(password, type(None))): - auth_list.append((os.environ.get("USERNAME"), os.environ.get("PASSWORD"))) - authflag = True -else: - if ( - not my_api_key - and os.path.exists("api_key.txt") - and os.path.getsize("api_key.txt") - ): - with open("api_key.txt", "r") as f: - my_api_key = f.read().strip() - if os.path.exists("auth.json"): - authflag = True - with open("auth.json", "r", encoding='utf-8') as f: - auth = json.load(f) - for _ in auth: - if auth[_]["username"] and auth[_]["password"]: - auth_list.append((auth[_]["username"], auth[_]["password"])) - else: - logging.error("请检查auth.json文件中的用户名和密码!") - sys.exit(1) - -gr.Chatbot.postprocess = postprocess -PromptHelper.compact_text_chunks = compact_text_chunks - -with open("assets/custom.css", "r", encoding="utf-8") as f: - customCSS = f.read() - -with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo: - history = gr.State([]) - token_count = gr.State([]) - promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - user_api_key = gr.State(my_api_key) - user_question = gr.State("") - outputing = gr.State(False) - topic = gr.State("未命名对话历史记录") - - with gr.Row(): - with gr.Column(scale=1): - gr.HTML(title) - with gr.Column(scale=4): - gr.HTML('
                  Duplicate the Space and run securely with your OpenAI API Key
                  ') - with gr.Column(scale=4): - status_display = gr.Markdown(get_geoip(), elem_id="status_display") - - with gr.Row().style(equal_height=True): - with gr.Column(scale=5): - with gr.Row(): - chatbot = gr.Chatbot(elem_id="chuanhu_chatbot").style(height="100%") - with gr.Row(): - with gr.Column(scale=12): - user_input = gr.Textbox( - show_label=False, placeholder="在这里输入" - ).style(container=False) - with gr.Column(min_width=70, scale=1): - submitBtn = gr.Button("发送", variant="primary") - cancelBtn = gr.Button("取消", variant="secondary", visible=False) - with gr.Row(): - emptyBtn = gr.Button( - "🧹 新的对话", - ) - retryBtn = gr.Button("🔄 重新生成") - delFirstBtn = gr.Button("🗑️ 删除最旧对话") - delLastBtn = gr.Button("🗑️ 删除最新对话") - reduceTokenBtn = gr.Button("♻️ 总结对话") - - with gr.Column(): - with gr.Column(min_width=50, scale=1): - with gr.Tab(label="ChatGPT"): - keyTxt = gr.Textbox( - show_label=True, - placeholder=f"OpenAI API-key...", - value=hide_middle_chars(my_api_key), - type="password", - visible=not HIDE_MY_KEY, - label="API-Key", - ) - usageTxt = gr.Markdown("**发送消息** 或 **提交key** 以显示额度", elem_id="usage_display") - model_select_dropdown = gr.Dropdown( - label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0] - ) - use_streaming_checkbox = gr.Checkbox( - label="实时传输回答", value=True, visible=enable_streaming_option - ) - use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False) - language_select_dropdown = gr.Dropdown( - label="选择回复语言(针对搜索&索引功能)", - choices=REPLY_LANGUAGES, - multiselect=False, - value=REPLY_LANGUAGES[0], - ) - index_files = gr.Files(label="上传索引文件", type="file", multiple=True) - - with gr.Tab(label="Prompt"): - systemPromptTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入System Prompt...", - label="System prompt", - value=initial_prompt, - lines=10, - ).style(container=False) - with gr.Accordion(label="加载Prompt模板", open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown( - label="选择Prompt模板集合文件", - choices=get_template_names(plain=True), - multiselect=False, - value=get_template_names(plain=True)[0], - ).style(container=False) - with gr.Column(scale=1): - templateRefreshBtn = gr.Button("🔄 刷新") - with gr.Row(): - with gr.Column(): - templateSelectDropdown = gr.Dropdown( - label="从Prompt模板中加载", - choices=load_template( - get_template_names(plain=True)[0], mode=1 - ), - multiselect=False, - value=load_template( - get_template_names(plain=True)[0], mode=1 - )[0], - ).style(container=False) - - with gr.Tab(label="保存/加载"): - with gr.Accordion(label="保存/加载对话历史记录", open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown( - label="从列表中加载对话", - choices=get_history_names(plain=True), - multiselect=False, - value=get_history_names(plain=True)[0], - ) - with gr.Column(scale=1): - historyRefreshBtn = gr.Button("🔄 刷新") - with gr.Row(): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, - placeholder=f"设置文件名: 默认为.json,可选为.md", - label="设置保存文件名", - value="对话历史记录", - ).style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button("💾 保存对话") - exportMarkdownBtn = gr.Button("📝 导出为Markdown") - gr.Markdown("默认保存于history文件夹") - with gr.Row(): - with gr.Column(): - downloadFile = gr.File(interactive=True) - - with gr.Tab(label="高级"): - gr.Markdown("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置") - default_btn = gr.Button("🔙 恢复默认设置") - - with gr.Accordion("参数", open=False): - top_p = gr.Slider( - minimum=-0, - 
maximum=1.0, - value=1.0, - step=0.05, - interactive=True, - label="Top-p", - ) - temperature = gr.Slider( - minimum=-0, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label="Temperature", - ) - - with gr.Accordion("网络设置", open=False, visible=False): - apiurlTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入API地址...", - label="API地址", - value="https://api.openai.com/v1/chat/completions", - lines=2, - ) - changeAPIURLBtn = gr.Button("🔄 切换API地址") - proxyTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入代理地址...", - label="代理地址(示例:http://127.0.0.1:10809)", - value="", - lines=2, - ) - changeProxyBtn = gr.Button("🔄 设置代理地址") - - gr.Markdown(description) - gr.HTML(footer.format(versions=versions_html()), elem_id="footer") - chatgpt_predict_args = dict( - fn=predict, - inputs=[ - user_api_key, - systemPromptTxt, - history, - user_question, - chatbot, - token_count, - top_p, - temperature, - use_streaming_checkbox, - model_select_dropdown, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - outputs=[chatbot, history, status_display, token_count], - show_progress=True, - ) - - start_outputing_args = dict( - fn=start_outputing, - inputs=[], - outputs=[submitBtn, cancelBtn], - show_progress=True, - ) - - end_outputing_args = dict( - fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn] - ) - - reset_textbox_args = dict( - fn=reset_textbox, inputs=[], outputs=[user_input] - ) - - transfer_input_args = dict( - fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input, submitBtn, cancelBtn], show_progress=True - ) - - get_usage_args = dict( - fn=get_usage, inputs=[user_api_key], outputs=[usageTxt], show_progress=False - ) - - - # Chatbot - cancelBtn.click(cancel_outputing, [], []) - - user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - user_input.submit(**get_usage_args) - - submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - submitBtn.click(**get_usage_args) - - emptyBtn.click( - reset_state, - outputs=[chatbot, history, token_count, status_display], - show_progress=True, - ) - emptyBtn.click(**reset_textbox_args) - - retryBtn.click(**start_outputing_args).then( - retry, - [ - user_api_key, - systemPromptTxt, - history, - chatbot, - token_count, - top_p, - temperature, - use_streaming_checkbox, - model_select_dropdown, - language_select_dropdown, - ], - [chatbot, history, status_display, token_count], - show_progress=True, - ).then(**end_outputing_args) - retryBtn.click(**get_usage_args) - - delFirstBtn.click( - delete_first_conversation, - [history, token_count], - [history, token_count, status_display], - ) - - delLastBtn.click( - delete_last_conversation, - [chatbot, history, token_count], - [chatbot, history, token_count, status_display], - show_progress=True, - ) - - reduceTokenBtn.click( - reduce_token_size, - [ - user_api_key, - systemPromptTxt, - history, - chatbot, - token_count, - top_p, - temperature, - gr.State(sum(token_count.value[-4:])), - model_select_dropdown, - language_select_dropdown, - ], - [chatbot, history, status_display, token_count], - show_progress=True, - ) - reduceTokenBtn.click(**get_usage_args) - - # ChatGPT - keyTxt.change(submit_key, keyTxt, [user_api_key, status_display]).then(**get_usage_args) - keyTxt.submit(**get_usage_args) - - # Template - templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown]) - templateFileSelectDropdown.change( - load_template, - 
[templateFileSelectDropdown], - [promptTemplates, templateSelectDropdown], - show_progress=True, - ) - templateSelectDropdown.change( - get_template_content, - [promptTemplates, templateSelectDropdown, systemPromptTxt], - [systemPromptTxt], - show_progress=True, - ) - - # S&L - saveHistoryBtn.click( - save_chat_history, - [saveFileName, systemPromptTxt, history, chatbot], - downloadFile, - show_progress=True, - ) - saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown]) - exportMarkdownBtn.click( - export_markdown, - [saveFileName, systemPromptTxt, history, chatbot], - downloadFile, - show_progress=True, - ) - historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown]) - historyFileSelectDropdown.change( - load_chat_history, - [historyFileSelectDropdown, systemPromptTxt, history, chatbot], - [saveFileName, systemPromptTxt, history, chatbot], - show_progress=True, - ) - downloadFile.change( - load_chat_history, - [downloadFile, systemPromptTxt, history, chatbot], - [saveFileName, systemPromptTxt, history, chatbot], - ) - - # Advanced - default_btn.click( - reset_default, [], [apiurlTxt, proxyTxt, status_display], show_progress=True - ) - changeAPIURLBtn.click( - change_api_url, - [apiurlTxt], - [status_display], - show_progress=True, - ) - changeProxyBtn.click( - change_proxy, - [proxyTxt], - [status_display], - show_progress=True, - ) - -logging.info( - colorama.Back.GREEN - + "\n奥里的温馨提示:访问 http://localhost:7860 查看界面" - + colorama.Style.RESET_ALL -) -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = "奥里私人ChatGPT 🚀" - -if __name__ == "__main__": - reload_javascript() - # if running in Docker - if dockerflag: - if authflag: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - server_name="0.0.0.0", - server_port=7860, - auth=auth_list, - favicon_path="./assets/favicon.ico", - ) - else: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - server_name="0.0.0.0", - server_port=7860, - share=False, - favicon_path="./assets/favicon.ico", - ) - # if not running in Docker - else: - if authflag: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - share=False, - auth=auth_list, - favicon_path="./assets/favicon.ico", - inbrowser=True, - ) - else: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - share=False, favicon_path="./assets/favicon.ico", inbrowser=True - ) # 改为 share=True 可以创建公开分享链接 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理 diff --git a/spaces/pchuri/image2text/README.md b/spaces/pchuri/image2text/README.md deleted file mode 100644 index e224a1ec6f770b59284d2142f17ee5285bac9d2a..0000000000000000000000000000000000000000 --- a/spaces/pchuri/image2text/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image2text -emoji: 🏢 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/LM2_logger.py b/spaces/phyloforfun/VoucherVision/vouchervision/LM2_logger.py deleted file mode 100644 index ffb58e6c003df356ddeb6b010d2f4d42b27f8e34..0000000000000000000000000000000000000000 --- 
a/spaces/phyloforfun/VoucherVision/vouchervision/LM2_logger.py +++ /dev/null @@ -1,117 +0,0 @@ -import logging, os, psutil, torch, platform, cpuinfo, yaml #py-cpuinfo -from vouchervision.general_utils import get_datetime, print_main_warn, print_main_info - -def start_logging(Dirs, cfg): - run_name = cfg['leafmachine']['project']['run_name'] - path_log = os.path.join(Dirs.path_log, '__'.join(['LM2-log',str(get_datetime()), run_name])+'.log') - - # Disable default StreamHandler - logging.getLogger().handlers = [] - - # create logger - logger = logging.getLogger('Hardware Components') - logger.setLevel(logging.DEBUG) - - # create file handler and set level to debug - fh = logging.FileHandler(path_log) - fh.setLevel(logging.DEBUG) - - # create console handler and set level to debug - ch = logging.StreamHandler() - ch.setLevel(logging.DEBUG) - - # create formatter - formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s') - - # add formatter to handlers - fh.setFormatter(formatter) - ch.setFormatter(formatter) - - # add handlers to logger - logger.addHandler(fh) - logger.addHandler(ch) - - # Create a logger for the file handler - file_logger = logging.getLogger('file_logger') - file_logger.setLevel(logging.DEBUG) - file_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - file_handler = logging.FileHandler(path_log) - file_handler.setLevel(logging.DEBUG) - file_handler.setFormatter(file_formatter) - file_logger.addHandler(file_handler) - # Disable propagation of log messages to the root logger - file_logger.propagate = False - - # 'application' code - # logger.debug('debug message') - # logger.info('info message') - # logger.warning('warn message') - # logger.error('error message') - # logger.critical('critical message') - - # Get CPU information - logger.info(f"CPU: {find_cpu_info()}") - - # Get GPU information (using PyTorch) - if torch.cuda.is_available(): - num_gpus = torch.cuda.device_count() - if num_gpus == 1: - gpu = torch.cuda.get_device_properties(0) - logger.info(f"GPU: {gpu.name} ({gpu.total_memory // (1024 * 1024)} MB)") - else: - for i in range(num_gpus): - gpu = torch.cuda.get_device_properties(i) - logger.info(f"GPU {i}: {gpu.name} ({gpu.total_memory // (1024 * 1024)} MB)") - else: - logger.info("No GPU found") - logger.info("LeafMachine2 image cropping and embedding search will be extremely slow or not possible.") - print_main_info("No GPU found!") - print_main_info("LeafMachine2 image cropping and embedding search will be extremely slow or not possible.") - - # Get memory information - mem_info = psutil.virtual_memory() - logger.info(f"Memory: {mem_info.total // (1024 * 1024)} MB") - logger.info(LM2_banner()) - logger.info(f"Config added to log file") - file_logger.info('Config:\n{}'.format(yaml.dump(cfg))) - - - return logger - -def find_cpu_info(): - cpu_info = [] - cpu_info.append(platform.processor()) - try: - - with open('/proc/cpuinfo') as f: - for line in f: - if line.startswith('model name'): - cpu_info.append(line.split(':')[1].strip()) - break - return ' / '.join(cpu_info) - except: - try: - info = cpuinfo.get_cpu_info() - cpu_info = [] - cpu_info.append(info['brand_raw']) - cpu_info.append(f"{info['hz_actual_friendly']}") - return ' / '.join(cpu_info) - except: - return "CPU: UNKNOWN" - - -def LM2_banner(): - logo = """ - _ __ __ __ _ _ ___ - | | / _| \/ | | | (_) |__ \ - | | ___ __ _| |_| \ / | __ _ ___| |__ _ _ __ ___ ) | - | | / _ \/ _` | _| |\/| |/ _` |/ __| '_ \| | '_ \ / _ \ / / - | |___| __/ (_| | | | | | | 
(_| | (__| | | | | | | | __// /_ - |______\___|\__,_|_| |_| |_|\__,_|\___|_| |_|_|_| |_|\___|____| - __ __ _ _| |_ __ ___ _ - \ \ / / | | |_ _| \ \ / (_) (_) - \ \ / /__ _ _ ___| |__ |_|_ _ _\ \ / / _ ___ _ ___ _ __ - \ \/ / _ \| | | |/ __| '_ \ / _ \ '__\ \/ / | / __| |/ _ \| '_ \ - \ / (_) | |_| | (__| | | | __/ | \ / | \__ \ | (_) | | | | - \/ \___/ \__,_|\___|_| |_|\___|_| \/ |_|___/_|\___/|_| |_|""" - return logo \ No newline at end of file diff --git a/spaces/piecurus/Summarizer/utils.py b/spaces/piecurus/Summarizer/utils.py deleted file mode 100644 index e16c95418891126bd4f3d5573cbbc96a24c6b85b..0000000000000000000000000000000000000000 --- a/spaces/piecurus/Summarizer/utils.py +++ /dev/null @@ -1,137 +0,0 @@ -import re -import requests -import docx2txt -from io import StringIO -from PyPDF2 import PdfFileReader - -from bs4 import BeautifulSoup -from nltk.tokenize import sent_tokenize - -emoji_pattern = re.compile( - "[" - u"\U0001F600-\U0001F64F" # emoticons - u"\U0001F300-\U0001F5FF" # symbols & pictographs - u"\U0001F680-\U0001F6FF" # transport & map symbols - u"\U0001F1E0-\U0001F1FF" # flags (iOS) - u"\U00002702-\U000027B0" - u"\U000024C2-\U0001F251" - "]+", - flags=re.UNICODE, -) - - -def clean_text(x): - # x = x.lower() # lowercase - x = x.encode("ascii", "ignore").decode() # unicode - x = re.sub(r"https*\S+", " ", x) # url - x = re.sub(r"@\S+", " ", x) # mentions - x = re.sub(r"#\S+", " ", x) # hastags - # x = x.replace("'", "") # remove ticks - # x = re.sub("[%s]" % re.escape(string.punctuation), " ", x) # punctuation - # x = re.sub(r"\w*\d+\w*", "", x) # numbers - x = re.sub(r"\s{2,}", " ", x) # over spaces - x = emoji_pattern.sub(r"", x) # emojis - x = re.sub("[^.,!?A-Za-z0-9]+", " ", x) # special charachters except .,!? - - return x - - -def fetch_article_text(url: str): - - r = requests.get(url) - soup = BeautifulSoup(r.text, "html.parser") - results = soup.find_all(["h1", "p"]) - text = [result.text for result in results] - ARTICLE = " ".join(text) - ARTICLE = ARTICLE.replace(".", ".") - ARTICLE = ARTICLE.replace("!", "!") - ARTICLE = ARTICLE.replace("?", "?") - sentences = ARTICLE.split("") - current_chunk = 0 - chunks = [] - for sentence in sentences: - if len(chunks) == current_chunk + 1: - if len(chunks[current_chunk]) + len(sentence.split(" ")) <= 500: - chunks[current_chunk].extend(sentence.split(" ")) - else: - current_chunk += 1 - chunks.append(sentence.split(" ")) - else: - print(current_chunk) - chunks.append(sentence.split(" ")) - - for chunk_id in range(len(chunks)): - chunks[chunk_id] = " ".join(chunks[chunk_id]) - - return ARTICLE, chunks - - -def preprocess_text_for_abstractive_summarization(tokenizer, text): - sentences = sent_tokenize(text) - - # initialize - length = 0 - chunk = "" - chunks = [] - count = -1 - for sentence in sentences: - count += 1 - combined_length = ( - len(tokenizer.tokenize(sentence)) + length - ) # add the no. 
of sentence tokens to the length counter - - if combined_length <= tokenizer.max_len_single_sentence: # if it doesn't exceed - chunk += sentence + " " # add the sentence to the chunk - length = combined_length # update the length counter - - # if it is the last sentence - if count == len(sentences) - 1: - chunks.append(chunk.strip()) # save the chunk - - else: - chunks.append(chunk.strip()) # save the chunk - - # reset - length = 0 - chunk = "" - - # take care of the overflow sentence - chunk += sentence + " " - length = len(tokenizer.tokenize(sentence)) - - return chunks - - -def read_pdf(file): - pdfReader = PdfFileReader(file) - count = pdfReader.numPages - all_page_text = "" - for i in range(count): - page = pdfReader.getPage(i) - all_page_text += page.extractText() - - return all_page_text - - -def read_text_from_file(file): - - # read text file - if file.type == "text/plain": - # To convert to a string based IO: - stringio = StringIO(file.getvalue().decode("utf-8")) - - # To read file as string: - file_content = stringio.read() - - # read pdf file - elif file.type == "application/pdf": - file_content = read_pdf(file) - - # read docx file - elif ( - file.type - == "application/vnd.openxmlformats-officedocument.wordprocessingml.document" - ): - file_content = docx2txt.process(file) - - return file_content diff --git a/spaces/pkiage/time_series_autocorrelation_demo/src/visualization/__init__.py b/spaces/pkiage/time_series_autocorrelation_demo/src/visualization/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/dateutil/tz/win.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/dateutil/tz/win.py deleted file mode 100644 index cde07ba792c40903f0c334839140173b39fd8124..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/dateutil/tz/win.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module provides an interface to the native time zone data on Windows, -including :py:class:`datetime.tzinfo` implementations. - -Attempting to import this module on a non-Windows platform will raise an -:py:obj:`ImportError`. -""" -# This code was originally contributed by Jeffrey Harris. -import datetime -import struct - -from six.moves import winreg -from six import text_type - -try: - import ctypes - from ctypes import wintypes -except ValueError: - # ValueError is raised on non-Windows systems for some horrible reason. - raise ImportError("Running tzwin on non-Windows system") - -from ._common import tzrangebase - -__all__ = ["tzwin", "tzwinlocal", "tzres"] - -ONEWEEK = datetime.timedelta(7) - -TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" -TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" -TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" - - -def _settzkeyname(): - handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - try: - winreg.OpenKey(handle, TZKEYNAMENT).Close() - TZKEYNAME = TZKEYNAMENT - except WindowsError: - TZKEYNAME = TZKEYNAME9X - handle.Close() - return TZKEYNAME - - -TZKEYNAME = _settzkeyname() - - -class tzres(object): - """ - Class for accessing ``tzres.dll``, which contains timezone name related - resources. - - .. 
versionadded:: 2.5.0 - """ - p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char - - def __init__(self, tzres_loc='tzres.dll'): - # Load the user32 DLL so we can load strings from tzres - user32 = ctypes.WinDLL('user32') - - # Specify the LoadStringW function - user32.LoadStringW.argtypes = (wintypes.HINSTANCE, - wintypes.UINT, - wintypes.LPWSTR, - ctypes.c_int) - - self.LoadStringW = user32.LoadStringW - self._tzres = ctypes.WinDLL(tzres_loc) - self.tzres_loc = tzres_loc - - def load_name(self, offset): - """ - Load a timezone name from a DLL offset (integer). - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.load_name(112)) - 'Eastern Standard Time' - - :param offset: - A positive integer value referring to a string from the tzres dll. - - .. note:: - - Offsets found in the registry are generally of the form - ``@tzres.dll,-114``. The offset in this case is 114, not -114. - - """ - resource = self.p_wchar() - lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) - nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) - return resource[:nchar] - - def name_from_string(self, tzname_str): - """ - Parse strings as returned from the Windows registry into the time zone - name as defined in the registry. - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.name_from_string('@tzres.dll,-251')) - 'Dateline Daylight Time' - >>> print(tzr.name_from_string('Eastern Standard Time')) - 'Eastern Standard Time' - - :param tzname_str: - A timezone name string as returned from a Windows registry key. - - :return: - Returns the localized timezone string from tzres.dll if the string - is of the form `@tzres.dll,-offset`, else returns the input string. - """ - if not tzname_str.startswith('@'): - return tzname_str - - name_splt = tzname_str.split(',-') - try: - offset = int(name_splt[1]) - except: - raise ValueError("Malformed timezone string.") - - return self.load_name(offset) - - -class tzwinbase(tzrangebase): - """tzinfo class based on win32's timezones available in the registry.""" - def __init__(self): - raise NotImplementedError('tzwinbase is an abstract base class') - - def __eq__(self, other): - # Compare on all relevant dimensions, including name. - if not isinstance(other, tzwinbase): - return NotImplemented - - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._stddayofweek == other._stddayofweek and - self._dstdayofweek == other._dstdayofweek and - self._stdweeknumber == other._stdweeknumber and - self._dstweeknumber == other._dstweeknumber and - self._stdhour == other._stdhour and - self._dsthour == other._dsthour and - self._stdminute == other._stdminute and - self._dstminute == other._dstminute and - self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr) - - @staticmethod - def list(): - """Return a list of all time zones known to the system.""" - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZKEYNAME) as tzkey: - result = [winreg.EnumKey(tzkey, i) - for i in range(winreg.QueryInfoKey(tzkey)[0])] - return result - - def display(self): - """ - Return the display name of the time zone. - """ - return self._display - - def transitions(self, year): - """ - For a given year, get the DST on and off transition times, expressed - always on the standard time side. For zones with no transitions, this - function returns ``None``. 
- - :param year: - The year whose transitions you would like to query. - - :return: - Returns a :class:`tuple` of :class:`datetime.datetime` objects, - ``(dston, dstoff)`` for zones with an annual DST transition, or - ``None`` for fixed offset zones. - """ - - if not self.hasdst: - return None - - dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, - self._dsthour, self._dstminute, - self._dstweeknumber) - - dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, - self._stdhour, self._stdminute, - self._stdweeknumber) - - # Ambiguous dates default to the STD side - dstoff -= self._dst_base_offset - - return dston, dstoff - - def _get_hasdst(self): - return self._dstmonth != 0 - - @property - def _dst_base_offset(self): - return self._dst_base_offset_ - - -class tzwin(tzwinbase): - """ - Time zone object created from the zone info in the Windows registry - - These are similar to :py:class:`dateutil.tz.tzrange` objects in that - the time zone data is provided in the format of a single offset rule - for either 0 or 2 time zone transitions per year. - - :param: name - The name of a Windows time zone key, e.g. "Eastern Standard Time". - The full list of keys can be retrieved with :func:`tzwin.list`. - """ - - def __init__(self, name): - self._name = name - - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - keydict = valuestodict(tzkey) - - self._std_abbr = keydict["Std"] - self._dst_abbr = keydict["Dlt"] - - self._display = keydict["Display"] - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=3l16h", keydict["TZI"]) - stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 - dstoffset = stdoffset-tup[2] # + DaylightBias * -1 - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs - # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[4:9] - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[12:17] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwin(%s)" % repr(self._name) - - def __reduce__(self): - return (self.__class__, (self._name,)) - - -class tzwinlocal(tzwinbase): - """ - Class representing the local time zone information in the Windows registry - - While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time` - module) to retrieve time zone information, ``tzwinlocal`` retrieves the - rules directly from the Windows registry and creates an object like - :class:`dateutil.tz.tzwin`. - - Because Windows does not have an equivalent of :func:`time.tzset`, on - Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the - time zone settings *at the time that the process was started*, meaning - changes to the machine's time zone settings during the run of a program - on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`. - Because ``tzwinlocal`` reads the registry directly, it is unaffected by - this issue. 
- """ - def __init__(self): - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: - keydict = valuestodict(tzlocalkey) - - self._std_abbr = keydict["StandardName"] - self._dst_abbr = keydict["DaylightName"] - - try: - tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, - sn=self._std_abbr) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - _keydict = valuestodict(tzkey) - self._display = _keydict["Display"] - except OSError: - self._display = None - - stdoffset = -keydict["Bias"]-keydict["StandardBias"] - dstoffset = stdoffset-keydict["DaylightBias"] - - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # For reasons unclear, in this particular key, the day of week has been - # moved to the END of the SYSTEMTIME structure. - tup = struct.unpack("=8h", keydict["StandardStart"]) - - (self._stdmonth, - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[1:5] - - self._stddayofweek = tup[7] - - tup = struct.unpack("=8h", keydict["DaylightStart"]) - - (self._dstmonth, - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[1:5] - - self._dstdayofweek = tup[7] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwinlocal()" - - def __str__(self): - # str will return the standard name, not the daylight name. - return "tzwinlocal(%s)" % repr(self._std_abbr) - - def __reduce__(self): - return (self.__class__, ()) - - -def picknthweekday(year, month, dayofweek, hour, minute, whichweek): - """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ - first = datetime.datetime(year, month, 1, hour, minute) - - # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), - # Because 7 % 7 = 0 - weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) - wd = weekdayone + ((whichweek - 1) * ONEWEEK) - if (wd.month != month): - wd -= ONEWEEK - - return wd - - -def valuestodict(key): - """Convert a registry key's values to a dictionary.""" - dout = {} - size = winreg.QueryInfoKey(key)[1] - tz_res = None - - for i in range(size): - key_name, value, dtype = winreg.EnumValue(key, i) - if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: - # If it's a DWORD (32-bit integer), it's stored as unsigned - convert - # that to a proper signed integer - if value & (1 << 31): - value = value - (1 << 32) - elif dtype == winreg.REG_SZ: - # If it's a reference to the tzres DLL, load the actual string - if value.startswith('@tzres'): - tz_res = tz_res or tzres() - value = tz_res.name_from_string(value) - - value = value.rstrip('\x00') # Remove trailing nulls - - dout[key_name] = value - - return dout diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/mtiLib/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/mtiLib/__init__.py deleted file mode 100644 index dbedf275e3d3cfb2e8ec43eddd88b9d78ad53e15..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/mtiLib/__init__.py +++ /dev/null @@ -1,1402 +0,0 @@ -#!/usr/bin/python - -# FontDame-to-FontTools for OpenType Layout tables -# -# Source language spec is available at: -# http://monotype.github.io/OpenType_Table_Source/otl_source.html -# https://github.com/Monotype/OpenType_Table_Source/ - -from 
fontTools import ttLib -from fontTools.ttLib.tables._c_m_a_p import cmap_classes -from fontTools.ttLib.tables import otTables as ot -from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict -from fontTools.otlLib import builder as otl -from contextlib import contextmanager -from fontTools.ttLib import newTable -from fontTools.feaLib.lookupDebugInfo import LOOKUP_DEBUG_ENV_VAR, LOOKUP_DEBUG_INFO_KEY -from operator import setitem -import os -import logging - - -class MtiLibError(Exception): - pass - - -class ReferenceNotFoundError(MtiLibError): - pass - - -class FeatureNotFoundError(ReferenceNotFoundError): - pass - - -class LookupNotFoundError(ReferenceNotFoundError): - pass - - -log = logging.getLogger("fontTools.mtiLib") - - -def makeGlyph(s): - if s[:2] in ["U ", "u "]: - return ttLib.TTFont._makeGlyphName(int(s[2:], 16)) - elif s[:2] == "# ": - return "glyph%.5d" % int(s[2:]) - assert s.find(" ") < 0, "Space found in glyph name: %s" % s - assert s, "Glyph name is empty" - return s - - -def makeGlyphs(l): - return [makeGlyph(g) for g in l] - - -def mapLookup(sym, mapping): - # Lookups are addressed by name. So resolved them using a map if available. - # Fallback to parsing as lookup index if a map isn't provided. - if mapping is not None: - try: - idx = mapping[sym] - except KeyError: - raise LookupNotFoundError(sym) - else: - idx = int(sym) - return idx - - -def mapFeature(sym, mapping): - # Features are referenced by index according the spec. So, if symbol is an - # integer, use it directly. Otherwise look up in the map if provided. - try: - idx = int(sym) - except ValueError: - try: - idx = mapping[sym] - except KeyError: - raise FeatureNotFoundError(sym) - return idx - - -def setReference(mapper, mapping, sym, setter, collection, key): - try: - mapped = mapper(sym, mapping) - except ReferenceNotFoundError as e: - try: - if mapping is not None: - mapping.addDeferredMapping( - lambda ref: setter(collection, key, ref), sym, e - ) - return - except AttributeError: - pass - raise - setter(collection, key, mapped) - - -class DeferredMapping(dict): - def __init__(self): - self._deferredMappings = [] - - def addDeferredMapping(self, setter, sym, e): - log.debug("Adding deferred mapping for symbol '%s' %s", sym, type(e).__name__) - self._deferredMappings.append((setter, sym, e)) - - def applyDeferredMappings(self): - for setter, sym, e in self._deferredMappings: - log.debug( - "Applying deferred mapping for symbol '%s' %s", sym, type(e).__name__ - ) - try: - mapped = self[sym] - except KeyError: - raise e - setter(mapped) - log.debug("Set to %s", mapped) - self._deferredMappings = [] - - -def parseScriptList(lines, featureMap=None): - self = ot.ScriptList() - records = [] - with lines.between("script table"): - for line in lines: - while len(line) < 4: - line.append("") - scriptTag, langSysTag, defaultFeature, features = line - log.debug("Adding script %s language-system %s", scriptTag, langSysTag) - - langSys = ot.LangSys() - langSys.LookupOrder = None - if defaultFeature: - setReference( - mapFeature, - featureMap, - defaultFeature, - setattr, - langSys, - "ReqFeatureIndex", - ) - else: - langSys.ReqFeatureIndex = 0xFFFF - syms = stripSplitComma(features) - langSys.FeatureIndex = theList = [3] * len(syms) - for i, sym in enumerate(syms): - setReference(mapFeature, featureMap, sym, setitem, theList, i) - langSys.FeatureCount = len(langSys.FeatureIndex) - - script = [s for s in records if s.ScriptTag == scriptTag] - if script: - script = script[0].Script - else: - 
scriptRec = ot.ScriptRecord() - scriptRec.ScriptTag = scriptTag + " " * (4 - len(scriptTag)) - scriptRec.Script = ot.Script() - records.append(scriptRec) - script = scriptRec.Script - script.DefaultLangSys = None - script.LangSysRecord = [] - script.LangSysCount = 0 - - if langSysTag == "default": - script.DefaultLangSys = langSys - else: - langSysRec = ot.LangSysRecord() - langSysRec.LangSysTag = langSysTag + " " * (4 - len(langSysTag)) - langSysRec.LangSys = langSys - script.LangSysRecord.append(langSysRec) - script.LangSysCount = len(script.LangSysRecord) - - for script in records: - script.Script.LangSysRecord = sorted( - script.Script.LangSysRecord, key=lambda rec: rec.LangSysTag - ) - self.ScriptRecord = sorted(records, key=lambda rec: rec.ScriptTag) - self.ScriptCount = len(self.ScriptRecord) - return self - - -def parseFeatureList(lines, lookupMap=None, featureMap=None): - self = ot.FeatureList() - self.FeatureRecord = [] - with lines.between("feature table"): - for line in lines: - name, featureTag, lookups = line - if featureMap is not None: - assert name not in featureMap, "Duplicate feature name: %s" % name - featureMap[name] = len(self.FeatureRecord) - # If feature name is integer, make sure it matches its index. - try: - assert int(name) == len(self.FeatureRecord), "%d %d" % ( - name, - len(self.FeatureRecord), - ) - except ValueError: - pass - featureRec = ot.FeatureRecord() - featureRec.FeatureTag = featureTag - featureRec.Feature = ot.Feature() - self.FeatureRecord.append(featureRec) - feature = featureRec.Feature - feature.FeatureParams = None - syms = stripSplitComma(lookups) - feature.LookupListIndex = theList = [None] * len(syms) - for i, sym in enumerate(syms): - setReference(mapLookup, lookupMap, sym, setitem, theList, i) - feature.LookupCount = len(feature.LookupListIndex) - - self.FeatureCount = len(self.FeatureRecord) - return self - - -def parseLookupFlags(lines): - flags = 0 - filterset = None - allFlags = [ - "righttoleft", - "ignorebaseglyphs", - "ignoreligatures", - "ignoremarks", - "markattachmenttype", - "markfiltertype", - ] - while lines.peeks()[0].lower() in allFlags: - line = next(lines) - flag = { - "righttoleft": 0x0001, - "ignorebaseglyphs": 0x0002, - "ignoreligatures": 0x0004, - "ignoremarks": 0x0008, - }.get(line[0].lower()) - if flag: - assert line[1].lower() in ["yes", "no"], line[1] - if line[1].lower() == "yes": - flags |= flag - continue - if line[0].lower() == "markattachmenttype": - flags |= int(line[1]) << 8 - continue - if line[0].lower() == "markfiltertype": - flags |= 0x10 - filterset = int(line[1]) - return flags, filterset - - -def parseSingleSubst(lines, font, _lookupMap=None): - mapping = {} - for line in lines: - assert len(line) == 2, line - line = makeGlyphs(line) - mapping[line[0]] = line[1] - return otl.buildSingleSubstSubtable(mapping) - - -def parseMultiple(lines, font, _lookupMap=None): - mapping = {} - for line in lines: - line = makeGlyphs(line) - mapping[line[0]] = line[1:] - return otl.buildMultipleSubstSubtable(mapping) - - -def parseAlternate(lines, font, _lookupMap=None): - mapping = {} - for line in lines: - line = makeGlyphs(line) - mapping[line[0]] = line[1:] - return otl.buildAlternateSubstSubtable(mapping) - - -def parseLigature(lines, font, _lookupMap=None): - mapping = {} - for line in lines: - assert len(line) >= 2, line - line = makeGlyphs(line) - mapping[tuple(line[1:])] = line[0] - return otl.buildLigatureSubstSubtable(mapping) - - -def parseSinglePos(lines, font, _lookupMap=None): - values = {} - for line 
in lines: - assert len(line) == 3, line - w = line[0].title().replace(" ", "") - assert w in valueRecordFormatDict - g = makeGlyph(line[1]) - v = int(line[2]) - if g not in values: - values[g] = ValueRecord() - assert not hasattr(values[g], w), (g, w) - setattr(values[g], w, v) - return otl.buildSinglePosSubtable(values, font.getReverseGlyphMap()) - - -def parsePair(lines, font, _lookupMap=None): - self = ot.PairPos() - self.ValueFormat1 = self.ValueFormat2 = 0 - typ = lines.peeks()[0].split()[0].lower() - if typ in ("left", "right"): - self.Format = 1 - values = {} - for line in lines: - assert len(line) == 4, line - side = line[0].split()[0].lower() - assert side in ("left", "right"), side - what = line[0][len(side) :].title().replace(" ", "") - mask = valueRecordFormatDict[what][0] - glyph1, glyph2 = makeGlyphs(line[1:3]) - value = int(line[3]) - if not glyph1 in values: - values[glyph1] = {} - if not glyph2 in values[glyph1]: - values[glyph1][glyph2] = (ValueRecord(), ValueRecord()) - rec2 = values[glyph1][glyph2] - if side == "left": - self.ValueFormat1 |= mask - vr = rec2[0] - else: - self.ValueFormat2 |= mask - vr = rec2[1] - assert not hasattr(vr, what), (vr, what) - setattr(vr, what, value) - self.Coverage = makeCoverage(set(values.keys()), font) - self.PairSet = [] - for glyph1 in self.Coverage.glyphs: - values1 = values[glyph1] - pairset = ot.PairSet() - records = pairset.PairValueRecord = [] - for glyph2 in sorted(values1.keys(), key=font.getGlyphID): - values2 = values1[glyph2] - pair = ot.PairValueRecord() - pair.SecondGlyph = glyph2 - pair.Value1 = values2[0] - pair.Value2 = values2[1] if self.ValueFormat2 else None - records.append(pair) - pairset.PairValueCount = len(pairset.PairValueRecord) - self.PairSet.append(pairset) - self.PairSetCount = len(self.PairSet) - elif typ.endswith("class"): - self.Format = 2 - classDefs = [None, None] - while lines.peeks()[0].endswith("class definition begin"): - typ = lines.peek()[0][: -len("class definition begin")].lower() - idx, klass = { - "first": (0, ot.ClassDef1), - "second": (1, ot.ClassDef2), - }[typ] - assert classDefs[idx] is None - classDefs[idx] = parseClassDef(lines, font, klass=klass) - self.ClassDef1, self.ClassDef2 = classDefs - self.Class1Count, self.Class2Count = ( - 1 + max(c.classDefs.values()) for c in classDefs - ) - self.Class1Record = [ot.Class1Record() for i in range(self.Class1Count)] - for rec1 in self.Class1Record: - rec1.Class2Record = [ot.Class2Record() for j in range(self.Class2Count)] - for rec2 in rec1.Class2Record: - rec2.Value1 = ValueRecord() - rec2.Value2 = ValueRecord() - for line in lines: - assert len(line) == 4, line - side = line[0].split()[0].lower() - assert side in ("left", "right"), side - what = line[0][len(side) :].title().replace(" ", "") - mask = valueRecordFormatDict[what][0] - class1, class2, value = (int(x) for x in line[1:4]) - rec2 = self.Class1Record[class1].Class2Record[class2] - if side == "left": - self.ValueFormat1 |= mask - vr = rec2.Value1 - else: - self.ValueFormat2 |= mask - vr = rec2.Value2 - assert not hasattr(vr, what), (vr, what) - setattr(vr, what, value) - for rec1 in self.Class1Record: - for rec2 in rec1.Class2Record: - rec2.Value1 = ValueRecord(self.ValueFormat1, rec2.Value1) - rec2.Value2 = ( - ValueRecord(self.ValueFormat2, rec2.Value2) - if self.ValueFormat2 - else None - ) - - self.Coverage = makeCoverage(set(self.ClassDef1.classDefs.keys()), font) - else: - assert 0, typ - return self - - -def parseKernset(lines, font, _lookupMap=None): - typ = 
lines.peeks()[0].split()[0].lower() - if typ in ("left", "right"): - with lines.until( - ("firstclass definition begin", "secondclass definition begin") - ): - return parsePair(lines, font) - return parsePair(lines, font) - - -def makeAnchor(data, klass=ot.Anchor): - assert len(data) <= 2 - anchor = klass() - anchor.Format = 1 - anchor.XCoordinate, anchor.YCoordinate = intSplitComma(data[0]) - if len(data) > 1 and data[1] != "": - anchor.Format = 2 - anchor.AnchorPoint = int(data[1]) - return anchor - - -def parseCursive(lines, font, _lookupMap=None): - records = {} - for line in lines: - assert len(line) in [3, 4], line - idx, klass = { - "entry": (0, ot.EntryAnchor), - "exit": (1, ot.ExitAnchor), - }[line[0]] - glyph = makeGlyph(line[1]) - if glyph not in records: - records[glyph] = [None, None] - assert records[glyph][idx] is None, (glyph, idx) - records[glyph][idx] = makeAnchor(line[2:], klass) - return otl.buildCursivePosSubtable(records, font.getReverseGlyphMap()) - - -def makeMarkRecords(data, coverage, c): - records = [] - for glyph in coverage.glyphs: - klass, anchor = data[glyph] - record = c.MarkRecordClass() - record.Class = klass - setattr(record, c.MarkAnchor, anchor) - records.append(record) - return records - - -def makeBaseRecords(data, coverage, c, classCount): - records = [] - idx = {} - for glyph in coverage.glyphs: - idx[glyph] = len(records) - record = c.BaseRecordClass() - anchors = [None] * classCount - setattr(record, c.BaseAnchor, anchors) - records.append(record) - for (glyph, klass), anchor in data.items(): - record = records[idx[glyph]] - anchors = getattr(record, c.BaseAnchor) - assert anchors[klass] is None, (glyph, klass) - anchors[klass] = anchor - return records - - -def makeLigatureRecords(data, coverage, c, classCount): - records = [None] * len(coverage.glyphs) - idx = {g: i for i, g in enumerate(coverage.glyphs)} - - for (glyph, klass, compIdx, compCount), anchor in data.items(): - record = records[idx[glyph]] - if record is None: - record = records[idx[glyph]] = ot.LigatureAttach() - record.ComponentCount = compCount - record.ComponentRecord = [ot.ComponentRecord() for i in range(compCount)] - for compRec in record.ComponentRecord: - compRec.LigatureAnchor = [None] * classCount - assert record.ComponentCount == compCount, ( - glyph, - record.ComponentCount, - compCount, - ) - - anchors = record.ComponentRecord[compIdx - 1].LigatureAnchor - assert anchors[klass] is None, (glyph, compIdx, klass) - anchors[klass] = anchor - return records - - -def parseMarkToSomething(lines, font, c): - self = c.Type() - self.Format = 1 - markData = {} - baseData = {} - Data = { - "mark": (markData, c.MarkAnchorClass), - "base": (baseData, c.BaseAnchorClass), - "ligature": (baseData, c.BaseAnchorClass), - } - maxKlass = 0 - for line in lines: - typ = line[0] - assert typ in ("mark", "base", "ligature") - glyph = makeGlyph(line[1]) - data, anchorClass = Data[typ] - extraItems = 2 if typ == "ligature" else 0 - extras = tuple(int(i) for i in line[2 : 2 + extraItems]) - klass = int(line[2 + extraItems]) - anchor = makeAnchor(line[3 + extraItems :], anchorClass) - if typ == "mark": - key, value = glyph, (klass, anchor) - else: - key, value = ((glyph, klass) + extras), anchor - assert key not in data, key - data[key] = value - maxKlass = max(maxKlass, klass) - - # Mark - markCoverage = makeCoverage(set(markData.keys()), font, c.MarkCoverageClass) - markArray = c.MarkArrayClass() - markRecords = makeMarkRecords(markData, markCoverage, c) - setattr(markArray, c.MarkRecord, 
markRecords) - setattr(markArray, c.MarkCount, len(markRecords)) - setattr(self, c.MarkCoverage, markCoverage) - setattr(self, c.MarkArray, markArray) - self.ClassCount = maxKlass + 1 - - # Base - self.classCount = 0 if not baseData else 1 + max(k[1] for k, v in baseData.items()) - baseCoverage = makeCoverage( - set([k[0] for k in baseData.keys()]), font, c.BaseCoverageClass - ) - baseArray = c.BaseArrayClass() - if c.Base == "Ligature": - baseRecords = makeLigatureRecords(baseData, baseCoverage, c, self.classCount) - else: - baseRecords = makeBaseRecords(baseData, baseCoverage, c, self.classCount) - setattr(baseArray, c.BaseRecord, baseRecords) - setattr(baseArray, c.BaseCount, len(baseRecords)) - setattr(self, c.BaseCoverage, baseCoverage) - setattr(self, c.BaseArray, baseArray) - - return self - - -class MarkHelper(object): - def __init__(self): - for Which in ("Mark", "Base"): - for What in ("Coverage", "Array", "Count", "Record", "Anchor"): - key = Which + What - if Which == "Mark" and What in ("Count", "Record", "Anchor"): - value = key - else: - value = getattr(self, Which) + What - if value == "LigatureRecord": - value = "LigatureAttach" - setattr(self, key, value) - if What != "Count": - klass = getattr(ot, value) - setattr(self, key + "Class", klass) - - -class MarkToBaseHelper(MarkHelper): - Mark = "Mark" - Base = "Base" - Type = ot.MarkBasePos - - -class MarkToMarkHelper(MarkHelper): - Mark = "Mark1" - Base = "Mark2" - Type = ot.MarkMarkPos - - -class MarkToLigatureHelper(MarkHelper): - Mark = "Mark" - Base = "Ligature" - Type = ot.MarkLigPos - - -def parseMarkToBase(lines, font, _lookupMap=None): - return parseMarkToSomething(lines, font, MarkToBaseHelper()) - - -def parseMarkToMark(lines, font, _lookupMap=None): - return parseMarkToSomething(lines, font, MarkToMarkHelper()) - - -def parseMarkToLigature(lines, font, _lookupMap=None): - return parseMarkToSomething(lines, font, MarkToLigatureHelper()) - - -def stripSplitComma(line): - return [s.strip() for s in line.split(",")] if line else [] - - -def intSplitComma(line): - return [int(i) for i in line.split(",")] if line else [] - - -# Copied from fontTools.subset -class ContextHelper(object): - def __init__(self, klassName, Format): - if klassName.endswith("Subst"): - Typ = "Sub" - Type = "Subst" - else: - Typ = "Pos" - Type = "Pos" - if klassName.startswith("Chain"): - Chain = "Chain" - InputIdx = 1 - DataLen = 3 - else: - Chain = "" - InputIdx = 0 - DataLen = 1 - ChainTyp = Chain + Typ - - self.Typ = Typ - self.Type = Type - self.Chain = Chain - self.ChainTyp = ChainTyp - self.InputIdx = InputIdx - self.DataLen = DataLen - - self.LookupRecord = Type + "LookupRecord" - - if Format == 1: - Coverage = lambda r: r.Coverage - ChainCoverage = lambda r: r.Coverage - ContextData = lambda r: (None,) - ChainContextData = lambda r: (None, None, None) - SetContextData = None - SetChainContextData = None - RuleData = lambda r: (r.Input,) - ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead) - - def SetRuleData(r, d): - (r.Input,) = d - (r.GlyphCount,) = (len(x) + 1 for x in d) - - def ChainSetRuleData(r, d): - (r.Backtrack, r.Input, r.LookAhead) = d - ( - r.BacktrackGlyphCount, - r.InputGlyphCount, - r.LookAheadGlyphCount, - ) = (len(d[0]), len(d[1]) + 1, len(d[2])) - - elif Format == 2: - Coverage = lambda r: r.Coverage - ChainCoverage = lambda r: r.Coverage - ContextData = lambda r: (r.ClassDef,) - ChainContextData = lambda r: ( - r.BacktrackClassDef, - r.InputClassDef, - r.LookAheadClassDef, - ) - - def SetContextData(r, 
d): - (r.ClassDef,) = d - - def SetChainContextData(r, d): - (r.BacktrackClassDef, r.InputClassDef, r.LookAheadClassDef) = d - - RuleData = lambda r: (r.Class,) - ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead) - - def SetRuleData(r, d): - (r.Class,) = d - (r.GlyphCount,) = (len(x) + 1 for x in d) - - def ChainSetRuleData(r, d): - (r.Backtrack, r.Input, r.LookAhead) = d - ( - r.BacktrackGlyphCount, - r.InputGlyphCount, - r.LookAheadGlyphCount, - ) = (len(d[0]), len(d[1]) + 1, len(d[2])) - - elif Format == 3: - Coverage = lambda r: r.Coverage[0] - ChainCoverage = lambda r: r.InputCoverage[0] - ContextData = None - ChainContextData = None - SetContextData = None - SetChainContextData = None - RuleData = lambda r: r.Coverage - ChainRuleData = lambda r: ( - r.BacktrackCoverage + r.InputCoverage + r.LookAheadCoverage - ) - - def SetRuleData(r, d): - (r.Coverage,) = d - (r.GlyphCount,) = (len(x) for x in d) - - def ChainSetRuleData(r, d): - (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d - ( - r.BacktrackGlyphCount, - r.InputGlyphCount, - r.LookAheadGlyphCount, - ) = (len(x) for x in d) - - else: - assert 0, "unknown format: %s" % Format - - if Chain: - self.Coverage = ChainCoverage - self.ContextData = ChainContextData - self.SetContextData = SetChainContextData - self.RuleData = ChainRuleData - self.SetRuleData = ChainSetRuleData - else: - self.Coverage = Coverage - self.ContextData = ContextData - self.SetContextData = SetContextData - self.RuleData = RuleData - self.SetRuleData = SetRuleData - - if Format == 1: - self.Rule = ChainTyp + "Rule" - self.RuleCount = ChainTyp + "RuleCount" - self.RuleSet = ChainTyp + "RuleSet" - self.RuleSetCount = ChainTyp + "RuleSetCount" - self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] - elif Format == 2: - self.Rule = ChainTyp + "ClassRule" - self.RuleCount = ChainTyp + "ClassRuleCount" - self.RuleSet = ChainTyp + "ClassSet" - self.RuleSetCount = ChainTyp + "ClassSetCount" - self.Intersect = lambda glyphs, c, r: ( - c.intersect_class(glyphs, r) - if c - else (set(glyphs) if r == 0 else set()) - ) - - self.ClassDef = "InputClassDef" if Chain else "ClassDef" - self.ClassDefIndex = 1 if Chain else 0 - self.Input = "Input" if Chain else "Class" - - -def parseLookupRecords(items, klassName, lookupMap=None): - klass = getattr(ot, klassName) - lst = [] - for item in items: - rec = klass() - item = stripSplitComma(item) - assert len(item) == 2, item - idx = int(item[0]) - assert idx > 0, idx - rec.SequenceIndex = idx - 1 - setReference(mapLookup, lookupMap, item[1], setattr, rec, "LookupListIndex") - lst.append(rec) - return lst - - -def makeClassDef(classDefs, font, klass=ot.Coverage): - if not classDefs: - return None - self = klass() - self.classDefs = dict(classDefs) - return self - - -def parseClassDef(lines, font, klass=ot.ClassDef): - classDefs = {} - with lines.between("class definition"): - for line in lines: - glyph = makeGlyph(line[0]) - assert glyph not in classDefs, glyph - classDefs[glyph] = int(line[1]) - return makeClassDef(classDefs, font, klass) - - -def makeCoverage(glyphs, font, klass=ot.Coverage): - if not glyphs: - return None - if isinstance(glyphs, set): - glyphs = sorted(glyphs) - coverage = klass() - coverage.glyphs = sorted(set(glyphs), key=font.getGlyphID) - return coverage - - -def parseCoverage(lines, font, klass=ot.Coverage): - glyphs = [] - with lines.between("coverage definition"): - for line in lines: - glyphs.append(makeGlyph(line[0])) - return makeCoverage(glyphs, font, klass) - - -def 
bucketizeRules(self, c, rules, bucketKeys): - buckets = {} - for seq, recs in rules: - buckets.setdefault(seq[c.InputIdx][0], []).append( - (tuple(s[1 if i == c.InputIdx else 0 :] for i, s in enumerate(seq)), recs) - ) - - rulesets = [] - for firstGlyph in bucketKeys: - if firstGlyph not in buckets: - rulesets.append(None) - continue - thisRules = [] - for seq, recs in buckets[firstGlyph]: - rule = getattr(ot, c.Rule)() - c.SetRuleData(rule, seq) - setattr(rule, c.Type + "Count", len(recs)) - setattr(rule, c.LookupRecord, recs) - thisRules.append(rule) - - ruleset = getattr(ot, c.RuleSet)() - setattr(ruleset, c.Rule, thisRules) - setattr(ruleset, c.RuleCount, len(thisRules)) - rulesets.append(ruleset) - - setattr(self, c.RuleSet, rulesets) - setattr(self, c.RuleSetCount, len(rulesets)) - - -def parseContext(lines, font, Type, lookupMap=None): - self = getattr(ot, Type)() - typ = lines.peeks()[0].split()[0].lower() - if typ == "glyph": - self.Format = 1 - log.debug("Parsing %s format %s", Type, self.Format) - c = ContextHelper(Type, self.Format) - rules = [] - for line in lines: - assert line[0].lower() == "glyph", line[0] - while len(line) < 1 + c.DataLen: - line.append("") - seq = tuple(makeGlyphs(stripSplitComma(i)) for i in line[1 : 1 + c.DataLen]) - recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap) - rules.append((seq, recs)) - - firstGlyphs = set(seq[c.InputIdx][0] for seq, recs in rules) - self.Coverage = makeCoverage(firstGlyphs, font) - bucketizeRules(self, c, rules, self.Coverage.glyphs) - elif typ.endswith("class"): - self.Format = 2 - log.debug("Parsing %s format %s", Type, self.Format) - c = ContextHelper(Type, self.Format) - classDefs = [None] * c.DataLen - while lines.peeks()[0].endswith("class definition begin"): - typ = lines.peek()[0][: -len("class definition begin")].lower() - idx, klass = { - 1: { - "": (0, ot.ClassDef), - }, - 3: { - "backtrack": (0, ot.BacktrackClassDef), - "": (1, ot.InputClassDef), - "lookahead": (2, ot.LookAheadClassDef), - }, - }[c.DataLen][typ] - assert classDefs[idx] is None, idx - classDefs[idx] = parseClassDef(lines, font, klass=klass) - c.SetContextData(self, classDefs) - rules = [] - for line in lines: - assert line[0].lower().startswith("class"), line[0] - while len(line) < 1 + c.DataLen: - line.append("") - seq = tuple(intSplitComma(i) for i in line[1 : 1 + c.DataLen]) - recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap) - rules.append((seq, recs)) - firstClasses = set(seq[c.InputIdx][0] for seq, recs in rules) - firstGlyphs = set( - g for g, c in classDefs[c.InputIdx].classDefs.items() if c in firstClasses - ) - self.Coverage = makeCoverage(firstGlyphs, font) - bucketizeRules(self, c, rules, range(max(firstClasses) + 1)) - elif typ.endswith("coverage"): - self.Format = 3 - log.debug("Parsing %s format %s", Type, self.Format) - c = ContextHelper(Type, self.Format) - coverages = tuple([] for i in range(c.DataLen)) - while lines.peeks()[0].endswith("coverage definition begin"): - typ = lines.peek()[0][: -len("coverage definition begin")].lower() - idx, klass = { - 1: { - "": (0, ot.Coverage), - }, - 3: { - "backtrack": (0, ot.BacktrackCoverage), - "input": (1, ot.InputCoverage), - "lookahead": (2, ot.LookAheadCoverage), - }, - }[c.DataLen][typ] - coverages[idx].append(parseCoverage(lines, font, klass=klass)) - c.SetRuleData(self, coverages) - lines = list(lines) - assert len(lines) == 1 - line = lines[0] - assert line[0].lower() == "coverage", line[0] - recs = parseLookupRecords(line[1:], 
c.LookupRecord, lookupMap) - setattr(self, c.Type + "Count", len(recs)) - setattr(self, c.LookupRecord, recs) - else: - assert 0, typ - return self - - -def parseContextSubst(lines, font, lookupMap=None): - return parseContext(lines, font, "ContextSubst", lookupMap=lookupMap) - - -def parseContextPos(lines, font, lookupMap=None): - return parseContext(lines, font, "ContextPos", lookupMap=lookupMap) - - -def parseChainedSubst(lines, font, lookupMap=None): - return parseContext(lines, font, "ChainContextSubst", lookupMap=lookupMap) - - -def parseChainedPos(lines, font, lookupMap=None): - return parseContext(lines, font, "ChainContextPos", lookupMap=lookupMap) - - -def parseReverseChainedSubst(lines, font, _lookupMap=None): - self = ot.ReverseChainSingleSubst() - self.Format = 1 - coverages = ([], []) - while lines.peeks()[0].endswith("coverage definition begin"): - typ = lines.peek()[0][: -len("coverage definition begin")].lower() - idx, klass = { - "backtrack": (0, ot.BacktrackCoverage), - "lookahead": (1, ot.LookAheadCoverage), - }[typ] - coverages[idx].append(parseCoverage(lines, font, klass=klass)) - self.BacktrackCoverage = coverages[0] - self.BacktrackGlyphCount = len(self.BacktrackCoverage) - self.LookAheadCoverage = coverages[1] - self.LookAheadGlyphCount = len(self.LookAheadCoverage) - mapping = {} - for line in lines: - assert len(line) == 2, line - line = makeGlyphs(line) - mapping[line[0]] = line[1] - self.Coverage = makeCoverage(set(mapping.keys()), font) - self.Substitute = [mapping[k] for k in self.Coverage.glyphs] - self.GlyphCount = len(self.Substitute) - return self - - -def parseLookup(lines, tableTag, font, lookupMap=None): - line = lines.expect("lookup") - _, name, typ = line - log.debug("Parsing lookup type %s %s", typ, name) - lookup = ot.Lookup() - lookup.LookupFlag, filterset = parseLookupFlags(lines) - if filterset is not None: - lookup.MarkFilteringSet = filterset - lookup.LookupType, parseLookupSubTable = { - "GSUB": { - "single": (1, parseSingleSubst), - "multiple": (2, parseMultiple), - "alternate": (3, parseAlternate), - "ligature": (4, parseLigature), - "context": (5, parseContextSubst), - "chained": (6, parseChainedSubst), - "reversechained": (8, parseReverseChainedSubst), - }, - "GPOS": { - "single": (1, parseSinglePos), - "pair": (2, parsePair), - "kernset": (2, parseKernset), - "cursive": (3, parseCursive), - "mark to base": (4, parseMarkToBase), - "mark to ligature": (5, parseMarkToLigature), - "mark to mark": (6, parseMarkToMark), - "context": (7, parseContextPos), - "chained": (8, parseChainedPos), - }, - }[tableTag][typ] - - with lines.until("lookup end"): - subtables = [] - - while lines.peek(): - with lines.until(("% subtable", "subtable end")): - while lines.peek(): - subtable = parseLookupSubTable(lines, font, lookupMap) - assert lookup.LookupType == subtable.LookupType - subtables.append(subtable) - if lines.peeks()[0] in ("% subtable", "subtable end"): - next(lines) - lines.expect("lookup end") - - lookup.SubTable = subtables - lookup.SubTableCount = len(lookup.SubTable) - if lookup.SubTableCount == 0: - # Remove this return when following is fixed: - # https://github.com/fonttools/fonttools/issues/789 - return None - return lookup - - -def parseGSUBGPOS(lines, font, tableTag): - container = ttLib.getTableClass(tableTag)() - lookupMap = DeferredMapping() - featureMap = DeferredMapping() - assert tableTag in ("GSUB", "GPOS") - log.debug("Parsing %s", tableTag) - self = getattr(ot, tableTag)() - self.Version = 0x00010000 - fields = { - "script 
table begin": ( - "ScriptList", - lambda lines: parseScriptList(lines, featureMap), - ), - "feature table begin": ( - "FeatureList", - lambda lines: parseFeatureList(lines, lookupMap, featureMap), - ), - "lookup": ("LookupList", None), - } - for attr, parser in fields.values(): - setattr(self, attr, None) - while lines.peek() is not None: - typ = lines.peek()[0].lower() - if typ not in fields: - log.debug("Skipping %s", lines.peek()) - next(lines) - continue - attr, parser = fields[typ] - if typ == "lookup": - if self.LookupList is None: - self.LookupList = ot.LookupList() - self.LookupList.Lookup = [] - _, name, _ = lines.peek() - lookup = parseLookup(lines, tableTag, font, lookupMap) - if lookupMap is not None: - assert name not in lookupMap, "Duplicate lookup name: %s" % name - lookupMap[name] = len(self.LookupList.Lookup) - else: - assert int(name) == len(self.LookupList.Lookup), "%d %d" % ( - name, - len(self.Lookup), - ) - self.LookupList.Lookup.append(lookup) - else: - assert getattr(self, attr) is None, attr - setattr(self, attr, parser(lines)) - if self.LookupList: - self.LookupList.LookupCount = len(self.LookupList.Lookup) - if lookupMap is not None: - lookupMap.applyDeferredMappings() - if os.environ.get(LOOKUP_DEBUG_ENV_VAR): - if "Debg" not in font: - font["Debg"] = newTable("Debg") - font["Debg"].data = {} - debug = ( - font["Debg"] - .data.setdefault(LOOKUP_DEBUG_INFO_KEY, {}) - .setdefault(tableTag, {}) - ) - for name, lookup in lookupMap.items(): - debug[str(lookup)] = ["", name, ""] - - featureMap.applyDeferredMappings() - container.table = self - return container - - -def parseGSUB(lines, font): - return parseGSUBGPOS(lines, font, "GSUB") - - -def parseGPOS(lines, font): - return parseGSUBGPOS(lines, font, "GPOS") - - -def parseAttachList(lines, font): - points = {} - with lines.between("attachment list"): - for line in lines: - glyph = makeGlyph(line[0]) - assert glyph not in points, glyph - points[glyph] = [int(i) for i in line[1:]] - return otl.buildAttachList(points, font.getReverseGlyphMap()) - - -def parseCaretList(lines, font): - carets = {} - with lines.between("carets"): - for line in lines: - glyph = makeGlyph(line[0]) - assert glyph not in carets, glyph - num = int(line[1]) - thisCarets = [int(i) for i in line[2:]] - assert num == len(thisCarets), line - carets[glyph] = thisCarets - return otl.buildLigCaretList(carets, {}, font.getReverseGlyphMap()) - - -def makeMarkFilteringSets(sets, font): - self = ot.MarkGlyphSetsDef() - self.MarkSetTableFormat = 1 - self.MarkSetCount = 1 + max(sets.keys()) - self.Coverage = [None] * self.MarkSetCount - for k, v in sorted(sets.items()): - self.Coverage[k] = makeCoverage(set(v), font) - return self - - -def parseMarkFilteringSets(lines, font): - sets = {} - with lines.between("set definition"): - for line in lines: - assert len(line) == 2, line - glyph = makeGlyph(line[0]) - # TODO accept set names - st = int(line[1]) - if st not in sets: - sets[st] = [] - sets[st].append(glyph) - return makeMarkFilteringSets(sets, font) - - -def parseGDEF(lines, font): - container = ttLib.getTableClass("GDEF")() - log.debug("Parsing GDEF") - self = ot.GDEF() - fields = { - "class definition begin": ( - "GlyphClassDef", - lambda lines, font: parseClassDef(lines, font, klass=ot.GlyphClassDef), - ), - "attachment list begin": ("AttachList", parseAttachList), - "carets begin": ("LigCaretList", parseCaretList), - "mark attachment class definition begin": ( - "MarkAttachClassDef", - lambda lines, font: parseClassDef(lines, font, 
klass=ot.MarkAttachClassDef), - ), - "markfilter set definition begin": ("MarkGlyphSetsDef", parseMarkFilteringSets), - } - for attr, parser in fields.values(): - setattr(self, attr, None) - while lines.peek() is not None: - typ = lines.peek()[0].lower() - if typ not in fields: - log.debug("Skipping %s", typ) - next(lines) - continue - attr, parser = fields[typ] - assert getattr(self, attr) is None, attr - setattr(self, attr, parser(lines, font)) - self.Version = 0x00010000 if self.MarkGlyphSetsDef is None else 0x00010002 - container.table = self - return container - - -def parseCmap(lines, font): - container = ttLib.getTableClass("cmap")() - log.debug("Parsing cmap") - tables = [] - while lines.peek() is not None: - lines.expect("cmap subtable %d" % len(tables)) - platId, encId, fmt, lang = [ - parseCmapId(lines, field) - for field in ("platformID", "encodingID", "format", "language") - ] - table = cmap_classes[fmt](fmt) - table.platformID = platId - table.platEncID = encId - table.language = lang - table.cmap = {} - line = next(lines) - while line[0] != "end subtable": - table.cmap[int(line[0], 16)] = line[1] - line = next(lines) - tables.append(table) - container.tableVersion = 0 - container.tables = tables - return container - - -def parseCmapId(lines, field): - line = next(lines) - assert field == line[0] - return int(line[1]) - - -def parseTable(lines, font, tableTag=None): - log.debug("Parsing table") - line = lines.peeks() - tag = None - if line[0].split()[0] == "FontDame": - tag = line[0].split()[1] - elif " ".join(line[0].split()[:3]) == "Font Chef Table": - tag = line[0].split()[3] - if tag is not None: - next(lines) - tag = tag.ljust(4) - if tableTag is None: - tableTag = tag - else: - assert tableTag == tag, (tableTag, tag) - - assert ( - tableTag is not None - ), "Don't know what table to parse and data doesn't specify" - - return { - "GSUB": parseGSUB, - "GPOS": parseGPOS, - "GDEF": parseGDEF, - "cmap": parseCmap, - }[tableTag](lines, font) - - -class Tokenizer(object): - def __init__(self, f): - # TODO BytesIO / StringIO as needed? 
also, figure out whether we work on bytes or unicode - lines = iter(f) - try: - self.filename = f.name - except: - self.filename = None - self.lines = iter(lines) - self.line = "" - self.lineno = 0 - self.stoppers = [] - self.buffer = None - - def __iter__(self): - return self - - def _next_line(self): - self.lineno += 1 - line = self.line = next(self.lines) - line = [s.strip() for s in line.split("\t")] - if len(line) == 1 and not line[0]: - del line[0] - if line and not line[-1]: - log.warning("trailing tab found on line %d: %s" % (self.lineno, self.line)) - while line and not line[-1]: - del line[-1] - return line - - def _next_nonempty(self): - while True: - line = self._next_line() - # Skip comments and empty lines - if line and line[0] and (line[0][0] != "%" or line[0] == "% subtable"): - return line - - def _next_buffered(self): - if self.buffer: - ret = self.buffer - self.buffer = None - return ret - else: - return self._next_nonempty() - - def __next__(self): - line = self._next_buffered() - if line[0].lower() in self.stoppers: - self.buffer = line - raise StopIteration - return line - - def next(self): - return self.__next__() - - def peek(self): - if not self.buffer: - try: - self.buffer = self._next_nonempty() - except StopIteration: - return None - if self.buffer[0].lower() in self.stoppers: - return None - return self.buffer - - def peeks(self): - ret = self.peek() - return ret if ret is not None else ("",) - - @contextmanager - def between(self, tag): - start = tag + " begin" - end = tag + " end" - self.expectendswith(start) - self.stoppers.append(end) - yield - del self.stoppers[-1] - self.expect(tag + " end") - - @contextmanager - def until(self, tags): - if type(tags) is not tuple: - tags = (tags,) - self.stoppers.extend(tags) - yield - del self.stoppers[-len(tags) :] - - def expect(self, s): - line = next(self) - tag = line[0].lower() - assert tag == s, "Expected '%s', got '%s'" % (s, tag) - return line - - def expectendswith(self, s): - line = next(self) - tag = line[0].lower() - assert tag.endswith(s), "Expected '*%s', got '%s'" % (s, tag) - return line - - -def build(f, font, tableTag=None): - """Convert a Monotype font layout file to an OpenType layout object - - A font object must be passed, but this may be a "dummy" font; it is only - used for sorting glyph sets when making coverage tables and to hold the - OpenType layout table while it is being built. - - Args: - f: A file object. - font (TTFont): A font object. - tableTag (string): If provided, asserts that the file contains data for the - given OpenType table. - - Returns: - An object representing the table. (e.g. ``table_G_S_U_B_``) - """ - lines = Tokenizer(f) - return parseTable(lines, font, tableTag=tableTag) - - -def main(args=None, font=None): - """Convert a FontDame OTL file to TTX XML - - Writes XML output to stdout. - - Args: - args: Command line arguments (``--font``, ``--table``, input files). 
- """
- """ - import sys - from fontTools import configLogger - from fontTools.misc.testTools import MockFont - - if args is None: - args = sys.argv[1:] - - # configure the library logger (for >= WARNING) - configLogger() - # comment this out to enable debug messages from mtiLib's logger - # log.setLevel(logging.DEBUG) - - import argparse - - parser = argparse.ArgumentParser( - "fonttools mtiLib", - description=main.__doc__, - ) - - parser.add_argument( - "--font", - "-f", - metavar="FILE", - dest="font", - help="Input TTF files (used for glyph classes and sorting coverage tables)", - ) - parser.add_argument( - "--table", - "-t", - metavar="TABLE", - dest="tableTag", - help="Table to fill (sniffed from input file if not provided)", - ) - parser.add_argument( - "inputs", metavar="FILE", type=str, nargs="+", help="Input FontDame .txt files" - ) - - args = parser.parse_args(args) - - if font is None: - if args.font: - font = ttLib.TTFont(args.font) - else: - font = MockFont() - - for f in args.inputs: - log.debug("Processing %s", f) - with open(f, "rt", encoding="utf-8") as f: - table = build(f, font, tableTag=args.tableTag) - blob = table.compile(font) # Make sure it compiles - decompiled = table.__class__() - decompiled.decompile(blob, font) # Make sure it decompiles! - - # continue - from fontTools.misc import xmlWriter - - tag = table.tableTag - writer = xmlWriter.XMLWriter(sys.stdout) - writer.begintag(tag) - writer.newline() - # table.toXML(writer, font) - decompiled.toXML(writer, font) - writer.endtag(tag) - writer.newline() - - -if __name__ == "__main__": - import sys - - sys.exit(main()) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-278f6d4c.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-278f6d4c.js deleted file mode 100644 index 012f57730af4db4198d0127c23a88307c9369de2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-278f6d4c.js +++ /dev/null @@ -1,2 +0,0 @@ -import{a as T}from"./Tabs-0ade7d4b.js";import j from"./Index-ab6a99fa.js";import"./Index-37584f50.js";import"./index-0526d562.js";import"./svelte/svelte.js";const{SvelteComponent:y,attr:r,component_subscribe:h,create_component:A,create_slot:B,destroy_component:D,detach:E,element:M,get_all_dirty_from_scope:z,get_slot_changes:F,init:G,insert:H,mount_component:J,safe_not_equal:K,set_style:v,transition_in:w,transition_out:C,update_slot_base:L}=window.__gradio__svelte__internal,{getContext:N,onMount:O,createEventDispatcher:P,tick:Q}=window.__gradio__svelte__internal;function R(_){let e;const l=_[8].default,t=B(l,_,_[9],null);return{c(){t&&t.c()},m(n,s){t&&t.m(n,s),e=!0},p(n,s){t&&t.p&&(!e||s&512)&&L(t,l,n,n[9],e?F(l,n[9],s,null):z(n[9]),null)},i(n){e||(w(t,n),e=!0)},o(n){C(t,n),e=!1},d(n){t&&t.d(n)}}}function U(_){let e,l,t,n;return l=new j({props:{$$slots:{default:[R]},$$scope:{ctx:_}}}),{c(){e=M("div"),A(l.$$.fragment),r(e,"id",_[0]),r(e,"class",t="tabitem "+_[1].join(" ")+" svelte-19hvt5v"),v(e,"display",_[3]===_[2]?"block":"none")},m(s,a){H(s,e,a),J(l,e,null),n=!0},p(s,[a]){const c={};a&512&&(c.$$scope={dirty:a,ctx:s}),l.$set(c),(!n||a&1)&&r(e,"id",s[0]),(!n||a&2&&t!==(t="tabitem "+s[1].join(" ")+" svelte-19hvt5v"))&&r(e,"class",t),a&12&&v(e,"display",s[3]===s[2]?"block":"none")},i(s){n||(w(l.$$.fragment,s),n=!0)},o(s){C(l.$$.fragment,s),n=!1},d(s){s&&E(e),D(l)}}}function V(_,e,l){let 
t,n,{$$slots:s={},$$scope:a}=e,{elem_id:c=""}=e,{elem_classes:f=[]}=e,{name:m}=e,{id:u={}}=e;const i=P(),{register_tab:k,unregister_tab:q,selected_tab:d,selected_tab_index:g}=N(T);h(_,d,o=>l(3,n=o)),h(_,g,o=>l(7,t=o));let b=k({name:m,id:u,elem_id:c});return O(()=>()=>q({name:m,id:u,elem_id:c})),_.$$set=o=>{"elem_id"in o&&l(0,c=o.elem_id),"elem_classes"in o&&l(1,f=o.elem_classes),"name"in o&&l(6,m=o.name),"id"in o&&l(2,u=o.id),"$$scope"in o&&l(9,a=o.$$scope)},_.$$.update=()=>{_.$$.dirty&192&&t===b&&Q().then(()=>i("select",{value:m,index:b}))},[c,f,u,n,d,g,m,t,s,a]}class W extends y{constructor(e){super(),G(this,e,V,U,K,{elem_id:0,elem_classes:1,name:6,id:2})}}const{SvelteComponent:X,create_component:Y,create_slot:Z,destroy_component:p,get_all_dirty_from_scope:x,get_slot_changes:$,init:ee,mount_component:te,safe_not_equal:ne,transition_in:I,transition_out:S,update_slot_base:le}=window.__gradio__svelte__internal;function se(_){let e;const l=_[5].default,t=Z(l,_,_[7],null);return{c(){t&&t.c()},m(n,s){t&&t.m(n,s),e=!0},p(n,s){t&&t.p&&(!e||s&128)&&le(t,l,n,n[7],e?$(l,n[7],s,null):x(n[7]),null)},i(n){e||(I(t,n),e=!0)},o(n){S(t,n),e=!1},d(n){t&&t.d(n)}}}function _e(_){let e,l;return e=new W({props:{elem_id:_[0],elem_classes:_[1],name:_[2],id:_[3],$$slots:{default:[se]},$$scope:{ctx:_}}}),e.$on("select",_[6]),{c(){Y(e.$$.fragment)},m(t,n){te(e,t,n),l=!0},p(t,[n]){const s={};n&1&&(s.elem_id=t[0]),n&2&&(s.elem_classes=t[1]),n&4&&(s.name=t[2]),n&8&&(s.id=t[3]),n&128&&(s.$$scope={dirty:n,ctx:t}),e.$set(s)},i(t){l||(I(e.$$.fragment,t),l=!0)},o(t){S(e.$$.fragment,t),l=!1},d(t){p(e,t)}}}function ie(_,e,l){let{$$slots:t={},$$scope:n}=e,{elem_id:s=""}=e,{elem_classes:a=[]}=e,{label:c}=e,{id:f}=e,{gradio:m}=e;const u=({detail:i})=>m.dispatch("select",i);return _.$$set=i=>{"elem_id"in i&&l(0,s=i.elem_id),"elem_classes"in i&&l(1,a=i.elem_classes),"label"in i&&l(2,c=i.label),"id"in i&&l(3,f=i.id),"gradio"in i&&l(4,m=i.gradio),"$$scope"in i&&l(7,n=i.$$scope)},[s,a,c,f,m,t,u,n]}class fe extends X{constructor(e){super(),ee(this,e,ie,_e,ne,{elem_id:0,elem_classes:1,label:2,id:3,gradio:4})}}export{fe as default}; -//# sourceMappingURL=Index-278f6d4c.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/rules_block/paragraph.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/rules_block/paragraph.py deleted file mode 100644 index 5388a4b1468defccb5ed4d7d68f3e5ac1a747178..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/rules_block/paragraph.py +++ /dev/null @@ -1,65 +0,0 @@ -"""Paragraph.""" -import logging - -from .state_block import StateBlock - -LOGGER = logging.getLogger(__name__) - - -def paragraph(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: - LOGGER.debug( - "entering paragraph: %s, %s, %s, %s", state, startLine, endLine, silent - ) - - nextLine = startLine + 1 - ruler = state.md.block.ruler - terminatorRules = ruler.getRules("paragraph") - endLine = state.lineMax - - oldParentType = state.parentType - state.parentType = "paragraph" - - # jump line-by-line until empty one or EOF - while nextLine < endLine: - if state.isEmpty(nextLine): - break - # this would be a code block normally, but after paragraph - # it's considered a lazy continuation regardless of what's there - if state.sCount[nextLine] - state.blkIndent > 3: - nextLine += 1 - continue - - # quirk for blockquotes, this line should already be checked by that rule - if 
state.sCount[nextLine] < 0: - nextLine += 1 - continue - - # Some tags can terminate paragraph without empty line. - terminate = False - for terminatorRule in terminatorRules: - if terminatorRule(state, nextLine, endLine, True): - terminate = True - break - - if terminate: - break - - nextLine += 1 - - content = state.getLines(startLine, nextLine, state.blkIndent, False).strip() - - state.line = nextLine - - token = state.push("paragraph_open", "p", 1) - token.map = [startLine, state.line] - - token = state.push("inline", "", 0) - token.content = content - token.map = [startLine, state.line] - token.children = [] - - token = state.push("paragraph_close", "p", -1) - - state.parentType = oldParentType - - return True diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_rcparams.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_rcparams.py deleted file mode 100644 index 515068c462d4086efee80ea345033000081c6cc7..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_rcparams.py +++ /dev/null @@ -1,630 +0,0 @@ -import copy -import os -from pathlib import Path -import subprocess -import sys -from unittest import mock - -from cycler import cycler, Cycler -import pytest - -import matplotlib as mpl -from matplotlib import _api, _c_internal_utils -import matplotlib.pyplot as plt -import matplotlib.colors as mcolors -import numpy as np -from matplotlib.rcsetup import ( - validate_bool, - validate_color, - validate_colorlist, - _validate_color_or_linecolor, - validate_cycler, - validate_float, - validate_fontstretch, - validate_fontweight, - validate_hatch, - validate_hist_bins, - validate_int, - validate_markevery, - validate_stringlist, - _validate_linestyle, - _listify_validator) - - -def test_rcparams(tmpdir): - mpl.rc('text', usetex=False) - mpl.rc('lines', linewidth=22) - - usetex = mpl.rcParams['text.usetex'] - linewidth = mpl.rcParams['lines.linewidth'] - - rcpath = Path(tmpdir) / 'test_rcparams.rc' - rcpath.write_text('lines.linewidth: 33', encoding='utf-8') - - # test context given dictionary - with mpl.rc_context(rc={'text.usetex': not usetex}): - assert mpl.rcParams['text.usetex'] == (not usetex) - assert mpl.rcParams['text.usetex'] == usetex - - # test context given filename (mpl.rc sets linewidth to 33) - with mpl.rc_context(fname=rcpath): - assert mpl.rcParams['lines.linewidth'] == 33 - assert mpl.rcParams['lines.linewidth'] == linewidth - - # test context given filename and dictionary - with mpl.rc_context(fname=rcpath, rc={'lines.linewidth': 44}): - assert mpl.rcParams['lines.linewidth'] == 44 - assert mpl.rcParams['lines.linewidth'] == linewidth - - # test context as decorator (and test reusability, by calling func twice) - @mpl.rc_context({'lines.linewidth': 44}) - def func(): - assert mpl.rcParams['lines.linewidth'] == 44 - - func() - func() - - # test rc_file - mpl.rc_file(rcpath) - assert mpl.rcParams['lines.linewidth'] == 33 - - -def test_RcParams_class(): - rc = mpl.RcParams({'font.cursive': ['Apple Chancery', - 'Textile', - 'Zapf Chancery', - 'cursive'], - 'font.family': 'sans-serif', - 'font.weight': 'normal', - 'font.size': 12}) - - expected_repr = """ -RcParams({'font.cursive': ['Apple Chancery', - 'Textile', - 'Zapf Chancery', - 'cursive'], - 'font.family': ['sans-serif'], - 'font.size': 12.0, - 'font.weight': 'normal'})""".lstrip() - - assert expected_repr == repr(rc) - - expected_str = """ -font.cursive: ['Apple 
Chancery', 'Textile', 'Zapf Chancery', 'cursive'] -font.family: ['sans-serif'] -font.size: 12.0 -font.weight: normal""".lstrip() - - assert expected_str == str(rc) - - # test the find_all functionality - assert ['font.cursive', 'font.size'] == sorted(rc.find_all('i[vz]')) - assert ['font.family'] == list(rc.find_all('family')) - - -def test_rcparams_update(): - rc = mpl.RcParams({'figure.figsize': (3.5, 42)}) - bad_dict = {'figure.figsize': (3.5, 42, 1)} - # make sure validation happens on input - with pytest.raises(ValueError), \ - pytest.warns(UserWarning, match="validate"): - rc.update(bad_dict) - - -def test_rcparams_init(): - with pytest.raises(ValueError), \ - pytest.warns(UserWarning, match="validate"): - mpl.RcParams({'figure.figsize': (3.5, 42, 1)}) - - -def test_nargs_cycler(): - from matplotlib.rcsetup import cycler as ccl - with pytest.raises(TypeError, match='3 were given'): - # cycler() takes 0-2 arguments. - ccl(ccl(color=list('rgb')), 2, 3) - - -def test_Bug_2543(): - # Test that it possible to add all values to itself / deepcopy - # https://github.com/matplotlib/matplotlib/issues/2543 - # We filter warnings at this stage since a number of them are raised - # for deprecated rcparams as they should. We don't want these in the - # printed in the test suite. - with _api.suppress_matplotlib_deprecation_warning(): - with mpl.rc_context(): - _copy = mpl.rcParams.copy() - for key in _copy: - mpl.rcParams[key] = _copy[key] - with mpl.rc_context(): - copy.deepcopy(mpl.rcParams) - with pytest.raises(ValueError): - validate_bool(None) - with pytest.raises(ValueError): - with mpl.rc_context(): - mpl.rcParams['svg.fonttype'] = True - - -legend_color_tests = [ - ('face', {'color': 'r'}, mcolors.to_rgba('r')), - ('face', {'color': 'inherit', 'axes.facecolor': 'r'}, - mcolors.to_rgba('r')), - ('face', {'color': 'g', 'axes.facecolor': 'r'}, mcolors.to_rgba('g')), - ('edge', {'color': 'r'}, mcolors.to_rgba('r')), - ('edge', {'color': 'inherit', 'axes.edgecolor': 'r'}, - mcolors.to_rgba('r')), - ('edge', {'color': 'g', 'axes.facecolor': 'r'}, mcolors.to_rgba('g')) -] -legend_color_test_ids = [ - 'same facecolor', - 'inherited facecolor', - 'different facecolor', - 'same edgecolor', - 'inherited edgecolor', - 'different facecolor', -] - - -@pytest.mark.parametrize('color_type, param_dict, target', legend_color_tests, - ids=legend_color_test_ids) -def test_legend_colors(color_type, param_dict, target): - param_dict[f'legend.{color_type}color'] = param_dict.pop('color') - get_func = f'get_{color_type}color' - - with mpl.rc_context(param_dict): - _, ax = plt.subplots() - ax.plot(range(3), label='test') - leg = ax.legend() - assert getattr(leg.legendPatch, get_func)() == target - - -def test_mfc_rcparams(): - mpl.rcParams['lines.markerfacecolor'] = 'r' - ln = mpl.lines.Line2D([1, 2], [1, 2]) - assert ln.get_markerfacecolor() == 'r' - - -def test_mec_rcparams(): - mpl.rcParams['lines.markeredgecolor'] = 'r' - ln = mpl.lines.Line2D([1, 2], [1, 2]) - assert ln.get_markeredgecolor() == 'r' - - -def test_axes_titlecolor_rcparams(): - mpl.rcParams['axes.titlecolor'] = 'r' - _, ax = plt.subplots() - title = ax.set_title("Title") - assert title.get_color() == 'r' - - -def test_Issue_1713(tmpdir): - rcpath = Path(tmpdir) / 'test_rcparams.rc' - rcpath.write_text('timezone: UTC', encoding='utf-8') - with mock.patch('locale.getpreferredencoding', return_value='UTF-32-BE'): - rc = mpl.rc_params_from_file(rcpath, True, False) - assert rc.get('timezone') == 'UTC' - - -def test_animation_frame_formats(): - # 
Animation frame_format should allow any of the following - # if any of these are not allowed, an exception will be raised - # test for gh issue #17908 - for fmt in ['png', 'jpeg', 'tiff', 'raw', 'rgba', 'ppm', - 'sgi', 'bmp', 'pbm', 'svg']: - mpl.rcParams['animation.frame_format'] = fmt - - -def generate_validator_testcases(valid): - validation_tests = ( - {'validator': validate_bool, - 'success': (*((_, True) for _ in - ('t', 'y', 'yes', 'on', 'true', '1', 1, True)), - *((_, False) for _ in - ('f', 'n', 'no', 'off', 'false', '0', 0, False))), - 'fail': ((_, ValueError) - for _ in ('aardvark', 2, -1, [], )) - }, - {'validator': validate_stringlist, - 'success': (('', []), - ('a,b', ['a', 'b']), - ('aardvark', ['aardvark']), - ('aardvark, ', ['aardvark']), - ('aardvark, ,', ['aardvark']), - (['a', 'b'], ['a', 'b']), - (('a', 'b'), ['a', 'b']), - (iter(['a', 'b']), ['a', 'b']), - (np.array(['a', 'b']), ['a', 'b']), - ), - 'fail': ((set(), ValueError), - (1, ValueError), - ) - }, - {'validator': _listify_validator(validate_int, n=2), - 'success': ((_, [1, 2]) - for _ in ('1, 2', [1.5, 2.5], [1, 2], - (1, 2), np.array((1, 2)))), - 'fail': ((_, ValueError) - for _ in ('aardvark', ('a', 1), - (1, 2, 3) - )) - }, - {'validator': _listify_validator(validate_float, n=2), - 'success': ((_, [1.5, 2.5]) - for _ in ('1.5, 2.5', [1.5, 2.5], [1.5, 2.5], - (1.5, 2.5), np.array((1.5, 2.5)))), - 'fail': ((_, ValueError) - for _ in ('aardvark', ('a', 1), (1, 2, 3), (None, ), None)) - }, - {'validator': validate_cycler, - 'success': (('cycler("color", "rgb")', - cycler("color", 'rgb')), - (cycler('linestyle', ['-', '--']), - cycler('linestyle', ['-', '--'])), - ("""(cycler("color", ["r", "g", "b"]) + - cycler("mew", [2, 3, 5]))""", - (cycler("color", 'rgb') + - cycler("markeredgewidth", [2, 3, 5]))), - ("cycler(c='rgb', lw=[1, 2, 3])", - cycler('color', 'rgb') + cycler('linewidth', [1, 2, 3])), - ("cycler('c', 'rgb') * cycler('linestyle', ['-', '--'])", - (cycler('color', 'rgb') * - cycler('linestyle', ['-', '--']))), - (cycler('ls', ['-', '--']), - cycler('linestyle', ['-', '--'])), - (cycler(mew=[2, 5]), - cycler('markeredgewidth', [2, 5])), - ), - # This is *so* incredibly important: validate_cycler() eval's - # an arbitrary string! I think I have it locked down enough, - # and that is what this is testing. - # TODO: Note that these tests are actually insufficient, as it may - # be that they raised errors, but still did an action prior to - # raising the exception. We should devise some additional tests - # for that... - 'fail': ((4, ValueError), # Gotta be a string or Cycler object - ('cycler("bleh, [])', ValueError), # syntax error - ('Cycler("linewidth", [1, 2, 3])', - ValueError), # only 'cycler()' function is allowed - # do not allow dunder in string literals - ("cycler('c', [j.__class__(j) for j in ['r', 'b']])", - ValueError), - ("cycler('c', [j. 
__class__(j) for j in ['r', 'b']])", - ValueError), - ("cycler('c', [j.\t__class__(j) for j in ['r', 'b']])", - ValueError), - ("cycler('c', [j.\u000c__class__(j) for j in ['r', 'b']])", - ValueError), - ("cycler('c', [j.__class__(j).lower() for j in ['r', 'b']])", - ValueError), - ('1 + 2', ValueError), # doesn't produce a Cycler object - ('os.system("echo Gotcha")', ValueError), # os not available - ('import os', ValueError), # should not be able to import - ('def badjuju(a): return a; badjuju(cycler("color", "rgb"))', - ValueError), # Should not be able to define anything - # even if it does return a cycler - ('cycler("waka", [1, 2, 3])', ValueError), # not a property - ('cycler(c=[1, 2, 3])', ValueError), # invalid values - ("cycler(lw=['a', 'b', 'c'])", ValueError), # invalid values - (cycler('waka', [1, 3, 5]), ValueError), # not a property - (cycler('color', ['C1', 'r', 'g']), ValueError) # no CN - ) - }, - {'validator': validate_hatch, - 'success': (('--|', '--|'), ('\\oO', '\\oO'), - ('/+*/.x', '/+*/.x'), ('', '')), - 'fail': (('--_', ValueError), - (8, ValueError), - ('X', ValueError)), - }, - {'validator': validate_colorlist, - 'success': (('r,g,b', ['r', 'g', 'b']), - (['r', 'g', 'b'], ['r', 'g', 'b']), - ('r, ,', ['r']), - (['', 'g', 'blue'], ['g', 'blue']), - ([np.array([1, 0, 0]), np.array([0, 1, 0])], - np.array([[1, 0, 0], [0, 1, 0]])), - (np.array([[1, 0, 0], [0, 1, 0]]), - np.array([[1, 0, 0], [0, 1, 0]])), - ), - 'fail': (('fish', ValueError), - ), - }, - {'validator': validate_color, - 'success': (('None', 'none'), - ('none', 'none'), - ('AABBCC', '#AABBCC'), # RGB hex code - ('AABBCC00', '#AABBCC00'), # RGBA hex code - ('tab:blue', 'tab:blue'), # named color - ('C12', 'C12'), # color from cycle - ('(0, 1, 0)', (0.0, 1.0, 0.0)), # RGB tuple - ((0, 1, 0), (0, 1, 0)), # non-string version - ('(0, 1, 0, 1)', (0.0, 1.0, 0.0, 1.0)), # RGBA tuple - ((0, 1, 0, 1), (0, 1, 0, 1)), # non-string version - ), - 'fail': (('tab:veryblue', ValueError), # invalid name - ('(0, 1)', ValueError), # tuple with length < 3 - ('(0, 1, 0, 1, 0)', ValueError), # tuple with length > 4 - ('(0, 1, none)', ValueError), # cannot cast none to float - ('(0, 1, "0.5")', ValueError), # last one not a float - ), - }, - {'validator': _validate_color_or_linecolor, - 'success': (('linecolor', 'linecolor'), - ('markerfacecolor', 'markerfacecolor'), - ('mfc', 'markerfacecolor'), - ('markeredgecolor', 'markeredgecolor'), - ('mec', 'markeredgecolor') - ), - 'fail': (('line', ValueError), - ('marker', ValueError) - ) - }, - {'validator': validate_hist_bins, - 'success': (('auto', 'auto'), - ('fd', 'fd'), - ('10', 10), - ('1, 2, 3', [1, 2, 3]), - ([1, 2, 3], [1, 2, 3]), - (np.arange(15), np.arange(15)) - ), - 'fail': (('aardvark', ValueError), - ) - }, - {'validator': validate_markevery, - 'success': ((None, None), - (1, 1), - (0.1, 0.1), - ((1, 1), (1, 1)), - ((0.1, 0.1), (0.1, 0.1)), - ([1, 2, 3], [1, 2, 3]), - (slice(2), slice(None, 2, None)), - (slice(1, 2, 3), slice(1, 2, 3)) - ), - 'fail': (((1, 2, 3), TypeError), - ([1, 2, 0.3], TypeError), - (['a', 2, 3], TypeError), - ([1, 2, 'a'], TypeError), - ((0.1, 0.2, 0.3), TypeError), - ((0.1, 2, 3), TypeError), - ((1, 0.2, 0.3), TypeError), - ((1, 0.1), TypeError), - ((0.1, 1), TypeError), - (('abc'), TypeError), - ((1, 'a'), TypeError), - ((0.1, 'b'), TypeError), - (('a', 1), TypeError), - (('a', 0.1), TypeError), - ('abc', TypeError), - ('a', TypeError), - (object(), TypeError) - ) - }, - {'validator': _validate_linestyle, - 'success': (('-', '-'), ('solid', 
'solid'), - ('--', '--'), ('dashed', 'dashed'), - ('-.', '-.'), ('dashdot', 'dashdot'), - (':', ':'), ('dotted', 'dotted'), - ('', ''), (' ', ' '), - ('None', 'none'), ('none', 'none'), - ('DoTtEd', 'dotted'), # case-insensitive - ('1, 3', (0, (1, 3))), - ([1.23, 456], (0, [1.23, 456.0])), - ([1, 2, 3, 4], (0, [1.0, 2.0, 3.0, 4.0])), - ((0, [1, 2]), (0, [1, 2])), - ((-1, [1, 2]), (-1, [1, 2])), - ), - 'fail': (('aardvark', ValueError), # not a valid string - (b'dotted', ValueError), - ('dotted'.encode('utf-16'), ValueError), - ([1, 2, 3], ValueError), # sequence with odd length - (1.23, ValueError), # not a sequence - (("a", [1, 2]), ValueError), # wrong explicit offset - ((None, [1, 2]), ValueError), # wrong explicit offset - ((1, [1, 2, 3]), ValueError), # odd length sequence - (([1, 2], 1), ValueError), # inverted offset/onoff - ) - }, - ) - - for validator_dict in validation_tests: - validator = validator_dict['validator'] - if valid: - for arg, target in validator_dict['success']: - yield validator, arg, target - else: - for arg, error_type in validator_dict['fail']: - yield validator, arg, error_type - - -@pytest.mark.parametrize('validator, arg, target', - generate_validator_testcases(True)) -def test_validator_valid(validator, arg, target): - res = validator(arg) - if isinstance(target, np.ndarray): - np.testing.assert_equal(res, target) - elif not isinstance(target, Cycler): - assert res == target - else: - # Cyclers can't simply be asserted equal. They don't implement __eq__ - assert list(res) == list(target) - - -@pytest.mark.parametrize('validator, arg, exception_type', - generate_validator_testcases(False)) -def test_validator_invalid(validator, arg, exception_type): - with pytest.raises(exception_type): - validator(arg) - - -@pytest.mark.parametrize('weight, parsed_weight', [ - ('bold', 'bold'), - ('BOLD', ValueError), # weight is case-sensitive - (100, 100), - ('100', 100), - (np.array(100), 100), - # fractional fontweights are not defined. This should actually raise a - # ValueError, but historically did not. - (20.6, 20), - ('20.6', ValueError), - ([100], ValueError), -]) -def test_validate_fontweight(weight, parsed_weight): - if parsed_weight is ValueError: - with pytest.raises(ValueError): - validate_fontweight(weight) - else: - assert validate_fontweight(weight) == parsed_weight - - -@pytest.mark.parametrize('stretch, parsed_stretch', [ - ('expanded', 'expanded'), - ('EXPANDED', ValueError), # stretch is case-sensitive - (100, 100), - ('100', 100), - (np.array(100), 100), - # fractional fontweights are not defined. This should actually raise a - # ValueError, but historically did not. 
- (20.6, 20), - ('20.6', ValueError), - ([100], ValueError), -]) -def test_validate_fontstretch(stretch, parsed_stretch): - if parsed_stretch is ValueError: - with pytest.raises(ValueError): - validate_fontstretch(stretch) - else: - assert validate_fontstretch(stretch) == parsed_stretch - - -def test_keymaps(): - key_list = [k for k in mpl.rcParams if 'keymap' in k] - for k in key_list: - assert isinstance(mpl.rcParams[k], list) - - -def test_no_backend_reset_rccontext(): - assert mpl.rcParams['backend'] != 'module://aardvark' - with mpl.rc_context(): - mpl.rcParams['backend'] = 'module://aardvark' - assert mpl.rcParams['backend'] == 'module://aardvark' - - -def test_rcparams_reset_after_fail(): - # There was previously a bug that meant that if rc_context failed and - # raised an exception due to issues in the supplied rc parameters, the - # global rc parameters were left in a modified state. - with mpl.rc_context(rc={'text.usetex': False}): - assert mpl.rcParams['text.usetex'] is False - with pytest.raises(KeyError): - with mpl.rc_context(rc={'text.usetex': True, 'test.blah': True}): - pass - assert mpl.rcParams['text.usetex'] is False - - -@pytest.mark.skipif(sys.platform != "linux", reason="Linux only") -def test_backend_fallback_headless(tmpdir): - env = {**os.environ, - "DISPLAY": "", "WAYLAND_DISPLAY": "", - "MPLBACKEND": "", "MPLCONFIGDIR": str(tmpdir)} - with pytest.raises(subprocess.CalledProcessError): - subprocess.run( - [sys.executable, "-c", - "import matplotlib;" - "matplotlib.use('tkagg');" - "import matplotlib.pyplot;" - "matplotlib.pyplot.plot(42);" - ], - env=env, check=True, stderr=subprocess.DEVNULL) - - -@pytest.mark.skipif( - sys.platform == "linux" and not _c_internal_utils.display_is_valid(), - reason="headless") -def test_backend_fallback_headful(tmpdir): - pytest.importorskip("tkinter") - env = {**os.environ, "MPLBACKEND": "", "MPLCONFIGDIR": str(tmpdir)} - backend = subprocess.check_output( - [sys.executable, "-c", - "import matplotlib as mpl; " - "sentinel = mpl.rcsetup._auto_backend_sentinel; " - # Check that access on another instance does not resolve the sentinel. - "assert mpl.RcParams({'backend': sentinel})['backend'] == sentinel; " - "assert mpl.rcParams._get('backend') == sentinel; " - "import matplotlib.pyplot; " - "print(matplotlib.get_backend())"], - env=env, text=True) - # The actual backend will depend on what's installed, but at least tkagg is - # present. 
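For orientation, the resolution order these two tests exercise is roughly: an explicit MPLBACKEND (or ``matplotlib.use``) wins, otherwise a GUI backend is auto-selected on first ``pyplot`` import, with Agg as the headless fallback. A minimal sketch of pinning the backend up front, which bypasses that fallback logic:

import matplotlib
matplotlib.use("Agg")            # pin a backend before pyplot is imported
import matplotlib.pyplot as plt  # no auto-selection happens now
plt.figure()                     # works headlessly under Agg
print(matplotlib.get_backend())  # reports the pinned backend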
- assert backend.strip().lower() != "agg" - - -def test_deprecation(monkeypatch): - monkeypatch.setitem( - mpl._deprecated_map, "patch.linewidth", - ("0.0", "axes.linewidth", lambda old: 2 * old, lambda new: new / 2)) - with pytest.warns(mpl.MatplotlibDeprecationWarning): - assert mpl.rcParams["patch.linewidth"] \ - == mpl.rcParams["axes.linewidth"] / 2 - with pytest.warns(mpl.MatplotlibDeprecationWarning): - mpl.rcParams["patch.linewidth"] = 1 - assert mpl.rcParams["axes.linewidth"] == 2 - - monkeypatch.setitem( - mpl._deprecated_ignore_map, "patch.edgecolor", - ("0.0", "axes.edgecolor")) - with pytest.warns(mpl.MatplotlibDeprecationWarning): - assert mpl.rcParams["patch.edgecolor"] \ - == mpl.rcParams["axes.edgecolor"] - with pytest.warns(mpl.MatplotlibDeprecationWarning): - mpl.rcParams["patch.edgecolor"] = "#abcd" - assert mpl.rcParams["axes.edgecolor"] != "#abcd" - - monkeypatch.setitem( - mpl._deprecated_ignore_map, "patch.force_edgecolor", - ("0.0", None)) - with pytest.warns(mpl.MatplotlibDeprecationWarning): - assert mpl.rcParams["patch.force_edgecolor"] is None - - monkeypatch.setitem( - mpl._deprecated_remain_as_none, "svg.hashsalt", - ("0.0",)) - with pytest.warns(mpl.MatplotlibDeprecationWarning): - mpl.rcParams["svg.hashsalt"] = "foobar" - assert mpl.rcParams["svg.hashsalt"] == "foobar" # Doesn't warn. - mpl.rcParams["svg.hashsalt"] = None # Doesn't warn. - - mpl.rcParams.update(mpl.rcParams.copy()) # Doesn't warn. - # Note that the warning suppression actually arises from the - # iteration over the updater rcParams being protected by - # suppress_matplotlib_deprecation_warning, rather than any explicit check. - - -@pytest.mark.parametrize("value", [ - "best", - 1, - "1", - (0.9, .7), - (-0.9, .7), - "(0.9, .7)" -]) -def test_rcparams_legend_loc(value): - # rcParams['legend.loc'] should allow any of the following formats. - # if any of these are not allowed, an exception will be raised - # test for gh issue #22338 - mpl.rcParams["legend.loc"] = value - - -@pytest.mark.parametrize("value", [ - "best", - 1, - (0.9, .7), - (-0.9, .7), -]) -def test_rcparams_legend_loc_from_file(tmpdir, value): - # rcParams['legend.loc'] should be settable from matplotlibrc. - # if any of these are not allowed, an exception will be raised. - # test for gh issue #22338 - rc_path = tmpdir.join("matplotlibrc") - rc_path.write(f"legend.loc: {value}") - - with mpl.rc_context(fname=rc_path): - assert mpl.rcParams["legend.loc"] == value diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py deleted file mode 100644 index f08cf80faafa2fc1a369eaf7dd4d6fcccd5e9158..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/random/tests/test_seed_sequence.py +++ /dev/null @@ -1,80 +0,0 @@ -import numpy as np -from numpy.testing import assert_array_equal, assert_array_compare - -from numpy.random import SeedSequence - - -def test_reference_data(): - """ Check that SeedSequence generates data the same as the C++ reference. 
- - https://gist.github.com/imneme/540829265469e673d045 - """ - inputs = [ - [3735928559, 195939070, 229505742, 305419896], - [3668361503, 4165561550, 1661411377, 3634257570], - [164546577, 4166754639, 1765190214, 1303880213], - [446610472, 3941463886, 522937693, 1882353782], - [1864922766, 1719732118, 3882010307, 1776744564], - [4141682960, 3310988675, 553637289, 902896340], - [1134851934, 2352871630, 3699409824, 2648159817], - [1240956131, 3107113773, 1283198141, 1924506131], - [2669565031, 579818610, 3042504477, 2774880435], - [2766103236, 2883057919, 4029656435, 862374500], - ] - outputs = [ - [3914649087, 576849849, 3593928901, 2229911004], - [2240804226, 3691353228, 1365957195, 2654016646], - [3562296087, 3191708229, 1147942216, 3726991905], - [1403443605, 3591372999, 1291086759, 441919183], - [1086200464, 2191331643, 560336446, 3658716651], - [3249937430, 2346751812, 847844327, 2996632307], - [2584285912, 4034195531, 3523502488, 169742686], - [959045797, 3875435559, 1886309314, 359682705], - [3978441347, 432478529, 3223635119, 138903045], - [296367413, 4262059219, 13109864, 3283683422], - ] - outputs64 = [ - [2477551240072187391, 9577394838764454085], - [15854241394484835714, 11398914698975566411], - [13708282465491374871, 16007308345579681096], - [15424829579845884309, 1898028439751125927], - [9411697742461147792, 15714068361935982142], - [10079222287618677782, 12870437757549876199], - [17326737873898640088, 729039288628699544], - [16644868984619524261, 1544825456798124994], - [1857481142255628931, 596584038813451439], - [18305404959516669237, 14103312907920476776], - ] - for seed, expected, expected64 in zip(inputs, outputs, outputs64): - expected = np.array(expected, dtype=np.uint32) - ss = SeedSequence(seed) - state = ss.generate_state(len(expected)) - assert_array_equal(state, expected) - state64 = ss.generate_state(len(expected64), dtype=np.uint64) - assert_array_equal(state64, expected64) - - -def test_zero_padding(): - """ Ensure that the implicit zero-padding does not cause problems. - """ - # Ensure that large integers are inserted in little-endian fashion to avoid - # trailing 0s. - ss0 = SeedSequence(42) - ss1 = SeedSequence(42 << 32) - assert_array_compare( - np.not_equal, - ss0.generate_state(4), - ss1.generate_state(4)) - - # Ensure backwards compatibility with the original 0.17 release for small - # integers and no spawn key. - expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988], - dtype=np.uint32) - assert_array_equal(SeedSequence(42).generate_state(4), expected42) - - # Regression test for gh-16539 to ensure that the implicit 0s don't - # conflict with spawn keys. 
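For reference, the API being pinned down here is small; a minimal sketch with an arbitrary seed of 42: ``generate_state`` hashes the entropy into uint32 (or uint64) words, and ``spawn`` derives children whose streams stay independent of the parent and of each other.

from numpy.random import SeedSequence, default_rng

ss = SeedSequence(42)                       # arbitrary example seed
words = ss.generate_state(4)                # four uint32 words hashed from the entropy
children = ss.spawn(3)                      # child sequences with distinct spawn keys
rngs = [default_rng(c) for c in children]   # independent generators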
- assert_array_compare( - np.not_equal, - SeedSequence(42, spawn_key=(0,)).generate_state(4), - expected42) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/array_algos/take.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/array_algos/take.py deleted file mode 100644 index 8ea70e2694d92f00b7604fac3a742d38c9b41cff..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/core/array_algos/take.py +++ /dev/null @@ -1,595 +0,0 @@ -from __future__ import annotations - -import functools -from typing import ( - TYPE_CHECKING, - cast, - overload, -) - -import numpy as np - -from pandas._libs import ( - algos as libalgos, - lib, -) - -from pandas.core.dtypes.cast import maybe_promote -from pandas.core.dtypes.common import ( - ensure_platform_int, - is_1d_only_ea_dtype, -) -from pandas.core.dtypes.missing import na_value_for_dtype - -from pandas.core.construction import ensure_wrapped_if_datetimelike - -if TYPE_CHECKING: - from pandas._typing import ( - ArrayLike, - AxisInt, - npt, - ) - - from pandas.core.arrays._mixins import NDArrayBackedExtensionArray - from pandas.core.arrays.base import ExtensionArray - - -@overload -def take_nd( - arr: np.ndarray, - indexer, - axis: AxisInt = ..., - fill_value=..., - allow_fill: bool = ..., -) -> np.ndarray: - ... - - -@overload -def take_nd( - arr: ExtensionArray, - indexer, - axis: AxisInt = ..., - fill_value=..., - allow_fill: bool = ..., -) -> ArrayLike: - ... - - -def take_nd( - arr: ArrayLike, - indexer, - axis: AxisInt = 0, - fill_value=lib.no_default, - allow_fill: bool = True, -) -> ArrayLike: - """ - Specialized Cython take which sets NaN values in one pass - - This dispatches to ``take`` defined on ExtensionArrays. It does not - currently dispatch to ``SparseArray.take`` for sparse ``arr``. - - Note: this function assumes that the indexer is a valid(ated) indexer with - no out of bound indices. - - Parameters - ---------- - arr : np.ndarray or ExtensionArray - Input array. - indexer : ndarray - 1-D array of indices to take, subarrays corresponding to -1 value - indices are filed with fill_value - axis : int, default 0 - Axis to take from - fill_value : any, default np.nan - Fill value to replace -1 values with - allow_fill : bool, default True - If False, indexer is assumed to contain no -1 values so no filling - will be done. This short-circuits computation of a mask. Result is - undefined if allow_fill == False and -1 is present in indexer. - - Returns - ------- - subarray : np.ndarray or ExtensionArray - May be the same type as the input, or cast to an ndarray. - """ - if fill_value is lib.no_default: - fill_value = na_value_for_dtype(arr.dtype, compat=False) - elif lib.is_np_dtype(arr.dtype, "mM"): - dtype, fill_value = maybe_promote(arr.dtype, fill_value) - if arr.dtype != dtype: - # EA.take is strict about returning a new object of the same type - # so for that case cast upfront - arr = arr.astype(dtype) - - if not isinstance(arr, np.ndarray): - # i.e. ExtensionArray, - # includes for EA to catch DatetimeArray, TimedeltaArray - if not is_1d_only_ea_dtype(arr.dtype): - # i.e. 
DatetimeArray, TimedeltaArray - arr = cast("NDArrayBackedExtensionArray", arr) - return arr.take( - indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis - ) - - return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) - - arr = np.asarray(arr) - return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill) - - -def _take_nd_ndarray( - arr: np.ndarray, - indexer: npt.NDArray[np.intp] | None, - axis: AxisInt, - fill_value, - allow_fill: bool, -) -> np.ndarray: - if indexer is None: - indexer = np.arange(arr.shape[axis], dtype=np.intp) - dtype, fill_value = arr.dtype, arr.dtype.type() - else: - indexer = ensure_platform_int(indexer) - - dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( - arr, indexer, fill_value, allow_fill - ) - - flip_order = False - if arr.ndim == 2 and arr.flags.f_contiguous: - flip_order = True - - if flip_order: - arr = arr.T - axis = arr.ndim - axis - 1 - - # at this point, it's guaranteed that dtype can hold both the arr values - # and the fill_value - out_shape_ = list(arr.shape) - out_shape_[axis] = len(indexer) - out_shape = tuple(out_shape_) - if arr.flags.f_contiguous and axis == arr.ndim - 1: - # minor tweak that can make an order-of-magnitude difference - # for dataframes initialized directly from 2-d ndarrays - # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its - # f-contiguous transpose) - out = np.empty(out_shape, dtype=dtype, order="F") - else: - out = np.empty(out_shape, dtype=dtype) - - func = _get_take_nd_function( - arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info - ) - func(arr, indexer, out, fill_value) - - if flip_order: - out = out.T - return out - - -def take_1d( - arr: ArrayLike, - indexer: npt.NDArray[np.intp], - fill_value=None, - allow_fill: bool = True, - mask: npt.NDArray[np.bool_] | None = None, -) -> ArrayLike: - """ - Specialized version for 1D arrays. Differences compared to `take_nd`: - - - Assumes input array has already been converted to numpy array / EA - - Assumes indexer is already guaranteed to be intp dtype ndarray - - Only works for 1D arrays - - To ensure the lowest possible overhead. - - Note: similarly to `take_nd`, this function assumes that the indexer is - a valid(ated) indexer with no out of bound indices. - - Parameters - ---------- - arr : np.ndarray or ExtensionArray - Input array. - indexer : ndarray - 1-D array of indices to take (validated indices, intp dtype). - fill_value : any, default np.nan - Fill value to replace -1 values with - allow_fill : bool, default True - If False, indexer is assumed to contain no -1 values so no filling - will be done. This short-circuits computation of a mask. Result is - undefined if allow_fill == False and -1 is present in indexer. - mask : np.ndarray, optional, default None - If `allow_fill` is True, and the mask (where indexer == -1) is already - known, it can be passed to avoid recomputation. 
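Outside of pandas internals, the same -1-as-missing convention is exposed through the public ``pandas.api.extensions.take``; a minimal sketch with arbitrary values:

import numpy as np
from pandas.api.extensions import take

arr = np.array([10.0, 20.0, 30.0])
# -1 positions are filled with fill_value when allow_fill=True
take(arr, [0, -1, 2], allow_fill=True, fill_value=np.nan)  # array([10., nan, 30.])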
- """ - if not isinstance(arr, np.ndarray): - # ExtensionArray -> dispatch to their method - return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) - - if not allow_fill: - return arr.take(indexer) - - dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( - arr, indexer, fill_value, True, mask - ) - - # at this point, it's guaranteed that dtype can hold both the arr values - # and the fill_value - out = np.empty(indexer.shape, dtype=dtype) - - func = _get_take_nd_function( - arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info - ) - func(arr, indexer, out, fill_value) - - return out - - -def take_2d_multi( - arr: np.ndarray, - indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], - fill_value=np.nan, -) -> np.ndarray: - """ - Specialized Cython take which sets NaN values in one pass. - """ - # This is only called from one place in DataFrame._reindex_multi, - # so we know indexer is well-behaved. - assert indexer is not None - assert indexer[0] is not None - assert indexer[1] is not None - - row_idx, col_idx = indexer - - row_idx = ensure_platform_int(row_idx) - col_idx = ensure_platform_int(col_idx) - indexer = row_idx, col_idx - mask_info = None - - # check for promotion based on types only (do this first because - # it's faster than computing a mask) - dtype, fill_value = maybe_promote(arr.dtype, fill_value) - if dtype != arr.dtype: - # check if promotion is actually required based on indexer - row_mask = row_idx == -1 - col_mask = col_idx == -1 - row_needs = row_mask.any() - col_needs = col_mask.any() - mask_info = (row_mask, col_mask), (row_needs, col_needs) - - if not (row_needs or col_needs): - # if not, then depromote, set fill_value to dummy - # (it won't be used but we don't want the cython code - # to crash when trying to cast it to dtype) - dtype, fill_value = arr.dtype, arr.dtype.type() - - # at this point, it's guaranteed that dtype can hold both the arr values - # and the fill_value - out_shape = len(row_idx), len(col_idx) - out = np.empty(out_shape, dtype=dtype) - - func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None) - if func is None and arr.dtype != out.dtype: - func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None) - if func is not None: - func = _convert_wrapper(func, out.dtype) - - if func is not None: - func(arr, indexer, out=out, fill_value=fill_value) - else: - # test_reindex_multi - _take_2d_multi_object( - arr, indexer, out, fill_value=fill_value, mask_info=mask_info - ) - - return out - - -@functools.lru_cache -def _get_take_nd_function_cached( - ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt -): - """ - Part of _get_take_nd_function below that doesn't need `mask_info` and thus - can be cached (mask_info potentially contains a numpy ndarray which is not - hashable and thus cannot be used as argument for cached function). - """ - tup = (arr_dtype.name, out_dtype.name) - if ndim == 1: - func = _take_1d_dict.get(tup, None) - elif ndim == 2: - if axis == 0: - func = _take_2d_axis0_dict.get(tup, None) - else: - func = _take_2d_axis1_dict.get(tup, None) - if func is not None: - return func - - # We get here with string, uint, float16, and complex dtypes that could - # potentially be handled in algos_take_helper. 
- # Also a couple with (M8[ns], object) and (m8[ns], object) - tup = (out_dtype.name, out_dtype.name) - if ndim == 1: - func = _take_1d_dict.get(tup, None) - elif ndim == 2: - if axis == 0: - func = _take_2d_axis0_dict.get(tup, None) - else: - func = _take_2d_axis1_dict.get(tup, None) - if func is not None: - func = _convert_wrapper(func, out_dtype) - return func - - return None - - -def _get_take_nd_function( - ndim: int, - arr_dtype: np.dtype, - out_dtype: np.dtype, - axis: AxisInt = 0, - mask_info=None, -): - """ - Get the appropriate "take" implementation for the given dimension, axis - and dtypes. - """ - func = None - if ndim <= 2: - # for this part we don't need `mask_info` -> use the cached algo lookup - func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis) - - if func is None: - - def func(arr, indexer, out, fill_value=np.nan) -> None: - indexer = ensure_platform_int(indexer) - _take_nd_object( - arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info - ) - - return func - - -def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None): - def wrapper( - arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan - ) -> None: - if arr_dtype is not None: - arr = arr.view(arr_dtype) - if out_dtype is not None: - out = out.view(out_dtype) - if fill_wrap is not None: - # FIXME: if we get here with dt64/td64 we need to be sure we have - # matching resos - if fill_value.dtype.kind == "m": - fill_value = fill_value.astype("m8[ns]") - else: - fill_value = fill_value.astype("M8[ns]") - fill_value = fill_wrap(fill_value) - - f(arr, indexer, out, fill_value=fill_value) - - return wrapper - - -def _convert_wrapper(f, conv_dtype): - def wrapper( - arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan - ) -> None: - if conv_dtype == object: - # GH#39755 avoid casting dt64/td64 to integers - arr = ensure_wrapped_if_datetimelike(arr) - arr = arr.astype(conv_dtype) - f(arr, indexer, out, fill_value=fill_value) - - return wrapper - - -_take_1d_dict = { - ("int8", "int8"): libalgos.take_1d_int8_int8, - ("int8", "int32"): libalgos.take_1d_int8_int32, - ("int8", "int64"): libalgos.take_1d_int8_int64, - ("int8", "float64"): libalgos.take_1d_int8_float64, - ("int16", "int16"): libalgos.take_1d_int16_int16, - ("int16", "int32"): libalgos.take_1d_int16_int32, - ("int16", "int64"): libalgos.take_1d_int16_int64, - ("int16", "float64"): libalgos.take_1d_int16_float64, - ("int32", "int32"): libalgos.take_1d_int32_int32, - ("int32", "int64"): libalgos.take_1d_int32_int64, - ("int32", "float64"): libalgos.take_1d_int32_float64, - ("int64", "int64"): libalgos.take_1d_int64_int64, - ("int64", "float64"): libalgos.take_1d_int64_float64, - ("float32", "float32"): libalgos.take_1d_float32_float32, - ("float32", "float64"): libalgos.take_1d_float32_float64, - ("float64", "float64"): libalgos.take_1d_float64_float64, - ("object", "object"): libalgos.take_1d_object_object, - ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8), - ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None), - ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( - libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 - ), - ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( - libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 - ), -} - -_take_2d_axis0_dict = { - ("int8", "int8"): libalgos.take_2d_axis0_int8_int8, - ("int8", "int32"): libalgos.take_2d_axis0_int8_int32, - ("int8", "int64"): 
libalgos.take_2d_axis0_int8_int64, - ("int8", "float64"): libalgos.take_2d_axis0_int8_float64, - ("int16", "int16"): libalgos.take_2d_axis0_int16_int16, - ("int16", "int32"): libalgos.take_2d_axis0_int16_int32, - ("int16", "int64"): libalgos.take_2d_axis0_int16_int64, - ("int16", "float64"): libalgos.take_2d_axis0_int16_float64, - ("int32", "int32"): libalgos.take_2d_axis0_int32_int32, - ("int32", "int64"): libalgos.take_2d_axis0_int32_int64, - ("int32", "float64"): libalgos.take_2d_axis0_int32_float64, - ("int64", "int64"): libalgos.take_2d_axis0_int64_int64, - ("int64", "float64"): libalgos.take_2d_axis0_int64_float64, - ("float32", "float32"): libalgos.take_2d_axis0_float32_float32, - ("float32", "float64"): libalgos.take_2d_axis0_float32_float64, - ("float64", "float64"): libalgos.take_2d_axis0_float64_float64, - ("object", "object"): libalgos.take_2d_axis0_object_object, - ("bool", "bool"): _view_wrapper( - libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8 - ), - ("bool", "object"): _view_wrapper( - libalgos.take_2d_axis0_bool_object, np.uint8, None - ), - ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( - libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 - ), - ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( - libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 - ), -} - -_take_2d_axis1_dict = { - ("int8", "int8"): libalgos.take_2d_axis1_int8_int8, - ("int8", "int32"): libalgos.take_2d_axis1_int8_int32, - ("int8", "int64"): libalgos.take_2d_axis1_int8_int64, - ("int8", "float64"): libalgos.take_2d_axis1_int8_float64, - ("int16", "int16"): libalgos.take_2d_axis1_int16_int16, - ("int16", "int32"): libalgos.take_2d_axis1_int16_int32, - ("int16", "int64"): libalgos.take_2d_axis1_int16_int64, - ("int16", "float64"): libalgos.take_2d_axis1_int16_float64, - ("int32", "int32"): libalgos.take_2d_axis1_int32_int32, - ("int32", "int64"): libalgos.take_2d_axis1_int32_int64, - ("int32", "float64"): libalgos.take_2d_axis1_int32_float64, - ("int64", "int64"): libalgos.take_2d_axis1_int64_int64, - ("int64", "float64"): libalgos.take_2d_axis1_int64_float64, - ("float32", "float32"): libalgos.take_2d_axis1_float32_float32, - ("float32", "float64"): libalgos.take_2d_axis1_float32_float64, - ("float64", "float64"): libalgos.take_2d_axis1_float64_float64, - ("object", "object"): libalgos.take_2d_axis1_object_object, - ("bool", "bool"): _view_wrapper( - libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8 - ), - ("bool", "object"): _view_wrapper( - libalgos.take_2d_axis1_bool_object, np.uint8, None - ), - ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( - libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 - ), - ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( - libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 - ), -} - -_take_2d_multi_dict = { - ("int8", "int8"): libalgos.take_2d_multi_int8_int8, - ("int8", "int32"): libalgos.take_2d_multi_int8_int32, - ("int8", "int64"): libalgos.take_2d_multi_int8_int64, - ("int8", "float64"): libalgos.take_2d_multi_int8_float64, - ("int16", "int16"): libalgos.take_2d_multi_int16_int16, - ("int16", "int32"): libalgos.take_2d_multi_int16_int32, - ("int16", "int64"): libalgos.take_2d_multi_int16_int64, - ("int16", "float64"): libalgos.take_2d_multi_int16_float64, - ("int32", "int32"): libalgos.take_2d_multi_int32_int32, - ("int32", "int64"): libalgos.take_2d_multi_int32_int64, - ("int32", "float64"): libalgos.take_2d_multi_int32_float64, - ("int64", 
"int64"): libalgos.take_2d_multi_int64_int64, - ("int64", "float64"): libalgos.take_2d_multi_int64_float64, - ("float32", "float32"): libalgos.take_2d_multi_float32_float32, - ("float32", "float64"): libalgos.take_2d_multi_float32_float64, - ("float64", "float64"): libalgos.take_2d_multi_float64_float64, - ("object", "object"): libalgos.take_2d_multi_object_object, - ("bool", "bool"): _view_wrapper( - libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8 - ), - ("bool", "object"): _view_wrapper( - libalgos.take_2d_multi_bool_object, np.uint8, None - ), - ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( - libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 - ), - ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( - libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 - ), -} - - -def _take_nd_object( - arr: np.ndarray, - indexer: npt.NDArray[np.intp], - out: np.ndarray, - axis: AxisInt, - fill_value, - mask_info, -) -> None: - if mask_info is not None: - mask, needs_masking = mask_info - else: - mask = indexer == -1 - needs_masking = mask.any() - if arr.dtype != out.dtype: - arr = arr.astype(out.dtype) - if arr.shape[axis] > 0: - arr.take(indexer, axis=axis, out=out) - if needs_masking: - outindexer = [slice(None)] * arr.ndim - outindexer[axis] = mask - out[tuple(outindexer)] = fill_value - - -def _take_2d_multi_object( - arr: np.ndarray, - indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], - out: np.ndarray, - fill_value, - mask_info, -) -> None: - # this is not ideal, performance-wise, but it's better than raising - # an exception (best to optimize in Cython to avoid getting here) - row_idx, col_idx = indexer # both np.intp - if mask_info is not None: - (row_mask, col_mask), (row_needs, col_needs) = mask_info - else: - row_mask = row_idx == -1 - col_mask = col_idx == -1 - row_needs = row_mask.any() - col_needs = col_mask.any() - if fill_value is not None: - if row_needs: - out[row_mask, :] = fill_value - if col_needs: - out[:, col_mask] = fill_value - for i, u_ in enumerate(row_idx): - if u_ != -1: - for j, v in enumerate(col_idx): - if v != -1: - out[i, j] = arr[u_, v] - - -def _take_preprocess_indexer_and_fill_value( - arr: np.ndarray, - indexer: npt.NDArray[np.intp], - fill_value, - allow_fill: bool, - mask: npt.NDArray[np.bool_] | None = None, -): - mask_info: tuple[np.ndarray | None, bool] | None = None - - if not allow_fill: - dtype, fill_value = arr.dtype, arr.dtype.type() - mask_info = None, False - else: - # check for promotion based on types only (do this first because - # it's faster than computing a mask) - dtype, fill_value = maybe_promote(arr.dtype, fill_value) - if dtype != arr.dtype: - # check if promotion is actually required based on indexer - if mask is not None: - needs_masking = True - else: - mask = indexer == -1 - needs_masking = bool(mask.any()) - mask_info = mask, needs_masking - if not needs_masking: - # if not, then depromote, set fill_value to dummy - # (it won't be used but we don't want the cython code - # to crash when trying to cast it to dtype) - dtype, fill_value = arr.dtype, arr.dtype.type() - - return dtype, fill_value, mask_info diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/reshape/test_cut.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/reshape/test_cut.py deleted file mode 100644 index b2a6ac49fdff2a26f659211294168af19eb4c403..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/reshape/test_cut.py +++ /dev/null @@ -1,761 +0,0 @@ -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - Categorical, - DataFrame, - DatetimeIndex, - Index, - Interval, - IntervalIndex, - Series, - TimedeltaIndex, - Timestamp, - cut, - date_range, - interval_range, - isna, - qcut, - timedelta_range, - to_datetime, -) -import pandas._testing as tm -from pandas.api.types import CategoricalDtype as CDT -import pandas.core.reshape.tile as tmod - - -def test_simple(): - data = np.ones(5, dtype="int64") - result = cut(data, 4, labels=False) - - expected = np.array([1, 1, 1, 1, 1]) - tm.assert_numpy_array_equal(result, expected, check_dtype=False) - - -@pytest.mark.parametrize("func", [list, np.array]) -def test_bins(func): - data = func([0.2, 1.4, 2.5, 6.2, 9.7, 2.1]) - result, bins = cut(data, 3, retbins=True) - - intervals = IntervalIndex.from_breaks(bins.round(3)) - intervals = intervals.take([0, 0, 0, 1, 2, 0]) - expected = Categorical(intervals, ordered=True) - - tm.assert_categorical_equal(result, expected) - tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, 6.53333333, 9.7])) - - -def test_right(): - data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) - result, bins = cut(data, 4, right=True, retbins=True) - - intervals = IntervalIndex.from_breaks(bins.round(3)) - expected = Categorical(intervals, ordered=True) - expected = expected.take([0, 0, 0, 2, 3, 0, 0]) - - tm.assert_categorical_equal(result, expected) - tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7])) - - -def test_no_right(): - data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) - result, bins = cut(data, 4, right=False, retbins=True) - - intervals = IntervalIndex.from_breaks(bins.round(3), closed="left") - intervals = intervals.take([0, 0, 0, 2, 3, 0, 1]) - expected = Categorical(intervals, ordered=True) - - tm.assert_categorical_equal(result, expected) - tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095])) - - -def test_bins_from_interval_index(): - c = cut(range(5), 3) - expected = c - result = cut(range(5), bins=expected.categories) - tm.assert_categorical_equal(result, expected) - - expected = Categorical.from_codes( - np.append(c.codes, -1), categories=c.categories, ordered=True - ) - result = cut(range(6), bins=expected.categories) - tm.assert_categorical_equal(result, expected) - - -def test_bins_from_interval_index_doc_example(): - # Make sure we preserve the bins. 
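The behaviour checked in this doc-example test can be summarised in a short sketch (values arbitrary): the ``categories`` of a cut result form an IntervalIndex that can be fed back as ``bins`` so new data is binned identically.

import numpy as np
import pandas as pd

scores = np.array([10, 15, 23, 28, 59, 60])
binned = pd.cut(scores, bins=[0, 18, 35, 70])
# Reuse the derived IntervalIndex so fresh data lands in the same bins
pd.cut([25, 20, 50], bins=binned.categories)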
- ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60]) - c = cut(ages, bins=[0, 18, 35, 70]) - expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)]) - tm.assert_index_equal(c.categories, expected) - - result = cut([25, 20, 50], bins=c.categories) - tm.assert_index_equal(result.categories, expected) - tm.assert_numpy_array_equal(result.codes, np.array([1, 1, 2], dtype="int8")) - - -def test_bins_not_overlapping_from_interval_index(): - # see gh-23980 - msg = "Overlapping IntervalIndex is not accepted" - ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)]) - - with pytest.raises(ValueError, match=msg): - cut([5, 6], bins=ii) - - -def test_bins_not_monotonic(): - msg = "bins must increase monotonically" - data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1] - - with pytest.raises(ValueError, match=msg): - cut(data, [0.1, 1.5, 1, 10]) - - -@pytest.mark.parametrize( - "x, bins, expected", - [ - ( - date_range("2017-12-31", periods=3), - [Timestamp.min, Timestamp("2018-01-01"), Timestamp.max], - IntervalIndex.from_tuples( - [ - (Timestamp.min, Timestamp("2018-01-01")), - (Timestamp("2018-01-01"), Timestamp.max), - ] - ), - ), - ( - [-1, 0, 1], - np.array( - [np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max], dtype="int64" - ), - IntervalIndex.from_tuples( - [(np.iinfo(np.int64).min, 0), (0, np.iinfo(np.int64).max)] - ), - ), - ( - [ - np.timedelta64(-1, "ns"), - np.timedelta64(0, "ns"), - np.timedelta64(1, "ns"), - ], - np.array( - [ - np.timedelta64(-np.iinfo(np.int64).max, "ns"), - np.timedelta64(0, "ns"), - np.timedelta64(np.iinfo(np.int64).max, "ns"), - ] - ), - IntervalIndex.from_tuples( - [ - ( - np.timedelta64(-np.iinfo(np.int64).max, "ns"), - np.timedelta64(0, "ns"), - ), - ( - np.timedelta64(0, "ns"), - np.timedelta64(np.iinfo(np.int64).max, "ns"), - ), - ] - ), - ), - ], -) -def test_bins_monotonic_not_overflowing(x, bins, expected): - # GH 26045 - result = cut(x, bins) - tm.assert_index_equal(result.categories, expected) - - -def test_wrong_num_labels(): - msg = "Bin labels must be one fewer than the number of bin edges" - data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1] - - with pytest.raises(ValueError, match=msg): - cut(data, [0, 1, 10], labels=["foo", "bar", "baz"]) - - -@pytest.mark.parametrize( - "x,bins,msg", - [ - ([], 2, "Cannot cut empty array"), - ([1, 2, 3], 0.5, "`bins` should be a positive integer"), - ], -) -def test_cut_corner(x, bins, msg): - with pytest.raises(ValueError, match=msg): - cut(x, bins) - - -@pytest.mark.parametrize("arg", [2, np.eye(2), DataFrame(np.eye(2))]) -@pytest.mark.parametrize("cut_func", [cut, qcut]) -def test_cut_not_1d_arg(arg, cut_func): - msg = "Input array must be 1 dimensional" - with pytest.raises(ValueError, match=msg): - cut_func(arg, 2) - - -@pytest.mark.parametrize( - "data", - [ - [0, 1, 2, 3, 4, np.inf], - [-np.inf, 0, 1, 2, 3, 4], - [-np.inf, 0, 1, 2, 3, 4, np.inf], - ], -) -def test_int_bins_with_inf(data): - # GH 24314 - msg = "cannot specify integer `bins` when input data contains infinity" - with pytest.raises(ValueError, match=msg): - cut(data, bins=3) - - -def test_cut_out_of_range_more(): - # see gh-1511 - name = "x" - - ser = Series([0, -1, 0, 1, -3], name=name) - ind = cut(ser, [0, 1], labels=False) - - exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name=name) - tm.assert_series_equal(ind, exp) - - -@pytest.mark.parametrize( - "right,breaks,closed", - [ - (True, [-1e-3, 0.25, 0.5, 0.75, 1], "right"), - (False, [0, 0.25, 0.5, 0.75, 1 + 1e-3], "left"), - ], -) -def test_labels(right, breaks, closed): - arr = 
np.tile(np.arange(0, 1.01, 0.1), 4) - - result, bins = cut(arr, 4, retbins=True, right=right) - ex_levels = IntervalIndex.from_breaks(breaks, closed=closed) - tm.assert_index_equal(result.categories, ex_levels) - - -def test_cut_pass_series_name_to_factor(): - name = "foo" - ser = Series(np.random.default_rng(2).standard_normal(100), name=name) - - factor = cut(ser, 4) - assert factor.name == name - - -def test_label_precision(): - arr = np.arange(0, 0.73, 0.01) - result = cut(arr, 4, precision=2) - - ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72]) - tm.assert_index_equal(result.categories, ex_levels) - - -@pytest.mark.parametrize("labels", [None, False]) -def test_na_handling(labels): - arr = np.arange(0, 0.75, 0.01) - arr[::3] = np.nan - - result = cut(arr, 4, labels=labels) - result = np.asarray(result) - - expected = np.where(isna(arr), np.nan, result) - tm.assert_almost_equal(result, expected) - - -def test_inf_handling(): - data = np.arange(6) - data_ser = Series(data, dtype="int64") - - bins = [-np.inf, 2, 4, np.inf] - result = cut(data, bins) - result_ser = cut(data_ser, bins) - - ex_uniques = IntervalIndex.from_breaks(bins) - tm.assert_index_equal(result.categories, ex_uniques) - - assert result[5] == Interval(4, np.inf) - assert result[0] == Interval(-np.inf, 2) - assert result_ser[5] == Interval(4, np.inf) - assert result_ser[0] == Interval(-np.inf, 2) - - -def test_cut_out_of_bounds(): - arr = np.random.default_rng(2).standard_normal(100) - result = cut(arr, [-1, 0, 1]) - - mask = isna(result) - ex_mask = (arr < -1) | (arr > 1) - tm.assert_numpy_array_equal(mask, ex_mask) - - -@pytest.mark.parametrize( - "get_labels,get_expected", - [ - ( - lambda labels: labels, - lambda labels: Categorical( - ["Medium"] + 4 * ["Small"] + ["Medium", "Large"], - categories=labels, - ordered=True, - ), - ), - ( - lambda labels: Categorical.from_codes([0, 1, 2], labels), - lambda labels: Categorical.from_codes([1] + 4 * [0] + [1, 2], labels), - ), - ], -) -def test_cut_pass_labels(get_labels, get_expected): - bins = [0, 25, 50, 100] - arr = [50, 5, 10, 15, 20, 30, 70] - labels = ["Small", "Medium", "Large"] - - result = cut(arr, bins, labels=get_labels(labels)) - tm.assert_categorical_equal(result, get_expected(labels)) - - -def test_cut_pass_labels_compat(): - # see gh-16459 - arr = [50, 5, 10, 15, 20, 30, 70] - labels = ["Good", "Medium", "Bad"] - - result = cut(arr, 3, labels=labels) - exp = cut(arr, 3, labels=Categorical(labels, categories=labels, ordered=True)) - tm.assert_categorical_equal(result, exp) - - -@pytest.mark.parametrize("x", [np.arange(11.0), np.arange(11.0) / 1e10]) -def test_round_frac_just_works(x): - # It works. 
- cut(x, 2) - - -@pytest.mark.parametrize( - "val,precision,expected", - [ - (-117.9998, 3, -118), - (117.9998, 3, 118), - (117.9998, 2, 118), - (0.000123456, 2, 0.00012), - ], -) -def test_round_frac(val, precision, expected): - # see gh-1979 - result = tmod._round_frac(val, precision=precision) - assert result == expected - - -def test_cut_return_intervals(): - ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8]) - result = cut(ser, 3) - - exp_bins = np.linspace(0, 8, num=4).round(3) - exp_bins[0] -= 0.008 - - expected = Series( - IntervalIndex.from_breaks(exp_bins, closed="right").take( - [0, 0, 0, 1, 1, 1, 2, 2, 2] - ) - ).astype(CDT(ordered=True)) - tm.assert_series_equal(result, expected) - - -def test_series_ret_bins(): - # see gh-8589 - ser = Series(np.arange(4)) - result, bins = cut(ser, 2, retbins=True) - - expected = Series( - IntervalIndex.from_breaks([-0.003, 1.5, 3], closed="right").repeat(2) - ).astype(CDT(ordered=True)) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize( - "kwargs,msg", - [ - ({"duplicates": "drop"}, None), - ({}, "Bin edges must be unique"), - ({"duplicates": "raise"}, "Bin edges must be unique"), - ({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"), - ], -) -def test_cut_duplicates_bin(kwargs, msg): - # see gh-20947 - bins = [0, 2, 4, 6, 10, 10] - values = Series(np.array([1, 3, 5, 7, 9]), index=["a", "b", "c", "d", "e"]) - - if msg is not None: - with pytest.raises(ValueError, match=msg): - cut(values, bins, **kwargs) - else: - result = cut(values, bins, **kwargs) - expected = cut(values, pd.unique(np.asarray(bins))) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize("data", [9.0, -9.0, 0.0]) -@pytest.mark.parametrize("length", [1, 2]) -def test_single_bin(data, length): - # see gh-14652, gh-15428 - ser = Series([data] * length) - result = cut(ser, 1, labels=False) - - expected = Series([0] * length, dtype=np.intp) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize( - "array_1_writeable,array_2_writeable", [(True, True), (True, False), (False, False)] -) -def test_cut_read_only(array_1_writeable, array_2_writeable): - # issue 18773 - array_1 = np.arange(0, 100, 10) - array_1.flags.writeable = array_1_writeable - - array_2 = np.arange(0, 100, 10) - array_2.flags.writeable = array_2_writeable - - hundred_elements = np.arange(100) - tm.assert_categorical_equal( - cut(hundred_elements, array_1), cut(hundred_elements, array_2) - ) - - -@pytest.mark.parametrize( - "conv", - [ - lambda v: Timestamp(v), - lambda v: to_datetime(v), - lambda v: np.datetime64(v), - lambda v: Timestamp(v).to_pydatetime(), - ], -) -def test_datetime_bin(conv): - data = [np.datetime64("2012-12-13"), np.datetime64("2012-12-15")] - bin_data = ["2012-12-12", "2012-12-14", "2012-12-16"] - - expected = Series( - IntervalIndex( - [ - Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])), - Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2])), - ] - ) - ).astype(CDT(ordered=True)) - - bins = [conv(v) for v in bin_data] - result = Series(cut(data, bins=bins)) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize( - "data", - [ - to_datetime(Series(["2013-01-01", "2013-01-02", "2013-01-03"])), - [ - np.datetime64("2013-01-01"), - np.datetime64("2013-01-02"), - np.datetime64("2013-01-03"), - ], - np.array( - [ - np.datetime64("2013-01-01"), - np.datetime64("2013-01-02"), - np.datetime64("2013-01-03"), - ] - ), - DatetimeIndex(["2013-01-01", "2013-01-02", "2013-01-03"]), - ], -) -def 
test_datetime_cut(data): - # see gh-14714 - # - # Testing time data when it comes in various collection types. - result, _ = cut(data, 3, retbins=True) - expected = Series( - IntervalIndex( - [ - Interval( - Timestamp("2012-12-31 23:57:07.200000"), - Timestamp("2013-01-01 16:00:00"), - ), - Interval( - Timestamp("2013-01-01 16:00:00"), Timestamp("2013-01-02 08:00:00") - ), - Interval( - Timestamp("2013-01-02 08:00:00"), Timestamp("2013-01-03 00:00:00") - ), - ] - ) - ).astype(CDT(ordered=True)) - tm.assert_series_equal(Series(result), expected) - - -@pytest.mark.parametrize( - "bins", - [ - 3, - [ - Timestamp("2013-01-01 04:57:07.200000"), - Timestamp("2013-01-01 21:00:00"), - Timestamp("2013-01-02 13:00:00"), - Timestamp("2013-01-03 05:00:00"), - ], - ], -) -@pytest.mark.parametrize("box", [list, np.array, Index, Series]) -def test_datetime_tz_cut(bins, box): - # see gh-19872 - tz = "US/Eastern" - s = Series(date_range("20130101", periods=3, tz=tz)) - - if not isinstance(bins, int): - bins = box(bins) - - result = cut(s, bins) - expected = Series( - IntervalIndex( - [ - Interval( - Timestamp("2012-12-31 23:57:07.200000", tz=tz), - Timestamp("2013-01-01 16:00:00", tz=tz), - ), - Interval( - Timestamp("2013-01-01 16:00:00", tz=tz), - Timestamp("2013-01-02 08:00:00", tz=tz), - ), - Interval( - Timestamp("2013-01-02 08:00:00", tz=tz), - Timestamp("2013-01-03 00:00:00", tz=tz), - ), - ] - ) - ).astype(CDT(ordered=True)) - tm.assert_series_equal(result, expected) - - -def test_datetime_nan_error(): - msg = "bins must be of datetime64 dtype" - - with pytest.raises(ValueError, match=msg): - cut(date_range("20130101", periods=3), bins=[0, 2, 4]) - - -def test_datetime_nan_mask(): - result = cut( - date_range("20130102", periods=5), bins=date_range("20130101", periods=2) - ) - - mask = result.categories.isna() - tm.assert_numpy_array_equal(mask, np.array([False])) - - mask = result.isna() - tm.assert_numpy_array_equal(mask, np.array([False, True, True, True, True])) - - -@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"]) -def test_datetime_cut_roundtrip(tz): - # see gh-19891 - ser = Series(date_range("20180101", periods=3, tz=tz)) - result, result_bins = cut(ser, 2, retbins=True) - - expected = cut(ser, result_bins) - tm.assert_series_equal(result, expected) - - expected_bins = DatetimeIndex( - ["2017-12-31 23:57:07.200000", "2018-01-02 00:00:00", "2018-01-03 00:00:00"] - ) - expected_bins = expected_bins.tz_localize(tz) - tm.assert_index_equal(result_bins, expected_bins) - - -def test_timedelta_cut_roundtrip(): - # see gh-19891 - ser = Series(timedelta_range("1day", periods=3)) - result, result_bins = cut(ser, 2, retbins=True) - - expected = cut(ser, result_bins) - tm.assert_series_equal(result, expected) - - expected_bins = TimedeltaIndex( - ["0 days 23:57:07.200000", "2 days 00:00:00", "3 days 00:00:00"] - ) - tm.assert_index_equal(result_bins, expected_bins) - - -@pytest.mark.parametrize("bins", [6, 7]) -@pytest.mark.parametrize( - "box, compare", - [ - (Series, tm.assert_series_equal), - (np.array, tm.assert_categorical_equal), - (list, tm.assert_equal), - ], -) -def test_cut_bool_coercion_to_int(bins, box, compare): - # issue 20303 - data_expected = box([0, 1, 1, 0, 1] * 10) - data_result = box([False, True, True, False, True] * 10) - expected = cut(data_expected, bins, duplicates="drop") - result = cut(data_result, bins, duplicates="drop") - compare(result, expected) - - -@pytest.mark.parametrize("labels", ["foo", 1, True]) -def test_cut_incorrect_labels(labels): - # GH 13318 - 
values = range(5) - msg = "Bin labels must either be False, None or passed in as a list-like argument" - with pytest.raises(ValueError, match=msg): - cut(values, 4, labels=labels) - - -@pytest.mark.parametrize("bins", [3, [0, 5, 15]]) -@pytest.mark.parametrize("right", [True, False]) -@pytest.mark.parametrize("include_lowest", [True, False]) -def test_cut_nullable_integer(bins, right, include_lowest): - a = np.random.default_rng(2).integers(0, 10, size=50).astype(float) - a[::2] = np.nan - result = cut( - pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest - ) - expected = cut(a, bins, right=right, include_lowest=include_lowest) - tm.assert_categorical_equal(result, expected) - - -@pytest.mark.parametrize( - "data, bins, labels, expected_codes, expected_labels", - [ - ([15, 17, 19], [14, 16, 18, 20], ["A", "B", "A"], [0, 1, 0], ["A", "B"]), - ([1, 3, 5], [0, 2, 4, 6, 8], [2, 0, 1, 2], [2, 0, 1], [0, 1, 2]), - ], -) -def test_cut_non_unique_labels(data, bins, labels, expected_codes, expected_labels): - # GH 33141 - result = cut(data, bins=bins, labels=labels, ordered=False) - expected = Categorical.from_codes( - expected_codes, categories=expected_labels, ordered=False - ) - tm.assert_categorical_equal(result, expected) - - -@pytest.mark.parametrize( - "data, bins, labels, expected_codes, expected_labels", - [ - ([15, 17, 19], [14, 16, 18, 20], ["C", "B", "A"], [0, 1, 2], ["C", "B", "A"]), - ([1, 3, 5], [0, 2, 4, 6, 8], [3, 0, 1, 2], [0, 1, 2], [3, 0, 1, 2]), - ], -) -def test_cut_unordered_labels(data, bins, labels, expected_codes, expected_labels): - # GH 33141 - result = cut(data, bins=bins, labels=labels, ordered=False) - expected = Categorical.from_codes( - expected_codes, categories=expected_labels, ordered=False - ) - tm.assert_categorical_equal(result, expected) - - -def test_cut_unordered_with_missing_labels_raises_error(): - # GH 33141 - msg = "'labels' must be provided if 'ordered = False'" - with pytest.raises(ValueError, match=msg): - cut([0.5, 3], bins=[0, 1, 2], ordered=False) - - -def test_cut_unordered_with_series_labels(): - # https://github.com/pandas-dev/pandas/issues/36603 - s = Series([1, 2, 3, 4, 5]) - bins = Series([0, 2, 4, 6]) - labels = Series(["a", "b", "c"]) - result = cut(s, bins=bins, labels=labels, ordered=False) - expected = Series(["a", "a", "b", "b", "c"], dtype="category") - tm.assert_series_equal(result, expected) - - -def test_cut_no_warnings(): - df = DataFrame({"value": np.random.default_rng(2).integers(0, 100, 20)}) - labels = [f"{i} - {i + 9}" for i in range(0, 100, 10)] - with tm.assert_produces_warning(False): - df["group"] = cut(df.value, range(0, 105, 10), right=False, labels=labels) - - -def test_cut_with_duplicated_index_lowest_included(): - # GH 42185 - expected = Series( - [Interval(-0.001, 2, closed="right")] * 3 - + [Interval(2, 4, closed="right"), Interval(-0.001, 2, closed="right")], - index=[0, 1, 2, 3, 0], - dtype="category", - ).cat.as_ordered() - - s = Series([0, 1, 2, 3, 0], index=[0, 1, 2, 3, 0]) - result = cut(s, bins=[0, 2, 4], include_lowest=True) - tm.assert_series_equal(result, expected) - - -def test_cut_with_nonexact_categorical_indices(): - # GH 42424 - - ser = Series(range(0, 100)) - ser1 = cut(ser, 10).value_counts().head(5) - ser2 = cut(ser, 10).value_counts().tail(5) - result = DataFrame({"1": ser1, "2": ser2}) - - index = pd.CategoricalIndex( - [ - Interval(-0.099, 9.9, closed="right"), - Interval(9.9, 19.8, closed="right"), - Interval(19.8, 29.7, closed="right"), - Interval(29.7, 39.6, 
closed="right"), - Interval(39.6, 49.5, closed="right"), - Interval(49.5, 59.4, closed="right"), - Interval(59.4, 69.3, closed="right"), - Interval(69.3, 79.2, closed="right"), - Interval(79.2, 89.1, closed="right"), - Interval(89.1, 99, closed="right"), - ], - ordered=True, - ) - - expected = DataFrame( - {"1": [10] * 5 + [np.nan] * 5, "2": [np.nan] * 5 + [10] * 5}, index=index - ) - - tm.assert_frame_equal(expected, result) - - -def test_cut_with_timestamp_tuple_labels(): - # GH 40661 - labels = [(Timestamp(10),), (Timestamp(20),), (Timestamp(30),)] - result = cut([2, 4, 6], bins=[1, 3, 5, 7], labels=labels) - - expected = Categorical.from_codes([0, 1, 2], labels, ordered=True) - tm.assert_categorical_equal(result, expected) - - -def test_cut_bins_datetime_intervalindex(): - # https://github.com/pandas-dev/pandas/issues/46218 - bins = interval_range(Timestamp("2022-02-25"), Timestamp("2022-02-27"), freq="1D") - # passing Series instead of list is important to trigger bug - result = cut(Series([Timestamp("2022-02-26")]), bins=bins) - expected = Categorical.from_codes([0], bins, ordered=True) - tm.assert_categorical_equal(result.array, expected) - - -def test_cut_with_nullable_int64(): - # GH 30787 - series = Series([0, 1, 2, 3, 4, pd.NA, 6, 7], dtype="Int64") - bins = [0, 2, 4, 6, 8] - intervals = IntervalIndex.from_breaks(bins) - - expected = Series( - Categorical.from_codes([-1, 0, 0, 1, 1, -1, 2, 3], intervals, ordered=True) - ) - - result = cut(series, bins=bins) - - tm.assert_series_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_index.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_index.py deleted file mode 100644 index ad3478b3198984c166c2eaafec0412fb2c2611ee..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/tseries/offsets/test_index.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Tests for offset behavior with indices. 
-""" -import pytest - -from pandas import ( - Series, - date_range, -) - -from pandas.tseries.offsets import ( - BMonthBegin, - BMonthEnd, - BQuarterBegin, - BQuarterEnd, - BYearBegin, - BYearEnd, - MonthBegin, - MonthEnd, - QuarterBegin, - QuarterEnd, - YearBegin, - YearEnd, -) - - -@pytest.mark.parametrize("n", [-2, 1]) -@pytest.mark.parametrize( - "cls", - [ - MonthBegin, - MonthEnd, - BMonthBegin, - BMonthEnd, - QuarterBegin, - QuarterEnd, - BQuarterBegin, - BQuarterEnd, - YearBegin, - YearEnd, - BYearBegin, - BYearEnd, - ], -) -def test_apply_index(cls, n): - offset = cls(n=n) - rng = date_range(start="1/1/2000", periods=100000, freq="T") - ser = Series(rng) - - res = rng + offset - assert res.freq is None # not retained - assert res[0] == rng[0] + offset - assert res[-1] == rng[-1] + offset - res2 = ser + offset - # apply_index is only for indexes, not series, so no res2_v2 - assert res2.iloc[0] == ser.iloc[0] + offset - assert res2.iloc[-1] == ser.iloc[-1] + offset diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/__init__.py deleted file mode 100644 index 6afb5c627ce3db6e61cbf46276f7ddd42552eb28..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from typing import List, Optional - -import pip._internal.utils.inject_securetransport # noqa -from pip._internal.utils import _log - -# init_logging() must be called before any call to logging.getLogger() -# which happens at import of most modules. -_log.init_logging() - - -def main(args: (Optional[List[str]]) = None) -> int: - """This is preserved for old console scripts that may still be referencing - it. - - For additional details, see https://github.com/pypa/pip/issues/7498. - """ - from pip._internal.utils.entrypoints import _wrapper - - return _wrapper(args) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/vcs/git.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/vcs/git.py deleted file mode 100644 index 8d1d499376744954308bdf96f80e5b5a39a24195..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/vcs/git.py +++ /dev/null @@ -1,526 +0,0 @@ -import logging -import os.path -import pathlib -import re -import urllib.parse -import urllib.request -from typing import List, Optional, Tuple - -from pip._internal.exceptions import BadCommand, InstallationError -from pip._internal.utils.misc import HiddenText, display_path, hide_url -from pip._internal.utils.subprocess import make_command -from pip._internal.vcs.versioncontrol import ( - AuthInfo, - RemoteNotFoundError, - RemoteNotValidError, - RevOptions, - VersionControl, - find_path_to_project_root_from_repo_root, - vcs, -) - -urlsplit = urllib.parse.urlsplit -urlunsplit = urllib.parse.urlunsplit - - -logger = logging.getLogger(__name__) - - -GIT_VERSION_REGEX = re.compile( - r"^git version " # Prefix. - r"(\d+)" # Major. - r"\.(\d+)" # Dot, minor. - r"(?:\.(\d+))?" # Optional dot, patch. - r".*$" # Suffix, including any pre- and post-release segments we don't care about. -) - -HASH_REGEX = re.compile("^[a-fA-F0-9]{40}$") - -# SCP (Secure copy protocol) shorthand. e.g. 'git@example.com:foo/bar.git' -SCP_REGEX = re.compile( - r"""^ - # Optional user, e.g. 'git@' - (\w+@)? - # Server, e.g. 'github.com'. 
- ([^/:]+): - # The server-side path. e.g. 'user/project.git'. Must start with an - # alphanumeric character so as not to be confusable with a Windows paths - # like 'C:/foo/bar' or 'C:\foo\bar'. - (\w[^:]*) - $""", - re.VERBOSE, -) - - -def looks_like_hash(sha: str) -> bool: - return bool(HASH_REGEX.match(sha)) - - -class Git(VersionControl): - name = "git" - dirname = ".git" - repo_name = "clone" - schemes = ( - "git+http", - "git+https", - "git+ssh", - "git+git", - "git+file", - ) - # Prevent the user's environment variables from interfering with pip: - # https://github.com/pypa/pip/issues/1130 - unset_environ = ("GIT_DIR", "GIT_WORK_TREE") - default_arg_rev = "HEAD" - - @staticmethod - def get_base_rev_args(rev: str) -> List[str]: - return [rev] - - def is_immutable_rev_checkout(self, url: str, dest: str) -> bool: - _, rev_options = self.get_url_rev_options(hide_url(url)) - if not rev_options.rev: - return False - if not self.is_commit_id_equal(dest, rev_options.rev): - # the current commit is different from rev, - # which means rev was something else than a commit hash - return False - # return False in the rare case rev is both a commit hash - # and a tag or a branch; we don't want to cache in that case - # because that branch/tag could point to something else in the future - is_tag_or_branch = bool(self.get_revision_sha(dest, rev_options.rev)[0]) - return not is_tag_or_branch - - def get_git_version(self) -> Tuple[int, ...]: - version = self.run_command( - ["version"], - command_desc="git version", - show_stdout=False, - stdout_only=True, - ) - match = GIT_VERSION_REGEX.match(version) - if not match: - logger.warning("Can't parse git version: %s", version) - return () - return tuple(int(c) for c in match.groups()) - - @classmethod - def get_current_branch(cls, location: str) -> Optional[str]: - """ - Return the current branch, or None if HEAD isn't at a branch - (e.g. detached HEAD). - """ - # git-symbolic-ref exits with empty stdout if "HEAD" is a detached - # HEAD rather than a symbolic ref. In addition, the -q causes the - # command to exit with status code 1 instead of 128 in this case - # and to suppress the message to stderr. - args = ["symbolic-ref", "-q", "HEAD"] - output = cls.run_command( - args, - extra_ok_returncodes=(1,), - show_stdout=False, - stdout_only=True, - cwd=location, - ) - ref = output.strip() - - if ref.startswith("refs/heads/"): - return ref[len("refs/heads/") :] - - return None - - @classmethod - def get_revision_sha(cls, dest: str, rev: str) -> Tuple[Optional[str], bool]: - """ - Return (sha_or_none, is_branch), where sha_or_none is a commit hash - if the revision names a remote branch or tag, otherwise None. - - Args: - dest: the repository directory. - rev: the revision name. - """ - # Pass rev to pre-filter the list. - output = cls.run_command( - ["show-ref", rev], - cwd=dest, - show_stdout=False, - stdout_only=True, - on_returncode="ignore", - ) - refs = {} - # NOTE: We do not use splitlines here since that would split on other - # unicode separators, which can be maliciously used to install a - # different revision. - for line in output.strip().split("\n"): - line = line.rstrip("\r") - if not line: - continue - try: - ref_sha, ref_name = line.split(" ", maxsplit=2) - except ValueError: - # Include the offending line to simplify troubleshooting if - # this error ever occurs. 
- raise ValueError(f"unexpected show-ref line: {line!r}") - - refs[ref_name] = ref_sha - - branch_ref = f"refs/remotes/origin/{rev}" - tag_ref = f"refs/tags/{rev}" - - sha = refs.get(branch_ref) - if sha is not None: - return (sha, True) - - sha = refs.get(tag_ref) - - return (sha, False) - - @classmethod - def _should_fetch(cls, dest: str, rev: str) -> bool: - """ - Return true if rev is a ref or is a commit that we don't have locally. - - Branches and tags are not considered in this method because they are - assumed to be always available locally (which is a normal outcome of - ``git clone`` and ``git fetch --tags``). - """ - if rev.startswith("refs/"): - # Always fetch remote refs. - return True - - if not looks_like_hash(rev): - # Git fetch would fail with abbreviated commits. - return False - - if cls.has_commit(dest, rev): - # Don't fetch if we have the commit locally. - return False - - return True - - @classmethod - def resolve_revision( - cls, dest: str, url: HiddenText, rev_options: RevOptions - ) -> RevOptions: - """ - Resolve a revision to a new RevOptions object with the SHA1 of the - branch, tag, or ref if found. - - Args: - rev_options: a RevOptions object. - """ - rev = rev_options.arg_rev - # The arg_rev property's implementation for Git ensures that the - # rev return value is always non-None. - assert rev is not None - - sha, is_branch = cls.get_revision_sha(dest, rev) - - if sha is not None: - rev_options = rev_options.make_new(sha) - rev_options.branch_name = rev if is_branch else None - - return rev_options - - # Do not show a warning for the common case of something that has - # the form of a Git commit hash. - if not looks_like_hash(rev): - logger.warning( - "Did not find branch or tag '%s', assuming revision or ref.", - rev, - ) - - if not cls._should_fetch(dest, rev): - return rev_options - - # fetch the requested revision - cls.run_command( - make_command("fetch", "-q", url, rev_options.to_args()), - cwd=dest, - ) - # Change the revision to the SHA of the ref we fetched - sha = cls.get_revision(dest, rev="FETCH_HEAD") - rev_options = rev_options.make_new(sha) - - return rev_options - - @classmethod - def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: - """ - Return whether the current commit hash equals the given name. - - Args: - dest: the repository directory. - name: a string name. - """ - if not name: - # Then avoid an unnecessary subprocess call. - return False - - return cls.get_revision(dest) == name - - def fetch_new( - self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int - ) -> None: - rev_display = rev_options.to_display() - logger.info("Cloning %s%s to %s", url, rev_display, display_path(dest)) - if verbosity <= 0: - flags: Tuple[str, ...] = ("--quiet",) - elif verbosity == 1: - flags = () - else: - flags = ("--verbose", "--progress") - if self.get_git_version() >= (2, 17): - # Git added support for partial clone in 2.17 - # https://git-scm.com/docs/partial-clone - # Speeds up cloning by functioning without a complete copy of repository - self.run_command( - make_command( - "clone", - "--filter=blob:none", - *flags, - url, - dest, - ) - ) - else: - self.run_command(make_command("clone", *flags, url, dest)) - - if rev_options.rev: - # Then a specific revision was requested. 
- rev_options = self.resolve_revision(dest, url, rev_options) - branch_name = getattr(rev_options, "branch_name", None) - logger.debug("Rev options %s, branch_name %s", rev_options, branch_name) - if branch_name is None: - # Only do a checkout if the current commit id doesn't match - # the requested revision. - if not self.is_commit_id_equal(dest, rev_options.rev): - cmd_args = make_command( - "checkout", - "-q", - rev_options.to_args(), - ) - self.run_command(cmd_args, cwd=dest) - elif self.get_current_branch(dest) != branch_name: - # Then a specific branch was requested, and that branch - # is not yet checked out. - track_branch = f"origin/{branch_name}" - cmd_args = [ - "checkout", - "-b", - branch_name, - "--track", - track_branch, - ] - self.run_command(cmd_args, cwd=dest) - else: - sha = self.get_revision(dest) - rev_options = rev_options.make_new(sha) - - logger.info("Resolved %s to commit %s", url, rev_options.rev) - - #: repo may contain submodules - self.update_submodules(dest) - - def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - self.run_command( - make_command("config", "remote.origin.url", url), - cwd=dest, - ) - cmd_args = make_command("checkout", "-q", rev_options.to_args()) - self.run_command(cmd_args, cwd=dest) - - self.update_submodules(dest) - - def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - # First fetch changes from the default remote - if self.get_git_version() >= (1, 9): - # fetch tags in addition to everything else - self.run_command(["fetch", "-q", "--tags"], cwd=dest) - else: - self.run_command(["fetch", "-q"], cwd=dest) - # Then reset to wanted revision (maybe even origin/master) - rev_options = self.resolve_revision(dest, url, rev_options) - cmd_args = make_command("reset", "--hard", "-q", rev_options.to_args()) - self.run_command(cmd_args, cwd=dest) - #: update submodules - self.update_submodules(dest) - - @classmethod - def get_remote_url(cls, location: str) -> str: - """ - Return URL of the first remote encountered. - - Raises RemoteNotFoundError if the repository does not have a remote - url configured. - """ - # We need to pass 1 for extra_ok_returncodes since the command - # exits with return code 1 if there are no matching lines. - stdout = cls.run_command( - ["config", "--get-regexp", r"remote\..*\.url"], - extra_ok_returncodes=(1,), - show_stdout=False, - stdout_only=True, - cwd=location, - ) - remotes = stdout.splitlines() - try: - found_remote = remotes[0] - except IndexError: - raise RemoteNotFoundError - - for remote in remotes: - if remote.startswith("remote.origin.url "): - found_remote = remote - break - url = found_remote.split(" ")[1] - return cls._git_remote_to_pip_url(url.strip()) - - @staticmethod - def _git_remote_to_pip_url(url: str) -> str: - """ - Convert a remote url from what git uses to what pip accepts. - - There are 3 legal forms **url** may take: - - 1. A fully qualified url: ssh://git@example.com/foo/bar.git - 2. A local project.git folder: /path/to/bare/repository.git - 3. SCP shorthand for form 1: git@example.com:foo/bar.git - - Form 1 is output as-is. Form 2 must be converted to URI and form 3 must - be converted to form 1. - - See the corresponding test test_git_remote_url_to_pip() for examples of - sample inputs/outputs. - """ - if re.match(r"\w+://", url): - # This is already valid. Pass it though as-is. - return url - if os.path.exists(url): - # A local bare remote (git clone --mirror). - # Needs a file:// prefix. 
- return pathlib.PurePath(url).as_uri() - scp_match = SCP_REGEX.match(url) - if scp_match: - # Add an ssh:// prefix and replace the ':' with a '/'. - return scp_match.expand(r"ssh://\1\2/\3") - # Otherwise, bail out. - raise RemoteNotValidError(url) - - @classmethod - def has_commit(cls, location: str, rev: str) -> bool: - """ - Check if rev is a commit that is available in the local repository. - """ - try: - cls.run_command( - ["rev-parse", "-q", "--verify", "sha^" + rev], - cwd=location, - log_failed_cmd=False, - ) - except InstallationError: - return False - else: - return True - - @classmethod - def get_revision(cls, location: str, rev: Optional[str] = None) -> str: - if rev is None: - rev = "HEAD" - current_rev = cls.run_command( - ["rev-parse", rev], - show_stdout=False, - stdout_only=True, - cwd=location, - ) - return current_rev.strip() - - @classmethod - def get_subdirectory(cls, location: str) -> Optional[str]: - """ - Return the path to Python project root, relative to the repo root. - Return None if the project root is in the repo root. - """ - # find the repo root - git_dir = cls.run_command( - ["rev-parse", "--git-dir"], - show_stdout=False, - stdout_only=True, - cwd=location, - ).strip() - if not os.path.isabs(git_dir): - git_dir = os.path.join(location, git_dir) - repo_root = os.path.abspath(os.path.join(git_dir, "..")) - return find_path_to_project_root_from_repo_root(location, repo_root) - - @classmethod - def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: - """ - Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. - That's required because although they use SSH they sometimes don't - work with a ssh:// scheme (e.g. GitHub). But we need a scheme for - parsing. Hence we remove it again afterwards and return it as a stub. 
- """ - # Works around an apparent Git bug - # (see https://article.gmane.org/gmane.comp.version-control.git/146500) - scheme, netloc, path, query, fragment = urlsplit(url) - if scheme.endswith("file"): - initial_slashes = path[: -len(path.lstrip("/"))] - newpath = initial_slashes + urllib.request.url2pathname(path).replace( - "\\", "/" - ).lstrip("/") - after_plus = scheme.find("+") + 1 - url = scheme[:after_plus] + urlunsplit( - (scheme[after_plus:], netloc, newpath, query, fragment), - ) - - if "://" not in url: - assert "file:" not in url - url = url.replace("git+", "git+ssh://") - url, rev, user_pass = super().get_url_rev_and_auth(url) - url = url.replace("ssh://", "") - else: - url, rev, user_pass = super().get_url_rev_and_auth(url) - - return url, rev, user_pass - - @classmethod - def update_submodules(cls, location: str) -> None: - if not os.path.exists(os.path.join(location, ".gitmodules")): - return - cls.run_command( - ["submodule", "update", "--init", "--recursive", "-q"], - cwd=location, - ) - - @classmethod - def get_repository_root(cls, location: str) -> Optional[str]: - loc = super().get_repository_root(location) - if loc: - return loc - try: - r = cls.run_command( - ["rev-parse", "--show-toplevel"], - cwd=location, - show_stdout=False, - stdout_only=True, - on_returncode="raise", - log_failed_cmd=False, - ) - except BadCommand: - logger.debug( - "could not determine if %s is under git control " - "because git is not available", - location, - ) - return None - except InstallationError: - return None - return os.path.normpath(r.rstrip("\r\n")) - - @staticmethod - def should_add_vcs_url_prefix(repo_url: str) -> bool: - """In either https or ssh form, requirements must be prefixed with git+.""" - return True - - -vcs.register(Git) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/rich/default_styles.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/rich/default_styles.py deleted file mode 100644 index 91ab232d31d561d7f2fcb92e83af1bbd4a4e89f7..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/rich/default_styles.py +++ /dev/null @@ -1,183 +0,0 @@ -from typing import Dict - -from .style import Style - - -DEFAULT_STYLES: Dict[str, Style] = { - "none": Style.null(), - "reset": Style( - color="default", - bgcolor="default", - dim=False, - bold=False, - italic=False, - underline=False, - blink=False, - blink2=False, - reverse=False, - conceal=False, - strike=False, - ), - "dim": Style(dim=True), - "bright": Style(dim=False), - "bold": Style(bold=True), - "strong": Style(bold=True), - "code": Style(reverse=True, bold=True), - "italic": Style(italic=True), - "emphasize": Style(italic=True), - "underline": Style(underline=True), - "blink": Style(blink=True), - "blink2": Style(blink2=True), - "reverse": Style(reverse=True), - "strike": Style(strike=True), - "black": Style(color="black"), - "red": Style(color="red"), - "green": Style(color="green"), - "yellow": Style(color="yellow"), - "magenta": Style(color="magenta"), - "cyan": Style(color="cyan"), - "white": Style(color="white"), - "inspect.attr": Style(color="yellow", italic=True), - "inspect.attr.dunder": Style(color="yellow", italic=True, dim=True), - "inspect.callable": Style(bold=True, color="red"), - "inspect.def": Style(italic=True, color="bright_cyan"), - "inspect.error": Style(bold=True, color="red"), - "inspect.equals": Style(), - "inspect.help": Style(color="cyan"), - "inspect.doc": 
Style(dim=True), - "inspect.value.border": Style(color="green"), - "live.ellipsis": Style(bold=True, color="red"), - "layout.tree.row": Style(dim=False, color="red"), - "layout.tree.column": Style(dim=False, color="blue"), - "logging.keyword": Style(bold=True, color="yellow"), - "logging.level.notset": Style(dim=True), - "logging.level.debug": Style(color="green"), - "logging.level.info": Style(color="blue"), - "logging.level.warning": Style(color="red"), - "logging.level.error": Style(color="red", bold=True), - "logging.level.critical": Style(color="red", bold=True, reverse=True), - "log.level": Style.null(), - "log.time": Style(color="cyan", dim=True), - "log.message": Style.null(), - "log.path": Style(dim=True), - "repr.ellipsis": Style(color="yellow"), - "repr.indent": Style(color="green", dim=True), - "repr.error": Style(color="red", bold=True), - "repr.str": Style(color="green", italic=False, bold=False), - "repr.brace": Style(bold=True), - "repr.comma": Style(bold=True), - "repr.ipv4": Style(bold=True, color="bright_green"), - "repr.ipv6": Style(bold=True, color="bright_green"), - "repr.eui48": Style(bold=True, color="bright_green"), - "repr.eui64": Style(bold=True, color="bright_green"), - "repr.tag_start": Style(bold=True), - "repr.tag_name": Style(color="bright_magenta", bold=True), - "repr.tag_contents": Style(color="default"), - "repr.tag_end": Style(bold=True), - "repr.attrib_name": Style(color="yellow", italic=False), - "repr.attrib_equal": Style(bold=True), - "repr.attrib_value": Style(color="magenta", italic=False), - "repr.number": Style(color="cyan", bold=True, italic=False), - "repr.bool_true": Style(color="bright_green", italic=True), - "repr.bool_false": Style(color="bright_red", italic=True), - "repr.none": Style(color="magenta", italic=True), - "repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False), - "repr.uuid": Style(color="bright_yellow", bold=False), - "repr.call": Style(color="magenta", bold=True), - "repr.path": Style(color="magenta"), - "repr.filename": Style(color="bright_magenta"), - "rule.line": Style(color="bright_green"), - "rule.text": Style.null(), - "json.brace": Style(bold=True), - "json.bool_true": Style(color="bright_green", italic=True), - "json.bool_false": Style(color="bright_red", italic=True), - "json.null": Style(color="magenta", italic=True), - "json.number": Style(color="cyan", bold=True, italic=False), - "json.str": Style(color="green", italic=False, bold=False), - "json.key": Style(color="blue", bold=True), - "prompt": Style.null(), - "prompt.choices": Style(color="magenta", bold=True), - "prompt.default": Style(color="cyan", bold=True), - "prompt.invalid": Style(color="red"), - "prompt.invalid.choice": Style(color="red"), - "pretty": Style.null(), - "scope.border": Style(color="blue"), - "scope.key": Style(color="yellow", italic=True), - "scope.key.special": Style(color="yellow", italic=True, dim=True), - "scope.equals": Style(color="red"), - "table.header": Style(bold=True), - "table.footer": Style(bold=True), - "table.cell": Style.null(), - "table.title": Style(italic=True), - "table.caption": Style(italic=True, dim=True), - "traceback.error": Style(color="red", italic=True), - "traceback.border.syntax_error": Style(color="bright_red"), - "traceback.border": Style(color="red"), - "traceback.text": Style.null(), - "traceback.title": Style(color="red", bold=True), - "traceback.exc_type": Style(color="bright_red", bold=True), - "traceback.exc_value": Style.null(), - "traceback.offset": Style(color="bright_red", 
bold=True), - "bar.back": Style(color="grey23"), - "bar.complete": Style(color="rgb(249,38,114)"), - "bar.finished": Style(color="rgb(114,156,31)"), - "bar.pulse": Style(color="rgb(249,38,114)"), - "progress.description": Style.null(), - "progress.filesize": Style(color="green"), - "progress.filesize.total": Style(color="green"), - "progress.download": Style(color="green"), - "progress.elapsed": Style(color="yellow"), - "progress.percentage": Style(color="magenta"), - "progress.remaining": Style(color="cyan"), - "progress.data.speed": Style(color="red"), - "progress.spinner": Style(color="green"), - "status.spinner": Style(color="green"), - "tree": Style(), - "tree.line": Style(), - "markdown.paragraph": Style(), - "markdown.text": Style(), - "markdown.emph": Style(italic=True), - "markdown.strong": Style(bold=True), - "markdown.code": Style(bgcolor="black", color="bright_white"), - "markdown.code_block": Style(dim=True, color="cyan", bgcolor="black"), - "markdown.block_quote": Style(color="magenta"), - "markdown.list": Style(color="cyan"), - "markdown.item": Style(), - "markdown.item.bullet": Style(color="yellow", bold=True), - "markdown.item.number": Style(color="yellow", bold=True), - "markdown.hr": Style(color="yellow"), - "markdown.h1.border": Style(), - "markdown.h1": Style(bold=True), - "markdown.h2": Style(bold=True, underline=True), - "markdown.h3": Style(bold=True), - "markdown.h4": Style(bold=True, dim=True), - "markdown.h5": Style(underline=True), - "markdown.h6": Style(italic=True), - "markdown.h7": Style(italic=True, dim=True), - "markdown.link": Style(color="bright_blue"), - "markdown.link_url": Style(color="blue"), -} - - -if __name__ == "__main__": # pragma: no cover - import argparse - import io - - from pip._vendor.rich.console import Console - from pip._vendor.rich.table import Table - from pip._vendor.rich.text import Text - - parser = argparse.ArgumentParser() - parser.add_argument("--html", action="store_true", help="Export as HTML table") - args = parser.parse_args() - html: bool = args.html - console = Console(record=True, width=70, file=io.StringIO()) if html else Console() - - table = Table("Name", "Styling") - - for style_name, style in DEFAULT_STYLES.items(): - table.add_row(Text(style_name, style=style), str(style)) - - console.print(table) - if html: - print(console.export_html(inline_styles=True)) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/_ratio.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/_ratio.py deleted file mode 100644 index f7dbe927053e4ebef04c0d675468ee783845bc4f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/_ratio.py +++ /dev/null @@ -1,160 +0,0 @@ -import sys -from fractions import Fraction -from math import ceil -from typing import cast, List, Optional, Sequence - -if sys.version_info >= (3, 8): - from typing import Protocol -else: - from typing_extensions import Protocol # pragma: no cover - - -class Edge(Protocol): - """Any object that defines an edge (such as Layout).""" - - size: Optional[int] = None - ratio: int = 1 - minimum_size: int = 1 - - -def ratio_resolve(total: int, edges: Sequence[Edge]) -> List[int]: - """Divide total space to satisfy size, ratio, and minimum_size, constraints. - - The returned list of integers should add up to total in most cases, unless it is - impossible to satisfy all the constraints. 
For instance, if there are two edges - with a minimum size of 20 each and `total` is 30 then the returned list will be - greater than total. In practice, this would mean that a Layout object would - clip the rows that would overflow the screen height. - - Args: - total (int): Total number of characters. - edges (List[Edge]): Edges within total space. - - Returns: - List[int]: Number of characters for each edge. - """ - # Size of edge or None for yet to be determined - sizes = [(edge.size or None) for edge in edges] - - _Fraction = Fraction - - # While any edges haven't been calculated - while None in sizes: - # Get flexible edges and index to map these back on to sizes list - flexible_edges = [ - (index, edge) - for index, (size, edge) in enumerate(zip(sizes, edges)) - if size is None - ] - # Remaining space in total - remaining = total - sum(size or 0 for size in sizes) - if remaining <= 0: - # No room for flexible edges - return [ - ((edge.minimum_size or 1) if size is None else size) - for size, edge in zip(sizes, edges) - ] - # Calculate number of characters in a ratio portion - portion = _Fraction( - remaining, sum((edge.ratio or 1) for _, edge in flexible_edges) - ) - - # If any edges will be less than their minimum, replace size with the minimum - for index, edge in flexible_edges: - if portion * edge.ratio <= edge.minimum_size: - sizes[index] = edge.minimum_size - # New fixed size will invalidate calculations, so we need to repeat the process - break - else: - # Distribute flexible space and compensate for rounding error - # Since edge sizes can only be integers we need to add the remainder - # to the following line - remainder = _Fraction(0) - for index, edge in flexible_edges: - size, remainder = divmod(portion * edge.ratio + remainder, 1) - sizes[index] = size - break - # Sizes now contains integers only - return cast(List[int], sizes) - - -def ratio_reduce( - total: int, ratios: List[int], maximums: List[int], values: List[int] -) -> List[int]: - """Divide an integer total in to parts based on ratios. - - Args: - total (int): The total to divide. - ratios (List[int]): A list of integer ratios. - maximums (List[int]): List of maximums values for each slot. - values (List[int]): List of values - - Returns: - List[int]: A list of integers guaranteed to sum to total. - """ - ratios = [ratio if _max else 0 for ratio, _max in zip(ratios, maximums)] - total_ratio = sum(ratios) - if not total_ratio: - return values[:] - total_remaining = total - result: List[int] = [] - append = result.append - for ratio, maximum, value in zip(ratios, maximums, values): - if ratio and total_ratio > 0: - distributed = min(maximum, round(ratio * total_remaining / total_ratio)) - append(value - distributed) - total_remaining -= distributed - total_ratio -= ratio - else: - append(value) - return result - - -def ratio_distribute( - total: int, ratios: List[int], minimums: Optional[List[int]] = None -) -> List[int]: - """Distribute an integer total in to parts based on ratios. - - Args: - total (int): The total to divide. - ratios (List[int]): A list of integer ratios. - minimums (List[int]): List of minimum values for each slot. - - Returns: - List[int]: A list of integers guaranteed to sum to total. 
- """ - if minimums: - ratios = [ratio if _min else 0 for ratio, _min in zip(ratios, minimums)] - total_ratio = sum(ratios) - assert total_ratio > 0, "Sum of ratios must be > 0" - - total_remaining = total - distributed_total: List[int] = [] - append = distributed_total.append - if minimums is None: - _minimums = [0] * len(ratios) - else: - _minimums = minimums - for ratio, minimum in zip(ratios, _minimums): - if total_ratio > 0: - distributed = max(minimum, ceil(ratio * total_remaining / total_ratio)) - else: - distributed = total_remaining - append(distributed) - total_ratio -= ratio - total_remaining -= distributed - return distributed_total - - -if __name__ == "__main__": - from dataclasses import dataclass - - @dataclass - class E: - - size: Optional[int] = None - ratio: int = 1 - minimum_size: int = 1 - - resolved = ratio_resolve(110, [E(None, 1, 1), E(None, 1, 1), E(None, 1, 1)]) - print(sum(resolved)) diff --git a/spaces/pseudolab/moogeulmoogeul/README.md b/spaces/pseudolab/moogeulmoogeul/README.md deleted file mode 100644 index de5bdc68f27edf3dd1a820231a4ef1875100fefd..0000000000000000000000000000000000000000 --- a/spaces/pseudolab/moogeulmoogeul/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Moogeulmoogeul -emoji: 🔥 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 4.1.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pycoming/bingo/src/components/markdown.tsx b/spaces/pycoming/bingo/src/components/markdown.tsx deleted file mode 100644 index d4491467a1f14d1d72e535caac9c40636054e5df..0000000000000000000000000000000000000000 --- a/spaces/pycoming/bingo/src/components/markdown.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { FC, memo } from 'react' -import ReactMarkdown, { Options } from 'react-markdown' - -export const MemoizedReactMarkdown: FC = memo( - ReactMarkdown, - (prevProps, nextProps) => - prevProps.children === nextProps.children && - prevProps.className === nextProps.className -) diff --git a/spaces/pyodide-demo/self-hosted/webencodings.js b/spaces/pyodide-demo/self-hosted/webencodings.js deleted file mode 100644 index ebe2e52dbf255eff84915e64de94f2bd0bf0e8f1..0000000000000000000000000000000000000000 --- a/spaces/pyodide-demo/self-hosted/webencodings.js +++ /dev/null @@ -1 +0,0 @@ -var Module=typeof globalThis.__pyodide_module!=="undefined"?globalThis.__pyodide_module:{};if(!Module.expectedDataFileDownloads){Module.expectedDataFileDownloads=0}Module.expectedDataFileDownloads++;(function(){var loadPackage=function(metadata){var PACKAGE_PATH="";if(typeof window==="object"){PACKAGE_PATH=window["encodeURIComponent"](window.location.pathname.toString().substring(0,window.location.pathname.toString().lastIndexOf("/"))+"/")}else if(typeof process==="undefined"&&typeof location!=="undefined"){PACKAGE_PATH=encodeURIComponent(location.pathname.toString().substring(0,location.pathname.toString().lastIndexOf("/"))+"/")}var PACKAGE_NAME="webencodings.data";var REMOTE_PACKAGE_BASE="webencodings.data";if(typeof Module["locateFilePackage"]==="function"&&!Module["locateFile"]){Module["locateFile"]=Module["locateFilePackage"];err("warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)")}var REMOTE_PACKAGE_NAME=Module["locateFile"]?Module["locateFile"](REMOTE_PACKAGE_BASE,""):REMOTE_PACKAGE_BASE;var REMOTE_PACKAGE_SIZE=metadata["remote_package_size"];var 
PACKAGE_UUID=metadata["package_uuid"];function fetchRemotePackage(packageName,packageSize,callback,errback){if(typeof process==="object"){require("fs").readFile(packageName,(function(err,contents){if(err){errback(err)}else{callback(contents.buffer)}}));return}var xhr=new XMLHttpRequest;xhr.open("GET",packageName,true);xhr.responseType="arraybuffer";xhr.onprogress=function(event){var url=packageName;var size=packageSize;if(event.total)size=event.total;if(event.loaded){if(!xhr.addedTotal){xhr.addedTotal=true;if(!Module.dataFileDownloads)Module.dataFileDownloads={};Module.dataFileDownloads[url]={loaded:event.loaded,total:size}}else{Module.dataFileDownloads[url].loaded=event.loaded}var total=0;var loaded=0;var num=0;for(var download in Module.dataFileDownloads){var data=Module.dataFileDownloads[download];total+=data.total;loaded+=data.loaded;num++}total=Math.ceil(total*Module.expectedDataFileDownloads/num);if(Module["setStatus"])Module["setStatus"]("Downloading data... ("+loaded+"/"+total+")")}else if(!Module.dataFileDownloads){if(Module["setStatus"])Module["setStatus"]("Downloading data...")}};xhr.onerror=function(event){throw new Error("NetworkError for: "+packageName)};xhr.onload=function(event){if(xhr.status==200||xhr.status==304||xhr.status==206||xhr.status==0&&xhr.response){var packageData=xhr.response;callback(packageData)}else{throw new Error(xhr.statusText+" : "+xhr.responseURL)}};xhr.send(null)}function handleError(error){console.error("package error:",error)}var fetchedCallback=null;var fetched=Module["getPreloadedPackage"]?Module["getPreloadedPackage"](REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE):null;if(!fetched)fetchRemotePackage(REMOTE_PACKAGE_NAME,REMOTE_PACKAGE_SIZE,(function(data){if(fetchedCallback){fetchedCallback(data);fetchedCallback=null}else{fetched=data}}),handleError);function runWithFS(){function assert(check,msg){if(!check)throw msg+(new Error).stack}Module["FS_createPath"]("/","lib",true,true);Module["FS_createPath"]("/lib","python3.9",true,true);Module["FS_createPath"]("/lib/python3.9","site-packages",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","webencodings",true,true);Module["FS_createPath"]("/lib/python3.9/site-packages","webencodings-0.5.1-py3.9.egg-info",true,true);function processPackageData(arrayBuffer){assert(arrayBuffer,"Loading data file failed.");assert(arrayBuffer instanceof ArrayBuffer,"bad input to processPackageData");var byteArray=new Uint8Array(arrayBuffer);var curr;var compressedData={data:null,cachedOffset:16204,cachedIndexes:[-1,-1],cachedChunks:[null,null],offsets:[0,1447,2695,3705,4724,5905,7187,8221,9021,10049,10779,11378,12267,12926,13583,14286,15166],sizes:[1447,1248,1010,1019,1181,1282,1034,800,1028,730,599,889,659,657,703,880,1038],successes:[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]};compressedData["data"]=byteArray;assert(typeof Module.LZ4==="object","LZ4 not present - was your app build with -s LZ4=1 
?");Module.LZ4.loadPackage({metadata:metadata,compressedData:compressedData},true);Module["removeRunDependency"]("datafile_webencodings.data")}Module["addRunDependency"]("datafile_webencodings.data");if(!Module.preloadResults)Module.preloadResults={};Module.preloadResults[PACKAGE_NAME]={fromCache:false};if(fetched){processPackageData(fetched);fetched=null}else{fetchedCallback=processPackageData}}if(Module["calledRun"]){runWithFS()}else{if(!Module["preRun"])Module["preRun"]=[];Module["preRun"].push(runWithFS)}};loadPackage({files:[{filename:"/lib/python3.9/site-packages/webencodings/__init__.py",start:0,end:10579,audio:0},{filename:"/lib/python3.9/site-packages/webencodings/mklabels.py",start:10579,end:11884,audio:0},{filename:"/lib/python3.9/site-packages/webencodings/x_user_defined.py",start:11884,end:16191,audio:0},{filename:"/lib/python3.9/site-packages/webencodings/tests.py",start:16191,end:22754,audio:0},{filename:"/lib/python3.9/site-packages/webencodings/labels.py",start:22754,end:31733,audio:0},{filename:"/lib/python3.9/site-packages/webencodings-0.5.1-py3.9.egg-info/PKG-INFO",start:31733,end:33859,audio:0},{filename:"/lib/python3.9/site-packages/webencodings-0.5.1-py3.9.egg-info/SOURCES.txt",start:33859,end:34158,audio:0},{filename:"/lib/python3.9/site-packages/webencodings-0.5.1-py3.9.egg-info/top_level.txt",start:34158,end:34171,audio:0},{filename:"/lib/python3.9/site-packages/webencodings-0.5.1-py3.9.egg-info/dependency_links.txt",start:34171,end:34172,audio:0}],remote_package_size:20300,package_uuid:"d68b3bf4-4ec6-4e6a-9156-8d36e0364cbe"})})(); \ No newline at end of file diff --git a/spaces/pythainlp/pythainlp/pages/transliteration.py b/spaces/pythainlp/pythainlp/pages/transliteration.py deleted file mode 100644 index 079fcda3b2346775b1b06afc3629a518d2b42311..0000000000000000000000000000000000000000 --- a/spaces/pythainlp/pythainlp/pages/transliteration.py +++ /dev/null @@ -1,32 +0,0 @@ -import streamlit as st -import time -from pythainlp.transliterate import transliterate - -st.markdown(""" -# Translation 🎉 - -PyThaiNLP support transliterate text for NLP piplines. We have - -- thaig2p - (default) Thai Grapheme-to-Phoneme, output is IPA (require PyTorch) -- tltk_g2p - Thai Grapheme-to-Phoneme from TLTK _., -- tltk_ipa - tltk, output is International Phonetic Alphabet (IPA) - -for this demo page. You can custom dictionary for some word tokenizer engine. (Python only) -""") - -with st.form("my_form"): - st.write("Input word") - text = st.text_input("text","แมว") - engine=st.selectbox('Select transliterate', ['thaig2p', 'tltk_g2p', 'tltk_ipa'], key=1,index=0) - - # Every form must have a submit button. - submitted = st.form_submit_button("Submit") - if submitted: - st.subheader("Words: ") - start = time.time() - st.write(transliterate(str(text), engine=str(engine))) - end = time.time() - st.write() - st.write("Running times: "+str(end - start)) - -st.write("See the documentation at [transliterate | PyThaiNLP](https://pythainlp.github.io/docs/3.0/api/transliterate.html).") diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Crack High Quality Razor 1911 Gta 4 Blogspot.md b/spaces/quidiaMuxgu/Expedit-SAM/Crack High Quality Razor 1911 Gta 4 Blogspot.md deleted file mode 100644 index bf4160e81f3317566ff69816016b0e52c649d079..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Crack High Quality Razor 1911 Gta 4 Blogspot.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  crack razor 1911 gta 4 blogspot


                  Download File 🔗 https://geags.com/2uCsbE



                  -
                  - d5da3c52bf
                  -
                  -
                  -

                  diff --git a/spaces/radames/transformers-js-sveltekit-static-example-app/_app/immutable/chunks/index.7e6319f2.js b/spaces/radames/transformers-js-sveltekit-static-example-app/_app/immutable/chunks/index.7e6319f2.js deleted file mode 100644 index 51bc921c7931798cd3e6765e25a5f8f4709bb4b2..0000000000000000000000000000000000000000 --- a/spaces/radames/transformers-js-sveltekit-static-example-app/_app/immutable/chunks/index.7e6319f2.js +++ /dev/null @@ -1 +0,0 @@ -var E=Object.defineProperty;var C=(e,t,n)=>t in e?E(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var p=(e,t,n)=>(C(e,typeof t!="symbol"?t+"":t,n),n);import{r as h,n as y,f as w,h as j,i as S,j as B,k as b,l as L,m as I,p as N,q as M,v as P,w as T}from"./scheduler.e108d1fd.js";let $=!1;function q(){$=!0}function H(){$=!1}function O(e,t,n,i){for(;e>1);n(l)<=i?e=l+1:t=l}return e}function z(e){if(e.hydrate_init)return;e.hydrate_init=!0;let t=e.childNodes;if(e.nodeName==="HEAD"){const r=[];for(let a=0;a0&&t[n[l]].claim_order<=a?l+1:O(1,l,_=>t[n[_]].claim_order,a))-1;i[r]=n[o]+1;const u=o+1;n[u]=r,l=Math.max(u,l)}const c=[],s=[];let f=t.length-1;for(let r=n[l]+1;r!=0;r=i[r-1]){for(c.push(t[r-1]);f>=r;f--)s.push(t[f]);f--}for(;f>=0;f--)s.push(t[f]);c.reverse(),s.sort((r,a)=>r.claim_order-a.claim_order);for(let r=0,a=0;r=c[a].claim_order;)a++;const o=ae.removeEventListener(t,n,i)}function re(e,t,n){n==null?e.removeAttribute(t):e.getAttribute(t)!==n&&e.setAttribute(t,n)}function ae(e){return e.dataset.svelteH}function W(e){return Array.from(e.childNodes)}function F(e){e.claim_info===void 0&&(e.claim_info={last_index:0,total_claimed:0})}function A(e,t,n,i,l=!1){F(e);const c=(()=>{for(let s=e.claim_info.last_index;s=0;s--){const f=e[s];if(t(f)){const r=n(f);return r===void 0?e.splice(s,1):e[s]=r,l?r===void 0&&e.claim_info.last_index--:e.claim_info.last_index=s,f}}return i()})();return c.claim_order=e.claim_info.total_claimed,e.claim_info.total_claimed+=1,c}function G(e,t,n,i){return A(e,l=>l.nodeName===t,l=>{const c=[];for(let s=0;sl.removeAttribute(s))},()=>i(t))}function se(e,t,n){return G(e,t,n,V)}function J(e,t){return A(e,n=>n.nodeType===3,n=>{const i=""+t;if(n.data.startsWith(i)){if(n.data.length!==i.length)return n.splitText(i.length)}else n.data=i},()=>x(t),!0)}function le(e){return J(e," ")}function fe(e,t){t=""+t,e.data!==t&&(e.data=t)}function ce(e,t,n,i){n==null?e.style.removeProperty(t):e.style.setProperty(t,n,i?"important":"")}function ue(e,t){return new e(t)}const m=new Set;let d;function oe(){d={r:0,c:[],p:d}}function de(){d.r||h(d.c),d=d.p}function K(e,t){e&&e.i&&(m.delete(e),e.i(t))}function _e(e,t,n,i){if(e&&e.o){if(m.has(e))return;m.add(e),d.c.push(()=>{m.delete(e),i&&(n&&e.d(1),i())}),e.o(t)}else i&&i()}function me(e){e&&e.c()}function he(e,t){e&&e.l(t)}function Q(e,t,n){const{fragment:i,after_update:l}=e.$$;i&&i.m(t,n),b(()=>{const c=e.$$.on_mount.map(M).filter(S);e.$$.on_destroy?e.$$.on_destroy.push(...c):h(c),e.$$.on_mount=[]}),l.forEach(b)}function U(e,t){const n=e.$$;n.fragment!==null&&(L(n.after_update),h(n.on_destroy),n.fragment&&n.fragment.d(t),n.on_destroy=n.fragment=null,n.ctx=[])}function X(e,t){e.$$.dirty[0]===-1&&(P.push(e),T(),e.$$.dirty.fill(0)),e.$$.dirty[t/31|0]|=1<{const v=g.length?g[0]:_;return a.ctx&&l(a.ctx[u],a.ctx[u]=v)&&(!a.skip_bound&&a.bound[u]&&a.bound[u](v),o&&X(e,u)),_}):[],a.update(),o=!0,h(a.before_update),a.fragment=i?i(a.ctx):!1,t.target){if(t.hydrate){q();const u=W(t.target);a.fragment&&a.fragment.l(u),u.forEach(R)}else 
a.fragment&&a.fragment.c();t.intro&&K(e.$$.fragment),Q(e,t.target,t.anchor),H(),j()}N(r)}class pe{constructor(){p(this,"$$");p(this,"$$set")}$destroy(){U(this,1),this.$destroy=y}$on(t,n){if(!S(n))return y;const i=this.$$.callbacks[t]||(this.$$.callbacks[t]=[]);return i.push(n),()=>{const l=i.indexOf(n);l!==-1&&i.splice(l,1)}}$set(t){this.$$set&&!B(t)&&(this.$$.skip_bound=!0,this.$$set(t),this.$$.skip_bound=!1)}}const Y="4";typeof window<"u"&&(window.__svelte||(window.__svelte={v:new Set})).v.add(Y);export{pe as S,ee as a,de as b,le as c,K as d,ne as e,R as f,V as g,se as h,$e as i,W as j,re as k,ce as l,x as m,J as n,fe as o,oe as p,ue as q,me as r,te as s,_e as t,he as u,Q as v,U as w,D as x,ae as y,ie as z}; diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Create amazing effects with re flex motion morph crack in After Effects.md b/spaces/raedeXanto/academic-chatgpt-beta/Create amazing effects with re flex motion morph crack in After Effects.md deleted file mode 100644 index 9e13ed3812e3f4295ef26cf79df83f7963457316..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Create amazing effects with re flex motion morph crack in After Effects.md +++ /dev/null @@ -1,109 +0,0 @@ - -

                  Blunt Force Download for PC [Crack Serial Key]

                  -

                  If you are looking for a thrilling VR game that will take you back to the time of World War 2, then you should check out Blunt Force. This game is developed by Monad Rock and it follows two parallel storylines before and during the war. You will experience the contrast between the peaceful pre-war era and the chaotic war zone, as well as the impact of your actions on both timelines. In this article, we will show you how to download Blunt Force for PC, how to crack its serial key, and some tips and tricks for playing the game.

                  -




                  -

                  Blunt Force Features

                  -

                  Blunt Force is a unique VR game that combines adventure and shooter elements in a seamless way. Here are some of the features that make this game stand out:

                  -
                    -
                  • Two parallel storylines before and during WW2. You will play as a British soldier who visits various locations in Europe before the war breaks out. You will also witness how these locations change during the war, as you fight against the Nazis. The two timelines are connected by smooth transitions that will surprise you and immerse you in the story.
                  • -
                  • Immersive VR gameplay with realistic weapon mechanics. You will use motion controllers to aim, shoot, reload, and throw grenades at your enemies. You will also interact with objects and solve puzzles in the adventure mode. The game supports Oculus Rift and HTC Vive devices.
                  • -
                  • Adventure and shooter modes with different challenges. In the adventure mode, you will explore the pre-war locations and look for clues and secrets that will help you in the war mode. In the shooter mode, you will face different types of enemies and scenarios that will test your skills and reflexes. You can choose between arcade mode or challenge mode depending on your preference.
                  • -
                  • Smooth transitions between time periods and locations. The game uses a clever mechanism to switch between the two timelines without loading screens or interruptions. For example, you might be drinking in a bar one moment, and then see how the bar transforms into ruins and your newspaper turns into a gun. The transitions are triggered by your actions or events in the story.
                  • -
                  -

                  How to Download Blunt Force for PC

                  -

                  To play Blunt Force on your PC, you will need a VR device such as Oculus Rift or HTC Vive, as well as a PC that meets the minimum or recommended specifications. You will also need a Steam account to purchase and install the game. Here are the steps to download Blunt Force for PC:

                  -
                    -
                  1. Visit Steam store. Go to https://store.steampowered.com/app/816070/Blunt_Force/ or search for Blunt Force on Steam.
                  2. -
                  3. Purchase game. Click on Add to Cart button and follow the instructions to complete your payment. You can also add the game to your wishlist if you want to buy it later.
                  4. -
                  5. Install game. After purchasing the game, go to your Library tab on Steam and find Blunt Force. Click on Install button and choose a location for your game files. Wait for the download and installation process to finish.
                  6. -
                  7. Launch game. Once the game is installed, click on Play button or double-click on Blunt Force icon on your desktop. Make sure your VR device is connected and ready to use.
                  8. -
                  -

                  How to Crack Blunt Force Serial Key

                  -

                  If you want to play Blunt Force without paying for it, you might be tempted to crack its serial key. However, we advise you not to do so, because cracking games is illegal and risky. You might face legal consequences or damage your PC with viruses or malware. You might also miss out on updates, patches, or online features of the game. If you still want to crack Blunt Force serial key, here are some steps you can follow at your own risk:

                  -
                    -
                  1. Download crack file. Search for websites that offer cracked games and serial keys such as Skidrow Games or CPY Games. Find Blunt Force crack file and download it from a reliable source. Be careful of fake or malicious links that might harm your PC.
                  2. -
                  3. Extract file. After downloading the crack file, use a program such as WinRAR or 7-Zip to extract its contents. You should see a folder named Crack or CODEX or something similar.
                  4. -
                  5. Copy file. Open the Crack folder and find a file named bluntforce.exe or something similar. This is the cracked executable file that will bypass the serial key verification. Copy this file by right-clicking on it and choosing Copy.
                  6. -
                  7. Paste file. Go to the location where you installed Blunt Force on your PC. Find a folder named bluntforce or something similar. This is where the original executable file of the game is located. Paste the cracked file by right-clicking on an empty space and choosing Paste. You might be asked to replace or overwrite the existing file. Choose Yes or OK.
                  8. -
                  9. Run game. After pasting the cracked file, double-click on it or use Steam to launch it as usual. You should be able to play Blunt Force without entering a serial key.
                  10. -
                  -

                  Tips and Tricks for Playing Blunt Force

                  -

                  To enjoy Blunt Force to its fullest potential, here are some tips and tricks that will help you play better:

                  -


                  -
                    -
                  • Explore both timelines and find clues and secrets. Don't just rush through the adventure mode or shoot everything in sight in the shooter mode. Take your time to explore both timelines and look for clues and secrets that will enrich your experience. You might find hidden items, documents, codes, or Easter eggs that will reveal more about the story or unlock new features.
                  • -
                  • Use different weapons and tactics for different enemies. Don't rely on one weapon or strategy for all situations. Experiment with different weapons and tactics for different enemies. For example, use stealth or sniping for long-range targets, use machine guns or grenades for close-range targets, use cover or movement for avoiding fire, etc.
                  • -
                  • Adjust settings and preferences for optimal VR experience. Make sure you adjust your settings and preferences according to your VR device, PC specs, comfort level, etc. For example, you can change your graphics quality, sound volume, controller sensitivity, teleportation speed, etc. You can also enable or disable certain features such as subtitles, hints, blood effects, etc.
                  • -
                  • Enjoy the story and the graphics of the game. Don't forget to appreciate the story and the graphics of Blunt Force. The game has a rich history and a thrilling conclusion that will keep you hooked until the end. The game also has stunning graphics that will make you feel like you are really there in both timelines. Pay attention to the details and immerse yourself in the world of Blunt Force.
                  • -
                  -

                  Conclusion

                  -

                  In conclusion, Blunt Force is a thrilling VR game that follows two parallel storylines before

                  You can download and play Blunt Force on PC with a crack serial key, but is it worth it? Let's see what some of the reviews say about this game.

                  -

                  Blunt Force Reviews

                  -

                  Blunt Force is not yet released, but it has already generated some buzz and expectations among VR fans and WW2 enthusiasts. Here are some of the reviews from different sources:

                  -
                    -
                  • Gamepressure.com rated Blunt Force 7.8 out of 10 based on 12 votes. They praised the game for its innovative concept of two parallel storylines, its immersive VR gameplay, and its smooth transitions between time periods and locations. They also compared it to other VR games such as Medal of Honor: Above and Beyond, Sniper Elite VR, and ARKTIKA.1.
                  • -
                  • Gameloop.com gave Blunt Force a positive rating based on 128 ratings. They highlighted the game's rich history and enhanced gameplay, as well as its adventure and shooter modes with different challenges. They also provided a guide on how to download Blunt Force for PC with GameLoop emulator.
                  • -
                  • Steam has not yet released Blunt Force, but it has already attracted some attention from curators and users. The game has 11 curators who have reviewed it positively, and over 1,000 users who have added it to their wishlist. The game's official website also features a trailer and some screenshots that showcase its graphics and features.
                  • -
                  • IMDb has one user review for Blunt Force Trauma, a 2015 movie that has nothing to do with Blunt Force, but has a similar name. The reviewer gave the movie a 6 out of 10 rating and said that it was poorly acted, looked cheap, and lacked engaging characters. He also said that the movie's central premise was so good that it just about carried the film.
                  • -
                  -

                  Conclusion

                  -

                  In conclusion, Blunt Force is a thrilling VR game that follows two parallel storylines before and during WW2. You can download and play it on PC with a crack serial key, but you might face some risks and drawbacks. You might also want to wait for the official release and support the developers instead. Either way, you can expect a unique and immersive experience that will make you feel like you are really there in both timelines. Here are some FAQs that might help you decide:

                  -
                    -
                  1. When will Blunt Force be released? According to Steam, Blunt Force will be released in 2023. However, this date might change depending on the development progress and other factors.
                  2. -
                  3. How much will Blunt Force cost? The price of Blunt Force has not been announced yet, but you can expect it to be similar to other VR games in the market. For comparison, Medal of Honor: Above and Beyond costs $59.99 on Steam.
                  4. -
                  5. What are the system requirements for Blunt Force? According to Gamepressure.com, the minimum system requirements for Blunt Force are: Intel Core i5-4590 3.3 GHz, 4 GB RAM, graphic card 3 GB GeForce GTX 1060 or better, 4 GB HDD, Windows 7 64-bit. The recommended system requirements are: Intel Core i5-6400 2.7 GHz, 4 GB RAM, graphic card 8 GB GeForce GTX 1070 or better, 4 GB HDD, Windows 10 64-bit.
                  6. -
                  7. Is Blunt Force based on a true story? No, Blunt Force is not based on a true story, but it is inspired by real events and locations from WW2. The game will feature historical characters and scenarios that will make you feel like you are part of history.
                  8. -
                  9. Is Blunt Force suitable for children? No, Blunt Force is not suitable for children. The game has a lot of violence, blood, gore, and mature themes that are not appropriate for young audiences. The game also requires a VR device that might cause motion sickness or discomfort for some users.
                  10. -
                  -

                  I hope you enjoyed this article and learned something new about Blunt Force. If you have any questions or comments, feel free to leave them below. Thank you for reading!

                  -

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Discover the Benefits of ViSoft Premiums Real Time Visualization and VR Technology.md b/spaces/raedeXanto/academic-chatgpt-beta/Discover the Benefits of ViSoft Premiums Real Time Visualization and VR Technology.md deleted file mode 100644 index d71258270e6944c834b36a60417395b231e61511..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Discover the Benefits of ViSoft Premiums Real Time Visualization and VR Technology.md +++ /dev/null @@ -1,98 +0,0 @@ -
                  -

                  What is ViSoft Premium?

                  -

                  ViSoft Premium is a professional planning and customer consulting software that depicts every project precisely and realistically. It is specially designed for tiles and sanitary ware, and it contains the world's largest tile and sanitary database with over 500 manufacturers and 400,000 products. ViSoft Premium's design interface is clearly structured and can be understood intuitively. It is easy to get started, as no CAD skills are required. With ViSoft Premium, you can create stunning 3D bathroom projects in minutes, using intelligent planning assistants, automatic functions, snap function, and real time visualization. You can also impress your customers with VR glasses, photo tuning, online sharing, and mobile apps. ViSoft Premium is the ultimate software for planning and customer consultation in the tile and sanitary industry.

                  -

                  Why use ViSoft Premium?

                  -

                  ViSoft Premium offers many benefits for planning and customer consultation. Here are some of them:

                  -




                  -
                    -
                  • It saves you time and effort. You can design a bathroom in just 5 minutes, using quick functions, drag and drop, automatic layouts, product variants, etc. You don't need to worry about technical details or calculations, as ViSoft Premium does them for you.
                  • -
                  • It supports your creativity and inspiration. You can choose from a wide range of products, colors, textures, materials, lighting effects, etc. You can also customize your design with your own photos, logos, texts, etc.
                  • -
                  • It enhances your presentation and communication. You can show your project in different views, angles, perspectives, etc. You can also use VR glasses to immerse your customers in a realistic 3D environment. You can edit and enhance your photos with Photo Tuning module. You can share your project online with VISOFT LIVE module. You can use mobile apps to plan and present on the go.
                  • -
                  • It increases your sales and customer satisfaction. You can showcase your expertise and professionalism with ViSoft Premium. You can also involve your customers in the planning process and let them see their dream bathroom come true.
                  • -
                  -

                  How to get started with ViSoft Premium?

                  -

                  To get started with ViSoft Premium, you need to follow these steps:

                  -
                    -
                  1. Download the demo version from the official website. You can also request a free trial license or order a full version.
                  2. -
                  3. Install the software on your PC. You need a Windows operating system (Windows 10 recommended) and a graphics card that supports OpenGL 4.5 or higher.
                  4. -
                  5. Activate the software with your license key. You will receive an email with your license key after you order or request a trial.
                  6. -
                  7. Update the software regularly to get the latest features and improvements.
                  8. -
                  -

                  How to design a bathroom with ViSoft Premium?

                  -

                  To design a bathroom with ViSoft Premium, you need to use some basic functions and tools that are available in the software. Here are some of them:

                  -

                  How to select products and rooms?

                  -

                  To select products and rooms for your project, you can use the drag and drop function. You can access the product database from the left side panel or from the top menu bar. You can browse by categories, manufacturers, favorites, etc. You can also search by keywords or filters. To add a product or a room to your project, simply drag it from the database panel to the design area. You can also use product favorites to save your preferred products for future use.

                  -

                  -

                  How to use planning assistants?

                  -

                  To use planning assistants for your project, you can use the automatic functions and proactive assistants that are available in the software. They help you with various tasks such as installing sanitary ware, tiling walls and floors, adding accessories, etc. For example:

                  -
                    -
                  • The bathtub assistant helps you build in bathtubs automatically.
                  • -
                  • The work surface assistant helps you position natural stone slabs.
                  • -
                  • The border assistant helps you lay borders easily.
                  • -
                  • The accessory set helps you insert accessories as a set.
                  • -
                  -

                  How to use snap function?

                  -

                  To use snap function for your project, you can use the logical orientation points that are available in the software. They help you position objects precisely and quickly. For example:

                  -
                    -
                  • The sanitary installations snap onto the wall automatically in perfect orientation.
                  • -
                  • The taps snap directly to the correct positions on the sanitary installations.
                  • -
                  • The starting position for tiling can be changed easily by moving the mouse to the right spot on the wall.
                  • -
                  -

                  How to use 3D visualization?

                  -

                  To use 3D visualization for your project, you can use the real time graphics that are available in the software. They allow you to see your project in different modes such as wireframe mode, solid mode, texture mode, etc. You can also adjust the lighting effects such as ambient light, spot light, sun light, etc. You can also use VR glasses to see your project in a realistic 3D environment. To do this, you need to connect your VR glasses (such as Oculus Rift or HTC Vive) to your PC via USB cable or wireless adapter. Then you need to activate VR mode from the top menu bar or press F11 on your keyboard.

                  -

                  What are the advantages of ViSoft Premium 2022?

                  -

                  ViSoft Premium 2022 is the latest version of ViSoft Premium that was released in September 2021. It has some new features and innovations that make it even better than before. Here are some of them:

                  -

                  The best 3D real time graphics in the industry

                  -

                  ViSoft Premium 2022 has improved its 3D graphics engine to provide faster performance, higher quality images, and smoother animations. It also supports ray tracing technology, which creates realistic reflections, shadows, and lighting effects. The new graphics engine also enables direct use of VR glasses without any additional software or hardware. You can experience your project in a lifelike 3D environment with VR glasses.

                  -

                  The world's largest tile and sanitary database

                  -

ViSoft Premium 2022 has updated its tile and sanitary database to include more than 500 manufacturers and 400,000 products. You can access the latest collections and trends from the leading brands in the industry. You can also customize your own products with your own photos, logos, texts, etc. You can also import products from other sources

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Gramatica Portuguesa Jose Maria Relvas O Melhor Livro para Estudar Portugus em PDF.md b/spaces/raedeXanto/academic-chatgpt-beta/Gramatica Portuguesa Jose Maria Relvas O Melhor Livro para Estudar Portugus em PDF.md deleted file mode 100644 index 834062cda3f8ffd7887b9577582806dde405fa7c..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Gramatica Portuguesa Jose Maria Relvas O Melhor Livro para Estudar Portugus em PDF.md +++ /dev/null @@ -1,239 +0,0 @@ - -

                  Gramática Portuguesa by José Maria Relvas: A Comprehensive Guide for Portuguese Learners

                  -

                  If you are learning Portuguese and want to improve your grammar skills, you might be interested in a book called Gramática Portuguesa -by José Maria Relvas. This book is a comprehensive guide that covers all the essential aspects of Portuguese grammar, from phonetics and spelling to syntax and text analysis.

                  -




                  -

                  In this article, we will tell you everything you need to know about this book: what it is, why it is useful for Portuguese learners, how to use it effectively, and how to download it in PDF format.

                  -

                  What is Gramática Portuguesa and why is it useful for Portuguese learners?

                  -

                  Gramática Portuguesa -is a grammar book that was written by José Maria Relvas, a Portuguese linguist and teacher who has more than 40 years of experience in teaching Portuguese as a foreign language.

                  -

                  The book was first published in 1996 by Europress, and it has been revised and updated several times since then. It is designed for basic and intermediate levels of Portuguese, and it follows the guidelines of the Common European Framework of Reference for Languages (CEFR).

                  -

                  The book has many benefits for Portuguese learners, such as:

                  -
                    -
                  • Clear explanations: -The book explains all the grammar rules and exceptions in a simple and concise way, using everyday language and avoiding technical jargon.
                  • -
                  • Practical examples: -The book illustrates all the grammar points with sentences and texts that are relevant and realistic, showing how Portuguese is used in different contexts and registers.
                  • -
                  • Varied exercises: -The book provides a lot of exercises that test and reinforce your grammar knowledge, ranging from multiple choice and fill in the blanks to transformation and text analysis.
                  • -
                  -

                  Main features of Gramática Portuguesa

                  -

                  The book is divided into four parts, each one focusing on a different aspect of Portuguese grammar:

                  -
                    -
                  1. Phonetics and spelling: -This part covers the sounds and symbols of Portuguese, including vowels, consonants, diphthongs, accents, and punctuation marks.
                  2. -
                  3. Morphology: -This part covers the structure and formation of words, including nouns, adjectives, articles, pronouns, verbs, adverbs, prepositions, conjunctions, and interjections.
                  4. -
                  5. Syntax: -This part covers the structure and formation of sentences, including word order, agreement, negation, questions, subordination, coordination, and punctuation.
                  6. -
                  7. Text analysis: -This part covers the structure and formation of texts, including paragraphs, cohesion, coherence, text types, and text functions.
                  8. -
                  -

                  Each part consists of several chapters that cover specific topics within each aspect. For example,

                  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Part | Chapter | Topic |
|------|---------|-------|
| Phonetics and spelling | 1 | Vowels |
| Phonetics and spelling | 2 | Consonants |
| Phonetics and spelling | 3 | Diphthongs |
| Morphology | 4 | Nouns |
| Morphology | 5 | Adjectives |
| Morphology | 6 | Pronouns |
| Syntax | 13 | Negation |
| Text analysis | 19 | Cohesion |
                  -

To give you an idea of how Gramática Portuguesa illustrates grammar points with examples, here are some excerpts from the book:

                  "A palavra 'casa' pode ser um substantivo ou um verbo. Como substantivo significa 'habitação',

                  "A palavra 'casa' pode ser um substantivo ou um verbo. Como substantivo significa 'habitação', e como verbo significa 'casar'." ("The word 'casa' can be a noun or a verb. As a noun it means 'house', and as a verb it means 'to marry'.")
                  - Chapter 4: Nouns (page 35)

                  -


                  -

                  "O adjetivo concorda em género e número com o nome a que se refere. Por exemplo: um carro novo (masculino singular), uma casa nova (feminino singular), uns carros novos (masculino plural), umas casas novas (feminino plural)." ("The adjective agrees in gender and number with the noun it refers to. For example: um carro novo (masculine singular), uma casa nova (feminine singular), uns carros novos (masculine plural), umas casas novas (feminine plural).")
                  - Chapter 5: Adjectives (page 49)

                  -

                  "O pronome pessoal átono pode ocupar três posições na frase: antes do verbo (próclise), depois do verbo (ênclise), ou no meio do verbo (mesóclise). Por exemplo: Ele me viu. (próclise) Ele viu-me. (ênclise) Ele ver-me-á. (mesóclise)" ("The unstressed personal pronoun can occupy three positions in the sentence: before the verb (proclisis), after the verb (enclisis), or in the middle of the verb (mesoclisis). For example: Ele me viu. (proclisis) Ele viu-me. (enclisis) Ele ver-me-á. (mesoclisis)")
                  - Chapter 6: Pronouns (page 63)

                  -

                  How to use Gramática Portuguesa effectively

                  -

                  Now that you know what Gramática Portuguesa -is and what it offers, you might be wondering how to use it effectively to improve your Portuguese grammar skills.

                  -

                  Here are some tips and strategies that can help you:

                  -
                    -
                  • Read the explanations carefully: -Don't skip or skim through the explanations, even if you think you already know the topic. Read them attentively and try to understand the logic and the rules behind each grammar point.
                  • -
                  • Do the exercises regularly: -Don't ignore or postpone the exercises, even if you think they are too easy or too hard. Do them as soon as you finish reading each chapter, and try to do them without looking at the answers or the explanations.
                  • -
                  • Check the answers in the appendix: -Don't cheat or guess the answers, even if you think you are right or wrong. Check them in the appendix at the end of the book, and compare them with your own answers. If you made any mistakes, try to understand why and how to correct them.
                  • -
                  • Use complementary resources: -Don't rely only on Gramática Portuguesa -to learn Portuguese grammar, even if you think it is enough or complete. Use other resources that can help you practice and reinforce your grammar knowledge, such as online platforms, apps, podcasts, etc.
                  • -
                  • Apply your grammar knowledge in real situations: -Don't limit yourself to Gramática Portuguesa -to study Portuguese grammar, even if you think it is useful or interesting. Apply your grammar knowledge in real situations that require you to use Portuguese, such as writing, speaking, listening, reading, etc.
                  • -
                  -

                  How to download Gramática Portuguesa by José Maria Relvas in PDF format

                  -

                  If you are interested in Gramática Portuguesa -and want to have it in your digital device, you might be wondering how to download it in PDF format.

                  -

                  In this section, we will tell you why someone would want to download Gramática Portuguesa -in PDF format, what are the main options for doing so, and how to choose the best option according to your needs and preferences.

                  -

                  Why would someone want to download Gramática Portuguesa in PDF format?

                  -

                  There are many reasons why someone would want to download Gramática Portuguesa -in PDF format, such as:

                  -
                    -
                  • Portability: -Having Gramática Portuguesa -in PDF format allows you to carry it with you wherever you go, without adding any weight or bulk to your luggage. You can access it from your laptop, tablet, smartphone, or e-reader, and read it anytime and anywhere.
                  • -
                  • Accessibility: -Having Gramática Portuguesa -in PDF format allows you to access it easily and quickly, without having to search for a physical copy or a library that has it. You can open it from your device's storage or cloud service, and browse it with a simple click or swipe.
                  • -
                  • Convenience: -Having Gramática Portuguesa -in PDF format allows you to enjoy some features that a physical copy does not have, such as zooming, highlighting, annotating, bookmarking, searching, etc. You can also print out specific pages or chapters that you need or want, and save paper and ink.
                  • -
                  -

                  Some possible scenarios where having Gramática Portuguesa -in PDF format would be helpful are:

                  -
                    -
                  • Traveling: -If you are traveling to a Portuguese-speaking country or region, having Gramática Portuguesa -in PDF format can help you review and practice your grammar skills on the go, without having to carry a heavy book with you.
                  • -
                  • Studying abroad: -If you are studying Portuguese abroad in a school or university, having Gramática Portuguesa -in PDF format can help you prepare for your classes and exams, without having to buy or borrow a physical copy from a bookstore or library.
                  • -
                  • Working remotely: -If you are working remotely in a job that requires you to use Portuguese, having Gramática Portuguesa -in PDF format can help you communicate effectively and professionally with your clients and colleagues, without having to rely on online translators or dictionaries.
                  • -
                  -

                  Main options for downloading Gramática Portuguesa in PDF format

                  -

                  There are several options for downloading Gramática Portuguesa -in PDF format, but not all of them are equally reliable or convenient. Here are the main options that you can consider:

                  -
                    -
                  • Official website: -You can download Gramática Portuguesa -in PDF format from the official website of Europress, the publisher of the book. This option is the most trustworthy and legal, but it might not be the cheapest or the easiest.
                  • -
                  • Online bookstores: -You can download Gramática Portuguesa -in PDF format from some online bookstores that sell e-books, such as Bertrand. This option is also trustworthy and legal, but it might not be the cheapest or the most available.
                  • -
                  • File-sharing platforms: -You can download Gramática Portuguesa -in PDF format from some file-sharing platforms that offer free downloads, such as Google Books. This option might be the cheapest and the easiest, but it might not be the most trustworthy or legal.
                  • -
                  -

                  To help you compare the main features of each option, here is a table that summarizes them:

                  -
                  - - - - - - - - - - - - - - - - - - - - - -
| Option | Availability | Price | Quality | Legality |
|--------|--------------|-------|---------|----------|
| Official website | High | High | High | High |
| Online bookstores | Medium | Medium | High | High |
| File-sharing platforms | Low | Low | Low | Low |
                  -

                  How to choose the best option for downloading Gramática Portuguesa in PDF format

                  -

                  Now that you know what are the main options for downloading Gramática Portuguesa -in PDF format, you might be wondering how to choose the best option for you.

                  -

                  The answer depends on your needs and preferences, but here are some criteria that you can use to make your decision:

                  -
                    -
                  • Budget: -If you have a limited budget or want to save money, you might prefer the file-sharing platforms option, as it is usually free or very cheap. However, you should be aware of the potential risks and drawbacks of this option, such as viruses, malware, copyright infringement, etc.
                  • -
                  • Quality: -If you care about the quality and accuracy of the PDF file, you might prefer the official website or the online bookstores option, as they offer high-quality and updated versions of the book. However, you should be prepared to pay a higher price for this option, and to check the availability and compatibility of the file.
                  • -
                  • Convenience: -If you value convenience and ease of access, you might prefer the file-sharing platforms or the online bookstores option, as they allow you to download the PDF file quickly and easily from any device. However, you should be careful about the reliability and legality of this option, and to verify the quality and completeness of the file.
                  • -
                  -

In conclusion, there is no one best option for downloading Gramática Portuguesa in PDF format, as each option has its pros and cons. You should weigh them carefully and choose the one that suits you best.

                  Conclusion

                  -

                  In this article, we have shown you everything you need to know about Gramática Portuguesa - by José Maria Relvas: a comprehensive guide for Portuguese learners that covers all the essential aspects of Portuguese grammar. We have also shown you how to use it effectively to improve your grammar skills, and how to download it in PDF format from different sources. We hope that this article has been useful and informative for you, and that you have learned something new and interesting about Portuguese grammar. We also hope that you have decided to download Gramática Portuguesa - in PDF format and start using it to enhance your Portuguese learning experience.

                  Frequently Asked Questions (FAQs)

                  -

                  Here are some FAQs that answer common questions or doubts that readers might have about Gramática Portuguesa - or its PDF download options:

                  -
                    -
                  1. What level of Portuguese do I need to use Gramática Portuguesa?
                    You need to have at least a basic level of Portuguese (A1-A2) to use Gramática Portuguesa, as it is designed for beginners and intermediate learners. If you have an advanced level of Portuguese (B2-C2), you might find Gramática Portuguesa too easy or too simple for you.
                  2. -
                  3. How long does it take to download Gramática Portuguesa in PDF format?
                    The time it takes to download Gramática Portuguesa in PDF format depends on several factors, such as your internet speed, your device's storage capacity, and the size of the file. However, it usually takes a few minutes or less to download Gramática Portuguesa in PDF format from any source.
                  4. -
                  5. Is it legal to download Gramática Portuguesa in PDF format?
                    The legality of downloading Gramática Portuguesa in PDF format depends on the source and the country where you are. In general, it is legal to download Gramática Portuguesa in PDF format from the official website or the online bookstores, as they have the author's permission and pay royalties. However, it might not be legal to download Gramática Portuguesa in PDF format from file-sharing platforms, as they might violate intellectual property rights.
                  6. -
                  7. Can I print out Gramática Portuguesa in PDF format?
                    You can print out Gramática Portuguesa in PDF format if you want to have a physical copy of the book. However, you should be aware that printing out Gramática Portuguesa in PDF format might consume a lot of paper and ink, and that it might not have the same quality and layout as the original book.
                  8. -
                  9. Can I share Gramática Portuguesa in PDF format with others?
                    You can share Gramática Portuguesa in PDF format with others if you want to help them learn Portuguese grammar. However, you should be respectful and ethical when sharing Gramática Portuguesa in PDF format, and not use it for commercial or illegal purposes.
                  10. -
                  -

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/razielpanic/CompVis-stable-diffusion-v1-4/README.md b/spaces/razielpanic/CompVis-stable-diffusion-v1-4/README.md deleted file mode 100644 index 9ccfeaba7b381756684342f8b58d6fdc1b0602c0..0000000000000000000000000000000000000000 --- a/spaces/razielpanic/CompVis-stable-diffusion-v1-4/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: CompVis Stable Diffusion V1 4 -emoji: 👁 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/rebolforces/jcastles/app.py b/spaces/rebolforces/jcastles/app.py deleted file mode 100644 index 91e6c00ebd1f7087988e7d60e72c057c6b578ed4..0000000000000000000000000000000000000000 --- a/spaces/rebolforces/jcastles/app.py +++ /dev/null @@ -1,45 +0,0 @@ -import gradio as gr -from fastai.vision.all import * -import skimage - -learn = load_learner('export-castles.pkl') - -labels = learn.dls.vocab -def predict(img): - img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - -title = "Japanese Castle (Tenshu) Classifier" -description = '''A castle classifier trained on a small set of Japanese Castles with fastai. -Demo for Gradio and HuggingFace Spaces. Based on blog and demo from Tanisq Abraham. Example images below are from jcastle.info.''' - -# link -article=''' -

                  There are many hundreds of castle sites scattered across Japan; for this demo we have trained on just 9 Tenshu. -Images sourced from my own collection and Bing Image Search. -
                  Learn more about Japanese Castles and their history at -Jcastle - Guide to Japanese Castles. -

                  - -

                  -

                  Tanisq Abraham's Blog post -

                  -

                  ''' - -interpretation='default' #'default' None -examples = ['osaka.jpg','matsumoto.jpg','nagoya.jpg','okayama.jpg','shimabara.jpg'] -enable_queue=True -share = False - -gr.Interface(fn=predict,inputs=gr.inputs.Image(shape=(512, 512)),outputs=gr.outputs.Label(num_top_classes=9),title=title,description=description,article=article,examples=examples,interpretation=interpretation,enable_queue=enable_queue).launch(share=share) diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bosch Esi Tronic Keygen [HOT] 3q.2013.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bosch Esi Tronic Keygen [HOT] 3q.2013.md deleted file mode 100644 index 1f95dcca704b6fb9a20baa520ed3a49943f7f8a7..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bosch Esi Tronic Keygen [HOT] 3q.2013.md +++ /dev/null @@ -1,146 +0,0 @@ -

                  -

                  Bosch Esi Tronic Keygen 3q.2013


                  DOWNLOAD ->>> https://urlgoal.com/2uCKhp



                  - - -In this article, we will show you how to use Bosch Esi Tronic Keygen 3q.2013 to activate and patch Bosch Esi Tronic software. You will need the following: - -- A computer with Windows XP or higher -- A DVD drive or a virtual drive software -- A Bosch Esi Tronic DVD or an ISO file -- A Bosch Esi Tronic Keygen 3q.2013 file -- A Bosch Esi Tronic Patch 3q.2013 file - -The steps are as follows: - -- Install Bosch Esi Tronic software from the DVD or the ISO file. You can choose any language and any region you want. -- Run Bosch Esi Tronic Keygen 3q.2013 file and enter your hardware ID, which you can find in the Bosch Esi Tronic software under "About". Click on "Generate" and copy the activation code. -- Run Bosch Esi Tronic software and enter the activation code when prompted. The software should be activated now. -- Run Bosch Esi Tronic Patch 3q.2013 file and follow the instructions. The patch will update the software and fix some bugs. -- Restart your computer and enjoy your Bosch Esi Tronic software. - -Bosch Esi Tronic Keygen 3q.2013 is a useful tool for automotive professionals who need to access Bosch Esi Tronic software without paying for a subscription. However, it is not legal and may violate Bosch's terms of service. Use it at your own risk and responsibility. - - -In this article, we will show you how to download and install Bosch Esi Tronic software 3Q.2013, which includes the following DVDs: - -- DVD-U: The main DVD that contains the basic software and data for diagnosis and repair of vehicles. -- DVD-U1: The additional DVD that contains the data for trucks, buses and trailers. -- DVD-U2: The optional DVD that contains the data for special vehicles and equipment. -- DVD-1: The first DVD that contains the data for European vehicles. -- DVD-2: The second DVD that contains the data for Asian vehicles. -- DVD-3: The third DVD that contains the data for American vehicles. -- DVD-C: The archive DVD that contains the data for older vehicles from 1986 to 2002. -- DVD-K&W: The archive DVD that contains the data for motorcycles and watercrafts. - -You will need the following: - -- A computer with Windows XP or higher -- A DVD drive or a virtual drive software -- A torrent client software -- A Bosch Esi Tronic Keygen 3q.2013 file -- A Bosch Esi Tronic Patch 3q.2013 file - -The steps are as follows: - -- Download the torrent file that contains all the DVDs from this link: https://mhhauto.com/Thread-Bosch-Esi-201...singel-file -- Open the torrent file with your torrent client software and select the DVDs you want to download. You can choose any language and any region you want. -- After downloading the DVDs, mount them with your virtual drive software or burn them to physical DVDs. -- Install Bosch Esi Tronic software from the DVD-U. You can choose any language and any region you want. -- Run Bosch Esi Tronic Keygen 3q.2013 file and enter your hardware ID, which you can find in the Bosch Esi Tronic software under "About". Click on "Generate" and copy the activation code. -- Run Bosch Esi Tronic software and enter the activation code when prompted. The software should be activated now. -- Run Bosch Esi Tronic Patch 3q.2013 file and follow the instructions. The patch will update the software and fix some bugs. -- Restart your computer and enjoy your Bosch Esi Tronic software 3Q.2013. - -Bosch Esi Tronic Keygen 3q.2013 is a convenient way to get access to the latest version of Bosch Esi Tronic software, which is a comprehensive solution for diagnosing and repairing vehicles. 
However, it is not legal and may violate Bosch's terms of service. Use it at your own risk and responsibility. - - -In this article, we will show you how to run Bosch Esi Tronic software 3Q.2013 on a virtual machine, which is a software that simulates a computer system. This way, you can use Bosch Esi Tronic software without installing it on your physical computer, and avoid any compatibility issues or conflicts with other software. You will need the following: - -- A computer with Windows XP or higher -- A VMware Workstation software or a similar virtualization software -- A Bosch Esi Tronic 2013/3 full disk on VMware file -- A Bosch Esi Tronic Keygen 3q.2013 file -- A Bosch Esi Tronic Patch 3q.2013 file - -The steps are as follows: - -- Download the VMware Workstation software from this link: https://www.vmware.com/products/workstation-pro.html and install it on your computer. -- Download the Bosch Esi Tronic 2013/3 full disk on VMware file from this link: https://mhhauto.com/Thread-Bosch-ESI-tronic-2013-3-full-disk-on-VMware and extract it to a folder on your computer. -- Open the VMware Workstation software and click on "Open a Virtual Machine". Browse to the folder where you extracted the Bosch Esi Tronic 2013/3 full disk on VMware file and select the "Bosch ESI [tronic] 2013_3.vmx" file. Click on "Open". -- Click on "Power on this virtual machine" and wait for the virtual machine to boot up. You will see a Windows XP desktop with Bosch Esi Tronic software icons. -- Run Bosch Esi Tronic Keygen 3q.2013 file and enter your hardware ID, which you can find in the Bosch Esi Tronic software under "About". Click on "Generate" and copy the activation code. -- Run Bosch Esi Tronic software and enter the activation code when prompted. The software should be activated now. -- Run Bosch Esi Tronic Patch 3q.2013 file and follow the instructions. The patch will update the software and fix some bugs. -- Restart the virtual machine and enjoy your Bosch Esi Tronic software 3Q.2013. - -Bosch Esi Tronic Keygen 3q.2013 is a convenient way to get access to the latest version of Bosch Esi Tronic software, which is a comprehensive solution for diagnosing and repairing vehicles. However, it is not legal and may violate Bosch's terms of service. Use it at your own risk and responsibility. - - -In this article, we will show you how to get free activation for Bosch Esi Tronic software 3Q.2013, which will allow you to use the software without paying for a subscription. You will need the following: - -- A computer with Windows XP or higher -- A DVD drive or a virtual drive software -- A Bosch Esi Tronic DVD or an ISO file -- A Bosch Esi Tronic Keygen 3q.2013 file -- A Bosch Esi Tronic Patch 3q.2013 file -- An internet connection - -The steps are as follows: - -- Install Bosch Esi Tronic software from the DVD or the ISO file. You can choose any language and any region you want. -- Run Bosch Esi Tronic Keygen 3q.2013 file and enter your hardware ID, which you can find in the Bosch Esi Tronic software under "About". Click on "Generate" and copy the activation code. -- Run Bosch Esi Tronic software and enter the activation code when prompted. The software should be activated now. -- Run Bosch Esi Tronic Patch 3q.2013 file and follow the instructions. The patch will update the software and fix some bugs. -- Restart your computer and enjoy your Bosch Esi Tronic software 3Q.2013. -- Go to this link: https://mhhauto.com/Thread-ESI-Tronic-3Q...Activation and post your hardware ID and activation code in the thread. 
You will receive a confirmation code from one of the members of the forum. -- Run Bosch Esi Tronic software and enter the confirmation code when prompted. The software should be fully activated now. - -Bosch Esi Tronic Keygen 3q.2013 is a convenient way to get access to the latest version of Bosch Esi Tronic software, which is a comprehensive solution for diagnosing and repairing vehicles. However, it is not legal and may violate Bosch's terms of service. Use it at your own risk and responsibility. - - -In this article, we will show you how to compare different versions of Bosch Esi Tronic software 3Q.2013, and how to choose the best one for your needs. You may have different options depending on the type of vehicles you work on and the region you are located in. You can choose from the following DVDs: - -- DVD-U: The main DVD that contains the basic software and data for diagnosis and repair of vehicles. -- DVD-U1: The additional DVD that contains the data for trucks, buses and trailers. -- DVD-U2: The optional DVD that contains the data for special vehicles and equipment. -- DVD-1: The first DVD that contains the data for European vehicles. -- DVD-2: The second DVD that contains the data for Asian vehicles. -- DVD-3: The third DVD that contains the data for American vehicles. -- DVD-C: The archive DVD that contains the data for older vehicles from 1986 to 2002. -- DVD-K&W: The archive DVD that contains the data for motorcycles and watercrafts. - -Here are some tips for choosing the best version of Bosch Esi Tronic software 3Q.2013 for your needs: - -- If you work on a variety of vehicles from different regions and years, you may want to install all the DVDs to have the most comprehensive coverage of Bosch Esi Tronic software 3Q.2013. However, this will require a lot of disk space and installation time. -- If you work on specific types of vehicles or regions, you may want to install only the DVDs that are relevant to your needs. For example, if you work on European vehicles only, you may want to install only DVD-U and DVD-1. This will save you disk space and installation time. -- If you work on older vehicles or motorcycles and watercrafts, you may want to install only the archive DVDs (DVD-C and DVD-K&W). These DVDs contain data that are not updated in newer versions of Bosch Esi Tronic software 3Q.2013. -- If you are not sure which DVDs to install, you can check the "News" PDF file that comes with each DVD. This file contains information about what is new or updated in each version of Bosch Esi Tronic software 3Q.2013. - -Bosch Esi Tronic Keygen 3q.2013 is a convenient way to get access to the latest version of Bosch Esi Tronic software, which is a comprehensive solution for diagnosing and repairing vehicles. However, it is not legal and may violate Bosch's terms of service. Use it at your own risk and responsibility. - - -In this article, we will show you how to use Bosch Esi Tronic software 3Q.2013 to diagnose and repair vehicles, and how to get the most out of its features. You will need the following: - -- A computer with Windows XP or higher -- A DVD drive or a virtual drive software -- A Bosch Esi Tronic DVD or an ISO file -- A Bosch Esi Tronic Keygen 3q.2013 file -- A Bosch Esi Tronic Patch 3q.2013 file -- A compatible diagnostic interface (such as KTS 540, KTS 570 or KTS 590) -- A vehicle with an OBD port - -The steps are as follows: - -- Install Bosch Esi Tronic software from the DVD or the ISO file. You can choose any language and any region you want. 
-- Run Bosch Esi Tronic Keygen 3q.2013 file and enter your hardware ID, which you can find in the Bosch Esi Tronic software under "About". Click on "Generate" and copy the activation code. -- Run Bosch Esi Tronic software and enter the activation code when prompted. The software should be activated now. -- Run Bosch Esi Tronic Patch 3q.2013 file and follow the instructions. The patch will update the software and fix some bugs. -- Restart your computer and connect your diagnostic interface to your computer and to your vehicle's OBD port. -- Run Bosch Esi Tronic software and select your vehicle's make, model, year and engine type from the menu. -- Choose the diagnostic function you want to perform, such as reading fault codes, clearing fault codes, reading live data, performing actuator tests, performing service functions, etc. -- Follow the instructions on the screen and use the software's features to diagnose and repair your vehicle. - -Bosch Esi Tronic software 3Q.2013 is a powerful and user-friendly tool for diagnosing and repairing vehicles. It covers a wide range of vehicles from different regions and years, and provides detailed information and guidance for each diagnostic function. However, it is not legal and may violate Bosch's terms of service. Use it at your own risk and responsibility.

                  3cee63e6c2
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Driver Sigmatel C Major Audio 3dp Edition V9 12.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Driver Sigmatel C Major Audio 3dp Edition V9 12.md deleted file mode 100644 index 8eb5c1032105246f2e3988000daa6a693be9f293..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Driver Sigmatel C Major Audio 3dp Edition V9 12.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  Driver Sigmatel C Major Audio 3dp Edition V9 12


                  DOWNLOADhttps://urlgoal.com/2uCMno



                  -
                  -MEDIA - SigmaTel - 3DP Edition v9.12 (SigmaTel C-Major Audio) Drivers Download - Update your computer's drivers using DriverMax, the free driver update ... 1fdad05405
                  -
                  -
                  -

                  diff --git a/spaces/renatotn7/teste2/scripts/parse_landmark.py b/spaces/renatotn7/teste2/scripts/parse_landmark.py deleted file mode 100644 index 74e2ff9e130ad4f2395c9666dca3ba78526d7a8a..0000000000000000000000000000000000000000 --- a/spaces/renatotn7/teste2/scripts/parse_landmark.py +++ /dev/null @@ -1,85 +0,0 @@ -import cv2 -import json -import numpy as np -import os -import torch -from basicsr.utils import FileClient, imfrombytes -from collections import OrderedDict - -# ---------------------------- This script is used to parse facial landmarks ------------------------------------- # -# Configurations -save_img = False -scale = 0.5 # 0.5 for official FFHQ (512x512), 1 for others -enlarge_ratio = 1.4 # only for eyes -json_path = 'ffhq-dataset-v2.json' -face_path = 'datasets/ffhq/ffhq_512.lmdb' -save_path = './FFHQ_eye_mouth_landmarks_512.pth' - -print('Load JSON metadata...') -# use the official json file in FFHQ dataset -with open(json_path, 'rb') as f: - json_data = json.load(f, object_pairs_hook=OrderedDict) - -print('Open LMDB file...') -# read ffhq images -file_client = FileClient('lmdb', db_paths=face_path) -with open(os.path.join(face_path, 'meta_info.txt')) as fin: - paths = [line.split('.')[0] for line in fin] - -save_dict = {} - -for item_idx, item in enumerate(json_data.values()): - print(f'\r{item_idx} / {len(json_data)}, {item["image"]["file_path"]} ', end='', flush=True) - - # parse landmarks - lm = np.array(item['image']['face_landmarks']) - lm = lm * scale - - item_dict = {} - # get image - if save_img: - img_bytes = file_client.get(paths[item_idx]) - img = imfrombytes(img_bytes, float32=True) - - # get landmarks for each component - map_left_eye = list(range(36, 42)) - map_right_eye = list(range(42, 48)) - map_mouth = list(range(48, 68)) - - # eye_left - mean_left_eye = np.mean(lm[map_left_eye], 0) # (x, y) - half_len_left_eye = np.max((np.max(np.max(lm[map_left_eye], 0) - np.min(lm[map_left_eye], 0)) / 2, 16)) - item_dict['left_eye'] = [mean_left_eye[0], mean_left_eye[1], half_len_left_eye] - # mean_left_eye[0] = 512 - mean_left_eye[0] # for testing flip - half_len_left_eye *= enlarge_ratio - loc_left_eye = np.hstack((mean_left_eye - half_len_left_eye + 1, mean_left_eye + half_len_left_eye)).astype(int) - if save_img: - eye_left_img = img[loc_left_eye[1]:loc_left_eye[3], loc_left_eye[0]:loc_left_eye[2], :] - cv2.imwrite(f'tmp/{item_idx:08d}_eye_left.png', eye_left_img * 255) - - # eye_right - mean_right_eye = np.mean(lm[map_right_eye], 0) - half_len_right_eye = np.max((np.max(np.max(lm[map_right_eye], 0) - np.min(lm[map_right_eye], 0)) / 2, 16)) - item_dict['right_eye'] = [mean_right_eye[0], mean_right_eye[1], half_len_right_eye] - # mean_right_eye[0] = 512 - mean_right_eye[0] # # for testing flip - half_len_right_eye *= enlarge_ratio - loc_right_eye = np.hstack( - (mean_right_eye - half_len_right_eye + 1, mean_right_eye + half_len_right_eye)).astype(int) - if save_img: - eye_right_img = img[loc_right_eye[1]:loc_right_eye[3], loc_right_eye[0]:loc_right_eye[2], :] - cv2.imwrite(f'tmp/{item_idx:08d}_eye_right.png', eye_right_img * 255) - - # mouth - mean_mouth = np.mean(lm[map_mouth], 0) - half_len_mouth = np.max((np.max(np.max(lm[map_mouth], 0) - np.min(lm[map_mouth], 0)) / 2, 16)) - item_dict['mouth'] = [mean_mouth[0], mean_mouth[1], half_len_mouth] - # mean_mouth[0] = 512 - mean_mouth[0] # for testing flip - loc_mouth = np.hstack((mean_mouth - half_len_mouth + 1, mean_mouth + half_len_mouth)).astype(int) - if save_img: - mouth_img = 
img[loc_mouth[1]:loc_mouth[3], loc_mouth[0]:loc_mouth[2], :] - cv2.imwrite(f'tmp/{item_idx:08d}_mouth.png', mouth_img * 255) - - save_dict[f'{item_idx:08d}'] = item_dict - -print('Save...') -torch.save(save_dict, save_path) diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/cascade_roi_head.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/cascade_roi_head.py deleted file mode 100644 index e17313f20724263864cb8cf068e889ed71822b59..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/roi_heads/cascade_roi_head.py +++ /dev/null @@ -1,631 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -import torch.nn as nn -from mmcv.runner import ModuleList - -from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner, - build_sampler, merge_aug_bboxes, merge_aug_masks, - multiclass_nms) -from ..builder import HEADS, build_head, build_roi_extractor -from .base_roi_head import BaseRoIHead -from .test_mixins import BBoxTestMixin, MaskTestMixin - - -@HEADS.register_module() -class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): - """Cascade roi head including one bbox head and one mask head. - - https://arxiv.org/abs/1712.00726 - """ - - def __init__(self, - num_stages, - stage_loss_weights, - bbox_roi_extractor=None, - bbox_head=None, - mask_roi_extractor=None, - mask_head=None, - shared_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - assert bbox_roi_extractor is not None - assert bbox_head is not None - assert shared_head is None, \ - 'Shared head is not supported in Cascade RCNN anymore' - - self.num_stages = num_stages - self.stage_loss_weights = stage_loss_weights - super(CascadeRoIHead, self).__init__( - bbox_roi_extractor=bbox_roi_extractor, - bbox_head=bbox_head, - mask_roi_extractor=mask_roi_extractor, - mask_head=mask_head, - shared_head=shared_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) - - def init_bbox_head(self, bbox_roi_extractor, bbox_head): - """Initialize box head and box roi extractor. - - Args: - bbox_roi_extractor (dict): Config of box roi extractor. - bbox_head (dict): Config of box in box head. - """ - self.bbox_roi_extractor = ModuleList() - self.bbox_head = ModuleList() - if not isinstance(bbox_roi_extractor, list): - bbox_roi_extractor = [ - bbox_roi_extractor for _ in range(self.num_stages) - ] - if not isinstance(bbox_head, list): - bbox_head = [bbox_head for _ in range(self.num_stages)] - assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages - for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): - self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor)) - self.bbox_head.append(build_head(head)) - - def init_mask_head(self, mask_roi_extractor, mask_head): - """Initialize mask head and mask roi extractor. - - Args: - mask_roi_extractor (dict): Config of mask roi extractor. - mask_head (dict): Config of mask in mask head. 
- """ - self.mask_head = nn.ModuleList() - if not isinstance(mask_head, list): - mask_head = [mask_head for _ in range(self.num_stages)] - assert len(mask_head) == self.num_stages - for head in mask_head: - self.mask_head.append(build_head(head)) - if mask_roi_extractor is not None: - self.share_roi_extractor = False - self.mask_roi_extractor = ModuleList() - if not isinstance(mask_roi_extractor, list): - mask_roi_extractor = [ - mask_roi_extractor for _ in range(self.num_stages) - ] - assert len(mask_roi_extractor) == self.num_stages - for roi_extractor in mask_roi_extractor: - self.mask_roi_extractor.append( - build_roi_extractor(roi_extractor)) - else: - self.share_roi_extractor = True - self.mask_roi_extractor = self.bbox_roi_extractor - - def init_assigner_sampler(self): - """Initialize assigner and sampler for each stage.""" - self.bbox_assigner = [] - self.bbox_sampler = [] - if self.train_cfg is not None: - for idx, rcnn_train_cfg in enumerate(self.train_cfg): - self.bbox_assigner.append( - build_assigner(rcnn_train_cfg.assigner)) - self.current_stage = idx - self.bbox_sampler.append( - build_sampler(rcnn_train_cfg.sampler, context=self)) - - def forward_dummy(self, x, proposals): - """Dummy forward function.""" - # bbox head - outs = () - rois = bbox2roi([proposals]) - if self.with_bbox: - for i in range(self.num_stages): - bbox_results = self._bbox_forward(i, x, rois) - outs = outs + (bbox_results['cls_score'], - bbox_results['bbox_pred']) - # mask heads - if self.with_mask: - mask_rois = rois[:100] - for i in range(self.num_stages): - mask_results = self._mask_forward(i, x, mask_rois) - outs = outs + (mask_results['mask_pred'], ) - return outs - - def _bbox_forward(self, stage, x, rois): - """Box head forward function used in both training and testing.""" - bbox_roi_extractor = self.bbox_roi_extractor[stage] - bbox_head = self.bbox_head[stage] - bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], - rois) - # do not support caffe_c4 model anymore - cls_score, bbox_pred = bbox_head(bbox_feats) - - bbox_results = dict( - cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) - return bbox_results - - def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, - gt_labels, rcnn_train_cfg): - """Run forward function and calculate loss for box head in training.""" - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward(stage, x, rois) - bbox_targets = self.bbox_head[stage].get_targets( - sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) - loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - - bbox_results.update( - loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) - return bbox_results - - def _mask_forward(self, stage, x, rois): - """Mask head forward function used in both training and testing.""" - mask_roi_extractor = self.mask_roi_extractor[stage] - mask_head = self.mask_head[stage] - mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], - rois) - # do not support caffe_c4 model anymore - mask_pred = mask_head(mask_feats) - - mask_results = dict(mask_pred=mask_pred) - return mask_results - - def _mask_forward_train(self, - stage, - x, - sampling_results, - gt_masks, - rcnn_train_cfg, - bbox_feats=None): - """Run forward function and calculate loss for mask head in - training.""" - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - mask_results = self._mask_forward(stage, x, pos_rois) - - 
mask_targets = self.mask_head[stage].get_targets( - sampling_results, gt_masks, rcnn_train_cfg) - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], - mask_targets, pos_labels) - - mask_results.update(loss_mask=loss_mask) - return mask_results - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None): - """ - Args: - x (list[Tensor]): list of multi-level img features. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - proposals (list[Tensors]): list of region proposals. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - losses = dict() - for i in range(self.num_stages): - self.current_stage = i - rcnn_train_cfg = self.train_cfg[i] - lw = self.stage_loss_weights[i] - - # assign gts and sample proposals - sampling_results = [] - if self.with_bbox or self.with_mask: - bbox_assigner = self.bbox_assigner[i] - bbox_sampler = self.bbox_sampler[i] - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - - for j in range(num_imgs): - assign_result = bbox_assigner.assign( - proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], - gt_labels[j]) - sampling_result = bbox_sampler.sample( - assign_result, - proposal_list[j], - gt_bboxes[j], - gt_labels[j], - feats=[lvl_feat[j][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - - # bbox head forward and loss - bbox_results = self._bbox_forward_train(i, x, sampling_results, - gt_bboxes, gt_labels, - rcnn_train_cfg) - - for name, value in bbox_results['loss_bbox'].items(): - losses[f's{i}.{name}'] = ( - value * lw if 'loss' in name else value) - - # mask head forward and loss - if self.with_mask: - mask_results = self._mask_forward_train( - i, x, sampling_results, gt_masks, rcnn_train_cfg, - bbox_results['bbox_feats']) - for name, value in mask_results['loss_mask'].items(): - losses[f's{i}.{name}'] = ( - value * lw if 'loss' in name else value) - - # refine bboxes - if i < self.num_stages - 1: - pos_is_gts = [res.pos_is_gt for res in sampling_results] - # bbox_targets is a tuple - roi_labels = bbox_results['bbox_targets'][0] - with torch.no_grad(): - cls_score = bbox_results['cls_score'] - if self.bbox_head[i].custom_activation: - cls_score = self.bbox_head[i].loss_cls.get_activation( - cls_score) - - # Empty proposal. - if cls_score.numel() == 0: - break - - roi_labels = torch.where( - roi_labels == self.bbox_head[i].num_classes, - cls_score[:, :-1].argmax(1), roi_labels) - proposal_list = self.bbox_head[i].refine_bboxes( - bbox_results['rois'], roi_labels, - bbox_results['bbox_pred'], pos_is_gts, img_metas) - - return losses - - def simple_test(self, x, proposal_list, img_metas, rescale=False): - """Test without augmentation. 
- - Args: - x (tuple[Tensor]): Features from upstream network. Each - has shape (batch_size, c, h, w). - proposal_list (list(Tensor)): Proposals from rpn head. - Each has shape (num_proposals, 5), last dimension - 5 represent (x1, y1, x2, y2, score). - img_metas (list[dict]): Meta information of images. - rescale (bool): Whether to rescale the results to - the original image. Default: True. - - Returns: - list[list[np.ndarray]] or list[tuple]: When no mask branch, - it is bbox results of each image and classes with type - `list[list[np.ndarray]]`. The outer list - corresponds to each image. The inner list - corresponds to each class. When the model has mask branch, - it contains bbox results and mask results. - The outer list corresponds to each image, and first element - of tuple is bbox results, second element is mask results. - """ - assert self.with_bbox, 'Bbox head must be implemented.' - num_imgs = len(proposal_list) - img_shapes = tuple(meta['img_shape'] for meta in img_metas) - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - # "ms" in variable names means multi-stage - ms_bbox_result = {} - ms_segm_result = {} - ms_scores = [] - rcnn_test_cfg = self.test_cfg - - rois = bbox2roi(proposal_list) - - if rois.shape[0] == 0: - # There is no proposal in the whole batch - bbox_results = [[ - np.zeros((0, 5), dtype=np.float32) - for _ in range(self.bbox_head[-1].num_classes) - ]] * num_imgs - - if self.with_mask: - mask_classes = self.mask_head[-1].num_classes - segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - results = list(zip(bbox_results, segm_results)) - else: - results = bbox_results - - return results - - for i in range(self.num_stages): - bbox_results = self._bbox_forward(i, x, rois) - - # split batch bbox prediction back to each image - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - num_proposals_per_img = tuple( - len(proposals) for proposals in proposal_list) - rois = rois.split(num_proposals_per_img, 0) - cls_score = cls_score.split(num_proposals_per_img, 0) - if isinstance(bbox_pred, torch.Tensor): - bbox_pred = bbox_pred.split(num_proposals_per_img, 0) - else: - bbox_pred = self.bbox_head[i].bbox_pred_split( - bbox_pred, num_proposals_per_img) - ms_scores.append(cls_score) - - if i < self.num_stages - 1: - if self.bbox_head[i].custom_activation: - cls_score = [ - self.bbox_head[i].loss_cls.get_activation(s) - for s in cls_score - ] - refine_rois_list = [] - for j in range(num_imgs): - if rois[j].shape[0] > 0: - bbox_label = cls_score[j][:, :-1].argmax(dim=1) - refined_rois = self.bbox_head[i].regress_by_class( - rois[j], bbox_label, bbox_pred[j], img_metas[j]) - refine_rois_list.append(refined_rois) - rois = torch.cat(refine_rois_list) - - # average scores of each image by stages - cls_score = [ - sum([score[i] for score in ms_scores]) / float(len(ms_scores)) - for i in range(num_imgs) - ] - - # apply bbox post-processing to each image individually - det_bboxes = [] - det_labels = [] - for i in range(num_imgs): - det_bbox, det_label = self.bbox_head[-1].get_bboxes( - rois[i], - cls_score[i], - bbox_pred[i], - img_shapes[i], - scale_factors[i], - rescale=rescale, - cfg=rcnn_test_cfg) - det_bboxes.append(det_bbox) - det_labels.append(det_label) - - bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], - self.bbox_head[-1].num_classes) - for i in range(num_imgs) - ] - ms_bbox_result['ensemble'] = bbox_results - - if 
self.with_mask: - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - mask_classes = self.mask_head[-1].num_classes - segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - else: - if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i][:, :4] - for i in range(len(det_bboxes)) - ] - mask_rois = bbox2roi(_bboxes) - num_mask_rois_per_img = tuple( - _bbox.size(0) for _bbox in _bboxes) - aug_masks = [] - for i in range(self.num_stages): - mask_results = self._mask_forward(i, x, mask_rois) - mask_pred = mask_results['mask_pred'] - # split batch mask prediction back to each image - mask_pred = mask_pred.split(num_mask_rois_per_img, 0) - aug_masks.append([ - m.sigmoid().cpu().detach().numpy() for m in mask_pred - ]) - - # apply mask post-processing to each image individually - segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - segm_results.append( - [[] - for _ in range(self.mask_head[-1].num_classes)]) - else: - aug_mask = [mask[i] for mask in aug_masks] - merged_masks = merge_aug_masks( - aug_mask, [[img_metas[i]]] * self.num_stages, - rcnn_test_cfg) - segm_result = self.mask_head[-1].get_seg_masks( - merged_masks, _bboxes[i], det_labels[i], - rcnn_test_cfg, ori_shapes[i], scale_factors[i], - rescale) - segm_results.append(segm_result) - ms_segm_result['ensemble'] = segm_results - - if self.with_mask: - results = list( - zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) - else: - results = ms_bbox_result['ensemble'] - - return results - - def aug_test(self, features, proposal_list, img_metas, rescale=False): - """Test with augmentations. - - If rescale is False, then returned bboxes and masks will fit the scale - of imgs[0]. 
- """ - rcnn_test_cfg = self.test_cfg - aug_bboxes = [] - aug_scores = [] - for x, img_meta in zip(features, img_metas): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - - proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, - scale_factor, flip, flip_direction) - # "ms" in variable names means multi-stage - ms_scores = [] - - rois = bbox2roi([proposals]) - - if rois.shape[0] == 0: - # There is no proposal in the single image - aug_bboxes.append(rois.new_zeros(0, 4)) - aug_scores.append(rois.new_zeros(0, 1)) - continue - - for i in range(self.num_stages): - bbox_results = self._bbox_forward(i, x, rois) - ms_scores.append(bbox_results['cls_score']) - - if i < self.num_stages - 1: - cls_score = bbox_results['cls_score'] - if self.bbox_head[i].custom_activation: - cls_score = self.bbox_head[i].loss_cls.get_activation( - cls_score) - bbox_label = cls_score[:, :-1].argmax(dim=1) - rois = self.bbox_head[i].regress_by_class( - rois, bbox_label, bbox_results['bbox_pred'], - img_meta[0]) - - cls_score = sum(ms_scores) / float(len(ms_scores)) - bboxes, scores = self.bbox_head[-1].get_bboxes( - rois, - cls_score, - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - aug_bboxes.append(bboxes) - aug_scores.append(scores) - - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - - bbox_result = bbox2result(det_bboxes, det_labels, - self.bbox_head[-1].num_classes) - - if self.with_mask: - if det_bboxes.shape[0] == 0: - segm_result = [[] - for _ in range(self.mask_head[-1].num_classes)] - else: - aug_masks = [] - aug_img_metas = [] - for x, img_meta in zip(features, img_metas): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip, flip_direction) - mask_rois = bbox2roi([_bboxes]) - for i in range(self.num_stages): - mask_results = self._mask_forward(i, x, mask_rois) - aug_masks.append( - mask_results['mask_pred'].sigmoid().cpu().numpy()) - aug_img_metas.append(img_meta) - merged_masks = merge_aug_masks(aug_masks, aug_img_metas, - self.test_cfg) - - ori_shape = img_metas[0][0]['ori_shape'] - dummy_scale_factor = np.ones(4) - segm_result = self.mask_head[-1].get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - rcnn_test_cfg, - ori_shape, - scale_factor=dummy_scale_factor, - rescale=False) - return [(bbox_result, segm_result)] - else: - return [bbox_result] - - def onnx_export(self, x, proposals, img_metas): - - assert self.with_bbox, 'Bbox head must be implemented.' 
- assert proposals.shape[0] == 1, 'Only support one input image ' \ - 'while in exporting to ONNX' - # remove the scores - rois = proposals[..., :-1] - batch_size = rois.shape[0] - num_proposals_per_img = rois.shape[1] - # Eliminate the batch dimension - rois = rois.view(-1, 4) - - # add dummy batch index - rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1) - - max_shape = img_metas[0]['img_shape_for_onnx'] - ms_scores = [] - rcnn_test_cfg = self.test_cfg - - for i in range(self.num_stages): - bbox_results = self._bbox_forward(i, x, rois) - - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - # Recover the batch dimension - rois = rois.reshape(batch_size, num_proposals_per_img, - rois.size(-1)) - cls_score = cls_score.reshape(batch_size, num_proposals_per_img, - cls_score.size(-1)) - bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) - ms_scores.append(cls_score) - if i < self.num_stages - 1: - assert self.bbox_head[i].reg_class_agnostic - new_rois = self.bbox_head[i].bbox_coder.decode( - rois[..., 1:], bbox_pred, max_shape=max_shape) - rois = new_rois.reshape(-1, new_rois.shape[-1]) - # add dummy batch index - rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], - dim=-1) - - cls_score = sum(ms_scores) / float(len(ms_scores)) - bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) - rois = rois.reshape(batch_size, num_proposals_per_img, -1) - det_bboxes, det_labels = self.bbox_head[-1].onnx_export( - rois, cls_score, bbox_pred, max_shape, cfg=rcnn_test_cfg) - - if not self.with_mask: - return det_bboxes, det_labels - else: - batch_index = torch.arange( - det_bboxes.size(0), - device=det_bboxes.device).float().view(-1, 1, 1).expand( - det_bboxes.size(0), det_bboxes.size(1), 1) - rois = det_bboxes[..., :4] - mask_rois = torch.cat([batch_index, rois], dim=-1) - mask_rois = mask_rois.view(-1, 5) - aug_masks = [] - for i in range(self.num_stages): - mask_results = self._mask_forward(i, x, mask_rois) - mask_pred = mask_results['mask_pred'] - aug_masks.append(mask_pred) - max_shape = img_metas[0]['img_shape_for_onnx'] - # calculate the mean of masks from several stage - mask_pred = sum(aug_masks) / len(aug_masks) - segm_results = self.mask_head[-1].onnx_export( - mask_pred, rois.reshape(-1, 4), det_labels.reshape(-1), - self.test_cfg, max_shape) - segm_results = segm_results.reshape(batch_size, - det_bboxes.shape[1], - max_shape[0], max_shape[1]) - return det_bboxes, det_labels, segm_results diff --git a/spaces/ronvolutional/http-server/index.js b/spaces/ronvolutional/http-server/index.js deleted file mode 100644 index da58d658fda06c7aed1a8384db3cd19e5f8f7a3e..0000000000000000000000000000000000000000 --- a/spaces/ronvolutional/http-server/index.js +++ /dev/null @@ -1,126 +0,0 @@ -if (document.location.search.includes('dark-theme=true')) { - document.body.classList.add('dark-theme'); -} - -let cursor = 0; -const RANGE = 5; -const LIMIT = 16_000; - -const textToImage = async (text) => { - const inferenceResponse = await fetch(`infer_biggan?input=${text}`); - const inferenceBlob = await inferenceResponse.blob(); - - return URL.createObjectURL(inferenceBlob); -}; - -const translateText = async (text) => { - const inferResponse = await fetch(`infer_t5?input=${text}`); - const inferJson = await inferResponse.json(); - - return inferJson.output; -}; - -const queryDataset = async (start, end) => { - const queryResponse = await fetch(`query_emotion?start=${start}&end=${end}`); - const queryJson = await queryResponse.json(); 
- - return queryJson.output; -}; - -const updateTable = async (cursor, range = RANGE) => { - const table = document.querySelector('.dataset-output'); - - const fragment = new DocumentFragment(); - - const observations = await queryDataset(cursor, cursor + range); - - for (const observation of observations) { - let row = document.createElement('tr'); - let text = document.createElement('td'); - let emotion = document.createElement('td'); - - text.textContent = observation.text; - emotion.textContent = observation.emotion; - - row.appendChild(text); - row.appendChild(emotion); - fragment.appendChild(row); - } - - table.innerHTML = ''; - - table.appendChild(fragment); - - table.insertAdjacentHTML( - 'afterbegin', - ` - - - - - ` - ); -}; - -const imageGenSelect = document.getElementById('image-gen-input'); -const imageGenImage = document.querySelector('.image-gen-output'); -const textGenForm = document.querySelector('.text-gen-form'); -const tableButtonPrev = document.querySelector('.table-previous'); -const tableButtonNext = document.querySelector('.table-next'); - -imageGenSelect.addEventListener('change', async (event) => { - const value = event.target.value; - - try { - imageGenImage.src = await textToImage(value); - imageGenImage.alt = value + ' generated from BigGAN AI model'; - } catch (err) { - console.error(err); - } -}); - -textGenForm.addEventListener('submit', async (event) => { - event.preventDefault(); - - const textGenInput = document.getElementById('text-gen-input'); - const textGenParagraph = document.querySelector('.text-gen-output'); - - try { - textGenParagraph.textContent = await translateText(textGenInput.value); - } catch (err) { - console.error(err); - } -}); - -tableButtonPrev.addEventListener('click', () => { - cursor = cursor > RANGE ? cursor - RANGE : 0; - - if (cursor < RANGE) { - tableButtonPrev.classList.add('hidden'); - } - if (cursor < LIMIT - RANGE) { - tableButtonNext.classList.remove('hidden'); - } - - updateTable(cursor); -}); - -tableButtonNext.addEventListener('click', () => { - cursor = cursor < LIMIT - RANGE ? cursor + RANGE : cursor; - - if (cursor >= RANGE) { - tableButtonPrev.classList.remove('hidden'); - } - if (cursor >= LIMIT - RANGE) { - tableButtonNext.classList.add('hidden'); - } - - updateTable(cursor); -}); - -textToImage(imageGenSelect.value) - .then((image) => (imageGenImage.src = image)) - .catch(console.error); - -updateTable(cursor) - .catch(console.error); diff --git a/spaces/rorallitri/biomedical-language-models/logs/2 States Full Movie Download 720p 227 Discover the Differences and Similarities of North and South India.md b/spaces/rorallitri/biomedical-language-models/logs/2 States Full Movie Download 720p 227 Discover the Differences and Similarities of North and South India.md deleted file mode 100644 index c7452ac589c71f7e4f5bc7e8c60a80c2960ef5fb..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/2 States Full Movie Download 720p 227 Discover the Differences and Similarities of North and South India.md +++ /dev/null @@ -1,5 +0,0 @@ -
                  -

                  Free Download Kerio Control Firewall 8.6.0 crack, Kerio Control Firewall 8.6.0 . Compatible with Windows x86/x64 Windows Vista, Windows 7, Windows 8, Windows .. (HTML):.kerio.WinRoute, (49.80MB ), 2735, 8900. kerio Control . crack-kerio-winroute-firewall-671 . At Kerio, we make this happen.Kerio 7 3 2 fast .. Kerio Control is an award-winning UTM firewall designed to protect businesses from a comprehensive range of invasive and harmful corporate network threats.Kerio Control 8.6.1 - Multiple Vulnerabilities.. Download Kerio Torrent at TorrentFunk.. Kerio Control Firewall sets new . kerio control 7.3.2 crack; . Operating system Windows 2000 / 2003 32-bit / 2003 64-bit / 2008 32-bit / 2008 64-bit / 7 32 bit .Kerio Control Crack .. ComputerWorks AG Florenz-Strasse 1 e 4142 Mnchenstein www.computerworks.ch 4/36 .New in Kerio VPN Client 8.1.1 Build 1212 Patch 3: Backup configuration to Samepage.io: Loosing configuration of your Kerio Control may equal a natural disaster.. Kerio Control 9.2.5.2619 64-bit Kerio Control is an award-winning UTM firewall designed to protect businesses from a comprehensive range of invasive and crippling corporate network threats.. Kerio VPN Client (64-bit) . Kerio Control provides superior network protection and intelligence that is stable, secure, and simple to manage. d77fe87ee0

                  -

                  kerio control 7.3.2 x64 crack


                  DOWNLOAD ✒ ✒ ✒ https://tinurll.com/2uzllD



                  aaccfb2cb3
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Browser Password Recovery Pro Enterprise 3.5.0.1 UPDATED Crack By Zuket Creation.md b/spaces/rorallitri/biomedical-language-models/logs/Browser Password Recovery Pro Enterprise 3.5.0.1 UPDATED Crack By Zuket Creation.md deleted file mode 100644 index b9cd5e52f895b78c3dba7488125792e4661123bf..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Browser Password Recovery Pro Enterprise 3.5.0.1 UPDATED Crack By Zuket Creation.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  Browser Password Recovery Pro Enterprise 3.5.0.1 Crack By Zuket Creation


                  Download File ►►►►► https://tinurll.com/2uznsM



                  - - d5da3c52bf
                  -
                  -
                  -

                  diff --git a/spaces/rorallitri/biomedical-language-models/logs/Honestech.TVR 2.5 Serial.included-WORKS.FOR.MOST.TV.TUNERS GWm Utorrent [REPACK].md b/spaces/rorallitri/biomedical-language-models/logs/Honestech.TVR 2.5 Serial.included-WORKS.FOR.MOST.TV.TUNERS GWm Utorrent [REPACK].md deleted file mode 100644 index 31fc81ae76ad6e97fe2e698ae9b5a9af7ed8ead9..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Honestech.TVR 2.5 Serial.included-WORKS.FOR.MOST.TV.TUNERS GWm Utorrent [REPACK].md +++ /dev/null @@ -1,6 +0,0 @@ -

                  honestech.TVR 2.5 Serial.included-WORKS.FOR.MOST.TV.TUNERS gWm utorrent


                  Download Ziphttps://tinurll.com/2uzoyC



                  -
                  -There is an old system called UNIX, many suspected that it does nix, but in fact, it does more than all previous systems, and includes amazing unique features. ional ... In addition, a job training component is included in most of the four. ional component and I think you really should be able to use that to learn how to use it. The ional component allows the user to select from a wide range of options. An ional component runs inside another, and sometimes it can be very large like other components. The ional component is a module. ional components may have some additional features than some other components. ional components and their additional components can 8a78ff9644
                  -
                  -
                  -

                  diff --git a/spaces/rorallitri/biomedical-language-models/logs/Koyelaanchal Full Movie In Hindi Download 720p Movie The Dark Side of Indias Precious Fuel Station.md b/spaces/rorallitri/biomedical-language-models/logs/Koyelaanchal Full Movie In Hindi Download 720p Movie The Dark Side of Indias Precious Fuel Station.md deleted file mode 100644 index e81f07edd0daa667a709bc36a52247fccc4f2def..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Koyelaanchal Full Movie In Hindi Download 720p Movie The Dark Side of Indias Precious Fuel Station.md +++ /dev/null @@ -1,5 +0,0 @@ -
                  -

                  download Sunil Shetty New Movies 2018 unlimited Movies and videos Download Here.Sunil Shetty New Movies 2018 Hd,3gp. mp4 320p and More Videos You Can Download Easyly. tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.

                  -

                  Koyelaanchal Full Movie In Hindi Download 720p Movie


                  Download ✦✦✦ https://tinurll.com/2uzmve



                  aaccfb2cb3
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Le avventure di Pinocchio italian movie hd video songs free download learn more about the history and culture of Italy through this beloved story.md b/spaces/rorallitri/biomedical-language-models/logs/Le avventure di Pinocchio italian movie hd video songs free download learn more about the history and culture of Italy through this beloved story.md deleted file mode 100644 index 3e9129872653ddd0c8ce99da962bd122b61cecc0..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Le avventure di Pinocchio italian movie hd video songs free download learn more about the history and culture of Italy through this beloved story.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  Le avventure di Pinocchio italian movie hd video songs free download


                  Download >>> https://tinurll.com/2uznCz



                  -
                  - aaccfb2cb3
                  -
                  -
                  -

                  diff --git a/spaces/rosenthal/chess/chessfenbot/readme.md b/spaces/rosenthal/chess/chessfenbot/readme.md deleted file mode 100644 index 07be58a563f062c4504f39ffa38ec06a8767e795..0000000000000000000000000000000000000000 --- a/spaces/rosenthal/chess/chessfenbot/readme.md +++ /dev/null @@ -1,127 +0,0 @@ -## Cloned from: https://github.com/Elucidation/tensorflow_chessbot/tree/chessfenbot - -TensorFlow Chessbot - /u/ChessFenBot [◕ _ ◕]\* *I make FENs* ---- -## Command Line Interface (CLI) - -### Setting up the virtual environment - -This uses Python 3, pip3 and virtualenv, if you don't have these installed you can use: - -``` -sudo apt-get install python3-pip -sudo pip3 install virtualenv -``` - -Then, create a new virtual environment, source it, and install the dependencies from `requirements.txt`. - -``` -virtualenv venv -source venv/bin/activate -pip3 install -r requirements.txt -``` - -### Running the CLI - -`tensorflow_chessbot.py` contains the library and script for running predictions on images passed by file or url. - -``` -$ ./tensorflow_chessbot.py -h -usage: tensorflow_chessbot.py [-h] [--url URL] [--filepath FILEPATH] - - Predict a chessboard FEN from supplied local image link or URL - - optional arguments: - -h, --help show this help message and exit - --url URL URL of image (ex. http://imgur.com/u4zF5Hj.png) - --filepath FILEPATH filepath to image (ex. u4zF5Hj.png) -``` - -For example to run on the provided `example_input.png` ![example_input](example_input.png) - -``` -./tensorflow_chessbot.py --filepath example_input.png -``` - -Should output something like: - -``` -(venv) $ ./tensorflow_chessbot.py --filepath example_input.png - ---- Prediction on file example_input.png --- - Loading model 'saved_models/frozen_model.pb' - Model restored. -Closing session. -Per-tile certainty: -[[1. 1. 1. 1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1. 1. 1. 1.] - [1. 1. 1. 1. 1. 1. 1. 1.]] -Certainty range [0.999975 - 1], Avg: 0.999997 ---- -Predicted FEN: bn4kN/p5bp/1p3npB/3p4/8/5Q2/PPP2PPP/R3R1K1 -Final Certainty: 100.0% -``` - -Which would be ![predicted](http://www.fen-to-image.com/image/60/bn4kN/p5bp/1p3npB/3p4/8/5Q2/PPP2PPP/R3R1K1.png) - - -## Reddit Bot - -[/u/ChessFenBot](https://www.reddit.com/user/ChessFenBot) will automatically reply to [reddit /r/chess](https://www.reddit.com/r/) new topic image posts that contain detectable online chessboard screenshots. A screenshot either ends in `.png`, `.jpg`, `.gif`, or is an `imgur` link. - -It replies with a [lichess](http://www.lichess.org) analysis link for that layout and a predicted [FEN](https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation). 
- -```py -predictor = ChessboardPredictor() -fen, certainty = predictor.makePrediction('http://imgur.com/u4zF5Hj.png') -print "Predicted FEN: %s" % fen -print "Certainty: %.1f%%" % (certainty*100) -``` - -``` -Certainty range [0.999545 - 1], Avg: 0.999977, Overall: 0.998546 -Predicted FEN: 8/5p2/5k1P/2p4P/1p1p4/8/3K4/8 -Certainty: 99.9% -Done -[Finished in 1.8s] -``` - -ChessFenBot automatically replied to [this reddit post](https://www.reddit.com/r/chess/comments/45osos/very_difficult_find_the_best_move_for_white/d004cg6?context=3), it processed the [screenshot link url](http://i.imgur.com/HnWYt8A.png) and responded with: - -> ChessFenBot [◕ _ ◕]\* *I make FENs* -> -> --- -> -> I attempted to generate a chessboard layout from the posted image, with an overall certainty of **99.9916%**. -> -> FEN: [1nkr4/1p3q1p/pP4pn/P1r5/3N1p2/2b2B1P/5PPB/2RQ1RK1](http://www.fen-to-image.com/image/30/1nkr1111/1p111q1p/pP1111pn/P1r11111/111N1p11/11b11B1P/11111PPB/11RQ1RK1.png) -> -> Here is a link to a [Lichess Analysis](http://www.lichess.org/analysis/1nkr4/1p3q1p/pP4pn/P1r5/3N1p2/2b2B1P/5PPB/2RQ1RK1_w) - White to play -> -> --- -> -> Yes I am a machine learning bot | [`How I work`](https://github.com/Elucidation/tensorflow_chessbot 'Must go deeper') | Reply with a corrected FEN or [Editor link)](http://www.lichess.org/editor/r1b1r1k1/5pp1/p1pR1nNp/8/2B5/2q5/P1P1Q1PP/5R1K) to add to my next training dataset - -## Running with Docker - -Automated build on Docker available at `elucidation/tensorflow_chessbot` - -Populate your own `auth_config.py` which has the form - -```py -USERNAME='' -PASSWORD='' -USER_AGENT='' -``` - - -Then you can download and run the docker image passing this config file using: - -``` -docker run -dt --rm --name cfb -v :/tcb/auth_config.py elucidation/tensorflow_chessbot -``` diff --git a/spaces/runninghsus/lupe-bsoid/app.py b/spaces/runninghsus/lupe-bsoid/app.py deleted file mode 100644 index a5706e2642996887a388f4368710512cf6465b42..0000000000000000000000000000000000000000 --- a/spaces/runninghsus/lupe-bsoid/app.py +++ /dev/null @@ -1,38 +0,0 @@ -import streamlit as st -import deeplabcut -import os -import io -import pathlib -import glob - - -HERE = pathlib.Path(__file__).parent.resolve() - - -st.set_page_config( - page_title="LUPE X B-SOiD", - layout="wide", - menu_items={ - } -) - -left_col, right_col = st.columns(2) -left_expand = left_col.expander('Select a video file:', expanded=True) -uploaded_file = left_expand.file_uploader('Upload video files', - accept_multiple_files=False, type=['avi', 'mp4'], key='video') -temporary_location = False -if uploaded_file is not None: - g = io.BytesIO(uploaded_file.read()) ## BytesIO Object - temporary_location = "./testout_simple.mp4" - with open(temporary_location, 'wb') as out: ## Open temporary file as bytes - out.write(g.read()) ## Read bytes into file - out.close() - -if st.button('analyze pose'): - config_path = os.path.join(HERE, 'bottomup_clear-hsu-2021-09-21/config.yaml') - st.write(f'config file: {config_path}') - with st.spinner('running deeplabcut...'): - deeplabcut.analyze_videos(config_path, [os.path.join(HERE, 'p16_vid1_3min.mp4')], save_as_csv=True) -if st.button('clear all'): - for filename in glob.glob(str.join('', (str(HERE), '/testout_simple*'))): - os.remove(filename) diff --git a/spaces/ruslanmv/Clone-Your-Voice/synthesizer/utils/numbers.py b/spaces/ruslanmv/Clone-Your-Voice/synthesizer/utils/numbers.py deleted file mode 100644 index 1534daa3f6aee11ac5115260b8800f7459348c0c..0000000000000000000000000000000000000000 --- 
a/spaces/ruslanmv/Clone-Your-Voice/synthesizer/utils/numbers.py +++ /dev/null @@ -1,69 +0,0 @@ -import re -import inflect - - -_inflect = inflect.engine() -_comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])") -_decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)") -_pounds_re = re.compile(r"£([0-9\,]*[0-9]+)") -_dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)") -_ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)") -_number_re = re.compile(r"[0-9]+") - - -def _remove_commas(m): - return m.group(1).replace(",", "") - - -def _expand_decimal_point(m): - return m.group(1).replace(".", " point ") - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split(".") - if len(parts) > 2: - return match + " dollars" # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = "dollar" if dollars == 1 else "dollars" - cent_unit = "cent" if cents == 1 else "cents" - return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = "dollar" if dollars == 1 else "dollars" - return "%s %s" % (dollars, dollar_unit) - elif cents: - cent_unit = "cent" if cents == 1 else "cents" - return "%s %s" % (cents, cent_unit) - else: - return "zero dollars" - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return "two thousand" - elif num > 2000 and num < 2010: - return "two thousand " + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + " hundred" - else: - return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ") - else: - return _inflect.number_to_words(num, andword="") - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r"\1 pounds", text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text diff --git a/spaces/sajornad/ZoeDepth/gradio_pano_to_3d.py b/spaces/sajornad/ZoeDepth/gradio_pano_to_3d.py deleted file mode 100644 index fff147876c3d87a625134c5f0739bbcb039e69a7..0000000000000000000000000000000000000000 --- a/spaces/sajornad/ZoeDepth/gradio_pano_to_3d.py +++ /dev/null @@ -1,96 +0,0 @@ -import gradio as gr -import numpy as np -import trimesh -from geometry import create_triangles -from functools import partial -import tempfile - -def depth_edges_mask(depth): - """Returns a mask of edges in the depth map. - Args: - depth: 2D numpy array of shape (H, W) with dtype float32. - Returns: - mask: 2D numpy array of shape (H, W) with dtype bool. - """ - # Compute the x and y gradients of the depth map. - depth_dx, depth_dy = np.gradient(depth) - # Compute the gradient magnitude. - depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2) - # Compute the edge mask. 
- mask = depth_grad > 0.05 - return mask - - -def pano_depth_to_world_points(depth): - """ - 360 depth to world points - given 2D depth is an equirectangular projection of a spherical image - Treat depth as radius - - longitude : -pi to pi - latitude : -pi/2 to pi/2 - """ - - # Convert depth to radius - radius = depth.flatten() - - lon = np.linspace(-np.pi, np.pi, depth.shape[1]) - lat = np.linspace(-np.pi/2, np.pi/2, depth.shape[0]) - - lon, lat = np.meshgrid(lon, lat) - lon = lon.flatten() - lat = lat.flatten() - - # Convert to cartesian coordinates - x = radius * np.cos(lat) * np.cos(lon) - y = radius * np.cos(lat) * np.sin(lon) - z = radius * np.sin(lat) - - pts3d = np.stack([x, y, z], axis=1) - - return pts3d - - -def predict_depth(model, image): - depth = model.infer_pil(image) - return depth - -def get_mesh(model, image, keep_edges=False): - image.thumbnail((1024,1024)) # limit the size of the image - depth = predict_depth(model, image) - pts3d = pano_depth_to_world_points(depth) - - # Create a trimesh mesh from the points - # Each pixel is connected to its 4 neighbors - # colors are the RGB values of the image - - verts = pts3d.reshape(-1, 3) - image = np.array(image) - if keep_edges: - triangles = create_triangles(image.shape[0], image.shape[1]) - else: - triangles = create_triangles(image.shape[0], image.shape[1], mask=~depth_edges_mask(depth)) - colors = image.reshape(-1, 3) - mesh = trimesh.Trimesh(vertices=verts, faces=triangles, vertex_colors=colors) - - # Save as glb - glb_file = tempfile.NamedTemporaryFile(suffix='.glb', delete=False) - glb_path = glb_file.name - mesh.export(glb_path) - return glb_path - -def create_demo(model): - gr.Markdown("### Panorama to 3D mesh") - gr.Markdown("Convert a 360 spherical panorama to a 3D mesh") - gr.Markdown("ZoeDepth was not trained on panoramic images. It doesn't know anything about panoramas or spherical projection. Here, we just treat the estimated depth as radius and some projection errors are expected. 
Nonetheless, ZoeDepth still works surprisingly well on 360 reconstruction.") - - with gr.Row(): - input_image = gr.Image(label="Input Image", type='pil') - result = gr.Model3D(label="3d mesh reconstruction", clear_color=[ - 1.0, 1.0, 1.0, 1.0]) - - checkbox = gr.Checkbox(label="Keep occlusion edges", value=True) - submit = gr.Button("Submit") - submit.click(partial(get_mesh, model), inputs=[input_image, checkbox], outputs=[result]) - examples = gr.Examples(examples=["examples/pano_1.jpeg", "examples/pano_2.jpeg", "examples/pano_3.jpeg"], - inputs=[input_image]) \ No newline at end of file diff --git a/spaces/sanchanhart/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/utils/google_utils.py b/spaces/sanchanhart/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/utils/google_utils.py deleted file mode 100644 index 08cae912ee36d3e989f0a8d18f4aba7f950e7a88..0000000000000000000000000000000000000000 --- a/spaces/sanchanhart/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/utils/google_utils.py +++ /dev/null @@ -1,122 +0,0 @@ -# This file contains google utils: https://cloud.google.com/storage/docs/reference/libraries -# pip install --upgrade google-cloud-storage -# from google.cloud import storage - -import os -import platform -import subprocess -import time -from pathlib import Path - -import torch - - -def gsutil_getsize(url=''): - # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output('gsutil du %s' % url, shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes - - -def attempt_download(weights): - # Attempt to download pretrained weights if not found locally - weights = weights.strip().replace("'", '') - file = Path(weights).name - - msg = weights + ' missing, try downloading from https://github.com/ultralytics/yolov5/releases/' - models = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt'] # available models - - if file in models and not os.path.isfile(weights): - # Google Drive - # d = {'yolov5s.pt': '1R5T6rIyy3lLwgFXNms8whc-387H0tMQO', - # 'yolov5m.pt': '1vobuEExpWQVpXExsJ2w-Mbf3HJjWkQJr', - # 'yolov5l.pt': '1hrlqD1Wdei7UT4OgT785BEk1JwnSvNEV', - # 'yolov5x.pt': '1mM8aZJlWTxOg7BZJvNUMrTnA2AbeCVzS'} - # r = gdrive_download(id=d[file], name=weights) if file in d else 1 - # if r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6: # check - # return - - try: # GitHub - url = 'https://github.com/ultralytics/yolov5/releases/download/v3.1/' + file - print('Downloading %s to %s...' % (url, weights)) - torch.hub.download_url_to_file(url, weights) - assert os.path.exists(weights) and os.path.getsize(weights) > 1E6 # check - except Exception as e: # GCP - print('Download error: %s' % e) - url = 'https://storage.googleapis.com/ultralytics/yolov5/ckpt/' + file - print('Downloading %s to %s...' % (url, weights)) - r = os.system('curl -L %s -o %s' % (url, weights)) # torch.hub.download_url_to_file(url, weights) - finally: - if not (os.path.exists(weights) and os.path.getsize(weights) > 1E6): # check - os.remove(weights) if os.path.exists(weights) else None # remove partial downloads - print('ERROR: Download failure: %s' % msg) - print('') - return - - -def gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco128.zip'): - # Downloads a file from Google Drive. from utils.google_utils import *; gdrive_download() - t = time.time() - - print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... 
' % (id, name), end='') - os.remove(name) if os.path.exists(name) else None # remove existing - os.remove('cookie') if os.path.exists('cookie') else None - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out)) - if os.path.exists('cookie'): # large file - s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name) - else: # small file - s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id) - r = os.system(s) # execute, capture return - os.remove('cookie') if os.path.exists('cookie') else None - - # Error check - if r != 0: - os.remove(name) if os.path.exists(name) else None # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if name.endswith('.zip'): - print('unzipping... ', end='') - os.system('unzip -q %s' % name) # unzip - os.remove(name) # remove zip to free space - - print('Done (%.1fs)' % (time.time() - t)) - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/spaces/scedlatioru/img-to-music/example/Grb Objective Physics Pdf Free _BEST_.md b/spaces/scedlatioru/img-to-music/example/Grb Objective Physics Pdf Free _BEST_.md deleted file mode 100644 index b5a211f7cf2983bc2eb0f56bb45138e77b9dd112..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Grb Objective Physics Pdf Free _BEST_.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  Grb Objective Physics Pdf Free


                  Download Zip ✓✓✓ https://gohhs.com/2uEAA1



                  -
                  -... Books in India. OLX provides the best Free Online Classified Advertising in India. ... 550GRB objective physics for all entrance exam book. Sarai Vrindavan ... 1fdad05405
                  -
                  -
                  -

                  diff --git a/spaces/scedlatioru/img-to-music/example/Vrayadvancedmaterialplugincinema4ddownloadfor146 ((LINK)).md b/spaces/scedlatioru/img-to-music/example/Vrayadvancedmaterialplugincinema4ddownloadfor146 ((LINK)).md deleted file mode 100644 index f554dd4ab5dc3fcba7f419b7fc6f0c6cb2d755f3..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Vrayadvancedmaterialplugincinema4ddownloadfor146 ((LINK)).md +++ /dev/null @@ -1,52 +0,0 @@ -

                  vrayadvancedmaterialplugincinema4ddownloadfor146


                  Download Filehttps://gohhs.com/2uEzHd



                  -
                  ->'; - - } - - function setImmediate(callback) { - - window.setImmediate = callback; - - function available() { - - return!window.jsx; - - function workerReady() { - - if (window.jsx) - - jsxModule('cinema4d'); - - - - if (window.rtsp) { - - function jsxModule(name) { - - if (name === undefined) { - - return null; - - if (!available()) { - - var scriptElement = document.createElement('script'); - - scriptElement.onreadystatechange = function () { - - if (this.readyState === 'loaded' || this.readyState === 'complete') { - - if (window.jsx) - - window.jsx.ready(); - - - - if (window.rtsp) { - - window.rtsp.ready(); - - if (window.setImmediate) { 4fefd39f24
                  -
                  -
                  -

                  diff --git a/spaces/scedlatioru/img-to-music/example/Xforce NEW Keygen Maya 2013 32.md b/spaces/scedlatioru/img-to-music/example/Xforce NEW Keygen Maya 2013 32.md deleted file mode 100644 index 29e1427caef5e5eb3aa56489f58cceea69a167b4..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Xforce NEW Keygen Maya 2013 32.md +++ /dev/null @@ -1,9 +0,0 @@ -
                  -

                  keygen xforce 2014 mac.autocad character generator keygen activator, character creator, mapeat character creator, character generator, character generator keygen. character generator keygen 2014, character creator, character creator, autodesk character generator, character generator. autodesk character generator keygen cracked and activation x64 bits dvd for maya2016 and maya 2013 etc. character generator keygen cracked and activation x64 bits dvd for maya2016 and maya 2013 etc..
                  autodesk character generator keygen cracked and activation x64 bits dvd for maya2016 and maya 2013 etc.

                  -

                  Xforce Keygen Maya 2013 32


                  Download File > https://gohhs.com/2uEzX3



                  -

                  maya xforce 2014.character generator keygen.character creator, autodesk character generator, mapeat character creator. character generator keygen 2014, character creator, character creator. character generator keygen cracked and activation x64 bits dvd for maya2016 and maya 2013 etc..

                  -

                  xforce keygen 32 bits. autodesk cad 2017 with crack.. autodesk maya 2017 for. autodesk cad 2013 / autodesk maya 2013. exe. xforce keygen for autodesk 2011.exe. autodesk autocad / xforce keygen autocad 2013. autodesk cad 2014 (32 bit & 64 bit) with patch & keygen. xf adesk 2014 download.,xf,adesk,2013,x64.exe,tue,mar., 2010,,full,.

                  -

                  xforce keygen crack download. no survey.. xforce.tar.gz or xforce.xz file. 3. extract the xforce.xz file to find a autodesk.rar or autodesk.zip. autodesk acme 32bit.rar. autodesk autocad 2013 32bit. autodesk autocad 2014. autodesk autocad 2013 32bit, autodesk autocad 2013 32bit.rar, autodesk autocad 2013 32bit. -. xforce keygen autocad 2013 x64 crack. click on download button for xforce keygen 2013 crack solution for your pc. xforce crack for autocad 2013. xforce crack 2013. autodesk maya 2013 32bit. xforce crack 2014. xforce crack 2015. xforce crack 2017. xforce crack 2018. xforce crack 2016.

                  -

                  899543212b
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/segments-tobias/conex/espnet2/tts/gst/__init__.py b/spaces/segments-tobias/conex/espnet2/tts/gst/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/sgxz/bingo/src/components/chat-list.tsx b/spaces/sgxz/bingo/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/sgxz/bingo/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
                  - {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
                  - ) -} diff --git a/spaces/shgao/EditAnything/cldm/ddim_hacked.py b/spaces/shgao/EditAnything/cldm/ddim_hacked.py deleted file mode 100644 index 6c040b363ba0705f52509b75437b5ea932c80ec1..0000000000000000000000000000000000000000 --- a/spaces/shgao/EditAnything/cldm/ddim_hacked.py +++ /dev/null @@ -1,316 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor - - -class DDIMSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- dynamic_threshold=None, - ucg_schedule=None, - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): ctmp = ctmp[0] - cbs = ctmp.shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - - elif isinstance(conditioning, list): - for ctmp in conditioning: - if ctmp.shape[0] != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for DDIM sampling is {size}, eta {eta}') - - samples, intermediates = self.ddim_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, - ucg_schedule=ucg_schedule - ) - return samples, intermediates - - @torch.no_grad() - def ddim_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, - ucg_schedule=None): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. 
- mask) * img - - if ucg_schedule is not None: - assert len(ucg_schedule) == len(time_range) - unconditional_guidance_scale = ucg_schedule[i] - - outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold) - img, pred_x0 = outs - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, - dynamic_threshold=None): - b, *_, device = *x.shape, x.device - - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - model_output = self.model.apply_model(x, t, c) - else: - model_t = self.model.apply_model(x, t, c) - model_uncond = self.model.apply_model(x, t, unconditional_conditioning) - model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) - - if self.model.parameterization == "v": - e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) - else: - e_t = model_output - - if score_corrector is not None: - assert self.model.parameterization == "eps", 'not implemented' - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - if self.model.parameterization != "v": - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - else: - pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) - - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - - if dynamic_threshold is not None: - raise NotImplementedError() - - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - @torch.no_grad() - def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, - unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None): - num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] - - assert t_enc <= num_reference_steps - num_steps = t_enc - - if use_original_steps: - alphas_next = self.alphas_cumprod[:num_steps] - alphas = self.alphas_cumprod_prev[:num_steps] - else: - alphas_next = self.ddim_alphas[:num_steps] - alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) - - x_next = x0 - intermediates = [] - inter_steps = [] - for i in tqdm(range(num_steps), desc='Encoding Image'): - t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) - if unconditional_guidance_scale == 1.: - noise_pred = self.model.apply_model(x_next, t, c) - else: - assert unconditional_conditioning is not None - e_t_uncond, noise_pred = torch.chunk( - self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), - torch.cat((unconditional_conditioning, c))), 2) - noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) - - xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next - weighted_noise_pred = alphas_next[i].sqrt() * ( - (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred - x_next = xt_weighted + weighted_noise_pred - if return_intermediates and i % ( - num_steps // return_intermediates) == 0 and i < num_steps - 1: - intermediates.append(x_next) - inter_steps.append(i) - elif return_intermediates and i >= num_steps - 2: - intermediates.append(x_next) - inter_steps.append(i) - if callback: callback(i) - - out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} - if return_intermediates: - out.update({'intermediates': intermediates}) - return x_next, out - - @torch.no_grad() - def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): - # fast, but does not allow for exact reconstruction - # t serves as an index to gather the correct alphas - if use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas - - if noise is None: - noise = torch.randn_like(x0) - return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + - extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) - - @torch.no_grad() - def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, - use_original_steps=False, callback=None): - - timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps - timesteps = timesteps[:t_start] - - time_range = np.flip(timesteps) - total_steps = timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='Decoding image', total=total_steps) - x_dec = x_latent - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) - x_dec, _ = self.p_sample_ddim(x_dec, cond, 
ts, index=index, use_original_steps=use_original_steps, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - if callback: callback(i) - return x_dec \ No newline at end of file diff --git a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/nn/modules/replicate.py b/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/nn/modules/replicate.py deleted file mode 100644 index b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/nn/modules/replicate.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# File : replicate.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import functools - -from torch.nn.parallel.data_parallel import DataParallel - -__all__ = [ - 'CallbackContext', - 'execute_replication_callbacks', - 'DataParallelWithCallback', - 'patch_replication_callback' -] - - -class CallbackContext(object): - pass - - -def execute_replication_callbacks(modules): - """ - Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. - - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Note that, as all modules are isomorphism, we assign each sub-module with a context - (shared among multiple copies of this module on different devices). - Through this context, different copies can share some information. - - We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback - of any slave copies. - """ - master_copy = modules[0] - nr_modules = len(list(master_copy.modules())) - ctxs = [CallbackContext() for _ in range(nr_modules)] - - for i, module in enumerate(modules): - for j, m in enumerate(module.modules()): - if hasattr(m, '__data_parallel_replicate__'): - m.__data_parallel_replicate__(ctxs[j], i) - - -class DataParallelWithCallback(DataParallel): - """ - Data Parallel with a replication callback. - - An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by - original `replicate` function. - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - # sync_bn.__data_parallel_replicate__ will be invoked. - """ - - def replicate(self, module, device_ids): - modules = super(DataParallelWithCallback, self).replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - -def patch_replication_callback(data_parallel): - """ - Monkey-patch an existing `DataParallel` object. Add the replication callback. - Useful when you have customized `DataParallel` implementation. 
- - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) - > patch_replication_callback(sync_bn) - # this is equivalent to - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - """ - - assert isinstance(data_parallel, DataParallel) - - old_replicate = data_parallel.replicate - - @functools.wraps(old_replicate) - def new_replicate(module, device_ids): - modules = old_replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - data_parallel.replicate = new_replicate diff --git a/spaces/shripadbhat/Clinical_Note_Question_Answering/README.md b/spaces/shripadbhat/Clinical_Note_Question_Answering/README.md deleted file mode 100644 index cc74c1405d0bcf81ee29e145e59f683e11882c29..0000000000000000000000000000000000000000 --- a/spaces/shripadbhat/Clinical_Note_Question_Answering/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Clinical Note Question Answering -emoji: ⚡ -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/silvesterjk/Talking_Yak_STT/app.py b/spaces/silvesterjk/Talking_Yak_STT/app.py deleted file mode 100644 index 9d654d36e5662b0cc6127e2572202db773051949..0000000000000000000000000000000000000000 --- a/spaces/silvesterjk/Talking_Yak_STT/app.py +++ /dev/null @@ -1,176 +0,0 @@ -import os -os.system("pip install git+https://github.com/openai/whisper.git") -import gradio as gr -import whisper - -# from share_btn import community_icon_html, loading_icon_html, share_js - -model = whisper.load_model("medium.en") - - -def transcribe(file): - options = dict(task="transcribe", best_of=5) - text = model.transcribe(file, **options)["text"] - return text.strip() - - -css = """ - .gradio-container { - font-family: 'IBM Plex Sans', sans-serif; - } - .gr-button { - color: white; - border-color: black; - background: black; - } - input[type='range'] { - accent-color: black; - } - .dark input[type='range'] { - accent-color: #dfdfdf; - } - .container { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; - } - - .details:hover { - text-decoration: underline; - } - .gr-button { - white-space: nowrap; - } - .gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; - } - .footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } - .prompt h4{ - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; - } - .animate-spin { - animation: spin 1s linear infinite; - } - @keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } - } - #share-btn-container { - 
display: flex; margin-top: 1.5rem !important; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; - } - #share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; - } - #share-btn * { - all: unset; - } -""" - -block = gr.Blocks(css=css) - - - -with block: - gr.HTML( - """ -
                  -
                  - - - - - - - - - - - - - - - - - - - - - - - - - - - -

                  - Talking Yak -

                  -
                  -

                  - Speech to Text Engine [Testing V2] -

                  -
                  - """ - ) - - with gr.Group(): - audio = gr.Audio(source="upload", type="filepath", label="Upload Audio") - #audio = gr.Audio( - # show_label=False, - # source="microphone", - # type="filepath" - - - with gr.Box(): - with gr.Row().style(equal_height=True): - transcribe_button = gr.Button("Transcribe") - - textbox = gr.Textbox(show_label=False) - - transcribe_button.click(transcribe, inputs=[audio], outputs=[textbox],api_name="tystt") - -block.launch() \ No newline at end of file diff --git a/spaces/simonduerr/diffdock/esm/esm/pretrained.py b/spaces/simonduerr/diffdock/esm/esm/pretrained.py deleted file mode 100644 index 360496a7970db644e4a291a03c0023d0fece5b1b..0000000000000000000000000000000000000000 --- a/spaces/simonduerr/diffdock/esm/esm/pretrained.py +++ /dev/null @@ -1,397 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import re -import urllib -import warnings -from argparse import Namespace -from pathlib import Path - -import torch - -import esm -from esm.model.esm2 import ESM2 - - -def _has_regression_weights(model_name): - """Return whether we expect / require regression weights; - Right now that is all models except ESM-1v and ESM-IF""" - return not ("esm1v" in model_name or "esm_if" in model_name) - - -def load_model_and_alphabet(model_name): - if model_name.endswith(".pt"): # treat as filepath - return load_model_and_alphabet_local(model_name) - else: - return load_model_and_alphabet_hub(model_name) - - -def load_hub_workaround(url): - try: - data = torch.hub.load_state_dict_from_url(url, progress=False, map_location="cpu") - except RuntimeError: - # Pytorch version issue - see https://github.com/pytorch/pytorch/issues/43106 - fn = Path(url).name - data = torch.load( - f"{torch.hub.get_dir()}/checkpoints/{fn}", - map_location="cpu", - ) - except urllib.error.HTTPError as e: - raise Exception(f"Could not load {url}, check if you specified a correct model name?") - return data - - -def load_regression_hub(model_name): - url = f"https://dl.fbaipublicfiles.com/fair-esm/regression/{model_name}-contact-regression.pt" - regression_data = load_hub_workaround(url) - return regression_data - - -def _download_model_and_regression_data(model_name): - url = f"https://dl.fbaipublicfiles.com/fair-esm/models/{model_name}.pt" - model_data = load_hub_workaround(url) - if _has_regression_weights(model_name): - regression_data = load_regression_hub(model_name) - else: - regression_data = None - return model_data, regression_data - - -def load_model_and_alphabet_hub(model_name): - model_data, regression_data = _download_model_and_regression_data(model_name) - return load_model_and_alphabet_core(model_name, model_data, regression_data) - - -def load_model_and_alphabet_local(model_location): - """Load from local path. 
The regression weights need to be co-located""" - model_location = Path(model_location) - model_data = torch.load(str(model_location), map_location="cpu") - model_name = model_location.stem - if _has_regression_weights(model_name): - regression_location = str(model_location.with_suffix("")) + "-contact-regression.pt" - regression_data = torch.load(regression_location, map_location="cpu") - else: - regression_data = None - return load_model_and_alphabet_core(model_name, model_data, regression_data) - - -def has_emb_layer_norm_before(model_state): - """Determine whether layer norm needs to be applied before the encoder""" - return any(k.startswith("emb_layer_norm_before") for k, param in model_state.items()) - - -def _load_model_and_alphabet_core_v1(model_data): - import esm # since esm.inverse_folding is imported below, you actually have to re-import esm here - - alphabet = esm.Alphabet.from_architecture(model_data["args"].arch) - - if model_data["args"].arch == "roberta_large": - # upgrade state dict - pra = lambda s: "".join(s.split("encoder_")[1:] if "encoder" in s else s) - prs1 = lambda s: "".join(s.split("encoder.")[1:] if "encoder" in s else s) - prs2 = lambda s: "".join( - s.split("sentence_encoder.")[1:] if "sentence_encoder" in s else s - ) - model_args = {pra(arg[0]): arg[1] for arg in vars(model_data["args"]).items()} - model_state = {prs1(prs2(arg[0])): arg[1] for arg in model_data["model"].items()} - model_state["embed_tokens.weight"][alphabet.mask_idx].zero_() # For token drop - model_args["emb_layer_norm_before"] = has_emb_layer_norm_before(model_state) - model_type = esm.ProteinBertModel - - elif model_data["args"].arch == "protein_bert_base": - - # upgrade state dict - pra = lambda s: "".join(s.split("decoder_")[1:] if "decoder" in s else s) - prs = lambda s: "".join(s.split("decoder.")[1:] if "decoder" in s else s) - model_args = {pra(arg[0]): arg[1] for arg in vars(model_data["args"]).items()} - model_state = {prs(arg[0]): arg[1] for arg in model_data["model"].items()} - model_type = esm.ProteinBertModel - elif model_data["args"].arch == "msa_transformer": - - # upgrade state dict - pra = lambda s: "".join(s.split("encoder_")[1:] if "encoder" in s else s) - prs1 = lambda s: "".join(s.split("encoder.")[1:] if "encoder" in s else s) - prs2 = lambda s: "".join( - s.split("sentence_encoder.")[1:] if "sentence_encoder" in s else s - ) - prs3 = lambda s: s.replace("row", "column") if "row" in s else s.replace("column", "row") - model_args = {pra(arg[0]): arg[1] for arg in vars(model_data["args"]).items()} - model_state = {prs1(prs2(prs3(arg[0]))): arg[1] for arg in model_data["model"].items()} - if model_args.get("embed_positions_msa", False): - emb_dim = model_state["msa_position_embedding"].size(-1) - model_args["embed_positions_msa_dim"] = emb_dim # initial release, bug: emb_dim==1 - - model_type = esm.MSATransformer - - elif "invariant_gvp" in model_data["args"].arch: - import esm.inverse_folding - - model_type = esm.inverse_folding.gvp_transformer.GVPTransformerModel - model_args = vars(model_data["args"]) # convert Namespace -> dict - - def update_name(s): - # Map the module names in checkpoints trained with internal code to - # the updated module names in open source code - s = s.replace("W_v", "embed_graph.embed_node") - s = s.replace("W_e", "embed_graph.embed_edge") - s = s.replace("embed_scores.0", "embed_confidence") - s = s.replace("embed_score.", "embed_graph.embed_confidence.") - s = s.replace("seq_logits_projection.", "") - s = 
s.replace("embed_ingraham_features", "embed_dihedrals") - s = s.replace("embed_gvp_in_local_frame.0", "embed_gvp_output") - s = s.replace("embed_features_in_local_frame.0", "embed_gvp_input_features") - return s - - model_state = { - update_name(sname): svalue - for sname, svalue in model_data["model"].items() - if "version" not in sname - } - - else: - raise ValueError("Unknown architecture selected") - - model = model_type( - Namespace(**model_args), - alphabet, - ) - - return model, alphabet, model_state - - -def _load_model_and_alphabet_core_v2(model_data): - def upgrade_state_dict(state_dict): - """Removes prefixes 'model.encoder.sentence_encoder.' and 'model.encoder.'.""" - prefixes = ["encoder.sentence_encoder.", "encoder."] - pattern = re.compile("^" + "|".join(prefixes)) - state_dict = {pattern.sub("", name): param for name, param in state_dict.items()} - return state_dict - - cfg = model_data["cfg"]["model"] - state_dict = model_data["model"] - state_dict = upgrade_state_dict(state_dict) - alphabet = esm.data.Alphabet.from_architecture("ESM-1b") - model = ESM2( - num_layers=cfg.encoder_layers, - embed_dim=cfg.encoder_embed_dim, - attention_heads=cfg.encoder_attention_heads, - alphabet=alphabet, - token_dropout=cfg.token_dropout, - ) - return model, alphabet, state_dict - - -def load_model_and_alphabet_core(model_name, model_data, regression_data=None): - if regression_data is not None: - model_data["model"].update(regression_data["model"]) - - if model_name.startswith("esm2"): - model, alphabet, model_state = _load_model_and_alphabet_core_v2(model_data) - else: - model, alphabet, model_state = _load_model_and_alphabet_core_v1(model_data) - - expected_keys = set(model.state_dict().keys()) - found_keys = set(model_state.keys()) - - if regression_data is None: - expected_missing = {"contact_head.regression.weight", "contact_head.regression.bias"} - error_msgs = [] - missing = (expected_keys - found_keys) - expected_missing - if missing: - error_msgs.append(f"Missing key(s) in state_dict: {missing}.") - unexpected = found_keys - expected_keys - if unexpected: - error_msgs.append(f"Unexpected key(s) in state_dict: {unexpected}.") - - if error_msgs: - raise RuntimeError( - "Error(s) in loading state_dict for {}:\n\t{}".format( - model.__class__.__name__, "\n\t".join(error_msgs) - ) - ) - if expected_missing - found_keys: - warnings.warn( - "Regression weights not found, predicting contacts will not produce correct results." - ) - - model.load_state_dict(model_state, strict=regression_data is not None) - - return model, alphabet - - -def esm1_t34_670M_UR50S(): - """34 layer transformer model with 670M params, trained on Uniref50 Sparse. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1_t34_670M_UR50S") - - -def esm1_t34_670M_UR50D(): - """34 layer transformer model with 670M params, trained on Uniref50 Dense. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1_t34_670M_UR50D") - - -def esm1_t34_670M_UR100(): - """34 layer transformer model with 670M params, trained on Uniref100. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1_t34_670M_UR100") - - -def esm1_t12_85M_UR50S(): - """12 layer transformer model with 85M params, trained on Uniref50 Sparse. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1_t12_85M_UR50S") - - -def esm1_t6_43M_UR50S(): - """6 layer transformer model with 43M params, trained on Uniref50 Sparse. 
- - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1_t6_43M_UR50S") - - -def esm1b_t33_650M_UR50S(): - """33 layer transformer model with 650M params, trained on Uniref50 Sparse. - This is our best performing model, which will be described in a future publication. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1b_t33_650M_UR50S") - - -def esm_msa1_t12_100M_UR50S(): - warnings.warn( - "This model had a minor bug in the positional embeddings, " - "please use ESM-MSA-1b: esm.pretrained.esm_msa1b_t12_100M_UR50S()", - ) - return load_model_and_alphabet_hub("esm_msa1_t12_100M_UR50S") - - -def esm_msa1b_t12_100M_UR50S(): - return load_model_and_alphabet_hub("esm_msa1b_t12_100M_UR50S") - - -def esm1v_t33_650M_UR90S(): - """33 layer transformer model with 650M params, trained on Uniref90. - This is model 1 of a 5 model ensemble. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1v_t33_650M_UR90S_1") - - -def esm1v_t33_650M_UR90S_1(): - """33 layer transformer model with 650M params, trained on Uniref90. - This is model 1 of a 5 model ensemble. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1v_t33_650M_UR90S_1") - - -def esm1v_t33_650M_UR90S_2(): - """33 layer transformer model with 650M params, trained on Uniref90. - This is model 2 of a 5 model ensemble. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1v_t33_650M_UR90S_2") - - -def esm1v_t33_650M_UR90S_3(): - """33 layer transformer model with 650M params, trained on Uniref90. - This is model 3 of a 5 model ensemble. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1v_t33_650M_UR90S_3") - - -def esm1v_t33_650M_UR90S_4(): - """33 layer transformer model with 650M params, trained on Uniref90. - This is model 4 of a 5 model ensemble. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1v_t33_650M_UR90S_4") - - -def esm1v_t33_650M_UR90S_5(): - """33 layer transformer model with 650M params, trained on Uniref90. - This is model 5 of a 5 model ensemble. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm1v_t33_650M_UR90S_5") - - -def esm_if1_gvp4_t16_142M_UR50(): - """Inverse folding model with 142M params, with 4 GVP-GNN layers, 8 - Transformer encoder layers, and 8 Transformer decoder layers, trained on - CATH structures and 12 million alphafold2 predicted structures from UniRef50 - sequences. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm_if1_gvp4_t16_142M_UR50") - - -def esm2_t6_8M_UR50D(): - """6 layer ESM-2 model with 8M params, trained on UniRef50. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm2_t6_8M_UR50D") - - -def esm2_t12_35M_UR50D(): - """12 layer ESM-2 model with 35M params, trained on UniRef50. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm2_t12_35M_UR50D") - - -def esm2_t30_150M_UR50D(): - """30 layer ESM-2 model with 150M params, trained on UniRef50. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm2_t30_150M_UR50D") - - -def esm2_t33_650M_UR50D(): - """33 layer ESM-2 model with 650M params, trained on UniRef50. - - Returns a tuple of (Model, Alphabet). 
- """ - return load_model_and_alphabet_hub("esm2_t33_650M_UR50D") - - -def esm2_t36_3B_UR50D(): - """36 layer ESM-2 model with 3B params, trained on UniRef50. - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm2_t36_3B_UR50D") - - -def esm2_t48_15B_UR50D(): - """48 layer ESM-2 model with 15B params, trained on UniRef50. - If you have OOM while loading this model, please refer to README - on how to employ FSDP and ZeRO CPU offloading - - Returns a tuple of (Model, Alphabet). - """ - return load_model_and_alphabet_hub("esm2_t48_15B_UR50D") diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bullet Echo How to Unlock New Perks Maps and Game Modes.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bullet Echo How to Unlock New Perks Maps and Game Modes.md deleted file mode 100644 index 531646d14f43e97b23751e8e19d08b26a40e289b..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bullet Echo How to Unlock New Perks Maps and Game Modes.md +++ /dev/null @@ -1,93 +0,0 @@ -
                  -

                  Bullet Echo: A Tactical Team Shooter Game for Mobile Devices

                  -

If you are looking for an exciting and unique shooting game to play on your mobile device, you should check out Bullet Echo, a tactical team shooter that combines stealth, action, and multiplayer modes. In this game, you can choose from dozens of different heroes with unique play styles, guns, and abilities. You can also team up with friends, set a strategy, and be the last team standing when the battle ends. Here is everything you need to know about Bullet Echo.

                  -

                  What is Bullet Echo?

                  -

Bullet Echo is a game developed by ZeptoLab, the creators of C.A.T.S.: Crash Arena Turbo Stars, King of Thieves, and Cut the Rope. It is available for free on Google Play and the App Store.

                  -

                  bullet echo


                  Download ::: https://ssurll.com/2uNXG2



                  -

                  A unique stealth action game with multiplayer modes

                  -

Bullet Echo is not your typical shooting game. In this game, your vision is limited by the beam of a flashlight, but you can hear enemies' steps and shots. You have to use stealth, teamwork, and shooting skills to win. You can also play in different multiplayer modes, such as Team vs Team, Solo, Squad Skirmish, and Battle Royale.

                  -

                  A game with different heroes, abilities, and guns

                  -

Bullet Echo features 21 heroes at launch, with more on the way. Each hero has a unique set of abilities that can help you in different situations. For example, some heroes can heal themselves or their teammates, some can deploy traps or shields, some can increase their speed or damage, and some can even turn invisible or teleport. Each hero also has a unique gun that suits their play style. You can choose from tanks, ambushers, snipers, scouts, and troopers.

                  -

                  A game with various game modes and maps

                  -

                  Bullet Echo offers three main game modes that you gradually unlock as you play. The first one is King of the Hill, where you have to capture and hold a point on the map. The second one is Squad Skirmish, where you have to team up with two other players and fight against five other teams of three players. The third one is Team vs Team, where you have to fight against another team of five players. Each game mode has its own exclusive maps that vary in size and layout.

                  -

                  How to Play Bullet Echo?

                  -

                  Bullet Echo is easy to learn but hard to master. Here are some tips on how to play it.

                  -

                  bullet echo game
                  -bullet echo zeptolab
                  -bullet echo tactical team shooter
                  -bullet echo apk download
                  -bullet echo mod apk
                  -bullet echo pc
                  -bullet echo online
                  -bullet echo discord
                  -bullet echo heroes
                  -bullet echo tips and tricks
                  -bullet echo best hero
                  -bullet echo reddit
                  -bullet echo hack
                  -bullet echo cheats
                  -bullet echo codes
                  -bullet echo review
                  -bullet echo gameplay
                  -bullet echo trailer
                  -bullet echo wiki
                  -bullet echo guide
                  -bullet echo ranks
                  -bullet echo update
                  -bullet echo new heroes
                  -bullet echo squad skirmish
                  -bullet echo king of the hill
                  -bullet echo team vs team
                  -bullet echo battle royale
                  -bullet echo sniper
                  -bullet echo tank
                  -bullet echo ambusher
                  -bullet echo scout
                  -bullet echo trooper
                  -bullet echo maps
                  -bullet echo weapons
                  -bullet echo abilities
                  -bullet echo strategy
                  -bullet echo tournaments
                  -bullet echo missions
                  -bullet echo rewards
                  -bullet echo champions league
                  -bullet echo support
                  -bullet echo facebook
                  -bullet echo instagram
                  -bullet echo twitter
                  -bullet echo youtube
                  -bullet echo twitch
                  -bullet echo google play store
                  -bullet echo app store
                  -bullet echo amazon appstore

                  -

                  Choose your hero and team up with friends

                  -

                  The first thing you have to do is to choose your hero. You can browse through the different heroes and see their stats, abilities, guns, and perks. You can also upgrade your hero by using resources that you collect from playing the game. The level of your hero is matched with the highest level in your party.

                  -

                  The next thing you have to do is to team up with friends or other players online. You can invite your friends from Facebook or Discord or join a random match. Playing with friends gives you a huge tactical advantage as you can communicate and coordinate your actions.

                  -

                  Use your flashlight, sound, and special abilities to locate and eliminate enemies

                  -

Once you enter a match, you have to use your flashlight to see where you are going. However, be careful, as your flashlight also reveals your position to enemies. You can also use sound to locate enemies by listening to their footsteps and shots, and use your special abilities to gain an edge in combat. For example, you can heal yourself or your teammates, deploy traps or shields, increase your speed or damage, and more. Each ability has a cooldown, so use your abilities wisely.

                  -

                  Collect resources, upgrade your hero, and unlock new perks

                  -

                  As you play the game, you can collect resources such as coins, tokens, and cards. You can use these resources to upgrade your hero and make them stronger. You can also unlock new perks that give you passive bonuses such as increased health, damage, or speed. You can equip up to three perks per hero.

                  -

                  Why You Should Play Bullet Echo?

                  -

                  Bullet Echo is a game that offers a lot of fun, accessibility, and challenge. Here are some reasons why you should play it.

                  -

                  It's fun, accessible, and challenging

                  -

Bullet Echo is a game that you can play anytime, anywhere, and with anyone. It has simple controls that you can master in minutes, but it also has a lot of depth and strategy that you can explore for hours. It has fast-paced gameplay that keeps you on your toes, but it also has a stealth element that adds tension and suspense. It has a variety of heroes, abilities, guns, and perks that you can mix and match to suit your play style.

                  -

                  It's designed for team play and strategy

                  -

                  Bullet Echo is a game that encourages team play and strategy. You can team up with friends or other players online and communicate with them using voice chat or text chat. You can also set a strategy before each match by choosing your hero and perks. You can also coordinate your actions during the match by using your abilities and flashlight to help your teammates.

                  -

                  It's constantly updated with new content and features

                  -

                  Bullet Echo is a game that is constantly updated with new content and features. The developers are always adding new heroes, abilities, guns, perks, game modes, maps, events, and more. They are also listening to the feedback from the players and improving the game based on their suggestions.

                  -

                  Conclusion

                  -

                  Bullet Echo is a tactical team shooter for mobile devices that combines stealth, action, and multiplayer modes. You can play it with friends or other players online to have fun, challenge yourself, and improve your skills, and it offers different heroes, abilities, guns, perks, game modes, maps, and more to choose from and customize. It is easy to learn but hard to master. If you are looking for an exciting and unique shooting game for your mobile device, download Bullet Echo today.

                  -

                  FAQs

                  -

                  What are the system requirements for Bullet Echo?

                  -

                  Bullet Echo requires Android 5.0 or later or iOS 10.0 or later to run.

                  -

                  How do I get more resources in Bullet Echo?

                  -

                  You can get more resources in Bullet Echo by playing the game regularly, completing missions and achievements, participating in events and tournaments, opening chests and crates, watching ads, and buying them with real money.

                  -

                  How do I join or create a clan in Bullet Echo?

                  -

                  You can join or create a clan in Bullet Echo by tapping on the clan icon on the main screen. You can search for an existing clan or create your own clan by paying 1000 coins. You can also invite your friends to join your clan.

                  -

                  How do I change my hero's skin in Bullet Echo?

                  -

                  You can change your hero's skin in Bullet Echo by tapping on the hero icon on the main screen. You can see the different skins available for each hero and how to unlock them. Some skins are free while others require coins or tokens.

                  -

                  How do I report a bug or a problem in Bullet Echo?

                  -

                  You can report a bug or a problem in Bullet Echo by tapping on the settings icon on the main screen. You can then tap on the support button and fill out the form with your details and description of the issue.

                  197e85843d
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Dark Days Zombie Survival - Craft Weapons and Equipment to Wipe Out Hordes of Zombies.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Dark Days Zombie Survival - Craft Weapons and Equipment to Wipe Out Hordes of Zombies.md deleted file mode 100644 index bde350437217bb2804ceb78dc5398b890a72ab2f..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Dark Days Zombie Survival - Craft Weapons and Equipment to Wipe Out Hordes of Zombies.md +++ /dev/null @@ -1,78 +0,0 @@ - -

                  Dark Days Zombie Survival: How to Survive the Apocalypse

                  -

                  If you are a fan of zombie survival games, you might have heard of Dark Days Zombie Survival. It is a tactical survival shooter that combines the best parts of shooter and survival games in one. In this game, you have to build your last shelter, gather resources, craft weapons and equipment, and wipe out hordes of zombies starved for human flesh in a post-apocalyptic world. Sounds exciting, right? But how do you survive in this game? What are the best strategies and tips to make it to the end? In this article, we will give you a comprehensive guide on how to play Dark Days Zombie Survival and enjoy this thrilling game.

                  -

                  dark days zombie survival


                  DOWNLOAD ⚹⚹⚹ https://ssurll.com/2uNUvb



                  -

                  Introduction

                  -

                  What is Dark Days Zombie Survival?

                  -

                  Dark Days Zombie Survival is a mobile game developed by Azur Interactive Games Limited, available for both Android and iOS devices. Released in 2020, it has received positive reviews from players and critics alike. It features an original graphics style, a massive world, intuitive controls, a sophisticated item creation system, and unique rewards for beating bunkers, along with a compelling story and character development that will keep you hooked.

                  -

                  Why is it a popular game?

                  -

                  Dark Days Zombie Survival is popular because it offers a challenging and immersive experience for zombie survival fans. Its realistic, dark atmosphere conveys the tension and danger of the apocalypse. There is a variety of zombies to fight, from slow walkers to fast runners, crawlers to jumpers, spitters to bombers; plenty of weapons and equipment to choose from, such as pistols, rifles, shotguns, grenades, mines, traps, armor, and backpacks; many locations to explore, including cities, forests, deserts, mountains, bunkers, and military bases; and plenty of quests and missions to complete, such as finding survivors, clearing areas, and raiding enemy bases.

                  -

                  Gameplay

                  -

                  Build and upgrade your shelter

                  -

                  The first thing you need to do in Dark Days Zombie Survival is to build your shelter. Your shelter is your base of operations where you can store your items, craft new ones, rest and heal, and plan your next move. You can build your shelter anywhere you want, as long as it is safe from zombies and other threats. You can also upgrade your shelter by adding more rooms, walls, doors, windows, furniture, decorations, and defenses. Upgrading your shelter will make it more comfortable and secure for you and your survivors.

                  -

                  Gather resources and craft weapons

                  -

                  The second thing you need to do in Dark Days Zombie Survival is to gather resources and craft weapons. Resources are essential for your survival as they allow you to create new items, repair old ones, upgrade your shelter, and trade with other survivors. You can find resources by scavenging locations such as houses, shops, cars, crates, barrels, etc. You can also find resources by killing zombies and looting their bodies. Some of the resources you can find are wood, metal, cloth, leather, plastic, rubber, rope, wire, nails, screws, etc.

                  -

                  Crafting weapons is also important for your survival as they allow you to fight zombies and other enemies more effectively. You can craft weapons by using the workbench in your shelter or by finding blueprints in the world. Some of the weapons you can craft are knives, axes, hammers, crowbars, spears, bows, crossbows, guns, ammo, grenades, mines, traps, etc.

                  -

                  Fight and explore the world


                  -

                  The third thing you need to do in Dark Days Zombie Survival is to fight and explore the world. Fighting and exploring the world is the main activity of the game, as it allows you to progress the story, complete quests, find new locations, discover secrets, and earn rewards. You can fight and explore the world by using the map in your shelter or by finding vehicles in the world. You can also use the radio in your shelter to communicate with other survivors and get information about events and missions.

                  -


                  Fighting zombies and other enemies is a challenging and thrilling experience in Dark Days Zombie Survival. You have to use your weapons, skills, and tactics to survive the encounters. You have to aim carefully, reload wisely, use cover, move strategically, and switch weapons as needed. You also have to watch out for your health, stamina, hunger, thirst, and radiation levels. You can use items such as bandages, medkits, food, water, and pills to restore your stats. You can also use items such as flashlights, night vision goggles, binoculars, and scopes to improve your vision.

                  -

                  Exploring the world is a rewarding and fun experience in Dark Days Zombie Survival. You can find new locations such as cities, forests, deserts, mountains, bunkers, military bases, and more. You can also find new items such as blueprints, books, notes, tapes, and more. You can also find new survivors who can join your shelter or trade with you. You can also find new events and missions that can give you more challenges and rewards.

                  -

                  Tips and tricks

                  -

                  How to find essential items

                  -

                  One of the most important tips for playing Dark Days Zombie Survival is to know how to find essential items. Essential items are items that are necessary for your survival or for completing quests. Some of the essential items are water bottles, canned food, matches, batteries, gasoline, antibiotics, radiation pills, etc.

                  -

                  You can find essential items by scavenging locations such as houses, shops, cars, crates, and barrels; by killing zombies and looting their bodies; by trading with other survivors or completing quests and missions; or by using the radio in your shelter and following clues you find in the world.

                  -

                  How to manage your inventory

                  -

                  Another important tip for playing Dark Days Zombie Survival is to know how to manage your inventory. Your inventory is the space where you can store your items. Your inventory has a limited capacity that depends on your backpack size. You can upgrade your backpack size by crafting or finding new backpacks in the world.

                  -

                  You can manage your inventory by sorting items by category, name, weight, or value; by dropping or destroying unwanted items or storing them in your shelter or in vehicles; and by using items such as bags, boxes, or carts that temporarily increase your inventory space.

                  -

                  How to avoid or escape zombies

                  -

                  The last tip for playing Dark Days Zombie Survival is to know how to avoid or escape zombies. Zombies are the main threat in the game and they can kill you easily if you are not careful. Zombies have different types, speeds, strengths, and weaknesses that you have to learn and exploit.

                  -

                  You can avoid zombies by using stealth mode, which reduces your noise and visibility; by using distractions such as rocks, bottles, or flares to lure them away from you; or by using camouflage such as bushes, trees, or walls to hide from their sight.

                  -

                  You can escape zombies by running away or by using vehicles that are faster than they are; by laying traps such as mines, spikes, or wires that slow them down or damage them; or by using explosives such as grenades, molotovs, or rockets that kill them instantly or blast open an escape path.

                  -

                  Conclusion

                  -

                  Summary of the main points

                  -

                  In conclusion, Dark Days Zombie Survival is a great game for zombie survival fans who want a challenging and immersive experience. The game has a realistic and dark atmosphere, a massive world, intuitive controls, a sophisticated item creation system, and unique rewards for beating bunkers. The game also has a compelling story and character development that will keep you hooked.

                  -

                  Call to action and recommendation

                  -

                  If you are interested in playing Dark Days Zombie Survival, you can download it for free from Google Play Store or App Store. You can also follow the game on social media platforms such as Facebook, Twitter, Instagram, and YouTube. You can also visit the official website of the game for more information and updates. We highly recommend this game to anyone who loves zombie survival games and wants to test their skills and strategies in a post-apocalyptic world. So, what are you waiting for? Download Dark Days Zombie Survival today and start your adventure!

                  -

                  FAQs

                  -

                  Q: How do I save my progress in the game?

                  -

                  A: You can save your progress in the game by using the cloud save feature in the settings menu. You can also link your game account to your Google Play or Game Center account to sync your progress across devices.

                  -

                  Q: How do I get more coins and gems in the game?

                  -

                  A: You can get more coins and gems in the game by completing quests, missions, events, and achievements. You can also get more coins and gems by watching ads, participating in surveys, or making in-app purchases.

                  -

                  Q: How do I unlock new locations in the game?

                  -

                  A: You can unlock new locations in the game by progressing the story, clearing bunkers, or finding maps. You can also unlock new locations by exploring the world and discovering hidden areas.

                  -

                  Q: How do I recruit new survivors in the game?

                  -

                  A: You can recruit new survivors in the game by finding them in the world, rescuing them from zombies or enemies, or completing their quests. You can also recruit new survivors by using the radio in your shelter or by finding clues in the world.

                  -

                  Q: How do I upgrade my weapons and equipment in the game?

                  -

                  A: You can upgrade your weapons and equipment in the game by using the workbench in your shelter or by finding blueprints in the world. You can also upgrade your weapons and equipment by using items such as scrap metal, gunpowder, duct tape, etc.

                  401be4b1e0
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/sklearn-docs/IsolationForest-Model-for-Anomaly-Detection/README.md b/spaces/sklearn-docs/IsolationForest-Model-for-Anomaly-Detection/README.md deleted file mode 100644 index 5d124c96c8ab707d549158847b56ec851540b15d..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/IsolationForest-Model-for-Anomaly-Detection/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Isolation Forest -emoji: 🌳 -colorFrom: yellow -colorTo: blue -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sr5434/QuoteGeneration/README.md b/spaces/sr5434/QuoteGeneration/README.md deleted file mode 100644 index b0aa62d5f255f178dd85e91d8b5efb3ad043f3f8..0000000000000000000000000000000000000000 --- a/spaces/sr5434/QuoteGeneration/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: QuoteGeneration -emoji: 🐢 -colorFrom: indigo -colorTo: yellow -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/README.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/README.md deleted file mode 100644 index cc610c0c9e936a5ae4659ceda691c6db6d387296..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/README.md +++ /dev/null @@ -1,24 +0,0 @@ - -# Install dependency -```bash -pip install -r requirement.txt -``` - -# Download the data set -```bash -export WORKDIR_ROOT= - -``` -The downloaded data will be at $WORKDIR_ROOT/ML50 - -# preprocess the data -Install SPM [here](https://github.com/google/sentencepiece) -```bash -export WORKDIR_ROOT= -export SPM_PATH= -``` -* $WORKDIR_ROOT/ML50/raw: extracted raw data -* $WORKDIR_ROOT/ML50/dedup: dedup data -* $WORKDIR_ROOT/ML50/clean: data with valid and test sentences removed from the dedup data - - diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/clib/libbleu/module.cpp b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/clib/libbleu/module.cpp deleted file mode 100644 index 35288b3177185670135f7bdc1f1589c5bb992304..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/clib/libbleu/module.cpp +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2017-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include - -static PyMethodDef method_def[] = {{NULL, NULL, 0, NULL}}; // NOLINT - -static struct PyModuleDef module_def = { - PyModuleDef_HEAD_INIT, - "libbleu", /* name of module */ - // NOLINTNEXTLINE - NULL, /* module documentation, may be NULL */ - -1, /* size of per-interpreter state of the module, - or -1 if the module keeps state in global variables. 
*/ - method_def}; // NOLINT - -#if PY_MAJOR_VERSION == 2 -PyMODINIT_FUNC init_libbleu() -#else -PyMODINIT_FUNC PyInit_libbleu() -#endif -{ - PyObject* m = PyModule_Create(&module_def); - if (!m) { - return NULL; - } - return m; -} diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/monolingual_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/monolingual_dataset.py deleted file mode 100644 index 54fd583b64a3a475324ade6eaaeccf593d747fdc..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/monolingual_dataset.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch - -from . import FairseqDataset, data_utils - - -def collate(samples, pad_idx, eos_idx, fixed_pad_length=None, pad_to_bsz=None): - if len(samples) == 0: - return {} - - def merge(key, is_list=False): - if is_list: - res = [] - for i in range(len(samples[0][key])): - res.append( - data_utils.collate_tokens( - [s[key][i] for s in samples], - pad_idx, - eos_idx, - left_pad=False, - pad_to_length=fixed_pad_length, - pad_to_bsz=pad_to_bsz, - ) - ) - return res - else: - return data_utils.collate_tokens( - [s[key] for s in samples], - pad_idx, - eos_idx, - left_pad=False, - pad_to_length=fixed_pad_length, - pad_to_bsz=pad_to_bsz, - ) - - src_tokens = merge("source") - if samples[0]["target"] is not None: - is_target_list = isinstance(samples[0]["target"], list) - target = merge("target", is_target_list) - else: - target = src_tokens - - return { - "id": torch.LongTensor([s["id"] for s in samples]), - "nsentences": len(samples), - "ntokens": sum(len(s["source"]) for s in samples), - "net_input": { - "src_tokens": src_tokens, - "src_lengths": torch.LongTensor([s["source"].numel() for s in samples]), - }, - "target": target, - } - - -class MonolingualDataset(FairseqDataset): - """ - A wrapper around torch.utils.data.Dataset for monolingual data. - - Args: - dataset (torch.utils.data.Dataset): dataset to wrap - sizes (List[int]): sentence lengths - vocab (~fairseq.data.Dictionary): vocabulary - shuffle (bool, optional): shuffle the elements before batching - (default: True). 
- """ - - def __init__( - self, - dataset, - sizes, - src_vocab, - tgt_vocab=None, - add_eos_for_other_targets=False, - shuffle=False, - targets=None, - add_bos_token=False, - fixed_pad_length=None, - pad_to_bsz=None, - src_lang_idx=None, - tgt_lang_idx=None, - ): - self.dataset = dataset - self.sizes = np.array(sizes) - self.vocab = src_vocab - self.tgt_vocab = tgt_vocab or src_vocab - self.add_eos_for_other_targets = add_eos_for_other_targets - self.shuffle = shuffle - self.add_bos_token = add_bos_token - self.fixed_pad_length = fixed_pad_length - self.pad_to_bsz = pad_to_bsz - self.src_lang_idx = src_lang_idx - self.tgt_lang_idx = tgt_lang_idx - - assert targets is None or all( - t in {"self", "future", "past"} for t in targets - ), "targets must be none or one of 'self', 'future', 'past'" - if targets is not None and len(targets) == 0: - targets = None - self.targets = targets - - def __getitem__(self, index): - if self.targets is not None: - # *future_target* is the original sentence - # *source* is shifted right by 1 (maybe left-padded with eos) - # *past_target* is shifted right by 2 (left-padded as needed) - # - # Left-to-right language models should condition on *source* and - # predict *future_target*. - # Right-to-left language models should condition on *source* and - # predict *past_target*. - source, future_target, past_target = self.dataset[index] - source, target = self._make_source_target( - source, future_target, past_target - ) - else: - source = self.dataset[index] - target = None - source, target = self._maybe_add_bos(source, target) - return {"id": index, "source": source, "target": target} - - def __len__(self): - return len(self.dataset) - - def _make_source_target(self, source, future_target, past_target): - if self.targets is not None: - target = [] - - if ( - self.add_eos_for_other_targets - and (("self" in self.targets) or ("past" in self.targets)) - and source[-1] != self.vocab.eos() - ): - # append eos at the end of source - source = torch.cat([source, source.new([self.vocab.eos()])]) - - if "future" in self.targets: - future_target = torch.cat( - [future_target, future_target.new([self.vocab.pad()])] - ) - if "past" in self.targets: - # first token is before the start of sentence which is only used in "none" break mode when - # add_eos_for_other_targets is False - past_target = torch.cat( - [ - past_target.new([self.vocab.pad()]), - past_target[1:], - source[-2, None], - ] - ) - - for t in self.targets: - if t == "self": - target.append(source) - elif t == "future": - target.append(future_target) - elif t == "past": - target.append(past_target) - else: - raise Exception("invalid target " + t) - - if len(target) == 1: - target = target[0] - else: - target = future_target - - return source, self._filter_vocab(target) - - def _maybe_add_bos(self, source, target): - if self.add_bos_token: - source = torch.cat([source.new([self.vocab.bos()]), source]) - if target is not None: - target = torch.cat([target.new([self.tgt_vocab.bos()]), target]) - return source, target - - def num_tokens_vec(self, indices): - """Return the number of tokens for a set of positions defined by indices. 
- This value is used to enforce ``--max-tokens`` during batching.""" - return self.sizes[indices] - - def _filter_vocab(self, target): - if len(self.tgt_vocab) != len(self.vocab): - - def _filter(target): - mask = target.ge(len(self.tgt_vocab)) - if mask.any(): - target[mask] = self.tgt_vocab.unk() - return target - - if isinstance(target, list): - return [_filter(t) for t in target] - return _filter(target) - return target - - def collater(self, samples): - """Merge a list of samples to form a mini-batch. - - Args: - samples (List[dict]): samples to collate - - Returns: - dict: a mini-batch with the following keys: - - - `id` (LongTensor): example IDs in the original input order - - `ntokens` (int): total number of tokens in the batch - - `net_input` (dict): the input to the Model, containing keys: - - - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in - the source sentence of shape `(bsz, src_len)`. Padding will - appear on the right. - - - `target` (LongTensor): a padded 2D Tensor of tokens in the - target sentence of shape `(bsz, tgt_len)`. Padding will appear - on the right. - """ - return collate( - samples, - self.vocab.pad(), - self.vocab.eos(), - self.fixed_pad_length, - self.pad_to_bsz, - ) - - def num_tokens(self, index): - """Return the number of tokens in a sample. This value is used to - enforce ``--max-tokens`` during batching.""" - return self.sizes[index] - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - return self.sizes[index] - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.shuffle: - order = [np.random.permutation(len(self))] - else: - order = [np.arange(len(self))] - order.append(self.sizes) - return np.lexsort(order) - - @property - def supports_prefetch(self): - return getattr(self.dataset, "supports_prefetch", False) - - def prefetch(self, indices): - self.dataset.prefetch(indices) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/quantization/scalar/modules/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/quantization/scalar/modules/__init__.py deleted file mode 100644 index 8031d9cdb23f2bc72596f8bc9cfa4965f96e3e6c..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/modules/quantization/scalar/modules/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .qact import ActivationQuantizer # NOQA -from .qconv import IntConv2d # NOQA -from .qemb import IntEmbedding # NOQA -from .qlinear import IntLinear # NOQA diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/multilingual_translation.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/multilingual_translation.py deleted file mode 100644 index 4f85ab4832a6c7cbe57a99a3efc6987125d956fc..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/tasks/multilingual_translation.py +++ /dev/null @@ -1,462 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import contextlib -import logging -import os -from collections import OrderedDict -from argparse import ArgumentError - -import torch -from fairseq import metrics, options, utils -from fairseq.data import ( - Dictionary, - LanguagePairDataset, - RoundRobinZipDatasets, - TransformEosLangPairDataset, -) -from fairseq.models import FairseqMultiModel -from fairseq.tasks.translation import load_langpair_dataset - -from . import LegacyFairseqTask, register_task - - -logger = logging.getLogger(__name__) - - -def _lang_token(lang: str): - return "__{}__".format(lang) - - -def _lang_token_index(dic: Dictionary, lang: str): - """Return language token index.""" - idx = dic.index(_lang_token(lang)) - assert idx != dic.unk_index, "cannot find language token for lang {}".format(lang) - return idx - - -@register_task("multilingual_translation") -class MultilingualTranslationTask(LegacyFairseqTask): - """A task for training multiple translation models simultaneously. - - We iterate round-robin over batches from multiple language pairs, ordered - according to the `--lang-pairs` argument. - - The training loop is roughly: - - for i in range(len(epoch)): - for lang_pair in args.lang_pairs: - batch = next_batch_for_lang_pair(lang_pair) - loss = criterion(model_for_lang_pair(lang_pair), batch) - loss.backward() - optimizer.step() - - In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset - (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that - implements the `FairseqMultiModel` interface. - - During inference it is required to specify a single `--source-lang` and - `--target-lang`, which indicates the inference langauge direction. - `--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to - the same value as training. - """ - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - # fmt: off - parser.add_argument('data', metavar='DIR', help='path to data directory') - parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', - help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr') - parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', - help='source language (only needed for inference)') - parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', - help='target language (only needed for inference)') - parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', - help='pad the source on the left (default: True)') - parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', - help='pad the target on the left (default: False)') - try: - parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', - help='max number of tokens in the source sequence') - parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', - help='max number of tokens in the target sequence') - except ArgumentError: - # this might have already been defined. Once we transition this to hydra it should be fine to add it here. - pass - parser.add_argument('--upsample-primary', default=1, type=int, - help='amount to upsample primary dataset') - parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'], - metavar='SRCTGT', - help='replace beginning-of-sentence in source sentence with source or target ' - 'language token. 
(src/tgt)') - parser.add_argument('--decoder-langtok', action='store_true', - help='replace beginning-of-sentence in target sentence with target language token') - # fmt: on - - def __init__(self, args, dicts, training): - super().__init__(args) - self.dicts = dicts - self.training = training - if training: - self.lang_pairs = args.lang_pairs - else: - self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)] - # eval_lang_pairs for multilingual translation is usually all of the - # lang_pairs. However for other multitask settings or when we want to - # optimize for certain languages we want to use a different subset. Thus - # the eval_lang_pairs class variable is provided for classes that extend - # this class. - self.eval_lang_pairs = self.lang_pairs - # model_lang_pairs will be used to build encoder-decoder model pairs in - # models.build_model(). This allows multitask type of sub-class can - # build models other than the input lang_pairs - self.model_lang_pairs = self.lang_pairs - self.langs = list(dicts.keys()) - - @classmethod - def setup_task(cls, args, **kwargs): - dicts, training = cls.prepare(args, **kwargs) - return cls(args, dicts, training) - - @classmethod - def update_args(cls, args): - args.left_pad_source = utils.eval_bool(args.left_pad_source) - args.left_pad_target = utils.eval_bool(args.left_pad_target) - - if args.lang_pairs is None: - raise ValueError( - "--lang-pairs is required. List all the language pairs in the training objective." - ) - if isinstance(args.lang_pairs, str): - args.lang_pairs = args.lang_pairs.split(",") - - @classmethod - def prepare(cls, args, **kargs): - cls.update_args(args) - sorted_langs = sorted( - list({x for lang_pair in args.lang_pairs for x in lang_pair.split("-")}) - ) - if args.source_lang is not None or args.target_lang is not None: - training = False - else: - training = True - - # load dictionaries - dicts = OrderedDict() - for lang in sorted_langs: - paths = utils.split_paths(args.data) - assert len(paths) > 0 - dicts[lang] = cls.load_dictionary( - os.path.join(paths[0], "dict.{}.txt".format(lang)) - ) - if len(dicts) > 0: - assert dicts[lang].pad() == dicts[sorted_langs[0]].pad() - assert dicts[lang].eos() == dicts[sorted_langs[0]].eos() - assert dicts[lang].unk() == dicts[sorted_langs[0]].unk() - if args.encoder_langtok is not None or args.decoder_langtok: - for lang_to_add in sorted_langs: - dicts[lang].add_symbol(_lang_token(lang_to_add)) - logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang]))) - return dicts, training - - def get_encoder_langtok(self, src_lang, tgt_lang): - if self.args.encoder_langtok is None: - return self.dicts[src_lang].eos() - if self.args.encoder_langtok == "src": - return _lang_token_index(self.dicts[src_lang], src_lang) - else: - return _lang_token_index(self.dicts[src_lang], tgt_lang) - - def get_decoder_langtok(self, tgt_lang): - if not self.args.decoder_langtok: - return self.dicts[tgt_lang].eos() - return _lang_token_index(self.dicts[tgt_lang], tgt_lang) - - def alter_dataset_langtok( - self, - lang_pair_dataset, - src_eos=None, - src_lang=None, - tgt_eos=None, - tgt_lang=None, - ): - if self.args.encoder_langtok is None and not self.args.decoder_langtok: - return lang_pair_dataset - - new_src_eos = None - if ( - self.args.encoder_langtok is not None - and src_eos is not None - and src_lang is not None - and tgt_lang is not None - ): - new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang) - else: - src_eos = None - - new_tgt_bos = None - if 
self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None: - new_tgt_bos = self.get_decoder_langtok(tgt_lang) - else: - tgt_eos = None - - return TransformEosLangPairDataset( - lang_pair_dataset, - src_eos=src_eos, - new_src_eos=new_src_eos, - tgt_bos=tgt_eos, - new_tgt_bos=new_tgt_bos, - ) - - def load_dataset(self, split, epoch=1, **kwargs): - """Load a dataset split.""" - paths = utils.split_paths(self.args.data) - assert len(paths) > 0 - data_path = paths[(epoch - 1) % len(paths)] - - def language_pair_dataset(lang_pair): - src, tgt = lang_pair.split("-") - langpair_dataset = load_langpair_dataset( - data_path, - split, - src, - self.dicts[src], - tgt, - self.dicts[tgt], - combine=True, - dataset_impl=self.args.dataset_impl, - upsample_primary=self.args.upsample_primary, - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - max_source_positions=self.args.max_source_positions, - max_target_positions=self.args.max_target_positions, - ) - return self.alter_dataset_langtok( - langpair_dataset, - src_eos=self.dicts[src].eos(), - src_lang=src, - tgt_eos=self.dicts[tgt].eos(), - tgt_lang=tgt, - ) - - self.datasets[split] = RoundRobinZipDatasets( - OrderedDict( - [ - (lang_pair, language_pair_dataset(lang_pair)) - for lang_pair in self.lang_pairs - ] - ), - eval_key=None - if self.training - else "%s-%s" % (self.args.source_lang, self.args.target_lang), - ) - - def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): - if constraints is not None: - raise NotImplementedError( - "Constrained decoding with the multilingual_translation task is not supported" - ) - - lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang) - return RoundRobinZipDatasets( - OrderedDict( - [ - ( - lang_pair, - self.alter_dataset_langtok( - LanguagePairDataset( - src_tokens, src_lengths, self.source_dictionary - ), - src_eos=self.source_dictionary.eos(), - src_lang=self.args.source_lang, - tgt_eos=self.target_dictionary.eos(), - tgt_lang=self.args.target_lang, - ), - ) - ] - ), - eval_key=lang_pair, - ) - - def build_model(self, args): - def check_args(): - messages = [] - if ( - len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) - != 0 - ): - messages.append( - "--lang-pairs should include all the language pairs {}.".format( - args.lang_pairs - ) - ) - if self.args.encoder_langtok != args.encoder_langtok: - messages.append( - "--encoder-langtok should be {}.".format(args.encoder_langtok) - ) - if self.args.decoder_langtok != args.decoder_langtok: - messages.append( - "--decoder-langtok should {} be set.".format( - "" if args.decoder_langtok else "not" - ) - ) - - if len(messages) > 0: - raise ValueError(" ".join(messages)) - - # Update args -> the fact that the constructor here - # changes the args object doesn't mean you get the same one here - self.update_args(args) - - # Check if task args are consistant with model args - check_args() - - from fairseq import models - - model = models.build_model(args, self) - if not isinstance(model, FairseqMultiModel): - raise ValueError( - "MultilingualTranslationTask requires a FairseqMultiModel architecture" - ) - return model - - def _per_lang_pair_train_loss( - self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad - ): - loss, sample_size, logging_output = criterion( - model.models[lang_pair], sample[lang_pair] - ) - if ignore_grad: - loss *= 0 - optimizer.backward(loss) - return loss, sample_size, logging_output - - def train_step( - self, 
sample, model, criterion, optimizer, update_num, ignore_grad=False - ): - model.train() - from collections import defaultdict - - agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float) - curr_lang_pairs = [ - lang_pair - for lang_pair in self.model_lang_pairs - if sample[lang_pair] is not None and len(sample[lang_pair]) != 0 - ] - - for idx, lang_pair in enumerate(curr_lang_pairs): - - def maybe_no_sync(): - if ( - self.args.distributed_world_size > 1 - and hasattr(model, "no_sync") - and idx < len(curr_lang_pairs) - 1 - ): - return model.no_sync() - else: - return contextlib.ExitStack() # dummy contextmanager - - with maybe_no_sync(): - loss, sample_size, logging_output = self._per_lang_pair_train_loss( - lang_pair, - model, - update_num, - criterion, - sample, - optimizer, - ignore_grad, - ) - agg_loss += loss.detach().item() - # TODO make summing of the sample sizes configurable - agg_sample_size += sample_size - for k in logging_output: - agg_logging_output[k] += logging_output[k] - agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k] - return agg_loss, agg_sample_size, agg_logging_output - - def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample): - return criterion(model.models[lang_pair], sample[lang_pair]) - - def valid_step(self, sample, model, criterion): - model.eval() - with torch.no_grad(): - from collections import defaultdict - - agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float) - for lang_pair in self.eval_lang_pairs: - if ( - lang_pair not in sample - or sample[lang_pair] is None - or len(sample[lang_pair]) == 0 - ): - continue - loss, sample_size, logging_output = self._per_lang_pair_valid_loss( - lang_pair, model, criterion, sample - ) - agg_loss += loss.data.item() - # TODO make summing of the sample sizes configurable - agg_sample_size += sample_size - for k in logging_output: - agg_logging_output[k] += logging_output[k] - agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k] - return agg_loss, agg_sample_size, agg_logging_output - - def inference_step( - self, generator, models, sample, prefix_tokens=None, constraints=None - ): - with torch.no_grad(): - if self.args.decoder_langtok: - bos_token = _lang_token_index( - self.target_dictionary, self.args.target_lang - ) - else: - bos_token = self.target_dictionary.eos() - return generator.generate( - models, - sample, - prefix_tokens=prefix_tokens, - constraints=constraints, - bos_token=bos_token, - ) - - def reduce_metrics(self, logging_outputs, criterion): - with metrics.aggregate(): - # pass 'sample_size', 'nsentences', 'ntokens' stats to fairseq_task - super().reduce_metrics(logging_outputs, criterion) - for k in ["sample_size", "nsentences", "ntokens"]: - metrics.log_scalar(k, sum(l[k] for l in logging_outputs)) - - @property - def source_dictionary(self): - if self.training: - return next(iter(self.dicts.values())) - else: - return self.dicts[self.args.source_lang] - - @property - def target_dictionary(self): - if self.training: - return next(iter(self.dicts.values())) - else: - return self.dicts[self.args.target_lang] - - def max_positions(self): - """Return the max sentence length allowed by the task.""" - if len(self.datasets.values()) == 0: - return { - "%s-%s" - % (self.args.source_lang, self.args.target_lang): ( - self.args.max_source_positions, - self.args.max_target_positions, - ) - } - return OrderedDict( - [ - (key, (self.args.max_source_positions, self.args.max_target_positions)) - for split in self.datasets.keys() - for key in 
self.datasets[split].datasets.keys() - ] - ) diff --git a/spaces/stamps-labs/stamp2vec/detection_models/yolo_stamp/loss.py b/spaces/stamps-labs/stamp2vec/detection_models/yolo_stamp/loss.py deleted file mode 100644 index 2a85aabbcc007e400db02f505f618231c57609ac..0000000000000000000000000000000000000000 --- a/spaces/stamps-labs/stamp2vec/detection_models/yolo_stamp/loss.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch -import torch.nn as nn -from utils import * - -""" - Class for loss for training YOLO model. - - Argmunets: - h_coord: weight for loss related to coordinates and shapes of box - h__noobj: weight for loss of predicting presence of box when it is absent. -""" -class YOLOLoss(nn.Module): - def __init__(self, h_coord=0.5, h_noobj=2., h_shape=2., h_obj=10.): - super().__init__() - self.h_coord = h_coord - self.h_noobj = h_noobj - self.h_shape = h_shape - self.h_obj = h_obj - - def square_error(self, output, target): - return (output - target) ** 2 - - def forward(self, output, target): - - pred_xy, pred_wh, pred_obj = yolo_head(output) - gt_xy, gt_wh, gt_obj = process_target(target) - - pred_ul = pred_xy - 0.5 * pred_wh - pred_br = pred_xy + 0.5 * pred_wh - pred_area = pred_wh[..., 0] * pred_wh[..., 1] - - gt_ul = gt_xy - 0.5 * gt_wh - gt_br = gt_xy + 0.5 * gt_wh - gt_area = gt_wh[..., 0] * gt_wh[..., 1] - - intersect_ul = torch.max(pred_ul, gt_ul) - intersect_br = torch.min(pred_br, gt_br) - intersect_wh = intersect_br - intersect_ul - intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1] - - iou = intersect_area / (pred_area + gt_area - intersect_area) - max_iou = torch.max(iou, dim=3, keepdim=True)[0] - best_box_index = torch.unsqueeze(torch.eq(iou, max_iou).float(), dim=-1) - gt_box_conf = best_box_index * gt_obj - - xy_loss = (self.square_error(pred_xy, gt_xy) * gt_box_conf).sum() - wh_loss = (self.square_error(pred_wh, gt_wh) * gt_box_conf).sum() - obj_loss = (self.square_error(pred_obj, gt_obj) * gt_box_conf).sum() - noobj_loss = (self.square_error(pred_obj, gt_obj) * (1 - gt_box_conf)).sum() - - total_loss = self.h_coord * xy_loss + self.h_shape * wh_loss + self.h_obj * obj_loss + self.h_noobj * noobj_loss - return total_loss \ No newline at end of file diff --git a/spaces/starlit7/KorPoliticsTTS/modules.py b/spaces/starlit7/KorPoliticsTTS/modules.py deleted file mode 100644 index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000 --- a/spaces/starlit7/KorPoliticsTTS/modules.py +++ /dev/null @@ -1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - 
self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - 
def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * 
x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/stomexserde/gpt4-ui/Examples/2020 Kisse Pyaar Karoon Full Movie Download 720p Hd.md b/spaces/stomexserde/gpt4-ui/Examples/2020 Kisse Pyaar Karoon Full Movie Download 720p Hd.md deleted file mode 100644 index ebae1698d5f5792fcdd6fb68aa0d76d8d2a78b46..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/2020 Kisse Pyaar Karoon Full Movie Download 720p Hd.md +++ /dev/null @@ -1,21 +0,0 @@ -
                  -

                  How to Watch 2020 Kisse Pyaar Karoon Full Movie Online in HD Quality

                  -

                  If you are a fan of Bollywood comedy movies, you might be interested in watching 2020 Kisse Pyaar Karoon, a remake of the 2015 hit Kis Kisko Pyaar Karoon. The movie stars Kapil Sharma, Arbaaz Khan, Elli Avram, Manjari Fadnis, Simran Kaur Mundi and Sai Lokur. The movie is about a man who marries four women and tries to keep them from finding out about each other.

                  -

                  However, watching 2020 Kisse Pyaar Karoon online can be tricky, as the movie is not available on any official streaming platforms. Many websites claim to offer the movie for free download or streaming, but they are illegal and unsafe. They may contain viruses, malware, pop-ups, ads or phishing links that can harm your device or steal your personal information.

                  -

                  2020 Kisse Pyaar Karoon Full Movie Download 720p Hd


                  DOWNLOADhttps://urlgoal.com/2uI8GT



                  -

                  Therefore, the best way to watch 2020 Kisse Pyaar Karoon online is to use a VPN service that can bypass geo-restrictions and protect your privacy. A VPN can help you access websites that host the movie legally and securely. You can also enjoy faster and smoother streaming without buffering or interruptions.

                  -

                  Here are some steps to watch 2020 Kisse Pyaar Karoon online in HD quality using a VPN:

                  -
                    -
1. Choose a reliable VPN service that has servers in India or other countries where the movie is available. Some of the best VPNs for streaming Bollywood movies are ExpressVPN, NordVPN, Surfshark and CyberGhost.
2. Download and install the VPN app on your device. Connect to a server in India or another suitable location.
3. Visit one of the websites that host 2020 Kisse Pyaar Karoon legally and securely. Some of the options are Zee5[^1^], Hotstar[^2^], SonyLIV[^3^] and Eros Now. You may need to sign up for a subscription or a free trial to access the movie.
4. Enjoy watching 2020 Kisse Pyaar Karoon online in HD quality with your VPN.
                  -

                  By following these steps, you can watch 2020 Kisse Pyaar Karoon online without any hassle or risk. You can also use your VPN to watch other Bollywood movies and shows that are not available in your region. A VPN can also help you browse the web anonymously and securely.

                  - -

                  2020 Kisse Pyaar Karoon is a remake of the 2015 movie Kis Kisko Pyaar Karoon, which was directed by Abbas-Mustan and produced by Ratan Jain. The movie was a commercial success and received mixed reviews from critics. The movie was praised for its comedy and Kapil Sharma's performance, but criticized for its plot and direction.

                  -

                  The remake is directed by Rohit Shetty and produced by Karan Johar. The movie features some changes in the cast and the script, but retains the basic premise of the original. The movie also has some cameo appearances by Akshay Kumar, Ranveer Singh, Varun Dhawan and Katrina Kaif.

                  -

                  2020 Kisse Pyaar Karoon is a fun and entertaining movie that will make you laugh and enjoy. The movie has some hilarious scenes and dialogues that will keep you engaged. The movie also has some romantic and emotional moments that will touch your heart. The movie has a good soundtrack and cinematography that will enhance your viewing experience.

                  -

                  If you are looking for a light-hearted and enjoyable movie to watch online, 2020 Kisse Pyaar Karoon is a good choice. You can watch it with your family or friends and have a good time. Just remember to use a VPN service to access the movie legally and securely.

                  7196e7f11a
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Battle Brothers Save Location.md b/spaces/stomexserde/gpt4-ui/Examples/Battle Brothers Save Location.md deleted file mode 100644 index b49bee91bd98877194407447a44da9052afd9e45..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Battle Brothers Save Location.md +++ /dev/null @@ -1,48 +0,0 @@ - -

                  How to Find and Backup Your Battle Brothers Save Location

                  -

                  Battle Brothers is a turn-based tactical RPG that lets you lead a mercenary company in a gritty, low-power, medieval fantasy world. You decide where to go, whom to hire or to fight, what contracts to take and how to train and equip your men in a procedurally generated open world campaign.

                  -

                  battle brothers save location


                  DOWNLOAD ->>->>->> https://urlgoal.com/2uIa6H



                  -

If you are enjoying this game and want to make sure you don't lose your progress, you might be wondering where your save files are located and how to back them up. In this article, we will show you how to find and back up your Battle Brothers save location on Windows, Linux and Steam Play.

                  -

                  Battle Brothers Save Location on Windows

                  -

                  On Windows, Battle Brothers saves your game data in the following folder:

                  -%USERPROFILE%\Documents\Battle Brothers\savegames -

                  You can access this folder by typing it in the address bar of File Explorer or by using the Run command (Windows + R).

                  -

                  To backup your save files, simply copy the entire savegames folder to another location, such as an external drive or a cloud service. You can also compress the folder into a ZIP file to save space.
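If you back up often, you can also script the copy instead of doing it by hand. The snippet below is only an illustrative sketch: it assumes the default Windows save path shown above and uses a backup folder name chosen for this example, so adjust both paths to match your own setup.

```python
import shutil
from datetime import datetime
from pathlib import Path

# Default Windows save location (adjust if your Documents folder is somewhere else).
save_dir = Path.home() / "Documents" / "Battle Brothers" / "savegames"
if not save_dir.is_dir():
    raise SystemExit(f"Save folder not found: {save_dir}")

# Example backup folder; any external drive or synced folder works just as well.
backup_root = Path.home() / "Documents" / "BattleBrothersBackups"
backup_root.mkdir(parents=True, exist_ok=True)

# Write a timestamped ZIP so older backups are never overwritten,
# e.g. savegames-20240101-120000.zip
stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
archive = shutil.make_archive(str(backup_root / f"savegames-{stamp}"), "zip", save_dir)
print(f"Backup written to {archive}")
```

The same script works on Linux or Steam Play once save_dir points at the corresponding path from the sections below.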

                  -

                  Battle Brothers Save Location on Linux

                  -

                  On Linux, Battle Brothers saves your game data in the following folder:

                  -~/.local/share/Steam/steamapps/common/Battle Brothers/savegames -

                  You can access this folder by using a file manager or a terminal emulator.

                  -

                  To backup your save files, simply copy the entire savegames folder to another location, such as an external drive or a cloud service. You can also compress the folder into a TAR or GZIP file to save space.

                  -

                  -

                  Battle Brothers Save Location on Steam Play

                  -

                  If you are using Steam Play to run Battle Brothers on Linux, your game data is stored in a different folder:

                  -~/.local/share/Steam/steamapps/compatdata/365360/pfx/drive_c/users/steamuser/Documents/Battle Brothers/savegames -

                  You can access this folder by using a file manager or a terminal emulator.

                  -

                  To backup your save files, simply copy the entire savegames folder to another location, such as an external drive or a cloud service. You can also compress the folder into a TAR or GZIP file to save space.

                  -

                  Conclusion

                  -

                  Battle Brothers is a challenging and rewarding game that lets you create your own story in a medieval fantasy world. To make sure you don't lose your progress, you should backup your save files regularly. In this article, we showed you how to find and backup your Battle Brothers save location on Windows, Linux and Steam Play. We hope this guide was helpful and enjoy playing Battle Brothers!

                  - -

                  How to Restore Your Battle Brothers Save Files

                  -

                  If you have backed up your Battle Brothers save files and want to restore them, you need to follow these steps:

                  -
                    -
1. Locate your backup folder or file and copy it to your computer.
2. Find your Battle Brothers save location on your system (see the previous sections for details).
3. Delete the existing savegames folder in your Battle Brothers save location.
4. Paste the backup savegames folder in your Battle Brothers save location.
5. Launch Battle Brothers and load your saved game.
                  -

                  Note that restoring your save files will overwrite any existing progress you have made in the game. You should only do this if you want to revert to an earlier state of your game or if you have lost or corrupted your save files.

                  -

                  How to Transfer Your Battle Brothers Save Files to Another Computer

                  -

                  If you want to play Battle Brothers on another computer, you can transfer your save files easily by following these steps:

                  -
                    -
1. Backup your save files on your current computer (see the previous sections for details).
2. Copy the backup folder or file to a removable device, such as a USB flash drive or an external hard drive.
3. Plug the removable device into the other computer and copy the backup folder or file to it.
4. Find your Battle Brothers save location on the other computer (see the previous sections for details).
5. Delete the existing savegames folder in your Battle Brothers save location.
6. Paste the backup savegames folder in your Battle Brothers save location.
7. Launch Battle Brothers and load your saved game.
                  -

                  Note that transferring your save files will overwrite any existing progress you have made in the game on the other computer. You should only do this if you want to continue playing from where you left off on your current computer or if you want to share your game with someone else.

                  81aa517590
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Baywatch (English) Hindi Movie In 720p Download BETTERl.md b/spaces/stomexserde/gpt4-ui/Examples/Baywatch (English) Hindi Movie In 720p Download BETTERl.md deleted file mode 100644 index 5e270f46ce57a2546f90f4254b9cd4f1e8e17f9f..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Baywatch (English) Hindi Movie In 720p Download BETTERl.md +++ /dev/null @@ -1,16 +0,0 @@ -
                  -

                  How to Download Baywatch (English) Hindi Movie in 720p for Free

                  -

                  If you are a fan of action, comedy and crime movies, you might be interested in watching Baywatch (English) Hindi movie in 720p quality. Baywatch is a 2017 Hollywood movie that features Dwayne Johnson, Zac Efron and Alexandra Daddario as lifeguards who uncover a criminal plot that threatens the future of the bay. The movie is based on the popular TV series of the same name and has a lot of humor, action and romance.

                  -

                  However, finding a reliable and safe source to download Baywatch (English) Hindi movie in 720p can be tricky. There are many websites that claim to offer free downloads, but they may contain malware, viruses or pop-up ads that can harm your device or compromise your privacy. Moreover, some of these websites may have low-quality or incomplete files that can ruin your viewing experience.

                  -

                  Baywatch (English) Hindi Movie In 720p Downloadl


                  Download >>> https://urlgoal.com/2uI69F



                  -

                  That's why we have compiled a list of the best websites that offer Baywatch (English) Hindi movie in 720p download for free. These websites are tested and verified by us and have high-quality and fast downloads. You can also watch the movie online on these websites if you prefer. All you need is a stable internet connection and a compatible device.

                  -

                  List of Websites to Download Baywatch (English) Hindi Movie in 720p for Free

                  -
                    -
1. PogoLinks: This website provides direct Google Drive download links for fast and secure downloading and free online streaming. You can download Baywatch (English) Hindi movie in 720p quality with a size of 1.24 GB. You can also choose other resolutions and audio options according to your preference. The website has a simple and user-friendly interface and also offers other Hollywood and Bollywood movies and web series.[^1^]
2. BollyFlix: This website offers dual audio [Hindi – English] movies in various qualities and sizes. You can download Baywatch (English) Hindi movie in 720p quality with a size of 1.2 GB from this website. The source of the movie is BluRay and it has English subtitles. The website also provides Google Drive download links for easy and safe downloading. You can also watch the trailer of the movie on this website.[^2^]
3. CentroCentro: This website is a torrent site that allows you to download Baywatch (English) Hindi movie in 720p quality with a size of 896.75 MB. You will need a torrent client like uTorrent or BitTorrent to download the movie from this website. The website also provides other information about the movie such as cast, genres, summary and screenshots.[^3^]
                  -

                  Conclusion

                  -

                  We hope that this article has helped you find the best website to download Baywatch (English) Hindi movie in 720p quality for free. You can enjoy watching this movie with your friends and family and have a fun time. However, we advise you to use a VPN service or an ad-blocker when accessing these websites to protect your online privacy and security. Also, we do not support or promote piracy in any way and we recommend you to watch the movie legally from official sources.

                  7b8c122e87
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Daana Veera Sura Karna Dialogues Pdf Downloadl PORTABLE.md b/spaces/stomexserde/gpt4-ui/Examples/Daana Veera Sura Karna Dialogues Pdf Downloadl PORTABLE.md deleted file mode 100644 index 095539df376f63af2fd57d41049cfcfb88a22ab1..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Daana Veera Sura Karna Dialogues Pdf Downloadl PORTABLE.md +++ /dev/null @@ -1,20 +0,0 @@ - -

                  How to Download Daana Veera Sura Karna Dialogues in PDF Format

                  -

                  Daana Veera Sura Karna is a 1977 Telugu epic historical drama film written, produced and directed by N. T. Rama Rao. The film is based on the Hindu epic Mahabharata and depicts the life and deeds of Karna, a legendary hero and one of the central characters in the epic. The film is known for its powerful dialogues delivered by N. T. Rama Rao in the role of Karna.

                  -

                  If you are a fan of this classic film and want to download its dialogues in PDF format, you can follow these simple steps:

                  -

                  Daana Veera Sura Karna Dialogues Pdf Downloadl


                  Download Zip ❤❤❤ https://urlgoal.com/2uI8UM



                  -
                    -
1. Go to this link which contains a document with some of the most famous dialogues from the film.
2. Click on the "Download" button on the top right corner of the page.
3. Select "PDF" as the format and click on "Continue".
4. You may need to sign up or log in to Scribd to access the download option.
5. Once you have downloaded the file, you can open it with any PDF reader and enjoy reading the dialogues.
                  -

                  Note: This document is not an official or complete transcript of the film. It only contains some selected dialogues that were uploaded by a user on Scribd. You may find some errors or omissions in the document. For a more accurate and comprehensive version of the dialogues, you may need to watch the film or find another source online.

                  - -

                  Daana Veera Sura Karna is not only a film, but also a cultural phenomenon in Telugu cinema. The film showcases the acting prowess of N. T. Rama Rao, who played three iconic roles with distinct personalities and mannerisms. The film also features his sons Nandamuri Harikrishna and Nandamuri Balakrishna in their debut roles as Arjuna and Abhimanyu, respectively. The film has a star-studded cast of veteran actors such as Dhulipala, Mikkilineni, Prabhakar Reddy, Kaikala Satyanarayana, Saroja Devi, Prabha, Sharada, Kanchana and many others.

                  -

                  The film is known for its powerful dialogues written by Kondaveeti Venkatakavi, who used classical Telugu language and poetic expressions to convey the emotions and messages of the characters. The dialogues are still popular among Telugu audiences and are often quoted or referenced in various contexts. The film also has memorable songs composed by Pendyala Nageswara Rao, who blended classical and folk music elements to create a unique sound. Some of the songs such as "Anna Devudu Ledanna", "Sri Krishnaarjuna Yuddhamu" and "Neevega Raja" are considered as evergreen hits.

                  -

                  The film was a huge blockbuster at the box office and became the highest-grossing Telugu film at the time. It was also critically acclaimed for its technical aspects such as cinematography, editing, art direction and costumes. The film won several awards and honors, including the Filmfare Award for Best Film - Telugu and the Nandi Award for Best Feature Film - Silver. The film is regarded as one of the greatest films of Telugu cinema and has been re-released several times over the years. The film has also inspired many filmmakers and actors to make films based on the Mahabharata or its characters.

                  -

                  81aa517590
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/stratussox/yolov5_inference/utils/loggers/comet/hpo.py b/spaces/stratussox/yolov5_inference/utils/loggers/comet/hpo.py deleted file mode 100644 index 7dd5c92e8de170222b3cd3eae858f4f3cfddaff6..0000000000000000000000000000000000000000 --- a/spaces/stratussox/yolov5_inference/utils/loggers/comet/hpo.py +++ /dev/null @@ -1,118 +0,0 @@ -import argparse -import json -import logging -import os -import sys -from pathlib import Path - -import comet_ml - -logger = logging.getLogger(__name__) - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - -# Project Configuration -config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") - - -def get_args(known=False): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - - # Weights & Biases arguments - parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') - - # Comet Arguments - parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") - parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") - parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") - parser.add_argument("--comet_optimizer_workers", - type=int, - default=1, - help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") - - return parser.parse_known_args()[0] if known else parser.parse_args() - - -def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} - - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get("batch_size") - opt.epochs = parameters.get("epochs") - - device = select_device(opt.device, batch_size=opt.batch_size) - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - opt = get_args(known=True) - - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.project = str(opt.project) - - optimizer_id = os.getenv("COMET_OPTIMIZER_ID") - if optimizer_id is None: - with open(opt.comet_optimizer_config) as f: - optimizer_config = json.load(f) - optimizer = 
comet_ml.Optimizer(optimizer_config) - else: - optimizer = comet_ml.Optimizer(optimizer_id) - - opt.comet_optimizer_id = optimizer.id - status = optimizer.status() - - opt.comet_optimizer_objective = status["spec"]["objective"] - opt.comet_optimizer_metric = status["spec"]["metric"] - - logger.info("COMET INFO: Starting Hyperparameter Sweep") - for parameter in optimizer.get_parameters(): - run(parameter["parameters"], opt) diff --git a/spaces/subhajitmaji/MusicGen/tests/__init__.py b/spaces/subhajitmaji/MusicGen/tests/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/subhajitmaji/MusicGen/tests/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/DigiDNA IMazing 2.5.4 Crack With Activation Code ((LINK)).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/DigiDNA IMazing 2.5.4 Crack With Activation Code ((LINK)).md deleted file mode 100644 index 8212e58361ff64615b81d71aef4befc6ce59365c..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/DigiDNA IMazing 2.5.4 Crack With Activation Code ((LINK)).md +++ /dev/null @@ -1,30 +0,0 @@ - -

                  DigiDNA iMazing 2.5.4 Crack With Activation Code: The Ultimate iOS Device Manager

                  -

DigiDNA iMazing 2.5.4 Crack is a powerful and user-friendly application that allows you to transfer and save your music, messages, files, and data from any iOS device to your computer or vice versa. It is the best alternative to iTunes, with more features and flexibility. You can back up and restore your iPhone, iPad, or iPod touch with ease, without losing any data or settings. You can also manage your contacts, messages, photos, videos, apps, and more with iMazing.

                  -

                  With DigiDNA iMazing 2.5.4 Activation Code, you can:

                  -

                  DigiDNA iMazing 2.5.4 Crack With Activation Code


                  Download Filehttps://cinurl.com/2uEYCD



                  -
                    -
• Transfer your data to your new iPhone or iPad without iCloud or iTunes.
• Copy your music and playlists between your iOS device and your computer.
• Export your photos and videos to your computer or external drive.
• Store your iPhone and iPad data with a unique backup technology.
• Save, export, and print your iPhone messages, including SMS, MMS, iMessages, and attachments.
• Transfer your files and documents between your iOS device and your computer.
• Manage your contacts and sync them between your iPhone and your Mac or PC.
• Backup and restore your app data, such as game progress, documents, and settings.
• Access your iPhone voicemail, call history, voice memos, and notes.
• Use your iOS device as an external drive.
                  -

                  DigiDNA iMazing 2.5.4 Crack With Activation Code is compatible with Windows and Mac OS X. It supports all iOS devices, including the latest iPhone 12 series and iOS 14. It is easy to install and use, with a simple and intuitive interface. You can download DigiDNA iMazing 2.5.4 Crack With Activation Code from the link below and enjoy the full features of this amazing software.

                  How to use DigiDNA iMazing 2.5.4

                  -

                  After downloading and installing DigiDNA iMazing 2.5.4 Crack With Activation Code on your computer, you can start using it to manage your iOS device. Here are some steps to guide you:

                  -
                    -
1. Connect your iOS device to your computer with a USB cable or via Wi-Fi. You may need to trust your computer on your device and enter your passcode.
2. Launch iMazing on your computer and select your device from the left sidebar. You will see a list of actions that you can perform with iMazing.
3. Choose the action that you want to do, such as backup, transfer, export, or restore. Follow the instructions on the screen to complete the action.
4. You can also access the device settings by clicking on the Supervision button at the bottom of the actions list. You can configure various options for your supervised device, such as restrictions, wallpapers, web clips, and more.
5. You can also browse the device's file system by clicking on the File System button at the top of the actions list. You can view, copy, delete, or create files and folders on your device.
                  -

                  DigiDNA iMazing 2.5.4 Crack With Activation Code is a versatile and powerful tool that can help you manage your iOS device with ease. You can do more than what iTunes can offer, and enjoy full control over your data and settings.

                  -

                  d5da3c52bf
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Downloadfilmnabimusakartun.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Downloadfilmnabimusakartun.md deleted file mode 100644 index a6da2599927706f8c41efc24a4bbb47a3bb99910..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Downloadfilmnabimusakartun.md +++ /dev/null @@ -1,11 +0,0 @@ -
                  -

                  darlwat j2ec2c27de https://ibb.co/36NDOaT https://www.vinayakveda.com/a-download-free-files-for-filmnabimusakartun-on-pirate-bay/. benulri 4fbcdbcacd https://uploads.strikinglycdn.com/files/7754af1c-7b7f-49c0-92b2-9eaf4aaa45c2/downloadfilmnabimusakartun.pdf. Reply. Reply.

                  -

                  https://www.instapaper.com/p/https://ibb.co/36NDOaT https://ibb.co/f59fhfM https://www.instapaper.com/p/http://hdwallpapersimagesdownload.com/images/1313451836/?q=https://ibb.co/f59fhfM. https://www.instapaper.com/p/https://coub.com/stories/3048001-downloadfilmnabimusakartun-portable.pdf. https://www.instapaper.com/p/https://ibb.co/f59fhfM. https://ibb.co/f59fhfM. Reply. https://www.instapaper.com/p/http://hdwallpapersimagesdownload.com/images/1313451836/?q=https://ibb.co/f59fhfM. Reply.

                  -

                  downloadfilmnabimusakartun


                  Downloadhttps://cinurl.com/2uEYBo



                  -

                  roylee n5d32541df https://www.worldofbob.com/published/downloadfilmnabimusakartun-online-pdf-free-download.pdf. ahmad a2b019eb6c http://www.aspid.online/download-filmnabimusakartun-page-free-pdf.pdf.

                  -

                  krtcf 65dc49c0c https://docs.google.com/viewer?url=http://hydbyaweal.yolasite.com/resources/downloadfilmnabimusakartun.pdf. sebdua 3db1a5d1ae http://mdib.org/downloadfilmnabimusakartun-pdf-download-or-read.pdf.

                  -

                  aibujah a5b5a61feb https://www.asianbar.com/downloadfilmnabimusakartun-download-or-read-online-pdf.pdf. zeetol 161794d48 http://pantene.yolasite.com/downloadfilmnabimusakartun-uk-free-reading-pdf-with-hot-link.pdf. adwol 6075c26d9 https://www.freeisbndownload.org/downloadfilmnabimusakartun-pdf-download-or-read-online-free.pdf.

                  -

                  orundomega jkfqnki 758be5b741 https://manage.cfc.ksc.nuclearenergies.gov/downloads/downloadfilmnabimusakartun.pdf Download. rfactor 2 download https://www.pinterest.com/pin/33220410182892265/ Download.

                  -

                  899543212b
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/PC Games - Mortal Kombat 4.zip Mod.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/PC Games - Mortal Kombat 4.zip Mod.md deleted file mode 100644 index 1d52da517d78b090a6d1680b5ce664691fe6fadd..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/PC Games - Mortal Kombat 4.zip Mod.md +++ /dev/null @@ -1,29 +0,0 @@ -
                  -

                  How to Mod Mortal Kombat 4 on PC

                  -

                  Mortal Kombat 4 is a classic fighting game that was released in 1998 by Midway Games. It was the first game in the series to feature 3D character models and environments, as well as some new fighters and gameplay elements. However, if you want to customize your Mortal Kombat 4 experience, you can use some tools and mods that are available online.

                  -

                  In this article, we will show you how to mod Mortal Kombat 4 on PC using two tools: MK4STRIP - Model Package and MK4STRIP - Graphic Package. These tools allow you to change the fighter models and textures of the game, respectively. You can download them from Mod DB, a website that hosts mods for various games.

                  -

                  PC Games - Mortal Kombat 4.zip Mod


                  Download ☆☆☆ https://cinurl.com/2uEYmh



                  -

                  Step 1: Download and Extract the Tools

                  -

                  First, you need to download the MK4STRIP - Model Package and MK4STRIP - Graphic Package from Mod DB. They are both zip files that contain the tools and instructions. You can find them under the "Downloads" section of the Mortal Kombat 4 page on Mod DB[^1^].

                  -

                  Once you have downloaded the zip files, you need to extract them to a folder of your choice. You can use any program that can handle zip files, such as WinRAR or 7-Zip. Make sure you have enough space on your hard drive, as the extracted files will take up about 177 KB.
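If you prefer a scripted route, Python's built-in zipfile module can unpack the archives just as well. This is only an optional sketch: the archive names below are placeholders, so substitute whatever your downloaded files are actually called.

```python
import zipfile
from pathlib import Path

# Placeholder names for the two downloaded archives; rename to match your files.
archives = [Path("MK4STRIP_Model_Package.zip"), Path("MK4STRIP_Graphic_Package.zip")]
target = Path("MK4STRIP_Tools")  # extraction folder of your choice
target.mkdir(exist_ok=True)

for archive in archives:
    with zipfile.ZipFile(archive) as zf:
        # Give each package its own subfolder so the two tools stay separate.
        destination = target / archive.stem
        zf.extractall(destination)
        print(f"Extracted {archive.name} -> {destination}")
```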

                  -

                  Step 2: Backup Your Game Files

                  -

                  Before you start modding your game, it is highly recommended that you backup your original game files. This way, you can restore them if something goes wrong or if you want to play the game without mods. To backup your game files, simply copy the entire folder where you installed Mortal Kombat 4 on your PC and paste it somewhere else. For example, if you installed the game in C:\Program Files\Mortal Kombat 4, you can copy that folder and paste it in C:\Backup\Mortal Kombat 4.

                  -

                  Step 3: Change Fighter Models with MK4STRIP - Model Package

                  -

                  If you want to change the fighter models of Mortal Kombat 4, you can use the MK4STRIP - Model Package tool. This tool allows you to replace any of the 15 default fighters with any of the unlockable fighters or bosses. For example, you can replace Liu Kang with Goro or Scorpion with Shinnok.

                  -

                  To use this tool, follow these steps:

                  -
                    -
1. Open the folder where you extracted the MK4STRIP - Model Package tool.
2. Double-click on the MK4STRIP.EXE file to launch the tool.
3. A window will pop up asking you to select your Mortal Kombat 4 folder. Click on "Browse" and navigate to where you installed the game on your PC. For example, C:\Program Files\Mortal Kombat 4. Click on "OK".
4. The tool will scan your game files and display a list of fighters on the left side of the window. Click on any fighter that you want to change.
5. A list of available models will appear on the right side of the window. Click on any model that you want to use instead of the original one.
6. Click on "Replace" at the bottom of the window. The tool will replace the selected fighter with the selected model.
7. Repeat steps 4-6 for any other fighters that you want to change.
8. When you are done, click on "Exit" at the bottom of the window. The tool will save your changes and close.
                  -

                  Note: This tool may not work on anything newer than Windows XP, according to Mod DB[^1^]. If you encounter any problems or errors, try running it in compatibility mode or as an administrator.

                  -

                  -

                  Step 4: Change Textures with MK4STRIP - Graphic Package

                  -

                  If you want to change the textures of Mortal Kombat

                  d5da3c52bf
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Pes 18 Cd Key [PORTABLE].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Pes 18 Cd Key [PORTABLE].md deleted file mode 100644 index 35f226ecfa8e17e8fcd13244892510bff7052956..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Pes 18 Cd Key [PORTABLE].md +++ /dev/null @@ -1,7 +0,0 @@ - -

                  That is not to say there is no one to blame. FIFA's popularity skyrocketed with the addition of ball physics to its predecessor. Top players and coaches have recognised it as the superior method of simulation and have made the jump to FIFA's API. PES 2018, on the other hand, continues to use the technology provided by the EFA. Thus one of the best improvements to PES over the years has been removed for political or economic reasons. FIFA has recently started to offer their API online and the game continues to benefit from it. Unfinished transfers also provide a greater level of realism. PES 2018 is the best football sim you can find, but the competition still keeps churning out more innovations. And that, more than anything, is what makes PES 2017 one of the best football games ever made.

                  -

                  The passing and dribbling mechanics are more refined as well. PES 2018 is built on an agent-based passing system, which allows players to step up and move around a lot more comfortably. In addition to this, you can also use your the Brazilian Flick (aka the Pounce). More importantly, the precision of the camera is uncanny. In FIFA, the camera kept jumping to the wrong player in the background. That is no longer the case with PES. The goalkeeper, for example, clearly shows up in the shot, whereas in FIFA, the player will appear somewhere in front of the goal.

                  -

                  Pes 18 cd key


                  DOWNLOAD ☆☆☆☆☆ https://cinurl.com/2uEYKm



                  -

                  If the game was easy to learn and play, its the most realistic football game in the market. Beyond having agents that behave like their real-life counterparts, PES 2018 also features the Football PES Institute, a content streaming site that offers an inside look into the game design process. There, players and coaches share the art of simulation and learn which details make a game such as FIFA different. Yet, the only downside to a game like PES is its relatively high price. But even then, it offers a relatively high level of gameplay and freedom in a league that is as popular as PES, League of Legends or Dota 2.

                  899543212b
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Udaintha Nilakkal Pdf Free Downloadl.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Udaintha Nilakkal Pdf Free Downloadl.md deleted file mode 100644 index 0630a3283396bb98c452296e85127b3d25fadd57..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Udaintha Nilakkal Pdf Free Downloadl.md +++ /dev/null @@ -1,72 +0,0 @@ -## Udaintha Nilakkal Pdf Free Downloadl - - - - - - - - - -**CLICK HERE ->>->>->> [https://cayseypisi.blogspot.com/?c=2tyeLv](https://cayseypisi.blogspot.com/?c=2tyeLv)** - - - - - - - - - - - - Here is a possible title and article with SEO optimization and HTML formatting for the keyword "Udaintha Nilakkal Pdf Free Downloadl": - -# Udaintha Nilakkal Pdf Free Downloadl: A Review of Pa. Vijay's Historical Novel - - - -Udaintha Nilakkal (Broken Moons) is a historical novel by Pa. Vijay, a popular Tamil poet and lyricist. The novel is divided into two parts, each spanning over 300 pages. The first part was published in 2018 and the second part in 2019 by Pustaka Digital Media. The novel is based on the life and times of Raja Raja Chola I, one of the greatest kings of South India who ruled from 985 to 1014 CE. - - - -The novel narrates the story of Raja Raja Chola's rise to power, his conquests, his love affairs, his family, his enemies, his allies, his achievements, his failures, and his legacy. The novel also depicts the culture, society, politics, religion, art, architecture, literature, and science of the Chola period. The novel is rich in historical details and references, as well as poetic imagination and creativity. - - - -The novel has received positive reviews from critics and readers alike. It has been praised for its captivating plot, engaging characters, vivid descriptions, authentic dialogues, and lyrical language. It has also been appreciated for its historical accuracy and research. The novel has been compared to the works of famous Tamil historical novelists like Kalki Krishnamurthy, Sandilyan, and Govi Manisekaran. - - - -If you are interested in reading Udaintha Nilakkal Pdf Free Downloadl, you can find it online on various platforms like Scribd[^1^], Idoc[^2^], Sifox[^3^], and Thebookee[^4^]. You can also buy the ebook or the paperback version from Pustaka Digital Media's website or other online stores. Udaintha Nilakkal Pdf Free Downloadl is a must-read for anyone who loves history, romance, adventure, and poetry. - -Here is a possible continuation of the article: - -## Raja Raja Chola's Achievements and Legacy - - - -Raja Raja Chola was not only a great conqueror, but also a visionary ruler who transformed his empire into a well-organized and prosperous state. He reformed the administration, revenue system, military organization, and religious policy of his kingdom. He also patronized art, architecture, literature, and culture. Some of his notable achievements and legacy are: - - - -- He built the Brihadisvara Temple in Thanjavur, which is one of the largest and most magnificent temples in India. The temple is dedicated to Lord Shiva and showcases the Chola style of architecture, sculpture, painting, and bronze casting. The temple is also a UNESCO World Heritage Site and a symbol of Tamil culture. - -- He commissioned the compilation of a historical record called the Tiruvalangadu Copper Plates, which narrate the genealogy and deeds of the Chola kings from ancient times to his own reign. 
The plates are considered as one of the most authentic sources of Chola history and culture. - -- He established a strong navy that enabled him to dominate the Indian Ocean trade and commerce. He also sent diplomatic missions to China and Southeast Asia to establish friendly relations and cultural exchange. - -- He promoted Shaivism as the state religion and patronized various temples, monasteries, and saints. He also respected other faiths like Buddhism, Jainism, and Vaishnavism and granted them land grants and donations. - -- He encouraged education and learning among his subjects and supported various schools, colleges, libraries, and hospitals. He also fostered the growth of Tamil literature and poetry by patronizing poets like Ottakoothar, Kavichakravarthi Kambar, Nakkirar II, Pugalendi, Avvaiyar II etc. - - - -Raja Raja Chola died in 1014 CE and was succeeded by his son Rajendra Chola I, who continued his father's legacy and expanded the Chola Empire to its zenith. Raja Raja Chola is remembered as one of the greatest kings of India and a hero of Tamil Nadu. - - dfd1c89656 - - - - - diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Adobe Media Encoder CC 2015 Serial Number Download.epub.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Adobe Media Encoder CC 2015 Serial Number Download.epub.md deleted file mode 100644 index 087339d9986ff58f466ef266a1e14126ded6e4d4..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Adobe Media Encoder CC 2015 Serial Number Download.epub.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  Adobe Media Encoder CC 2015 Serial Number Download.epub


                  DOWNLOAD ✔✔✔ https://urluss.com/2uCFWK



                  -
-It is a full offline installer standalone setup of Adobe Media Encoder CC 2015 for ... You may also like to download Adobe Premiere Pro CC 2015. 1fdad05405
                  -
                  -
                  -

                  diff --git a/spaces/taiwhis/Nhandien_nhom36/README.md b/spaces/taiwhis/Nhandien_nhom36/README.md deleted file mode 100644 index 9affc96fdaacd711590c82690eca3f54b63891f1..0000000000000000000000000000000000000000 --- a/spaces/taiwhis/Nhandien_nhom36/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: NhandienEaster -emoji: 👀 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/terfces0erbo/CollegeProjectV2/FULL Lumion 6.5.1 Pro Patch For Windows UPDATED.md b/spaces/terfces0erbo/CollegeProjectV2/FULL Lumion 6.5.1 Pro Patch For Windows UPDATED.md deleted file mode 100644 index ee0e5bc01e49755aa7ad1008bf65dc15db604b92..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/FULL Lumion 6.5.1 Pro Patch For Windows UPDATED.md +++ /dev/null @@ -1,46 +0,0 @@ -
                  -

                  IBM SPSS Statistics V21: A Powerful Tool for Data Analysis and Modeling

                  -

                  IBM SPSS Statistics is the world’s leading statistical software used to solve business and research problems by means of ad-hoc analysis, hypothesis testing, and predictive analytics. Organizations use IBM SPSS Statistics to understand data, analyze trends, forecast and plan to validate assumptions and drive accurate conclusions[^1^].

                  -

                  FULL Lumion 6.5.1 Pro Patch For Windows


                  Download ===== https://bytlly.com/2uGlNd



                  -

                  IBM SPSS Statistics V21.0 focuses on increasing the analytic capabilities through:

                  -
                    -
• Building better models from uncertain inputs while assessing risk using Monte Carlo simulation techniques
• Providing faster performance with more accurate results, increased productivity and effectiveness using a range of specialized techniques
• Integrating with other technologies and tools making it easy to access common data types, external programming languages, and file types
                  -

                  IBM SPSS Statistics V21.0 is available for download by supported customers[^1^]. It supports both 32-bit and 64-bit platforms, allowing users to take advantage of the increased memory capacity and processing speed of 64-bit systems.

                  -

                  IBM SPSS Amos is a powerful tool for structural equation modeling (SEM), which allows users to test and confirm the relationships among observed and latent variables. IBM SPSS Amos enables users to specify, estimate, assess, and present models to show hypothesized relationships among variables. Users can choose either the graphical user interface or non-graphical, programmatic interface[^2^].

                  -

                  -

                  IBM SPSS Amos V21.0 is also available for download by supported customers[^2^]. It supports only 64-bit platforms, as SEM requires large amounts of memory and processing power. IBM SPSS Amos V21.0 can be integrated with IBM SPSS Statistics V21.0, allowing users to perform SEM on data sets prepared in IBM SPSS Statistics.

                  -

                  If you are looking for a comprehensive and reliable solution for data analysis and modeling, IBM SPSS Statistics V21.0 and IBM SPSS Amos V21.0 are the tools you need.

                  - -

                  How to Use IBM SPSS Statistics V21.0 for Data Analysis

                  -

                  IBM SPSS Statistics V21.0 offers a variety of features and functions to help users perform data analysis efficiently and effectively. Some of the main features and functions are:

                  -
                    -
• Data Editor: A spreadsheet-like interface that allows users to view and edit data, define variables and their attributes, apply filters and weights, split files, and transpose data.
• Data Viewer: A window that displays the output of analyses, such as tables, charts, graphs, and notes. Users can customize the appearance and format of the output, export it to other applications, or save it as a file.
• Chart Editor: A tool that allows users to create and modify charts and graphs, such as histograms, scatterplots, boxplots, pie charts, and more. Users can change the chart type, style, color, title, legend, axis, gridlines, and other options.
• Statistics Menu: A menu that provides access to various statistical procedures and tests, such as descriptive statistics, frequencies, crosstabs, correlation, regression, ANOVA, t-test, chi-square test, factor analysis, cluster analysis, and more. Users can specify the options and parameters for each procedure and test.
• Transform Menu: A menu that allows users to manipulate data, such as compute new variables, recode existing variables, rank cases, sort cases, aggregate data, select cases, and more. Users can apply formulas and functions to transform data.
• Analyze Menu: A menu that provides access to advanced analytical techniques and tools, such as reliability analysis, missing values analysis, linear mixed models, generalized linear models, generalized estimating equations, survival analysis, multivariate analysis of variance (MANOVA), discriminant analysis, neural networks, and more. Users can choose the appropriate technique and tool for their research questions and hypotheses.
                  -

                  To use IBM SPSS Statistics V21.0 for data analysis, users need to follow these basic steps:

                  -
                    -
1. Import or enter data into the Data Editor.
2. Define variables and their attributes in the Variable View.
3. Explore and visualize data using descriptive statistics and charts.
4. Select the appropriate statistical procedure or test from the Statistics Menu or the Analyze Menu.
5. Specify the options and parameters for the procedure or test.
6. Run the procedure or test and view the output in the Data Viewer.
7. Interpret the results and draw conclusions.
                  - -

                  How to Use IBM SPSS Amos V21.0 for Structural Equation Modeling

                  -

                  IBM SPSS Amos V21.0 is a powerful tool for structural equation modeling (SEM), which allows users to test and confirm the relationships among observed and latent variables. Some of the main features and functions of IBM SPSS Amos V21.0 are:

                  -
                    -
• Graphical User Interface (GUI): A user-friendly interface that allows users to draw path diagrams using drag-and-drop tools. Users can specify observed variables (rectangles), latent variables (circles), error terms (small circles), paths (arrows), covariances (curved arrows), constraints (equal signs), and labels (text boxes). Users can also modify the properties of each element in the diagram.
• Non-Graphical User Interface (NGUI): A text-based interface that allows users to write syntax commands using AMOS Basic language. Users can specify observed variables (V), latent variables (F), error terms (E), paths (P), covariances (C), constraints (K), labels (L), groups (G), matrices (M), options (O), output (S), and comments (*). Users can also modify the properties of each command.
• Data Files: Files that contain the data to be analyzed using SEM. Users can import data files from various sources and formats, such as IBM SPSS Statistics files (.sav), Excel files (.xls or .xlsx), text files (.txt or .csv), Access files (.mdb or .accdb), SAS files (.sas7bdat), Stata files (.dta), Mplus files (.dat), R files (.RData), or AMOS files (.amw).
                  • Output Files: Files that contain the output of SEM analyses. Users can view output files in various formats and applications, such as AMOS Graphics (.amw), AMOS Text Output (.txt), AMOS Tables (.rtf or .docx

                    d5da3c52bf
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Free Download Speedbit Video Accelerator Premium Crack __TOP__.md b/spaces/terfces0erbo/CollegeProjectV2/Free Download Speedbit Video Accelerator Premium Crack __TOP__.md deleted file mode 100644 index 612f57509f44471878a09fde2f927f3f1f3bacb8..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Free Download Speedbit Video Accelerator Premium Crack __TOP__.md +++ /dev/null @@ -1,8 +0,0 @@ -
                    -

Speedbit Video Accelerator Premium 3.0.6 Serial Key greatly reduces video buffering issues. Speedbit's unique integrated technology makes it possible to stream recordings from a variety of sources concurrently, ensuring the quickest loading times and the best user experience. It is also for this reason that Speedbit was named a 2008 Technology Pioneer by the World Economic Forum.

                    -

Speedbit Video Accelerator Premium 3.0.6 License Key unlocks one of the most useful applications you can run on your PC. Like any utility, it relies on your system's drivers, since drivers are the pieces of information that tell the hardware how to run. Many users worry that a lot of their time is wasted fixing Windows hardware driver errors, but by using this program you can save yourself a great deal of that time.

                    -

                    free download speedbit video accelerator premium crack


DOWNLOAD » https://bytlly.com/2uGj0Z



                    -

An independent audit has found that Video Accelerator greatly reduces video buffering issues. Acceleration is powered by Speedbit's distinctive proprietary technology, which streams videos from multiple sources at the same time, ensuring the fastest loading times and the most enjoyable user experience.

                    -

MacDrive Pro 10.5.7.6 Crack is a remarkable application that makes managing Mac discs on a Windows machine very simple. It supports USB 3.0, SATA, eSATA, FireWire 400/800, SCSI, IDE, and many other technologies, handling everything from viewing the mounted Mac discs on your computer to working with their contents.

                    899543212b
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Kamasutra 3D In Hindi Dubbed 720p.md b/spaces/terfces0erbo/CollegeProjectV2/Kamasutra 3D In Hindi Dubbed 720p.md deleted file mode 100644 index b736bd9842e882776fe1cf0add81a0ee63bbbd77..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Kamasutra 3D In Hindi Dubbed 720p.md +++ /dev/null @@ -1,6 +0,0 @@ -

                    Kamasutra 3D in hindi dubbed 720p


Download File » https://bytlly.com/2uGjb0



                    -
                    - 3cee63e6c2
                    -
                    -
                    -

                    diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download 2021.md b/spaces/tialenAdioni/chat-gpt-api/logs/BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download 2021.md deleted file mode 100644 index 509295b1a9ff885ab4e2ede538f677329034da09..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download 2021.md +++ /dev/null @@ -1,136 +0,0 @@ -
                    -

                    BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download: A Review

                    - -

                    BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download is a software solution that allows you to create and edit 3D images in a simple and fast way. It is a combination of BluffTitler Ultimate 13.3.0.6, a powerful software for 3D animation and presentation, and Crackingpatch, a tool that can activate it without paying for a license. In this article, we will review BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download and tell you what it is, how to download and install it, and how to use it for your projects.

                    - -

                    What is BluffTitler Ultimate 13.3.0.6?

                    - -

BluffTitler Ultimate 13.3.0.6 is the latest version of the software that allows you to create and edit 3D images in a simple and fast way. It has many features and functions that can help you design eye-catching 3D titles and animations for your projects.

                    -

                    BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch download


Download » https://urlcod.com/2uKb26



                    - -

                    With BluffTitler Ultimate 13.3.0.6, you can create complex and information-rich projects for various purposes, such as videos, multimedia presentations, VJ performances, and websites. You can also export your projects to picture and video formats for use in other software products.

                    - -

                    BluffTitler Ultimate 13.3.0.6 works completely in realtime: you immediately see the final result of your actions. It also uses a minimalistic intuitive user interface that does not try to impress with dozens of windows, tabs, and dialogs. Instead, it only features two windows: the edit window and the render window.

                    - -

                    BluffTitler Ultimate 13.3.0.6 supports eight different layer types: camera, light, text, picture, video, plasma, particle, and audio. Layers can be animated independently and connected to each other for special effects.

                    - -

                    What is Crackingpatch?

                    - -

                    Crackingpatch is a tool that generates a patch for activating BluffTitler Ultimate 13.3.0.6 without paying for a license. It is a patch and a keygen that can bypass the registration process and unlock all the features of the software.

                    -

                    - -

                    How to download BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download?

                    - -

                    To download BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download, you need to follow these steps:

                    - -
                      -
1. Go to one of the torrent sites that offer the BluffTitler Ultimate 13.3.0.6 + Patch + Portable - Crackingpatching.com torrent file, such as CrackingPatching, RARGB, PatchedFiles, or Mystrikingly.
2. -
3. Download the torrent file and open it with a torrent client, such as uTorrent or BitTorrent.
4. -
5. Select the files you want to download from the torrent, such as BluffTitler Ultimate 13.3.0.6 + Patch + Portable - Crackingpatching.com.zip, crackzsoft.com.url, crackzsoft.txt, and readme.txt.
6. -
7. Wait for the download to finish and then open the folder where the files are saved.
8. -
9. Extract the zip file using a file archiver, such as WinRAR or 7-Zip (a small Python alternative is sketched after this list).
10. -
11. You have successfully downloaded BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download.
                    12. -
                    - -
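If you prefer to script the extraction in step 9 instead of using WinRAR or 7-Zip, Python's standard zipfile module can unpack the archive. This is only a minimal sketch; the archive and destination folder names are hypothetical placeholders, not the exact names from the torrent.

```python
# Minimal sketch: extract a downloaded .zip with Python's standard library.
# The archive and destination names are hypothetical placeholders.
import zipfile
from pathlib import Path

archive = Path("BluffTitler_Ultimate_13.3.0.6_Patch_Portable.zip")
destination = Path("BluffTitler_extracted")
destination.mkdir(exist_ok=True)

with zipfile.ZipFile(archive) as zf:
    zf.extractall(destination)  # unpack every file in the archive
    print(f"Extracted {len(zf.namelist())} files to {destination}")
```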

How to install BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download?

                    - -

To install BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download, you need to follow these steps:

                    - -
                      -
                    1. Open the folder where you extracted the zip file.
                    2. -
                    3. Run the setup.exe file from the folder and follow the installation instructions.
                    4. -
5. When prompted for a serial number, use the Crackingpatch tool to generate one.
                    6. -
7. When prompted for a product key, use one of these:
                      • 237J1 - Autodesk AutoCAD Civil 3D
                      • 129J1 - Autodesk AutoCAD Map 3D
                      • 140J1 - Autodesk AutoCAD OEM
                    8. -
9. When prompted to activate online or offline, choose offline activation.
                    10. -
                    11. Copy the request code from the activation window and paste it into the Crackingpatch tool.
                    12. -
                    13. Click on Generate button and copy the activation code from the Crackingpatch tool.
                    14. -
                    15. Paste the activation code into the activation window and click on Next button.
                    16. -
17. You have successfully installed BluffTitler Ultimate 13.3.0.6 with full features.
                    18. -
                    - -

How to use BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download?

                    - -

To use BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download for your projects, you need to follow these steps:

                    - -
                      -
1. Launch BluffTitler Ultimate 13.3.0.6 from your desktop or start menu.
2. -
3. Create a new project or open an existing one.
4. -
5. Use the tools in the edit window to create and edit your 3D layers, such as text, pictures, video, and particles.
6. -
7. You can animate layers independently and connect them to each other so that special effects update automatically when changes are made.
8. -
9. You can use the realtime preview to explore alternatives, since you immediately see the final result of your actions.
10. -
11. You can use the render window to export high-quality pictures and videos of your project.
12. -
13. You can also share your project with other users or export it to other formats as needed.
                    14. -
                    - -

                    Conclusion

                    - -

In this article, we have reviewed BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download and told you what it is, how to download and install it, and how to use it for your projects. We hope this article has been informative and useful for you and that you can enjoy using this software solution for your 3D animation and presentation projects.

                    -

                    What are the benefits of using BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download?

                    - -

                    By using BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download, you can enjoy the following benefits:

                    - -
                      -
                    • You can save money by not paying for a license and use the software for free.
                    • -
                    • You can access all the features and functions of BluffTitler Ultimate 13.3.0.6 without any limitations or restrictions.
                    • -
• You can create and edit 3D images and animations in a simple and fast way.
                    • -
• You can improve your productivity and efficiency with the realtime preview, the layer system, and the export tools.
                    • -
                    • You can export your projects to picture and video formats for use in other software products or websites.
                    • -
                    - -

                    What are the risks of using BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download?

                    - -

                    However, using BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download also involves some risks, such as:

                    - -
                      -
                    • You may violate the terms and conditions of Outerspace Software and face legal consequences.
                    • -
                    • You may download a fake or malicious torrent file that can harm your computer or steal your data.
                    • -
                    • You may encounter errors or bugs in the software that can affect your work or cause data loss.
                    • -
                    • You may not receive any updates or support from Outerspace Software or other users.
                    • -
                    - -

                    How to avoid the risks of using BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download?

                    - -

                    To avoid the risks of using BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download, you can follow these tips:

                    - -
                      -
                    1. Use a reliable antivirus software and scan the torrent file before downloading it.
                    2. -
                    3. Use a trusted torrent site and check the comments and ratings of the torrent file before downloading it.
                    4. -
3. Back up your data regularly and use a recovery tool in case of data loss (a small backup sketch follows this list).
                    6. -
                    7. Use a VPN service to hide your IP address and protect your privacy online.
                    8. -
                    9. Purchase a legitimate license from Outerspace Software or use a free trial version if available.
                    10. -
                    - -
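As a simple way to act on the backup tip in this list, the sketch below zips a folder into a dated archive before you experiment with cracked installers. The source folder and archive name are hypothetical placeholders; point it at whatever folder actually holds your projects and documents.

```python
# Minimal sketch: make a dated zip backup of a folder before risky installs.
# The folder and archive names are hypothetical placeholders.
import shutil
from datetime import date
from pathlib import Path

source = Path.home() / "Documents" / "BluffTitler"  # hypothetical projects folder
backup_name = f"blufftitler_backup_{date.today():%Y%m%d}"

archive = shutil.make_archive(backup_name, "zip", root_dir=source)
print("Backup written to", archive)
```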

                    Conclusion

                    - -

In this article, we have reviewed BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download and told you what it is, how to download and install it, and how to use it for your projects. We have also discussed the benefits and risks of using this software solution and how to avoid them. We hope this article has been informative and useful for you and that you can enjoy using this software solution for your 3D animation and presentation projects.

                    -

                    What are the alternatives to BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download?

                    - -

                    BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download is not the only software solution that can create and edit 3D images in a simple and fast way. There are some alternatives that you can consider, such as:

                    - -
                      -
• Aurora 3D Animation Maker: This is a program that can create 3D animations, text effects, logos, banners, and more. It has a user-friendly interface and a large library of templates and effects.
• -
• Xara 3D Maker: This is a program that can create 3D text and graphics for web pages, presentations, videos, etc. It has a simple drag-and-drop interface and a variety of styles and animations.
• -
• MAGIX 3D Maker: This is a program that can create high-quality 3D graphics and animations for websites, videos, etc. It has a flexible interface and a wide range of tools and effects.
• -
• DP Animation Maker: This is a program that can create animated backgrounds, banners, slideshows, etc. It has an easy-to-use interface and a lot of presets and effects.
                    • -
                    - -

                    What are the best practices for using BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download?

                    - -

                    To use BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download effectively and efficiently for your projects, you can follow these best practices:

                    - -
                      -
1. Plan your project before you start creating it in BluffTitler Ultimate 13.3.0.6. Sketch out your ideas, define your goals, and gather your resources.
                    2. -
                    3. Use layers wisely to organize your project and create complex effects. You can use different layer types for different purposes and connect them with each other.
                    4. -
                    5. Use the preview window to see the final result of your project in realtime. You can also use the player and screensaver to play your project in full screen mode.
                    6. -
                    7. Use the render window to export your project to picture and video formats for use in other software products or websites. You can also adjust the quality and size of your output.
                    8. -
9. Use the help documentation and online learning center to learn more about the features and functions of BluffTitler Ultimate 13.3.0.6. You can also use the YouTube channel, blog, and forums to get tips, tricks, demos, tutorials, etc.
                    10. -
                    - -

                    Conclusion

                    - -

In this article, we have reviewed BluffTitler Ultimate 13.3.0.6 Patch Portable - Crackingpatch Download and told you what it is, how to download and install it, and how to use it for your projects. We have also discussed the benefits and risks of using this software solution and how to avoid them. We have also compared it with some alternatives and shared some best practices for using it effectively and efficiently. We hope this article has been informative and useful for you and that you can enjoy using this software solution for your 3D animation and presentation projects.

                    679dcb208e
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Crack expvr How to Easily Copy and Restore Your Sky HD and XTV Recordings with ExPVR.md b/spaces/tialenAdioni/chat-gpt-api/logs/Crack expvr How to Easily Copy and Restore Your Sky HD and XTV Recordings with ExPVR.md deleted file mode 100644 index 0bfec830321e78e26dd0c97e1bea92b9c9deea81..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Crack expvr How to Easily Copy and Restore Your Sky HD and XTV Recordings with ExPVR.md +++ /dev/null @@ -1,80 +0,0 @@ - -

                    ExPVR: The Ultimate Tool for Sky+ and Sky+ HD Recording Transfer

                    -

                    If you are a Sky+ or Sky+ HD user, you probably have a lot of recordings that you want to keep. Maybe you want to upgrade your hard disk to a larger size, or maybe you want to copy your recordings to a new box. Or maybe you just want to watch your recordings on your PC or portable device.

                    -

                    Whatever your reason, you need a tool that can handle the transfer of your recordings without losing any quality or metadata. That's where ExPVR comes in.

                    -

                    Crack expvr


                    Download File » https://urlcod.com/2uK61Y



                    -

                    ExPVR is a software application that allows you to copy recordings from any of the supported Sky boxes to a new one. It also allows you to extract recordings that are not encrypted and watch them on your favourite video playing software or portable device.

                    -

                    ExPVR is easy to use, fast and reliable. It supports all the latest Sky firmware versions and planner formats. It can handle different disk sizes and partitions. It can also copy your scheduled recordings and series links to the new disk or device.

                    -

                    ExPVR is the ultimate tool for Sky+ and Sky+ HD recording transfer. It is available for download from http://www.ph-mb.com/products/expvr/downloads. You can try it for free for 14 days, and if you like it, you can purchase a license for only £14.99.

                    -

                    Don't let your recordings go to waste. Get ExPVR today and enjoy your Sky+ and Sky+ HD recordings anywhere, anytime.

                    - -

                    How to use ExPVR

                    -

                    Using ExPVR is simple and straightforward. All you need is a PC with Windows 7 or later, a USB to SATA adapter or enclosure, and the hard disks from your Sky boxes. You can also use an external hard disk as a temporary storage if you want to copy recordings between two Sky boxes.

                    -

                    -

                    First, you need to download and install ExPVR from http://www.ph-mb.com/products/expvr/downloads. You can use the trial version for 14 days, or purchase a license for £14.99.

                    -

                    Next, you need to connect the hard disk from your source Sky box to your PC using the USB to SATA adapter or enclosure. You can also connect the hard disk from your destination Sky box if you want to copy recordings directly.

                    -

                    Then, you need to run ExPVR and select the source and destination disks or devices. You can choose to copy all recordings, or select specific ones. You can also filter the recordings by genre, channel, date, etc.

                    -

                    Finally, you need to click on the "Copy" button and wait for the process to finish. ExPVR will copy your recordings and convert them to the appropriate format if needed. It will also copy your scheduled recordings and series links if you want.

                    -

                    Once the process is done, you can disconnect the hard disks and put them back in your Sky boxes. You can also watch the extracted recordings on your PC or portable device using any video playing software that supports TS files.

                    - -

                    Why choose ExPVR

                    -

                    There are many reasons why ExPVR is the best tool for Sky+ and Sky+ HD recording transfer. Here are some of them:

                    -
                      -
                    • ExPVR supports all Sky boxes and firmware versions. It can handle different planner formats and encryption types.
                    • -
                    • ExPVR is fast and reliable. It can copy hundreds of recordings in minutes without any errors or quality loss.
                    • -
                    • ExPVR is easy to use and user-friendly. It has a clear and intuitive interface that guides you through the process.
                    • -
                    • ExPVR is affordable and cost-effective. It costs only £14.99 for a lifetime license that includes free updates and support.
                    • -
                    • ExPVR is safe and secure. It does not modify or damage your hard disks in any way. It also respects your privacy and does not collect or share any personal data.
                    • -
                    -

                    ExPVR is the ultimate tool for Sky+ and Sky+ HD recording transfer. Don't miss this opportunity to get it today and enjoy your recordings anywhere, anytime.

                    e753bf7129
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Descargar tc 2012 para rfactor crack los pasos a seguir para descargar instalar y jugar este mod de rfactor.md b/spaces/tialenAdioni/chat-gpt-api/logs/Descargar tc 2012 para rfactor crack los pasos a seguir para descargar instalar y jugar este mod de rfactor.md deleted file mode 100644 index 55f68d0bdfbed7e0d2fdd78bc0dd24411705b38e..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Descargar tc 2012 para rfactor crack los pasos a seguir para descargar instalar y jugar este mod de rfactor.md +++ /dev/null @@ -1,58 +0,0 @@ - -

Our goal is to offer you a comprehensive service so that you can get the most out of racing-car simulators. We offer you high-quality mods, keep you informed about the latest news, accompany you as you learn, and advise you on your questions. Please respect our work and do not use or edit any of the files inside the Mod without prior permission from Turb and the GSMF team. If you want to download the Turismo Nacional Clase 3 2014 Mod, click here: Turismo Nacional Classe 2 2012 para rfactor.

                    - -

The mods we offer are the result of meticulous, professional work, based on research and respect for the reality of the vehicles and the competitions. Each mod has its own characteristics and challenges that will make you feel the thrill of driving at the limit. You can choose between different categories, such as Turismo Nacional, TC2000, Super TC2000, Top Race, Formula Renault and many more.

                    -

                    descargar tc 2012 para rfactor crack


                    Download File ❤❤❤ https://urlcod.com/2uK13Y



                    - -

In addition to the mods, we also provide you with up-to-date information about racing simulators, both those that already exist and those that are yet to come. We bring you the latest news, the best tips, the best setups and the best tricks so you can get the most out of your favourite simulator. We also invite you to take part in our forums and social networks, where you can share your experiences, opinions and suggestions with other fans.

                    - -

Last but not least, we accompany you throughout your process of learning and improving as a virtual driver. We offer tutorials, guides, videos and courses so you can learn everything from the basics to the most advanced techniques. We also give you the opportunity to compete against other drivers in our online championships, where you can show off your skills and have fun at the same time. And if you have any questions or problems, you can always contact Turb and the GSMF team, who will be happy to help you.

                    -

                    e753bf7129
                    -
                    -
                    \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Huawei Y210 2010 Custom Firmware Download Everything You Need to Customize Your Phone.md b/spaces/tialenAdioni/chat-gpt-api/logs/Huawei Y210 2010 Custom Firmware Download Everything You Need to Customize Your Phone.md deleted file mode 100644 index 53429a1154dbfad8f07572c3ce6a4592c1a98f98..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Huawei Y210 2010 Custom Firmware Download Everything You Need to Customize Your Phone.md +++ /dev/null @@ -1,135 +0,0 @@ -
                    -

                    Huawei Y210 2010 Custom Firmware Download: How to Upgrade Your Phone with Custom ROMs

                    - -

                    If you have a Huawei Y210 2010 smartphone and you want to improve its performance, features and appearance, you may want to try downloading and installing a custom firmware on it. A custom firmware, also known as a custom ROM, is a modified version of the original operating system that comes with your phone. It can offer you many benefits, such as faster speed, better battery life, more customization options, latest Android updates and more.

                    - -

                    However, before you download and install a custom firmware on your Huawei Y210 2010, you need to know some important things, such as what are the available custom ROMs for your phone, how to prepare your phone for the installation process, how to flash the custom ROM on your phone and what are the risks and advantages of doing so. In this article, we will answer these questions and guide you through the steps of downloading and installing a custom firmware on your Huawei Y210 2010.

                    -

                    huawei y210 2010 custom firmware download


DOWNLOAD » https://urlcod.com/2uK8eC



                    - -

                    What are the available custom ROMs for Huawei Y210 2010?

                    - -

                    There are many custom ROMs available for Huawei Y210 2010 on the internet, but not all of them are compatible or stable with your phone. You need to choose a custom ROM that is specifically designed for your phone model and has good reviews and feedback from other users. Some of the most popular and reliable custom ROMs for Huawei Y210 2010 are:

                    - -
                      -
                    • Lineage OS: This is one of the most famous and widely used custom ROMs in the Android community. It is based on the latest Android versions and offers a smooth, stable and secure experience. It also has many features and customization options that allow you to tweak your phone according to your preferences.
                    • -
                    • Pixel Experience: This is a custom ROM that aims to provide you with the look and feel of a Google Pixel device. It is based on Android 10 and has a minimalistic and clean interface. It also includes some of the Pixel features, such as Google Camera, Google Assistant, Pixel Launcher and more.
                    • -
                    • Havoc OS: This is a custom ROM that focuses on providing you with a unique and innovative user interface. It is based on Android 10 and has a lot of customization options that let you change the appearance and behavior of your phone. It also has some cool features, such as ambient display, gesture navigation, gaming mode and more.
                    • -
                    - -

                    These are just some of the examples of custom ROMs for Huawei Y210 2010. You can find more custom ROMs on websites like XDA Developers or TweakDroid. However, before you download any custom ROM, make sure to check its compatibility, stability, features and user reviews.

                    -

                    - -

                    How to prepare your phone for the installation process?

                    - -

                    Before you download and install a custom firmware on your Huawei Y210 2010, you need to prepare your phone for the installation process. This involves some steps that are necessary to avoid any problems or errors during or after the installation. These steps are:

                    - -
                      -
                    1. Backup your data: Installing a custom firmware will erase all your data on your phone, such as contacts, messages, photos, videos, apps and more. Therefore, it is very important to backup your data before you proceed with the installation. You can use various methods to backup your data, such as using a cloud service like Google Drive or Dropbox, using an external SD card or using a PC software like Huawei HiSuite.
                    2. -
                    3. Unlock your bootloader: The bootloader is a program that runs when you turn on your phone and decides which operating system to load. By default, the bootloader is locked by the manufacturer to prevent unauthorized modifications to the operating system. However, if you want to install a custom firmware on your phone, you need to unlock the bootloader first. This will allow you to flash any custom ROM on your phone. To unlock the bootloader of your Huawei Y210 2010, you need to obtain an unlock code from Huawei's official website and follow some instructions that will be sent to your email.
                    4. -
5. Install a custom recovery: A custom recovery is software that allows you to perform various operations on your phone's system partition, such as flashing custom ROMs, wiping data, making backups and more. The default recovery that comes with your phone is limited and does not support flashing custom ROMs. Therefore, you need to install a custom recovery that supports these operations before you can flash a custom ROM.

                      -

                      How to flash the custom ROM on your Huawei Y210 2010?

                      - -

                      After you have prepared your phone for the installation process, you can proceed to flash the custom ROM on your Huawei Y210 2010. This involves some steps that are necessary to install the custom firmware on your phone and make it work properly. These steps are:

                      - -
                        -
                      1. Download the custom ROM of your choice from the website where you found it. Make sure to download the correct version for your phone model and check the file size and checksum to verify its integrity.
                      2. -
                      3. Copy the custom ROM file to your external SD card or internal storage of your phone. Do not extract or rename the file.
                      4. -
                      5. Turn off your phone and boot it into recovery mode. To do this, press and hold the Volume Up and Power buttons simultaneously until you see the Huawei logo. Then release the Power button but keep holding the Volume Up button until you enter the recovery mode.
                      6. -
                      7. In the recovery mode, use the Volume buttons to navigate and the Power button to select. First, perform a backup of your current system by selecting "Backup" and choosing a location to save it.
                      8. -
                      9. Then, perform a wipe of your data by selecting "Wipe" and choosing "Advanced Wipe". Check the boxes for "Dalvik / ART Cache", "System", "Data" and "Cache" and swipe to confirm.
                      10. -
                      11. Next, install the custom ROM by selecting "Install" and choosing the custom ROM file that you copied earlier. Swipe to confirm and wait for the installation process to finish.
                      12. -
                      13. Optionally, you can also install any additional files that come with the custom ROM, such as Google Apps (GApps), kernel, mods or patches. Just repeat the same steps as above but choose the corresponding files instead of the custom ROM file.
                      14. -
                      15. Finally, reboot your phone by selecting "Reboot" and choosing "System". Your phone will boot into the custom ROM that you installed.
                      16. -
                      - -

                      Congratulations! You have successfully flashed a custom ROM on your Huawei Y210 2010. You can now enjoy the new features and improvements that it offers.

                      - -

                      What are the risks and advantages of installing a custom firmware on your Huawei Y210 2010?

                      - -

                      Installing a custom firmware on your Huawei Y210 2010 can be a rewarding experience, but it also comes with some risks and advantages that you need to be aware of. Here are some of them:

                      - -
                        -
                      • Risks: Installing a custom firmware will void your warranty and may cause some issues or errors on your phone, such as bootloops, crashes, bugs or compatibility problems. You may also lose some features or functions that are specific to your original operating system or manufacturer. You may also face some legal or ethical issues if you use a custom firmware that violates any terms or conditions of your original operating system or manufacturer.
                      • -
                      • Advantages: Installing a custom firmware will give you more control and customization over your phone. You can change the look and feel of your phone, add new features and options, improve the performance and battery life of your phone, update to the latest Android versions and more. You can also learn new skills and knowledge about Android development and join a community of enthusiasts who share your passion.
                      • -
                      - -

                      Therefore, you need to weigh the pros and cons of installing a custom firmware on your Huawei Y210 2010 before you decide to do so. You also need to take full responsibility for any consequences that may arise from doing so. You should always backup your data, follow the instructions carefully and do some research before you proceed with the installation.

                      -

                      How to download the custom firmware for your Huawei Y210 2010?

                      - -

                      After you have chosen the custom ROM that you want to install on your Huawei Y210 2010, you need to download the custom firmware file from the website where you found it. However, before you download the custom firmware file, you need to make sure that it is safe, reliable and compatible with your phone. Here are some tips to help you download the custom firmware file for your Huawei Y210 2010:

                      - -
                        -
                      • Check the source: Make sure that you download the custom firmware file from a reputable and trustworthy website, such as XDA Developers or TweakDroid. Avoid downloading the custom firmware file from unknown or suspicious websites, as they may contain malware or viruses that can harm your phone or steal your data.
                      • -
                      • Check the version: Make sure that you download the correct version of the custom firmware file for your phone model and Android version. For example, if you have a Huawei Y210 2010 with Android 4.1 Jelly Bean, you should not download a custom firmware file that is meant for Android 10 or another phone model. You can check the version of the custom firmware file by looking at its name, description or changelog.
                      • -
• Check the size and checksum: Make sure that you download the complete and intact custom firmware file for your phone. You can check the size and checksum of the custom firmware file by comparing them with the values published on the website where you found it; the size is usually given in bytes and the checksum as hexadecimal digits. If the size or checksum does not match the published values, the file is corrupted or has been tampered with and you should not install it (a small verification sketch follows this list).
                      • -
                      - -
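As a concrete illustration of the size-and-checksum tip above, the short Python sketch below prints a file's size and SHA-256 digest so you can compare them with the values published on the download page. The firmware file name and the expected digest are hypothetical placeholders; substitute the real values from the website you downloaded from.

```python
# Minimal sketch: verify a downloaded firmware file against a published checksum.
# The file name and the expected digest below are hypothetical placeholders.
import hashlib
from pathlib import Path

firmware = Path("huawei_y210_custom_rom.zip")
expected_sha256 = "0000000000000000000000000000000000000000000000000000000000000000"

digest = hashlib.sha256()
with firmware.open("rb") as fh:
    for chunk in iter(lambda: fh.read(1024 * 1024), b""):  # read in 1 MiB chunks
        digest.update(chunk)

print("size   :", firmware.stat().st_size, "bytes")
print("sha256 :", digest.hexdigest())
if digest.hexdigest().lower() != expected_sha256.lower():
    print("Checksum mismatch - do not flash this file.")
```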

                      By following these tips, you can ensure that you download a safe, reliable and compatible custom firmware file for your Huawei Y210 2010.

                      - -

                      How to enjoy the benefits of installing a custom firmware on your Huawei Y210 2010?

                      - -

                      Once you have successfully flashed a custom ROM on your Huawei Y210 2010, you can enjoy the benefits of installing a custom firmware on your phone. You can explore the new features and options that the custom ROM offers, such as new themes, icons, wallpapers, widgets, apps, settings and more. You can also customize your phone according to your preferences and needs, such as changing the font size, color scheme, sound profile, notification style and more. You can also improve the performance and battery life of your phone by tweaking some parameters, such as CPU frequency, screen brightness, memory usage and more.

                      - -

                      However, to enjoy the benefits of installing a custom firmware on your Huawei Y210 2010, you also need to take some precautions and responsibilities. You need to update your custom ROM regularly to get the latest security patches, bug fixes and improvements. You also need to backup your data frequently to avoid losing it in case of any problems or errors. You also need to respect the rights and credits of the developers and contributors of the custom ROM that you installed and do not redistribute or modify their work without their permission.

                      - -

                      By following these precautions and responsibilities, you can enjoy the benefits of installing a custom firmware on your Huawei Y210 2010 without any hassle or trouble.

                      -

                      Conclusion

                      - -

                      In this article, we have explained what are the available custom ROMs for Huawei Y210 2010, how to prepare your phone for the installation process, how to flash the custom ROM on your phone and what are the risks and advantages of doing so. We have also given you some tips on how to download the custom firmware file for your phone and how to enjoy the benefits of installing a custom firmware on your phone.

                      - -

                      Installing a custom firmware on your Huawei Y210 2010 can be a rewarding experience, but it also comes with some risks and responsibilities that you need to be aware of. You need to choose a custom ROM that is compatible and stable with your phone, backup your data before and after the installation, unlock your bootloader and install a custom recovery, update your custom ROM regularly and respect the rights and credits of the developers and contributors.

                      - -

                      If you follow these steps and precautions, you can upgrade your phone with custom ROMs and enjoy a new and improved Android experience. You can also learn new skills and knowledge about Android development and join a community of enthusiasts who share your passion.

                      - -

                      To download a custom firmware for your Huawei Y210 2010 and start upgrading your phone today, click on the link below:

                      - -Huawei Y210 2010 Custom Firmware Download

                      679dcb208e
                      -
                      -
                      \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Dota 1 for Android The Original Defense of the Ancients Game.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Dota 1 for Android The Original Defense of the Ancients Game.md deleted file mode 100644 index 255e7175bb8d916d2679546d3e936854c740b447..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Dota 1 for Android The Original Defense of the Ancients Game.md +++ /dev/null @@ -1,148 +0,0 @@ - -

                      How to Download and Play Dota 1 on Android Devices

                      -

                      Dota 1, also known as Defense of the Ancients, is a classic multiplayer online battle arena (MOBA) mod for the video game Warcraft III: Reign of Chaos and its expansion, The Frozen Throne. The objective of the game is for each team to destroy their opponents' Ancient, a heavily guarded structure at the opposing corner of the map, while defending their own. Players use powerful units known as heroes, and are assisted by allied teammates and AI-controlled fighters.

                      -

                      Despite being released in 2003, Dota 1 is still one of the most popular mods of any game, with millions of players and fans around the world. It is considered a catalyst for the MOBA genre, inspiring developers to create other games similar to it, such as League of Legends, Heroes of Newerth, and Dota 2. Dota 1 has also been featured in several esports tournaments, such as Blizzard Entertainment's BlizzCon and the Asian World Cyber Games.

                      -

                      dota 1 download apk


                      Download Zip 🗸🗸🗸 https://bltlly.com/2uOroH



                      -

                      One of the reasons why Dota 1 is still relevant today is that it can be played on various platforms, including Android devices. Playing Dota 1 on Android devices has many benefits, such as portability, convenience, accessibility, and compatibility. You can enjoy the game anytime and anywhere, without having to worry about a PC or an internet connection. You can also play with your friends or other players online, using Bluetooth or Wi-Fi.

                      -

                      If you are interested in playing Dota 1 on your Android device, you will need to download and install an apk file that contains the game. An apk file is a package file format that contains all the necessary files and data for an Android application. In this article, we will show you how to download and install Dota 1 apk on your Android device, as well as some features, comparisons, and system requirements of the game.
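Because an apk file is really just a ZIP archive with a fixed internal layout (it always contains an AndroidManifest.xml and compiled .dex code), you can do a quick sanity check on a downloaded file from your PC before copying it to the phone. The sketch below uses Python's standard library only; the file name dota1.apk is a hypothetical placeholder.

```python
# Minimal sanity check of a downloaded .apk (an .apk is a ZIP archive).
# The file name is a hypothetical placeholder.
import zipfile

apk_path = "dota1.apk"

if not zipfile.is_zipfile(apk_path):
    print("Not a valid apk/zip archive - do not install it.")
else:
    with zipfile.ZipFile(apk_path) as apk:
        names = apk.namelist()
        # Every well-formed apk carries a manifest and compiled code.
        print("has AndroidManifest.xml:", "AndroidManifest.xml" in names)
        print("has .dex code          :", any(n.endswith(".dex") for n in names))
        print("total entries          :", len(names))
```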

                      -

                      Features of Dota 1

                      -

                      Dota 1 has many features that make it a unique and enjoyable game to play. Here are some of them:

                      -

                      Gameplay and mechanics

                      -

                      Dota 1 is a team-based game that requires coordination, strategy, and skill. Each team consists of up to five players who control one hero each. There are over a hundred heroes to choose from, each with different abilities and roles. Heroes can level up their skills, buy items, and gain gold by killing enemies or creeps. The game has a day-night cycle that affects visibility and some abilities. The game also has various neutral creeps that can be killed for extra gold or experience, or controlled by some heroes.

                      -

                      Heroes and items

                      -

                      Dota 1 has a diverse and balanced roster of heroes that cater to different playstyles and preferences. Heroes are divided into three main attributes: strength, agility, and intelligence. Strength heroes are usually tanky and durable, agility heroes are usually fast and nimble, and intelligence heroes are usually smart and versatile. Heroes can also be classified into different roles, such as carry, support, ganker, initiator, pusher, or nuker.

                      -

                      Dota 1 also has a wide variety of items that can enhance or modify the abilities of heroes. Items can be bought from shops located in each base or in secret shops hidden in the map. Items can be combined to form more powerful items that have unique effects or abilities. Some items are consumable, such as potions or wards, while others are permanent, such as boots or swords.

                      -

                      Modes and maps

                      -

                      Dota 1 has several modes that can change the rules or conditions of the game. Some modes are standard, such as All Pick, Random Draft, or Single Draft, while others are custom, such as Reverse Captain's Mode, Death Match, or All Random All Mid. Modes can be chosen by the host of the game or by voting before the game starts.

                      -

                      Dota 1 has one main map that is used for most games, called the DotA Allstars map. It is a symmetrical map that has two bases, three lanes, and a river that divides the map. The map also has several neutral camps, runes, and secret areas that can provide advantages or disadvantages to the teams. The map is constantly updated and improved by the developers to keep the game fresh and balanced.

                      -


                      -

                      Graphics and sound

                      -

                      Dota 1 has simple but charming graphics that are based on the Warcraft III engine. The game has a colorful and cartoonish style that suits the fantasy theme of the game. The game also has various visual effects that add to the immersion and excitement of the game, such as spells, explosions, blood, and weather.

                      -

                      Dota 1 also has a rich and immersive sound design that enhances the gameplay and atmosphere of the game. The game has a variety of sound effects that correspond to the actions and events in the game, such as attacks, spells, deaths, and announcer voices. The game also has a dynamic and epic soundtrack that changes depending on the situation and mood of the game. The game also has voice acting for each hero that adds personality and humor to the game.

                      -

                      Comparison of Dota 1 and Dota 2

                      -

                      Dota 1 and Dota 2 are both popular and influential games in the MOBA genre, but they have some similarities and differences that make them distinct from each other. Here are some of them:

                      -

                      Similarities and differences

                      -

                      Dota 1 and Dota 2 share many similarities in terms of gameplay, mechanics, heroes, items, modes, and maps. They both have the same objective, rules, and format of the game. They both have over a hundred heroes to choose from, each with their own abilities and roles. They both have a wide variety of items that can enhance or modify the abilities of heroes. They both have several modes that can change the rules or conditions of the game. They both have one main map that is used for most games.

                      -

                      However, Dota 1 and Dota 2 also differ in graphics, sound, interface, features, updates, and community. Dota 2 runs on a newer and more powerful engine, so its graphics are more detailed and realistic, and its sound design is richer, with more sound effects, music tracks, voice lines, and languages. Its interface is more user-friendly and customizable, with more options, settings, menus, and hotkeys, and it offers features Dota 1 lacks, such as ranked matchmaking, custom games, cosmetics, replays, spectating, and coaching. Because it is developed and supported by a professional company, Valve Corporation, Dota 2 also receives more frequent and consistent updates, and it has a larger and more active community of players, fans, streamers, and tournaments with bigger prize pools.

                      -

                      Pros and cons of each game

                      -

                      Dota 1 and Dota 2 both have their own pros and cons that make them appealing or unappealing to different players. Here are some of them:

                      -
                  | | Dota 1 | Dota 2 |
                  | --- | --- | --- |
                  | Pros | Nostalgic and classic feel; simple and straightforward gameplay; low system requirements; compatible with various platforms | Modern and realistic look; complex and diverse gameplay; smooth and flexible interface; more features and options; more frequent and consistent updates; larger and more active community |
                  | Cons | Outdated and cartoonish graphics; limited and repetitive sound design; clunky and rigid interface; fewer features and options; less frequent and consistent updates; smaller and less active community | Demanding and heavy graphics; high system requirements; overwhelming and noisy sound design; exclusive to the Steam platform |

                  Personal preference and nostalgia factor

                  Ultimately, the choice between Dota 1 and Dota 2 depends on the personal preference and nostalgia factor of each player. Some players may prefer Dota 1 because they grew up with it, or because they enjoy its simplicity and charm. Some players may prefer Dota 2 because they want to experience the latest and greatest in the genre, or because they appreciate its complexity and variety. Some players may like both games equally, or switch between them depending on their mood or situation.

                  There is no right or wrong answer when it comes to choosing between Dota 1 and Dota 2. Both games have their own merits and flaws, and both games have their own loyal fanbases and critics. The important thing is to have fun and respect each other's opinions.

                  -

                  System requirements for Dota 1

                  How to download and install Dota 1 apk on Android devices

                  -

                  Now that you know the features, comparisons, and system requirements of Dota 1, you may be wondering how to download and install the game on your Android device. The process is simple and easy, as long as you follow these steps:

                  -
                  1. Download the Dota 1 apk file from a trusted and secure source. You can use the link provided on the Dota 1 apk website, or search for other sources online. Make sure that the file is compatible with your device and free of viruses or malware.
                  2. Enable the installation of apps from unknown sources on your device. To do this, go to your device settings, then security, then unknown sources. Check the box or toggle the switch to allow the installation of apps from sources other than the Google Play Store.
                  3. Locate the downloaded Dota 1 apk file in your device storage using a file manager or browser, and tap on the file to start the installation (or install it from a computer with adb, as shown in the sketch after this list). You may need to grant some permissions or accept some terms and conditions before proceeding.
                  4. Wait for the installation to finish, then launch the game from your app drawer or home screen. You may need to adjust some settings or preferences before playing, such as language, graphics, sound, or controls.
                  5. Enjoy playing Dota 1 on your Android device!
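                  If you would rather install from a computer over USB instead of browsing for the file on the phone, the Android Debug Bridge can do it with `adb install`. The sketch below is only an illustration: it assumes `adb` (from Android platform-tools) is on your PATH, that USB debugging is enabled on the device, and that `dota1.apk` is a placeholder path for your downloaded file.

```python
import shutil
import subprocess

APK_PATH = "dota1.apk"  # placeholder: point this at the file you downloaded


def sideload(apk_path: str) -> None:
    """Install an apk on a USB-connected Android device via adb."""
    if shutil.which("adb") is None:
        raise RuntimeError("adb not found on PATH; install Android platform-tools first")
    # -r keeps app data if an older install is already on the device
    subprocess.run(["adb", "install", "-r", apk_path], check=True)


if __name__ == "__main__":
    sideload(APK_PATH)
```

                  If more than one device or emulator is attached, add `-s <device-serial>` to the adb command to choose which one receives the install.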

                  Conclusion

                  -

                  Dota 1 is a classic and legendary game that has shaped and influenced the MOBA genre for decades. It is a game that can be enjoyed by anyone, regardless of their age, background, or skill level. It is a game that can be played on various platforms, including Android devices.

                  -

                  If you are a fan of Dota 1, or if you are curious about the game, you can download and install Dota 1 apk on your Android device easily and quickly. You can experience the game's features, compare it with Dota 2, and check if your system meets the requirements for the game. You can also play with your friends or other players online, using Bluetooth or Wi-Fi.

                  -

                  Dota 1 is a game that will never die, as long as there are players who love and support it. It is a game that will always have a place in the hearts and minds of gamers around the world. It is a game that deserves to be played and appreciated by everyone.

                  -

                  So what are you waiting for? Download Dota 1 apk now and join the millions of players who are still playing this amazing game!

                  -

                  FAQs

                  -

                  Here are some frequently asked questions about Dota 1 and Dota 1 apk:

                  -

                  Is Dota 1 free to play?

                  -

                  Yes, Dota 1 is free to play for anyone who owns Warcraft III: Reign of Chaos and its expansion, The Frozen Throne. You can download the latest version of Dota 1 from the official website of DotA Allstars, or from other sources online.

                  -

                  Is Dota 1 legal to play?

                  -

                  Yes, Dota 1 is legal to play, as long as you have a legitimate copy of Warcraft III: Reign of Chaos and its expansion, The Frozen Throne. However, some countries or regions may have different laws or regulations regarding online gaming or modding, so you should check with your local authorities before playing.

                  -

                  Is Dota 1 safe to play?

                  -

                  Yes, Dota 1 is safe to play, as long as you download it from a trusted and secure source. You should also scan your system for viruses or malware before and after playing. You should also be careful when playing online, as some players may try to cheat, hack, or scam you.

                  -

                  Is Dota 1 better than Dota 2?

                  -

                  This is a subjective question that depends on your personal preference and nostalgia factor. Some players may prefer Dota 1 because they grew up with it, or because they enjoy its simplicity and charm. Some players may prefer Dota 2 because they want to experience the latest and greatest in the genre, or because they appreciate its complexity and variety. Some players may like both games equally, or switch between them depending on their mood or situation.

                  -

                  How can I play Dota 1 online?

                  -

                  You can play Dota 1 online with your friends or other players in several ways. One option is a local network connection over Wi-Fi or Bluetooth. Another is an online platform, such as Battle.net, Garena, RGC, or Steam. You may need to create an account or register before playing online.

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Adobe Acrobat Dc Pro Serial Number List.md b/spaces/tioseFevbu/cartoon-converter/scripts/Adobe Acrobat Dc Pro Serial Number List.md deleted file mode 100644 index 562effdfac7bd072b10d683b895a5f25f9b57caf..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Adobe Acrobat Dc Pro Serial Number List.md +++ /dev/null @@ -1,21 +0,0 @@ -
                  -

                  How to Find Your Adobe Acrobat Dc Pro Serial Number

                  -

                  If you have purchased Adobe Acrobat Dc Pro, you will need a serial number to activate, reinstall, or upgrade your product. A serial number is a 24-digit numeric code that is unique to your product. But how can you find your serial number if you have lost it or don't remember it? Here are some ways to find your Adobe Acrobat Dc Pro serial number.

                  -




                  -

                  Method 1: Check Your Adobe Account

                  -

                  One of the easiest ways to find your serial number is to log in to your Adobe account at https://account.adobe.com/products. There you can see all of your registered products, with the serial number listed next to each one[^1^]. If you purchased Adobe Acrobat Dc Pro from Adobe.com or registered your product online, you should be able to find your serial number here.

                  -

                  Method 2: Check Your Email or Product Box

                  -

                  If you purchased Adobe Acrobat Dc Pro from a store or online retailer, you may have received a redemption code or a serial number via email or on an insert card inside the product box. A redemption code is a 24-digit alphanumeric code that you can use to obtain your serial number online. To redeem your code, go to https://redeem.licenses.adobe.com/getserial and follow the instructions[^1^]. If you received a serial number directly, you can use it to activate your product without redeeming a code.

                  -

                  Method 3: Check Your Prepaid Card

                  -

                  If you purchased Adobe Acrobat Dc Pro using a prepaid card, you can find your redemption code beneath the scratch-off foil on the back of the card. You can use this code to obtain your serial number and download your product online. To redeem your code, go to https://redeem.licenses.adobe.com/getserial and follow the instructions[^1^].

                  -

                  Method 4: Check Your Volume License

                  -

                  If you have a volume license for Adobe Acrobat Dc Pro, you can find your serial number on the Adobe Licensing Website. To access this website, go to https://licensing.adobe.com and sign in with your Adobe ID and password. Here you can see all your volume license products and their serial numbers[^1^]. You can also register your product here if you haven't done so already.

                  -

                  -

                  Troubleshooting Tips

                  -

                  If none of the above methods work for you, or if you have an invalid or revoked serial number, here are some tips to troubleshoot your issue:

                  -
                    -
                  • If a Creative Cloud app asks for your serial number, it means that you have installed the subscription version of Acrobat Dc instead of the standalone version. Creative Cloud apps don't require serial numbers, so you need to uninstall the subscription version and install the standalone version from https://helpx.adobe.com/download-install/kb/downloaded-older-app.html [^2^].
                  • If you have a Student or Teacher Edition of Adobe Acrobat Dc Pro, you may have received a redemption code instead of a serial number. You need to redeem this code online to obtain your serial number. For detailed instructions, see https://helpx.adobe.com/download-install/kb/student-teacher-edition-redemption-code.html [^1^].
                  • If you have purchased Adobe Acrobat Dc Pro from Amazon, you may have received a download link instead of a redemption code or a serial number. You need to download the product from this link and then activate it with your Amazon account. For detailed instructions, see https://helpx.adobe

                    -
                    -
                    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Alfasoft English For Everyone (PC DVD) Suomeksi Download Pc.md b/spaces/tioseFevbu/cartoon-converter/scripts/Alfasoft English For Everyone (PC DVD) Suomeksi Download Pc.md deleted file mode 100644 index 1f5e205d320e800c2dd71c1e92f082730b7488ef..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Alfasoft English For Everyone (PC DVD) Suomeksi Download Pc.md +++ /dev/null @@ -1,14 +0,0 @@ - -

                    Learn English with Alfasoft English For Everyone (PC DVD) Suomeksi

                    -

                    Do you want to learn English in a fun and easy way? Do you want to improve your vocabulary, grammar, and pronunciation? Do you want to access high-quality audio and interactive exercises online and offline? If you answered yes to any of these questions, then Alfasoft English For Everyone (PC DVD) Suomeksi is the perfect choice for you!

                    -




                    -

                    Alfasoft English For Everyone (PC DVD) Suomeksi is a comprehensive and innovative course that teaches you English from beginner to advanced level. It is based on the popular DK English For Everyone series, which uses graphics and pictures instead of wordy explanations to make learning English simple and memorable. You will also get access to almost 30 hours of free audio from native English speakers, as well as a range of practice books and a mobile app to continue your learning on the go.

                    -

                    Alfasoft English For Everyone (PC DVD) Suomeksi is specially designed for Finnish speakers who want to learn English. It includes translations of key words and phrases, as well as cultural notes and tips to help you avoid common mistakes. You will also learn about the differences between British and American English, as well as other varieties of English spoken around the world.

                    -

                    Whether you want to learn English for travel, work, study, or personal interest, Alfasoft English For Everyone (PC DVD) Suomeksi will help you achieve your goals. Order your copy today and start your English learning journey now!

                    - -

                    Another benefit of learning English with Alfasoft English For Everyone (PC DVD) Suomeksi is that you can explore the world with confidence. English is spoken in many countries as an official or second language, and it is also widely used as a common language among travellers and tourists. By learning English, you can communicate with people from different cultures and backgrounds, and discover new places and experiences. You can also enjoy the rich and diverse literature, music, art, and cinema that the English-speaking world has to offer.

                    -

                    Moreover, learning English with Alfasoft English For Everyone (PC DVD) Suomeksi can help you access world-class education systems and establishments. Many of the top universities in the world are located in English-speaking countries, such as the United States, the United Kingdom, Canada, and Australia. These universities offer a variety of courses and degrees in different fields and disciplines, and they attract students from all over the world. By learning English, you can improve your chances of getting admitted to these prestigious institutions, and pursue your academic goals.

                    -

                    Finally, learning English with Alfasoft English For Everyone (PC DVD) Suomeksi can increase your cognitive ability and improve your communication skills. Studies have shown that learning a second language can enhance your memory, creativity, problem-solving, and critical thinking skills. It can also delay the onset of age-related cognitive decline and dementia. Furthermore, learning English can help you express yourself clearly and effectively in various situations and contexts. You can also develop your intercultural competence and empathy by learning about different perspectives and values.

                    -

                    -
                    -
                    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Dogma 3 Full Movie In English Free Download !!EXCLUSIVE!!.md b/spaces/tioseFevbu/cartoon-converter/scripts/Dogma 3 Full Movie In English Free Download !!EXCLUSIVE!!.md deleted file mode 100644 index e8c404891cf87da1ce2a51f62c058e96a0d6ffdb..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Dogma 3 Full Movie In English Free Download !!EXCLUSIVE!!.md +++ /dev/null @@ -1,14 +0,0 @@ - -

                    Dogma 3: The Return of the Metatron - How to Watch Online for Free

                    -

                    Dogma 3: The Return of the Metatron is the long-awaited sequel to the cult classic comedy Dogma (1999), directed by Kevin Smith and starring Ben Affleck, Matt Damon, Linda Fiorentino, Alan Rickman, Chris Rock, and many more. The film follows the adventures of two fallen angels, Bartleby and Loki, who are resurrected by a mysterious force and seek to undo the work of God by triggering the apocalypse. Along the way, they encounter a motley crew of characters, including a descendant of Jesus Christ, a renegade angel, a demon-hunting nun, and Jay and Silent Bob.

                    -




                    -

                    If you are a fan of Dogma and want to watch Dogma 3 online for free, you might be wondering where to find it. Unfortunately, Dogma 3 is not available on any official streaming platform or website. The film was made without the permission or involvement of Kevin Smith, who owns the rights to Dogma and its characters. Smith has denounced Dogma 3 as a "bootleg" and a "rip-off" of his original work. He has also warned fans not to watch it or download it, as it might contain viruses or malware.

                    -

                    However, if you are still curious about Dogma 3 and want to take the risk, there are some unofficial sources that claim to offer the full movie in English for free download. One of them is YouTube[^2^], where a user named Dash has uploaded a video titled "[4K] Dogma (1999) full movie with subtitles". The video is actually Dogma 3 with a misleading title and thumbnail. It has poor video quality and hardcoded subtitles in an unknown language. Another source is Fossbytes[^1^], a website that lists 12 free movie sites that don't ask for sign up. One of them is Popcornflix[^1^], which allegedly has Dogma 3 in its library. However, Popcornflix is not available in all regions and might require a VPN to access it.

                    -

                    Therefore, we do not recommend watching or downloading Dogma 3 from any of these sources. Not only are they illegal and unethical, but they might also harm your device or compromise your privacy. The best way to enjoy Dogma is to watch the original film on a legitimate platform like Amazon Prime Video or iTunes. You can also check out Kevin Smith's other movies and podcasts for more laughs and insights.

                    - -

                    Dogma 3: The Return of the Metatron is not the only unauthorized sequel to Dogma that has been made. In 2017, a fan-made film called Dogma 2: The Ascension was released on YouTube. The film was written and directed by Max Landis, the son of filmmaker John Landis. It featured a new cast of actors playing the roles of Bartleby, Loki, Bethany, Rufus, Serendipity, and Metatron. The plot involved Bartleby and Loki being resurrected again by God to stop a rogue angel named Gabriel from destroying the world. The film received mixed reviews from fans and critics, and was also disowned by Kevin Smith.

                    -

                    -

                    Smith has expressed his interest in making an official sequel to Dogma in the past, but he has faced several obstacles. One of them is the ownership of the film rights, which belong to Harvey Weinstein and Miramax. Smith has said that he would not work with Weinstein again after the sexual abuse allegations against him. Another obstacle is the budget and distribution of the film, which would require studio backing or a crowdfunding campaign. Smith has also said that he would need to get the original cast back together, which might be difficult due to their busy schedules and personal issues.

                    -

                    Therefore, it is unlikely that we will see a real Dogma 2 or Dogma 3 anytime soon. However, Smith has not given up on the idea completely. He has said that he might revisit the Dogma universe in other forms of media, such as comics, podcasts, or animated shows. He has also hinted that some of his upcoming projects might have connections to Dogma and its characters. For example, his horror comedy Killroy Was Here (2023) features a cameo by Jay and Silent Bob. He is also working on Clerks III (2024), which might have some references to Dogma as well.

                    -
                    -
                    \ No newline at end of file diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py deleted file mode 100644 index 93452b64696dc9b2cd2a347b8051729864bf9510..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -from torch.utils.data.sampler import BatchSampler - - -class IterationBasedBatchSampler(BatchSampler): - """ - Wraps a BatchSampler, resampling from it until - a specified number of iterations have been sampled - """ - - def __init__(self, batch_sampler, num_iterations, start_iter=0): - self.batch_sampler = batch_sampler - self.num_iterations = num_iterations - self.start_iter = start_iter - - def __iter__(self): - iteration = self.start_iter - while iteration <= self.num_iterations: - # if the underlying sampler has a set_epoch method, like - # DistributedSampler, used for making each process see - # a different split of the dataset, then set it - if hasattr(self.batch_sampler.sampler, "set_epoch"): - self.batch_sampler.sampler.set_epoch(iteration) - for batch in self.batch_sampler: - iteration += 1 - if iteration > self.num_iterations: - break - yield batch - - def __len__(self): - return self.num_iterations diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py deleted file mode 100644 index 6277a97fe4874abfe9e3e6434d6012c5f41f8418..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - pretrained=None, - backbone=dict( - frozen_stages=-1, zero_init_residual=False, norm_cfg=norm_cfg), - neck=dict(norm_cfg=norm_cfg), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=norm_cfg), - mask_head=dict(norm_cfg=norm_cfg))) -# optimizer -optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) -optimizer_config = dict(_delete_=True, grad_clip=None) -# learning policy -lr_config = dict(warmup_ratio=0.1, step=[65, 71]) -runner = dict(type='EpochBasedRunner', max_epochs=73) diff --git a/spaces/tryolabs/norfair-demo/custom_models/yolo.py b/spaces/tryolabs/norfair-demo/custom_models/yolo.py deleted file mode 100644 index 67a03de81fb8ce9d099f588172608ccde573874d..0000000000000000000000000000000000000000 --- a/spaces/tryolabs/norfair-demo/custom_models/yolo.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -from typing import List, Optional, Union - -import numpy as np -import torch -from norfair import Detection - - -class YOLO: - def __init__(self, model_path: str, device: Optional[str] = None): - if device is not None and "cuda" in device and not torch.cuda.is_available(): - raise Exception("Selected device='cuda', but cuda is not available to Pytorch.") - # automatically set device if its None - elif device is None: - device 
= "cuda:0" if torch.cuda.is_available() else "cpu" - - if not os.path.exists(model_path): - os.system( - f"wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/{os.path.basename(model_path)} -O {model_path}" - ) - - # load model - try: - self.model = torch.hub.load("WongKinYiu/yolov7", "custom", model_path) - except: - raise Exception("Failed to load model from {}".format(model_path)) - - def __call__( - self, - img: Union[str, np.ndarray], - conf_threshold: float = 0.25, - iou_threshold: float = 0.45, - image_size: int = 720, - classes: Optional[List[int]] = None, - ) -> torch.tensor: - - self.model.conf = conf_threshold - self.model.iou = iou_threshold - if classes is not None: - self.model.classes = classes - detections = self.model(img, size=image_size) - return detections - - -def yolo_detections_to_norfair_detections( - yolo_detections: torch.tensor, track_points: str = "centroid" # bbox or centroid -) -> List[Detection]: - """convert detections_as_xywh to norfair detections""" - norfair_detections: List[Detection] = [] - - if track_points == "centroid": - detections_as_xywh = yolo_detections.xywh[0] - for detection_as_xywh in detections_as_xywh: - centroid = np.array( - [ - [detection_as_xywh[0].item(), detection_as_xywh[1].item()], - [detection_as_xywh[0].item(), detection_as_xywh[1].item()], - ] - ) - scores = np.array([detection_as_xywh[4].item(), detection_as_xywh[4].item()]) - norfair_detections.append(Detection(points=centroid, scores=scores)) - elif track_points == "bbox": - detections_as_xyxy = yolo_detections.xyxy[0] - for detection_as_xyxy in detections_as_xyxy: - bbox = np.array( - [ - [detection_as_xyxy[0].item(), detection_as_xyxy[1].item()], - [detection_as_xyxy[2].item(), detection_as_xyxy[3].item()], - ] - ) - scores = np.array([detection_as_xyxy[4].item(), detection_as_xyxy[4].item()]) - norfair_detections.append(Detection(points=bbox, scores=scores)) - - return norfair_detections diff --git a/spaces/trysem/image-matting-app/ppmatting/metrics/metric.py b/spaces/trysem/image-matting-app/ppmatting/metrics/metric.py deleted file mode 100644 index 2784dcf20fcffeadc326ad00d9b6a74d07ad58cf..0000000000000000000000000000000000000000 --- a/spaces/trysem/image-matting-app/ppmatting/metrics/metric.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Grad and Conn is refer to https://github.com/yucornetto/MGMatting/blob/main/code-base/utils/evaluate.py -# Output of `Grad` is sightly different from the MATLAB version provided by Adobe (less than 0.1%) -# Output of `Conn` is smaller than the MATLAB version (~5%, maybe MATLAB has a different algorithm) -# So do not report results calculated by these functions in your paper. -# Evaluate your inference with the MATLAB file `DIM_evaluation_code/evaluate.m`. 
- -import cv2 -import numpy as np -from scipy.ndimage import convolve -from scipy.special import gamma -from skimage.measure import label - - -class MSE: - """ - Only calculate the unknown region if trimap provided. - """ - - def __init__(self): - self.mse_diffs = 0 - self.count = 0 - - def update(self, pred, gt, trimap=None): - """ - update metric. - Args: - pred (np.ndarray): The value range is [0., 255.]. - gt (np.ndarray): The value range is [0, 255]. - trimap (np.ndarray, optional) The value is in {0, 128, 255}. Default: None. - """ - if trimap is None: - trimap = np.ones_like(gt) * 128 - if not (pred.shape == gt.shape == trimap.shape): - raise ValueError( - 'The shape of `pred`, `gt` and `trimap` should be equal. ' - 'but they are {}, {} and {}'.format(pred.shape, gt.shape, - trimap.shape)) - pred[trimap == 0] = 0 - pred[trimap == 255] = 255 - - mask = trimap == 128 - pixels = float(mask.sum()) - pred = pred / 255. - gt = gt / 255. - diff = (pred - gt) * mask - mse_diff = (diff**2).sum() / pixels if pixels > 0 else 0 - - self.mse_diffs += mse_diff - self.count += 1 - - return mse_diff - - def evaluate(self): - mse = self.mse_diffs / self.count if self.count > 0 else 0 - return mse - - -class SAD: - """ - Only calculate the unknown region if trimap provided. - """ - - def __init__(self): - self.sad_diffs = 0 - self.count = 0 - - def update(self, pred, gt, trimap=None): - """ - update metric. - Args: - pred (np.ndarray): The value range is [0., 255.]. - gt (np.ndarray): The value range is [0., 255.]. - trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. - """ - if trimap is None: - trimap = np.ones_like(gt) * 128 - if not (pred.shape == gt.shape == trimap.shape): - raise ValueError( - 'The shape of `pred`, `gt` and `trimap` should be equal. ' - 'but they are {}, {} and {}'.format(pred.shape, gt.shape, - trimap.shape)) - pred[trimap == 0] = 0 - pred[trimap == 255] = 255 - - mask = trimap == 128 - pred = pred / 255. - gt = gt / 255. - diff = (pred - gt) * mask - sad_diff = (np.abs(diff)).sum() - - sad_diff /= 1000 - self.sad_diffs += sad_diff - self.count += 1 - - return sad_diff - - def evaluate(self): - sad = self.sad_diffs / self.count if self.count > 0 else 0 - return sad - - -class Grad: - """ - Only calculate the unknown region if trimap provided. 
- Refer to: https://github.com/open-mlab/mmediting/blob/master/mmedit/core/evaluation/metrics.py - """ - - def __init__(self): - self.grad_diffs = 0 - self.count = 0 - - def gaussian(self, x, sigma): - return np.exp(-x**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi)) - - def dgaussian(self, x, sigma): - return -x * self.gaussian(x, sigma) / sigma**2 - - def gauss_filter(self, sigma, epsilon=1e-2): - half_size = np.ceil( - sigma * np.sqrt(-2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon))) - size = int(2 * half_size + 1) - - # create filter in x axis - filter_x = np.zeros((size, size)) - for i in range(size): - for j in range(size): - filter_x[i, j] = self.gaussian( - i - half_size, sigma) * self.dgaussian(j - half_size, sigma) - - # normalize filter - norm = np.sqrt((filter_x**2).sum()) - filter_x = filter_x / norm - filter_y = np.transpose(filter_x) - - return filter_x, filter_y - - def gauss_gradient(self, img, sigma): - filter_x, filter_y = self.gauss_filter(sigma) - img_filtered_x = cv2.filter2D( - img, -1, filter_x, borderType=cv2.BORDER_REPLICATE) - img_filtered_y = cv2.filter2D( - img, -1, filter_y, borderType=cv2.BORDER_REPLICATE) - return np.sqrt(img_filtered_x**2 + img_filtered_y**2) - - def update(self, pred, gt, trimap=None, sigma=1.4): - """ - update metric. - Args: - pred (np.ndarray): The value range is [0., 1.]. - gt (np.ndarray): The value range is [0, 255]. - trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. - sigma (float, optional): Standard deviation of the gaussian kernel. Default: 1.4. - """ - if trimap is None: - trimap = np.ones_like(gt) * 128 - if not (pred.shape == gt.shape == trimap.shape): - raise ValueError( - 'The shape of `pred`, `gt` and `trimap` should be equal. ' - 'but they are {}, {} and {}'.format(pred.shape, gt.shape, - trimap.shape)) - pred[trimap == 0] = 0 - pred[trimap == 255] = 255 - - gt = gt.squeeze() - pred = pred.squeeze() - gt = gt.astype(np.float64) - pred = pred.astype(np.float64) - gt_normed = np.zeros_like(gt) - pred_normed = np.zeros_like(pred) - cv2.normalize(gt, gt_normed, 1., 0., cv2.NORM_MINMAX) - cv2.normalize(pred, pred_normed, 1., 0., cv2.NORM_MINMAX) - - gt_grad = self.gauss_gradient(gt_normed, sigma).astype(np.float32) - pred_grad = self.gauss_gradient(pred_normed, sigma).astype(np.float32) - - grad_diff = ((gt_grad - pred_grad)**2 * (trimap == 128)).sum() - - grad_diff /= 1000 - self.grad_diffs += grad_diff - self.count += 1 - - return grad_diff - - def evaluate(self): - grad = self.grad_diffs / self.count if self.count > 0 else 0 - return grad - - -class Conn: - """ - Only calculate the unknown region if trimap provided. - Refer to: Refer to: https://github.com/open-mlab/mmediting/blob/master/mmedit/core/evaluation/metrics.py - """ - - def __init__(self): - self.conn_diffs = 0 - self.count = 0 - - def update(self, pred, gt, trimap=None, step=0.1): - """ - update metric. - Args: - pred (np.ndarray): The value range is [0., 1.]. - gt (np.ndarray): The value range is [0, 255]. - trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. - step (float, optional): Step of threshold when computing intersection between - `gt` and `pred`. Default: 0.1. - """ - if trimap is None: - trimap = np.ones_like(gt) * 128 - if not (pred.shape == gt.shape == trimap.shape): - raise ValueError( - 'The shape of `pred`, `gt` and `trimap` should be equal. 
' - 'but they are {}, {} and {}'.format(pred.shape, gt.shape, - trimap.shape)) - pred[trimap == 0] = 0 - pred[trimap == 255] = 255 - - gt = gt.squeeze() - pred = pred.squeeze() - gt = gt.astype(np.float32) / 255 - pred = pred.astype(np.float32) / 255 - - thresh_steps = np.arange(0, 1 + step, step) - round_down_map = -np.ones_like(gt) - for i in range(1, len(thresh_steps)): - gt_thresh = gt >= thresh_steps[i] - pred_thresh = pred >= thresh_steps[i] - intersection = (gt_thresh & pred_thresh).astype(np.uint8) - - # connected components - _, output, stats, _ = cv2.connectedComponentsWithStats( - intersection, connectivity=4) - # start from 1 in dim 0 to exclude background - size = stats[1:, -1] - - # largest connected component of the intersection - omega = np.zeros_like(gt) - if len(size) != 0: - max_id = np.argmax(size) - # plus one to include background - omega[output == max_id + 1] = 1 - - mask = (round_down_map == -1) & (omega == 0) - round_down_map[mask] = thresh_steps[i - 1] - round_down_map[round_down_map == -1] = 1 - - gt_diff = gt - round_down_map - pred_diff = pred - round_down_map - # only calculate difference larger than or equal to 0.15 - gt_phi = 1 - gt_diff * (gt_diff >= 0.15) - pred_phi = 1 - pred_diff * (pred_diff >= 0.15) - - conn_diff = np.sum(np.abs(gt_phi - pred_phi) * (trimap == 128)) - - conn_diff /= 1000 - self.conn_diffs += conn_diff - self.count += 1 - - return conn_diff - - def evaluate(self): - conn = self.conn_diffs / self.count if self.count > 0 else 0 - return conn diff --git a/spaces/tsi-org/LLaVA/llava/model/multimodal_encoder/clip_encoder.py b/spaces/tsi-org/LLaVA/llava/model/multimodal_encoder/clip_encoder.py deleted file mode 100644 index dbb9015b0fc9fa93483ba77cc303b793e86c36fc..0000000000000000000000000000000000000000 --- a/spaces/tsi-org/LLaVA/llava/model/multimodal_encoder/clip_encoder.py +++ /dev/null @@ -1,78 +0,0 @@ -import torch -import torch.nn as nn - -from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig - - -class CLIPVisionTower(nn.Module): - def __init__(self, vision_tower, args, delay_load=False): - super().__init__() - - self.is_loaded = False - - self.vision_tower_name = vision_tower - self.select_layer = args.mm_vision_select_layer - self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch') - - if not delay_load: - self.load_model() - else: - self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name) - - def load_model(self): - self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name) - self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name) - self.vision_tower.requires_grad_(False) - - self.is_loaded = True - - def feature_select(self, image_forward_outs): - image_features = image_forward_outs.hidden_states[self.select_layer] - if self.select_feature == 'patch': - image_features = image_features[:, 1:] - elif self.select_feature == 'cls_patch': - image_features = image_features - else: - raise ValueError(f'Unexpected select feature: {self.select_feature}') - return image_features - - @torch.no_grad() - def forward(self, images): - if type(images) is list: - image_features = [] - for image in images: - image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True) - image_feature = self.feature_select(image_forward_out).to(image.dtype) - image_features.append(image_feature) - else: - image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), 
output_hidden_states=True) - image_features = self.feature_select(image_forward_outs).to(images.dtype) - - return image_features - - @property - def dummy_feature(self): - return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype) - - @property - def dtype(self): - return self.vision_tower.dtype - - @property - def device(self): - return self.vision_tower.device - - @property - def config(self): - if self.is_loaded: - return self.vision_tower.config - else: - return self.cfg_only - - @property - def hidden_size(self): - return self.config.hidden_size - - @property - def num_patches(self): - return (self.config.image_size // self.config.patch_size) ** 2 diff --git a/spaces/ttt246/brain/Brain/src/model/__init__.py b/spaces/ttt246/brain/Brain/src/model/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/typesdigital/YoutubeVideotoText/README.md b/spaces/typesdigital/YoutubeVideotoText/README.md deleted file mode 100644 index 1b93689013fe7bd1ed51e2efa870a561276cea3d..0000000000000000000000000000000000000000 --- a/spaces/typesdigital/YoutubeVideotoText/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: YoutubeVideotoText -emoji: 👁 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 3.43.2 -app_file: app.py -pinned: false -license: cc-by-2.5 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/ArtCAM 2016 Free Download With Crack ((NEW)).md b/spaces/usbethFlerru/sovits-modelsV2/example/ArtCAM 2016 Free Download With Crack ((NEW)).md deleted file mode 100644 index c59d72973a524ef090c9c32077259bdbb278d5ca..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/ArtCAM 2016 Free Download With Crack ((NEW)).md +++ /dev/null @@ -1,20 +0,0 @@ -

                    ArtCAM 2016 Free Download With Crack


                    DOWNLOADhttps://urlcod.com/2uyWEe



                    -
                      -Autodesk ArtCAM 2016 free download. Photo & Graphics tools for beginners. Autodesk ArtCAM 2016 review – Autodesk ArtCAM is a new version of the well-known graphics and art software, designed for 3D art creation by digital artists. It is considered one of the best 3D modeling programs; the third screen is the control screen, where you can select the axis of rotation and the view. ArtCAM 2016 Serial Number is not valid. Software category: AutoCAD 15 & AutoCAD LT 15 – Learning AutoCAD and AutoCAD LT are the software programs that will help you design buildings and industrial structures for the construction industry. For Windows.
                    -
                    -
                    -

                    diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Ashes Cricket 2013-RELOADED Download the Game in Minutes Without Any Hassle.md b/spaces/usbethFlerru/sovits-modelsV2/example/Ashes Cricket 2013-RELOADED Download the Game in Minutes Without Any Hassle.md deleted file mode 100644 index 11a56627637c87846dfb76223fde5a4111fd2cc2..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Ashes Cricket 2013-RELOADED Download the Game in Minutes Without Any Hassle.md +++ /dev/null @@ -1,6 +0,0 @@ -

                    Ashes Cricket 2013-RELOADED No Survey No Password No Download


                    Downloadhttps://urlcod.com/2uyVHt



                    -
                    -
                    -

                    diff --git a/spaces/vaibhavarduino/anime-plus/e4e/models/stylegan2/model.py b/spaces/vaibhavarduino/anime-plus/e4e/models/stylegan2/model.py deleted file mode 100644 index fcb12af85669ab6fd7f79cb14ddbdf80b2fbd83d..0000000000000000000000000000000000000000 --- a/spaces/vaibhavarduino/anime-plus/e4e/models/stylegan2/model.py +++ /dev/null @@ -1,678 +0,0 @@ -import math -import random -import torch -from torch import nn -from torch.nn import functional as F - -if torch.cuda.is_available(): - from op.fused_act import FusedLeakyReLU, fused_leaky_relu - from op.upfirdn2d import upfirdn2d -else: - from op.fused_act_cpu import FusedLeakyReLU, fused_leaky_relu - from op.upfirdn2d_cpu import upfirdn2d - - -class PixelNorm(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, input): - return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) - - -def make_kernel(k): - k = torch.tensor(k, dtype=torch.float32) - - if k.ndim == 1: - k = k[None, :] * k[:, None] - - k /= k.sum() - - return k - - -class Upsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) * (factor ** 2) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad) - - return out - - -class Downsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) - - return out - - -class Blur(nn.Module): - def __init__(self, kernel, pad, upsample_factor=1): - super().__init__() - - kernel = make_kernel(kernel) - - if upsample_factor > 1: - kernel = kernel * (upsample_factor ** 2) - - self.register_buffer('kernel', kernel) - - self.pad = pad - - def forward(self, input): - out = upfirdn2d(input, self.kernel, pad=self.pad) - - return out - - -class EqualConv2d(nn.Module): - def __init__( - self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True - ): - super().__init__() - - self.weight = nn.Parameter( - torch.randn(out_channel, in_channel, kernel_size, kernel_size) - ) - self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) - - self.stride = stride - self.padding = padding - - if bias: - self.bias = nn.Parameter(torch.zeros(out_channel)) - - else: - self.bias = None - - def forward(self, input): - out = F.conv2d( - input, - self.weight * self.scale, - bias=self.bias, - stride=self.stride, - padding=self.padding, - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},' - f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' - ) - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None - ): - super().__init__() - - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - - if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - - else: - self.bias = None - - self.activation = activation - - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def 
forward(self, input): - if self.activation: - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul) - - else: - out = F.linear( - input, self.weight * self.scale, bias=self.bias * self.lr_mul - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' - ) - - -class ScaledLeakyReLU(nn.Module): - def __init__(self, negative_slope=0.2): - super().__init__() - - self.negative_slope = negative_slope - - def forward(self, input): - out = F.leaky_relu(input, negative_slope=self.negative_slope) - - return out * math.sqrt(2) - - -class ModulatedConv2d(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - demodulate=True, - upsample=False, - downsample=False, - blur_kernel=[1, 3, 3, 1], - ): - super().__init__() - - self.eps = 1e-8 - self.kernel_size = kernel_size - self.in_channel = in_channel - self.out_channel = out_channel - self.upsample = upsample - self.downsample = downsample - - if upsample: - factor = 2 - p = (len(blur_kernel) - factor) - (kernel_size - 1) - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 + 1 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1)) - - fan_in = in_channel * kernel_size ** 2 - self.scale = 1 / math.sqrt(fan_in) - self.padding = kernel_size // 2 - - self.weight = nn.Parameter( - torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) - ) - - self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) - - self.demodulate = demodulate - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, ' - f'upsample={self.upsample}, downsample={self.downsample})' - ) - - def forward(self, input, style): - batch, in_channel, height, width = input.shape - - style = self.modulation(style).view(batch, 1, in_channel, 1, 1) - weight = self.scale * self.weight * style - - if self.demodulate: - demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) - weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) - - weight = weight.view( - batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - - if self.upsample: - input = input.view(1, batch * in_channel, height, width) - weight = weight.view( - batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - weight = weight.transpose(1, 2).reshape( - batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size - ) - out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - out = self.blur(out) - - elif self.downsample: - input = self.blur(input) - _, _, height, width = input.shape - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - else: - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=self.padding, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - return out - - -class NoiseInjection(nn.Module): - def __init__(self): - super().__init__() - - self.weight = 
nn.Parameter(torch.zeros(1)) - - def forward(self, image, noise=None): - if noise is None: - batch, _, height, width = image.shape - noise = image.new_empty(batch, 1, height, width).normal_() - - return image + self.weight * noise - - -class ConstantInput(nn.Module): - def __init__(self, channel, size=4): - super().__init__() - - self.input = nn.Parameter(torch.randn(1, channel, size, size)) - - def forward(self, input): - batch = input.shape[0] - out = self.input.repeat(batch, 1, 1, 1) - - return out - - -class StyledConv(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=False, - blur_kernel=[1, 3, 3, 1], - demodulate=True, - ): - super().__init__() - - self.conv = ModulatedConv2d( - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=upsample, - blur_kernel=blur_kernel, - demodulate=demodulate, - ) - - self.noise = NoiseInjection() - # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) - # self.activate = ScaledLeakyReLU(0.2) - self.activate = FusedLeakyReLU(out_channel) - - def forward(self, input, style, noise=None): - out = self.conv(input, style) - out = self.noise(out, noise=noise) - # out = out + self.bias - out = self.activate(out) - - return out - - -class ToRGB(nn.Module): - def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - if upsample: - self.upsample = Upsample(blur_kernel) - - self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False) - self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) - - def forward(self, input, style, skip=None): - out = self.conv(input, style) - out = out + self.bias - - if skip is not None: - skip = self.upsample(skip) - - out = out + skip - - return out - - -class Generator(nn.Module): - def __init__( - self, - size, - style_dim, - n_mlp, - channel_multiplier=2, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - ): - super().__init__() - - self.size = size - - self.style_dim = style_dim - - layers = [PixelNorm()] - - for i in range(n_mlp): - layers.append( - EqualLinear( - style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu' - ) - ) - - self.style = nn.Sequential(*layers) - - self.channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - self.input = ConstantInput(self.channels[4]) - self.conv1 = StyledConv( - self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel - ) - self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) - - self.log_size = int(math.log(size, 2)) - self.num_layers = (self.log_size - 2) * 2 + 1 - - self.convs = nn.ModuleList() - self.upsamples = nn.ModuleList() - self.to_rgbs = nn.ModuleList() - self.noises = nn.Module() - - in_channel = self.channels[4] - - for layer_idx in range(self.num_layers): - res = (layer_idx + 5) // 2 - shape = [1, 1, 2 ** res, 2 ** res] - self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape)) - - for i in range(3, self.log_size + 1): - out_channel = self.channels[2 ** i] - - self.convs.append( - StyledConv( - in_channel, - out_channel, - 3, - style_dim, - upsample=True, - blur_kernel=blur_kernel, - ) - ) - - self.convs.append( - StyledConv( - out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel - ) - ) - - self.to_rgbs.append(ToRGB(out_channel, style_dim)) - - in_channel = out_channel - - self.n_latent = self.log_size * 2 - 2 - - def 
make_noise(self): - device = self.input.input.device - - noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)] - - for i in range(3, self.log_size + 1): - for _ in range(2): - noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device)) - - return noises - - def mean_latent(self, n_latent): - latent_in = torch.randn( - n_latent, self.style_dim, device=self.input.input.device - ) - latent = self.style(latent_in).mean(0, keepdim=True) - - return latent - - def get_latent(self, input): - return self.style(input) - - def forward( - self, - styles, - return_latents=False, - return_features=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - randomize_noise=True, - ): - if not input_is_latent: - styles = [self.style(s) for s in styles] - - if noise is None: - if randomize_noise: - noise = [None] * self.num_layers - else: - noise = [ - getattr(self.noises, f'noise_{i}') for i in range(self.num_layers) - ] - - if truncation < 1: - style_t = [] - - for style in styles: - style_t.append( - truncation_latent + truncation * (style - truncation_latent) - ) - - styles = style_t - - if len(styles) < 2: - inject_index = self.n_latent - - if styles[0].ndim < 3: - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: - latent = styles[0] - - else: - if inject_index is None: - inject_index = random.randint(1, self.n_latent - 1) - - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1) - - latent = torch.cat([latent, latent2], 1) - - out = self.input(latent) - out = self.conv1(out, latent[:, 0], noise=noise[0]) - - skip = self.to_rgb1(out, latent[:, 1]) - - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs - ): - out = conv1(out, latent[:, i], noise=noise1) - out = conv2(out, latent[:, i + 1], noise=noise2) - skip = to_rgb(out, latent[:, i + 2], skip) - - i += 2 - - image = skip - - if return_latents: - return image, latent - elif return_features: - return image, out - else: - return image, None - - -class ConvLayer(nn.Sequential): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - downsample=False, - blur_kernel=[1, 3, 3, 1], - bias=True, - activate=True, - ): - layers = [] - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - layers.append(Blur(blur_kernel, pad=(pad0, pad1))) - - stride = 2 - self.padding = 0 - - else: - stride = 1 - self.padding = kernel_size // 2 - - layers.append( - EqualConv2d( - in_channel, - out_channel, - kernel_size, - padding=self.padding, - stride=stride, - bias=bias and not activate, - ) - ) - - if activate: - if bias: - layers.append(FusedLeakyReLU(out_channel)) - - else: - layers.append(ScaledLeakyReLU(0.2)) - - super().__init__(*layers) - - -class ResBlock(nn.Module): - def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - self.conv1 = ConvLayer(in_channel, in_channel, 3) - self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True) - - self.skip = ConvLayer( - in_channel, out_channel, 1, downsample=True, activate=False, bias=False - ) - - def forward(self, input): - out = self.conv1(input) - out = self.conv2(out) - - skip = self.skip(input) - out = (out + skip) / math.sqrt(2) - - return out - - -class Discriminator(nn.Module): - def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 
3, 1]): - super().__init__() - - channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - convs = [ConvLayer(3, channels[size], 1)] - - log_size = int(math.log(size, 2)) - - in_channel = channels[size] - - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - - convs.append(ResBlock(in_channel, out_channel, blur_kernel)) - - in_channel = out_channel - - self.convs = nn.Sequential(*convs) - - self.stddev_group = 4 - self.stddev_feat = 1 - - self.final_conv = ConvLayer(in_channel + 1, channels[4], 3) - self.final_linear = nn.Sequential( - EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'), - EqualLinear(channels[4], 1), - ) - - def forward(self, input): - out = self.convs(input) - - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - - out = out.view(batch, -1) - out = self.final_linear(out) - - return out diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/backbones/__init__.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/backbones/__init__.py deleted file mode 100644 index 8339983905fb5d20bae42ba6f76fea75d278b1aa..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/backbones/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from .cgnet import CGNet -# from .fast_scnn import FastSCNN -from .hrnet import HRNet -from .mobilenet_v2 import MobileNetV2 -from .mobilenet_v3 import MobileNetV3 -from .resnest import ResNeSt -from .resnet import ResNet, ResNetV1c, ResNetV1d -from .resnext import ResNeXt -from .unet import UNet -from .vit import VisionTransformer -from .uniformer import UniFormer - -__all__ = [ - 'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', - 'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3', - 'VisionTransformer', 'UniFormer' -] diff --git a/spaces/w1zrd/MusicGen/audiocraft/quantization/__init__.py b/spaces/w1zrd/MusicGen/audiocraft/quantization/__init__.py deleted file mode 100644 index 836d6eb518978480c6b95d6f29ce4f84a9428793..0000000000000000000000000000000000000000 --- a/spaces/w1zrd/MusicGen/audiocraft/quantization/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
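# This package __init__ just re-exports the quantization building blocks so callers can
# import them as audiocraft.quantization.*: ResidualVectorQuantizer from .vq, plus the
# BaseQuantizer / DummyQuantizer interfaces and the QuantizedResult container from .base;
# the "flake8: noqa" marker below silences the unused-import warnings this produces.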
- -# flake8: noqa -from .vq import ResidualVectorQuantizer -from .base import BaseQuantizer, DummyQuantizer, QuantizedResult diff --git a/spaces/wasimmadha/entity-extraction/README.md b/spaces/wasimmadha/entity-extraction/README.md deleted file mode 100644 index 79fe2853be7254ab932318ed359cc16e989c80eb..0000000000000000000000000000000000000000 --- a/spaces/wasimmadha/entity-extraction/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Entity Extraction -emoji: 📚 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/weibinke/vits-simple-api/vits/text/english.py b/spaces/weibinke/vits-simple-api/vits/text/english.py deleted file mode 100644 index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000 --- a/spaces/weibinke/vits-simple-api/vits/text/english.py +++ /dev/null @@ -1,188 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - - -# Regular expression matching whitespace: - - -import re -import inflect -from unidecode import unidecode -import eng_to_ipa as ipa -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -# List of (ipa, lazy ipa) pairs: -_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('æ', 'e'), - ('ɑ', 'a'), - ('ɔ', 'o'), - ('ð', 'z'), - ('θ', 's'), - ('ɛ', 'e'), - ('ɪ', 'i'), - ('ʊ', 'u'), - ('ʒ', 'ʥ'), - ('ʤ', 'ʥ'), - ('ˈ', '↓'), -]] - -# List of (ipa, lazy ipa2) pairs: -_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ð', 'z'), - ('θ', 's'), - ('ʒ', 'ʑ'), - ('ʤ', 'dʑ'), - ('ˈ', '↓'), -]] - -# List of (ipa, ipa2) pairs -_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ʤ', 'dʒ'), - ('ʧ', 'tʃ') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text - - -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) - - -def english_to_ipa(text): - text = unidecode(text).lower() - text = expand_abbreviations(text) - text = normalize_numbers(text) - phonemes = ipa.convert(text) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_to_lazy_ipa(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def english_to_ipa2(text): - text = english_to_ipa(text) - text = 
mark_dark_l(text) - for regex, replacement in _ipa_to_ipa2: - text = re.sub(regex, replacement, text) - return text.replace('...', '…') - - -def english_to_lazy_ipa2(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa2: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/wwwwwwww2/bingo/src/pages/api/create.ts b/spaces/wwwwwwww2/bingo/src/pages/api/create.ts deleted file mode 100644 index cd7600055aafe4e47c154d530ac5efee86fca34c..0000000000000000000000000000000000000000 --- a/spaces/wwwwwwww2/bingo/src/pages/api/create.ts +++ /dev/null @@ -1,34 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch, debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' - -const API_ENDPOINT = 'https://www.bing.com/turing/conversation/create' -// const API_ENDPOINT = 'https://edgeservices.bing.com/edgesvc/turing/conversation/create'; - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const headers = createHeaders(req.cookies) - - res.setHeader('set-cookie', [headers.cookie, `BING_IP=${headers['x-forwarded-for']}`]) - - res.writeHead(200, { - 'Content-Type': 'application/json', - }) - - debug('headers', headers) - const response = await fetch(API_ENDPOINT, { method: 'GET', headers }) - .then((res) => res.text()) - - res.end(response) - } catch (e) { - console.log('error', e) - return res.end(JSON.stringify({ - result: { - value: 'UnauthorizedRequest', - message: `${e}` - } - })) - } -} diff --git a/spaces/xiangdy/chatGPT/modules/models/configuration_moss.py b/spaces/xiangdy/chatGPT/modules/models/configuration_moss.py deleted file mode 100644 index 9bad4396ecea6578c1628732d0ef077d8964d45d..0000000000000000000000000000000000000000 --- a/spaces/xiangdy/chatGPT/modules/models/configuration_moss.py +++ /dev/null @@ -1,118 +0,0 @@ -""" Moss model configuration""" - -from transformers.utils import logging -from transformers.configuration_utils import PretrainedConfig - - -logger = logging.get_logger(__name__) - - -class MossConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`MossModel`]. It is used to instantiate a - Moss model according to the specified arguments, defining the model architecture. Instantiating a configuration - with the defaults will yield a similar configuration to that of the Moss - [fnlp/moss-moon-003-base](https://huggingface.co/fnlp/moss-moon-003-base) architecture. Configuration objects - inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from - [`PretrainedConfig`] for more information. - - Args: - vocab_size (`int`, *optional*, defaults to 107008): - Vocabulary size of the Moss model. Defines the number of different tokens that can be represented by the - `inputs_ids` passed when calling [`MossModel`]. - n_positions (`int`, *optional*, defaults to 2048): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - n_embd (`int`, *optional*, defaults to 4096): - Dimensionality of the embeddings and hidden states. - n_layer (`int`, *optional*, defaults to 28): - Number of hidden layers in the Transformer encoder. - n_head (`int`, *optional*, defaults to 16): - Number of attention heads for each attention layer in the Transformer encoder. 
- rotary_dim (`int`, *optional*, defaults to 64): - Number of dimensions in the embedding that Rotary Position Embedding is applied to. - n_inner (`int`, *optional*, defaults to None): - Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd - activation_function (`str`, *optional*, defaults to `"gelu_new"`): - Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. - resid_pdrop (`float`, *optional*, defaults to 0.1): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - embd_pdrop (`int`, *optional*, defaults to 0.1): - The dropout ratio for the embeddings. - attn_pdrop (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention. - layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): - The epsilon to use in the layer normalization layers. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - use_cache (`bool`, *optional*, defaults to `True`): - Whether or not the model should return the last key/values attentions (not used by all models). - - Example: - - ```python - >>> from modeling_moss import MossModel - >>> from configuration_moss import MossConfig - - >>> # Initializing a moss-moon-003-base configuration - >>> configuration = MossConfig() - - >>> # Initializing a model (with random weights) from the configuration - >>> model = MossModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - - model_type = "moss" - attribute_map = { - "max_position_embeddings": "n_positions", - "hidden_size": "n_embd", - "num_attention_heads": "n_head", - "num_hidden_layers": "n_layer", - } - - def __init__( - self, - vocab_size=107008, - n_positions=2048, - n_ctx=2048, - n_embd=4096, - n_layer=28, - n_head=16, - rotary_dim=64, - n_inner=None, - activation_function="gelu_new", - resid_pdrop=0.0, - embd_pdrop=0.0, - attn_pdrop=0.0, - layer_norm_epsilon=1e-5, - initializer_range=0.02, - use_cache=True, - bos_token_id=106028, - eos_token_id=106068, - tie_word_embeddings=False, - **kwargs, - ): - self.vocab_size = vocab_size - self.n_ctx = n_ctx - self.n_positions = n_positions - self.n_embd = n_embd - self.n_layer = n_layer - self.n_head = n_head - self.n_inner = n_inner - self.rotary_dim = rotary_dim - self.activation_function = activation_function - self.resid_pdrop = resid_pdrop - self.embd_pdrop = embd_pdrop - self.attn_pdrop = attn_pdrop - self.layer_norm_epsilon = layer_norm_epsilon - self.initializer_range = initializer_range - self.use_cache = use_cache - - self.bos_token_id = bos_token_id - self.eos_token_id = eos_token_id - - super().__init__( - bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs - ) diff --git a/spaces/xin/PatentSolver/App/bin/FindTechnologies.py b/spaces/xin/PatentSolver/App/bin/FindTechnologies.py deleted file mode 100644 index da4afaab78de41bee535475cc4659b1a31a85f0c..0000000000000000000000000000000000000000 --- a/spaces/xin/PatentSolver/App/bin/FindTechnologies.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -* -import sys -import os -import math -import xlsxwriter -from textblob import TextBlob as tb - -class FindTechnologies(object): - - def __init__(self): - - print("Starting") - - def tf(word, blob): - return (float)(blob.noun_phrases.count(word)) / (float)(len(blob.noun_phrases)) - - - def 
n_containing(word, bloblist): - return sum(1 for blob in bloblist if word in blob.noun_phrases) - - - def idf(word, bloblist): - return math.log(len(bloblist) / (float)(1 + n_containing(word, bloblist))) - - - def tfidf(word, blob, bloblist): - return tf(word, blob) * idf(word, bloblist) - - - # Create an excel file for validation purpose - - def get_technologies(self): - folder_path = "C:/Users/asouili01/Documents/PatSemBeta-v3/Data/input/Gaggenau/" - stopwords = open('C:/Users/asouili01/Documents/PIXSEB/Ressources/stopwords.txt', 'r').read().split('\r\n') - bloblist = [] - - filenamelist = [] - - for path, dirs, files in os.walk(folder_path): - for filename in files: - print(filename) - filenamelist.append(filename) - name, extension = filename.split('.') - filepath = folder_path + "/" + filename - filehandler = open(filepath, "r",encoding="utf-8") - - content = filehandler.read() - filteredtext = [t for t in content if t.lower() not in stopwords] - filteredcontent = ''.join(filteredtext) - blob = 'blob_' + name.lower() - print (blob) - blob = tb(filteredcontent.lower()) - bloblist.append(blob) - - print(bloblist) - - for i, blob in enumerate(bloblist): - print("Top words in document {}".format(i + 1)) - scores = {word: tfidf(word, blob, bloblist) for word in blob.noun_phrases} - sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True) - for word, score in sorted_words[:5]: - print("\tWord: {}, TF-IDF: {}".format(word, round(score, 10))) - diff --git a/spaces/yannESGI/test_fitz/README.md b/spaces/yannESGI/test_fitz/README.md deleted file mode 100644 index 2e0df12db26dced9013877cf04553fa08b9ea1ba..0000000000000000000000000000000000000000 --- a/spaces/yannESGI/test_fitz/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Test Fitz -emoji: 🏢 -colorFrom: pink -colorTo: gray -sdk: streamlit -sdk_version: 1.26.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yaoshining/text-generation-webui/modules/relative_imports.py b/spaces/yaoshining/text-generation-webui/modules/relative_imports.py deleted file mode 100644 index 3c0eb56b77c6cb6b38fdbdeebabe9ad3b8d91b97..0000000000000000000000000000000000000000 --- a/spaces/yaoshining/text-generation-webui/modules/relative_imports.py +++ /dev/null @@ -1,13 +0,0 @@ -import sys -from pathlib import Path - - -class RelativeImport: - def __init__(self, path): - self.import_path = Path(path) - - def __enter__(self): - sys.path.insert(0, str(self.import_path)) - - def __exit__(self, exc_type, exc_value, traceback): - sys.path.remove(str(self.import_path)) diff --git a/spaces/yaosynge/bingAI/Dockerfile b/spaces/yaosynge/bingAI/Dockerfile deleted file mode 100644 index 21f2dc49cbdbd83feb4958a6ae6d2e524b1993ce..0000000000000000000000000000000000000000 --- a/spaces/yaosynge/bingAI/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -#Build Stage -#使用golang:alpine作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -#添加git,以便之后能从Github克隆项目 -RUN apk --no-cache add git - -#从GitHub克隆go-proxy-bingai项目到/workspace/app目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -#设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -#编译go项目。-ldflags="-s -w"是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -#Runtime Stage -#使用轻量级的alpine镜像作为基础镜像 -FROM alpine - -#设置工作目录 -WORKDIR /workspace/app - -#从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . 
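# Translation of the Chinese comments in this Dockerfile: build stage: use golang:alpine
# as the base image; add git so the project can be cloned from GitHub; clone
# go-proxy-bingai into /workspace/app; set it as the working directory; build the Go
# project (-ldflags="-s -w" shrinks the binary). Runtime stage: use the lightweight
# alpine image; set the working directory; copy the compiled binary from the build stage;
# set an environment variable; expose port 8080; run the binary when the container starts.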
- -#设置环境变量 -ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4i0" - -#暴露8080端口 -EXPOSE 8080 - -#容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/ybelkada/bloom-1b3-gen/app.py b/spaces/ybelkada/bloom-1b3-gen/app.py deleted file mode 100644 index 39ec3598c58da1bf669244594aa7caea5181875c..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/bloom-1b3-gen/app.py +++ /dev/null @@ -1,65 +0,0 @@ -import gradio as gr -from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed - -model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-1b3", use_cache=True) -tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b3") - -def post_process_sentence(input_sentence, generated_sentence): - new_sentence = generated_sentence.replace(input_sentence, "") - if "\n" not in new_sentence: - return generated_sentence.replace(" ", " ") + "\n- " - else: - return (new_sentence.split("\n")[0]).replace(" ", " ") + "\n- " - -def generate_single(model, tokenizer, input_sentence, max_length=50, top_k=0, temperature=0.7, do_sample=True, seed=42): - set_seed(seed) - input_ids = tokenizer.encode(input_sentence, return_tensors="pt") - output = model.generate( - input_ids, do_sample=do_sample, - max_length=len(input_sentence)+max_length, - top_k=top_k, - temperature=temperature, - ) - generated_sentence = tokenizer.decode(output[0], skip_special_tokens=True) - return post_process_sentence(input_sentence, generated_sentence) - -def question_bloom(input_sentence, max_length, temperature, do_sample=True, top_k=3, seed=42): - post_processed_output = generate_single(model, tokenizer, input_sentence, temperature=temperature, max_length=max_length, do_sample=do_sample, top_k=top_k, seed=seed) - return post_processed_output.split("\n-")[-2] - -gr.Interface( - question_bloom, - [ - gr.Textbox(lines=10, label="Input code"), - gr.inputs.Slider( - minimum=8, - maximum=256, - step=1, - default=8, - label="Number of tokens to generate", - ), - gr.inputs.Slider( - minimum=0, - maximum=2, - step=0.1, - default=0.6, - label="Temperature", - ), - gr.inputs.Checkbox(True, label="Do Sample"), - gr.inputs.Slider( - minimum=0, - maximum=10, - step=1, - default=3, - label="Top K", - ), - gr.inputs.Slider( - minimum=0, - maximum=256, - step=1, - default=42, - label="Random seed for generation", - ), - ], - outputs=gr.Textbox(label="Predicted sentence", lines=10), -).launch() \ No newline at end of file diff --git a/spaces/yeqingmei123/face-test/e4e/editings/sefa.py b/spaces/yeqingmei123/face-test/e4e/editings/sefa.py deleted file mode 100644 index db7083ce463b765a7cf452807883a3b85fb63fa5..0000000000000000000000000000000000000000 --- a/spaces/yeqingmei123/face-test/e4e/editings/sefa.py +++ /dev/null @@ -1,46 +0,0 @@ -import torch -import numpy as np -from tqdm import tqdm - - -def edit(generator, latents, indices, semantics=1, start_distance=-15.0, end_distance=15.0, num_samples=1, step=11): - - layers, boundaries, values = factorize_weight(generator, indices) - codes = latents.detach().cpu().numpy() # (1,18,512) - - # Generate visualization pages. 
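        # How the SeFa edit below works (see factorize_weight): the style-modulation weight
        # matrices of the selected layers are concatenated and the eigenvectors of W W^T are
        # used as semantic directions ("boundaries"). For each semantic and each sample, the
        # W+ latent code is shifted along its boundary by every distance in
        # np.linspace(start_distance, end_distance, step), and the shifted codes are returned.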
- distances = np.linspace(start_distance, end_distance, step) - num_sam = num_samples - num_sem = semantics - - edited_latents = [] - for sem_id in tqdm(range(num_sem), desc='Semantic ', leave=False): - boundary = boundaries[sem_id:sem_id + 1] - for sam_id in tqdm(range(num_sam), desc='Sample ', leave=False): - code = codes[sam_id:sam_id + 1] - for col_id, d in enumerate(distances, start=1): - temp_code = code.copy() - temp_code[:, layers, :] += boundary * d - edited_latents.append(torch.from_numpy(temp_code).float().cuda()) - return torch.cat(edited_latents) - - -def factorize_weight(g_ema, layers='all'): - - weights = [] - if layers == 'all' or 0 in layers: - weight = g_ema.conv1.conv.modulation.weight.T - weights.append(weight.cpu().detach().numpy()) - - if layers == 'all': - layers = list(range(g_ema.num_layers - 1)) - else: - layers = [l - 1 for l in layers if l != 0] - - for idx in layers: - weight = g_ema.convs[idx].conv.modulation.weight.T - weights.append(weight.cpu().detach().numpy()) - weight = np.concatenate(weights, axis=1).astype(np.float32) - weight = weight / np.linalg.norm(weight, axis=0, keepdims=True) - eigen_values, eigen_vectors = np.linalg.eig(weight.dot(weight.T)) - return layers, eigen_vectors.T, eigen_values diff --git a/spaces/ygangang/CodeFormer/CodeFormer/basicsr/setup.py b/spaces/ygangang/CodeFormer/CodeFormer/basicsr/setup.py deleted file mode 100644 index 382a2aa1006e581eaf31dbb3155d4b0ba3b31140..0000000000000000000000000000000000000000 --- a/spaces/ygangang/CodeFormer/CodeFormer/basicsr/setup.py +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env python - -from setuptools import find_packages, setup - -import os -import subprocess -import sys -import time -import torch -from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension - -version_file = './basicsr/version.py' - - -def readme(): - with open('README.md', encoding='utf-8') as f: - content = f.read() - return content - - -def get_git_hash(): - - def _minimal_ext_cmd(cmd): - # construct minimal environment - env = {} - for k in ['SYSTEMROOT', 'PATH', 'HOME']: - v = os.environ.get(k) - if v is not None: - env[k] = v - # LANGUAGE is used on win32 - env['LANGUAGE'] = 'C' - env['LANG'] = 'C' - env['LC_ALL'] = 'C' - out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] - return out - - try: - out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) - sha = out.strip().decode('ascii') - except OSError: - sha = 'unknown' - - return sha - - -def get_hash(): - if os.path.exists('.git'): - sha = get_git_hash()[:7] - elif os.path.exists(version_file): - try: - from version import __version__ - sha = __version__.split('+')[-1] - except ImportError: - raise ImportError('Unable to get git version') - else: - sha = 'unknown' - - return sha - - -def write_version_py(): - content = """# GENERATED VERSION FILE -# TIME: {} -__version__ = '{}' -__gitsha__ = '{}' -version_info = ({}) -""" - sha = get_hash() - with open('./basicsr/VERSION', 'r') as f: - SHORT_VERSION = f.read().strip() - VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')]) - - version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO) - with open(version_file, 'w') as f: - f.write(version_file_str) - - -def get_version(): - with open(version_file, 'r') as f: - exec(compile(f.read(), version_file, 'exec')) - return locals()['__version__'] - - -def make_cuda_ext(name, module, sources, sources_cuda=None): - if sources_cuda is None: - sources_cuda = [] - 
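    # make_cuda_ext returns a torch CUDAExtension when CUDA is available (or FORCE_CUDA=1),
    # adding the WITH_CUDA macro, the __CUDA_NO_HALF_* nvcc flags and the .cu sources;
    # otherwise it falls back to a plain CppExtension built from the C++ sources only.
    # Source paths are resolved relative to the dotted module path (e.g. ops.dcn -> ops/dcn/src/...).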
define_macros = [] - extra_compile_args = {'cxx': []} - - if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1': - define_macros += [('WITH_CUDA', None)] - extension = CUDAExtension - extra_compile_args['nvcc'] = [ - '-D__CUDA_NO_HALF_OPERATORS__', - '-D__CUDA_NO_HALF_CONVERSIONS__', - '-D__CUDA_NO_HALF2_OPERATORS__', - ] - sources += sources_cuda - else: - print(f'Compiling {name} without CUDA') - extension = CppExtension - - return extension( - name=f'{module}.{name}', - sources=[os.path.join(*module.split('.'), p) for p in sources], - define_macros=define_macros, - extra_compile_args=extra_compile_args) - - -def get_requirements(filename='requirements.txt'): - with open(os.path.join('.', filename), 'r') as f: - requires = [line.replace('\n', '') for line in f.readlines()] - return requires - - -if __name__ == '__main__': - if '--cuda_ext' in sys.argv: - ext_modules = [ - make_cuda_ext( - name='deform_conv_ext', - module='ops.dcn', - sources=['src/deform_conv_ext.cpp'], - sources_cuda=['src/deform_conv_cuda.cpp', 'src/deform_conv_cuda_kernel.cu']), - make_cuda_ext( - name='fused_act_ext', - module='ops.fused_act', - sources=['src/fused_bias_act.cpp'], - sources_cuda=['src/fused_bias_act_kernel.cu']), - make_cuda_ext( - name='upfirdn2d_ext', - module='ops.upfirdn2d', - sources=['src/upfirdn2d.cpp'], - sources_cuda=['src/upfirdn2d_kernel.cu']), - ] - sys.argv.remove('--cuda_ext') - else: - ext_modules = [] - - write_version_py() - setup( - name='basicsr', - version=get_version(), - description='Open Source Image and Video Super-Resolution Toolbox', - long_description=readme(), - long_description_content_type='text/markdown', - author='Xintao Wang', - author_email='xintao.wang@outlook.com', - keywords='computer vision, restoration, super resolution', - url='https://github.com/xinntao/BasicSR', - include_package_data=True, - packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')), - classifiers=[ - 'Development Status :: 4 - Beta', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - ], - license='Apache License 2.0', - setup_requires=['cython', 'numpy'], - install_requires=get_requirements(), - ext_modules=ext_modules, - cmdclass={'build_ext': BuildExtension}, - zip_safe=False) diff --git a/spaces/ygangang/CodeFormer/CodeFormer/basicsr/utils/__init__.py b/spaces/ygangang/CodeFormer/CodeFormer/basicsr/utils/__init__.py deleted file mode 100644 index 5fcc1d540462712387523d1e326d1dfc2bcfbf32..0000000000000000000000000000000000000000 --- a/spaces/ygangang/CodeFormer/CodeFormer/basicsr/utils/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -from .file_client import FileClient -from .img_util import crop_border, imfrombytes, img2tensor, imwrite, tensor2img -from .logger import MessageLogger, get_env_info, get_root_logger, init_tb_logger, init_wandb_logger -from .misc import check_resume, get_time_str, make_exp_dirs, mkdir_and_rename, scandir, set_random_seed, sizeof_fmt - -__all__ = [ - # file_client.py - 'FileClient', - # img_util.py - 'img2tensor', - 'tensor2img', - 'imfrombytes', - 'imwrite', - 'crop_border', - # logger.py - 'MessageLogger', - 'init_tb_logger', - 'init_wandb_logger', - 'get_root_logger', - 'get_env_info', - # misc.py - 'set_random_seed', - 'get_time_str', - 'mkdir_and_rename', - 'make_exp_dirs', - 'scandir', - 'check_resume', - 'sizeof_fmt' -] diff 
--git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bit/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bit/__init__.py deleted file mode 100644 index fc50659d9fa06820ebe1edc7b56ab3d5de4ef67b..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/bit/__init__.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import TYPE_CHECKING - -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = {"configuration_bit": ["BIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BitConfig", "BitOnnxConfig"]} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_bit"] = [ - "BIT_PRETRAINED_MODEL_ARCHIVE_LIST", - "BitForImageClassification", - "BitModel", - "BitPreTrainedModel", - "BitBackbone", - ] - - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_bit"] = ["BitImageProcessor"] - - -if TYPE_CHECKING: - from .configuration_bit import BIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BitConfig, BitOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_bit import ( - BIT_PRETRAINED_MODEL_ARCHIVE_LIST, - BitBackbone, - BitForImageClassification, - BitModel, - BitPreTrainedModel, - ) - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_bit import BitImageProcessor - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpt/feature_extraction_dpt.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpt/feature_extraction_dpt.py deleted file mode 100644 index d375d8229f5ee9b3278af363c40043815ff0cf29..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpt/feature_extraction_dpt.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Feature extractor class for DPT.""" - -import warnings - -from ...utils import logging -from .image_processing_dpt import DPTImageProcessor - - -logger = logging.get_logger(__name__) - - -class DPTFeatureExtractor(DPTImageProcessor): - def __init__(self, *args, **kwargs) -> None: - warnings.warn( - "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" - " use DPTImageProcessor instead.", - FutureWarning, - ) - super().__init__(*args, **kwargs) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/pix2struct/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/pix2struct/__init__.py deleted file mode 100644 index 8b395b31d8be19c169cf0f535b0aabc9798dbd6b..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/pix2struct/__init__.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import TYPE_CHECKING - -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_pix2struct": [ - "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP", - "Pix2StructConfig", - "Pix2StructTextConfig", - "Pix2StructVisionConfig", - ], - "processing_pix2struct": ["Pix2StructProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"] - - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_pix2struct"] = [ - "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST", - "Pix2StructPreTrainedModel", - "Pix2StructForConditionalGeneration", - "Pix2StructVisionModel", - "Pix2StructTextModel", - ] - -if TYPE_CHECKING: - from .configuration_pix2struct import ( - PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, - Pix2StructConfig, - Pix2StructTextConfig, - Pix2StructVisionConfig, - ) - from .processing_pix2struct import Pix2StructProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .image_processing_pix2struct import Pix2StructImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_pix2struct import ( - PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, - Pix2StructForConditionalGeneration, - Pix2StructPreTrainedModel, - Pix2StructTextModel, - Pix2StructVisionModel, - ) - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git 
a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vdecoder/hifiganwithsnake/alias/act.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vdecoder/hifiganwithsnake/alias/act.py deleted file mode 100644 index 308344fb6ccbc39317c584a3ee1fb2f29084678e..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/vdecoder/hifiganwithsnake/alias/act.py +++ /dev/null @@ -1,129 +0,0 @@ -# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 -# LICENSE is in incl_licenses directory. - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from torch import sin, pow -from torch.nn import Parameter -from .resample import UpSample1d, DownSample1d - - -class Activation1d(nn.Module): - def __init__(self, - activation, - up_ratio: int = 2, - down_ratio: int = 2, - up_kernel_size: int = 12, - down_kernel_size: int = 12): - super().__init__() - self.up_ratio = up_ratio - self.down_ratio = down_ratio - self.act = activation - self.upsample = UpSample1d(up_ratio, up_kernel_size) - self.downsample = DownSample1d(down_ratio, down_kernel_size) - - # x: [B,C,T] - def forward(self, x): - x = self.upsample(x) - x = self.act(x) - x = self.downsample(x) - - return x - - -class SnakeBeta(nn.Module): - ''' - A modified Snake function which uses separate parameters for the magnitude of the periodic components - Shape: - - Input: (B, C, T) - - Output: (B, C, T), same shape as the input - Parameters: - - alpha - trainable parameter that controls frequency - - beta - trainable parameter that controls magnitude - References: - - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: - https://arxiv.org/abs/2006.08195 - Examples: - >>> a1 = snakebeta(256) - >>> x = torch.randn(256) - >>> x = a1(x) - ''' - - def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): - ''' - Initialization. - INPUT: - - in_features: shape of the input - - alpha - trainable parameter that controls frequency - - beta - trainable parameter that controls magnitude - alpha is initialized to 1 by default, higher values = higher-frequency. - beta is initialized to 1 by default, higher values = higher-magnitude. - alpha will be trained along with the rest of your model. - ''' - super(SnakeBeta, self).__init__() - self.in_features = in_features - # initialize alpha - self.alpha_logscale = alpha_logscale - if self.alpha_logscale: # log scale alphas initialized to zeros - self.alpha = Parameter(torch.zeros(in_features) * alpha) - self.beta = Parameter(torch.zeros(in_features) * alpha) - else: # linear scale alphas initialized to ones - self.alpha = Parameter(torch.ones(in_features) * alpha) - self.beta = Parameter(torch.ones(in_features) * alpha) - self.alpha.requires_grad = alpha_trainable - self.beta.requires_grad = alpha_trainable - self.no_div_by_zero = 0.000000001 - - def forward(self, x): - ''' - Forward pass of the function. - Applies the function to the input elementwise. 
- SnakeBeta = x + 1/b * sin^2 (xa) - ''' - alpha = self.alpha.unsqueeze( - 0).unsqueeze(-1) # line up with x to [B, C, T] - beta = self.beta.unsqueeze(0).unsqueeze(-1) - if self.alpha_logscale: - alpha = torch.exp(alpha) - beta = torch.exp(beta) - x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) - return x - - -class Mish(nn.Module): - """ - Mish activation function is proposed in "Mish: A Self - Regularized Non-Monotonic Neural Activation Function" - paper, https://arxiv.org/abs/1908.08681. - """ - - def __init__(self): - super().__init__() - - def forward(self, x): - return x * torch.tanh(F.softplus(x)) - - -class SnakeAlias(nn.Module): - def __init__(self, - channels, - up_ratio: int = 2, - down_ratio: int = 2, - up_kernel_size: int = 12, - down_kernel_size: int = 12): - super().__init__() - self.up_ratio = up_ratio - self.down_ratio = down_ratio - self.act = SnakeBeta(channels, alpha_logscale=True) - self.upsample = UpSample1d(up_ratio, up_kernel_size) - self.downsample = DownSample1d(down_ratio, down_kernel_size) - - # x: [B,C,T] - def forward(self, x): - x = self.upsample(x) - x = self.act(x) - x = self.downsample(x) - - return x \ No newline at end of file diff --git a/spaces/yotamsapi/face-swap/retinaface/anchor.py b/spaces/yotamsapi/face-swap/retinaface/anchor.py deleted file mode 100644 index bac3a361582b839e5a0de0659b408bdd2420db67..0000000000000000000000000000000000000000 --- a/spaces/yotamsapi/face-swap/retinaface/anchor.py +++ /dev/null @@ -1,296 +0,0 @@ -"""Anchor utils modified from https://github.com/biubug6/Pytorch_Retinaface""" -import math -import tensorflow as tf -import numpy as np -from itertools import product as product - - -############################################################################### -# Tensorflow / Numpy Priors # -############################################################################### -def prior_box(image_sizes, min_sizes, steps, clip=False): - """prior box""" - feature_maps = [ - [math.ceil(image_sizes[0] / step), math.ceil(image_sizes[1] / step)] - for step in steps] - - anchors = [] - for k, f in enumerate(feature_maps): - for i, j in product(range(f[0]), range(f[1])): - for min_size in min_sizes[k]: - s_kx = min_size / image_sizes[1] - s_ky = min_size / image_sizes[0] - cx = (j + 0.5) * steps[k] / image_sizes[1] - cy = (i + 0.5) * steps[k] / image_sizes[0] - anchors += [cx, cy, s_kx, s_ky] - - output = np.asarray(anchors).reshape([-1, 4]) - - if clip: - output = np.clip(output, 0, 1) - - return output - - -def prior_box_tf(image_sizes, min_sizes, steps, clip=False): - """prior box""" - image_sizes = tf.cast(tf.convert_to_tensor(image_sizes), tf.float32) - feature_maps = tf.math.ceil( - tf.reshape(image_sizes, [1, 2]) / - tf.reshape(tf.cast(steps, tf.float32), [-1, 1])) - - anchors = [] - for k in range(len(min_sizes)): - grid_x, grid_y = _meshgrid_tf(tf.range(feature_maps[k][1]), - tf.range(feature_maps[k][0])) - cx = (grid_x + 0.5) * steps[k] / image_sizes[1] - cy = (grid_y + 0.5) * steps[k] / image_sizes[0] - cxcy = tf.stack([cx, cy], axis=-1) - cxcy = tf.reshape(cxcy, [-1, 2]) - cxcy = tf.repeat(cxcy, repeats=tf.shape(min_sizes[k])[0], axis=0) - - sx = min_sizes[k] / image_sizes[1] - sy = min_sizes[k] / image_sizes[0] - sxsy = tf.stack([sx, sy], 1) - sxsy = tf.repeat(sxsy[tf.newaxis], - repeats=tf.shape(grid_x)[0] * tf.shape(grid_x)[1], - axis=0) - sxsy = tf.reshape(sxsy, [-1, 2]) - - anchors.append(tf.concat([cxcy, sxsy], 1)) - - output = tf.concat(anchors, axis=0) - - if clip: - output = 
tf.clip_by_value(output, 0, 1) - - return output - - -def _meshgrid_tf(x, y): - """ workaround solution of the tf.meshgrid() issue: - https://github.com/tensorflow/tensorflow/issues/34470""" - grid_shape = [tf.shape(y)[0], tf.shape(x)[0]] - grid_x = tf.broadcast_to(tf.reshape(x, [1, -1]), grid_shape) - grid_y = tf.broadcast_to(tf.reshape(y, [-1, 1]), grid_shape) - return grid_x, grid_y - - -############################################################################### -# Tensorflow Encoding # -############################################################################### -def encode_tf(labels, priors, match_thresh, ignore_thresh, - variances=[0.1, 0.2]): - """tensorflow encoding""" - assert ignore_thresh <= match_thresh - priors = tf.cast(priors, tf.float32) - bbox = labels[:, :4] - landm = labels[:, 4:-1] - landm_valid = labels[:, -1] # 1: with landm, 0: w/o landm. - - # jaccard index - overlaps = _jaccard(bbox, _point_form(priors)) - - # (Bipartite Matching) - # [num_objects] best prior for each ground truth - best_prior_overlap, best_prior_idx = tf.math.top_k(overlaps, k=1) - best_prior_overlap = best_prior_overlap[:, 0] - best_prior_idx = best_prior_idx[:, 0] - - # [num_priors] best ground truth for each prior - overlaps_t = tf.transpose(overlaps) - best_truth_overlap, best_truth_idx = tf.math.top_k(overlaps_t, k=1) - best_truth_overlap = best_truth_overlap[:, 0] - best_truth_idx = best_truth_idx[:, 0] - - # ensure best prior - def _loop_body(i, bt_idx, bt_overlap): - bp_mask = tf.one_hot(best_prior_idx[i], tf.shape(bt_idx)[0]) - bp_mask_int = tf.cast(bp_mask, tf.int32) - new_bt_idx = bt_idx * (1 - bp_mask_int) + bp_mask_int * i - bp_mask_float = tf.cast(bp_mask, tf.float32) - new_bt_overlap = bt_overlap * (1 - bp_mask_float) + bp_mask_float * 2 - return tf.cond(best_prior_overlap[i] > match_thresh, - lambda: (i + 1, new_bt_idx, new_bt_overlap), - lambda: (i + 1, bt_idx, bt_overlap)) - _, best_truth_idx, best_truth_overlap = tf.while_loop( - lambda i, bt_idx, bt_overlap: tf.less(i, tf.shape(best_prior_idx)[0]), - _loop_body, [tf.constant(0), best_truth_idx, best_truth_overlap]) - - matches_bbox = tf.gather(bbox, best_truth_idx) # [num_priors, 4] - matches_landm = tf.gather(landm, best_truth_idx) # [num_priors, 10] - matches_landm_v = tf.gather(landm_valid, best_truth_idx) # [num_priors] - - loc_t = _encode_bbox(matches_bbox, priors, variances) - landm_t = _encode_landm(matches_landm, priors, variances) - landm_valid_t = tf.cast(matches_landm_v > 0, tf.float32) - conf_t = tf.cast(best_truth_overlap > match_thresh, tf.float32) - conf_t = tf.where( - tf.logical_and(best_truth_overlap < match_thresh, - best_truth_overlap > ignore_thresh), - tf.ones_like(conf_t) * -1, conf_t) # 1: pos, 0: neg, -1: ignore - - return tf.concat([loc_t, landm_t, landm_valid_t[..., tf.newaxis], - conf_t[..., tf.newaxis]], axis=1) - - -def _encode_bbox(matched, priors, variances): - """Encode the variances from the priorbox layers into the ground truth - boxes we have matched (based on jaccard overlap) with the prior boxes. - Args: - matched: (tensor) Coords of ground truth for each prior in point-form - Shape: [num_priors, 4]. - priors: (tensor) Prior boxes in center-offset form - Shape: [num_priors,4]. 
- variances: (list[float]) Variances of priorboxes - Return: - encoded boxes (tensor), Shape: [num_priors, 4] - """ - - # dist b/t match center and prior's center - g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2] - # encode variance - g_cxcy /= (variances[0] * priors[:, 2:]) - # match wh / prior wh - g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:] - g_wh = tf.math.log(g_wh) / variances[1] - # return target for smooth_l1_loss - return tf.concat([g_cxcy, g_wh], 1) # [num_priors,4] - - -def _encode_landm(matched, priors, variances): - """Encode the variances from the priorbox layers into the ground truth - boxes we have matched (based on jaccard overlap) with the prior boxes. - Args: - matched: (tensor) Coords of ground truth for each prior in point-form - Shape: [num_priors, 10]. - priors: (tensor) Prior boxes in center-offset form - Shape: [num_priors,4]. - variances: (list[float]) Variances of priorboxes - Return: - encoded landm (tensor), Shape: [num_priors, 10] - """ - - # dist b/t match center and prior's center - matched = tf.reshape(matched, [tf.shape(matched)[0], 5, 2]) - priors = tf.broadcast_to( - tf.expand_dims(priors, 1), [tf.shape(matched)[0], 5, 4]) - g_cxcy = matched[:, :, :2] - priors[:, :, :2] - # encode variance - g_cxcy /= (variances[0] * priors[:, :, 2:]) - # g_cxcy /= priors[:, :, 2:] - g_cxcy = tf.reshape(g_cxcy, [tf.shape(g_cxcy)[0], -1]) - # return target for smooth_l1_loss - return g_cxcy - - -def _point_form(boxes): - """ Convert prior_boxes to (xmin, ymin, xmax, ymax) - representation for comparison to point form ground truth data. - Args: - boxes: (tensor) center-size default boxes from priorbox layers. - Return: - boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. - """ - return tf.concat((boxes[:, :2] - boxes[:, 2:] / 2, - boxes[:, :2] + boxes[:, 2:] / 2), axis=1) - - -def _intersect(box_a, box_b): - """ We resize both tensors to [A,B,2]: - [A,2] -> [A,1,2] -> [A,B,2] - [B,2] -> [1,B,2] -> [A,B,2] - Then we compute the area of intersect between box_a and box_b. - Args: - box_a: (tensor) bounding boxes, Shape: [A,4]. - box_b: (tensor) bounding boxes, Shape: [B,4]. - Return: - (tensor) intersection area, Shape: [A,B]. - """ - A = tf.shape(box_a)[0] - B = tf.shape(box_b)[0] - max_xy = tf.minimum( - tf.broadcast_to(tf.expand_dims(box_a[:, 2:], 1), [A, B, 2]), - tf.broadcast_to(tf.expand_dims(box_b[:, 2:], 0), [A, B, 2])) - min_xy = tf.maximum( - tf.broadcast_to(tf.expand_dims(box_a[:, :2], 1), [A, B, 2]), - tf.broadcast_to(tf.expand_dims(box_b[:, :2], 0), [A, B, 2])) - inter = tf.maximum((max_xy - min_xy), tf.zeros_like(max_xy - min_xy)) - return inter[:, :, 0] * inter[:, :, 1] - - -def _jaccard(box_a, box_b): - """Compute the jaccard overlap of two sets of boxes. The jaccard overlap - is simply the intersection over union of two boxes. Here we operate on - ground truth boxes and default boxes. 
- E.g.: - A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B) - Args: - box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4] - box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4] - Return: - jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)] - """ - inter = _intersect(box_a, box_b) - area_a = tf.broadcast_to( - tf.expand_dims( - (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]), 1), - tf.shape(inter)) # [A,B] - area_b = tf.broadcast_to( - tf.expand_dims( - (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1]), 0), - tf.shape(inter)) # [A,B] - union = area_a + area_b - inter - return inter / union # [A,B] - - -############################################################################### -# Tensorflow Decoding # -############################################################################### -def decode_tf(labels, priors, variances=[0.1, 0.2]): - """tensorflow decoding""" - bbox = _decode_bbox(labels[:, :4], priors, variances) - landm = _decode_landm(labels[:, 4:14], priors, variances) - landm_valid = labels[:, 14][:, tf.newaxis] - conf = labels[:, 15][:, tf.newaxis] - - return tf.concat([bbox, landm, landm_valid, conf], axis=1) - - -def _decode_bbox(pre, priors, variances=[0.1, 0.2]): - """Decode locations from predictions using priors to undo - the encoding we did for offset regression at train time. - Args: - pre (tensor): location predictions for loc layers, - Shape: [num_priors,4] - priors (tensor): Prior boxes in center-offset form. - Shape: [num_priors,4]. - variances: (list[float]) Variances of priorboxes - Return: - decoded bounding box predictions - """ - centers = priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:] - sides = priors[:, 2:] * tf.math.exp(pre[:, 2:] * variances[1]) - - return tf.concat([centers - sides / 2, centers + sides / 2], axis=1) - - -def _decode_landm(pre, priors, variances=[0.1, 0.2]): - """Decode landm from predictions using priors to undo - the encoding we did for offset regression at train time. - Args: - pre (tensor): landm predictions for loc layers, - Shape: [num_priors,10] - priors (tensor): Prior boxes in center-offset form. - Shape: [num_priors,4]. - variances: (list[float]) Variances of priorboxes - Return: - decoded landm predictions - """ - landms = tf.concat( - [priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:], - priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:], - priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:], - priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:], - priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:]], axis=1) - return landms \ No newline at end of file diff --git a/spaces/yufiofficial/MusicGenQ/tests/models/test_musicgen.py b/spaces/yufiofficial/MusicGenQ/tests/models/test_musicgen.py deleted file mode 100644 index d43cf73763f6c690ab0b277227ac225b286fa143..0000000000000000000000000000000000000000 --- a/spaces/yufiofficial/MusicGenQ/tests/models/test_musicgen.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.models import MusicGen - - -class TestSEANetModel: - def get_musicgen(self): - mg = MusicGen.get_pretrained(name='debug', device='cpu') - mg.set_generation_params(duration=2.0, extend_stride=2.) 
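        # duration is the target clip length in seconds; extend_stride is the stride (in
        # seconds) of the sliding generation window used once duration exceeds the model's
        # maximum window, which test_generate_long below exercises by setting
        # max_duration=3. and duration=4.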
- return mg - - def test_base(self): - mg = self.get_musicgen() - assert mg.frame_rate == 25 - assert mg.sample_rate == 32000 - assert mg.audio_channels == 1 - - def test_generate_unconditional(self): - mg = self.get_musicgen() - wav = mg.generate_unconditional(3) - assert list(wav.shape) == [3, 1, 64000] - - def test_generate_continuation(self): - mg = self.get_musicgen() - prompt = torch.randn(3, 1, 32000) - wav = mg.generate_continuation(prompt, 32000) - assert list(wav.shape) == [3, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - with pytest.raises(AssertionError): - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - mg = self.get_musicgen() - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - def test_generate_long(self): - mg = self.get_musicgen() - mg.max_duration = 3. - mg.set_generation_params(duration=4., extend_stride=2.) - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000 * 4] diff --git a/spaces/yusufani/TrCLIP/app.py b/spaces/yusufani/TrCLIP/app.py deleted file mode 100644 index a091a77e2d08bd6fd11a6313ca22f782134eaffd..0000000000000000000000000000000000000000 --- a/spaces/yusufani/TrCLIP/app.py +++ /dev/null @@ -1,303 +0,0 @@ -# Importing all the necessary libraries -import os - -import gradio as gr -import torch -from PIL import Image -from tqdm import tqdm -from trclip.trclip import Trclip -from trclip.visualizer import image_retrieval_visualize, text_retrieval_visualize - -print(f'gr version : {gr.__version__}') -import pickle -import random - -import numpy as np - -# %% -model_name = 'trclip-vitl14-e10' -if not os.path.exists(model_name): - os.system(f'git clone https://huggingface.co/yusufani/{model_name} --progress') -# %% -if not os.path.exists('TrCaption-trclip-vitl14-e10'): - os.system(f'git clone https://huggingface.co/datasets/yusufani/TrCaption-trclip-vitl14-e10/ --progress') - os.chdir('TrCaption-trclip-vitl14-e10') - os.system(f'git lfs install') - os.system(f' git lfs fetch') - os.system(f' git lfs pull') - os.chdir('..') - - -# %% - -def load_image_embeddings(load_batch=True): - path = os.path.join('TrCaption-trclip-vitl14-e10', 'image_embeddings') - bs = 100_000 - if load_batch: - for i in tqdm(range(0, 3_100_000, bs), desc='Loading TrCaption Image embeddings'): - with open(os.path.join(path, f'image_em_{i}.pkl'), 'rb') as f: - yield pickle.load(f) - return - - else: - embeddings = [] - for i in tqdm(range(0, 3_100_000, bs), desc='Loading TrCaption Image embeddings'): - with open(os.path.join(path, f'image_em_{i}.pkl'), 'rb') as f: - embeddings.append(pickle.load(f)) - return torch.cat(embeddings, dim=0) - - -def load_text_embeddings(load_batch=True): - path = os.path.join('TrCaption-trclip-vitl14-e10', 'text_embeddings') - bs = 100_000 - if load_batch: - for i in tqdm(range(0, 3_600_000, bs), desc='Loading TrCaption text embeddings'): - with open(os.path.join(path, f'text_em_{i}.pkl'), 'rb') as f: - yield pickle.load(f) - return - else: - embeddings = [] - for i in tqdm(range(0, 3_600_000, bs), desc='Loading TrCaption text embeddings'): - with open(os.path.join(path, f'text_em_{i}.pkl'), 'rb') as f: - embeddings.append(pickle.load(f)) - return torch.cat(embeddings, dim=0) - - -def load_metadata(): - path = os.path.join('TrCaption-trclip-vitl14-e10', 
'metadata.pkl') - with open(path, 'rb') as f: - metadata = pickle.load(f) - trcap_texts = metadata['texts'] - trcap_urls = metadata['image_urls'] - return trcap_texts, trcap_urls - - -def load_spesific_tensor(index, type, bs=100_000): - part = index // bs - idx = index % bs - with open(os.path.join('TrCaption-trclip-vitl14-e10', f'{type}_embeddings', f'{type}_em_{part * bs}.pkl'), 'rb') as f: - embeddings = pickle.load(f) - return embeddings[idx] - - -# %% -trcap_texts, trcap_urls = load_metadata() -# %% -print(f'INFO : Model loading') -model_path = os.path.join(model_name, 'pytorch_model.bin') -trclip = Trclip(model_path, clip_model='ViT-L/14', device='cpu') -# %% - - -import datetime - -# %% -def run_im(im1, use_trcap_images, text1, use_trcap_texts): - print(f'{datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")} INFO : Image retrieval starting') - f_texts_embeddings = None - ims = None - if use_trcap_images: - print('INFO : TRCaption images used') - im_paths = trcap_urls - else: - print('INFO : Own images used') - # Images taken from user - im_paths = [i.name for i in im1] - ims = [Image.open(i) for i in im_paths] - if use_trcap_texts: - print(f'INFO : TRCaption texts used') - random_indexes = random.sample(range(len(trcap_texts)), 2) # MAX 2 text are allowed in image retrieval UI limit - f_texts_embeddings = [] - for i in random_indexes: - f_texts_embeddings.append(load_spesific_tensor(i, 'text')) - f_texts_embeddings = torch.stack(f_texts_embeddings) - texts = [trcap_texts[i] for i in random_indexes] - - else: - print(f'INFO : Own texts used') - texts = [i.strip() for i in text1.split('\n')[:2] if i.strip() != ''] - - if use_trcap_images: # This means that we will iterate over batches because Huggingface space has 16 gb limit :/// - per_mode_probs = [] - f_texts_embeddings = f_texts_embeddings if use_trcap_texts else trclip.get_text_features(texts) - for f_image_embeddings in tqdm(load_image_embeddings(load_batch=True), desc='Running image retrieval'): - batch_probs = trclip.get_results( - text_features=f_texts_embeddings, image_features=f_image_embeddings, mode='per_text', return_probs=True) - per_mode_probs.append(batch_probs) - per_mode_probs = torch.cat(per_mode_probs, dim=1) - per_mode_probs = per_mode_probs.softmax(dim=-1).cpu().detach().numpy() - per_mode_indices = [np.argsort(prob)[::-1] for prob in per_mode_probs] - - else: - per_mode_indices, per_mode_probs = trclip.get_results(texts=texts, images=ims, text_features=f_texts_embeddings, mode='per_text') - - print(f'per_mode_indices = {per_mode_indices}\n,per_mode_probs = {per_mode_probs} ') - print(f'im_paths = {im_paths}') - return image_retrieval_visualize(per_mode_indices, per_mode_probs, texts, im_paths, - n_figure_in_column=2, - n_images_in_figure=4, n_figure_in_row=1, save_fig=False, - show=False, - break_on_index=-1) - - -def run_text(im1, use_trcap_images, text1, use_trcap_texts): - print(f'{datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")} INFO : Text retrieval starting') - f_image_embeddings = None - ims = None - if use_trcap_images: - print('INFO : TRCaption images used') - random_indexes = random.sample(range(len(trcap_urls)), 2) # MAX 2 text are allowed in image retrieval UI limit - f_image_embeddings = [] - for i in random_indexes: - f_image_embeddings.append(load_spesific_tensor(i, 'image')) - f_image_embeddings = torch.stack(f_image_embeddings) - print(f'f_image_embeddings = {f_image_embeddings}') - # Images taken from TRCAPTION - im_paths = [trcap_urls[i] for i in random_indexes] - print(f'im_paths = 
{im_paths}') - - else: - print('INFO : Own images used') - # Images taken from user - im_paths = [i.name for i in im1[:2]] - ims = [Image.open(i) for i in im_paths] - - if use_trcap_texts: - texts = trcap_texts - else: - texts = [i.strip() for i in text1.split('\n')[:2] if i.strip() != ''] - - if use_trcap_texts: - f_image_embeddings = f_image_embeddings if use_trcap_images else trclip.get_image_features(ims) - per_mode_probs = [] - for f_texts_embeddings in tqdm(load_text_embeddings(load_batch=True), desc='Running text retrieval'): - batch_probs = trclip.get_results( - text_features=f_texts_embeddings, image_features=f_image_embeddings, mode='per_image', return_probs=True) - per_mode_probs.append(batch_probs) - per_mode_probs = torch.cat(per_mode_probs, dim=1) - per_mode_probs = per_mode_probs.softmax(dim=-1).cpu().detach().numpy() - per_mode_indices = [np.argsort(prob)[::-1] for prob in per_mode_probs] - - else: - per_mode_indices, per_mode_probs = trclip.get_results(texts=texts, images=ims, image_features=f_image_embeddings, mode='per_image') - print(per_mode_indices) - print(per_mode_probs) - return text_retrieval_visualize(per_mode_indices, per_mode_probs, im_paths, texts, - n_figure_in_column=4, - n_texts_in_figure=4 if len(texts) > 4 else len(texts), - n_figure_in_row=2, - save_fig=False, - show=False, - break_on_index=-1, - ) - - -def change_textbox(choice): - if choice == "Use Own Images": - - return gr.Image.update(visible=True) - else: - return gr.Image.update(visible=False) - - -with gr.Blocks() as demo: - gr.HTML(""" -
-                    Trclip Demo
-                    Trclip is a Turkish port of the original CLIP. In this space you can try your own images and/or texts.
-                    You can also use the pre-computed TrCaption embeddings.
-                    Number of texts = 3533312
-                    Number of images = 3070976
-                    Some images are no longer available online, because the TrCaption embeddings were downloaded and computed a long time ago, so don't be surprised if you run into an "Image not found".
-                    GitHub Repository --- Paper (not available yet)
-                    Hugging Face Space containers have 16 GB of RAM, while the TrCaption embeddings total about 20 GB; the app does a lot of reading from and writing to disk to stay within that limit, which is why it runs much more slowly when the TrCaption embeddings are used.
                    - """) - - with gr.Tabs(): - with gr.TabItem("Upload a Images"): - im_input = gr.components.File(label="Image input", optional=True, file_count='multiple') - is_trcap_ims = gr.Checkbox(label="Use TRCaption Images\n[Note: Random 2 sample selected in text retrieval mode]",default=True) - - with gr.Tabs(): - with gr.TabItem("Input a text (Seperated by new line Max 2 for Image retrieval)"): - text_input = gr.components.Textbox(label="Text input", optional=True , placeholder = "kedi\nköpek\nGemi\nKahvesini içmekte olan bir adam\n Kahvesini içmekte olan bir kadın\nAraba") - is_trcap_texts = gr.Checkbox(label="Use TrCaption Captions \n[Note: Random 2 sample selected in image retrieval mode]",default=True) - - im_ret_but = gr.Button("Image Retrieval") - text_ret_but = gr.Button("Text Retrieval") - - im_out = gr.components.Image() - - im_ret_but.click(run_im, inputs=[im_input, is_trcap_ims, text_input, is_trcap_texts], outputs=im_out) - text_ret_but.click(run_text, inputs=[im_input, is_trcap_ims, text_input, is_trcap_texts], outputs=im_out) - -demo.launch() - -# %% diff --git a/spaces/zhang-wei-jian/docker/node_modules/tsscmp/test/benchmark/index.js b/spaces/zhang-wei-jian/docker/node_modules/tsscmp/test/benchmark/index.js deleted file mode 100644 index fd93e42338e3fdbc0633f384dd2034b5e23d5159..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/tsscmp/test/benchmark/index.js +++ /dev/null @@ -1,30 +0,0 @@ -'use strict'; - -var timeSafeCompare = require('../../lib/index'); - -function random(length) { - - length = length || 32; - var result = ""; - var possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()-+/*[]{}-=\|;\':\"<>?,./"; - - for( var i=0; i < length; i++ ){ - result += possible.charAt(Math.floor(Math.random() * possible.length)); - } - return result; -} - -function run(count) { - count = count || 100*1000; - console.log('benchmark count: ' + count/1000 + 'k'); - console.time('benchmark'); - - while(count--){ - timeSafeCompare(random(), random()); - } - console.timeEnd('benchmark'); -} - -run(100000); - -module.exports = run; diff --git a/spaces/zlc99/M4Singer/usr/diffsinger_task.py b/spaces/zlc99/M4Singer/usr/diffsinger_task.py deleted file mode 100644 index aa500a7c410d5c86bdd7fa339f73dc49817ada3b..0000000000000000000000000000000000000000 --- a/spaces/zlc99/M4Singer/usr/diffsinger_task.py +++ /dev/null @@ -1,490 +0,0 @@ -import torch - -import utils -from utils.hparams import hparams -from .diff.net import DiffNet -from .diff.shallow_diffusion_tts import GaussianDiffusion, OfflineGaussianDiffusion -from .diffspeech_task import DiffSpeechTask -from vocoders.base_vocoder import get_vocoder_cls, BaseVocoder -from modules.fastspeech.pe import PitchExtractor -from modules.fastspeech.fs2 import FastSpeech2 -from modules.diffsinger_midi.fs2 import FastSpeech2MIDI -from modules.fastspeech.tts_modules import mel2ph_to_dur - -from usr.diff.candidate_decoder import FFT -from utils.pitch_utils import denorm_f0 -from tasks.tts.fs2_utils import FastSpeechDataset -from tasks.tts.fs2 import FastSpeech2Task - -import numpy as np -import os -import torch.nn.functional as F - -DIFF_DECODERS = { - 'wavenet': lambda hp: DiffNet(hp['audio_num_mel_bins']), - 'fft': lambda hp: FFT( - hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']), -} - - -class DiffSingerTask(DiffSpeechTask): - def __init__(self): - super(DiffSingerTask, self).__init__() - self.dataset_cls = FastSpeechDataset - self.vocoder: 
BaseVocoder = get_vocoder_cls(hparams)() - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - self.pe = PitchExtractor().cuda() - utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True) - self.pe.eval() - - def build_tts_model(self): - # import torch - # from tqdm import tqdm - # v_min = torch.ones([80]) * 100 - # v_max = torch.ones([80]) * -100 - # for i, ds in enumerate(tqdm(self.dataset_cls('train'))): - # v_max = torch.max(torch.max(ds['mel'].reshape(-1, 80), 0)[0], v_max) - # v_min = torch.min(torch.min(ds['mel'].reshape(-1, 80), 0)[0], v_min) - # if i % 100 == 0: - # print(i, v_min, v_max) - # print('final', v_min, v_max) - mel_bins = hparams['audio_num_mel_bins'] - self.model = GaussianDiffusion( - phone_encoder=self.phone_encoder, - out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), - timesteps=hparams['timesteps'], - K_step=hparams['K_step'], - loss_type=hparams['diff_loss_type'], - spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], - ) - if hparams['fs2_ckpt'] != '': - utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True) - # self.model.fs2.decoder = None - #for k, v in self.model.fs2.named_parameters(): - # v.requires_grad = False - - def validation_step(self, sample, batch_idx): - outputs = {} - txt_tokens = sample['txt_tokens'] # [B, T_t] - - target = sample['mels'] # [B, T_s, 80] - energy = sample['energy'] - # fs2_mel = sample['fs2_mels'] - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - mel2ph = sample['mel2ph'] - f0 = sample['f0'] - uv = sample['uv'] - - outputs['losses'] = {} - - outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) - - - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - outputs = utils.tensors_to_scalars(outputs) - if batch_idx < hparams['num_valid_plots']: - model_out = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, ref_mels=None, infer=True) - - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel - pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel - else: - gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) - pred_f0 = model_out.get('f0_denorm') - self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}') - self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}') - return outputs - - -class ShallowDiffusionOfflineDataset(FastSpeechDataset): - def __getitem__(self, index): - sample = super(ShallowDiffusionOfflineDataset, self).__getitem__(index) - item = self._get_item(index) - - if self.prefix != 'train' and hparams['fs2_ckpt'] != '': - fs2_ckpt = os.path.dirname(hparams['fs2_ckpt']) - item_name = item['item_name'] - fs2_mel = torch.Tensor(np.load(f'{fs2_ckpt}/P_mels_npy/{item_name}.npy')) # ~M generated by FFT-singer. 
- sample['fs2_mel'] = fs2_mel - return sample - - def collater(self, samples): - batch = super(ShallowDiffusionOfflineDataset, self).collater(samples) - if self.prefix != 'train' and hparams['fs2_ckpt'] != '': - batch['fs2_mels'] = utils.collate_2d([s['fs2_mel'] for s in samples], 0.0) - return batch - - -class DiffSingerOfflineTask(DiffSingerTask): - def __init__(self): - super(DiffSingerOfflineTask, self).__init__() - self.dataset_cls = ShallowDiffusionOfflineDataset - - def build_tts_model(self): - mel_bins = hparams['audio_num_mel_bins'] - self.model = OfflineGaussianDiffusion( - phone_encoder=self.phone_encoder, - out_dims=mel_bins, denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), - timesteps=hparams['timesteps'], - K_step=hparams['K_step'], - loss_type=hparams['diff_loss_type'], - spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], - ) - # if hparams['fs2_ckpt'] != '': - # utils.load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True) - # self.model.fs2.decoder = None - - def run_model(self, model, sample, return_output=False, infer=False): - txt_tokens = sample['txt_tokens'] # [B, T_t] - target = sample['mels'] # [B, T_s, 80] - mel2ph = sample['mel2ph'] # [B, T_s] - f0 = sample['f0'] - uv = sample['uv'] - energy = sample['energy'] - fs2_mel = None #sample['fs2_mels'] - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - if hparams['pitch_type'] == 'cwt': - cwt_spec = sample[f'cwt_spec'] - f0_mean = sample['f0_mean'] - f0_std = sample['f0_std'] - sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) - - output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, - ref_mels=[target, fs2_mel], f0=f0, uv=uv, energy=energy, infer=infer) - - losses = {} - if 'diff_loss' in output: - losses['mel'] = output['diff_loss'] - # self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) - # if hparams['use_pitch_embed']: - # self.add_pitch_loss(output, sample, losses) - if hparams['use_energy_embed']: - self.add_energy_loss(output['energy_pred'], energy, losses) - - if not return_output: - return losses - else: - return losses, output - - def validation_step(self, sample, batch_idx): - outputs = {} - txt_tokens = sample['txt_tokens'] # [B, T_t] - - target = sample['mels'] # [B, T_s, 80] - energy = sample['energy'] - # fs2_mel = sample['fs2_mels'] - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - mel2ph = sample['mel2ph'] - f0 = sample['f0'] - uv = sample['uv'] - - outputs['losses'] = {} - - outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) - - - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - outputs = utils.tensors_to_scalars(outputs) - if batch_idx < hparams['num_valid_plots']: - fs2_mel = sample['fs2_mels'] - model_out = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, energy=energy, - ref_mels=[None, fs2_mel], infer=True) - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel - pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel - else: - gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) - pred_f0 = model_out.get('f0_denorm') - self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0) - self.plot_mel(batch_idx, sample['mels'], 
model_out['mel_out'], name=f'diffmel_{batch_idx}') - self.plot_mel(batch_idx, sample['mels'], fs2_mel, name=f'fs2mel_{batch_idx}') - return outputs - - def test_step(self, sample, batch_idx): - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - txt_tokens = sample['txt_tokens'] - energy = sample['energy'] - if hparams['profile_infer']: - pass - else: - mel2ph, uv, f0 = None, None, None - if hparams['use_gt_dur']: - mel2ph = sample['mel2ph'] - if hparams['use_gt_f0']: - f0 = sample['f0'] - uv = sample['uv'] - fs2_mel = sample['fs2_mels'] - outputs = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=[None, fs2_mel], energy=energy, - infer=True) - sample['outputs'] = self.model.out2mel(outputs['mel_out']) - sample['mel2ph_pred'] = outputs['mel2ph'] - - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel - sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred'] # pe predict from Pred mel - else: - sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams) - sample['f0_pred'] = outputs.get('f0_denorm') - return self.after_infer(sample) - - -class MIDIDataset(FastSpeechDataset): - def __getitem__(self, index): - sample = super(MIDIDataset, self).__getitem__(index) - item = self._get_item(index) - sample['f0_midi'] = torch.FloatTensor(item['f0_midi']) - sample['pitch_midi'] = torch.LongTensor(item['pitch_midi'])[:hparams['max_frames']] - - return sample - - def collater(self, samples): - batch = super(MIDIDataset, self).collater(samples) - batch['f0_midi'] = utils.collate_1d([s['f0_midi'] for s in samples], 0.0) - batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0) - # print((batch['pitch_midi'] == f0_to_coarse(batch['f0_midi'])).all()) - return batch - - -class M4SingerDataset(FastSpeechDataset): - def __getitem__(self, index): - sample = super(M4SingerDataset, self).__getitem__(index) - item = self._get_item(index) - sample['pitch_midi'] = torch.LongTensor(item['pitch_midi']) - sample['midi_dur'] = torch.FloatTensor(item['midi_dur']) - sample['is_slur'] = torch.LongTensor(item['is_slur']) - sample['word_boundary'] = torch.LongTensor(item['word_boundary']) - return sample - - def collater(self, samples): - batch = super(M4SingerDataset, self).collater(samples) - batch['pitch_midi'] = utils.collate_1d([s['pitch_midi'] for s in samples], 0) - batch['midi_dur'] = utils.collate_1d([s['midi_dur'] for s in samples], 0) - batch['is_slur'] = utils.collate_1d([s['is_slur'] for s in samples], 0) - batch['word_boundary'] = utils.collate_1d([s['word_boundary'] for s in samples], 0) - return batch - - -class DiffSingerMIDITask(DiffSingerTask): - def __init__(self): - super(DiffSingerMIDITask, self).__init__() - # self.dataset_cls = MIDIDataset - self.dataset_cls = M4SingerDataset - - def run_model(self, model, sample, return_output=False, infer=False): - txt_tokens = sample['txt_tokens'] # [B, T_t] - target = sample['mels'] # [B, T_s, 80] - # mel2ph = sample['mel2ph'] if hparams['use_gt_dur'] else None # [B, T_s] - mel2ph = sample['mel2ph'] - if hparams.get('switch_midi2f0_step') is not None and self.global_step > hparams['switch_midi2f0_step']: - f0 = None - uv = None - else: - f0 = sample['f0'] - uv = sample['uv'] - energy = sample['energy'] - - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - if hparams['pitch_type'] == 'cwt': - cwt_spec = 
sample[f'cwt_spec'] - f0_mean = sample['f0_mean'] - f0_std = sample['f0_std'] - sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) - - output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, - ref_mels=target, f0=f0, uv=uv, energy=energy, infer=infer, pitch_midi=sample['pitch_midi'], - midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) - - losses = {} - if 'diff_loss' in output: - losses['mel'] = output['diff_loss'] - self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses) - if hparams['use_pitch_embed']: - self.add_pitch_loss(output, sample, losses) - if hparams['use_energy_embed']: - self.add_energy_loss(output['energy_pred'], energy, losses) - if not return_output: - return losses - else: - return losses, output - - def validation_step(self, sample, batch_idx): - outputs = {} - txt_tokens = sample['txt_tokens'] # [B, T_t] - - target = sample['mels'] # [B, T_s, 80] - energy = sample['energy'] - # fs2_mel = sample['fs2_mels'] - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - mel2ph = sample['mel2ph'] - - outputs['losses'] = {} - - outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True, infer=False) - - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - outputs = utils.tensors_to_scalars(outputs) - if batch_idx % 20 == 0 and batch_idx // 20 < hparams['num_valid_plots']: - model_out = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=None, uv=None, energy=energy, ref_mels=None, infer=True, - pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) - - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - gt_f0 = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel - pred_f0 = self.pe(model_out['mel_out'])['f0_denorm_pred'] # pe predict from Pred mel - else: - gt_f0 = denorm_f0(sample['f0'], sample['uv'], hparams) - pred_f0 = model_out.get('f0_denorm') - self.plot_wav(batch_idx, sample['mels'], model_out['mel_out'], is_mel=True, gt_f0=gt_f0, f0=pred_f0) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'], name=f'diffmel_{batch_idx}') - self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'], name=f'fs2mel_{batch_idx}') - if hparams['use_pitch_embed']: - self.plot_pitch(batch_idx, sample, model_out) - return outputs - - def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None): - """ - :param dur_pred: [B, T], float, log scale - :param mel2ph: [B, T] - :param txt_tokens: [B, T] - :param losses: - :return: - """ - B, T = txt_tokens.shape - nonpadding = (txt_tokens != 0).float() - dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding - is_sil = torch.zeros_like(txt_tokens).bool() - for p in self.sil_ph: - is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0]) - is_sil = is_sil.float() # [B, T_txt] - - # phone duration loss - if hparams['dur_loss'] == 'mse': - losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none') - losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum() - dur_pred = (dur_pred.exp() - 1).clamp(min=0) - else: - raise NotImplementedError - - # use linear scale for sent and word duration - if hparams['lambda_word_dur'] > 0: - idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1] - # word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur - word_dur_p = 
dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred) - word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt) - wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none') - word_nonpadding = (word_dur_g > 0).float() - wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum() - losses['wdur'] = wdur_loss * hparams['lambda_word_dur'] - if hparams['lambda_sent_dur'] > 0: - sent_dur_p = dur_pred.sum(-1) - sent_dur_g = dur_gt.sum(-1) - sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean') - losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] - - -class AuxDecoderMIDITask(FastSpeech2Task): - def __init__(self): - super().__init__() - # self.dataset_cls = MIDIDataset - self.dataset_cls = M4SingerDataset - - def build_tts_model(self): - if hparams.get('use_midi') is not None and hparams['use_midi']: - self.model = FastSpeech2MIDI(self.phone_encoder) - else: - self.model = FastSpeech2(self.phone_encoder) - - def run_model(self, model, sample, return_output=False): - txt_tokens = sample['txt_tokens'] # [B, T_t] - target = sample['mels'] # [B, T_s, 80] - mel2ph = sample['mel2ph'] # [B, T_s] - f0 = sample['f0'] - uv = sample['uv'] - energy = sample['energy'] - - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - if hparams['pitch_type'] == 'cwt': - cwt_spec = sample[f'cwt_spec'] - f0_mean = sample['f0_mean'] - f0_std = sample['f0_std'] - sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) - - output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, - ref_mels=target, f0=f0, uv=uv, energy=energy, infer=False, pitch_midi=sample['pitch_midi'], - midi_dur=sample.get('midi_dur'), is_slur=sample.get('is_slur')) - - losses = {} - self.add_mel_loss(output['mel_out'], target, losses) - self.add_dur_loss(output['dur'], mel2ph, txt_tokens, sample['word_boundary'], losses=losses) - if hparams['use_pitch_embed']: - self.add_pitch_loss(output, sample, losses) - if hparams['use_energy_embed']: - self.add_energy_loss(output['energy_pred'], energy, losses) - if not return_output: - return losses - else: - return losses, output - - def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, wdb, losses=None): - """ - :param dur_pred: [B, T], float, log scale - :param mel2ph: [B, T] - :param txt_tokens: [B, T] - :param losses: - :return: - """ - B, T = txt_tokens.shape - nonpadding = (txt_tokens != 0).float() - dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding - is_sil = torch.zeros_like(txt_tokens).bool() - for p in self.sil_ph: - is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0]) - is_sil = is_sil.float() # [B, T_txt] - - # phone duration loss - if hparams['dur_loss'] == 'mse': - losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none') - losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum() - dur_pred = (dur_pred.exp() - 1).clamp(min=0) - else: - raise NotImplementedError - - # use linear scale for sent and word duration - if hparams['lambda_word_dur'] > 0: - idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1] - # word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_(1, idx, midi_dur) # midi_dur can be implied by add gt-ph_dur - word_dur_p = dur_pred.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_pred) - word_dur_g = dur_gt.new_zeros([B, idx.max() + 1]).scatter_add(1, idx, dur_gt) - wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), 
reduction='none') - word_nonpadding = (word_dur_g > 0).float() - wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum() - losses['wdur'] = wdur_loss * hparams['lambda_word_dur'] - if hparams['lambda_sent_dur'] > 0: - sent_dur_p = dur_pred.sum(-1) - sent_dur_g = dur_gt.sum(-1) - sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean') - losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] - - def validation_step(self, sample, batch_idx): - outputs = {} - outputs['losses'] = {} - outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True) - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - mel_out = self.model.out2mel(model_out['mel_out']) - outputs = utils.tensors_to_scalars(outputs) - # if sample['mels'].shape[0] == 1: - # self.add_laplace_var(mel_out, sample['mels'], outputs) - if batch_idx < hparams['num_valid_plots']: - self.plot_mel(batch_idx, sample['mels'], mel_out) - self.plot_dur(batch_idx, sample, model_out) - if hparams['use_pitch_embed']: - self.plot_pitch(batch_idx, sample, model_out) - return outputs \ No newline at end of file diff --git a/spaces/zzz666/ChuanhuChatGPT/modules/overwrites.py b/spaces/zzz666/ChuanhuChatGPT/modules/overwrites.py deleted file mode 100644 index bfcd4d01b7d7bec1184a8d09113933bca860530b..0000000000000000000000000000000000000000 --- a/spaces/zzz666/ChuanhuChatGPT/modules/overwrites.py +++ /dev/null @@ -1,56 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html - -from modules.presets import * -from modules.llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, y: List[Tuple[str | None, str | None]] -) -> List[Tuple[str | None, str | None]]: - """ - Parameters: - y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. - Returns: - List of tuples representing the message and response. Each message and response will be a string of HTML. - """ - if y is None or y == []: - return [] - user, bot = y[-1] - if not detect_converted_mark(user): - user = convert_asis(user) - if not detect_converted_mark(bot): - bot = convert_mdtext(bot) - y[-1] = (user, bot) - return y - -with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2: - customJS = f.read() - kelpyCodos = f2.read() - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file
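A minimal standalone sketch (toy shapes and tensor names, not part of the deleted files) of the scatter_add pooling that add_dur_loss in the diffsinger_task.py hunk above uses to turn per-phone durations into per-word durations:

import torch
import torch.nn.functional as F

# Toy batch: 2 sequences of 6 phones each (values are illustrative only).
dur_pred = torch.rand(2, 6)                       # predicted per-phone durations (frames)
wdb = torch.tensor([[0, 0, 1, 0, 1, 0],           # word-boundary flags per phone
                    [0, 1, 0, 0, 0, 1]])

# Cumulative sum turns the boundary flags into word indices; the pad/slice shifts them
# one position right, so the boundary phone stays in its own word and the next phone
# opens a new one (this mirrors idx = F.pad(wdb.cumsum(axis=1), (1, 0))[:, :-1] above).
idx = F.pad(wdb.cumsum(dim=1), (1, 0))[:, :-1]

# scatter_add sums all phone durations that share a word index into one bucket per word.
word_dur = dur_pred.new_zeros(2, int(idx.max()) + 1).scatter_add(1, idx, dur_pred)
print(word_dur)                                   # shape [2, 3]: one pooled duration per word

The same index tensor is reused for the ground-truth durations, so predicted and reference word durations line up bucket by bucket before the log-scale MSE, with empty buckets masked out by the word_nonpadding term.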