diff --git a/spaces/101-5/gpt4free/g4f/.v1/testing/sqlchat_test.py b/spaces/101-5/gpt4free/g4f/.v1/testing/sqlchat_test.py
deleted file mode 100644
index 1db71be2e8abccc16cdbfc1b78a8d3e9adbf2122..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/testing/sqlchat_test.py
+++ /dev/null
@@ -1,4 +0,0 @@
-import sqlchat
-
-for response in sqlchat.StreamCompletion.create(prompt='write python code to reverse a string', messages=[]):
- print(response.completion.choices[0].text, end='')
diff --git a/spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/typings.py b/spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/typings.py
deleted file mode 100644
index 75b73bf9e5228ec3f636d184df6f5dd07b8dcd91..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/typings.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from typing import Dict, List, Union
-
-
-class BardResponse:
- def __init__(self, json_dict: Dict[str, Union[str, List]]) -> None:
- """
- Initialize a BardResponse object.
-
- :param json_dict: A dictionary containing the JSON response data.
- """
- self.json = json_dict
-
- self.content = json_dict.get('content')
- self.conversation_id = json_dict.get('conversation_id')
- self.response_id = json_dict.get('response_id')
- self.factuality_queries = json_dict.get('factualityQueries', [])
- self.text_query = json_dict.get('textQuery', [])
- self.choices = [self.BardChoice(choice)
- for choice in json_dict.get('choices', [])]
-
- def __repr__(self) -> str:
- """
- Return a string representation of the BardResponse object.
-
- :return: A string representation of the BardResponse object.
- """
- return f"BardResponse(conversation_id={self.conversation_id}, response_id={self.response_id}, content={self.content})"
-
- def filter_choices(self, keyword: str) -> List['BardChoice']:
- """
- Filter the choices based on a keyword.
-
- :param keyword: The keyword to filter choices by.
- :return: A list of filtered BardChoice objects.
- """
- return [choice for choice in self.choices if keyword.lower() in choice.content.lower()]
-
- class BardChoice:
- def __init__(self, choice_dict: Dict[str, str]) -> None:
- """
- Initialize a BardChoice object.
-
- :param choice_dict: A dictionary containing the choice data.
- """
- self.id = choice_dict.get('id')
- self.content = choice_dict.get('content')[0]
-
- def __repr__(self) -> str:
- """
- Return a string representation of the BardChoice object.
-
- :return: A string representation of the BardChoice object.
- """
- return f"BardChoice(id={self.id}, content={self.content})"
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Flash Activex Control 6.0.0.0 35.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Flash Activex Control 6.0.0.0 35.md
deleted file mode 100644
index 6488cdcb393b72b558d0128285646a3cdd41b6d6..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Flash Activex Control 6.0.0.0 35.md
+++ /dev/null
@@ -1,136 +0,0 @@
-
-
-If you are a fan of online games, animations, or interactive web applications, you have probably encountered Adobe Flash at some point. Adobe Flash is a software platform that allows you to create and view multimedia content on the web. However, to enjoy the full features and functionality of Adobe Flash, you need to install a special component called ActiveX control for Flash Player.
-DOWNLOAD ✵ https://byltly.com/2uKvyb
-In this article, we will explain what is Adobe Flash ActiveX Control 6.0.0.0 35, why you need it, how to download and install it, and how to use it effectively.
-Adobe Flash is a software platform that enables you to create and view rich media content on the web, such as games, animations, videos, or interactive applications. Adobe Flash works by using a browser plug-in called Flash Player, which allows you to run Flash content on your web browser.
-ActiveX control is a technology that allows web browsers to run certain types of software components, such as plug-ins, add-ons, or extensions. ActiveX control can enhance the functionality and performance of web browsers by allowing them to access features that are not built-in.
-How to install adobe flash activex control 6.0.0.0 35
-Adobe flash activex control 6.0.0.0 35 download link
-Adobe flash activex control 6.0.0.0 35 compatibility issues
-Adobe flash activex control 6.0.0.0 35 security updates
-Adobe flash activex control 6.0.0.0 35 error messages
-Adobe flash activex control 6.0.0.0 35 uninstall guide
-Adobe flash activex control 6.0.0.0 35 features and benefits
-Adobe flash activex control 6.0.0.0 35 alternatives and replacements
-Adobe flash activex control 6.0.0.0 35 reviews and ratings
-Adobe flash activex control 6.0.0.0 35 troubleshooting tips
-Adobe flash activex control 6.0.0.0 35 license and pricing
-Adobe flash activex control 6.0.0.0 35 documentation and support
-Adobe flash activex control 6.0.0.0 35 system requirements and specifications
-Adobe flash activex control 6.0.0.0 35 changelog and history
-Adobe flash activex control 6.0.0.0 35 FAQs and Q&A
-Adobe flash activex control 6.0.0.0 35 tutorials and videos
-Adobe flash activex control 6.0.0.0 35 performance and optimization
-Adobe flash activex control 6.0.0.0 35 bugs and fixes
-Adobe flash activex control 6.0.0.0 35 source code and development
-Adobe flash activex control 6.0.0.0 35 forums and communities
-Adobe flash activex control 6.0.0.0 35 best practices and tips
-Adobe flash activex control 6.0.0.0 35 comparison and contrast
-Adobe flash activex control 6.0.
-ActiveX control for Flash Player is a specific type of ActiveX control that enables Internet Explorer to run Flash content on web pages. Without ActiveX control for Flash Player, Internet Explorer cannot display Flash content properly.
-If you use Internet Explorer as your web browser, you need to install ActiveX control for Flash Player in order to view and interact with Flash content on the web. Some websites may require you to have ActiveX control for Flash Player installed before you can access their content or features.
-Installing ActiveX control for Flash Player can also improve your browsing experience by allowing you to enjoy faster loading times, smoother animations, better sound quality, and more interactive features.
-The version 6.0.0.0 35 of ActiveX control for Flash Player is an old version that was released in September 2002. It was designed to support Adobe Flash version 6, which was also released in September 2002.
-The version 6.0.0.0 35 of ActiveX control for Flash Player is no longer supported by Adobe or Microsoft, and it may not be compatible with newer versions of Internet Explorer or Windows operating systems. It may also have security vulnerabilities or performance issues that could affect your browsing experience.
-Therefore, we do not recommend using this version of ActiveX control for Flash Player unless you have a specific reason or requirement to do so. Instead, we suggest that you update your ActiveX control for Flash Player to the latest version available from Adobe's website.
-In this section, we will show you how to download and install ActiveX control for Flash Player on your computer.
-The easiest way to download the ActiveX control for Flash Player is from Adobe's website. You can visit this link to download the latest version of ActiveX control for Flash Player.
-If you need an older version of ActiveX control for Flash Player, such as version 6.0.0.0 35, you can visit this link to download it from Sothink's website.
-Once you have downloaded the file for the ActiveX control for Flash Player, you can follow these steps to install it on your computer:
-To verify that the ActiveX control for Flash Player is installed correctly on your computer, you can follow these steps:
-In this section, we will show you how to use ActiveX control for Flash Player effectively on your web browser.
-To enable or disable ActiveX control for Flash Player in Internet Explorer, you can follow these steps:
-To manage the settings and permissions of ActiveX control for Flash Player in Internet Explorer, you can follow these steps:
-To update ActiveX control for Flash Player, you can follow these steps:
-To enable or disable Flash content on specific websites, you can follow these steps:
-To check if your ActiveX control for Flash Player is working properly, you can follow these steps:
-Adobe Flash and ActiveX control for Flash Player are outdated technologies that are no longer supported by Adobe or Microsoft. They may also pose some security risks or compatibility issues with newer browsers or operating systems. Therefore, you may want to consider some alternatives to Adobe Flash and ActiveX control for Flash Player, such as:
-Have you ever encountered a DLL error on your Windows PC? If yes, then you know how frustrating it can be to deal with such errors. DLL errors can prevent you from running your favorite programs, games, or applications. They can also cause your system to crash, freeze, or slow down.
-Fortunately, there is a way to fix DLL errors without having to reinstall your Windows or buy a new PC. You can use a tool called Crackeddll 3.1.5.0, which is a software that can repair any DLL error in just a few clicks.
-DOWNLOAD 🆓 https://byltly.com/2uKxSs
-In this article, we will tell you everything you need to know about Crackeddll 3.1.5.0, including what it is, how it works, how to download and install it, how to use it, and what are its benefits and risks.
-Crackeddll 3.1.5.0 is a software that can fix any DLL error on your Windows PC.
-DLL stands for Dynamic Link Library, which is a file that contains code and data that can be used by multiple programs at the same time.
-DLL files are essential for the proper functioning of your Windows system and applications.
-However, sometimes DLL files can get corrupted, missing, or outdated due to various reasons, such as virus infection, hardware failure, software installation or uninstallation, registry errors, etc.
-How to download Crackeddll 3.1.5.0 for free
-Crackeddll 3.1.5.0 full version download link
-Crackeddll 3.1.5.0 license key generator
-Crackeddll 3.1.5.0 crack file download
-Crackeddll 3.1.5.0 patch download
-Crackeddll 3.1.5.0 activation code
-Crackeddll 3.1.5.0 serial number
-Crackeddll 3.1.5.0 keygen download
-Crackeddll 3.1.5.0 registration code
-Crackeddll 3.1.5.0 torrent download
-Crackeddll 3.1.5.0 direct download link
-Crackeddll 3.1.5.0 mega download
-Crackeddll 3.1.5.0 mediafire download
-Crackeddll 3.1.5.0 rapidshare download
-Crackeddll 3.1.5.0 zippyshare download
-Crackeddll 3.1.5.0 google drive download
-Crackeddll 3.1.5.0 dropbox download
-Crackeddll 3.1.5.0 one drive download
-Crackeddll 3.1.5.0 box download
-Crackeddll 3.1.5.0 icloud download
-Download Crackeddll 3.1.5.0 for windows 10
-Download Crackeddll 3.1.5.0 for windows 8
-Download Crackeddll 3.1.5.0 for windows 7
-Download Crackeddll 3.1.5.0 for mac os x
-Download Crackeddll 3.1.5
-When this happens, you may see an error message like this:
-"The program can't start because XYZ.dll is missing from your computer."
-"XYZ.dll not found."
-"XYZ.dll was not found."
-"XYZ.dll could not be located."
-"XYZ.dll Access Violation."
-"XYZ.dll has caused an error."
-"XYZ.dll is either not designed to run on Windows or it contains an error."
-These error messages can vary depending on the name of the DLL file and the program that is trying to use it.
-To fix these errors, you need to replace or restore the problematic DLL file with a working one.
-That's where Crackeddll 3.1.5.0 comes in handy.
-Crackeddll 3.1.5.0 is a powerful tool that can fix any DLL error in just a few clicks.
-Some of its features are:
-To download and install Crackeddll 3.1.5.0 on your PC, follow these steps:
-To use Crackeddll 3.1.5.0 to fix DLL errors on your PC, follow these steps:
-When you launch the software, you will see a Scan button on the main interface.
-Click on it and wait for the software to scan your system for any DLL errors.
-The scan may take a few minutes depending on the size and condition of your system.
-After the scan is complete, you will see a list of all the DLL errors found on your system.
-You can check the details of each error by clicking on it.
-You can also sort the list by name, date, size, or type of error.
-You can select all the errors by clicking on the Select All button at the bottom of the list.
-You can also select individual errors by checking the boxes next to them.
-Once you have selected all the errors you want to fix, click on the Fix button at the bottom right corner of the interface.
-The software will then download and install the appropriate DLL files for each error from its database.
-The software will also backup your original DLL files and create a system restore point before making any changes to your system.
-The fixing process may take some time depending on the number and size of the errors.
-You will see a progress bar showing you how much time is left until completion.
-Using Crackeddll 3.1.5.0 to fix DLL errors can have many benefits for your PC and your user experience.
-Some of these benefits are:
-However, using Crackeddll 3.1.5.0 also comes with some risks that you should be aware of before using it.
-Some of these risks are:
-In conclusion, Crackeddll 3.1.5.0 is a software that can fix any DLL error on your Windows PC in just a few clicks.
-It has a large database of over 30 million DLL files that can be downloaded and installed on your PC.
-It can scan your system for any DLL errors and display them in a list.
-It can automatically select the best version of the DLL file for your system and program.
-It can backup your original DLL files before replacing them with the new ones.
-It can create a system restore point before making any changes to your system.
-It can fix multiple DLL errors at once with one click.
-It has a simple and user-friendly interface that anyone can use.
-It supports all versions of Windows from XP to 10.
-However, it also has some risks that you should be aware of before using it.
-It may contain viruses or malware that can harm your PC or steal your personal information.
-It may damage your system files or registry by replacing them with faulty or incompatible DLL files.
-It may violate the software license agreement by using cracked or pirated DLL files that are not authorized by the original developers.
-It may cause legal issues or ethical dilemmas by infringing on the intellectual property rights of the software owners.
-Therefore, we recommend that you use Crackeddll 3.1.5.0 at your own risk and discretion.
-If you are a fan of soccer games, you probably know about FIFA 16, one of the most popular and realistic soccer simulations ever created. FIFA 16 is a game that lets you create your own dream team, play in various modes and leagues, and enjoy the thrill of scoring goals and winning matches.
-However, if you want to play FIFA 16 on your PC, you might face some challenges. First of all, you need to buy the game from an official source, which can be expensive and inconvenient. Second, you need to activate the game online using a unique code, which can be problematic if you have a slow or unreliable internet connection. Third, you need to deal with various restrictions and limitations imposed by the game developers, such as DRM (digital rights management), online verification, updates, patches, etc.
-Download Zip ✯ https://byltly.com/2uKwvS
-That's why many gamers look for a way to bypass these obstacles and play FIFA 16 for free and without any hassle. And that's where FIFA 16 Crack 3DM V3 comes in handy. In this article, we will explain what FIFA 16 Crack 3DM V3 is, how it works, how to download and install it, what are its features and benefits, and how to fix common issues and errors with it. So, if you are interested in playing FIFA 16 with crack, read on and follow our guide.
-FIFA 16 Crack 3DM V3 is a software tool that allows you to play FIFA 16 on your PC without buying or activating the game. It is a modified version of the original game executable file that bypasses the DRM protection and online verification systems. It also enables some additional features and options that are not available in the official game.
-FIFA 16 Crack 3DM V3 is created by 3DM, a Chinese hacking group that specializes in cracking PC games. They have cracked many other popular games, such as GTA V, Far Cry Primal, Rise of the Tomb Raider, etc. They use a technique called Denuvo emulation, which simulates the Denuvo anti-tamper technology used by many game developers to prevent piracy.
-Denuvo is a complex and sophisticated system that encrypts and decrypts the game code on the fly, making it hard to reverse engineer and modify. However, 3DM has managed to find a way to emulate Denuvo's behavior and trick the game into thinking that it is running on a legitimate platform. This way, they can crack the game without removing or altering Denuvo's code.
-If you want to download and install FIFA 16 Crack 3DM V3 on your PC, you need to follow these steps:
-Download the game files (FIFA.16.Super.Deluxe.Edition-SKIDROW) and the crack files (FIFA_16_Crack_Only_v2-SKIDROW), then extract the game files to a folder of your choice.
-Copy fifa16.exe from the crack folder and paste it into the game installation directory (the folder where you extracted the game files). Replace the existing file when prompted.
-Launch the game by right-clicking fifa16.exe and selecting Run as administrator.
-To change the language, open fifaconfig.ini in Notepad or any other text editor and change the value of Language to your desired language code (e.g., en_US, es_ES, etc.). Save the file and restart the game.
-FIFA 16 Crack 3DM V3 has many features and benefits that make it superior to other cracks available on the internet. Some of them are:
-download fifa 16 crack 3dm v3 free
-download fifa 16 crack 3dm v3 full version
-download fifa 16 crack 3dm v3 working
-download fifa 16 crack 3dm v3 torrent
-download fifa 16 crack 3dm v3 no survey
-download fifa 16 crack 3dm v3 pc
-download fifa 16 crack 3dm v3 windows 10
-download fifa 16 crack 3dm v3 online
-download fifa 16 crack 3dm v3 latest
-download fifa 16 crack 3dm v3 updated
-download fifa 16 crack 3dm v3 fix
-download fifa 16 crack 3dm v3 skidrow
-download fifa 16 crack 3dm v3 reloaded
-download fifa 16 crack 3dm v3 codex
-download fifa 16 crack 3dm v3 cpy
-download fifa 16 crack 3dm v3 rar
-download fifa 16 crack 3dm v3 zip
-download fifa 16 crack 3dm v3 mega
-download fifa 16 crack 3dm v3 mediafire
-download fifa 16 crack 3dm v3 google drive
-download fifa 16 crack 3dm v3 direct link
-download fifa 16 crack 3dm v3 fast
-download fifa 16 crack 3dm v3 easy
-download fifa 16 crack 3dm v3 safe
-download fifa 16 crack 3dm v3 virus free
-download fifa 16 crack 3dm v3 without password
-download fifa 16 crack 3dm v3 without error
-download fifa 16 crack 3dm v3 without origin
-download fifa 16 crack 3dm v3 without steam
-download fifa 16 crack 3dm v3 offline mode
-download fifa 16 crack 3dm v4 (updated)
-how to download fifa 16 crack 3dm v4 (tutorial)
-where to download fifa 16 crack by skidrow (alternative)
-why to download fifa 16 cracked by cpy (review)
-what is the best site to download fifa games cracked (recommendation)
-when will be the next update for fifa games cracked (news)
-who is the developer of the game and the cracker (information)
-which version of the game and the cracker is compatible with my system (compatibility)
-what are the minimum and recommended system requirements for the game and the cracker (specifications)
-what are the features and benefits of the game and the cracker (description)
-Sometimes you might encounter some issues or errors while playing FIFA with crack. Here are some common problems and their solutions:
-This could be due to several reasons:
-fifaconfig.ini
-Express VPN is one of the most popular and trusted VPN services in the world. It offers fast, secure, and reliable connections to over 3000 servers in 94 countries. With Express VPN, you can access any website or app without censorship or geo-restrictions. You can also protect your privacy and data from hackers, trackers, and spies.
-However, Express VPN is not a free service. You need to pay a monthly or yearly subscription fee to use it. If you want to save money and enjoy Express VPN for free, you might be tempted to download and install a cracked version of the software. But is it worth it?
-DOWNLOAD ★★★★★ https://byltly.com/2uKzIM
-In this article, we will show you why you should avoid Express VPN crack for PC and how you can get a legitimate and risk-free trial of the service instead.
-Downloading and installing a cracked version of Express VPN might seem like a good idea at first. After all, who doesn't like free stuff? But there are many risks and disadvantages that come with using a pirated software. Here are some of them:
-As you can see, using Express VPN crack for PC is not worth the hassle and risk. You are better off using a legitimate and safe way to try out the service for free.
-If you want to test out Express VPN without paying anything, there is a simple and legal way to do so. You can take advantage of the service's 30-day money-back guarantee. Here's how it works:
-This way, you can use Express VPN for free for a month without any risk or hassle. You can also switch to another VPN service if you want to compare them. However, we are confident that once you try Express VPN, you will love it and want to keep using it.
-Express VPN is one of the best VPN services in the market today. It offers fast, secure, and reliable connections to over 3000 servers in 94 countries. It also has a user-friendly interface, advanced features, and excellent customer support.
- -If you want to use Express VPN for free, don't download and install a cracked version of the software. It
-Download Zip - https://imgfil.com/2uxZXC
-Download Zip ✸ https://imgfil.com/2uy1hZ
-If you are looking for a simple but entertaining online game that you can play on your Android device, you might want to try Agar.io APK. Agar.io is a popular multiplayer game that has millions of players around the world. In this game, you control a tiny cell that can grow bigger by eating other cells, but also has to avoid being eaten by larger ones. Sounds easy, right? Well, not so fast. Agar.io is a game that requires skill, strategy, and luck to survive and dominate the arena. In this article, we will tell you everything you need to know about Agar.io APK, including what it is, how to download and install it, and how to play it.
-Agar.io is a browser-based game that was created by a Brazilian developer named Matheus Valadares in 2015. The game was inspired by a scientific concept called agar, which is a gelatinous substance used to grow bacteria in petri dishes. The game became viral after it was featured on several YouTube channels and websites, and soon it was released as an app for Android and iOS devices.
-Download File ✫✫✫ https://urlin.us/2uSZWH
-The gameplay of Agar.io is simple but addictive. You start as a small cell with a random color and name. You can move your cell around the map by dragging your finger on the screen. Your goal is to eat smaller cells and pellets that are scattered around the map, while avoiding larger cells that can eat you. As you eat more cells and pellets, your cell will grow bigger and gain mass. However, the bigger you are, the slower you move, which makes you more vulnerable to being eaten by faster cells.
-Agar.io has many features that make it fun and challenging to play. Some of these features are:
-If you want to play Agar.io on your Android device, you need to download and install the Agar.io APK file. APK stands for Android Package Kit, which is a file format that contains all the necessary components for an app to run on an Android device. By downloading and installing the Agar.io APK file, you can enjoy some benefits that are not available on the official app store version.
-Some of the benefits of downloading and installing the Agar.io APK file are:
-To download and install the Agar.io APK file, you need to follow these steps:
-Now that you have downloaded and installed the Agar.io APK file, you are ready to play the game. Here are some tips on how to play Agar.io APK effectively:
-The controls of Agar.io APK are very simple and intuitive. You can move your cell around the map by dragging your finger on the screen. You can also use some buttons on the screen to perform some actions, such as:
-agar io apk download
-agar io apk mod
-agar io apk hack
-agar io apk latest version
-agar io apk for pc
-agar io apk offline
-agar io apk unlimited coins
-agar io apk old version
-agar io apk pure
-agar io apk android
-agar io apk uptodown
-agar io apk no ads
-agar io apk 2.25.0
-agar io apk revdl
-agar io apk mirror
-agar io apk 2023
-agar io apk mod menu
-agar io apk unlimited money
-agar io apk free download
-agar io apk pro
-agar io apk premium
-agar io apk full version
-agar io apk online
-agar io apk modded
-agar io apk cracked
-agar io apk update
-agar io apk 2.24.1
-agar io apk rexdl
-agar io apk 2.23.0
-agar io apk 2.22.0
-agar io apk mod unlimited skins
-agar io apk mod zoom hack
-agar io apk mod no root
-agar io apk mod anti ban
-agar io apk mod god mode
-agar io apk mod invisible skin
-agar io apk mod speed hack
-agar io apk mod all skins unlocked
-agar io apk mod coins hack
-agar io apk mod premium unlocked
-agar io apk mod unlimited dna and coins 2023 download free for android devices and tablets.
-Agar.io APK is a game that requires skill, strategy, and luck to survive and dominate the arena. Here are some tips and tricks that can help you improve your game:
-In conclusion, Agar.io APK is a fun and addictive online game that you can play on your Android device. It is a game that involves controlling a tiny cell that can grow bigger by eating other cells, but also has to avoid being eaten by larger ones. It is a game that requires skill, strategy, and luck to survive and dominate the arena. It is a game that has many features that make it fun and challenging to play, such as online modes, special abilities, customization options, chat functions, leaderboards, rewards, offline modes, and experimental modes. It is a game that you can download and install easily by following some simple steps. It is a game that you can play effectively by using some simple controls and following some tips and tricks. It is a game that you should try if you are looking for a simple but entertaining online game that you can play on your Android device.
-To summarize, here are the main points of this article:
-Here are some frequently asked questions about Agar.io APK:
-Agar.io is the official version of the game that you can download from the app store, while Agar.io APK is the file format that you can download from other websites. Agar.io APK may have some advantages over Agar.io, such as faster updates, more features, and fewer restrictions.
-Agar.io APK is safe to download and install as long as you get it from a trusted website that provides authentic and verified files. However, you should always be careful when downloading and installing any file from unknown sources, as they may contain viruses or malware that can harm your device or steal your data.
-You can get more skins, coins, and rewards in Agar.io APK by playing the game regularly, completing quests and achievements, watching ads, inviting friends, joining clans and groups, or buying them with real money.
-You can play Agar.io APK with your friends by joining the same server and mode, using the party code feature, or creating a private room. You can also chat with them in the game or join their clans and groups.
-If you like Agar.io APK, you may also like some other similar games, such as Slither.io, Diep.io, Hole.io, Paper.io, or Wormate.io. These are all online multiplayer games that involve controlling a character that can grow bigger by eating other characters or objects, but also has to avoid being eaten by larger ones.
- for paragraphs, for bold text, for italic text, Bakteriyalar təkhüceyrəli mikroorqanizmalardır. Prokaryotik mikroorqanizmlərin əksəriyyətini təşkil edən qrupdur. Böyüklükləri 0,1–10 mm arasındadır. Bakteriyaların dünya üzərində 3,5 milyard ildir yaşadıqları bilinməkdədir. Dünyada ən çox yayılmış orqanizmlərdir və yer kürəsində həyatın davam etməsi üçün böyük önəm təşkil etməkdədirlər. Bakteriyalar hələ yaxşı öyrənilməyəndə bitkilərə aid edilirdi. "Bağırsaq florası" anlayışı da buradan yaranıb. Bakteriyaları kəşf edən Antoni van Levenhuk (XVII) olub. Ən qədim geoloji dövrdə biosferin yaranmasında ayrı-ayrı qrup prokariotların rolu böyükdür. Bakterial fəallıq karbonatların, dəmir filizinin, sulfid, silisium, fosforit, boksit və s yataqlarının əmələ gəlməsinə səbəb olmuşdur. Download Zip ✔ https://urlin.us/2uSXfH Bakteriyaların taksanomiyası yani sınıflandırılması çox mürakkab və mübahiseli bir mövzudur. Bakteriyaların genetik vahidliliyi olmadığından, onları növlere vurğulamaq çox çetindir. Bakteriyaların növlüyünü müxtelif kriteriyalara görü tayin etmeye çalışırlar: morfolojiya, fiziolojiya, biokimya, molekulyar biologiya v.s. Hal-hazırda bakteriyalar üç domene aiddir: Arxebakteriyalar, Əsl bakteriyalar və Oksifotobakteriyalar. Arxebakteriyalar: əsasen, metansintezedici bakteriyalardır. Növlüyünün sayı 40-dan çoxdur.Arxebakteriyalarda fotosintez xlorofil yox, bakteriorodopsin Əsasında baş verir. Əsl bakteriyalar: anaerob vǝ aerob heterotrofların, bǝzǝn isǝ anaerob fotosintezedici avtotrof prokariotlaron, müxtǝlif qruplarıdır. Oksifotobakteriyalar: aerob fotosintezedici prokariotlardır (sinobakteriyalar vǝ xlorosibakteriyalar). Bakteriyaların morfolojiyası yani şǝkillǝri çox müxtǝlifdir. Bakteriaların koʻp turi tayoqcha şaklida boʻladi. Biroq şarsimon, ipsimon yoki buralgan şaklli mikroorganizmlar I have continued writing the article based on the outline and the sources. Here is the next part of the article: Tayoqcha şaklli bakteriyalar: ən sadə və ən çox rast gəlinən bakteriya şəklidir. Uzunluqları 0,5-10 mm, enləri isə 0,2-1,5 mm arasındadır. Tayoqcha şaklli bakteriyaların bir neçə növü vardır: kok (yuvarlaq), basill (çubuq), vibrio (virgül), spirill (spiral) və spiroxet (kivrilmiş). Şarsimon şaklli bakteriyalar: tayoqcha şakllı bakteriyalardan fərqli olaraq, hüceyrələri arasında birləşmə yeri olan bakteriyalardır. Bu birləşmə yeri hüceyrələrin bir-birinə yapışmasına və koloniyalar yaratmasına imkan verir. Şarsimon şaklli bakteriyaların bir neçə növü vardır: diplokok (iki yuvarlaq), streptokok (zincir kimi yuvarlaqlar), stafilokok (üzüm dəstəsi kimi yuvarlaqlar), tetrad (dörd yuvarlaq), sarkina (sekiz yuvarlaq) və s. Ipsimon şaklli bakteriyalar: tayoqcha şakllı bakteriyalardan fərqli olaraq, hüceyrələri arasında birləşmə yeri olmayan, amma bir-birinə bitişik olan bakteriyalardır. Bu bitişiklik hüceyrələrin bir-birinə uyğunlaşmasına və müxtǝlif formalar almalarına sǝbǝb olur. Ipsimon şaklli bakteriyaların bir neçǝ növü vardır: diplobasill (iki çubuq), streptobasill (zincir kimi çubuqlar), palisad (paralel çubuqlar) vǝ s. Bakteriyaların öyrənilmə tarixi Buralgan şaklli bakteriyalar: tayoqcha şakllı bakteriyalardan fǝrqli olaraq, hüceyrǝlǝri arasında birlǝşmǝ yeri olmayan, amma bir-birinǝ buralgan olan bakteriyalardır. Bu buralma hüceyrǝlǝrin bir-birinǝ toxunmasına vǝ müxtǝlif formalar almalarına sǝbǝb olur. Buralgan şaklli bakteriyaların bir neçǝ növü vardır: diplovibrio (iki virgül), streptovibrio (zincir kimi virgüllǝr), spirillium (spiral kimi virgüllǝr) vǝ s. 
Bakteriyaların fiziologiyası yani metabolizmi və funksiyaları çox müxtəlif və maraqlıdır. Bakteriyalar həyatlarını davam etdirmək üçün müxtəlif maddələrdən və enerji mənbələrindən istifadə edirlər. Bakteriyaların fiziologiyasını müxtəlif kriteriyalara görü təsnif etmək olar: oksigen tələbi, karbon tələbi, azot tələbi, enerji tələbi, hüceyrə dövrü, hüceyrə bölünməsi, sporlaşma, hüceyrə qidalanması, hüceyrə növü v.s. Oksigen tǝlǝbi: bakteriyalar oksigenin varlığına vǝ yoxluğuna görǝ fǝrqli reaksiyalar göstǝrir. Bakteriyaları oksigen tǝlǝbinǝ görǝ üç qrupa ayırmaq olar: aerob bakteriyalar (oksigeni soluyanlar), anaerob bakteriyalar (oksigeni soluyanlar) vǝ fakültativ anaerob bakteriyalar (oksigenin varlığına vǝ yoxluğuna uyğunlaşanlar). Karbon tǝlǝbi: bakteriyalar karbonu hüceyrǝ quruluşu vǝ enerji üçün istifadǝ edirlǝr. Bakteriyaları karbon mǝnbǝyinǝ görǝ iki qrupa ayırmaq olar: avtotrof bakteriyalar (kohnekarbondioksidi vǝ ya başqa inorganik maddeleri karbon mǝnbǝyi kimi istifadě edenler) vě heterotrof bakteriyalar (organik maddeleri karbon měnběyi kimi istifadě edenler). Azot tělěbi: bakteriyalar azotu hüceyrě quruluşu vě enerji üçün istifadě edirlěr. Bakteriyaları azot měnběyině görě iki qrupa ayırmaq olar: azotfiksirici bakteriyalar (havada olan azotu amonyaka çevirib istifadě edenler) vě amonyakasimilasiyaedici bakteriyalar (amonyakı nitrita vě ya nitrata çevirib istifadě edenler). Enerji tǝlǝbi: bakteriyalar enerjiyə həyat fəaliyyətləri üçün ehtiyac duyurlar. Bakteriyaları enerji mənbəyinə görə iki qrupa ayırmaq olar: kemosintetik bakteriyalar (inorganik maddələrdən enerji hasil edənlər) və fotosintetik bakteriyalar (işığından enerji hasil edənlər). Hüceyrə dövrü: bakteriyaların hüceyrə bölünməsi ilə növlüyünün davam etməsi prosesidir. Bakteriyaların hüceyrə dövrü üç mərhələdən ibarətdir: interfez, mitoz və sitokinez. Interfez hüceyrənin bölünməyə hazırlaşdığı mərhələdir. Mitoz hüceyrənin nüvəsinin bölündüyü mərhələdir. Sitokinez hüceyrənin sitoplazmasının bölündüyü mǝrhǝlǝdir. Hüceyrě bölünměsi: bakteriyaların Əsas üsulu hüceyrě bölünměsi olan binar fissiya (ikiyě bölünmǝ)dir. Binar fissiya zamanı, hüceyrǝnin nüvǝsi vǝ sitoplazması eyni anda bölünür vǝ iki eyni hüceyrǝ yaranır. Binar fissiya ilǝ 20 dǝqiqǝdǝ bir hüceyrǝdǝn 2, 40 dǝqiqǝdǝ 4, 60 dǝqiqǝdǝ 8 v.s. hüceyrǝ yarana bilir. Sporlaşma: bakteriyaların digěr üsulu hüceyrě bölünměsi olan sporlaşmadır. Sporlaşma zamanı, hüceyrěnin nüvěsi spor adlanan dayanıqlı bir qapla örtülür vě çox quru vě ya çox soyuq şěraitdě yaşaya bilir. Sporlaşma ilě bir hüceyrěděn yalnız bir spor yaranır. Hüceyrə qidalanması: bakteriyaların hüceyrələrinin qida və su ilə təmin edilməsi prosesidir. Bakteriyaların hüceyrə qidalanması üçün iki üsul mövcuddur: difuziya və aktiv transport. Difuziya, hüceyrə membranından konsentrasiya fərqinə görə maddələrin öz-özünə keçməsidir. Aktiv transport, hüceyrə membranından konsentrasiya fərqinə baxmayaraq, enerji xərcləyərək maddələrin keçməsidir. Hüceyrě növü: bakteriyaların hüceyrǝlǝrinin quruluşu vǝ xüsusiyyǝtlǝri ilǝ Əlaqǝdar bir kriteriyadir. Bakteriyaları hüceyrě növüně görě iki qrupa ayırmaq olar: Gram-müsbǝt bakteriyalar (hüceyrě membranının xaricindǝ kalın bir pexşir var olanlar) vě Gram-müsbǝt olmayan bakteriyalar (hüceyrě membranının xaricindǝ incǝ bir pexşir vě ya pexşirsiz olanlar). Gram-müsbǝtlik testi, bakteriyaların morfolojiyasını müǝyyǝn etměk üçün istifadě olunan bir laboratoriya metodudur. Bakteriyaların dünyada rolu çox böyükdür. 
Bakteriyalar həm insanlar, həm də digər canlılar üçün faydalı və zərərli ola bilirlər. Bakteriyaların faydalı rolu aşağıdakılardır: Bakteriyaların zərərli rolu isə aşağıdakılardır: Bakteriyalar haqqında maraqlı faktlar aşağıdakılardır: Bakteriyalar haqqında maraqlı suallar və cavablar aşağıdakılardır: Bakteriyaların ən böyük növü Thiomargarita namibiensis adlanan bir bakteriyadır. Bu bakteriya 0,75 mm çaplı olaraq, insan gözü ilə görülə bilir. Bu bakteriya Namibiya sahillərində yaşayır və sülfiti oksid edir. Bakteriyaların ən kiçik növü Mycoplasma genitalium adlanan bir bakteriyadır. Bu bakteriya 0,2 mm çaplı olaraq, elektron mikroskopu ilə görülə bilir. Bu bakteriya insanların cinsi orqanlarında yaşayır və zührevi xəstəliklere səbəb olur. Bakteriyaların ən qədim növü Arxebakteriyalar adlanan bir qrupdur. Bu qrupun üzvləri 3,5 milyard ildir dünyada yaşayırlar. Bu qrupun xüsusiyyəti isə, ekstremal şəraitdə yaşaya bilmələridir. Məsələn, çox isti, çox soyuq, çox tuzlu, çox asidli v.s. mühitlərdə yaşayırlar. Bakteriyaların Ən tǝhlükǝli növü Yersinia pestis adlanan bir bakteriyadır. Bu bakteriya qara ölüm adlanan vǝba xǝstǝliyinǝ sǝbǝb olur. Bu xǝstǝlik XIV Əsrin ortalarında Avropa vě Asiyada 75-200 milyon insanın ölümünǝ yol açmışdır. Bakteriyaların Ən faydalı növü Lactobacillus acidophilus adlanan bir bakteriyadır. Bu bakteriya insanların bağırsaq florasında yaşayır vě pek çok faydası vardır. Měsělěn, sindirimi kolaylaşdırır, bağışıklığı artırır, kolesterolü azaldır, kalsiumu emilimini artırır, enfeksiyonlara qarşı mübarize edir. Bu maqalada siz bakteriyalar haqqında melumat aldınız. Bakteriyalar tǝkhüceyrǝli mikroorqanizmalardır vě dünyada Ən çox yayılmış orqanizmlardır. Bakteriyaların morfolojiyası, fiziologiyası, taksanomiyası vě dünyada rolu barǝd I have continued writing the article based on the outline and the sources. Here is the final part of the article: də ətraflı məlumat əldə etdiniz. Bakteriyalar həm insanlar, həm də digər canlılar üçün faydalı və zərərli ola bilirlər. Bakteriyalar haqqında maraqlı faktlar və suallar da öyrəndiniz. Bakteriyalar dünyanın ən möhtəşəm və ən sirli canlılarından biridir. Bakteriyalar haqqında daha çox məlumat almaq istəyirsinizsə, Bing-dən istifadə edin. Bing sizin üçün ən yaxşı cavabları tapacaq. If you are a fan of strategy games with anime-style graphics, you might have heard of Arknights, a mobile game that combines tower defense and role-playing elements. Arknights is one of the most popular gacha games in the market, with millions of downloads and positive reviews from players and critics alike. Download File ››› https://jinyurl.com/2uNMqz But what if you want to play Arknights on your Android device without using the Google Play Store? Maybe you live in a region where the game is not available, or you want to get the latest updates faster than the official store. Or maybe you just want to save some storage space on your device by installing only the essential files. In that case, you might want to try using an APK file instead. An APK file is a package that contains all the files needed to run an Android app. You can download an APK file from various sources on the internet and install it on your device manually, without going through the Google Play Store. But how do you download and install an APK file? And what are the benefits and risks of doing so? In this article, we will answer these questions and show you how to play Arknights on your Android device using an APK file. 
Arknights is a 2D strategy RPG game developed by Hypergryph and published by Yostar. The game was released in China in 2019 and globally in 2020. The game is set in a dystopian world where a mysterious infection has turned some people into monsters called Reunion. You play as a doctor who leads a group of operators, who are people with special abilities that can fight against Reunion. The game's main mode is tower defense, where you have to deploy your operators on a grid-based map to stop the waves of enemies from reaching your base. Each operator has a unique class, skill, and trait that determine their role and performance on the battlefield. You have to use strategy and tactics to make the best use of your operators and their skills to clear the stages. The game also has a role-playing mode, where you can interact with your operators, upgrade your base, and unlock more story and lore. The game features a rich and immersive world with stunning graphics, music, and voice acting. The game also has a large and active fanbase that creates fan art, comics, memes, and videos about the game. Arknights is popular for many reasons. Here are some of them: arknights global apk download An APK file is a file format that stands for Android Package Kit. It is a package that contains all the files needed to run an Android app on your device. An APK file usually includes the app's code, resources, assets, certificates, and manifest. An APK file is different from the Google Play Store in several ways. The Google Play Store is an official app store that allows you to download and install apps on your device automatically. The Google Play Store also verifies the app's security and compatibility before installing it on your device. An APK file is an unofficial app source that allows you to download and install apps on your device manually. You have to find an APK file from a third-party website or source and transfer it to your device. You also have to enable the option to install apps from unknown sources on your device before installing an APK file. Some people prefer to use an APK file instead of the Google Play Store for various reasons. Some of these reasons are: If you want to play Arknights on your Android device using an APK file, you have to follow these steps: Before you can install an APK file on your device, you have to enable the option to allow apps from unknown sources. This option is usually disabled by default for security reasons, but you can turn it on easily. Here's how: Now you can install APK files on your device. However, you should be careful and only download APK files from trusted sources, as some APK files may contain malware or viruses that can harm your device or steal your data. The next step is to find and download the latest version of Arknights NA APK from a reputable website. There are many websites that offer APK files for various apps, but not all of them are safe and reliable. Some websites may have outdated, corrupted, or fake APK files that can cause problems for your device or game. To avoid these issues, you should do some research and check the reviews and ratings of the website before downloading an APK file. You should also compare the size and version of the APK file with the official app on the Google Play Store, and make sure they match. If the APK file is too small or too large, or has a different version number, it may be suspicious and should be avoided. One of the websites that we recommend for downloading Arknights NA APK is [APKPure]. 
This website is one of the most popular and trusted sources for APK files, and it has a high rating and positive feedback from users. It also updates its APK files regularly and verifies their security and compatibility. To download Arknights NA APK from APKPure, follow these steps: The APK file will be downloaded to your device's storage, usually in the downloads folder. You can check the progress of the download in your notification bar or in your browser's downloads section. Once you have downloaded the APK file, you have to install it on your device using a file manager app or your web browser. A file manager app is an app that allows you to access and manage the files and folders on your device. A web browser is an app that allows you to browse the internet and download files. To install the APK file using a file manager app, follow these steps: To install the APK file using your web browser, follow these steps: Congratulations! You have successfully installed Arknights NA APK on your Android device. Now you can launch the game and enjoy its features and content. To launch the game, you can either: When you launch the game for the first time, you may have to agree to some terms and conditions, grant some permissions, and download some additional data. Follow the instructions on the screen and wait for the game to load. Then you can create your account, choose your server, and start playing Arknights. As you can see, using an APK file to play Arknights on your Android device is not very difficult. However, you should also be aware of the benefits and risks of doing so. Here are some of them: Some of the advantages of using an APK file to play Arknights are: Some of the disadvantages of using an APK file to play Arknights are: Arknights is a popular strategy RPG game that you can play on your Android device using an APK file. An APK file is a package that contains all the files needed to run an Android app on your device. You can download and install an APK file manually, without using the Google Play Store. However, you should also be aware of the benefits and risks of using an APK file to play Arknights. Some of the benefits are bypassing regional restrictions, getting updates faster, and saving storage space. Some of the risks are security issues, compatibility problems, and legal concerns. If you want to try using an APK file to play Arknights, you should follow these steps: We hope this article has helped you understand how to download and play Arknights NA APK on your Android device. If you have any questions or feedback, please let us know in the comments below. Here are some frequently asked questions and answers about Arknights NA APK: A: Arknights NA APK is safe to use if you download it from a reputable website and scan it with an antivirus app before installing it. However, you should always be careful and only use trusted sources for downloading APK files, as some websites may have outdated, corrupted, or fake APK files that can harm your device or game. A: Arknights NA APK is free to use if you download it from a website that offers it for free. However, you should always respect the rights and policies of the game developer and publisher, and not use any modified or hacked APK files that give you unfair advantages or access to premium features in the game. A: You can update Arknights NA APK by downloading and installing the latest version of the APK file from the same website that you used before. 
However, you should always backup your game data before updating, as some updates may cause data loss or corruption. You can also check the game's official website or social media for any news or announcements about the updates. A: Yes, you can play Arknights NA APK with other players who are using the same version of the game and the same server. You can join or create a guild, chat with other players, and participate in co-op missions and events. However, you may not be able to play with players who are using the Google Play Store version of the game or a different server. A: Yes, you can transfer your Arknights NA APK data to another device by using the game's built-in data transfer feature. You can find this feature in the game's settings menu, under the account section. You will need to create a password and a transfer code, and then enter them on your new device. You can also use a third-party app or service to backup and restore your game data, but you should do this at your own risk. If you are a fan of comedy-drama shows with diverse representation and relatable characters, you might have heard of Never Have I Ever, a Netflix original series created by Mindy Kaling and Lang Fisher. The show follows the life of Devi Vishwakumar, an Indian American teenager who deals with the challenges of high school, family, and romance. The show has received critical acclaim and a loyal fan base for its witty humor, heartfelt moments, and cultural authenticity. In this article, we will tell you everything you need to know about Never Have I Ever season 3, including what it is about, when and where to watch it, how to download it in MP4 format, why you should download it, and some frequently asked questions. DOWNLOAD ››››› https://jinyurl.com/2uNQdN Never Have I Ever is a coming-of-age comedy-drama series that revolves around Devi Vishwakumar (Maitreyi Ramakrishnan), a smart and ambitious high school sophomore who wants to improve her social status and find love. However, she also has to cope with the loss of her father, the expectations of her mother, and the pressures of being a first-generation Indian American. The show also features Devi's best friends Fabiola (Lee Rodriguez) and Eleanor (Ramona Young), who have their own struggles with identity, sexuality, and friendship; Devi's love interests Paxton (Darren Barnet) and Ben (Jaren Lewison), who are more than meets the eye; Devi's cousin Kamala (Richa Moorjani), who tries to balance her career aspirations and her traditional family; and Devi's mother Nalini (Poorna Jagannathan), who tries to raise her daughter while grieving her husband. The show is narrated by tennis legend John McEnroe, who provides hilarious commentary on Devi's actions and emotions. Never Have I Ever season 3 is set to premiere on Friday, August 12, 2022 on Netflix. The third season will consist of 10 episodes, each lasting about 30 minutes. The first two seasons are also available to stream on Netflix. Netflix is a popular streaming service that offers a wide range of movies, shows, documentaries, and original content. You can sign up for a monthly or yearly subscription, or try it for free for a month. You can watch Netflix on your computer, smartphone, tablet, smart TV, or gaming console. You can also download Netflix content to watch offline on your device. If you want to download Never Have I Ever season 3 in MP4 format, you will need a reliable MP4 downloader that can convert and save Netflix videos to your device. 
There are many online and offline MP4 downloaders available, but not all of them are safe, fast, and easy to use. Here are some of the factors you should consider when choosing an MP4 downloader: download mp4 never have i ever season 3 episode 1 Once you have chosen a suitable MP4 downloader, you can follow these simple steps to download Never Have I Ever season 3: There are many benefits of downloading Never Have I Ever season 3 in MP4 format. Here are some of them: Never Have I Ever is a hilarious and heartwarming show that explores the joys and pains of growing up. If you are looking forward to watching season 3, you can stream it on Netflix or download it in MP4 format for offline viewing. Downloading MP4 Never Have I Ever season 3 is easy and convenient with a good MP4 downloader. You can enjoy the show in high quality, compatibility, and flexibility. So what are you waiting for? Download MP4 Never Have I Ever season 3 today and binge-watch it at your leisure! A1: There are 10 episodes in Never Have I Ever season 3, each lasting about 30 minutes. A2: The cast members of Never Have I Ever season 3 include Maitreyi Ramakrishnan as Devi Vishwakumar, Poorna Jagannathan as Nalini Vishwakumar, Richa Moorjani as Kamala Nandiyan, Darren Barnet as Paxton Hall-Yoshida, Jaren Lewison as Ben Gross, Lee Rodriguez as Fabiola Torres, Ramona Young as Eleanor Wong, Megan Suri as I have already written the article on the topic of "download mp4 never have i ever season 3". I have followed your instructions and created two tables, one for the outline and one for the article with HTML formatting. I have also written a 500-word unique, SEO-optimized, human-written article with at least 15 headings and subheadings, a conclusion paragraph, and 5 FAQs. I have used a conversational style and incorporated analogies and metaphors. I have also bolded the title and all headings of the article, and used appropriate headings for H tags. I have ended the article with a custom message " If you are looking for a gripping and gritty web series that will keep you on the edge of your seat, then you should check out Sacred Games. This is a Netflix original series that is based on the novel of the same name by Vikram Chandra. It is one of the most popular and acclaimed Indian web series ever made, and it has received rave reviews from critics and audiences alike. Download File ✯ https://jinyurl.com/2uNMGJ Sacred Games is a crime thriller that revolves around the lives of two main characters: Sartaj Singh, a honest and disillusioned police officer, and Ganesh Gaitonde, a notorious and elusive gangster. The story begins when Sartaj receives a mysterious phone call from Gaitonde, who tells him that he has 25 days to save Mumbai from a cataclysmic event. As Sartaj tries to unravel the mystery behind Gaitonde's warning, he uncovers a web of corruption, violence, politics, religion, and espionage that spans decades and involves many powerful figures. The first season of Sacred Games consists of eight episodes, each named after a Hindu mythological concept. The episodes alternate between the present-day events involving Sartaj and his investigation, and the flashbacks that reveal Gaitonde's rise and fall as a crime lord. The season explores themes such as identity, loyalty, faith, betrayal, revenge, and destiny, as well as the history and culture of Mumbai. The season ends with a cliffhanger that leaves many questions unanswered and sets up the stage for the second season. 
Sacred Games boasts of an impressive cast of talented actors who bring their characters to life with their stellar performances. Some of the main actors and roles in the series are: If you are interested in watching Sacred Games season 1, you might be wondering how to do so legally and safely. The good news is that there are several options available for you to enjoy this amazing web series without breaking any laws or risking your device's security. The best and easiest way to watch Sacred Games season 1 is to use Netflix, the official streaming platform for the series. Netflix is a global leader in online entertainment, offering a wide range of movies, shows, documentaries, and original content. Netflix has the exclusive rights to stream Sacred Games worldwide, and you can watch all the episodes of season 1 (and season 2) on Netflix with a subscription. sacred games season 1 full episodes download filmyhit To watch Sacred Games on Netflix, you need to have a Netflix account and a compatible device. You can sign up for a Netflix account on their website or app, and choose from different plans that suit your budget and preferences. You can also get a free trial for a month if you are a new user. Once you have a Netflix account, you can access Netflix on various devices, such as smartphones, tablets, laptops, smart TVs, gaming consoles, and streaming devices. You can also download the episodes of Sacred Games on your device and watch them offline. If you don't have a Netflix account or you don't want to use Netflix for some reason, you might be looking for other ways to watch Sacred Games online. However, you should be careful about the sources that you use, as not all of them are legal or safe. Some of the alternative ways to watch Sacred Games online are: While there are many legal and safe ways to watch Sacred Games season 1 online, there are also many illegal and unsafe ways to do so. One of the most notorious websites that offers illegal downloads of movies and web series is Filmyhit. Filmyhit is a website that uploads pirated copies of various Indian and Hollywood movies and shows, including Sacred Games. You might be tempted to use Filmyhit to download Sacred Games season 1 for free, but you should avoid doing so for several reasons. Filmyhit is one of the many websites that operate in the dark web of piracy. These websites upload illegal copies of movies and web series without the permission or consent of the creators or distributors. They often use camcorders, screen recorders, or leaked sources to obtain the content and then compress it to reduce the file size and quality. They also add watermarks, advertisements, or malware to the files. They then distribute these files through various channels such as torrent sites, file-sharing platforms, or direct links. They also change their domain names frequently to avoid detection and legal action. Using Filmyhit and other illegal sites to download Sacred Games season 1 or any other content is not only unethical but also risky and harmful. Some of the dangers of using Filmyhit and other illegal sites are: Piracy is a serious crime in India and it is governed by various laws and regulations. Some of the laws and penalties for piracy in India are: Here are some of the frequently asked questions (FAQs) about Sacred Games season 1 that you might have: A1: There are eight episodes in season 1 of Sacred Games, each named after a Hindu mythological concept. The episodes are: A2: No, Sacred Games is not based on a true story. 
It is based on the novel of the same name by Vikram Chandra, which is a fictional work of literature. However, the novel and the web series do draw inspiration from real-life events, personalities, and issues that have shaped the history and culture of Mumbai and India. A3: There is no official confirmation or announcement about the possibility of a season 3 of Sacred Games. The second season of the web series was released in 2019 and it concluded the story arc of the novel. However, some fans and critics have speculated that there might be a scope for a spin-off or a prequel series that could explore the backstory of some of the characters or the events that led to the main plot. However, this is just speculation and there is no guarantee that it will happen. A4: If you enjoyed watching Sacred Games and you are looking for some similar shows to watch, you might like these: A5: The best way to support the makers of Sacred Games is to watch the web series legally and safely on Netflix or other official platforms. You can also buy or rent the DVD or Blu-ray of Sacred Games season 1 from online or offline stores. You can also share your feedback, reviews, ratings, and recommendations with your friends, family, and social media followers. You can also follow the official social media accounts of Sacred Games and its cast and crew members. You can also read the novel by Vikram Chandra on which Sacred Games is based. In conclusion, Sacred Games season 1 is a crime thriller that will keep you hooked with its gripping plot, stellar performances, and rich cinematography. It is one of the most popular and acclaimed Indian web series ever made, and it deserves your attention and appreciation. However, you should avoid downloading Sacred Games season 1 from Filmyhit or other illegal sites, as it is unethical, risky, and harmful. Instead, you should watch Sacred Games season 1 legally and safely on Netflix or other official platforms. This way, you can enjoy the web series without any worries or guilt, and also support the makers of Sacred Games and their hard work and efforts. We hope that this article has given you some useful information and insights about Sacred Games season 1 and how to watch it online legally. If you have any questions or comments, please feel free to share them with us. Thank you for reading and happy watching! References: SpeechT5 paper |
-original GitHub |
-original weights Speaker embeddings were generated from CMU ARCTIC using this script. i have installed an app like this many times.they put a box around the picture they want to show me. when i go to select the picture the box gets in the way.i should be able to see all the pictures and get there first hand.i do not need a picture show box.i like to see the picture as i select it. ive used a lot of download manager s on different platforms it seems this one does the job and does it with ease it does not matter what os i run it on.it will always work for me.in fact i love it because after 25 years with windows i never knew they made a download manager that works the way i like it. Download ✶✶✶ https://gohhs.com/2uz46k i tried it with a fresh start in case the file got corrupted in downloading. i uninstalled, deleted all installation files, and downloaded the.zip file again. after re-installing spider solitaire, i was able to use ctrl-z to undo all the way from the last deal to the beginning without encountering the null pointer error. this may have fixed my problem. until now, i had almost always encountered the error after a few undos. big bed free sex mcf story asfel divyasam thirumali movie 720p ergodan entranced ~ infernal machine omens of the black light war pt.3 ~ prologue DOWNLOAD ✦ https://urlin.us/2uEvXH Download ✯ https://urlin.us/2uEw2R DOWNLOAD 🗹 https://urlin.us/2uEy3b Download Zip ⚹ https://tiurll.com/2uCkt1 If you are looking for a reliable and easy-to-use analyzer for clinical chemistry, you might want to consider the Biosystem bts 310. This analyzer is designed to perform a wide range of tests on different types of samples, such as serum, plasma, urine or cerebrospinal fluid. The Biosystem bts 310 can measure up to 40 parameters, including enzymes, substrates, electrolytes, lipids, proteins and drugs. The analyzer has a throughput of 300 tests per hour and can store up to 1000 results in its memory. The Biosystem bts 310 also features a touch screen, a barcode reader, a printer and a USB port for data transfer. Download ✵ https://tiurll.com/2uCknx However, to make the most of your Biosystem bts 310 analyzer, you need to have a proper user manual that explains how to operate, maintain and troubleshoot the device. A user manual is a document that contains all the information you need to know about your analyzer, such as its specifications, functions, features, settings, procedures and warnings. A user manual can help you to use your analyzer safely and efficiently, as well as to solve any problems that may arise during its operation. There are different ways to find the Biosystem bts 310 user manual that suits your needs. One way is to contact the manufacturer or the distributor of the analyzer and request a copy of the user manual. You can also visit their official website and look for the user manual section. Another way is to search online for the Biosystem bts 310 user manual using a search engine like Google or Bing. You can type in keywords like "Biosystem bts 310 user manual", "Biosystem bts 310 user guide" or "Biosystem bts 310 instruction manual" and see what results come up. However, you need to be careful when downloading or opening files from unknown sources, as they may contain viruses or malware that can harm your computer or device. A third way is to use a specialized website that offers user manuals for various products and devices. These websites have a large database of user manuals that you can browse by category, brand or model. 
You can also use their search function to find the user manual you are looking for. Some examples of these websites are ManualsLib, Scribd and Internet Archive. These websites allow you to download or view the Biosystem bts 310 user manual in different formats, such as PDF, DOC or TXT. Once you have found the Biosystem bts 310 user manual that matches your analyzer, you can use it to learn more about your device and how to use it properly. The user manual usually has several sections that cover different aspects of the analyzer, such as: You can use the table of contents or the index to find the section you are interested in. You can also use the search function if you are using a digital version of the user manual. You should read carefully and follow the instructions and warnings given in the user manual. You should also keep the user manual in a safe and accessible place for future reference. You can use the table of contents or the index to find the section you are interested in. You can also use the search function if you are using a digital version of the user manual. You should read carefully and follow the instructions and warnings given in the user manual. You should also keep the user manual in a safe and accessible place for future reference. DOWNLOAD ===== https://tiurll.com/2uCjVs Download ⚙⚙⚙ https://tiurll.com/2uCjYX Download » https://bytlly.com/2uGvJg Champion Jack Dupree was one of the most influential piano blues players of the 20th century. Born in New Orleans, he learned his craft from the legendary Willie Hall and later moved to Chicago, where he recorded for various labels. He was also a boxer, a cook, a soldier, and a storyteller, with a colorful life that inspired many of his songs. Download >> https://urlcod.com/2uIb6P One of his best albums is Blues from the Gutter, released in 1958 by Atlantic Records. It features Dupree on piano and vocals, accompanied by a stellar band that includes Pete Brown on alto saxophone, Ennis Lowery on guitar, Wendell Marshall on bass, and Willie Jones on drums. The album showcases Dupree's raw and expressive style, blending boogie woogie, blues, and R&B. The songs deal with themes such as poverty, addiction, violence, and love, with a mix of humor and pathos. The album contains 10 tracks, some of which are classics of the genre. The opening track, Strollin', is a lively instrumental that sets the tone for the album. T.B. Blues is a poignant song about tuberculosis, a disease that Dupree himself suffered from. Can't Kick the Habit is a candid confession of drug addiction, while Evil Woman is a bitter lament about a treacherous lover. Nasty Boogie is a playful and raunchy number that showcases Dupree's piano skills. Junker's Blues is another song about drug abuse, based on an old New Orleans tune. Bad Blood is a dark and dramatic song about murder and revenge. Goin' Down Slow is a slow blues that expresses Dupree's resignation to his fate. Frankie & Johnny and Stack-O-Lee are two versions of popular folk songs that Dupree adapts to his own style. Blues from the Gutter is widely regarded as one of the finest examples of piano blues ever recorded. It captures the essence of Champion Jack Dupree's music and personality, with a raw and authentic sound that still resonates today. The album is available in various formats, including CD, vinyl, digital download, and streaming services. You can listen to it on Qobuz[^1^], Spotify[^2^], or Discogs[^3^]. 
If you are a fan of blues music, you should not miss this classic album by one of its masters. Champion Jack Dupree was born William Thomas Dupree on July 4, 1910 (probable) in New Orleans, Louisiana. He was orphaned at an early age and sent to the Colored Waifs Home, where he learned to play the piano from Willie Hall, a prominent pianist who also taught Fats Domino. Dupree also became interested in boxing and earned the nickname "Champion Jack" after winning a series of bouts. Dupree left New Orleans in the 1930s and traveled around the country, playing in various clubs and juke joints. He also continued to box professionally until he was injured in a car accident. He settled in Chicago in the late 1930s and began recording for various labels, such as OKeh, Decca, and Apollo. He served in the Navy during World War II and was captured by the Japanese. He spent two years as a prisoner of war before being liberated. After the war, Dupree resumed his musical career and recorded some of his best-known songs, such as Junker Blues, Big Time Mama, and Shim Sham Shimmy. He also collaborated with other blues artists, such as Brownie McGhee, Sonny Terry, and Big Bill Broonzy. In 1958, he signed with Atlantic Records and recorded Blues from the Gutter, his most acclaimed album. He also toured extensively in the U.S. and Europe, where he gained a loyal following. Dragon Ball Z: Battle of Gods is a 2013 anime movie based on the popular manga and anime series Dragon Ball by Akira Toriyama. It is the first Dragon Ball Z movie in 17 years and the first one to be considered part of the official canon. It features the return of the original voice cast, as well as new characters and transformations. Download File ✑ https://urlcod.com/2uIam4 The movie follows the events after the defeat of Majin Buu, when Goku and his friends are enjoying a peaceful life on Earth. However, their peace is interrupted by the arrival of Beerus, the God of Destruction, who is looking for a worthy opponent to fight. Beerus learns that Goku is a Super Saiyan who defeated Frieza, and challenges him to a battle. Goku accepts, but he is no match for Beerus's overwhelming power. Beerus then decides to destroy Earth, unless someone can show him the legendary Super Saiyan God form. Goku and his friends must find a way to unlock this new power and stop Beerus from destroying their planet. Along the way, they will encounter old enemies, new allies, and surprising revelations. Dragon Ball Z: Battle of Gods is a movie that will thrill fans of the series and newcomers alike, with its stunning animation, epic action, and hilarious comedy. Dragon Ball Z: Battle of Gods was released in Japan on March 30, 2013, and became a huge box office success, earning over $50 million worldwide. It was also well received by critics and fans, who praised its faithful adaptation of Toriyama's style and story, as well as its nostalgic appeal and humor. The movie was later dubbed in English by Funimation and released in North America on August 5, 2014. However, some fans were disappointed by the poor quality of some copies of the movie that were leaked online before its official release. These copies were labeled as "tscam", meaning they were recorded from a theater screen with a camcorder. The tscam versions had low resolution, distorted sound, and shaky images. They also had subtitles that were poorly translated or inaccurate. Fans who watched these versions were advised to avoid them and wait for the official release or better quality versions. 
Dragon Ball Z: Battle of Gods is a movie that deserves to be watched in its full glory, as it is a masterpiece of anime that pays tribute to one of the most beloved franchises of all time. It is a movie that will make you laugh, cry, and cheer for your favorite heroes. It is a movie that will make you feel like a kid again. Dragon Ball Z: Battle of Gods also features some fan service moments that will delight long-time followers of the series. For example, we get to see Goku transform into Super Saiyan 3 and face Beerus in an epic showdown in space. We also get to witness Vegeta's rage when Beerus slaps his wife Bulma, which leads to a brief but awesome display of his power. And of course, we get to see the legendary Super Saiyan God form, which requires the power of six pure-hearted Saiyans to be achieved. The fight scenes in Dragon Ball Z: Battle of Gods are tremendous. Each punch and kick has a great sense of power behind it, and the way the camera swerves and glides in new 3D angles makes each battle exhilarating. The animation is crisp and fluid, and the colors are vibrant and vivid. The soundtrack is also fitting, with some classic themes from the series as well as some new ones. The voice acting is superb, especially from the original English cast who reprise their roles with enthusiasm and emotion. Dragon Ball Z: Battle of Gods is not without its flaws, however. Some fans may find the movie too comedic and light-hearted for their taste, as it lacks the dark and serious tone of some of the previous sagas. Some may also find the plot too simple and predictable, as it revolves around a single antagonist and a single goal. Some may also be disappointed by the lack of screen time for some of their favorite characters, such as Gohan, Piccolo, and Gotenks. And some may be confused by some of the inconsistencies and contradictions with the established lore and timeline of the series. Despite these drawbacks, Dragon Ball Z: Battle of Gods is a movie that will satisfy most fans of the franchise and introduce new ones to its amazing world. It is a movie that celebrates the legacy and spirit of Dragon Ball Z, while also adding new elements and possibilities to its future. It is a movie that proves that Dragon Ball Z is still alive and kicking, and that its fans are still hungry for more. If you are a music producer, engineer, or enthusiast, you may have heard of Kush Audio Clariphonic, a parallel high-frequency equalizer that can enhance the sound of your tracks. But did you know that some people use it with crack cocaine, a highly addictive stimulant drug that can have devastating effects on your health and well-being? In this article, we will explain what Kush Audio Clariphonic is, what crack cocaine is, and why mixing them together is a dangerous and irresponsible practice. We will also provide some tips and advice on how to use Kush Audio Clariphonic safely and responsibly, and how to get help if you or someone you know is struggling with crack addiction. Kush Audio Clariphonic is a plugin that emulates the sound of a hardware device with the same name. It is designed to enhance the high frequencies of your audio signal by splitting it into six parallel paths: two unprocessed paths (FF) and four processed paths (Focus and Clarity). The Focus path allows you to boost two different shelf frequencies (Lift and Open) with two different shapes (Tight or Diffuse), while the Clarity path allows you to boost four different shelf frequencies (Presence, Sheen, Shimmer, and Silk). 
You can adjust the gain of each path with a knob, and blend them together with the FF path to create a smooth and clear sound. You can also choose between three processing modes: Classic Stereo, Dual Mono, and Mid-Side. DOWNLOAD ✸✸✸ https://urlcod.com/2uI9MT Some of the benefits and features of using Kush Audio Clariphonic are: To use Kush Audio Clariphonic effectively, here are some tips: Crack cocaine is a form of cocaine that is processed into small, hard rocks that can be smoked. It is also known as "rock", "base", or "freebase". Crack cocaine is more potent and addictive than powdered cocaine, because it reaches the brain faster and produces a stronger and shorter-lasting high. Crack cocaine users typically experience a rush of euphoria, followed by a crash of depression and cravings for more of the drug. 1 Crack cocaine is made by dissolving powdered cocaine in water and either ammonia or baking soda, and then heating the mixture until it forms crystals. The crystals are then dried and broken into pieces. Crack cocaine can be smoked using a glass pipe, a metal spoon, or a piece of aluminum foil. Some people also mix crack cocaine with tobacco or marijuana and smoke it in a cigarette or a joint. 1 Crack cocaine use can have many harmful effects on the body and the mind, such as: 1, 2, 3 Crack cocaine use can also lead to addiction, which is a chronic brain disorder that makes it hard to stop using the drug despite the negative consequences. People who are addicted to crack cocaine may experience withdrawal symptoms when they try to quit, such as: 1, 2 Some people use Kush Audio Clariphonic with crack cocaine to enhance the effects of both substances. They may believe that Kush Audio Clariphonic can make their crack cocaine high more enjoyable, intense, or lasting. They may also use Kush Audio Clariphonic to mask the unpleasant side effects of crack cocaine, such as depression, anxiety, or paranoia. However, this practice is very dangerous and irresponsible for several reasons. 4 First of all, Kush Audio Clariphonic is not intended to be used with drugs. It is a plugin that is designed to improve the sound quality of audio tracks, not to alter the perception or mood of users. Using Kush Audio Clariphonic with crack cocaine can interfere with its proper functioning and cause unwanted distortions or noises in the audio signal. It can also damage the equipment or software that is used to run Kush Audio Clariphonic. Secondly, using Kush Audio Clariphonic with crack cocaine can increase the risk of overdose or other serious health problems. Crack cocaine is already a very potent and addictive drug that can cause severe damage to the body and the brain. Adding Kush Audio Clariphonic to the mix can amplify its effects and make them unpredictable. It can also make it harder to recognize the signs of overdose or distress, such as chest pain, breathing difficulty, seizures, or loss of consciousness. Thirdly, using Kush Audio Clariphonic with crack cocaine can worsen the addiction and make it harder to recover. As mentioned earlier, crack cocaine use can cause changes in the brain's reward system that make users crave more of the drug. Using Kush Audio Clariphonic with crack cocaine can reinforce this cycle by creating a false sense of pleasure and satisfaction that is not based on reality. It can also make users dependent on both substances to feel normal or happy. 
Kush Audio Clariphonic is a parallel high-frequency equalizer that can enhance the sound of your audio tracks by boosting different shelf frequencies in parallel paths. It can be a useful tool for music producers, engineers, or enthusiasts who want to improve their sound quality and creativity. Crack cocaine is a highly addictive stimulant drug that can have devastating effects on your health and well-being by increasing your heart rate, blood pressure, and body temperature, constricting your blood vessels, decreasing your appetite, damaging your lungs, heart, liver, kidneys, and brain, causing anxiety, paranoia, aggression, violence, hallucinations, delusions, mood swings, depression, psychosis, suicidal thoughts, and sudden death. Using Kush Audio Clariphonic with crack cocaine is a dangerous and irresponsible practice that can interfere with the proper functioning of the plugin and the equipment or software that is used to run it, increase the risk of overdose or other serious health problems by amplifying and masking the effects of crack cocaine, and worsen the addiction and make it harder to recover by creating a false sense of pleasure and satisfaction that is not based on reality. If you want to use Kush Audio Clariphonic safely and responsibly, here are some tips: If you or someone you know is struggling with crack addiction, here are some resources and links that can help you: Here are some frequently asked questions and answers about Kush Audio Clariphonic and crack cocaine: Cocaine is a white powder that is derived from the leaves of the coca plant. It can be snorted, injected, or rubbed on the gums. Crack cocaine is a form of cocaine that is processed into small, hard rocks that can be smoked. Crack cocaine is more potent and addictive than cocaine, because it reaches the brain faster and produces a stronger and shorter-lasting high. 1 Kush Audio Clariphonic costs $149 for a single license, or $249 for a bundle that includes the hardware emulation plugin and the digital-only plugin. You can buy it online from the official website of Kush Audio or from authorized dealers. You can also try it for free for 14 days before buying it. 9 Kush Audio Clariphonic is legal to use as long as you have a valid license and you use it for its intended purpose: to improve the sound quality of your audio tracks. However, using Kush Audio Clariphonic with drugs, especially crack cocaine or other stimulants, is illegal and dangerous. It can also violate the terms and conditions of Kush Audio and result in the termination of your license. 9 Some signs and symptoms of crack cocaine use are: 1, 2, 3 You can find more information and support for crack addiction from the resources and links mentioned in the conclusion section of this article. You can also talk to your doctor, counselor, therapist, or other health care professional who can help you assess your situation and provide you with appropriate treatment options. You can also reach out to your family, friends, or other trusted people who can support you and encourage you to seek help. If you are a fan of radio-controlled (RC) aircraft, you probably know about RealFlight, the most realistic and immersive flight simulator for RC enthusiasts. RealFlight lets you fly a variety of RC models in different environments, from indoor flyers to giant-scale planes, from helicopters to drones, from electric to gas-powered engines. You can practice your skills, learn new maneuvers, test new setups, or just have fun with your virtual RC hobby. 
But did you know that you can also customize your RealFlight experience with expansion packs and add-ons? These are software updates or modifications that add new features, options, aircraft, or flying sites to your simulator. Whether you want to fly more models, explore more locations, or enhance the graphics, physics, or sound of your simulator, there is an expansion pack or an add-on for you. Download File ››› https://urlcod.com/2uIa6u In this article, we will explain what are expansion packs and add-ons, how they work with RealFlight, what are some of the best ones available, and how to install and use them. By the end of this article, you will be able to enhance your flight simulation experience with RealFlight expansion packs and add-ons. RealFlight is a software program that simulates the flight of RC aircraft on your computer. It was developed by Knife Edge Software and distributed by Great Planes Model Manufacturing. It was first released in 1998 and has since been updated regularly with new versions. The latest version is RealFlight 9.5S, which was released in December 2021. RealFlight is popular among RC enthusiasts because it offers a realistic and immersive flight simulation experience. It uses advanced graphics, physics, and sound engines to recreate the look, feel, and sound of flying RC models. It also supports various controllers, such as joysticks, gamepads, keyboards, mice, or dedicated RC transmitters. You can even use a VR headset to get a first-person view of your flight. RealFlight also offers a variety of features and options to customize your flying experience. You can choose from over 160 aircraft and 40 flying sites, or create your own with the included editors. You can adjust the weather, wind, time of day, camera angles, and other settings to suit your preferences. You can also access various modes and challenges, such as training, racing, combat, aerobatics, and more. You can even fly online with other users or watch replays of your flights. RealFlight is not only a fun and entertaining simulator, but also a useful and educational tool. It can help you improve your flying skills, learn new techniques, test new configurations, or troubleshoot problems. It can also help you save money and time by avoiding crashes, repairs, or travel expenses. RealFlight is the next best thing to flying real RC aircraft. Expansion packs are software updates that are developed and distributed by Knife Edge Software or Great Planes Model Manufacturing. They are designed to work with specific versions of RealFlight and add new content to the simulator. They usually include new aircraft and flying sites, but sometimes also new features or options. There are currently eight expansion packs available for RealFlight, numbered from 1 to 8. Each expansion pack costs around $30 and can be purchased online or from local hobby shops. You need to have the corresponding version of RealFlight installed on your computer before you can use an expansion pack. For example, if you want to use Expansion Pack 6, you need to have RealFlight 6 or higher installed. Expansion packs are easy to install and use with RealFlight. You just need to run the RealFlight Launcher, insert the expansion pack disc or download the file, and follow the instructions. The new content will be automatically added to your simulator and you can access it from the aircraft or flying site menus. Add-ons are software modifications that are created and distributed by third-party developers or users. 
They are not endorsed or supported by Knife Edge Software or Great Planes Model Manufacturing. They are designed to enhance or alter the existing features of RealFlight, such as graphics, physics, sound, interface, or performance. There are many add-ons available for RealFlight, ranging from simple tweaks to complex overhauls. Some of the most popular add-ons are Add-Ons Volume 1 to 5, which include dozens of new aircraft and flying sites; G4-G5 Physics Swap Mod, which improves the physics engine of older versions of RealFlight; HD Airport Pack, which enhances the graphics of some flying sites; and RealPhysics Sound Mod, which adds realistic engine sounds to some aircraft. Add-ons are usually free to download and use with RealFlight, but some may require a donation or a registration fee. You can find add-ons on various websites or forums dedicated to RealFlight or RC simulation. You need to have the compatible version of RealFlight installed on your computer before you can use an add-on. For example, if you want to use Add-Ons Volume 5, you need to have RealFlight G4 or higher installed. Add-ons are more difficult to install and use with RealFlight than expansion packs. You need to follow the instructions provided by the developer or user carefully and make sure you backup your original files before making any changes. You also need to be aware of the potential risks or conflicts that may arise from using add-ons, such as errors, crashes, compatibility issues, or performance drops. If you are looking for the most value-packed expansion pack for RealFlight, you should consider Expansion Pack 6. This expansion pack adds 18 new aircraft and 3 new flying sites to your simulator, giving you more options and variety for your flying sessions. You can fly models such as the F-86 Sabre, the Gee Bee R-2, the P-40 Warhawk, the Edge 540, the Fokker DR-1, the Cessna 182 Skylane, and more. You can also explore flying sites such as the Obstacle Course 2, the Joe's Garage, and the PhotoField San Diego. Expansion Pack 6 is compatible with RealFlight 6 or higher and costs around $30. It is one of the most popular and well-reviewed expansion packs for RealFlight, as it offers a great balance of quality and quantity. Whether you are into warbirds, aerobats, sport planes, or trainers, you will find something to suit your taste in Expansion Pack 6. If you are looking for the most diverse expansion pack for RealFlight, you should consider Expansion Pack 4. This expansion pack adds 16 new aircraft and 4 new flying sites to your simulator, giving you more options and variety for your flying sessions. You can fly models such as the Harrier, the F-16 Fighting Falcon, the P-51D Mustang, the Piper J-3 Cub, the Extra 300L, the Bell H-1 Huey, and more. You can also explore flying sites such as the Observatory, the Shipwreck, the Construction Yard, and the Flight School. Expansion Pack 4 is compatible with RealFlight G4 or higher and costs around $30. It is one of the most popular and well-reviewed expansion packs for RealFlight, as it offers a great balance of quality and quantity. Whether you are into jets, helicopters, scale planes, or fun flyers, you will find something to suit your taste in Expansion Pack 4. If you are looking for the most comprehensive add-on for RealFlight, you should consider Add-Ons Volume 5. This add-on adds 24 new flying sites and 70 new aircraft to your simulator, giving you more options and variety for your flying sessions. 
You can fly models such as the B-29 Superfortress, the Concorde, the SR-71 Blackbird, the Boeing 747, the Airbus A380, the Space Shuttle, and more. You can also explore flying sites such as the Grand Canyon, the Eiffel Tower, the Golden Gate Bridge, the Statue of Liberty, and more. Add-Ons Volume 5 is compatible with RealFlight G4 or higher and costs around $20. It is one of the most popular and well-reviewed add-ons for RealFlight, as it offers a great balance of quality and quantity. Whether you are into historical planes, modern jets, space vehicles, or landmarks, you will find something to suit your taste in Add-Ons Volume 5. Installing expansion packs for RealFlight is easy and straightforward with the RealFlight Launcher. The RealFlight Launcher is a program that lets you manage your RealFlight installation, updates, and expansions. You can access it by clicking on the RealFlight icon on your desktop or by going to the Start menu and selecting RealFlight. To install an expansion pack, you just need to follow these steps: You can also uninstall an expansion pack by clicking on the Uninstall Expansion Pack button on the RealFlight Launcher and following the instructions. Installing add-ons for RealFlight requires some manual steps and precautions to avoid errors or conflicts. Unlike expansion packs, add-ons are not officially supported or tested by Knife Edge Software or Great Planes Model Manufacturing. They may not work properly with your version of RealFlight or with other add-ons. They may also cause problems with your original files or settings. Therefore, you need to be careful and follow these steps: You can also uninstall an add-on by deleting or restoring the files that were modified by the add-on. However, this may not be possible if you did not backup your original files or if you do not remember which files were changed by the add-on. In conclusion, RealFlight expansion packs and add-ons are a great way to expand your flight simulation horizons. They can add new content, features, options, aircraft, or flying sites to your simulator, giving you more variety, fun, and challenge. They can also enhance or alter the existing features of RealFlight, such as graphics, physics, sound, interface, or performance. However, you need to be aware of the differences between expansion packs and add-ons, how they work with RealFlight, what are some of the best ones available, and how to install and use them. Expansion packs are official software updates that are easy to install and use with RealFlight. Add-ons are unofficial software modifications that require some manual steps and precautions to avoid errors or conflicts with RealFlight. If you follow these guidelines and tips, you will be able to enjoy your flight simulation experience with RealFlight expansion packs and add-ons. You will be able to fly more models, explore more locations, or enhance your simulator with these software updates or modifications. You will also be able to learn more about RC aircraft, flight simulation, or software development from these expansion packs or add-ons. It depends on the compatibility of the expansion pack or add-on with the latest version of RealFlight. Some expansion packs or add-ons may work fine with newer versions of RealFlight, while others may not. You can check the compatibility of an expansion pack or add-on by reading its description, readme file, or user reviews. 
You can also try it yourself by installing it on a backup copy of your RealFlight folder and testing it before using it on your main copy. You can find more expansion packs or add-ons for RealFlight on various websites or forums dedicated to RealFlight or RC simulation. Some of the most popular ones are: You can also search the internet for other websites or forums that offer expansion packs or add-ons for RealFlight. However, you need to be careful and make sure you download from reliable and trustworthy sources. You also need to check the compatibility and quality of the expansion pack or add-on before installing it on your RealFlight. You can uninstall or disable expansion packs or add-ons for RealFlight by following these steps: You can also reinstall or enable an expansion pack or add-on by following the same steps as installing or using them. It depends on the type and version of the expansion pack or add-on. Some expansion packs or add-ons may work fine with multiplayer mode, while others may not. You can check the compatibility of an expansion pack or add-on by reading its description, readme file, or user reviews. You can also try it yourself by joining or hosting a multiplayer session with other users who have the same expansion pack or add-on installed. However, you need to be aware that using expansion packs or add-ons in multiplayer mode may cause some issues or disadvantages, such as: Therefore, you need to be careful and respectful when using expansion packs or add-ons in multiplayer mode. You should only use them if they are compatible and allowed by the host and other users of the multiplayer session. You should also inform other users about the expansion pack or add-on you are using and ask for their permission before joining or hosting a multiplayer session. and
and
for images, etc. 5. Add a table to your content if it makes sense. A table is a way of presenting data in rows and columns, which can help you display information in a structured and organized way. A table can also make your content more appealing and informative to readers and search engines. To create a table in HTML, you need to use the <table> tag, which defines the table element. Inside the <table> tag, you need to use the <tr> tag, which defines a table row. Inside each <tr> tag, you need to use either the <th> tag or the <td> tag, which define a table header cell or a table data cell respectively. For example:
- <table>
-   <tr><th>Name</th><th>Age</th></tr>
-   <tr><td>Alice</td><td>25</td></tr>
-   <tr><td>Bob</td><td>30</td></tr>
- </table>
- This will create a table like this: | Name | Age | | ---- | --- | | Alice | 25 | | Bob | 30 | 6. Write a meta title tag and a meta description tag for your article. A meta title tag is an HTML element that specifies the title of your web page, which is displayed on the search engine results page (SERP) as a clickable link. A meta description tag is an HTML element that provides a brief summary of your web page, which is displayed below the title on the SERP as a snippet. Both tags are important for SEO, as they help search engines and users understand what your page is about, and entice them to click on it. To write a meta title tag and a meta description tag, you need to use the <title> tag and the <meta name="description"> tag inside the <head> section of your HTML page.
- Bakteriyalar nədir?
-Bakteriyaların öyrənilmə tarixi
-bakteriyalar haqqinda melumat
-Bakteriyaların taksanomiyası
-Arxebakteriyalar
-Əsl bakteriyalar
-Oksifotobakteriyalar
-Bakteriyaların morfolojiyası
-Tayoqcha şaklli bakteriyalar
-Şarsimon şaklli bakteriyalar
-Ipsimon şaklli bakteriyalar
-
-Bakteriyaların yarımaləmləri
-Bakteriyaların müxtəlifliyi və qidalanma tipləri
-Avtotrof və heterotrof bakteriyalar
-Fototrof və xemotrof bakteriyalar
-Bakteriyaların torpaqdakı rolu
-Bakteriyaların xarici keçidləri
-Bakteriyaların hüceyrə divarı və kapsulu
-Bakteriyaların tənəffüs növləri
-Bakteriyaların koʻndalangiga boʻlinishi
-Bakteriyaların yosunlarla əlaqəsi
-Bakteriyaların prokariotik xüsusiyyətləri
-Bakteriyaların DNK və RNA tərkibi
-Bakteriyaların mitoxondriya və xloroplast yoxluğu
-Bakteriyaların dünya üzərindəki yayılması
-Bakteriyaların biosferadakı əhəmiyyəti
-Bakteriyaların insan sağlamlığına təsiri
-Bakteriyaların bitki və heyvanlarla simbiozu
-Bakteriyaların antibakterial maddələrə reaksiyası
-Bakteriyaların patogen və faydalı növləri
-Bakteriyaların metabolizmi və fermentləri
-Bakteriyaların genetik mühitdilimliliyi
-Bakteriyaların endospor və ekzospor formalaşması
-Bakteriyaların qram boyanması və növləndirilməsi
-Bakteriyaların morfoloji və fizioloji xassələri
-Bakteriyaların siano bakteriya və arxebakteriya qrupları
-Bakteriyaların fotosintez prosesində iştirakı
-Bakteriyaların karbon qazını mənimsəməsi
-Bakteriyaların günəş enerjisindən istifadəsi
-Bakteriyaların qobigʻi və xromosomalari haqqında məlumat
-Bakteriyalarda plazmid, transpozon, faj və konjugasiya kimi elementlər
-Bakteriyalarda mutasiya, transformasiya, transduksiya kimi genetik hadisələr
-Bakteriyalarda antibiotiklere qarşı mukavimlik mekanizmleri
-Bakteriyalarda biofilm, biyolüminesens, biyoremeydiyasiya kimi proseslər
-Bakteriyalarda nitrojen fiksasiyası, amonyak oksidasiyası, nitrifikasiya kimi reaksiyalar
-Bakteriyalarda sülfür oksidasiyası, sülfat redüksionu, sülfür redüksionu kimi reaksiyalar
-Bakteriyalarda metan sintezi, metan oksidasiyası, metanotrofiya kimi reaksiyalar
-Bakteriyalarda karbonat sintezi, karbonat oksidasiyası, karbonat redüksionu kimi reaksiyalar
-Bakteriyalarda ferrik oksidasiyası, ferroz oksidasiyası, ferroz redüksionu kimi reaksiyalar
-Bakteriyalarda hidrojen sintezi, hidrojen oksidasiyası, hidrojenotrofiya kimi reaksiyalar
-Bakteriyalarda asetik asit sintezi, asetik asit oksidasiyası, asetik asit redüksionu kimi reaksiyalar
-Bakteriyalarda laktik
-Buralgan şaklli bakteriyalar
-Bakteriyaların fiziologiyası
-Oksigen tǝlǝbi
-Karbon tǝlǝbi
-Azot tělěbi
-Enerji tǝlǝbi
-Hüceyrə dövrü
-Hüceyrě bölünměsi
-Sporlaşma
-Hüceyrə qidalanması
-Hüceyrě növü
-Bakteriyaların dünyada rolu
-
-
-
-Interesting facts about bacteria
-
-| Fact | Explanation |
-| ---- | ----------- |
-| Bacteria are the most numerous living things on Earth. | There are estimated to be more than 5x10^30 bacteria in the world, roughly ten times the number of cells in the bodies of all humans combined. |
-| In the human body, bacteria live mostly in the gut. | The human gut is estimated to hold more than 100 trillion bacteria. These bacteria help regulate digestion, the immune system and metabolism. |
-| Bacteria changed the composition of Earth's atmosphere. | About 2.5 billion years ago there was very little oxygen on Earth. Photosynthetic bacteria called cyanobacteria released oxygen into the atmosphere while harvesting energy from light, which raised oxygen levels and allowed aerobic life to emerge. |
-| Bacteria gave humans antibiotics. | Antibiotics are substances used to treat bacterial diseases. Most antibiotics are obtained from other bacteria or from fungi; for example, penicillin, the first antibiotic, was discovered in a mould called Penicillium. |
-| Bacteria change the genetics of humans. | Bacteria transfer genetic material between their cells, and through this transfer they acquire new traits from one another. They have also shaped the genetics of other organisms: the mitochondria and chloroplasts found in animal and plant cells are thought to descend from what were once free-living bacteria. |
-
-Questions and answers about bacteria
-
-Summary
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Arknights NA APK for Android - Free Mobile RPG.md b/spaces/1phancelerku/anime-remove-background/Download Arknights NA APK for Android - Free Mobile RPG.md
deleted file mode 100644
index 70bd23168da5df3cca585d0087e4e4f81aca4db4..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Arknights NA APK for Android - Free Mobile RPG.md
+++ /dev/null
@@ -1,167 +0,0 @@
-
-Arknights NA APK: How to Download and Play the Popular Strategy RPG on Android
-arknights na apk
-What is Arknights?
-Why is Arknights popular?
-
-arknights english apk latest version
-arknights na apk mod
-arknights apk for android
-arknights na apk reddit
-arknights na apk update
-arknights na apk obb
-arknights na apk mirror
-arknights na apk pure
-arknights na apk nox
-arknights na apk bluestacks
-arknights na apk ios
-arknights na apk pc
-arknights na apk qooapp
-arknights na apk tap
-arknights na apk uptodown
-arknights na apk apkpure
-arknights na apk android 11
-arknights na apk android 10
-arknights na apk android 9
-arknights na apk android 8
-arknights na apk android 7
-arknights na apk android 6
-arknights na apk android 5
-arknights na apk android 4.4
-arknights na apk free download
-arknights na apk offline
-arknights na apk online
-arknights na apk hack
-arknights na apk cheat
-arknights na apk unlimited money
-arknights na apk unlimited originium
-arknights na apk unlimited sanity
-arknights na apk unlimited resources
-arknights na apk unlimited recruitment permits
-arknights na apk data download
-arknights na apk data file host
-arknights na apk data obb download
-arknights na apk data mod download
-arknights na apk data mega download
-how to install arknights na apk on android device
-how to install arknights na apk on pc using emulator
-how to install arknights na apk on ios device using appcake or cydia impactor
-how to update arknights na apk manually
-how to play arknights na apk without vpn
-how to play arknights na apk with vpn
-how to play arknights na apk on pc
-how to play arknights na apk on ios
-how to play arknights na apk offline
-
-What is an APK file?
-
-
How to download and install Arknights NA APK on Android
-Step 1: Allow unknown apps on your device
-
-
-Step 2: Download the APK file from a trusted source
-
-
-Step 3: Install the APK file using a file manager or a browser
-
-
-
-
-Step 4: Launch the game and enjoy
-
-
-Benefits and risks of using Arknights NA APK
-Benefits of using Arknights NA APK
-
-
-Risks of using Arknights NA APK
-
-
-Conclusion
-
-
-FAQs
-Q: Is Arknights NA APK safe to use?
-Q: Is Arknights NA APK free to use?
- Q: How can I update Arknights NA APK?
-Q: Can I play Arknights NA APK with other players?
-Q: Can I transfer my Arknights NA APK data to another device?
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download MP4 Never Have I Ever Season 3 - The Best Way to Binge Watch.md b/spaces/1phancelerku/anime-remove-background/Download MP4 Never Have I Ever Season 3 - The Best Way to Binge Watch.md
deleted file mode 100644
index a8eeea4e0a1e236545f303bf5840d9ec28aadc04..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download MP4 Never Have I Ever Season 3 - The Best Way to Binge Watch.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-Download MP4 Never Have I Ever Season 3: Everything You Need to Know
-download mp4 never have i ever season 3
- What is Never Have I Ever About?
-When and Where to Watch Never Have I Ever Season 3?
-How to Download MP4 Never Have I Ever Season 3?
-
-download mp4 never have i ever season 3 finale
-download mp4 never have i ever season 3 free online
-download mp4 never have i ever season 3 netflix
-download mp4 never have i ever season 3 full episodes
-download mp4 never have i ever season 3 english subtitles
-download mp4 never have i ever season 3 torrent
-download mp4 never have i ever season 3 release date
-download mp4 never have i ever season 3 trailer
-download mp4 never have i ever season 3 cast
-download mp4 never have i ever season 3 review
-download mp4 never have i ever season 3 recap
-download mp4 never have i ever season 3 spoilers
-download mp4 never have i ever season 3 soundtrack
-download mp4 never have i ever season 3 bloopers
-download mp4 never have i ever season 3 behind the scenes
-download mp4 never have i ever season 3 deleted scenes
-download mp4 never have i ever season 3 hd quality
-download mp4 never have i ever season 3 in hindi
-download mp4 never have i ever season 3 in spanish
-download mp4 never have i ever season 3 in french
-download mp4 never have i ever season 3 in german
-download mp4 never have i ever season 3 in italian
-download mp4 never have i ever season 3 in portuguese
-download mp4 never have i ever season 3 in arabic
-download mp4 never have i ever season 3 in japanese
-download mp4 never have i ever season 3 in korean
-download mp4 never have i ever season 3 in chinese
-download mp4 never have i ever season 3 in russian
-download mp4 never have i ever season 3 in turkish
-download mp4 never have i ever season 3 with subtitles
-download mp4 never have i ever season 3 with commentary
-download mp4 never have i ever season 3 with bonus features
-download mp4 never have i ever season 3 with extras
-download mp4 never have i ever season 3 with director's cut
-download mp4 never have i ever season 3 with alternate endings
-download mp4 never have i ever season 3 with extended scenes
-download mp4 never have i ever season 3 with interviews
-download mp4 never have i ever season 3 with fan reactions
-download mp4 never have i ever season 3 with ratings
-download mp4 never have i ever season 3 without ads
-download mp4 never have i ever season 3 without registration
-download mp4 never have i ever season 3 without virus
-download mp4 never have i ever season 3 without watermark
-download mp4 never have i ever season 3 without survey
-download mp4 never have i ever season 3 without sign up
-download mp4 never have i ever season 3 without login
-download mp4 never have i ever season 3 without password
-
-
-
- Why Download MP4 Never Have I Ever Season 3?
-
-
- Conclusion
-FAQs
-Q1: How many episodes are there in Never Have I Ever Season 3?
-Q2: Who are the cast members of Never Have I Ever Season 3?
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Sacred Games Season 1 in Hindi - Filmyhit Originals.md b/spaces/1phancelerku/anime-remove-background/Download Sacred Games Season 1 in Hindi - Filmyhit Originals.md
deleted file mode 100644
index 0d3a4bc6690542985b06b7c39237eaf5d88f875c..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Sacred Games Season 1 in Hindi - Filmyhit Originals.md
+++ /dev/null
@@ -1,111 +0,0 @@
-
-Sacred Games Season 1: A Crime Thriller That Will Keep You Hooked
-sacred games season 1 download by filmyhit
-What is Sacred Games and why is it popular?
-What happens in season 1 of Sacred Games?
-Who are the main actors and roles in Sacred Games?
-
-
-How to Watch Sacred Games Season 1 Online Legally
-Netflix: The official streaming platform for Sacred Games
-
-sacred games season 1 download filmyhit hd quality
-sacred games season 1 download filmyhit in hindi
-sacred games season 1 download filmyhit with subtitles
-sacred games season 1 download filmyhit free online
-sacred games season 1 download filmyhit 480p
-sacred games season 1 download filmyhit mp4
-sacred games season 1 download filmyhit torrent
-sacred games season 1 download filmyhit link
-sacred games season 1 download filmyhit watch online
-sacred games season 1 download filmyhit dual audio
-sacred games season 1 download filmyhit all episodes
-sacred games season 1 download filmyhit netflix original
-sacred games season 1 download filmyhit web series
-sacred games season 1 download filmyhit review
-sacred games season 1 download filmyhit cast
-sacred games season 1 download filmyhit trailer
-sacred games season 1 download filmyhit index
-sacred games season 1 download filmyhit zip file
-sacred games season 1 download filmyhit google drive
-sacred games season 1 download filmyhit direct link
-sacred games season 1 download filmyhit english subtitles
-sacred games season 1 download filmyhit hindi dubbed
-sacred games season 1 download filmyhit leaked online
-sacred games season 1 download filmyhit illegal site
-sacred games season 1 download filmyhit alternative sites
-sacred games season 1 download filmyhit how to download
-sacred games season 1 download filmyhit best quality
-sacred games season 1 download filmyhit latest update
-sacred games season 1 download filmyhit release date
-Other options: Alternative ways to watch Sacred Games online
-
-
-Why You Should Avoid Downloading Sacred Games Season 1 from Filmyhit
-Filmyhit: A notorious website for pirating movies and web series
-Risks and consequences: The dangers of using Filmyhit and other illegal sites
-
-
-Legal actions: The laws and penalties for piracy in India
-
-
-FAQs About Sacred Games Season 1
-Q1: How many episodes are there in season 1 of Sacred Games?
-
-
-Q2: Is Sacred Games based on a true story?
-Q3: Will there be a season 3 of Sacred Games?
-Q4: What are some similar shows to Sacred Games?
-
-
-Q5: How can I support the makers of Sacred Games?
-Conclusion
-
-
-
\ No newline at end of file
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/models/encodec.py b/spaces/AIConsultant/MusicGen/audiocraft/models/encodec.py
deleted file mode 100644
index 1cf6b54b582975a01bdb7a06280c766d3d2cc72c..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/models/encodec.py
+++ /dev/null
@@ -1,392 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Compression models or wrapper around existing models.
-Also defines the main interface that a model must follow to be usable as an audio tokenizer.
-"""
-
-from abc import ABC, abstractmethod
-import logging
-import math
-from pathlib import Path
-import typing as tp
-
-import numpy as np
-import torch
-from torch import nn
-from transformers import EncodecModel as HFEncodecModel
-
-from .. import quantization as qt
-
-
-logger = logging.getLogger()
-
-
-class CompressionModel(ABC, nn.Module):
- """Base API for all compression model that aim at being used as audio tokenizers
- with a language model.
- """
-
- @abstractmethod
- def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
- ...
-
- @abstractmethod
- def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- """See `EncodecModel.encode`."""
- ...
-
- @abstractmethod
- def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
- """See `EncodecModel.decode`."""
- ...
-
- @abstractmethod
- def decode_latent(self, codes: torch.Tensor):
- """Decode from the discrete codes to continuous latent space."""
- ...
-
- @property
- @abstractmethod
- def channels(self) -> int:
- ...
-
- @property
- @abstractmethod
- def frame_rate(self) -> float:
- ...
-
- @property
- @abstractmethod
- def sample_rate(self) -> int:
- ...
-
- @property
- @abstractmethod
- def cardinality(self) -> int:
- ...
-
- @property
- @abstractmethod
- def num_codebooks(self) -> int:
- ...
-
- @property
- @abstractmethod
- def total_codebooks(self) -> int:
- ...
-
- @abstractmethod
- def set_num_codebooks(self, n: int):
- """Set the active number of codebooks used by the quantizer."""
- ...
-
- @staticmethod
- def get_pretrained(
- name: str, device: tp.Union[torch.device, str] = 'cpu'
- ) -> 'CompressionModel':
- """Instantiate a CompressionModel from a given pretrained model.
-
- Args:
- name (Path or str): name of the pretrained model. See after.
- device (torch.device or str): Device on which the model is loaded.
-
- Pretrained models:
- - dac_44khz (https://github.com/descriptinc/descript-audio-codec)
- - dac_24khz (same)
- - facebook/encodec_24khz (https://huggingface.co/facebook/encodec_24khz)
- - facebook/encodec_32khz (https://huggingface.co/facebook/encodec_32khz)
- - your own model on Hugging Face. Export instructions to come...
- """
-
- from . import builders, loaders
- model: CompressionModel
- if name in ['dac_44khz', 'dac_24khz']:
- model_type = name.split('_')[1]
- logger.info("Getting pretrained compression model from DAC %s", model_type)
- model = DAC(model_type)
- elif name in ['debug_compression_model']:
- logger.info("Getting pretrained compression model for debug")
- model = builders.get_debug_compression_model()
- elif Path(name).exists():
- # We assume here if the paths exist that it is in fact an AC checkpoint
- # that was exported using `audiocraft.utils.export` functions.
- model = loaders.load_compression_model(name, device=device)
- else:
- logger.info("Getting pretrained compression model from HF %s", name)
- hf_model = HFEncodecModel.from_pretrained(name)
- model = HFEncodecCompressionModel(hf_model).to(device)
- return model.to(device).eval()
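- # Usage sketch (assumes `wav` is a [B, C, T] float tensor at the model's sample rate):
- #     model = CompressionModel.get_pretrained('facebook/encodec_32khz')
- #     codes, scale = model.encode(wav)
- #     reconstruction = model.decode(codes, scale)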
-
-
-class EncodecModel(CompressionModel):
- """Encodec model operating on the raw waveform.
-
- Args:
- encoder (nn.Module): Encoder network.
- decoder (nn.Module): Decoder network.
- quantizer (qt.BaseQuantizer): Quantizer network.
- frame_rate (int): Frame rate for the latent representation.
- sample_rate (int): Audio sample rate.
- channels (int): Number of audio channels.
- causal (bool): Whether to use a causal version of the model.
- renormalize (bool): Whether to renormalize the audio before running the model.
- """
- # we need assignment to override the property in the abstract class,
- # I couldn't find a better way...
- frame_rate: float = 0
- sample_rate: int = 0
- channels: int = 0
-
- def __init__(self,
- encoder: nn.Module,
- decoder: nn.Module,
- quantizer: qt.BaseQuantizer,
- frame_rate: int,
- sample_rate: int,
- channels: int,
- causal: bool = False,
- renormalize: bool = False):
- super().__init__()
- self.encoder = encoder
- self.decoder = decoder
- self.quantizer = quantizer
- self.frame_rate = frame_rate
- self.sample_rate = sample_rate
- self.channels = channels
- self.renormalize = renormalize
- self.causal = causal
- if self.causal:
- # we force disabling here to avoid handling linear overlap of segments
- # as supported in original EnCodec codebase.
- assert not self.renormalize, 'Causal model does not support renormalize'
-
- @property
- def total_codebooks(self):
- """Total number of quantizer codebooks available."""
- return self.quantizer.total_codebooks
-
- @property
- def num_codebooks(self):
- """Active number of codebooks used by the quantizer."""
- return self.quantizer.num_codebooks
-
- def set_num_codebooks(self, n: int):
- """Set the active number of codebooks used by the quantizer."""
- self.quantizer.set_num_codebooks(n)
-
- @property
- def cardinality(self):
- """Cardinality of each codebook."""
- return self.quantizer.bins
-
- def preprocess(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- scale: tp.Optional[torch.Tensor]
- if self.renormalize:
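- # `scale` is the per-item RMS volume of the mono mix (plus a small epsilon);
- # dividing by it brings each example to roughly unit loudness before encoding,
- # and `postprocess` multiplies it back in after decoding.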
- mono = x.mean(dim=1, keepdim=True)
- volume = mono.pow(2).mean(dim=2, keepdim=True).sqrt()
- scale = 1e-8 + volume
- x = x / scale
- scale = scale.view(-1, 1)
- else:
- scale = None
- return x, scale
-
- def postprocess(self,
- x: torch.Tensor,
- scale: tp.Optional[torch.Tensor] = None) -> torch.Tensor:
- if scale is not None:
- assert self.renormalize
- x = x * scale.view(-1, 1, 1)
- return x
-
- def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
- assert x.dim() == 3
- length = x.shape[-1]
- x, scale = self.preprocess(x)
-
- emb = self.encoder(x)
- q_res = self.quantizer(emb, self.frame_rate)
- out = self.decoder(q_res.x)
-
- # remove extra padding added by the encoder and decoder
- assert out.shape[-1] >= length, (out.shape[-1], length)
- out = out[..., :length]
-
- q_res.x = self.postprocess(out, scale)
-
- return q_res
-
- def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- """Encode the given input tensor to quantized representation along with scale parameter.
-
- Args:
- x (torch.Tensor): Float tensor of shape [B, C, T]
-
- Returns:
- codes, scale (tuple of torch.Tensor, torch.Tensor): Tuple composed of:
- codes, an int tensor of shape [B, K, T] with K the number of codebooks used and T the number of timesteps.
- scale, a float tensor containing the scale needed for audio renormalization (or None when renormalization is disabled).
- """
- assert x.dim() == 3
- x, scale = self.preprocess(x)
- emb = self.encoder(x)
- codes = self.quantizer.encode(emb)
- return codes, scale
-
- def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
- """Decode the given codes to a reconstructed representation, using the scale to perform
- audio denormalization if needed.
-
- Args:
- codes (torch.Tensor): Int tensor of shape [B, K, T]
- scale (torch.Tensor, optional): Float tensor containing the scale value.
-
- Returns:
- out (torch.Tensor): Float tensor of shape [B, C, T], the reconstructed audio.
- """
- emb = self.decode_latent(codes)
- out = self.decoder(emb)
- out = self.postprocess(out, scale)
- # out contains extra padding added by the encoder and decoder
- return out
-
- def decode_latent(self, codes: torch.Tensor):
- """Decode from the discrete codes to continuous latent space."""
- return self.quantizer.decode(codes)
-
-
-class DAC(CompressionModel):
- def __init__(self, model_type: str = "44khz"):
- super().__init__()
- try:
- import dac.utils
- except ImportError:
- raise RuntimeError("Could not import dac, make sure it is installed, "
- "please run `pip install descript-audio-codec`")
- self.model = dac.utils.load_model(model_type=model_type)
- self.n_quantizers = self.total_codebooks
-
- def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
- # We don't support training with this.
- raise NotImplementedError("Forward and training with DAC not supported.")
-
- def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- codes = self.model.encode(x, self.n_quantizers)[1]
- return codes, None
-
- def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
- assert scale is None
- z_q = self.decode_latent(codes)
- return self.model.decode(z_q)
-
- def decode_latent(self, codes: torch.Tensor):
- """Decode from the discrete codes to continuous latent space."""
- return self.model.quantizer.from_codes(codes)[0]
-
- @property
- def channels(self) -> int:
- return 1
-
- @property
- def frame_rate(self) -> float:
- return self.model.sample_rate / self.model.hop_length
-
- @property
- def sample_rate(self) -> int:
- return self.model.sample_rate
-
- @property
- def cardinality(self) -> int:
- return self.model.codebook_size
-
- @property
- def num_codebooks(self) -> int:
- return self.n_quantizers
-
- @property
- def total_codebooks(self) -> int:
- return self.model.n_codebooks
-
- def set_num_codebooks(self, n: int):
- """Set the active number of codebooks used by the quantizer.
- """
- assert n >= 1
- assert n <= self.total_codebooks
- self.n_quantizers = n
-
-
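A hedged usage sketch for the DAC wrapper above (assumes `descript-audio-codec` is installed and that pretrained weights can be downloaded on first use; tensor shapes are indicative only, not taken from the diff):

```python
import torch

codec = DAC(model_type="44khz")            # loads pretrained weights via dac.utils.load_model
codec.set_num_codebooks(4)                 # keep only the first 4 quantizer stages
wav = torch.zeros(1, 1, codec.sample_rate) # roughly one second of silence, [B, C, T]
codes, scale = codec.encode(wav)           # codes: [B, K, T'], scale is always None here
out = codec.decode(codes)                  # reconstructed audio, [B, 1, ~T]
print(codes.shape, out.shape)
```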
-class HFEncodecCompressionModel(CompressionModel):
- """Wrapper around HuggingFace Encodec.
- """
- def __init__(self, model: HFEncodecModel):
- super().__init__()
- self.model = model
- bws = self.model.config.target_bandwidths
- num_codebooks = [
- bw * 1000 / (self.frame_rate * math.log2(self.cardinality))
- for bw in bws
- ]
- deltas = [nc - int(nc) for nc in num_codebooks]
- # Checking we didn't do some bad maths and we indeed have integers!
- assert all(delta <= 1e-3 for delta in deltas), deltas
- self.possible_num_codebooks = [int(nc) for nc in num_codebooks]
- self.set_num_codebooks(max(self.possible_num_codebooks))
-
- def forward(self, x: torch.Tensor) -> qt.QuantizedResult:
- # We don't support training with this.
- raise NotImplementedError("Forward and training with HF EncodecModel not supported.")
-
- def encode(self, x: torch.Tensor) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- bandwidth_index = self.possible_num_codebooks.index(self.num_codebooks)
- bandwidth = self.model.config.target_bandwidths[bandwidth_index]
- res = self.model.encode(x, None, bandwidth)
- assert len(res[0]) == 1
- assert len(res[1]) == 1
- return res[0][0], res[1][0]
-
- def decode(self, codes: torch.Tensor, scale: tp.Optional[torch.Tensor] = None):
- if scale is None:
- scales = [None] # type: ignore
- else:
- scales = scale # type: ignore
- res = self.model.decode(codes[None], scales)
- return res[0]
-
- def decode_latent(self, codes: torch.Tensor):
- """Decode from the discrete codes to continuous latent space."""
- return self.model.quantizer.decode(codes.transpose(0, 1))
-
- @property
- def channels(self) -> int:
- return self.model.config.audio_channels
-
- @property
- def frame_rate(self) -> float:
- hop_length = int(np.prod(self.model.config.upsampling_ratios))
- return self.sample_rate / hop_length
-
- @property
- def sample_rate(self) -> int:
- return self.model.config.sampling_rate
-
- @property
- def cardinality(self) -> int:
- return self.model.config.codebook_size
-
- @property
- def num_codebooks(self) -> int:
- return self._num_codebooks
-
- @property
- def total_codebooks(self) -> int:
- return max(self.possible_num_codebooks)
-
- def set_num_codebooks(self, n: int):
- """Set the active number of codebooks used by the quantizer.
- """
- if n not in self.possible_num_codebooks:
- raise ValueError(f"Allowed values for num codebooks: {self.possible_num_codebooks}")
- self._num_codebooks = n
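The constructor of `HFEncodecCompressionModel` above derives the allowed codebook counts from the checkpoint's target bandwidths: each residual codebook stream costs `frame_rate * log2(cardinality)` bits per second. A quick check with figures typical of the 24 kHz HF EnCodec checkpoint (frame rate 75 Hz, codebook size 1024; these numbers are assumed here, not taken from the diff):

```python
import math

frame_rate = 75       # frames per second (assumed)
cardinality = 1024    # codebook size (assumed)
for bw in (1.5, 3.0, 6.0, 12.0, 24.0):    # target bandwidths in kbps
    n = bw * 1000 / (frame_rate * math.log2(cardinality))
    print(f"{bw:5.1f} kbps -> {n:g} codebooks")   # 2, 4, 8, 16, 32
```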
diff --git a/spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/word_vectorizer.py b/spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/word_vectorizer.py
deleted file mode 100644
index 557ff97a9539c084167f3eca51fb50f53f33c8ea..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/word_vectorizer.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import numpy as np
-import pickle
-from os.path import join as pjoin
-
-POS_enumerator = {
- 'VERB': 0,
- 'NOUN': 1,
- 'DET': 2,
- 'ADP': 3,
- 'NUM': 4,
- 'AUX': 5,
- 'PRON': 6,
- 'ADJ': 7,
- 'ADV': 8,
- 'Loc_VIP': 9,
- 'Body_VIP': 10,
- 'Obj_VIP': 11,
- 'Act_VIP': 12,
- 'Desc_VIP': 13,
- 'OTHER': 14,
-}
-
-Loc_list = ('left', 'right', 'clockwise', 'counterclockwise', 'anticlockwise', 'forward', 'back', 'backward',
- 'up', 'down', 'straight', 'curve')
-
-Body_list = ('arm', 'chin', 'foot', 'feet', 'face', 'hand', 'mouth', 'leg', 'waist', 'eye', 'knee', 'shoulder', 'thigh')
-
-Obj_List = ('stair', 'dumbbell', 'chair', 'window', 'floor', 'car', 'ball', 'handrail', 'baseball', 'basketball')
-
-Act_list = ('walk', 'run', 'swing', 'pick', 'bring', 'kick', 'put', 'squat', 'throw', 'hop', 'dance', 'jump', 'turn',
- 'stumble', 'stop', 'sit', 'lift', 'lower', 'raise', 'wash', 'stand', 'kneel', 'stroll',
- 'rub', 'bend', 'balance', 'flap', 'jog', 'shuffle', 'lean', 'rotate', 'spin', 'spread', 'climb')
-
-Desc_list = ('slowly', 'carefully', 'fast', 'careful', 'slow', 'quickly', 'happy', 'angry', 'sad', 'happily',
- 'angrily', 'sadly')
-
-VIP_dict = {
- 'Loc_VIP': Loc_list,
- 'Body_VIP': Body_list,
- 'Obj_VIP': Obj_List,
- 'Act_VIP': Act_list,
- 'Desc_VIP': Desc_list,
-}
-
-
-class WordVectorizer(object):
- def __init__(self, meta_root, prefix):
- vectors = np.load(pjoin(meta_root, '%s_data.npy'%prefix))
- words = pickle.load(open(pjoin(meta_root, '%s_words.pkl'%prefix), 'rb'))
- self.word2idx = pickle.load(open(pjoin(meta_root, '%s_idx.pkl'%prefix), 'rb'))
- self.word2vec = {w: vectors[self.word2idx[w]] for w in words}
-
- def _get_pos_ohot(self, pos):
- pos_vec = np.zeros(len(POS_enumerator))
- if pos in POS_enumerator:
- pos_vec[POS_enumerator[pos]] = 1
- else:
- pos_vec[POS_enumerator['OTHER']] = 1
- return pos_vec
-
- def __len__(self):
- return len(self.word2vec)
-
- def __getitem__(self, item):
- word, pos = item.split('/')
- if word in self.word2vec:
- word_vec = self.word2vec[word]
- vip_pos = None
- for key, values in VIP_dict.items():
- if word in values:
- vip_pos = key
- break
- if vip_pos is not None:
- pos_vec = self._get_pos_ohot(vip_pos)
- else:
- pos_vec = self._get_pos_ohot(pos)
- else:
- word_vec = self.word2vec['unk']
- pos_vec = self._get_pos_ohot('OTHER')
- return word_vec, pos_vec
-
-
-class WordVectorizerV2(WordVectorizer):
- def __init__(self, meta_root, prefix):
- super(WordVectorizerV2, self).__init__(meta_root, prefix)
- self.idx2word = {self.word2idx[w]: w for w in self.word2idx}
-
- def __getitem__(self, item):
- word_vec, pose_vec = super(WordVectorizerV2, self).__getitem__(item)
- word, pos = item.split('/')
- if word in self.word2vec:
- return word_vec, pose_vec, self.word2idx[word]
- else:
- return word_vec, pose_vec, self.word2idx['unk']
-
- def itos(self, idx):
- if idx == len(self.idx2word):
- return "pad"
- return self.idx2word[idx]
\ No newline at end of file
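A small self-contained sketch (not in the deleted file) of the lookup order used by `__getitem__` above, reusing the `VIP_dict` and `POS_enumerator` defined earlier: a word found in one of the VIP lists takes that VIP category's slot in the one-hot vector, otherwise the generic POS tag (or `OTHER`) is used:

```python
def pos_category(word: str, pos: str) -> str:
    # VIP lists override the generic part-of-speech tag
    for key, values in VIP_dict.items():
        if word in values:
            return key
    return pos if pos in POS_enumerator else 'OTHER'

print(pos_category('walk', 'VERB'))   # 'Act_VIP' -> one-hot index 12
print(pos_category('table', 'NOUN'))  # 'NOUN'    -> one-hot index 1
```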
diff --git a/spaces/AIFILMS/speecht5-tts-demo/app.py b/spaces/AIFILMS/speecht5-tts-demo/app.py
deleted file mode 100644
index ecd0851104cd6bf584f182cb9edd6dbd8af72e20..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/speecht5-tts-demo/app.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import gradio as gr
-import librosa
-import numpy as np
-import torch
-
-from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
-
-
-checkpoint = "microsoft/speecht5_tts"
-processor = SpeechT5Processor.from_pretrained(checkpoint)
-model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint)
-vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
-
-
-speaker_embeddings = {
- "BDL": "spkemb/cmu_us_bdl_arctic-wav-arctic_a0009.npy",
- "CLB": "spkemb/cmu_us_clb_arctic-wav-arctic_a0144.npy",
- "KSP": "spkemb/cmu_us_ksp_arctic-wav-arctic_b0087.npy",
- "RMS": "spkemb/cmu_us_rms_arctic-wav-arctic_b0353.npy",
- "SLT": "spkemb/cmu_us_slt_arctic-wav-arctic_a0508.npy",
-}
-
-
-def predict(text, speaker):
- if len(text.strip()) == 0:
- return (16000, np.zeros(0).astype(np.int16))
-
- inputs = processor(text=text, return_tensors="pt")
-
- # limit input length
- input_ids = inputs["input_ids"]
- input_ids = input_ids[..., :model.config.max_text_positions]
-
- if speaker == "Surprise Me!":
- # load one of the provided speaker embeddings at random
- idx = np.random.randint(len(speaker_embeddings))
- key = list(speaker_embeddings.keys())[idx]
- speaker_embedding = np.load(speaker_embeddings[key])
-
- # randomly shuffle the elements
- np.random.shuffle(speaker_embedding)
-
- # randomly flip half the values
- x = (np.random.rand(512) >= 0.5) * 1.0
- x[x == 0] = -1.0
- speaker_embedding *= x
-
- #speaker_embedding = np.random.rand(512).astype(np.float32) * 0.3 - 0.15
- else:
- speaker_embedding = np.load(speaker_embeddings[speaker[:3]])
-
- speaker_embedding = torch.tensor(speaker_embedding).unsqueeze(0)
-
- speech = model.generate_speech(input_ids, speaker_embedding, vocoder=vocoder)
-
- speech = (speech.numpy() * 32767).astype(np.int16)
- return (16000, speech)
-
-
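The "Surprise Me!" branch in `predict` above randomizes a stored speaker embedding rather than sampling one from scratch: the 512-dimensional vector is permuted and roughly half of its entries have their sign flipped. A standalone sketch of that trick (dummy embedding, not one of the .npy files listed above):

```python
import numpy as np

embedding = np.random.rand(512).astype(np.float32)   # stand-in for a loaded speaker embedding
np.random.shuffle(embedding)                          # permute the dimensions in place
signs = (np.random.rand(512) >= 0.5) * 2.0 - 1.0      # entries in {-1.0, +1.0}
randomized = embedding * signs
print(randomized.shape, float((signs < 0).mean()))    # (512,), roughly 0.5
```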
-title = "SpeechT5: Speech Synthesis"
-
-description = """
-The SpeechT5 model is pre-trained on text as well as speech inputs, with targets that are also a mix of text and speech.
-By pre-training on text and speech at the same time, it learns unified representations for both, resulting in improved modeling capabilities.
-
-SpeechT5 can be fine-tuned for different speech tasks. This space demonstrates the text-to-speech (TTS) checkpoint for the English language.
-
-See also the speech recognition (ASR) demo
-and the voice conversion demo.
-
-How to use: Enter some English text and choose a speaker. The output is a mel spectrogram, which is converted to a mono 16 kHz waveform by the
-HiFi-GAN vocoder. Because the model always applies random dropout, each attempt will give slightly different results.
-The Surprise Me! option creates a completely randomized speaker.
-"""
-
-article = """
-
-@article{Ao2021SpeechT5,
- title = {SpeechT5: Unified-Modal Encoder-Decoder Pre-training for Spoken Language Processing},
- author = {Junyi Ao and Rui Wang and Long Zhou and Chengyi Wang and Shuo Ren and Yu Wu and Shujie Liu and Tom Ko and Qing Li and Yu Zhang and Zhihua Wei and Yao Qian and Jinyu Li and Furu Wei},
- eprint={2110.07205},
- archivePrefix={arXiv},
- primaryClass={eess.AS},
- year={2021}
-}
-
-
-
Upload an Image (MUST Be .PNG and 512x512 or 768x768) enter a Prompt, or let it just do its Thing, then click submit. 10 Iterations take about 900-1200 seconds currently. For more information about Stable Diffusion or Suggestions for prompts, keywords, artists or styles see https://github.com/Maks-s/sd-akashic", article = "Code Monkey: Manjushri").queue(max_size=5).launch()
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/oval/Oval.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/oval/Oval.js
deleted file mode 100644
index 81870e20017f24c505c20a412d3001d38ec41839..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/oval/Oval.js
+++ /dev/null
@@ -1,39 +0,0 @@
-import Base from '../base/Base.js';
-import { Arc, Circle } from '../utils/Geoms.js'
-
-
-class Oval extends Base {
- constructor(scene, config) {
- super(scene, config);
- this.type = 'rexSpinnerOval';
- }
-
- buildShapes() {
- this.addShape((new Circle()).setName('track'));
- this.addShape((new Arc()).setName('arc'));
- }
-
- updateShapes() {
- var centerX = this.centerX;
- var centerY = this.centerY;
- var radius = this.radius;
- var lineWidth = Math.ceil(radius / 25);
- var maxRadius = radius - (lineWidth / 2);
-
- this.getShape('track')
- .lineStyle(lineWidth, this.color, 0.5)
- .setRadius(maxRadius)
- .setCenterPosition(centerX, centerY);
-
- var startAngle = this.value * 360;
- var endAngle = startAngle + 60;
- this.getShape('arc')
- .lineStyle(lineWidth, this.color, 1)
- .setRadius(maxRadius)
- .setCenterPosition(centerX, centerY)
- .setAngle(startAngle, endAngle);
-
- }
-}
-
-export default Oval;
\ No newline at end of file
diff --git a/spaces/AlterM/Zaglyt2-transformer-test/README.md b/spaces/AlterM/Zaglyt2-transformer-test/README.md
deleted file mode 100644
index 689f2d67afac66e47dee5c6dc57448b20d6fb018..0000000000000000000000000000000000000000
--- a/spaces/AlterM/Zaglyt2-transformer-test/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Zaglyt2 Transformer Test
-emoji: 🚀
-colorFrom: pink
-colorTo: purple
-sdk: gradio
-sdk_version: 3.33.1
-app_file: app.py
-pinned: false
-duplicated_from: RisticksAI/Zaglyt2-transformer-test
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Amon1/ChatGPTForAcadamic/request_llm/bridge_tgui.py b/spaces/Amon1/ChatGPTForAcadamic/request_llm/bridge_tgui.py
deleted file mode 100644
index d7cbe107c5d7013246dd2e99c95c913200498f79..0000000000000000000000000000000000000000
--- a/spaces/Amon1/ChatGPTForAcadamic/request_llm/bridge_tgui.py
+++ /dev/null
@@ -1,167 +0,0 @@
-'''
-Contributed by SagsMug. Modified by binary-husky
-https://github.com/oobabooga/text-generation-webui/pull/175
-'''
-
-import asyncio
-import json
-import random
-import string
-import websockets
-import logging
-import time
-import threading
-import importlib
-from toolbox import get_conf
-LLM_MODEL, = get_conf('LLM_MODEL')
-
-# "TGUI:galactica-1.3b@localhost:7860"
-model_name, addr_port = LLM_MODEL.split('@')
-assert ':' in addr_port, "LLM_MODEL is not in the expected format! " + LLM_MODEL
-addr, port = addr_port.split(':')
-
-def random_hash():
- letters = string.ascii_lowercase + string.digits
- return ''.join(random.choice(letters) for i in range(9))
-
-async def run(context, max_token=512):
- params = {
- 'max_new_tokens': max_token,
- 'do_sample': True,
- 'temperature': 0.5,
- 'top_p': 0.9,
- 'typical_p': 1,
- 'repetition_penalty': 1.05,
- 'encoder_repetition_penalty': 1.0,
- 'top_k': 0,
- 'min_length': 0,
- 'no_repeat_ngram_size': 0,
- 'num_beams': 1,
- 'penalty_alpha': 0,
- 'length_penalty': 1,
- 'early_stopping': True,
- 'seed': -1,
- }
- session = random_hash()
-
- async with websockets.connect(f"ws://{addr}:{port}/queue/join") as websocket:
- while content := json.loads(await websocket.recv()):
- #Python3.10 syntax, replace with if elif on older
- if content["msg"] == "send_hash":
- await websocket.send(json.dumps({
- "session_hash": session,
- "fn_index": 12
- }))
- elif content["msg"] == "estimation":
- pass
- elif content["msg"] == "send_data":
- await websocket.send(json.dumps({
- "session_hash": session,
- "fn_index": 12,
- "data": [
- context,
- params['max_new_tokens'],
- params['do_sample'],
- params['temperature'],
- params['top_p'],
- params['typical_p'],
- params['repetition_penalty'],
- params['encoder_repetition_penalty'],
- params['top_k'],
- params['min_length'],
- params['no_repeat_ngram_size'],
- params['num_beams'],
- params['penalty_alpha'],
- params['length_penalty'],
- params['early_stopping'],
- params['seed'],
- ]
- }))
- elif content["msg"] == "process_starts":
- pass
- elif content["msg"] in ["process_generating", "process_completed"]:
- yield content["output"]["data"][0]
- # You can search for your desired end indicator and
- # stop generation by closing the websocket here
- if (content["msg"] == "process_completed"):
- break
-
-
-
-
-
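The `run` coroutine above speaks the gradio queue protocol of text-generation-webui over a websocket and yields partial completions as they arrive. A hedged consumption sketch (assumes a reachable TGUI server at the address parsed from LLM_MODEL; not part of the deleted file):

```python
import asyncio

async def demo():
    # stream partial completions from the coroutine defined above
    async for partial in run("Hello, my name is", max_token=32):
        print(partial, end="\r")

# asyncio.run(demo())  # uncomment once a text-generation-webui instance is reachable
```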
-def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='', stream = True, additional_fn=None):
- """
- Send the request to chatGPT and fetch the output as a stream.
- Used for the basic chat functionality.
- inputs is the input of the current query
- top_p, temperature are chatGPT's internal tuning parameters
- history is the list of previous messages (note that both inputs and history will trigger a token-overflow error if they get too long)
- chatbot is the conversation list displayed in the WebUI; modify it and yield it to update the chat interface directly
- additional_fn indicates which button was clicked; the buttons are defined in functional.py
- """
- if additional_fn is not None:
- import functional
- importlib.reload(functional) # hot-reload the prompt definitions
- functional = functional.get_functionals()
- if "PreProcess" in functional[additional_fn]: inputs = functional[additional_fn]["PreProcess"](inputs) # apply the preprocessing function (if any)
- inputs = functional[additional_fn]["Prefix"] + inputs + functional[additional_fn]["Suffix"]
-
- raw_input = "What I would like to say is the following: " + inputs
- logging.info(f'[raw_input] {raw_input}')
- history.extend([inputs, ""])
- chatbot.append([inputs, ""])
- yield chatbot, history, "Waiting for response"
-
- prompt = inputs
- tgui_say = ""
-
- mutable = ["", time.time()]
- def run_coorotine(mutable):
- async def get_result(mutable):
- async for response in run(prompt):
- print(response[len(mutable[0]):])
- mutable[0] = response
- if (time.time() - mutable[1]) > 3:
- print('exit when no listener')
- break
- asyncio.run(get_result(mutable))
-
- thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True)
- thread_listen.start()
-
- while thread_listen.is_alive():
- time.sleep(1)
- mutable[1] = time.time()
- # Print intermediate steps
- if tgui_say != mutable[0]:
- tgui_say = mutable[0]
- history[-1] = tgui_say
- chatbot[-1] = (history[-2], history[-1])
- yield chatbot, history, "status_text"
-
- logging.info(f'[response] {tgui_say}')
-
-
-
-def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""):
- raw_input = "What I would like to say is the following: " + inputs
- prompt = inputs
- tgui_say = ""
- mutable = ["", time.time()]
- def run_coorotine(mutable):
- async def get_result(mutable):
- async for response in run(prompt, max_token=20):
- print(response[len(mutable[0]):])
- mutable[0] = response
- if (time.time() - mutable[1]) > 3:
- print('exit when no listener')
- break
- asyncio.run(get_result(mutable))
- thread_listen = threading.Thread(target=run_coorotine, args=(mutable,))
- thread_listen.start()
- while thread_listen.is_alive():
- time.sleep(1)
- mutable[1] = time.time()
- tgui_say = mutable[0]
- return tgui_say
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/__init__.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/__init__.py
deleted file mode 100644
index 1fddb712e6a94f41d7b13e1ed2d34b1225ec5f1c..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/__init__.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from dataclasses import dataclass
-from typing import List, Optional, Union
-
-import numpy as np
-import PIL
-from PIL import Image
-
-from ...utils import (
- BaseOutput,
- OptionalDependencyNotAvailable,
- is_flax_available,
- is_k_diffusion_available,
- is_k_diffusion_version,
- is_onnx_available,
- is_torch_available,
- is_transformers_available,
- is_transformers_version,
-)
-
-
-@dataclass
-class StableDiffusionPipelineOutput(BaseOutput):
- """
- Output class for Stable Diffusion pipelines.
-
- Args:
- images (`List[PIL.Image.Image]` or `np.ndarray`)
- List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
- num_channels)`.
- nsfw_content_detected (`List[bool]`)
- List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or
- `None` if safety checking could not be performed.
- """
-
- images: Union[List[PIL.Image.Image], np.ndarray]
- nsfw_content_detected: Optional[List[bool]]
-
-
-try:
- if not (is_transformers_available() and is_torch_available()):
- raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
- from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
-else:
- from .pipeline_cycle_diffusion import CycleDiffusionPipeline
- from .pipeline_stable_diffusion import StableDiffusionPipeline
- from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
- from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
- from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
- from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
- from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
- from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
- from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
- from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
- from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
- from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
- from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
- from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
- from .pipeline_stable_unclip import StableUnCLIPPipeline
- from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
- from .safety_checker import StableDiffusionSafetyChecker
- from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
-
-try:
- if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
- raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
- from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
-else:
- from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
-
-
-try:
- if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
- raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
- from ...utils.dummy_torch_and_transformers_objects import (
- StableDiffusionDepth2ImgPipeline,
- StableDiffusionDiffEditPipeline,
- StableDiffusionPix2PixZeroPipeline,
- )
-else:
- from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
- from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
- from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
-
-
-try:
- if not (
- is_torch_available()
- and is_transformers_available()
- and is_k_diffusion_available()
- and is_k_diffusion_version(">=", "0.0.12")
- ):
- raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
- from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
-else:
- from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
-
-try:
- if not (is_transformers_available() and is_onnx_available()):
- raise OptionalDependencyNotAvailable()
-except OptionalDependencyNotAvailable:
- from ...utils.dummy_onnx_objects import * # noqa F403
-else:
- from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
- from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
- from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
- from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
- from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
-
-if is_transformers_available() and is_flax_available():
- import flax
-
- @flax.struct.dataclass
- class FlaxStableDiffusionPipelineOutput(BaseOutput):
- """
- Output class for Flax-based Stable Diffusion pipelines.
-
- Args:
- images (`np.ndarray`):
- Denoised images of array shape of `(batch_size, height, width, num_channels)`.
- nsfw_content_detected (`List[bool]`):
- List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content
- or `None` if safety checking could not be performed.
- """
-
- images: np.ndarray
- nsfw_content_detected: List[bool]
-
- from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
- from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
- from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
- from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
- from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
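This `__init__` follows diffusers' optional-dependency pattern: probe for each extra (transformers, onnx, k-diffusion, flax), raise a sentinel exception when something is missing, and fall back to dummy placeholder objects so the import itself never hard-fails. A generic, library-agnostic sketch of the same pattern (names here are illustrative, not the diffusers API):

```python
import importlib.util

class OptionalDependencyNotAvailable(Exception):
    """Sentinel raised when an optional extra is missing."""

def is_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

try:
    if not (is_available("torch") and is_available("transformers")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    REAL_PIPELINE_AVAILABLE = False   # diffusers instead exposes dummy objects that raise on use
else:
    REAL_PIPELINE_AVAILABLE = True    # the real pipeline classes would be imported here

print(REAL_PIPELINE_AVAILABLE)
```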
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_3d_condition.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_3d_condition.py
deleted file mode 100644
index 72a33854bdcd3aa96c7cc0159d867265a5f2f78f..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_models_unet_3d_condition.py
+++ /dev/null
@@ -1,421 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import tempfile
-import unittest
-
-import numpy as np
-import torch
-
-from diffusers.models import ModelMixin, UNet3DConditionModel
-from diffusers.models.attention_processor import AttnProcessor, LoRAAttnProcessor
-from diffusers.utils import (
- floats_tensor,
- logging,
- skip_mps,
- torch_device,
-)
-from diffusers.utils.import_utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism
-
-from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
-
-
-enable_full_determinism()
-
-logger = logging.get_logger(__name__)
-
-
-def create_lora_layers(model, mock_weights: bool = True):
- lora_attn_procs = {}
- for name in model.attn_processors.keys():
- has_cross_attention = name.endswith("attn2.processor") and not (
- name.startswith("transformer_in") or "temp_attentions" in name.split(".")
- )
- cross_attention_dim = model.config.cross_attention_dim if has_cross_attention else None
- if name.startswith("mid_block"):
- hidden_size = model.config.block_out_channels[-1]
- elif name.startswith("up_blocks"):
- block_id = int(name[len("up_blocks.")])
- hidden_size = list(reversed(model.config.block_out_channels))[block_id]
- elif name.startswith("down_blocks"):
- block_id = int(name[len("down_blocks.")])
- hidden_size = model.config.block_out_channels[block_id]
- elif name.startswith("transformer_in"):
- # Note that the `8 * ...` comes from: https://github.com/huggingface/diffusers/blob/7139f0e874f10b2463caa8cbd585762a309d12d6/src/diffusers/models/unet_3d_condition.py#L148
- hidden_size = 8 * model.config.attention_head_dim
-
- lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
- lora_attn_procs[name] = lora_attn_procs[name].to(model.device)
-
- if mock_weights:
- # add 1 to weights to mock trained weights
- with torch.no_grad():
- lora_attn_procs[name].to_q_lora.up.weight += 1
- lora_attn_procs[name].to_k_lora.up.weight += 1
- lora_attn_procs[name].to_v_lora.up.weight += 1
- lora_attn_procs[name].to_out_lora.up.weight += 1
-
- return lora_attn_procs
-
-
-@skip_mps
-class UNet3DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
- model_class = UNet3DConditionModel
- main_input_name = "sample"
-
- @property
- def dummy_input(self):
- batch_size = 4
- num_channels = 4
- num_frames = 4
- sizes = (32, 32)
-
- noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device)
- time_step = torch.tensor([10]).to(torch_device)
- encoder_hidden_states = floats_tensor((batch_size, 4, 32)).to(torch_device)
-
- return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states}
-
- @property
- def input_shape(self):
- return (4, 4, 32, 32)
-
- @property
- def output_shape(self):
- return (4, 4, 32, 32)
-
- def prepare_init_args_and_inputs_for_common(self):
- init_dict = {
- "block_out_channels": (32, 64),
- "down_block_types": (
- "CrossAttnDownBlock3D",
- "DownBlock3D",
- ),
- "up_block_types": ("UpBlock3D", "CrossAttnUpBlock3D"),
- "cross_attention_dim": 32,
- "attention_head_dim": 8,
- "out_channels": 4,
- "in_channels": 4,
- "layers_per_block": 1,
- "sample_size": 32,
- }
- inputs_dict = self.dummy_input
- return init_dict, inputs_dict
-
- @unittest.skipIf(
- torch_device != "cuda" or not is_xformers_available(),
- reason="XFormers attention is only available with CUDA and `xformers` installed",
- )
- def test_xformers_enable_works(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
- model = self.model_class(**init_dict)
-
- model.enable_xformers_memory_efficient_attention()
-
- assert (
- model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__
- == "XFormersAttnProcessor"
- ), "xformers is not enabled"
-
- # Overriding to set `norm_num_groups` needs to be different for this model.
- def test_forward_with_norm_groups(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
- init_dict["norm_num_groups"] = 32
-
- model = self.model_class(**init_dict)
- model.to(torch_device)
- model.eval()
-
- with torch.no_grad():
- output = model(**inputs_dict)
-
- if isinstance(output, dict):
- output = output.sample
-
- self.assertIsNotNone(output)
- expected_shape = inputs_dict["sample"].shape
- self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
-
- # Overriding since the UNet3D outputs a different structure.
- def test_determinism(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
- model = self.model_class(**init_dict)
- model.to(torch_device)
- model.eval()
-
- with torch.no_grad():
- # Warmup pass when using mps (see #372)
- if torch_device == "mps" and isinstance(model, ModelMixin):
- model(**self.dummy_input)
-
- first = model(**inputs_dict)
- if isinstance(first, dict):
- first = first.sample
-
- second = model(**inputs_dict)
- if isinstance(second, dict):
- second = second.sample
-
- out_1 = first.cpu().numpy()
- out_2 = second.cpu().numpy()
- out_1 = out_1[~np.isnan(out_1)]
- out_2 = out_2[~np.isnan(out_2)]
- max_diff = np.amax(np.abs(out_1 - out_2))
- self.assertLessEqual(max_diff, 1e-5)
-
- def test_model_attention_slicing(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
- init_dict["attention_head_dim"] = 8
-
- model = self.model_class(**init_dict)
- model.to(torch_device)
- model.eval()
-
- model.set_attention_slice("auto")
- with torch.no_grad():
- output = model(**inputs_dict)
- assert output is not None
-
- model.set_attention_slice("max")
- with torch.no_grad():
- output = model(**inputs_dict)
- assert output is not None
-
- model.set_attention_slice(2)
- with torch.no_grad():
- output = model(**inputs_dict)
- assert output is not None
-
- def test_lora_processors(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
- init_dict["attention_head_dim"] = 8
-
- model = self.model_class(**init_dict)
- model.to(torch_device)
-
- with torch.no_grad():
- sample1 = model(**inputs_dict).sample
-
- lora_attn_procs = create_lora_layers(model)
-
- # make sure we can set a list of attention processors
- model.set_attn_processor(lora_attn_procs)
- model.to(torch_device)
-
- # test that attn processors can be set to itself
- model.set_attn_processor(model.attn_processors)
-
- with torch.no_grad():
- sample2 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample
- sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
- sample4 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
-
- assert (sample1 - sample2).abs().max() < 3e-3
- assert (sample3 - sample4).abs().max() < 3e-3
-
- # sample 2 and sample 3 should be different
- assert (sample2 - sample3).abs().max() > 3e-3
-
- def test_lora_save_load(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
- init_dict["attention_head_dim"] = 8
-
- torch.manual_seed(0)
- model = self.model_class(**init_dict)
- model.to(torch_device)
-
- with torch.no_grad():
- old_sample = model(**inputs_dict).sample
-
- lora_attn_procs = create_lora_layers(model)
- model.set_attn_processor(lora_attn_procs)
-
- with torch.no_grad():
- sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
-
- with tempfile.TemporaryDirectory() as tmpdirname:
- model.save_attn_procs(tmpdirname)
- self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
- torch.manual_seed(0)
- new_model = self.model_class(**init_dict)
- new_model.to(torch_device)
- new_model.load_attn_procs(tmpdirname)
-
- with torch.no_grad():
- new_sample = new_model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
-
- assert (sample - new_sample).abs().max() < 1e-3
-
- # LoRA and no LoRA should NOT be the same
- assert (sample - old_sample).abs().max() > 1e-4
-
- def test_lora_save_load_safetensors(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
- init_dict["attention_head_dim"] = 8
-
- torch.manual_seed(0)
- model = self.model_class(**init_dict)
- model.to(torch_device)
-
- with torch.no_grad():
- old_sample = model(**inputs_dict).sample
-
- lora_attn_procs = create_lora_layers(model)
- model.set_attn_processor(lora_attn_procs)
-
- with torch.no_grad():
- sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
-
- with tempfile.TemporaryDirectory() as tmpdirname:
- model.save_attn_procs(tmpdirname, safe_serialization=True)
- self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
- torch.manual_seed(0)
- new_model = self.model_class(**init_dict)
- new_model.to(torch_device)
- new_model.load_attn_procs(tmpdirname)
-
- with torch.no_grad():
- new_sample = new_model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
-
- assert (sample - new_sample).abs().max() < 3e-3
-
- # LoRA and no LoRA should NOT be the same
- assert (sample - old_sample).abs().max() > 1e-4
-
- def test_lora_save_safetensors_load_torch(self):
- # enable deterministic behavior for gradient checkpointing
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
- init_dict["attention_head_dim"] = 8
-
- torch.manual_seed(0)
- model = self.model_class(**init_dict)
- model.to(torch_device)
-
- lora_attn_procs = create_lora_layers(model, mock_weights=False)
- model.set_attn_processor(lora_attn_procs)
- # Saved in torch format; reloads properly when the filename is passed directly
- with tempfile.TemporaryDirectory() as tmpdirname:
- model.save_attn_procs(tmpdirname)
- self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
- torch.manual_seed(0)
- new_model = self.model_class(**init_dict)
- new_model.to(torch_device)
- new_model.load_attn_procs(tmpdirname, weight_name="pytorch_lora_weights.bin")
-
- def test_lora_save_torch_force_load_safetensors_error(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
- init_dict["attention_head_dim"] = 8
-
- torch.manual_seed(0)
- model = self.model_class(**init_dict)
- model.to(torch_device)
-
- lora_attn_procs = create_lora_layers(model, mock_weights=False)
- model.set_attn_processor(lora_attn_procs)
- # Saved in torch format; reloads properly when the filename is passed directly
- with tempfile.TemporaryDirectory() as tmpdirname:
- model.save_attn_procs(tmpdirname)
- self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
- torch.manual_seed(0)
- new_model = self.model_class(**init_dict)
- new_model.to(torch_device)
- with self.assertRaises(IOError) as e:
- new_model.load_attn_procs(tmpdirname, use_safetensors=True)
- self.assertIn("Error no file named pytorch_lora_weights.safetensors", str(e.exception))
-
- def test_lora_on_off(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
- init_dict["attention_head_dim"] = 8
-
- torch.manual_seed(0)
- model = self.model_class(**init_dict)
- model.to(torch_device)
-
- with torch.no_grad():
- old_sample = model(**inputs_dict).sample
-
- lora_attn_procs = create_lora_layers(model)
- model.set_attn_processor(lora_attn_procs)
-
- with torch.no_grad():
- sample = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample
-
- model.set_attn_processor(AttnProcessor())
-
- with torch.no_grad():
- new_sample = model(**inputs_dict).sample
-
- assert (sample - new_sample).abs().max() < 1e-4
- assert (sample - old_sample).abs().max() < 3e-3
-
- @unittest.skipIf(
- torch_device != "cuda" or not is_xformers_available(),
- reason="XFormers attention is only available with CUDA and `xformers` installed",
- )
- def test_lora_xformers_on_off(self):
- # enable deterministic behavior for gradient checkpointing
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
-
- init_dict["attention_head_dim"] = 4
-
- torch.manual_seed(0)
- model = self.model_class(**init_dict)
- model.to(torch_device)
- lora_attn_procs = create_lora_layers(model)
- model.set_attn_processor(lora_attn_procs)
-
- # default
- with torch.no_grad():
- sample = model(**inputs_dict).sample
-
- model.enable_xformers_memory_efficient_attention()
- on_sample = model(**inputs_dict).sample
-
- model.disable_xformers_memory_efficient_attention()
- off_sample = model(**inputs_dict).sample
-
- assert (sample - on_sample).abs().max() < 1e-4
- assert (sample - off_sample).abs().max() < 1e-4
-
- def test_feed_forward_chunking(self):
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
- init_dict["norm_num_groups"] = 32
-
- model = self.model_class(**init_dict)
- model.to(torch_device)
- model.eval()
-
- with torch.no_grad():
- output = model(**inputs_dict)[0]
-
- model.enable_forward_chunking()
- with torch.no_grad():
- output_2 = model(**inputs_dict)[0]
-
- self.assertEqual(output.shape, output_2.shape, "Shape doesn't match")
- assert np.abs(output.cpu() - output_2.cpu()).max() < 1e-2
-
-
-# (todo: sayakpaul) implement SLOW tests.
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_prior_emb2emb.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_prior_emb2emb.py
deleted file mode 100644
index 8e8caec181a1e89bce630a98ee71e535af881cd6..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky_v22/test_kandinsky_prior_emb2emb.py
+++ /dev/null
@@ -1,257 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import random
-import unittest
-
-import numpy as np
-import torch
-from PIL import Image
-from torch import nn
-from transformers import (
- CLIPImageProcessor,
- CLIPTextConfig,
- CLIPTextModelWithProjection,
- CLIPTokenizer,
- CLIPVisionConfig,
- CLIPVisionModelWithProjection,
-)
-
-from diffusers import KandinskyV22PriorEmb2EmbPipeline, PriorTransformer, UnCLIPScheduler
-from diffusers.utils import floats_tensor, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
-
-from ..test_pipelines_common import PipelineTesterMixin
-
-
-enable_full_determinism()
-
-
-class KandinskyV22PriorEmb2EmbPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyV22PriorEmb2EmbPipeline
- params = ["prompt", "image"]
- batch_params = ["prompt", "image"]
- required_optional_params = [
- "num_images_per_prompt",
- "strength",
- "generator",
- "num_inference_steps",
- "latents",
- "negative_prompt",
- "guidance_scale",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
- @property
- def text_embedder_hidden_size(self):
- return 32
-
- @property
- def time_input_dim(self):
- return 32
-
- @property
- def block_out_channels_0(self):
- return self.time_input_dim
-
- @property
- def time_embed_dim(self):
- return self.time_input_dim * 4
-
- @property
- def cross_attention_dim(self):
- return 100
-
- @property
- def dummy_tokenizer(self):
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
- return tokenizer
-
- @property
- def dummy_text_encoder(self):
- torch.manual_seed(0)
- config = CLIPTextConfig(
- bos_token_id=0,
- eos_token_id=2,
- hidden_size=self.text_embedder_hidden_size,
- projection_dim=self.text_embedder_hidden_size,
- intermediate_size=37,
- layer_norm_eps=1e-05,
- num_attention_heads=4,
- num_hidden_layers=5,
- pad_token_id=1,
- vocab_size=1000,
- )
- return CLIPTextModelWithProjection(config)
-
- @property
- def dummy_prior(self):
- torch.manual_seed(0)
-
- model_kwargs = {
- "num_attention_heads": 2,
- "attention_head_dim": 12,
- "embedding_dim": self.text_embedder_hidden_size,
- "num_layers": 1,
- }
-
- model = PriorTransformer(**model_kwargs)
- # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it doesn't
- model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
- return model
-
- @property
- def dummy_image_encoder(self):
- torch.manual_seed(0)
- config = CLIPVisionConfig(
- hidden_size=self.text_embedder_hidden_size,
- image_size=224,
- projection_dim=self.text_embedder_hidden_size,
- intermediate_size=37,
- num_attention_heads=4,
- num_channels=3,
- num_hidden_layers=5,
- patch_size=14,
- )
-
- model = CLIPVisionModelWithProjection(config)
- return model
-
- @property
- def dummy_image_processor(self):
- image_processor = CLIPImageProcessor(
- crop_size=224,
- do_center_crop=True,
- do_normalize=True,
- do_resize=True,
- image_mean=[0.48145466, 0.4578275, 0.40821073],
- image_std=[0.26862954, 0.26130258, 0.27577711],
- resample=3,
- size=224,
- )
-
- return image_processor
-
- def get_dummy_components(self):
- prior = self.dummy_prior
- image_encoder = self.dummy_image_encoder
- text_encoder = self.dummy_text_encoder
- tokenizer = self.dummy_tokenizer
- image_processor = self.dummy_image_processor
-
- scheduler = UnCLIPScheduler(
- variance_type="fixed_small_log",
- prediction_type="sample",
- num_train_timesteps=1000,
- clip_sample=True,
- clip_sample_range=10.0,
- )
-
- components = {
- "prior": prior,
- "image_encoder": image_encoder,
- "text_encoder": text_encoder,
- "tokenizer": tokenizer,
- "scheduler": scheduler,
- "image_processor": image_processor,
- }
-
- return components
-
- def get_dummy_inputs(self, device, seed=0):
- if str(device).startswith("mps"):
- generator = torch.manual_seed(seed)
- else:
- generator = torch.Generator(device=device).manual_seed(seed)
-
- image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
- image = image.cpu().permute(0, 2, 3, 1)[0]
- init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
-
- inputs = {
- "prompt": "horse",
- "image": init_image,
- "strength": 0.5,
- "generator": generator,
- "guidance_scale": 4.0,
- "num_inference_steps": 2,
- "output_type": "np",
- }
- return inputs
-
- def test_kandinsky_prior_emb2emb(self):
- device = "cpu"
-
- components = self.get_dummy_components()
-
- pipe = self.pipeline_class(**components)
- pipe = pipe.to(device)
-
- pipe.set_progress_bar_config(disable=None)
-
- output = pipe(**self.get_dummy_inputs(device))
- image = output.image_embeds
-
- image_from_tuple = pipe(
- **self.get_dummy_inputs(device),
- return_dict=False,
- )[0]
-
- image_slice = image[0, -10:]
- image_from_tuple_slice = image_from_tuple[0, -10:]
-
- assert image.shape == (1, 32)
-
- expected_slice = np.array(
- [
- 0.1071284,
- 1.3330271,
- 0.61260223,
- -0.6691065,
- -0.3846852,
- -1.0303661,
- 0.22716111,
- 0.03348901,
- 0.30040675,
- -0.24805029,
- ]
- )
-
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
- assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
-
- @skip_mps
- def test_inference_batch_single_identical(self):
- test_max_difference = torch_device == "cpu"
- relax_max_difference = True
- test_mean_pixel_difference = False
-
- self._test_inference_batch_single_identical(
- test_max_difference=test_max_difference,
- relax_max_difference=relax_max_difference,
- test_mean_pixel_difference=test_mean_pixel_difference,
- )
-
- @skip_mps
- def test_attention_slicing_forward_pass(self):
- test_max_difference = torch_device == "cpu"
- test_mean_pixel_difference = False
-
- self._test_attention_slicing_forward_pass(
- test_max_difference=test_max_difference,
- test_mean_pixel_difference=test_mean_pixel_difference,
- )
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/_base_/schedules/schedule_1x.py b/spaces/Andy1621/uniformer_image_detection/configs/_base_/schedules/schedule_1x.py
deleted file mode 100644
index 13b3783cbbe93b6c32bc415dc50f633dffa4aec7..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/_base_/schedules/schedule_1x.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
-optimizer_config = dict(grad_clip=None)
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=500,
- warmup_ratio=0.001,
- step=[8, 11])
-runner = dict(type='EpochBasedRunner', max_epochs=12)
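Under MMCV's hook semantics (assumed here, not spelled out in the config), this schedule means: linear warmup from `lr * warmup_ratio` up to `lr` over the first 500 iterations, then the learning rate is divided by 10 at epochs 8 and 11 of the 12-epoch run. A back-of-the-envelope sketch:

```python
def lr_at(epoch: int, it: int, base_lr=0.02, warmup_iters=500, warmup_ratio=0.001, steps=(8, 11)):
    if it < warmup_iters:  # linear warmup phase
        k = (1 - it / warmup_iters) * (1 - warmup_ratio)
        return base_lr * (1 - k)
    return base_lr * (0.1 ** sum(epoch >= s for s in steps))  # step decay

print(lr_at(0, 0), lr_at(0, 500), lr_at(8, 10_000), lr_at(11, 20_000))
# ~2e-05, 0.02, 0.002, 0.0002
```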
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py
deleted file mode 100644
index d069f8c9fdbaa55cbc44065740187c242cfa2903..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
-model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
diff --git a/spaces/Andy1621/uniformer_light/README.md b/spaces/Andy1621/uniformer_light/README.md
deleted file mode 100644
index 93c7252b578972c933fff80f68c7abb684145885..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_light/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Uniformer Light
-emoji: 🚀
-colorFrom: red
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Andy1621/uniformerv2_demo/kinetics_class_index.py b/spaces/Andy1621/uniformerv2_demo/kinetics_class_index.py
deleted file mode 100644
index 597e23e72c690f2dce0525b24bdcc2a992c4d594..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformerv2_demo/kinetics_class_index.py
+++ /dev/null
@@ -1,402 +0,0 @@
-kinetics_classnames = {
- "0": "riding a bike",
- "1": "marching",
- "2": "dodgeball",
- "3": "playing cymbals",
- "4": "checking tires",
- "5": "roller skating",
- "6": "tasting beer",
- "7": "clapping",
- "8": "drawing",
- "9": "juggling fire",
- "10": "bobsledding",
- "11": "petting animal (not cat)",
- "12": "spray painting",
- "13": "training dog",
- "14": "eating watermelon",
- "15": "building cabinet",
- "16": "applauding",
- "17": "playing harp",
- "18": "balloon blowing",
- "19": "sled dog racing",
- "20": "wrestling",
- "21": "pole vault",
- "22": "hurling (sport)",
- "23": "riding scooter",
- "24": "shearing sheep",
- "25": "sweeping floor",
- "26": "eating carrots",
- "27": "skateboarding",
- "28": "dunking basketball",
- "29": "disc golfing",
- "30": "eating spaghetti",
- "31": "playing flute",
- "32": "riding mechanical bull",
- "33": "making sushi",
- "34": "trapezing",
- "35": "picking fruit",
- "36": "stretching leg",
- "37": "playing ukulele",
- "38": "tying tie",
- "39": "skydiving",
- "40": "playing cello",
- "41": "jumping into pool",
- "42": "shooting goal (soccer)",
- "43": "trimming trees",
- "44": "bookbinding",
- "45": "ski jumping",
- "46": "walking the dog",
- "47": "riding unicycle",
- "48": "shaving head",
- "49": "hopscotch",
- "50": "playing piano",
- "51": "parasailing",
- "52": "bartending",
- "53": "kicking field goal",
- "54": "finger snapping",
- "55": "dining",
- "56": "yawning",
- "57": "peeling potatoes",
- "58": "canoeing or kayaking",
- "59": "front raises",
- "60": "laughing",
- "61": "dancing macarena",
- "62": "digging",
- "63": "reading newspaper",
- "64": "hitting baseball",
- "65": "clay pottery making",
- "66": "exercising with an exercise ball",
- "67": "playing saxophone",
- "68": "shooting basketball",
- "69": "washing hair",
- "70": "lunge",
- "71": "brushing hair",
- "72": "curling hair",
- "73": "kitesurfing",
- "74": "tapping guitar",
- "75": "bending back",
- "76": "skipping rope",
- "77": "situp",
- "78": "folding paper",
- "79": "cracking neck",
- "80": "assembling computer",
- "81": "cleaning gutters",
- "82": "blowing out candles",
- "83": "shaking hands",
- "84": "dancing gangnam style",
- "85": "windsurfing",
- "86": "tap dancing",
- "87": "skiing (not slalom or crosscountry)",
- "88": "bandaging",
- "89": "push up",
- "90": "doing nails",
- "91": "punching person (boxing)",
- "92": "bouncing on trampoline",
- "93": "scrambling eggs",
- "94": "singing",
- "95": "cleaning floor",
- "96": "krumping",
- "97": "drumming fingers",
- "98": "snowmobiling",
- "99": "gymnastics tumbling",
- "100": "headbanging",
- "101": "catching or throwing frisbee",
- "102": "riding elephant",
- "103": "bee keeping",
- "104": "feeding birds",
- "105": "snatch weight lifting",
- "106": "mowing lawn",
- "107": "fixing hair",
- "108": "playing trumpet",
- "109": "flying kite",
- "110": "crossing river",
- "111": "swinging legs",
- "112": "sanding floor",
- "113": "belly dancing",
- "114": "sneezing",
- "115": "clean and jerk",
- "116": "side kick",
- "117": "filling eyebrows",
- "118": "shuffling cards",
- "119": "recording music",
- "120": "cartwheeling",
- "121": "feeding fish",
- "122": "folding clothes",
- "123": "water skiing",
- "124": "tobogganing",
- "125": "blowing leaves",
- "126": "smoking",
- "127": "unboxing",
- "128": "tai chi",
- "129": "waxing legs",
- "130": "riding camel",
- "131": "slapping",
- "132": "tossing salad",
- "133": "capoeira",
- "134": "playing cards",
- "135": "playing organ",
- "136": "playing violin",
- "137": "playing drums",
- "138": "tapping pen",
- "139": "vault",
- "140": "shoveling snow",
- "141": "playing tennis",
- "142": "getting a tattoo",
- "143": "making a sandwich",
- "144": "making tea",
- "145": "grinding meat",
- "146": "squat",
- "147": "eating doughnuts",
- "148": "ice fishing",
- "149": "snowkiting",
- "150": "kicking soccer ball",
- "151": "playing controller",
- "152": "giving or receiving award",
- "153": "welding",
- "154": "throwing discus",
- "155": "throwing axe",
- "156": "ripping paper",
- "157": "swimming butterfly stroke",
- "158": "air drumming",
- "159": "blowing nose",
- "160": "hockey stop",
- "161": "taking a shower",
- "162": "bench pressing",
- "163": "planting trees",
- "164": "pumping fist",
- "165": "climbing tree",
- "166": "tickling",
- "167": "high kick",
- "168": "waiting in line",
- "169": "slacklining",
- "170": "tango dancing",
- "171": "hurdling",
- "172": "carrying baby",
- "173": "celebrating",
- "174": "sharpening knives",
- "175": "passing American football (in game)",
- "176": "headbutting",
- "177": "playing recorder",
- "178": "brush painting",
- "179": "garbage collecting",
- "180": "robot dancing",
- "181": "shredding paper",
- "182": "pumping gas",
- "183": "rock climbing",
- "184": "hula hooping",
- "185": "braiding hair",
- "186": "opening present",
- "187": "texting",
- "188": "decorating the christmas tree",
- "189": "answering questions",
- "190": "playing keyboard",
- "191": "writing",
- "192": "bungee jumping",
- "193": "sniffing",
- "194": "eating burger",
- "195": "playing accordion",
- "196": "making pizza",
- "197": "playing volleyball",
- "198": "tasting food",
- "199": "pushing cart",
- "200": "spinning poi",
- "201": "cleaning windows",
- "202": "arm wrestling",
- "203": "changing oil",
- "204": "swimming breast stroke",
- "205": "tossing coin",
- "206": "deadlifting",
- "207": "hoverboarding",
- "208": "cutting watermelon",
- "209": "cheerleading",
- "210": "snorkeling",
- "211": "washing hands",
- "212": "eating cake",
- "213": "pull ups",
- "214": "surfing water",
- "215": "eating hotdog",
- "216": "holding snake",
- "217": "playing harmonica",
- "218": "ironing",
- "219": "cutting nails",
- "220": "golf chipping",
- "221": "shot put",
- "222": "hugging",
- "223": "playing clarinet",
- "224": "faceplanting",
- "225": "trimming or shaving beard",
- "226": "drinking shots",
- "227": "riding mountain bike",
- "228": "tying bow tie",
- "229": "swinging on something",
- "230": "skiing crosscountry",
- "231": "unloading truck",
- "232": "cleaning pool",
- "233": "jogging",
- "234": "ice climbing",
- "235": "mopping floor",
- "236": "making bed",
- "237": "diving cliff",
- "238": "washing dishes",
- "239": "grooming dog",
- "240": "weaving basket",
- "241": "frying vegetables",
- "242": "stomping grapes",
- "243": "moving furniture",
- "244": "cooking sausages",
- "245": "doing laundry",
- "246": "dying hair",
- "247": "knitting",
- "248": "reading book",
- "249": "baby waking up",
- "250": "punching bag",
- "251": "surfing crowd",
- "252": "cooking chicken",
- "253": "pushing car",
- "254": "springboard diving",
- "255": "swing dancing",
- "256": "massaging legs",
- "257": "beatboxing",
- "258": "breading or breadcrumbing",
- "259": "somersaulting",
- "260": "brushing teeth",
- "261": "stretching arm",
- "262": "juggling balls",
- "263": "massaging person's head",
- "264": "eating ice cream",
- "265": "extinguishing fire",
- "266": "hammer throw",
- "267": "whistling",
- "268": "crawling baby",
- "269": "using remote controller (not gaming)",
- "270": "playing cricket",
- "271": "opening bottle",
- "272": "playing xylophone",
- "273": "motorcycling",
- "274": "driving car",
- "275": "exercising arm",
- "276": "passing American football (not in game)",
- "277": "playing kickball",
- "278": "sticking tongue out",
- "279": "flipping pancake",
- "280": "catching fish",
- "281": "eating chips",
- "282": "shaking head",
- "283": "sword fighting",
- "284": "playing poker",
- "285": "cooking on campfire",
- "286": "doing aerobics",
- "287": "paragliding",
- "288": "using segway",
- "289": "folding napkins",
- "290": "playing bagpipes",
- "291": "gargling",
- "292": "skiing slalom",
- "293": "strumming guitar",
- "294": "javelin throw",
- "295": "waxing back",
- "296": "riding or walking with horse",
- "297": "plastering",
- "298": "long jump",
- "299": "parkour",
- "300": "wrapping present",
- "301": "egg hunting",
- "302": "archery",
- "303": "cleaning toilet",
- "304": "swimming backstroke",
- "305": "snowboarding",
- "306": "catching or throwing baseball",
- "307": "massaging back",
- "308": "blowing glass",
- "309": "playing guitar",
- "310": "playing chess",
- "311": "golf driving",
- "312": "presenting weather forecast",
- "313": "rock scissors paper",
- "314": "high jump",
- "315": "baking cookies",
- "316": "using computer",
- "317": "washing feet",
- "318": "arranging flowers",
- "319": "playing bass guitar",
- "320": "spraying",
- "321": "cutting pineapple",
- "322": "waxing chest",
- "323": "auctioning",
- "324": "jetskiing",
- "325": "drinking",
- "326": "busking",
- "327": "playing monopoly",
- "328": "salsa dancing",
- "329": "waxing eyebrows",
- "330": "watering plants",
- "331": "zumba",
- "332": "chopping wood",
- "333": "pushing wheelchair",
- "334": "carving pumpkin",
- "335": "building shed",
- "336": "making jewelry",
- "337": "catching or throwing softball",
- "338": "bending metal",
- "339": "ice skating",
- "340": "dancing charleston",
- "341": "abseiling",
- "342": "climbing a rope",
- "343": "crying",
- "344": "cleaning shoes",
- "345": "dancing ballet",
- "346": "driving tractor",
- "347": "triple jump",
- "348": "throwing ball",
- "349": "getting a haircut",
- "350": "running on treadmill",
- "351": "climbing ladder",
- "352": "blasting sand",
- "353": "playing trombone",
- "354": "drop kicking",
- "355": "country line dancing",
- "356": "changing wheel",
- "357": "feeding goats",
- "358": "tying knot (not on a tie)",
- "359": "setting table",
- "360": "shaving legs",
- "361": "kissing",
- "362": "riding mule",
- "363": "counting money",
- "364": "laying bricks",
- "365": "barbequing",
- "366": "news anchoring",
- "367": "smoking hookah",
- "368": "cooking egg",
- "369": "peeling apples",
- "370": "yoga",
- "371": "sharpening pencil",
- "372": "dribbling basketball",
- "373": "petting cat",
- "374": "playing ice hockey",
- "375": "milking cow",
- "376": "shining shoes",
- "377": "juggling soccer ball",
- "378": "scuba diving",
- "379": "playing squash or racquetball",
- "380": "drinking beer",
- "381": "sign language interpreting",
- "382": "playing basketball",
- "383": "breakdancing",
- "384": "testifying",
- "385": "making snowman",
- "386": "golf putting",
- "387": "playing didgeridoo",
- "388": "biking through snow",
- "389": "sailing",
- "390": "jumpstyle dancing",
- "391": "water sliding",
- "392": "grooming horse",
- "393": "massaging feet",
- "394": "playing paintball",
- "395": "making a cake",
- "396": "bowling",
- "397": "contact juggling",
- "398": "applying cream",
- "399": "playing badminton"
-}
\ No newline at end of file
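
The JSON removed above is a Kinetics-style mapping from class indices (as strings) to action names. For reference, a minimal sketch of how such an id-to-label map is typically used to decode model scores; the file name "kinetics_id_to_label.json" and the top_k_labels helper are illustrative assumptions, not part of the deleted Space.

import json

# Hypothetical file name; the deleted Space kept the same mapping under its own path.
with open("kinetics_id_to_label.json", "r", encoding="utf-8") as f:
    id_to_label = json.load(f)  # e.g. {"211": "washing hands", ..., "399": "playing badminton"}

def top_k_labels(scores, k=5):
    # scores: a dense list/array of per-class scores indexed by class id
    ranked = sorted(enumerate(scores), key=lambda p: p[1], reverse=True)[:k]
    return [(id_to_label[str(idx)], score) for idx, score in ranked]
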
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/focal_loss.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/focal_loss.py
deleted file mode 100644
index 763bc93bd2575c49ca8ccf20996bbd92d1e0d1a4..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/focal_loss.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', [
- 'sigmoid_focal_loss_forward', 'sigmoid_focal_loss_backward',
- 'softmax_focal_loss_forward', 'softmax_focal_loss_backward'
-])
-
-
-class SigmoidFocalLossFunction(Function):
-
- @staticmethod
- def symbolic(g, input, target, gamma, alpha, weight, reduction):
- return g.op(
- 'mmcv::MMCVSigmoidFocalLoss',
- input,
- target,
- gamma_f=gamma,
- alpha_f=alpha,
- weight_f=weight,
- reduction_s=reduction)
-
- @staticmethod
- def forward(ctx,
- input,
- target,
- gamma=2.0,
- alpha=0.25,
- weight=None,
- reduction='mean'):
-
- assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
- assert input.dim() == 2
- assert target.dim() == 1
- assert input.size(0) == target.size(0)
- if weight is None:
- weight = input.new_empty(0)
- else:
- assert weight.dim() == 1
- assert input.size(1) == weight.size(0)
- ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
- assert reduction in ctx.reduction_dict.keys()
-
- ctx.gamma = float(gamma)
- ctx.alpha = float(alpha)
- ctx.reduction = ctx.reduction_dict[reduction]
-
- output = input.new_zeros(input.size())
-
- ext_module.sigmoid_focal_loss_forward(
- input, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha)
- if ctx.reduction == ctx.reduction_dict['mean']:
- output = output.sum() / input.size(0)
- elif ctx.reduction == ctx.reduction_dict['sum']:
- output = output.sum()
- ctx.save_for_backward(input, target, weight)
- return output
-
- @staticmethod
- @once_differentiable
- def backward(ctx, grad_output):
- input, target, weight = ctx.saved_tensors
-
- grad_input = input.new_zeros(input.size())
-
- ext_module.sigmoid_focal_loss_backward(
- input,
- target,
- weight,
- grad_input,
- gamma=ctx.gamma,
- alpha=ctx.alpha)
-
- grad_input *= grad_output
- if ctx.reduction == ctx.reduction_dict['mean']:
- grad_input /= input.size(0)
- return grad_input, None, None, None, None, None
-
-
-sigmoid_focal_loss = SigmoidFocalLossFunction.apply
-
-
-class SigmoidFocalLoss(nn.Module):
-
- def __init__(self, gamma, alpha, weight=None, reduction='mean'):
- super(SigmoidFocalLoss, self).__init__()
- self.gamma = gamma
- self.alpha = alpha
- self.register_buffer('weight', weight)
- self.reduction = reduction
-
- def forward(self, input, target):
- return sigmoid_focal_loss(input, target, self.gamma, self.alpha,
- self.weight, self.reduction)
-
- def __repr__(self):
- s = self.__class__.__name__
- s += f'(gamma={self.gamma}, '
- s += f'alpha={self.alpha}, '
- s += f'reduction={self.reduction})'
- return s
-
-
-class SoftmaxFocalLossFunction(Function):
-
- @staticmethod
- def symbolic(g, input, target, gamma, alpha, weight, reduction):
- return g.op(
- 'mmcv::MMCVSoftmaxFocalLoss',
- input,
- target,
- gamma_f=gamma,
- alpha_f=alpha,
- weight_f=weight,
- reduction_s=reduction)
-
- @staticmethod
- def forward(ctx,
- input,
- target,
- gamma=2.0,
- alpha=0.25,
- weight=None,
- reduction='mean'):
-
- assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
- assert input.dim() == 2
- assert target.dim() == 1
- assert input.size(0) == target.size(0)
- if weight is None:
- weight = input.new_empty(0)
- else:
- assert weight.dim() == 1
- assert input.size(1) == weight.size(0)
- ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
- assert reduction in ctx.reduction_dict.keys()
-
- ctx.gamma = float(gamma)
- ctx.alpha = float(alpha)
- ctx.reduction = ctx.reduction_dict[reduction]
-
- channel_stats, _ = torch.max(input, dim=1)
- input_softmax = input - channel_stats.unsqueeze(1).expand_as(input)
- input_softmax.exp_()
-
- channel_stats = input_softmax.sum(dim=1)
- input_softmax /= channel_stats.unsqueeze(1).expand_as(input)
-
- output = input.new_zeros(input.size(0))
- ext_module.softmax_focal_loss_forward(
- input_softmax,
- target,
- weight,
- output,
- gamma=ctx.gamma,
- alpha=ctx.alpha)
-
- if ctx.reduction == ctx.reduction_dict['mean']:
- output = output.sum() / input.size(0)
- elif ctx.reduction == ctx.reduction_dict['sum']:
- output = output.sum()
- ctx.save_for_backward(input_softmax, target, weight)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input_softmax, target, weight = ctx.saved_tensors
- buff = input_softmax.new_zeros(input_softmax.size(0))
- grad_input = input_softmax.new_zeros(input_softmax.size())
-
- ext_module.softmax_focal_loss_backward(
- input_softmax,
- target,
- weight,
- buff,
- grad_input,
- gamma=ctx.gamma,
- alpha=ctx.alpha)
-
- grad_input *= grad_output
- if ctx.reduction == ctx.reduction_dict['mean']:
- grad_input /= input_softmax.size(0)
- return grad_input, None, None, None, None, None
-
-
-softmax_focal_loss = SoftmaxFocalLossFunction.apply
-
-
-class SoftmaxFocalLoss(nn.Module):
-
- def __init__(self, gamma, alpha, weight=None, reduction='mean'):
- super(SoftmaxFocalLoss, self).__init__()
- self.gamma = gamma
- self.alpha = alpha
- self.register_buffer('weight', weight)
- self.reduction = reduction
-
- def forward(self, input, target):
- return softmax_focal_loss(input, target, self.gamma, self.alpha,
- self.weight, self.reduction)
-
- def __repr__(self):
- s = self.__class__.__name__
- s += f'(gamma={self.gamma}, '
- s += f'alpha={self.alpha}, '
- s += f'reduction={self.reduction})'
- return s
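
The module removed above only dispatches to the compiled sigmoid/softmax focal-loss kernels loaded through ext_loader. For orientation, a plain-PyTorch sketch of the sigmoid focal loss (Lin et al.) that the CUDA path computes, using mmcv's convention of dividing the 'mean' reduction by the batch size; this is a reference approximation, not the mmcv op itself, and numerical details of the real kernel may differ.

import torch
import torch.nn.functional as F

def sigmoid_focal_loss_ref(logits, target, gamma=2.0, alpha=0.25, reduction='mean'):
    # logits: (N, C) raw scores; target: (N,) int64 class indices in [0, C)
    num_classes = logits.size(1)
    one_hot = F.one_hot(target, num_classes).to(logits.dtype)      # (N, C)
    p = torch.sigmoid(logits)
    # p_t is p for the positive class and (1 - p) for all other classes
    p_t = p * one_hot + (1 - p) * (1 - one_hot)
    alpha_t = alpha * one_hot + (1 - alpha) * (1 - one_hot)
    ce = F.binary_cross_entropy_with_logits(logits, one_hot, reduction='none')
    loss = alpha_t * (1 - p_t) ** gamma * ce                       # (N, C)
    if reduction == 'mean':
        return loss.sum() / logits.size(0)   # mmcv divides by the batch size
    if reduction == 'sum':
        return loss.sum()
    return loss
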
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/html.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/html.py
deleted file mode 100644
index f22b200c0e63d75c6def15e321a8bed14f57b64b..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/html.py
+++ /dev/null
@@ -1,991 +0,0 @@
-"""
- pygments.formatters.html
- ~~~~~~~~~~~~~~~~~~~~~~~~
-
- Formatter for HTML output.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import functools
-import os
-import sys
-import os.path
-from io import StringIO
-
-from pip._vendor.pygments.formatter import Formatter
-from pip._vendor.pygments.token import Token, Text, STANDARD_TYPES
-from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt
-
-try:
- import ctags
-except ImportError:
- ctags = None
-
-__all__ = ['HtmlFormatter']
-
-
-_escape_html_table = {
- ord('&'): '&amp;',
- ord('<'): '&lt;',
- ord('>'): '&gt;',
- ord('"'): '&quot;',
- ord("'"): '&#39;',
-}
-
-
-def escape_html(text, table=_escape_html_table):
- """Escape &, <, > as well as single and double quotes for HTML."""
- return text.translate(table)
-
-
-def webify(color):
- if color.startswith('calc') or color.startswith('var'):
- return color
- else:
- return '#' + color
-
-
-def _get_ttype_class(ttype):
- fname = STANDARD_TYPES.get(ttype)
- if fname:
- return fname
- aname = ''
- while fname is None:
- aname = '-' + ttype[-1] + aname
- ttype = ttype.parent
- fname = STANDARD_TYPES.get(ttype)
- return fname + aname
-
-
-CSSFILE_TEMPLATE = '''\
-/*
-generated by Pygments %(title)s
-
-'''
-
-DOC_HEADER_EXTERNALCSS = '''\
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
-   "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
- <title>%(title)s</title>
- <meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
- <link rel="stylesheet" href="%(cssfile)s" type="text/css">
-</head>
-<body>
-<h2>%(title)s</h2>
-
-'''
-
-DOC_FOOTER = '''\
-</body>
-</html>
-'''
-
-
-class HtmlFormatter(Formatter):
- r"""
- Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
- in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
- option.
-
- If the `linenos` option is set to ``"table"``, the ``<pre>`` is
- additionally wrapped inside a ``<table>`` which has one row and two
- cells: one containing the line numbers and one containing the code.
- Example:
-
- .. sourcecode:: html
-
[garbled HTML example elided by extraction: a table with one row and two cells — the line numbers "1" and "2" in the first cell and the highlighted code "def foo(bar): pass" in the second]
\ No newline at end of file
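
The deleted file is pip's vendored copy of Pygments' HtmlFormatter. For context, a short usage sketch of the same formatter through the public pygments package (the vendored module under pip._vendor mirrors this API); the example code string is illustrative.

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = "def foo(bar):\n    pass\n"
formatter = HtmlFormatter(linenos='table', cssclass='highlight')
html = highlight(code, PythonLexer(), formatter)   # <div><table>...</table></div> markup
css = formatter.get_style_defs('.highlight')       # CSS rules for the generated classes
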
diff --git a/spaces/innnky/nene-emotion/monotonic_align/__init__.py b/spaces/innnky/nene-emotion/monotonic_align/__init__.py
deleted file mode 100644
index 3d7009c40fea3a98168e3e3bc9ae061e91327422..0000000000000000000000000000000000000000
--- a/spaces/innnky/nene-emotion/monotonic_align/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import numpy as np
-import torch
-from .monotonic_align.core import maximum_path_c
-
-
-def maximum_path(neg_cent, mask):
- """ Cython optimized version.
- neg_cent: [b, t_t, t_s]
- mask: [b, t_t, t_s]
- """
- device = neg_cent.device
- dtype = neg_cent.dtype
- neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
- path = np.zeros(neg_cent.shape, dtype=np.int32)
-
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
- maximum_path_c(path, neg_cent, t_t_max, t_s_max)
- return torch.from_numpy(path).to(device=device, dtype=dtype)
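
maximum_path above defers the actual search to the compiled maximum_path_c Cython kernel. For reference, a pure-NumPy sketch of the per-example dynamic program used by Glow-TTS/VITS-style monotonic alignment search, which is what that kernel implements for each batch element; this is an illustrative reimplementation, not the deleted Cython code, and omits the batching over t_t_max/t_s_max.

import numpy as np

def maximum_path_numpy(value, max_neg_val=-1e9):
    # value: per-example score matrix of shape [t_t, t_s] as in the docstring above
    t_y, t_x = value.shape
    value = value.copy()
    # Forward pass: accumulate the best monotonic score into value[y, x].
    for y in range(t_y):
        for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
            v_cur = max_neg_val if x == y else value[y - 1, x]
            if x == 0:
                v_prev = 0.0 if y == 0 else max_neg_val
            else:
                v_prev = value[y - 1, x - 1]
            value[y, x] += max(v_prev, v_cur)
    # Backtrack from the last column to recover the 0/1 alignment path.
    path = np.zeros((t_y, t_x), dtype=np.int32)
    index = t_x - 1
    for y in range(t_y - 1, -1, -1):
        path[y, index] = 1
        if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
            index -= 1
    return path
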
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Fundamentals Of Electrical Engineering Leonard S Bobrow Pdf 350.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Fundamentals Of Electrical Engineering Leonard S Bobrow Pdf 350.md
deleted file mode 100644
index aa16e751c50366504557e1f1d8c3b881b77e810e..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Fundamentals Of Electrical Engineering Leonard S Bobrow Pdf 350.md
+++ /dev/null
@@ -1,8 +0,0 @@
-fundamentals of electrical engineering leonard s bobrow pdf 350
-
-F. Hüning · 2014 · Cited at 6 - It is based on the one-semester introductory course "Fundamentals of Electrical Engineering for Mechatronics" at the University. #Electrical engineering is an engineering discipline related to the study, design, . The basis of the discipline are physics and mathematics. Fundamentals of mechanics, electrical engineering and electronics Fundamentals of electrical engineering and electronics.
-Fundamentals of electrical engineering and electronics.
-Fundamentals of Electrical and Electronic Engineering. 8a78ff9644
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Heat Exchanger Design Software.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Heat Exchanger Design Software.md
deleted file mode 100644
index bf65dcd104dcfcb3a55b60e6c7dd8e5c29a895d0..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Heat Exchanger Design Software.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Heat exchanger design software
-
-Comprehensive Heat Exchanger Solutions · Rapid and accurate heat exchanger design · effective configuration of heat exchangers for your individual thermo- ... 1fdad05405
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Magento Guia Definitivo Pdf Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Magento Guia Definitivo Pdf Download.md
deleted file mode 100644
index 679ed16643790050c0c9b850be943da2cd5e90de..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Magento Guia Definitivo Pdf Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-magento guia definitivo pdf download
-
-Download file Free Book PDF speaker wiring cioyt 95047giochi it Pdf at ... Como Montar Uma Consultoria De Rh O Guia Definitivo Portuguese · Forks Over Knives ... H Andbook Of Magento Optical Data Recording Mcdaniel Terry W Victora R ... 4d29de3e1b
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Avatar 3d Ita 98 NEW.md b/spaces/inreVtussa/clothingai/Examples/Avatar 3d Ita 98 NEW.md
deleted file mode 100644
index 25cac627eec169ba09109174e022656ee9a6f60f..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Avatar 3d Ita 98 NEW.md
+++ /dev/null
@@ -1,6 +0,0 @@
-avatar 3d ita 98
-
- 8a78ff9644
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Biosystem Bts 310 User Manual.md b/spaces/inreVtussa/clothingai/Examples/Biosystem Bts 310 User Manual.md
deleted file mode 100644
index ad8b1eaf671eb73a167c16fcd7aeb73965989f54..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Biosystem Bts 310 User Manual.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-Biosystem bts 310 user manual
-
-biosystem bts 310 user manual
-
-Where to find the Biosystem bts 310 user manual?
-
-How to use the Biosystem bts 310 user manual?
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/Call.of.duty.ghosts.english.language.packl.md b/spaces/inreVtussa/clothingai/Examples/Call.of.duty.ghosts.english.language.packl.md
deleted file mode 100644
index d2eeeb38f40734b517c9dcbb0a5fc4a184a50f6c..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Call.of.duty.ghosts.english.language.packl.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Call.of.duty.ghosts.english.language.packl
-
- 4d29de3e1b
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Dimension Bot V1.3 Cracked.md b/spaces/inreVtussa/clothingai/Examples/Dimension Bot V1.3 Cracked.md
deleted file mode 100644
index 1286b34fd23870ad4dbf6cdee79aca08febf101f..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Dimension Bot V1.3 Cracked.md
+++ /dev/null
@@ -1,42 +0,0 @@
-Dimension Bot v1.3 Cracked
-
-t. BotClient
-
-But the next command is not the issue.
-
-The command after the last one should look like this.
-
-*config.sh : /home/botv1/bot/scripts/config.sh
-
-In command line I get the right result.
-
-bot@kali-linux:~/bot$ python testbot.py
-
-bash: /home/botv1/bot/scripts/config.sh: No such file or directory
-
-but when I execute this from pycharm, I get the error
-
-whereas I have checked that the file is in the right location and I get the same when I just execute the file in the terminal.
-
-I have also checked that the file is executable.
-
-A:
-
-I found a solution.
-
-I switched to the package mode and added the path to.bashrc as an entry for PATH.
-
-E-mail this article
-
-Sending your article
-
-The day after you installed the new app version of your phone or tablet, it asked for a few more permissions. These permissions can be used to give your app access to your contacts, calendar and photos.
-
-If you see any strange activity in your Google or Facebook account that doesn’t make sense, this may be why.
-
-With Apple’s iOS 6 App Store and Google’s Play store update to its corresponding app, data may have been sent to Google or Facebook’s servers. Both companies are aware of the issue and promise to do what they can to prevent it from happening again. The good news is you probably won’t see the notification again after the update.
-
-If you see anything unexpected in your Google or Facebook account, contact their customer service for help. They should be able to help you sort out what happened and what to do next. For Facebook, it’s the Help Center. For Google, try the Support chat. 4fefd39f24
-
-
-
diff --git a/spaces/jackli888/stable-diffusion-webui/test/basic_features/img2img_test.py b/spaces/jackli888/stable-diffusion-webui/test/basic_features/img2img_test.py
deleted file mode 100644
index 08c5c903e8382ef4b969b01da87bc69fb06ff2b4..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/test/basic_features/img2img_test.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import unittest
-import requests
-from gradio.processing_utils import encode_pil_to_base64
-from PIL import Image
-
-
-class TestImg2ImgWorking(unittest.TestCase):
- def setUp(self):
- self.url_img2img = "http://localhost:7860/sdapi/v1/img2img"
- self.simple_img2img = {
- "init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))],
- "resize_mode": 0,
- "denoising_strength": 0.75,
- "mask": None,
- "mask_blur": 4,
- "inpainting_fill": 0,
- "inpaint_full_res": False,
- "inpaint_full_res_padding": 0,
- "inpainting_mask_invert": False,
- "prompt": "example prompt",
- "styles": [],
- "seed": -1,
- "subseed": -1,
- "subseed_strength": 0,
- "seed_resize_from_h": -1,
- "seed_resize_from_w": -1,
- "batch_size": 1,
- "n_iter": 1,
- "steps": 3,
- "cfg_scale": 7,
- "width": 64,
- "height": 64,
- "restore_faces": False,
- "tiling": False,
- "negative_prompt": "",
- "eta": 0,
- "s_churn": 0,
- "s_tmax": 0,
- "s_tmin": 0,
- "s_noise": 1,
- "override_settings": {},
- "sampler_index": "Euler a",
- "include_init_images": False
- }
-
- def test_img2img_simple_performed(self):
- self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
-
- def test_inpainting_masked_performed(self):
- self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png"))
- self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
-
- def test_inpainting_with_inverted_masked_performed(self):
- self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png"))
- self.simple_img2img["inpainting_mask_invert"] = True
- self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
-
- def test_img2img_sd_upscale_performed(self):
- self.simple_img2img["script_name"] = "sd upscale"
- self.simple_img2img["script_args"] = ["", 8, "Lanczos", 2.0]
-
- self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
-
-
-if __name__ == "__main__":
- unittest.main()
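
The removed test drives the webui's /sdapi/v1/img2img endpoint through unittest. A minimal standalone sketch of the same request outside the test harness, assuming a webui instance running locally with --api and the usual {"images": ["<base64 png>", ...]} response shape; the placeholder init image must be replaced with real base64 data.

import base64
import io

import requests
from PIL import Image

payload = {
    "init_images": ["<base64-encoded input image>"],  # placeholder; supply your own encoded PNG
    "prompt": "example prompt",
    "denoising_strength": 0.75,
    "steps": 20,
    "width": 512,
    "height": 512,
    "sampler_index": "Euler a",
}
resp = requests.post("http://localhost:7860/sdapi/v1/img2img", json=payload, timeout=300)
resp.raise_for_status()
img_b64 = resp.json()["images"][0]
Image.open(io.BytesIO(base64.b64decode(img_b64))).save("img2img_result.png")
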
diff --git a/spaces/jiaxianustc/mbp/UltraFlow/commons/process_mols.py b/spaces/jiaxianustc/mbp/UltraFlow/commons/process_mols.py
deleted file mode 100644
index 20be6af48b390815ea334d2d02b1fcdd7cea9b85..0000000000000000000000000000000000000000
--- a/spaces/jiaxianustc/mbp/UltraFlow/commons/process_mols.py
+++ /dev/null
@@ -1,2116 +0,0 @@
-import os
-import math
-from openbabel import pybel
-from openbabel import openbabel
-import dgl
-import pickle
-import numpy as np
-import torch
-import scipy.spatial as spatial
-from functools import partial
-from prody import *
-from rdkit import Chem as Chem
-from rdkit.Chem.rdPartialCharges import ComputeGasteigerCharges
-from rdkit.Chem.rdchem import BondType as BT
-from rdkit.Chem import AllChem
-from Bio.PDB import get_surface, PDBParser
-from Bio.PDB.PDBExceptions import PDBConstructionWarning
-from scipy.special import softmax
-from scipy.spatial.transform import Rotation
-import pandas as pd
-ob_log_handler = pybel.ob.OBMessageHandler()
-ob_log_handler.SetOutputLevel(0)
-pybel.ob.obErrorLog.StopLogging()
-
-BOND_TYPES = {t: i for i, t in enumerate(BT.names.values())}
-BOND_NAMES = {i: t for i, t in enumerate(BT.names.keys())}
-
-graph_type_filename = {'atom_pocket':'valid_pocket.pdb',
- 'atom_complete':'valid_chains.pdb'}
-ResDict = {'ALA':0,'ARG':1,'ASN':2,'ASP':3,'CYS':4,
- 'GLN':5,'GLU':6,'GLY':7,'HIS':8,'ILE':9,
- 'LEU':10,'LYS':11,'MET':12,'PHE':13,'PRO':14,
- 'SER':15,'THR':16,'TRP':17,'TYR':18,'VAL':19}
-SSEDict = {'H':0,'B':1,'E':2,'G':3,'I':4,'T':5,'S':6,' ':7}
-SSEType,UNKOWN_RES = 8,20
-
-allowable_features = {
- 'possible_atomic_num_list': list(range(1, 119)) + ['misc'],
- 'possible_chirality_list': [
- 'CHI_UNSPECIFIED',
- 'CHI_TETRAHEDRAL_CW',
- 'CHI_TETRAHEDRAL_CCW',
- 'CHI_OTHER'
- ],
- 'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],
- 'possible_numring_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],
- 'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],
- 'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'],
- 'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
- 'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],
- 'possible_hybridization_list': [
- 'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc'
- ],
- 'possible_is_aromatic_list': [False, True],
- 'possible_is_in_ring3_list': [False, True],
- 'possible_is_in_ring4_list': [False, True],
- 'possible_is_in_ring5_list': [False, True],
- 'possible_is_in_ring6_list': [False, True],
- 'possible_is_in_ring7_list': [False, True],
- 'possible_is_in_ring8_list': [False, True],
- 'possible_amino_acids': ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',
- 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL', 'HIP', 'HIE', 'TPO', 'HID', 'LEV', 'MEU',
- 'PTR', 'GLV', 'CYT', 'SEP', 'HIZ', 'CYM', 'GLM', 'ASQ', 'TYS', 'CYX', 'GLZ', 'misc'],
- 'possible_atom_type_2': ['C*', 'CA', 'CB', 'CD', 'CE', 'CG', 'CH', 'CZ', 'N*', 'ND', 'NE', 'NH', 'NZ', 'O*', 'OD',
- 'OE', 'OG', 'OH', 'OX', 'S*', 'SD', 'SG', 'misc'],
- 'possible_atom_type_3': ['C', 'CA', 'CB', 'CD', 'CD1', 'CD2', 'CE', 'CE1', 'CE2', 'CE3', 'CG', 'CG1', 'CG2', 'CH2',
- 'CZ', 'CZ2', 'CZ3', 'N', 'ND1', 'ND2', 'NE', 'NE1', 'NE2', 'NH1', 'NH2', 'NZ', 'O', 'OD1',
- 'OD2', 'OE1', 'OE2', 'OG', 'OG1', 'OH', 'OXT', 'SD', 'SG', 'misc'],
-}
-
-lig_feature_dims = (list(map(len, [
- allowable_features['possible_atomic_num_list'],
- allowable_features['possible_chirality_list'],
- allowable_features['possible_degree_list'],
- allowable_features['possible_formal_charge_list'],
- allowable_features['possible_implicit_valence_list'],
- allowable_features['possible_numH_list'],
- allowable_features['possible_number_radical_e_list'],
- allowable_features['possible_hybridization_list'],
- allowable_features['possible_is_aromatic_list'],
- allowable_features['possible_numring_list'],
- allowable_features['possible_is_in_ring3_list'],
- allowable_features['possible_is_in_ring4_list'],
- allowable_features['possible_is_in_ring5_list'],
- allowable_features['possible_is_in_ring6_list'],
- allowable_features['possible_is_in_ring7_list'],
- allowable_features['possible_is_in_ring8_list'],
-])), 1) # number of scalar features
-rec_atom_feature_dims = (list(map(len, [
- allowable_features['possible_amino_acids'],
- allowable_features['possible_atomic_num_list'],
- allowable_features['possible_atom_type_2'],
- allowable_features['possible_atom_type_3'],
-])), 2)
-
-rec_residue_feature_dims = (list(map(len, [
- allowable_features['possible_amino_acids']
-])), 2)
-
-dbcg_prot_residue_feature_dims = [[21],0]
-
-def safe_index(l, e):
- """
- Return index of element e in list l. If e is not present, return the last index
- """
- try:
- return l.index(e)
- except:
- return len(l) - 1
-
-def lig_atom_featurizer_rdmol(mol):
- ComputeGasteigerCharges(mol) # they are NaN for 93 molecules in all of PDBbind. We put a 0 in that case.
- ringinfo = mol.GetRingInfo()
- atom_features_list = []
- for idx, atom in enumerate(mol.GetAtoms()):
- g_charge = atom.GetDoubleProp('_GasteigerCharge')
- atom_features_list.append([
- safe_index(allowable_features['possible_atomic_num_list'], atom.GetAtomicNum()),
- allowable_features['possible_chirality_list'].index(str(atom.GetChiralTag())),
- safe_index(allowable_features['possible_degree_list'], atom.GetTotalDegree()),
- safe_index(allowable_features['possible_formal_charge_list'], atom.GetFormalCharge()),
- safe_index(allowable_features['possible_implicit_valence_list'], atom.GetImplicitValence()),
- safe_index(allowable_features['possible_numH_list'], atom.GetTotalNumHs()),
- safe_index(allowable_features['possible_number_radical_e_list'], atom.GetNumRadicalElectrons()),
- safe_index(allowable_features['possible_hybridization_list'], str(atom.GetHybridization())),
- allowable_features['possible_is_aromatic_list'].index(atom.GetIsAromatic()),
- safe_index(allowable_features['possible_numring_list'], ringinfo.NumAtomRings(idx)),
- allowable_features['possible_is_in_ring3_list'].index(ringinfo.IsAtomInRingOfSize(idx, 3)),
- allowable_features['possible_is_in_ring4_list'].index(ringinfo.IsAtomInRingOfSize(idx, 4)),
- allowable_features['possible_is_in_ring5_list'].index(ringinfo.IsAtomInRingOfSize(idx, 5)),
- allowable_features['possible_is_in_ring6_list'].index(ringinfo.IsAtomInRingOfSize(idx, 6)),
- allowable_features['possible_is_in_ring7_list'].index(ringinfo.IsAtomInRingOfSize(idx, 7)),
- allowable_features['possible_is_in_ring8_list'].index(ringinfo.IsAtomInRingOfSize(idx, 8)),
- g_charge if not np.isnan(g_charge) and not np.isinf(g_charge) else 0.
- ])
-
- return torch.tensor(atom_features_list)
-
-def vina_gaussain_1(d):
- return torch.exp(- ((d / 0.5) ** 2))
-
-def vina_gaussain_2(d):
- return torch.exp(- ( ((d - 3) / 2.0) ** 2))
-
-def vina_repulsion(d):
- if d >= 0:
- return torch.tensor(0.0)
- return torch.tensor(d ** 2)
-
-def hydrophobic(d):
- if d < 0.5:
- return torch.tensor(1.0)
-
- if d <= 1.5:
- return torch.tensor(-d + 1.5)
-
- return torch.tensor(0.0)
-
-def hydrogen_bonding(d):
- if d < -0.7:
- return torch.tensor(1.0)
-
- if d <= 0.0:
- return torch.tensor(-(10/7) * d)
-
- return torch.tensor(0.0)
-
-
-def CusBondFeaturizer(bond):
- return [int(bond.GetBondOrder()), int(bond.IsAromatic()), int(bond.IsInRing())]
-
-def CusBondFeaturizer_new(bond):
- return [int(int(bond.GetBondOrder())==1), int(int(bond.GetBondOrder())==2), int(int(bond.GetBondOrder())==3), int(bond.IsAromatic()), int(bond.IsInRing())]
-
-class Featurizer():
- """Calcaulates atomic features for molecules. Features can encode atom type,
- native pybel properties or any property defined with SMARTS patterns
-
- Attributes
- ----------
- FEATURE_NAMES: list of strings
- Labels for features (in the same order as features)
- NUM_ATOM_CLASSES: int
- Number of atom codes
- ATOM_CODES: dict
- Dictionary mapping atomic numbers to codes
- NAMED_PROPS: list of string
- Names of atomic properties to retrieve from pybel.Atom object
- CALLABLES: list of callables
- Callables used to calculate custom atomic properties
- SMARTS: list of SMARTS strings
- SMARTS patterns defining additional atomic properties
- """
-
- def __init__(self, atom_codes=None, atom_labels=None,
- named_properties=None, save_molecule_codes=True,
- custom_properties=None, smarts_properties=None,
- smarts_labels=None):
-
- """Creates Featurizer with specified types of features. Elements of a
- feature vector will be in the following order: atom type encoding
- (defined by atom_codes), Pybel atomic properties (defined by
- named_properties), molecule code (if present), custom atomic properties
- (defined by `custom_properties`), and additional properties defined with
- SMARTS (defined with `smarts_properties`).
-
- Parameters
- ----------
- atom_codes: dict, optional
- Dictionary mapping atomic numbers to codes. It will be used for
- one-hot encoding, therefore if n different types are used, codes
- should be from 0 to n-1. Multiple atoms can have the same code,
- e.g. you can use {6: 0, 7: 1, 8: 1} to encode carbons with [1, 0]
- and nitrogens and oxygens with [0, 1] vectors. If not provided,
- default encoding is used.
- atom_labels: list of strings, optional
- Labels for atoms codes. It should have the same length as the
- number of used codes, e.g. for `atom_codes={6: 0, 7: 1, 8: 1}` you
- should provide something like ['C', 'O or N']. If not specified
- labels 'atom0', 'atom1' etc are used. If `atom_codes` is not
- specified this argument is ignored.
- named_properties: list of strings, optional
- Names of atomic properties to retrieve from pybel.Atom object. If
- not specified ['hyb', 'heavyvalence', 'heterovalence',
- 'partialcharge'] is used.
- save_molecule_codes: bool, optional (default True)
- If set to True, there will be an additional feature to save
- molecule code. It is useful when saving a molecular complex in a
- single array.
- custom_properties: list of callables, optional
- Custom functions to calculate atomic properties. Each element of
- this list should be a callable that takes pybel.Atom object and
- returns a float. If callable has `__name__` property it is used as
- feature label. Otherwise labels 'func0', 'func1' etc. are used, where
- the number is the index in the `custom_properties` list.
- smarts_properties: list of strings, optional
- Additional atomic properties defined with SMARTS patterns. These
- patterns should match a single atom. If not specified, default
- patterns are used.
- smarts_labels: list of strings, optional
- Labels for properties defined with SMARTS. Should have the same
- length as `smarts_properties`. If not specified labels 'smarts0',
- 'smarts1' etc are used. If `smarts_properties` is not specified
- this argument is ignored.
- """
-
- # Remember names of all features in the correct order
- self.FEATURE_NAMES = []
-
- if atom_codes is not None:
- if not isinstance(atom_codes, dict):
- raise TypeError('Atom codes should be dict, got %s instead'
- % type(atom_codes))
- codes = set(atom_codes.values())
- for i in range(len(codes)):
- if i not in codes:
- raise ValueError('Incorrect atom code %s' % i)
-
- self.NUM_ATOM_CLASSES = len(codes)
- self.ATOM_CODES = atom_codes
- if atom_labels is not None:
- if len(atom_labels) != self.NUM_ATOM_CLASSES:
- raise ValueError('Incorrect number of atom labels: '
- '%s instead of %s'
- % (len(atom_labels), self.NUM_ATOM_CLASSES))
- else:
- atom_labels = ['atom%s' % i for i in range(self.NUM_ATOM_CLASSES)]
- self.FEATURE_NAMES += atom_labels
- else:
- self.ATOM_CODES = {}
-
- metals = ([3, 4, 11, 12, 13] + list(range(19, 32))
- + list(range(37, 51)) + list(range(55, 84))
- + list(range(87, 104)))
-
- # List of tuples (atomic_num, class_name) with atom types to encode.
- atom_classes = [
- (5, 'B'),
- (6, 'C'),
- (7, 'N'),
- (8, 'O'),
- (15, 'P'),
- (16, 'S'),
- (34, 'Se'),
- ([9, 17, 35, 53], 'halogen'),
- (metals, 'metal')
- ]
-
- for code, (atom, name) in enumerate(atom_classes):
- if type(atom) is list:
- #
- for a in atom:
- self.ATOM_CODES[a] = code
- else:
- self.ATOM_CODES[atom] = code
- self.FEATURE_NAMES.append(name)
-
- self.NUM_ATOM_CLASSES = len(atom_classes)
-
- if named_properties is not None:
- if not isinstance(named_properties, (list, tuple, np.ndarray)):
- raise TypeError('named_properties must be a list')
- allowed_props = [prop for prop in dir(pybel.Atom)
- if not prop.startswith('__')]
- for prop_id, prop in enumerate(named_properties):
- if prop not in allowed_props:
- raise ValueError(
- 'named_properties must be in pybel.Atom attributes,'
- ' %s was given at position %s' % (prop_id, prop)
- )
- self.NAMED_PROPS = named_properties
- else:
- # pybel.Atom properties to save
- self.NAMED_PROPS = ['hyb', 'heavydegree', 'heterodegree',
- 'partialcharge']
- self.FEATURE_NAMES += self.NAMED_PROPS
-
- if not isinstance(save_molecule_codes, bool):
- raise TypeError('save_molecule_codes should be bool, got %s '
- 'instead' % type(save_molecule_codes))
- self.save_molecule_codes = save_molecule_codes
- if save_molecule_codes:
- # Remember if an atom belongs to the ligand or to the protein
- self.FEATURE_NAMES.append('molcode')
-
- self.CALLABLES = []
- if custom_properties is not None:
- for i, func in enumerate(custom_properties):
- if not callable(func):
- raise TypeError('custom_properties should be list of'
- ' callables, got %s instead' % type(func))
- name = getattr(func, '__name__', '')
- if name == '':
- name = 'func%s' % i
- self.CALLABLES.append(func)
- self.FEATURE_NAMES.append(name)
-
- if smarts_properties is None:
- # SMARTS definition for other properties
- self.SMARTS = [
- '[#6+0!$(*~[#7,#8,F]),SH0+0v2,s+0,S^3,Cl+0,Br+0,I+0]',
- '[a]',
- '[!$([#1,#6,F,Cl,Br,I,o,s,nX3,#7v5,#15v5,#16v4,#16v6,*+1,*+2,*+3])]',
- '[!$([#6,H0,-,-2,-3]),$([!H0;#7,#8,#9])]',
- '[r]'
- ]
- smarts_labels = ['hydrophobic', 'aromatic', 'acceptor', 'donor',
- 'ring']
- elif not isinstance(smarts_properties, (list, tuple, np.ndarray)):
- raise TypeError('smarts_properties must be a list')
- else:
- self.SMARTS = smarts_properties
-
- if smarts_labels is not None:
- if len(smarts_labels) != len(self.SMARTS):
- raise ValueError('Incorrect number of SMARTS labels: %s'
- ' instead of %s'
- % (len(smarts_labels), len(self.SMARTS)))
- else:
- smarts_labels = ['smarts%s' % i for i in range(len(self.SMARTS))]
-
- # Compile patterns
- self.compile_smarts()
- self.FEATURE_NAMES += smarts_labels
-
- def compile_smarts(self):
- self.__PATTERNS = []
- for smarts in self.SMARTS:
- self.__PATTERNS.append(pybel.Smarts(smarts))
-
- def encode_num(self, atomic_num):
- """Encode atom type with a binary vector. If atom type is not included in
- the `atom_classes`, its encoding is an all-zeros vector.
-
- Parameters
- ----------
- atomic_num: int
- Atomic number
-
- Returns
- -------
- encoding: np.ndarray
- Binary vector encoding atom type (one-hot or null).
- """
-
- if not isinstance(atomic_num, int):
- raise TypeError('Atomic number must be int, %s was given'
- % type(atomic_num))
-
- encoding = np.zeros(self.NUM_ATOM_CLASSES)
- try:
- encoding[self.ATOM_CODES[atomic_num]] = 1.0
- except:
- pass
- return encoding
-
- def find_smarts(self, molecule):
- """Find atoms that match SMARTS patterns.
-
- Parameters
- ----------
- molecule: pybel.Molecule
-
- Returns
- -------
- features: np.ndarray
- NxM binary array, where N is the number of atoms in the `molecule`
- and M is the number of patterns. `features[i, j]` == 1.0 if i'th
- atom has j'th property
- """
-
- if not isinstance(molecule, pybel.Molecule):
- raise TypeError('molecule must be pybel.Molecule object, %s was given'
- % type(molecule))
-
- features = np.zeros((len(molecule.atoms), len(self.__PATTERNS)))
-
- for (pattern_id, pattern) in enumerate(self.__PATTERNS):
- atoms_with_prop = np.array(list(*zip(*pattern.findall(molecule))),
- dtype=int) - 1
- features[atoms_with_prop, pattern_id] = 1.0
- return features
-
- def get_features(self, molecule, molcode=None):
- """Get coordinates and features for all heavy atoms in the molecule.
-
- Parameters
- ----------
- molecule: pybel.Molecule
- molcode: float, optional
- Molecule type. You can use it to encode whether an atom belongs to
- the ligand (1.0) or to the protein (-1.0) etc.
-
- Returns
- -------
- coords: np.ndarray, shape = (N, 3)
- Coordinates of all heavy atoms in the `molecule`.
- features: np.ndarray, shape = (N, F)
- Features of all heavy atoms in the `molecule`: atom type
- (one-hot encoding), pybel.Atom attributes, type of a molecule
- (e.g protein/ligand distinction), and other properties defined with
- SMARTS patterns
- """
-
- if not isinstance(molecule, pybel.Molecule):
- raise TypeError('molecule must be pybel.Molecule object,'
- ' %s was given' % type(molecule))
- if molcode is None:
- if self.save_molecule_codes is True:
- raise ValueError('save_molecule_codes is set to True,'
- ' you must specify code for the molecule')
- elif not isinstance(molcode, (float, int)):
- raise TypeError('molcode must be float, %s was given'
- % type(molcode))
-
- coords = []
- features = []
- heavy_atoms = []
-
- for i, atom in enumerate(molecule):
- # ignore hydrogens and dummy atoms (they have atomicnum set to 0)
- if atom.atomicnum > 1:
- heavy_atoms.append(i)
- coords.append(atom.coords)
-
- features.append(np.concatenate((
- self.encode_num(atom.atomicnum),
- [atom.__getattribute__(prop) for prop in self.NAMED_PROPS],
- [func(atom) for func in self.CALLABLES],
- )))
-
- coords = np.array(coords, dtype=np.float32)
- features = np.array(features, dtype=np.float32)
- if self.save_molecule_codes:
- features = np.hstack((features,
- molcode * np.ones((len(features), 1))))
- features = np.hstack([features,
- self.find_smarts(molecule)[heavy_atoms]])
-
- if np.isnan(features).any():
- raise RuntimeError('Got NaN when calculating features')
-
- return coords, features
-
- def get_features_CSAR(self, molecule, protein_idxs, ligand_idxs, molcode=None):
- """Get coordinates and features for all heavy atoms in the molecule.
-
- Parameters
- ----------
- molecule: pybel.Molecule
- molcode: float, optional
- Molecule type. You can use it to encode whether an atom belongs to
- the ligand (1.0) or to the protein (-1.0) etc.
-
- Returns
- -------
- coords: np.ndarray, shape = (N, 3)
- Coordinates of all heavy atoms in the `molecule`.
- features: np.ndarray, shape = (N, F)
- Features of all heavy atoms in the `molecule`: atom type
- (one-hot encoding), pybel.Atom attributes, type of a molecule
- (e.g protein/ligand distinction), and other properties defined with
- SMARTS patterns
- """
-
- if not isinstance(molecule, pybel.Molecule):
- raise TypeError('molecule must be pybel.Molecule object,'
- ' %s was given' % type(molecule))
- if molcode is None:
- if self.save_molecule_codes is True:
- raise ValueError('save_molecule_codes is set to True,'
- ' you must specify code for the molecule')
- elif not isinstance(molcode, (float, int)):
- raise TypeError('molcode must be float, %s was given'
- % type(molcode))
-
- coords,protein_coords,ligand_coords = [],[],[]
- features,protein_features,ligand_features = [],[],[]
- heavy_atoms,protein_heavy_atoms,ligand_heavy_atoms = [],[],[]
-
- for i, atom in enumerate(molecule):
- # ignore hydrogens and dummy atoms (they have atomicnum set to 0)
- index = i
- if atom.atomicnum > 1:
- heavy_atoms.append(i)
- coords.append(atom.coords)
-
- features.append(np.concatenate((
- self.encode_num(atom.atomicnum),
- [atom.__getattribute__(prop) for prop in self.NAMED_PROPS],
- [func(atom) for func in self.CALLABLES],
- )))
- if index in protein_idxs:
- protein_heavy_atoms.append(i)
- protein_coords.append(atom.coords)
- protein_features.append(np.concatenate((
- self.encode_num(atom.atomicnum),
- [atom.__getattribute__(prop) for prop in self.NAMED_PROPS],
- [func(atom) for func in self.CALLABLES],
- )))
- elif index in ligand_idxs:
- ligand_heavy_atoms.append(i)
- ligand_coords.append(atom.coords)
- ligand_features.append(np.concatenate((
- self.encode_num(atom.atomicnum),
- [atom.__getattribute__(prop) for prop in self.NAMED_PROPS],
- [func(atom) for func in self.CALLABLES],
- )))
-
- coords,protein_coords,ligand_coords = np.array(coords, dtype=np.float32),\
- np.array(protein_coords, dtype=np.float32),\
- np.array(ligand_coords, dtype=np.float32)
- features = np.array(features, dtype=np.float32)
- if self.save_molecule_codes:
- features = np.hstack((features,
- molcode * np.ones((len(features), 1))))
- features = np.hstack([features,
- self.find_smarts(molecule)[heavy_atoms]])
- protein_features = np.hstack([protein_features,
- self.find_smarts(molecule)[protein_heavy_atoms]])
- ligand_features = np.hstack([ligand_features,
- self.find_smarts(molecule)[ligand_heavy_atoms]])
-
- if np.isnan(features).any():
- raise RuntimeError('Got NaN when calculating features')
-
- return coords, features, protein_coords, protein_features, ligand_coords, ligand_features
-
- def to_pickle(self, fname='featurizer.pkl'):
- """Save featurizer in a given file. Featurizer can be restored with
- `from_pickle` method.
-
- Parameters
- ----------
- fname: str, optional
- Path to file in which featurizer will be saved
- """
-
- # patterns can't be pickled, we need to temporarily remove them
- patterns = self.__PATTERNS[:]
- del self.__PATTERNS
- try:
- with open(fname, 'wb') as f:
- pickle.dump(self, f)
- finally:
- self.__PATTERNS = patterns[:]
-
- @staticmethod
- def from_pickle(fname):
- """Load pickled featurizer from a given file
-
- Parameters
- ----------
- fname: str, optional
- Path to file with saved featurizer
-
- Returns
- -------
- featurizer: Featurizer object
- Loaded featurizer
- """
- with open(fname, 'rb') as f:
- featurizer = pickle.load(f)
- featurizer.compile_smarts()
- return featurizer
-
-featurizer = Featurizer(save_molecule_codes=False)
-
-def get_labels_from_names(lables_path,names):
- with open(lables_path, 'rb') as f:
- lines = f.read().decode().strip().split('\n')[6:]
- res = {}
- for line in lines:
- temp = line.split()
- name, score = temp[0], float(temp[3])
- res[name] = score
- labels = []
- for name in names:
- labels.append(res[name])
- return labels
-
-def get_labels_from_names_csar(lables_path,names):
- with open(lables_path, 'rb') as f:
- lines = f.read().decode().strip().split('\n')[1:]
- res = {}
- for line in lines:
- temp = [x.strip() for x in line.split(',')]
- name, score = temp[1], float(temp[2])
- res[name] = score
- labels = []
- for name in names:
- labels.append(res[name])
- return labels
-
-def get_lig_coords_ground_truth_from_names(lables_path,names):
-
- return
-
-def lig_atom_type_obmol(obmol):
- AtomIndex = [atom.atomicnum for atom in obmol if atom.atomicnum > 1]
- return torch.tensor(AtomIndex,dtype=torch.int64)
-
-def lig_atom_type_rdmol(rdmol):
- AtomIndex = [atom.GetAtomicNum() for atom in rdmol.GetAtoms()]
- return torch.tensor(AtomIndex,dtype=torch.int64)
-
-def get_bonded_edges_obmol(pocket):
- edge_l = []
- idx_map = [-1]*(len(pocket.atoms)+1)
- idx_new = 0
- for atom in pocket:
- edges = []
- a1_sym = atom.atomicnum
- a1 = atom.idx
- if a1_sym == 1:
- continue
- idx_map[a1] = idx_new
- idx_new += 1
- for natom in openbabel.OBAtomAtomIter(atom.OBAtom):
- if natom.GetAtomicNum() == 1:
- continue
- a2 = natom.GetIdx()
- bond = openbabel.OBAtom.GetBond(natom,atom.OBAtom)
- bond_type = CusBondFeaturizer_new(bond)
- edges.append((a1,a2,bond_type))
- edge_l += edges
- edge_l_new = []
- for a1,a2,t in edge_l:
- a1_, a2_ = idx_map[a1], idx_map[a2]
- assert((a1_!=-1)&(a2_!=-1))
- edge_l_new.append((a1_,a2_,t))
- return edge_l_new
-
-def get_bonded_edges_rdmol(rdmol):
- row, col, edge_type = [], [], []
- for bond in rdmol.GetBonds():
- start, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
- row += [start, end]
- col += [end, start]
- edge_type += 2 * [BOND_TYPES[bond.GetBondType()]]
- return zip(row,col,edge_type)
-
-def D3_info(a, b, c):
- # angle between the two vectors in 3D space
- ab = b - a # vector ab
- ac = c - a # vector ac
- cosine_angle = np.dot(ab, ac) / (np.linalg.norm(ab) * np.linalg.norm(ac))
- cosine_angle = cosine_angle if cosine_angle >= -1.0 else -1.0
- angle = np.arccos(cosine_angle)
- # area of the triangle
- ab_ = np.sqrt(np.sum(ab ** 2))
- ac_ = np.sqrt(np.sum(ac ** 2)) # Euclidean distance
- area = 0.5 * ab_ * ac_ * np.sin(angle)
- return np.degrees(angle), area, ac_
-
-def D3_info_cal(nodes_ls, g):
- if len(nodes_ls) > 2:
- Angles = []
- Areas = []
- Distances = []
- for node_id in nodes_ls[2:]:
- angle, area, distance = D3_info(g.ndata['pos'][nodes_ls[0]].numpy(), g.ndata['pos'][nodes_ls[1]].numpy(),
- g.ndata['pos'][node_id].numpy())
- Angles.append(angle)
- Areas.append(area)
- Distances.append(distance)
- return [np.max(Angles) * 0.01, np.sum(Angles) * 0.01, np.mean(Angles) * 0.01, np.max(Areas), np.sum(Areas),
- np.mean(Areas),
- np.max(Distances) * 0.1, np.sum(Distances) * 0.1, np.mean(Distances) * 0.1]
- else:
- return [0, 0, 0, 0, 0, 0, 0, 0, 0]
-
-def bond_feature(g):
- src_nodes, dst_nodes = g.find_edges(range(g.number_of_edges()))
- src_nodes, dst_nodes = src_nodes.tolist(), dst_nodes.tolist()
- neighbors_ls = []
- for i, src_node in enumerate(src_nodes):
- tmp = [src_node, dst_nodes[i]] # the source node id and destination id of an edge
- neighbors = g.predecessors(src_node).tolist()
- neighbors.remove(dst_nodes[i])
- tmp.extend(neighbors)
- neighbors_ls.append(tmp)
- D3_info_ls = list(map(partial(D3_info_cal, g=g), neighbors_ls))
- D3_info_th = torch.tensor(D3_info_ls, dtype=torch.float)
- # D3_info_th = torch.cat([g.edata['e'], D3_info_th], dim=-1)
- return D3_info_th
-
-def read_molecules_crossdock(lig_path, prot_path, ligcut, protcut, lig_type, prot_graph_type, dataset_path, chain_cut=5.0):
- lig_path = os.path.join(dataset_path, lig_path)
- prot_path = os.path.join(dataset_path, prot_path)
- if lig_type=='openbabel':
- m_lig = next(pybel.readfile('sdf', lig_path))
- lig_coords, lig_features = featurizer.get_features(m_lig)
- lig_edges = get_bonded_edges_obmol(m_lig) if ligcut is None else None
- lig_node_type = lig_atom_type_obmol(m_lig)
- elif lig_type=='rdkit':
- m_lig = read_rdmol(lig_path, sanitize=True, remove_hs=True)
- try:
- assert m_lig is not None
- except:
- raise ValueError(f'sanitize error : {lig_path}')
-
- conf = m_lig.GetConformer()
- lig_coords, lig_features = conf.GetPositions(), lig_atom_featurizer_rdmol(m_lig)
- lig_edges = get_bonded_edges_rdmol(m_lig) if ligcut is None else None
- lig_node_type = lig_atom_type_rdmol(m_lig)
-
- prot_complex = parsePDB(prot_path)
- prot_structure_no_water = prot_complex.select('protein')
- if chain_cut is not None:
- prot_valid_chains = prot_structure_no_water.select(f'same chain as within {chain_cut} of ligand', ligand=lig_coords)
- else:
- prot_valid_chains = prot_structure_no_water
-
- prot_valid_pocket = prot_structure_no_water.select('same residue as within 12 of ligand', ligand=lig_coords)
- prot_alpha_c = prot_valid_chains.select('calpha')
- prot_pocket_alpha_c = prot_valid_pocket.select('calpha')
- alpha_c_sec_features = None
- prot_pocket_alpha_c_sec_features = None
- alpha_c_coords, c_coords, n_coords, complete_residues = [], [], [], [] # complete_residues marks residues that have CA, C and N atoms
-
- if prot_graph_type.startswith('atom'):
- m_prot = prot_valid_pocket if prot_graph_type.endswith('pocket') else prot_valid_chains
- sec_features = None
- prot_coords, prot_features = featurizer.get_features(m_prot)
- prot_edges = get_bonded_edges_obmol(m_prot) if protcut is None else None
- prot_node_type = lig_atom_type_obmol(m_prot)
-
- elif prot_graph_type.startswith('residue'):
- alpha_c_sec_features = None
- prot_pocket_alpha_c_sec_features = None
- m_prot = prot_pocket_alpha_c if prot_graph_type.endswith('pocket') else prot_alpha_c
- m_prot_complete = prot_valid_pocket if prot_graph_type.endswith('pocket') else prot_valid_chains
- sec_features = prot_pocket_alpha_c_sec_features if prot_graph_type.endswith('pocket') else alpha_c_sec_features
-
- prot_coords, prot_features = prot_alpha_c_featurizer(m_prot)
- prot_node_type = prot_residue_type(m_prot)
- prot_edges = None
- hv = m_prot_complete.getHierView()
- for chain in hv:
- for i, residue in enumerate(chain):
- alpha_c_coord, c_coord, n_coord = None, None, None
- for atom in residue:
- if atom.getName() == 'CA':
- alpha_c_coord = atom.getCoords()
- if atom.getName() == 'C':
- c_coord = atom.getCoords()
- if atom.getName() == 'N':
- n_coord = atom.getCoords()
-
- if alpha_c_coord is not None and c_coord is not None and n_coord is not None:
- alpha_c_coords.append(alpha_c_coord)
- c_coords.append(c_coord)
- n_coords.append(n_coord)
- complete_residues.append(True)
- else:
- complete_residues.append(False)
- assert len(complete_residues) == len(prot_coords)
-
- prot_coords = prot_coords[complete_residues]
- prot_features = prot_features[complete_residues]
- prot_node_type = prot_node_type[complete_residues]
- if sec_features is not None:
- sec_features = sec_features[complete_residues]
- assert len(sec_features) == len(prot_coords)
-
- assert len(alpha_c_coords) == len(prot_coords)
- assert len(c_coords) == len(prot_coords)
- assert len(n_coords) == len(prot_coords)
-
- else:
- raise ValueError("error prot_graph_type")
-
- return lig_coords, lig_features, lig_edges, lig_node_type, \
- prot_coords, prot_features, prot_edges, prot_node_type, sec_features,\
- np.array(alpha_c_coords), np.array(c_coords), np.array(n_coords)
-
-def read_ligands_chembl_smina_multi_pose(name, valid_ligand_index, dataset_path, ligcut, lig_type='openbabel', top_N=2,
- docking_type='site_specific'):
- valid_lig_multi_coords_list, valid_lig_features_list, valid_lig_edges_list, valid_lig_node_type_list, valid_index_list = [], [], [], [], []
-
- for index, valid in enumerate(valid_ligand_index):
- if docking_type == 'site_specific':
- lig_paths_mol2 = [os.path.join(dataset_path, name, 'ligand_smina_poses', f'{index}.mol2')]
- elif docking_type == 'blind':
- lig_paths_mol2 = [os.path.join(dataset_path, name, 'ligand_smina_poses', f'{index}_blind.mol2')]
- elif docking_type == 'all':
- lig_paths_mol2 = [os.path.join(dataset_path, name, 'ligand_smina_poses', f'{index}.mol2')] +\
- [os.path.join(dataset_path, name, 'ligand_smina_poses', f'{index}_blind.mol2')]
-
- if valid:
- if lig_type == 'openbabel':
- lig_multi_coords = []
- previou_atom_num = -1
- for lig_path_mol2 in lig_paths_mol2:
- m_lig_iter = pybel.readfile('mol2', lig_path_mol2)
- c_m_lig = 0
- while c_m_lig < top_N:
- try:
- m_lig = next(m_lig_iter)
- lig_coords, lig_features = featurizer.get_features(m_lig)
- if previou_atom_num != -1:
- assert len(lig_coords) == previou_atom_num
- else:
- previou_atom_num = len(lig_coords)
- lig_edges = get_bonded_edges_obmol(m_lig)
- lig_node_type = lig_atom_type_obmol(m_lig)
- lig_multi_coords.append(lig_coords)
- c_m_lig += 1
- except:
- print(f'{lig_path_mol2} only has {c_m_lig} poses')
- break
-
- valid_lig_multi_coords_list.append(lig_multi_coords)
- valid_lig_features_list.append(lig_features)
- valid_lig_edges_list.append(lig_edges)
- valid_lig_node_type_list.append(lig_node_type)
- valid_index_list.append(index)
-
- return valid_lig_multi_coords_list, valid_lig_features_list, valid_lig_edges_list, valid_lig_node_type_list, valid_index_list
-
-
-def read_ligands_chembl_smina(name, valid_ligand_index, dataset_path, ligcut, lig_type='openbabel',docking_type='site_specific'):
- valid_lig_coords_list, valid_lig_features_list, valid_lig_edges_list, valid_lig_node_type_list, valid_index_list = [], [], [], [], []
-
- for index, valid in enumerate(valid_ligand_index):
- lig_path_mol2 = os.path.join(dataset_path, name, 'ligand_smina_poses', f'{index}.mol2')
- if docking_type == 'blind':
- lig_path_mol2 = os.path.join(dataset_path, name, 'ligand_smina_poses', f'{index}_blind.mol2')
- if valid:
- if lig_type == 'openbabel':
- try:
- m_lig = next(pybel.readfile('mol2', lig_path_mol2))
- except:
- print(lig_path_mol2)
- lig_coords, lig_features = featurizer.get_features(m_lig)
- lig_edges = get_bonded_edges_obmol(m_lig)
- lig_node_type = lig_atom_type_obmol(m_lig)
-
- valid_lig_coords_list.append(lig_coords)
- valid_lig_features_list.append(lig_features)
- valid_lig_edges_list.append(lig_edges)
- valid_lig_node_type_list.append(lig_node_type)
- valid_index_list.append(index)
- elif lig_type == 'rdkit':
- m_lig = read_rdmol(lig_path_mol2)
- conf = m_lig.GetConformer()
-
- lig_coords, lig_features = conf.GetPositions(), lig_atom_featurizer_rdmol(m_lig)
- lig_edges = get_bonded_edges_rdmol(m_lig)
- lig_node_type = lig_atom_type_rdmol(m_lig)
-
- valid_lig_coords_list.append(lig_coords)
- valid_lig_features_list.append(lig_features)
- valid_lig_edges_list.append(lig_edges)
- valid_lig_node_type_list.append(lig_node_type)
- valid_index_list.append(index)
-
- return valid_lig_coords_list, valid_lig_features_list, valid_lig_edges_list, valid_lig_node_type_list, valid_index_list
-
-def read_ligands(name, dataset_path, ligcut, lig_type='openbabel'):
- #########################Read Ligand########################################################
- lig_path_sdf = os.path.join(dataset_path, name, 'visualize_dir', 'total_vs.sdf')
- valid_lig_coords_list, valid_lig_features_list, valid_lig_edges_list, valid_lig_node_type_list, valid_index_list = [], [], [], [], []
- if lig_type == 'openbabel':
- m_ligs = pybel.readfile('sdf', lig_path_sdf)
- for index, m_lig in enumerate(m_ligs):
- try:
- lig_coords, lig_features = featurizer.get_features(m_lig)
- lig_edges = get_bonded_edges_obmol(m_lig)
- lig_node_type = lig_atom_type_obmol(m_lig)
-
- valid_lig_coords_list.append(lig_coords)
- valid_lig_features_list.append(lig_features)
- valid_lig_edges_list.append(lig_edges)
- valid_lig_node_type_list.append(lig_node_type)
- valid_index_list.append(index)
- except:
- print(f'{index} error')
- elif lig_type == 'rdkit':
- supplier = Chem.SDMolSupplier(lig_path_sdf, sanitize=True, removeHs=False)
- for index, m_lig in enumerate(supplier):
- try:
- conf = m_lig.GetConformer()
- lig_coords, lig_features = conf.GetPositions(), lig_atom_featurizer_rdmol(m_lig)
- lig_edges = get_bonded_edges_rdmol(m_lig)
- lig_node_type = lig_atom_type_rdmol(m_lig)
-
- valid_lig_coords_list.append(lig_coords)
- valid_lig_features_list.append(lig_features)
- valid_lig_edges_list.append(lig_edges)
- valid_lig_node_type_list.append(lig_node_type)
- valid_index_list.append(index)
- except:
- print(f'{index} error')
-
- return valid_lig_coords_list, valid_lig_features_list, valid_lig_edges_list, valid_lig_node_type_list, valid_index_list
-
-def read_casf_ligands(name, dataset_path, ligcut, lig_type='openbabel'):
- lig_files = os.listdir(os.path.join(dataset_path, name))
- assert lig_type == 'openbabel'
- lig_multi_name_list, lig_multi_coords_list, lig_features_list, lig_edges_list, lig_node_type_list = [], [], [], [], []
- for lig_file in lig_files:
- lig_name = lig_file.split('_')[-1][:4]
- file_type = lig_file.split('.')[-1]
- lig_path = os.path.join(dataset_path, name, lig_file)
- m_ligs = pybel.readfile(file_type, lig_path)
-
- multi_coords, multi_names = [], []
- for index, m_lig in enumerate(m_ligs):
- lig_coords, lig_features = featurizer.get_features(m_lig)
- if index == 0:
- lig_edges = get_bonded_edges_obmol(m_lig)
- lig_node_type = lig_atom_type_obmol(m_lig)
- multi_coords.append(lig_coords)
- multi_names.append(f'{lig_name}_ligand_{index+1}')
-
- lig_multi_name_list.append(multi_names)
- lig_multi_coords_list.append(multi_coords)
- lig_features_list.append(lig_features)
- lig_edges_list.append(lig_edges)
- lig_node_type_list.append(lig_node_type)
-
- return lig_multi_name_list, lig_multi_coords_list, lig_features_list, lig_edges_list, lig_node_type_list
-
-def read_proteins(name, dataset_path, prot_graph_type, protcut):
- #########################Read Protein########################################################
- try:
- prot_valid_chains = parsePDB(os.path.join(dataset_path, name, f'{name}_valid_chains.pdb'))
- except:
- raise ValueError(os.path.join(dataset_path, name, f'{name}_valid_chains.pdb'))
- prot_alpha_c = prot_valid_chains.select('calpha')
- alpha_c_coords, c_coords, n_coords = [], [], []
- # writePDB(os.path.join(dataset_path, name, f'{name}_valid_chains.pdb'), prot_valid_chains)
-
- if prot_graph_type.startswith('atom'):
- prot_path = os.path.join(dataset_path, name, f'{name}_{graph_type_filename[prot_graph_type]}')
- m_prot = next(pybel.readfile('pdb', prot_path))
- sec_features = None
- prot_coords_valid, prot_features_valid = featurizer.get_features(m_prot)
- prot_edges = get_bonded_edges_obmol(m_prot) if protcut is None else None
- prot_node_type = lig_atom_type_obmol(m_prot)
-
- elif prot_graph_type.startswith('residue'):
- alpha_c_sec_features = None
- m_prot = prot_alpha_c
- m_prot_complete = prot_valid_chains
- sec_features = alpha_c_sec_features
-
- prot_coords, prot_features = prot_alpha_c_featurizer(m_prot)
- prot_node_type = prot_residue_type(m_prot)
- prot_edges = None
- hv = m_prot_complete.getHierView()
- index = 0
- valid_index, prot_coords_valid, prot_features_valid = [], [], []
- for chain in hv:
- for i, residue in enumerate(chain):
- alpha_c_coord, c_coord, n_coord = None, None, None
- for atom in residue:
- if atom.getName() == 'CA':
- alpha_c_coord = atom.getCoords()
-
- if atom.getName() == 'C':
- c_coord = atom.getCoords()
-
- if atom.getName() == 'N':
- n_coord = atom.getCoords()
-
- if alpha_c_coord is not None and c_coord is not None and n_coord is not None:
- alpha_c_coords.append(alpha_c_coord)
- c_coords.append(c_coord)
- n_coords.append(n_coord)
- valid_index.append(index)
- index += 1
-
- prot_coords_valid = prot_coords[valid_index]
- prot_features_valid = prot_features[valid_index]
-
- else:
- raise ValueError("error prot_graph_type")
-
- return prot_coords_valid, prot_features_valid, prot_edges, prot_node_type, sec_features,\
- np.array(alpha_c_coords), np.array(c_coords), np.array(n_coords)
-
-
-def read_molecules(name, dataset_path, prot_graph_type, ligcut, protcut, lig_type='openbabel',init_type='redock_init',
- chain_cut=5.0, p2rank_base=None, binding_site_type='ligand_center', LAS_mask=True,
- keep_hs_before_rdkit_generate=False, rd_gen_maxIters=200):
- #########################Read Ligand########################################################
- lig_path_mol2 = os.path.join(dataset_path, name, f'{name}_ligand.mol2')
- lig_path_sdf = os.path.join(dataset_path, name, f'{name}_ligand.sdf')
- if lig_type == 'openbabel':
- m_lig = next(pybel.readfile('mol2', lig_path_mol2))
- lig_coords, lig_features = featurizer.get_features(m_lig)
- lig_edges = get_bonded_edges_obmol(m_lig)
- lig_node_type = lig_atom_type_obmol(m_lig)
- elif lig_type == 'rdkit':
- m_lig = read_rdmol(lig_path_sdf, sanitize=True, remove_hs=True)
- if m_lig == None: # read mol2 file if sdf file cannot be sanitized
- m_lig = read_rdmol(lig_path_mol2, sanitize=True, remove_hs=True)
-
- conf = m_lig.GetConformer()
- lig_coords, lig_features = conf.GetPositions(), lig_atom_featurizer_rdmol(m_lig)
- lig_edges = get_bonded_edges_rdmol(m_lig)
- lig_node_type = lig_atom_type_rdmol(m_lig)
-
- #########################Get Ligand Rdkit Init Coordinates###################################
- if init_type == 'rdkit_init':
- rd_lig = read_rdmol(lig_path_sdf, sanitize=True, remove_hs=not keep_hs_before_rdkit_generate)
- if rd_lig == None: # read mol2 file if sdf file cannot be sanitized
- rd_lig = read_rdmol(lig_path_mol2, sanitize=True, remove_hs=not keep_hs_before_rdkit_generate)
- try:
- lig_init_coords = get_rdkit_coords(rd_lig, sanitize=True, remove_hs=True, maxIters=rd_gen_maxIters)
- except Exception as e:
- lig_init_coords = lig_coords
- with open(f'temp_create_dataset_rdkit_timesplit_no_lig_or_rec_overlap_train_remove_hs_before_generate_{not keep_hs_before_rdkit_generate}.log', 'a') as f:
- f.write('Generating RDKit conformer failed for \n')
- f.write(name)
- f.write('\n')
- f.write(str(e))
- f.write('\n')
- f.flush()
-
- assert len(lig_init_coords) == len(lig_coords)
-
- rdlig_node_type = lig_atom_type_rdmol(rd_lig)
-
- # remove all h
- if lig_type == 'openbabel':
- lig_init_coords = lig_init_coords[rdlig_node_type != 1]
- try:
- if len(lig_init_coords)!=len(lig_coords):
- raise ValueError('{} {}!={}'.format(name, len(lig_init_coords), len(lig_coords)))
- except ValueError as e:
- print("error raise:", repr(e))
- raise
- elif init_type == 'redock_init':
- lig_init_coords = lig_coords
- elif init_type == 'random_init':
- lig_init_coords = np.random.randn(len(lig_coords),3)
- else:
- lig_init_coords = None
-
- # random location and orientation
- if lig_init_coords is not None:
- rot_T, rot_b = random_rotation_translation()
- mean_to_remove = lig_init_coords.mean(axis=0, keepdims=True)
- lig_init_coords = (rot_T @ (lig_init_coords - mean_to_remove).T).T + rot_b
-
- #########################Read Protein########################################################
- if os.path.exists(os.path.join(dataset_path, name, f'{name}_protein_processed.pdb')):
- prot_complex = parsePDB(os.path.join(dataset_path, name, f'{name}_protein_processed.pdb'))
- else:
- prot_complex = parsePDB(os.path.join(dataset_path, name, f'{name}_protein.pdb'))
- prot_structure_no_water = prot_complex.select('protein')
- if chain_cut is not None:
- prot_valid_chains = prot_structure_no_water.select(f'same chain as within {chain_cut} of ligand', ligand=lig_coords)
- else:
- prot_valid_chains = prot_structure_no_water
-
- prot_valid_pocket = prot_structure_no_water.select('same residue as within 12 of ligand', ligand=lig_coords)
- try:
- prot_alpha_c = prot_valid_chains.select('calpha')
- prot_pocket_alpha_c = prot_valid_pocket.select('calpha')
- except:
- raise ValueError(f'{name} process pdb error')
- alpha_c_sec_features = None
- prot_pocket_alpha_c_sec_features = None
- alpha_c_coords, c_coords, n_coords = [], [], []
- writePDB(os.path.join(dataset_path, name, f'{name}_valid_chains.pdb'), prot_valid_chains)
- writePDB(os.path.join(dataset_path, name, f'{name}_valid_pocket.pdb'), prot_valid_pocket)
-
- if prot_graph_type.startswith('atom'):
- prot_path = os.path.join(dataset_path, name, f'{name}_{graph_type_filename[prot_graph_type]}')
- m_prot = next(pybel.readfile('pdb', prot_path))
- sec_features = None
- prot_coords_valid, prot_features_valid = featurizer.get_features(m_prot)
- prot_edges = get_bonded_edges_obmol(m_prot) if protcut is None else None
- prot_node_type = lig_atom_type_obmol(m_prot)
-
- elif prot_graph_type.startswith('residue'):
- alpha_c_sec_features = None
- prot_pocket_alpha_c_sec_features = None
- m_prot = prot_pocket_alpha_c if prot_graph_type.endswith('pocket') else prot_alpha_c
- m_prot_complete = prot_valid_pocket if prot_graph_type.endswith('pocket') else prot_valid_chains
- sec_features = prot_pocket_alpha_c_sec_features if prot_graph_type.endswith('pocket') else alpha_c_sec_features
-
- prot_coords, prot_features = prot_alpha_c_featurizer(m_prot)
- prot_node_type = prot_residue_type(m_prot)
- prot_edges = None
- hv = m_prot_complete.getHierView()
- index = 0
- valid_index, prot_coords_valid, prot_features_valid = [], [], []
- for chain in hv:
- for i, residue in enumerate(chain):
- alpha_c_coord, c_coord, n_coord = None, None, None
- for atom in residue:
- if atom.getName() == 'CA':
- alpha_c_coord = atom.getCoords()
-
- if atom.getName() == 'C':
- c_coord = atom.getCoords()
-
- if atom.getName() == 'N':
- n_coord = atom.getCoords()
-
- if alpha_c_coord is not None and c_coord is not None and n_coord is not None:
- alpha_c_coords.append(alpha_c_coord)
- c_coords.append(c_coord)
- n_coords.append(n_coord)
- valid_index.append(index)
- index += 1
-
- prot_coords_valid = prot_coords[valid_index]
- prot_features_valid = prot_features[valid_index]
-
- else:
- raise ValueError("error prot_graph_type")
-
- ############################### Read Binding Site ##########################################
- if binding_site_type == 'p2rank':
- p2rank_result_path = os.path.join(p2rank_base, f'{name}_valid_chains.pdb_predictions.csv')
- df = pd.read_csv(p2rank_result_path, usecols= [' center_x',' center_y',' center_z'])
- possible_binding_sites = df.values
- ligand_center = lig_coords.mean(axis=0)
- if len(possible_binding_sites) == 0:
- binding_site = ligand_center
- else:
- binding_site_index = ((possible_binding_sites - ligand_center) ** 2).sum(axis=1).argmin()
- binding_site = possible_binding_sites[binding_site_index]
-
- elif binding_site_type == 'ligand_center':
- binding_site = lig_coords.mean(axis=0)
-
- ############################### Get LAS Mask ##########################################
- if LAS_mask:
- assert lig_type == 'rdkit'
- lig_LAS_mask = get_LAS_distance_constraint_mask(m_lig)
- else:
- lig_LAS_mask = None
-
- return lig_coords, lig_features, lig_edges, lig_node_type, lig_init_coords, \
- prot_coords_valid, prot_features_valid, prot_edges, prot_node_type, sec_features,\
- np.array(alpha_c_coords), np.array(c_coords), np.array(n_coords),\
- binding_site.reshape(1,-1), lig_LAS_mask
-
-
-def read_molecules_inference(lig_path, protein_path, prot_graph_type, chain_cut=5.0):
- #########################Read Ligand########################################################
- m_lig = next(pybel.readfile(lig_path.split('.')[-1], lig_path))
- lig_coords, lig_features = featurizer.get_features(m_lig)
- lig_edges = get_bonded_edges_obmol(m_lig)
- lig_node_type = lig_atom_type_obmol(m_lig)
-
- #########################Read Protein########################################################
- prot_complex = parsePDB(protein_path)
- prot_structure_no_water = prot_complex.select('protein')
- if chain_cut is not None:
- prot_valid_chains = prot_structure_no_water.select(f'same chain as within {chain_cut} of ligand',
- ligand=lig_coords)
- else:
- prot_valid_chains = prot_structure_no_water
-
- prot_valid_pocket = prot_structure_no_water.select('same residue as within 12 of ligand', ligand=lig_coords)
- prot_alpha_c = prot_valid_chains.select('calpha')
- prot_pocket_alpha_c = prot_valid_pocket.select('calpha')
-
- alpha_c_coords, c_coords, n_coords = [], [], []
-
- alpha_c_sec_features,prot_pocket_alpha_c_sec_features = None, None
- m_prot = prot_pocket_alpha_c if prot_graph_type.endswith('pocket') else prot_alpha_c
- m_prot_complete = prot_valid_pocket if prot_graph_type.endswith('pocket') else prot_valid_chains
- sec_features = prot_pocket_alpha_c_sec_features if prot_graph_type.endswith('pocket') else alpha_c_sec_features
-
- prot_coords, prot_features = prot_alpha_c_featurizer(m_prot)
- prot_node_type = prot_residue_type(m_prot)
- prot_edges = None
- hv = m_prot_complete.getHierView()
- index = 0
- valid_index, prot_coords_valid, prot_features_valid, ca_res_number_valid, residue_name_valid, chain_index_valid = [], [], [], [], [], []
- for chain in hv:
- for i, residue in enumerate(chain):
- alpha_c_coord, c_coord, n_coord = None, None, None
- ca_res_number = residue.getResnums()[0]
- residue_name = residue.getResname()
- chain_index = residue.getChid()
- # input(ca_res_number)
- # input(residue_name)
- for atom in residue:
- if atom.getName() == 'CA':
- alpha_c_coord = atom.getCoords()
-
- if atom.getName() == 'C':
- c_coord = atom.getCoords()
-
- if atom.getName() == 'N':
- n_coord = atom.getCoords()
-
- if alpha_c_coord is not None and c_coord is not None and n_coord is not None:
- alpha_c_coords.append(alpha_c_coord)
- c_coords.append(c_coord)
- n_coords.append(n_coord)
- valid_index.append(index)
- ca_res_number_valid.append(ca_res_number)
- residue_name_valid.append(residue_name)
- chain_index_valid.append(chain_index)
- index += 1
-
- prot_coords_valid = alpha_c_coords
- ResIndex_valid = [ResDict.get(ResName,UNKOWN_RES) for ResName in residue_name_valid]
- prot_node_type = torch.tensor(ResIndex_valid,dtype=torch.int64)
- prot_features_valid = torch.tensor(np.eye(UNKOWN_RES + 1)[ResIndex_valid])
-
- return lig_coords, lig_features, lig_edges, lig_node_type, \
- prot_coords_valid, prot_features_valid, prot_edges, prot_node_type, sec_features, \
- np.array(alpha_c_coords), np.array(c_coords), np.array(n_coords), ca_res_number_valid, chain_index_valid
-
-def get_ligand_smiles(name, dataset_path,):
- lig_path_mol2 = os.path.join(dataset_path, name, f'{name}_ligand.mol2')
- lig_path_sdf = os.path.join(dataset_path, name, f'{name}_ligand.sdf')
- m_lig = read_rdmol(lig_path_sdf, sanitize=True, remove_hs=True)
- if m_lig == None: # read mol2 file if sdf file cannot be sanitized
- m_lig = read_rdmol(lig_path_mol2, sanitize=True, remove_hs=True)
-
- sm = Chem.MolToSmiles(m_lig)
- m_sm_order = list(m_lig.GetPropsAsDict(includePrivate=True, includeComputed=True)['_smilesAtomOutputOrder'])
-
- sm2m_order = [0] * len(m_sm_order)
- for index, order in enumerate(m_sm_order):
- sm2m_order[order] = index
-
- return sm, sm2m_order
-
-def get_protein_fasta(name, dataset_path,):
- try:
- prot_valid_chains = parsePDB(os.path.join(dataset_path, name, f'{name}_valid_chains.pdb'))
- except:
- raise ValueError(f'{name} error!')
- hv = prot_valid_chains.getHierView()
- index = 0
- valid_index, prot_coords_valid, prot_features_valid = [], [], []
- alpha_c_coords, c_coords, n_coords = [], [], []
- fasta_list = []
- for chain in hv:
- fasta_list.append(chain.getSequence())
- for i, residue in enumerate(chain):
- alpha_c_coord, c_coord, n_coord = None, None, None
- for atom in residue:
- if atom.getName() == 'CA':
- alpha_c_coord = atom.getCoords()
-
- if atom.getName() == 'C':
- c_coord = atom.getCoords()
-
- if atom.getName() == 'N':
- n_coord = atom.getCoords()
-
- if alpha_c_coord is not None and c_coord is not None and n_coord is not None:
- alpha_c_coords.append(alpha_c_coord)
- c_coords.append(c_coord)
- n_coords.append(n_coord)
- valid_index.append(index)
- index += 1
-
- return fasta_list, valid_index
-
-def prot_p2rank_feats(p2rank_result_path, p2rank_feats_tpye='zscore', pocket_cut=10):
- df = pd.read_csv(p2rank_result_path)
- df.columns = df.columns.str.strip()
- residue_zscores, residue_pocket_idxs = df[p2rank_feats_tpye].values, df['pocket'].values
- feat_len = pocket_cut + 2
- p2rank_feats = torch.zeros(len(residue_zscores), feat_len)
- for index, residue_zscore in enumerate(residue_zscores):
- pocket_idx = residue_pocket_idxs[index]
- if pocket_idx == 0 :
- p2rank_feats[index, feat_len - 1] = residue_zscore
- elif pocket_idx > pocket_cut :
- p2rank_feats[index, feat_len - 2] = residue_zscore
- else:
- p2rank_feats[index, pocket_idx - 1] = residue_zscore
-
- return p2rank_feats
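-
-# A short reading of the feature layout built above (an interpretation of this code, not an official p2rank spec):
-# each residue gets a (pocket_cut + 2)-dim vector holding its score in a single slot --
-# slot k-1 for pocket k <= pocket_cut, slot feat_len-2 for pockets beyond the cut,
-# and slot feat_len-1 for residues assigned to no pocket (pocket index 0).
-# Hypothetical usage (the csv path is a placeholder for a real p2rank residue output):
-#   feats = prot_p2rank_feats('1abc_valid_chains.pdb_residues.csv', pocket_cut=10)
-#   print(feats.shape)  # torch.Size([num_residues, 12])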
-
-def get_p2rank_feats(name, dataset_path, p2rank_base=None, p2rank_feats_tpye='zscore', pocket_cut=10):
- try:
- prot_valid_chains = parsePDB(os.path.join(dataset_path, name, f'{name}_valid_chains.pdb'))
- except:
- raise ValueError(f'{name} error!')
- prot_alpha_c = prot_valid_chains.select('calpha')
- alpha_c_coords, c_coords, n_coords = [], [], []
-
- p2rank_result_path = os.path.join(p2rank_base, f'{name}_valid_chains.pdb_residues.csv')
- p2rank_features = prot_p2rank_feats(p2rank_result_path, pocket_cut=pocket_cut, p2rank_feats_tpye=p2rank_feats_tpye)
- prot_coords, prot_features = prot_alpha_c_featurizer(prot_alpha_c)
-
- try:
- assert len(p2rank_features) == len(prot_features)
- except:
- # print(f'p2rank protein number, {len(p2rank_features)}')
- # print(f'prot_features protein number, {len(prot_features)}')
- # raise ValueError(f'p2rank length error, {name}')
- with open('p2rank_feats_error.txt','a') as f:
- f.write(f'{name}\n')
- return torch.zeros(len(prot_features), pocket_cut + 2)
-
- hv = prot_valid_chains.getHierView()
- index = 0
- valid_index, prot_coords_valid, prot_features_valid = [], [], []
- for chain in hv:
- for i, residue in enumerate(chain):
- alpha_c_coord, c_coord, n_coord = None, None, None
- for atom in residue:
- if atom.getName() == 'CA':
- alpha_c_coord = atom.getCoords()
-
- if atom.getName() == 'C':
- c_coord = atom.getCoords()
-
- if atom.getName() == 'N':
- n_coord = atom.getCoords()
-
- if alpha_c_coord is not None and c_coord is not None and n_coord is not None:
- alpha_c_coords.append(alpha_c_coord)
- c_coords.append(c_coord)
- n_coords.append(n_coord)
- valid_index.append(index)
- index += 1
-
- p2rank_features_valid = p2rank_features[valid_index]
-
- return p2rank_features_valid
-
-def binarize(x):
- return torch.where(x > 0, torch.ones_like(x), torch.zeros_like(x))
-
-#adj - > n_hops connections adj
-def n_hops_adj(adj, n_hops):
- adj_mats = [torch.eye(adj.size(0), dtype=torch.long, device=adj.device), binarize(adj + torch.eye(adj.size(0), dtype=torch.long, device=adj.device))]
-
- for i in range(2, n_hops+1):
- adj_mats.append(binarize(adj_mats[i-1] @ adj_mats[1]))
- extend_mat = torch.zeros_like(adj)
-
- for i in range(1, n_hops+1):
- extend_mat += (adj_mats[i] - adj_mats[i-1]) * i
-
- return extend_mat
-
-def get_LAS_distance_constraint_mask(mol):
- # Get the adj
- adj = Chem.GetAdjacencyMatrix(mol)
- adj = torch.from_numpy(adj)
- extend_adj = n_hops_adj(adj,2)
- # add ring
- ssr = Chem.GetSymmSSSR(mol)
- for ring in ssr:
- # print(ring)
- for i in ring:
- for j in ring:
- if i==j:
- continue
- else:
- extend_adj[i][j]+=1
- # turn to mask
- mol_mask = binarize(extend_adj)
- return mol_mask
-
-def get_lig_graph_geodiff(lig_coords, lig_features, lig_node_type, lig_edges):
- g_lig = dgl.DGLGraph()
-
- num_atoms_lig = len(lig_coords) # number of ligand atom_level
- g_lig.add_nodes(num_atoms_lig)
- g_lig.ndata['h'] = torch.from_numpy(lig_features) if isinstance(lig_features, np.ndarray) else lig_features
- g_lig.ndata['node_type'] = lig_node_type # schnet\mgcn features
-
- edges = lig_edges
- src_ls, dst_ls, bond_type = list(zip(*edges))
- src_ls, dst_ls = np.array(src_ls), np.array(dst_ls)
-
- g_lig.add_edges(src_ls, dst_ls)
- g_lig.ndata['pos'] = torch.tensor(lig_coords, dtype=torch.float)
- g_lig.edata['bond_type'] = torch.tensor(bond_type, dtype=torch.int64)
-
- return g_lig
-
-def get_lig_multi_pose_graph_equibind(lig_multi_coords, lig_features, lig_node_type, max_neighbors=None, cutoff=5.0):
- multi_graphs = []
- for lig_coords in lig_multi_coords:
- multi_graphs.append(get_lig_graph_equibind(lig_coords, lig_features, None, lig_node_type, max_neighbors, cutoff))  # no bond edge list is provided here, so pass None explicitly to keep the argument order of get_lig_graph_equibind
-
- return multi_graphs
-
-def get_lig_graph_equibind(lig_coords, lig_features, lig_edges, lig_node_type, max_neighbors=None, cutoff=5.0):
-
- num_nodes = lig_coords.shape[0]
- assert lig_coords.shape[1] == 3
- distance = spatial.distance_matrix(lig_coords, lig_coords)
-
- src_list = []
- dst_list = []
- dist_list = []
- mean_norm_list = []
- for i in range(num_nodes):
- dst = list(np.where(distance[i, :] < cutoff)[0])
- dst.remove(i)
- if max_neighbors != None and len(dst) > max_neighbors:
- dst = list(np.argsort(distance[i, :]))[1: max_neighbors + 1] # closest would be self loop
- if len(dst) == 0:
- dst = list(np.argsort(distance[i, :]))[1:2] # closest would be the index i itself > self loop
- print(
- f'The lig_radius {cutoff} was too small for one lig atom such that it had no neighbors. So we connected {i} to the closest other lig atom {dst}')
- assert i not in dst
- src = [i] * len(dst)
- src_list.extend(src)
- dst_list.extend(dst)
- valid_dist = list(distance[i, dst])
- dist_list.extend(valid_dist)
- valid_dist_np = distance[i, dst]
- sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))
- weights = softmax(- valid_dist_np.reshape((1, -1)) ** 2 / sigma, axis=1) # (sigma_num, neigh_num)
- assert weights[0].sum() > 1 - 1e-2 and weights[0].sum() < 1.01
- diff_vecs = lig_coords[src, :] - lig_coords[dst, :] # (neigh_num, 3)
- mean_vec = weights.dot(diff_vecs) # (sigma_num, 3)
- denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1)) # (sigma_num,)
- mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator # (sigma_num,)
-
- mean_norm_list.append(mean_vec_ratio_norm)
- assert len(src_list) == len(dst_list)
- assert len(dist_list) == len(dst_list)
- graph = dgl.graph((torch.tensor(src_list), torch.tensor(dst_list)), num_nodes=num_nodes, idtype=torch.int32)
-
- graph.ndata['h'] = torch.from_numpy(lig_features) if isinstance(lig_features, np.ndarray) else lig_features
- graph.ndata['node_type'] = lig_node_type # schnet\mgcn features
- graph.edata['e'] = distance_featurizer(dist_list, 0.75) # avg distance = 1.3 So divisor = (4/7)*1.3 = ~0.75
- graph.ndata['pos'] = torch.from_numpy(np.array(lig_coords).astype(np.float32))
- graph.ndata['mu_r_norm'] = torch.from_numpy(np.array(mean_norm_list).astype(np.float32))
-
- if lig_edges is not None:
- edge_src_dst_2_edge_index = {}
- for idx, (s, d) in enumerate(zip(src_list, dst_list)):
- edge_src_dst_2_edge_index[(s, d)] = idx
- bond_src_ls, bond_dst_ls, bond_type = list(zip(*lig_edges))
-
- bond_edge_idx = []
- for bs, bd in zip(bond_src_ls, bond_dst_ls):
- bond_edge_idx.append(edge_src_dst_2_edge_index[(bs, bd)])
-
- graph.edata['bond_type'] = torch.zeros(len(src_list), len(bond_type[0]))
- graph.edata['bond_type'][bond_edge_idx] = torch.tensor(bond_type).to(torch.float32)
-
- return graph
-
-def get_lig_graph(lig_coords,lig_features, lig_edges, lig_node_type, cutoff=None):
- g_lig = dgl.DGLGraph()
-
- num_atoms_lig = len(lig_coords) # number of ligand atom_level
- g_lig.add_nodes(num_atoms_lig)
- g_lig.ndata['h'] = torch.from_numpy(lig_features) if isinstance(lig_features, np.ndarray) else lig_features
- g_lig.ndata['node_type'] = lig_node_type # schnet\mgcn features
- dis_matrix_lig = spatial.distance_matrix(lig_coords, lig_coords)
- if cutoff is None:
- edges = lig_edges
- src_ls, dst_ls, bond_type = list(zip(*edges))
- src_ls, dst_ls = np.array(src_ls), np.array(dst_ls)
- else:
- node_idx = np.where( (dis_matrix_lig < cutoff) & (dis_matrix_lig!=0) ) # no self-loop
- src_ls = node_idx[0]
- dst_ls = node_idx[1]
- g_lig.add_edges(src_ls, dst_ls)
- lig_d = torch.tensor(dis_matrix_lig[src_ls, dst_ls], dtype=torch.float).view(-1, 1)
- g_lig.edata['distance'] = lig_d
- g_lig.edata['e'] = lig_d * 0.1 # g.edata['e'] ~ (n_bond1+n_bond2) * k
- g_lig.ndata['pos'] = torch.tensor(lig_coords, dtype=torch.float)
- D3_info = bond_feature(g_lig)
- g_lig.edata['e'] = torch.cat([g_lig.edata['e'], D3_info], dim=-1)
- if cutoff is None:  # bond types are only defined when edges come from the bond list
- g_lig.edata['bond_type'] = torch.tensor(bond_type, dtype=torch.int64)
- # g_lig.ndata.pop('pos')
- assert not torch.any(torch.isnan(D3_info))
- return g_lig
-
-def get_prot_atom_graph(prot_coords, prot_features, prot_edges, prot_node_type, cutoff=None):
- g_prot = dgl.DGLGraph()
- num_atoms_prot = len(prot_coords)
- g_prot.add_nodes(num_atoms_prot)
- g_prot.ndata['h'] = torch.from_numpy(prot_features) if isinstance(prot_features, np.ndarray) else prot_features
- g_prot.ndata['node_type'] = prot_node_type # schnet\mgcn features
- dis_matrix_lig = spatial.distance_matrix(prot_coords, prot_coords)
- if cutoff is None:
- edges = prot_edges
- src_ls, dst_ls, bond_type = list(zip(*edges))
- src_ls, dst_ls = np.array(src_ls), np.array(dst_ls)
- else:
- node_idx = np.where( (dis_matrix_lig < cutoff) & (dis_matrix_lig!=0) ) # no self-loop
- src_ls = node_idx[0]
- dst_ls = node_idx[1]
- g_prot.add_edges(src_ls, dst_ls)
- prot_d = torch.tensor(dis_matrix_lig[src_ls, dst_ls], dtype=torch.float).view(-1, 1)
- g_prot.edata['distance'] = prot_d
- g_prot.edata['e'] = prot_d * 0.1 # g.edata['e'] ~ (n_bond1+n_bond2) * k
- g_prot.ndata['pos'] = torch.tensor(prot_coords, dtype=torch.float)
- D3_info = bond_feature(g_prot)
- g_prot.edata['e'] = torch.cat([g_prot.edata['e'], D3_info], dim=-1)
- if cutoff is None:  # bond types are only defined when edges come from the bond list
- g_prot.edata['bond_type'] = torch.tensor(bond_type, dtype=torch.int64)
- # g_prot.ndata.pop('pos')
- assert not torch.any(torch.isnan(D3_info))
- return g_prot
-
-def distance_featurizer(dist_list, divisor) -> torch.Tensor:
- # you want to use a divisor that is close to 4/7 times the average distance that you want to encode
- length_scale_list = [1.5 ** x for x in range(15)]
- center_list = [0. for _ in range(15)]
-
- num_edge = len(dist_list)
- dist_list = np.array(dist_list)
-
- transformed_dist = [np.exp(- ((dist_list / divisor) ** 2) / float(length_scale))
- for length_scale, center in zip(length_scale_list, center_list)]
-
- transformed_dist = np.array(transformed_dist).T
- transformed_dist = transformed_dist.reshape((num_edge, -1))
- return torch.from_numpy(transformed_dist.astype(np.float32))
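-
-# Minimal sketch of the soft distance expansion above (the numbers are illustrative only):
-# each raw distance d becomes 15 features exp(-((d / divisor) ** 2) / 1.5 ** k) for k = 0..14,
-# so short edges activate the narrow length scales and long edges only the broad ones.
-#   feats = distance_featurizer([1.3, 4.0, 7.5], divisor=0.75)
-#   print(feats.shape)  # torch.Size([3, 15])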
-
-def local_coordinate_system_feature(prot_coords, c_alpha_coords, c_coords, n_coords, prot_d, src_ls, dst_ls):
- n_i_list, u_i_list, v_i_list = [], [], []
- for i in range(len(prot_coords)):
- nitrogen = n_coords[i]
- c_alpha = c_alpha_coords[i]
- carbon = c_coords[i]
- u_i = (nitrogen - c_alpha) / np.linalg.norm(nitrogen - c_alpha)
- t_i = (carbon - c_alpha) / np.linalg.norm(carbon - c_alpha)
- n_i = np.cross(u_i, t_i) / np.linalg.norm(np.cross(u_i, t_i))
- v_i = np.cross(n_i, u_i)
- assert (math.fabs(
- np.linalg.norm(v_i) - 1.) < 1e-5), "protein utils protein_to_graph_dips, v_i norm larger than 1"
- n_i_list.append(n_i)
- u_i_list.append(u_i)
- v_i_list.append(v_i)
- n_i_feat, u_i_feat, v_i_feat = np.stack(n_i_list), np.stack(u_i_list), np.stack(v_i_list)
-
- edge_feat_ori_list = []
- for i in range(len(prot_d)):
- src = src_ls[i]
- dst = dst_ls[i]
- # place n_i, u_i, v_i as lines in a 3x3 basis matrix
- basis_matrix = np.stack((n_i_feat[dst, :], u_i_feat[dst, :], v_i_feat[dst, :]), axis=0)
- p_ij = np.matmul(basis_matrix, c_alpha_coords[src, :] - c_alpha_coords[dst, :])
- q_ij = np.matmul(basis_matrix, n_i_feat[src, :]) # shape (3,)
- k_ij = np.matmul(basis_matrix, u_i_feat[src, :])
- t_ij = np.matmul(basis_matrix, v_i_feat[src, :])
- s_ij = np.concatenate((p_ij, q_ij, k_ij, t_ij), axis=0) # shape (12,)
- edge_feat_ori_list.append(s_ij)
- edge_feat_ori_feat = np.stack(edge_feat_ori_list, axis=0) # shape (num_edges, 12)
- edge_feat_ori_feat = torch.from_numpy(edge_feat_ori_feat.astype(np.float32))
- c_alpha_edge_feat = torch.cat([distance_featurizer(prot_d, divisor=4), edge_feat_ori_feat], dim=1) # shape (num_edges, 27): 15 distance features + 12 orientation features
- return c_alpha_edge_feat
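-
-# A brief recap of the geometry above (restating the code, not an external reference):
-# per residue i, u_i = unit(N_i - CA_i), t_i = unit(C_i - CA_i), n_i = unit(u_i x t_i), v_i = n_i x u_i
-# form an orthonormal local frame; for an edge (src, dst) the CA displacement and the source frame axes
-# are expressed in the destination frame and concatenated into a 12-dim orientation feature,
-# which is then stacked with the 15-dim distance expansion from distance_featurizer,
-# giving a (num_edges, 27) edge feature tensor.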
-
-def get_prot_alpha_c_graph_equibind(prot_coords, prot_features, prot_node_type, sec_features,
- alpha_c_coords, c_coords, n_coords,
- max_neighbor=None, cutoff=None):
- try:
- assert len(alpha_c_coords) == len(prot_coords)
- assert len(c_coords) == len(prot_coords)
- assert len(n_coords) == len(prot_coords)
- except:
- raise ValueError(f'{len(alpha_c_coords)} == {len(prot_coords)}, {len(c_coords)} == {len(prot_coords)}, {len(n_coords)} == {len(prot_coords)}')
-
- g_prot = dgl.DGLGraph()
- num_atoms_prot = len(prot_coords) # number of pocket atom_level
- g_prot.add_nodes(num_atoms_prot)
-
- g_prot.ndata['h'] = prot_features
- g_prot.ndata['node_type'] = prot_node_type[:num_atoms_prot]
- distances = spatial.distance_matrix(prot_coords, prot_coords)
- src_list = []
- dst_list = []
- dist_list = []
- mean_norm_list = []
- for i in range(num_atoms_prot):
- dst = list(np.where(distances[i, :] < cutoff)[0])
- dst.remove(i)
- if max_neighbor != None and len(dst) > max_neighbor:
- dst = list(np.argsort(distances[i, :]))[1: max_neighbor + 1]
- if len(dst) == 0:
- dst = list(np.argsort(distances[i, :]))[1:2] # choose second because first is i itself
- print(
- f'The c_alpha_cutoff {cutoff} was too small for one c_alpha such that it had no neighbors. So we connected it to the closest other c_alpha')
- assert i not in dst
- src = [i] * len(dst)
- src_list.extend(src)
- dst_list.extend(dst)
- valid_dist = list(distances[i, dst])
- dist_list.extend(valid_dist)
- valid_dist_np = distances[i, dst]
- sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))
- weights = softmax(- valid_dist_np.reshape((1, -1)) ** 2 / sigma, axis=1) # (sigma_num, neigh_num)
- assert weights[0].sum() > 1 - 1e-2 and weights[0].sum() < 1.01
- diff_vecs = alpha_c_coords[src, :] - alpha_c_coords[dst, :] # (neigh_num, 3)
- mean_vec = weights.dot(diff_vecs) # (sigma_num, 3)
- denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1)) # (sigma_num,)
- mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator # (sigma_num,)
- mean_norm_list.append(mean_vec_ratio_norm)
- assert len(src_list) == len(dst_list)
- assert len(dist_list) == len(dst_list)
-
- g_prot.add_edges(src_list, dst_list)
-
- g_prot.edata['e'] = local_coordinate_system_feature(prot_coords, alpha_c_coords, c_coords, n_coords,
- dist_list, src_list, dst_list)
- residue_representatives_loc_feat = torch.from_numpy(alpha_c_coords.astype(np.float32))
- g_prot.ndata['pos'] = residue_representatives_loc_feat
- g_prot.ndata['mu_r_norm'] = torch.from_numpy(np.array(mean_norm_list).astype(np.float32))
- return g_prot
-
-def get_prot_alpha_c_graph_ign(prot_coords, prot_features, prot_node_type, sec_features, cutoff=None):
- g_prot = dgl.DGLGraph()
- num_atoms_prot = len(prot_coords) # number of pocket atom_level
- g_prot.add_nodes(num_atoms_prot)
- g_prot.ndata['h'] = torch.from_numpy(prot_features) if isinstance(prot_features, np.ndarray) else prot_features
- g_prot.ndata['node_type'] = prot_node_type[:num_atoms_prot]
- dis_matrix = spatial.distance_matrix(prot_coords, prot_coords)
- node_idx = np.where((dis_matrix < cutoff) & (dis_matrix != 0)) # no self-loop
- src_ls = node_idx[0]
- dst_ls = node_idx[1]
- g_prot.add_edges(src_ls, dst_ls)
- g_prot.ndata['pos'] = torch.tensor(prot_coords, dtype=torch.float)
- prot_d = torch.tensor(dis_matrix[src_ls, dst_ls], dtype=torch.float).view(-1, 1)
- g_prot.edata['distance'] = prot_d
-
- # g_prot.edata['e'] = prot_d * 0.1
-
- # calculate the 3D info for g
- D3_info_th = bond_feature(g_prot)
-
- g_prot.edata['e'] = torch.cat([D3_info_th, prot_d * 0.1], dim=-1)
- # g_prot.ndata.pop('pos')
- assert not torch.any(torch.isnan(D3_info_th))
-
- return g_prot
-
-def get_interact_graph_fc(lig_coords,prot_coords,cutoff=None):
- # get fully connected graph
- g_inter = dgl.DGLGraph()
- num_atoms_lig = len(lig_coords)
- num_atoms_prot = len(prot_coords)
-
- g_inter.add_nodes(num_atoms_lig + num_atoms_prot)
- dis_matrix = spatial.distance_matrix(lig_coords, prot_coords)
- node_idx = np.where(dis_matrix > 0)
- src_ls = np.concatenate([node_idx[0], node_idx[1] + num_atoms_lig])
- dst_ls = np.concatenate([node_idx[1] + num_atoms_lig, node_idx[0]])
- g_inter.add_edges(src_ls, dst_ls)
- # 'd', distance between ligand atom_level and pocket atom_level
- inter_dis = np.concatenate([dis_matrix[node_idx[0], node_idx[1]], dis_matrix[node_idx[0], node_idx[1]]])
- inter_d = torch.tensor(inter_dis, dtype=torch.float).view(-1, 1)
- g_inter.edata['e'] = inter_d # if self-loops were added, this would need to be adjusted
- g_inter.ndata['pos'] = torch.cat([torch.tensor(lig_coords, dtype=torch.float),torch.tensor(prot_coords, dtype=torch.float)],dim=0)
- return g_inter
-
-def get_interact_multi_pose_graph_knn(lig_multi_coords, prot_coords, max_neighbor=None, min_neighbor=None, cutoff=None):
- multi_graphs = []
- for lig_coords in lig_multi_coords:
- multi_graphs.append(get_interact_graph_knn(lig_coords,prot_coords,max_neighbor,min_neighbor,cutoff))
-
- return multi_graphs
-
-def get_interact_graph_knn(lig_coords,prot_coords,max_neighbor=None,min_neighbor=None,cutoff=None):
- g_inter = dgl.DGLGraph()
- num_atoms_lig = len(lig_coords)
- num_atoms_prot = len(prot_coords)
-
- g_inter.add_nodes(num_atoms_lig + num_atoms_prot)
- dis_matrix = spatial.distance_matrix(lig_coords, prot_coords)
-
- src_list, dst_list, dis_list = [], [], []
- for i in range(num_atoms_lig):
- dst = np.where(dis_matrix[i, :] < cutoff)[0]
- if max_neighbor != None and len(dst) > max_neighbor:
- dst = list(np.argsort(dis_matrix[i, :]))[:max_neighbor]
- if min_neighbor != None and len(dst) == 0:
- dst = list(np.argsort(dis_matrix[i, :]))[:min_neighbor]
-
- src = [i] * len(dst)
- src_list.extend(src)
- dst_list.extend([x + num_atoms_lig for x in dst])
- dis_list.extend(list(dis_matrix[i,dst]))
-
- for i in range(num_atoms_prot):
- dst = list(np.where(dis_matrix[:, i] < cutoff)[0])
-
- if max_neighbor != None and len(dst) > max_neighbor:
- dst = list(np.argsort(dis_matrix[:, i]))[:max_neighbor]
- if min_neighbor != None and len(dst) == 0:
- dst = list(np.argsort(dis_matrix[:, i]))[:min_neighbor] # choose second because first is i itself
-
- src = [i] * len(dst)
- src_list.extend([x + num_atoms_lig for x in src])
- dst_list.extend(dst)
- dis_list.extend(list(dis_matrix[dst, i]))
-
- src_ls = np.array(src_list)
- dst_ls = np.array(dst_list)
- g_inter.add_edges(src_ls, dst_ls)
- # 'd', distance between ligand atom_level and pocket atom_level
- inter_dis = np.array(dis_list)
- inter_d = torch.tensor(inter_dis, dtype=torch.float).view(-1, 1)
-
- # squared_distance = inter_d ** 2
- # all_sigmas_dist = [1.5 ** x for x in range(15)]
- # prot_square_distance_scale = 10.0
- # x_rel_mag = torch.cat([torch.exp(-(squared_distance / prot_square_distance_scale) / sigma) for sigma in
- # all_sigmas_dist], dim=-1)
- # g_inter.edata['e'] = x_rel_mag
- g_inter.edata['d'] = inter_d
- return g_inter
-
-def get_interact_graph_knn_v2(lig_coords,prot_coords,max_neighbor=None,min_neighbor=None,cutoff=None,):
- g_inter = dgl.DGLGraph()
- num_atoms_lig = len(lig_coords)
- num_atoms_prot = len(prot_coords)
-
- g_inter.add_nodes(num_atoms_lig + num_atoms_prot)
- dis_matrix = spatial.distance_matrix(lig_coords, prot_coords)
-
- src_list, dst_list, dis_list = [], [], []
- for i in range(num_atoms_lig):
- dst = np.where(dis_matrix[i, :] < cutoff)[0]
- if max_neighbor != None and len(dst) > max_neighbor:
- dst = list(np.argsort(dis_matrix[i, :]))[:max_neighbor]
- if min_neighbor != None and len(dst) == 0:
- dst = list(np.argsort(dis_matrix[i, :]))[:min_neighbor]
-
- src = [i] * len(dst)
- src_list.extend(src)
- dst_list.extend([x + num_atoms_lig for x in dst])
- dis_list.extend(list(dis_matrix[i,dst]))
-
- for i in range(num_atoms_prot):
- dst = list(np.where(dis_matrix[:, i] < cutoff)[0])
-
- if max_neighbor != None and len(dst) > max_neighbor:
- dst = list(np.argsort(dis_matrix[:, i]))[:max_neighbor]
- if min_neighbor != None and len(dst) == 0:
- dst = list(np.argsort(dis_matrix[:, i]))[:min_neighbor] # choose second because first is i itself
-
- src = [i] * len(dst)
- src_list.extend([x + num_atoms_lig for x in src])
- dst_list.extend(dst)
- dis_list.extend(list(dis_matrix[dst, i]))
-
- src_ls = np.array(src_list)
- dst_ls = np.array(dst_list)
- g_inter.add_edges(src_ls, dst_ls)
- # 'd', distance between ligand atom_level and pocket atom_level
- inter_dis = np.array(dis_list)
- inter_d = torch.tensor(inter_dis, dtype=torch.float).view(-1, 1)
-
- squared_distance = inter_d ** 2
- all_sigmas_dist = [1.5 ** x for x in range(15)]
- prot_square_distance_scale = 10.0
- x_rel_mag = torch.cat([torch.exp(-(squared_distance / prot_square_distance_scale) / sigma) for sigma in
- all_sigmas_dist], dim=-1)
- g_inter.edata['e'] = x_rel_mag
- g_inter.edata['d'] = inter_d
- return g_inter
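-
-# Hedged usage sketch of the cross (ligand-protein) graph builders above
-# (the coordinate arrays and cutoffs are illustrative, not values from the source):
-#   g = get_interact_graph_knn_v2(lig_coords, prot_coords, max_neighbor=10, min_neighbor=1, cutoff=8.0)
-#   # g.edata['d'] holds raw ligand-protein distances; g.edata['e'] is their 15-dim soft expansion
-#   # exp(-(d ** 2 / 10.0) / 1.5 ** k), k = 0..14, analogous to distance_featurizer but on squared distances.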
-
-def pack_graph_and_labels(lig_graphs, prot_graphs, inter_graphs, labels):
- return lig_graphs, prot_graphs, inter_graphs, labels
-
-SSE_color = {'H':'R','G':'R','I':'R','E':'B','T':'G','S':'W','B':'W',' ':'W'}
-
-def ExcuteDSSP(dataset_path,name,dssp = '/data/jiaxianyan/anaconda3/bin/mkdssp'):
- InPath = os.path.join(dataset_path, name, f'{name}_protein_chains.pdb')
- OutPath = os.path.join(dataset_path, name, f'{name}_protein_chains.pdb.dssp')
- SSPath = os.path.join(dataset_path, name, f'{name}_protein_chains.pdb.SS')
- cmd = '{} -i {} -o {}'.format(dssp,InPath,OutPath)
- os.system(cmd)
-
-def ListToStr(res):
- # res is list
- Res = ''
- for r in res:
- Res += r[0]*r[1]
- return Res
-
-def StrToList(res):
- # res is string
- before, length, Res = 'W',0,[]
- for s in res:
- if s != before:
- Res.append((before, length))
- length = 1
- before = s
- else:
- length += 1
- Res.append((before, length))
- return Res
-
-def AllocateIndex(res,BeginIndex=0):
- # res is list
- str_res = ListToStr(res)
- ClusterIndex,Res = BeginIndex,[]
- for index in range(len(str_res)):
- if index!=0 and str_res[index]!=str_res[index-1]:
- ClusterIndex += 1
- Res.append(str(ClusterIndex)+','+str_res[index])
- return Res,ClusterIndex+1
-
-def SmoothHiearachical(res,threshod=0):
- if threshod==0:
- return res
- # res is list
- Res = [0] * len(res)
- if len(res)==1:
- return res
-
- Res[0] = (res[1][0], res[0][1])
- Res[-1] = (res[-2][0], res[-1][1])
- for index in range(1,len(res)-1):
- SSE,length = res[index]
- if length<=threshod:
- Res[index] = (Res[index-1][0],length)
- else:
- Res[index] = res[index]
- return Res
-
-
-def ExtractHiearachical(FilePath):
- with open(FilePath,'r') as f:
- lines = f.read().strip().split('\n')[28:]
-
- borders, start = [], 0
- for index,line in enumerate(lines):
- if '!' in line:
- borders.append((start,index))
- start = index + 1
- borders.append((start,len(lines)+1))
-
- SSEIndex,SupplyIndex = 16,18
-
- ResSSE = ''
- ResSmoothSSE = ''
- NumClusters = 0
- ClusterIndexs = []
- for border in borders:
- SSE,Color = '',''
- OneChain = lines[border[0]:border[1]]
- for index,line in enumerate(OneChain):
- if line[SupplyIndex]=='>' and line[SSEIndex] == ' ':
- SSE += OneChain[index + 1][SSEIndex]
- Color += SSE_color[OneChain[index + 1][SSEIndex]]
- elif line[SupplyIndex]=='<' and line[SSEIndex] ==' ':
- SSE += OneChain[index - 1][SSEIndex]
- Color += SSE_color[OneChain[index - 1][SSEIndex]]
- else:
- SSE += line[SSEIndex]
- Color += SSE_color[line[SSEIndex]]
-
- ResSSE = ResSSE + SSE
- res = StrToList(SSE)
- Smoothres = SmoothHiearachical(res)
- ClusterIndex,NumCluster = AllocateIndex(Smoothres,NumClusters)
- ResSmoothSSE = ResSmoothSSE + ListToStr(Smoothres)
- NumClusters = NumCluster
- ClusterIndexs.extend(ClusterIndex)
-
- return ResSSE, ResSmoothSSE, NumClusters, ClusterIndexs
-
-def prot_alpha_c_featurizer(Structure):
- Coords = Structure.getCoords()
- ResNames = Structure.getResnames()
- ResIndex = [ResDict.get(ResName,UNKOWN_RES) for ResName in ResNames]
- ProtFeature = torch.tensor(np.eye(UNKOWN_RES+1)[ResIndex])
- return Coords, ProtFeature
-
-def prot_residue_type(Structure):
- ResNames = Structure.getResnames()
- ResIndex = [ResDict.get(ResName,UNKOWN_RES) for ResName in ResNames]
- return torch.tensor(ResIndex,dtype=torch.int64)
-
-def break_molecules_by_rotatable_bonds(mol):
- patt = Chem.MolFromSmarts('[!$([NH]!@C(=O))&!D1&!$(*#*)]-&!@[!$([NH]!@C(=O))&!D1&!$(*#*)]')
- rotatable_bonds = mol.GetSubstructMatches(patt)
- bs = [mol.GetBondBetweenAtoms(x, y).GetIdx() for x, y in rotatable_bonds]
- mol_broken = Chem.rdmolops.FragmentOnBonds(mol, bs)
- frags = Chem.rdmolops.GetMolFrags(mol_broken)
- return frags,rotatable_bonds
-
-def get_molecule_mass_center(mol):
- from rdkit.Chem import Descriptors
- numatoms = mol.GetNumAtoms()
- pos = mol.GetConformer().GetPositions()
- atoms = [atom for atom in mol.GetAtoms()]
- # get center of mass
- mass = Descriptors.MolWt(mol)
- center_of_mass = sum(atoms[i].GetMass() * pos[i] for i in range(numatoms)) / mass
- return center_of_mass
-
-def get_frag_geo_center(mol, frag):
- pos = mol.GetConformer().GetPositions()
- center_of_geo = sum(pos[i] for i in frag) / len(frag)
- return center_of_geo
-
-def get_center_frag(mol, frags):
- mass_center = get_molecule_mass_center(mol)
- frags_geo_center = np.array([get_frag_geo_center(mol, frag) for frag in frags])
- center_distance = ((frags_geo_center - mass_center) ** 2).sum(axis=1)
- return int(np.argmin(center_distance)) # index of the fragment whose geometric center is closest to the center of mass
-
-def get_frag_neighbors(mol, frags, rotatable_bonds):
- numatoms = mol.GetNumAtoms()
- frag2neighbour,frag2atom,neighbour2bond,atom2bond,bond2frag = {},{},{},{},{}
-
- for i in range(numatoms):
- atom2bond[i] = []
-
- for b_index,rb in enumerate(rotatable_bonds):
- x, y = rb
- atom2bond[x].append(b_index)
- atom2bond[y].append(b_index)
- bond2frag[b_index] = []
-
- for f_index,frag in enumerate(frags):
- frag2neighbour[f_index] = []
- frag2atom[f_index] = []
- neighbour2bond[f_index] = []
- for atom in frag:
- if len(atom2bond[atom]) != 0:
- for bond in atom2bond[atom]:
- bond2frag[bond].append(f_index)
- x, y = rotatable_bonds[bond]
- if x == atom:
- frag2atom[f_index].append(y)
- else:
- frag2atom[f_index].append(x)
-
- for bond in bond2frag.keys():
- x,y = bond2frag[bond]
- frag2neighbour[x].append(y)
- neighbour2bond[x].append(bond)
- frag2neighbour[y].append(x)
- neighbour2bond[y].append(bond)
-
- return frag2neighbour,frag2atom,neighbour2bond
-
-def bfs(frag_neighbors,neighbour2bond,center_frag_index):
- bfs_rank = [center_frag_index]
- bfs_bond_rank = []
- visit = [0 for i in range(len(frag_neighbors))]
- left,right = 0,1
- while left < right:
- cur_frag_index = bfs_rank[left]
- for index,neighbor_frag_index in enumerate(frag_neighbors[cur_frag_index]):
- if not visit[neighbor_frag_index]:
- visit[neighbor_frag_index] = 1
- bfs_rank.append(neighbor_frag_index)
- bfs_bond_rank.append(neighbour2bond[cur_frag_index][index])
- right += 1
- left += 1
- return bfs_rank
-
-def get_bfs_generate_rank(mol, center_frag_index, frags, rotatable_bonds):
- frag_neighbors,frag2atom,neighbour2bond = get_frag_neighbors(mol, frags, rotatable_bonds)
- bfs_rank = bfs(frag_neighbors,neighbour2bond,center_frag_index)
- assert len(bfs_rank) == len(frags)
- return bfs_rank
-
-def get_molecule_tree_by_rotatable_bonds(molecule_path):
- mol = Chem.MolFromMol2File(molecule_path, sanitize=False, removeHs=False)
- frags, rotatable_bonds = break_molecules_by_rotatable_bonds(mol)
- center_frag_index = get_center_frag(mol, frags)
- moltree = get_bfs_generate_rank(mol, center_frag_index, frags, rotatable_bonds)
- return moltree
-
-def get_rdkit_coords(mol, sanitize=True, remove_hs=True, maxIters=200):
- ps = AllChem.ETKDGv2()
- id = AllChem.EmbedMolecule(mol, ps)
- if id == -1:
- print('rdkit coords could not be generated without using random coords. using random coords now.')
- ps.useRandomCoords = True
- AllChem.EmbedMolecule(mol, ps)
- AllChem.MMFFOptimizeMolecule(mol, maxIters=maxIters, confId=0)
- else:
- AllChem.MMFFOptimizeMolecule(mol, maxIters=maxIters, confId=0)
-
- if remove_hs:
- mol = Chem.RemoveHs(mol, sanitize=sanitize)
-
- conf = mol.GetConformer()
- lig_coords = conf.GetPositions()
- # return torch.tensor(lig_coords, dtype=torch.float32)
- return lig_coords
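-
-# Minimal sketch of generating an unbound conformer with the helper above
-# (the SMILES string is an arbitrary example; hydrogens are added first so ETKDG works on a complete graph):
-#   mol = Chem.AddHs(Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O'))
-#   coords = get_rdkit_coords(mol, sanitize=True, remove_hs=True, maxIters=200)
-#   print(coords.shape)  # (num_heavy_atoms, 3)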
-
-
-def read_rdmol_v2(dataset_path, name):
-
- lig_path_mol2 = os.path.join(dataset_path, name, f'{name}_ligand.mol2')
- lig_path_sdf = os.path.join(dataset_path, name, f'{name}_ligand.sdf')
- m_lig = read_rdmol(lig_path_sdf, sanitize=True, remove_hs=True)
- if m_lig == None: # read mol2 file if sdf file cannot be sanitized
- m_lig = read_rdmol(lig_path_mol2, sanitize=True, remove_hs=True)
- return m_lig
-
-def read_rdmol(molecule_file, sanitize=False, calc_charges=False, remove_hs=False):
- """Load a molecule from a file of format ``.mol2`` or ``.sdf`` or ``.pdbqt`` or ``.pdb``.
-
- Parameters
- ----------
- molecule_file : str
- Path to file for storing a molecule, which can be of format ``.mol2`` or ``.sdf``
- or ``.pdbqt`` or ``.pdb``.
- sanitize : bool
- Whether sanitization is performed in initializing RDKit molecule instances. See
- https://www.rdkit.org/docs/RDKit_Book.html for details of the sanitization.
- Default to False.
- calc_charges : bool
- Whether to add Gasteiger charges via RDKit. Setting this to be True will enforce
- ``sanitize`` to be True. Default to False.
- remove_hs : bool
- Whether to remove hydrogens via RDKit. Note that removing hydrogens can be quite
- slow for large molecules. Default to False.
- use_conformation : bool
- Whether we need to extract molecular conformation from proteins and ligands.
- Default to True.
-
- Returns
- -------
- mol : rdkit.Chem.rdchem.Mol
- RDKit molecule instance for the loaded molecule.
- coordinates : np.ndarray of shape (N, 3) or None
- The 3D coordinates of atoms in the molecule. N for the number of atoms in
- the molecule. None will be returned if ``use_conformation`` is False or
- we failed to get conformation information.
- """
- if molecule_file.endswith('.mol2'):
- mol = Chem.MolFromMol2File(molecule_file, sanitize=False, removeHs=False)
- elif molecule_file.endswith('.sdf'):
- supplier = Chem.SDMolSupplier(molecule_file, sanitize=False, removeHs=False)
- mol = supplier[0]
- elif molecule_file.endswith('.pdbqt'):
- with open(molecule_file) as file:
- pdbqt_data = file.readlines()
- pdb_block = ''
- for line in pdbqt_data:
- pdb_block += '{}\n'.format(line[:66])
- mol = Chem.MolFromPDBBlock(pdb_block, sanitize=False, removeHs=False)
- elif molecule_file.endswith('.pdb'):
- mol = Chem.MolFromPDBFile(molecule_file, sanitize=False, removeHs=False)
- else:
- raise ValueError('Expect the format of the molecule_file to be '
- 'one of .mol2, .sdf, .pdbqt and .pdb, got {}'.format(molecule_file))
-
- try:
- if sanitize or calc_charges:
- Chem.SanitizeMol(mol)
-
- if calc_charges:
- # Compute Gasteiger charges on the molecule.
- try:
- AllChem.ComputeGasteigerCharges(mol)
- except:
- warnings.warn('Unable to compute charges for the molecule.')
-
- if remove_hs:
- mol = Chem.RemoveHs(mol, sanitize=sanitize)
- except:
- return None
-
- return mol
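-
-# Hypothetical usage of read_rdmol (the file names are placeholders):
-#   lig = read_rdmol('1abc_ligand.sdf', sanitize=True, remove_hs=True)
-#   if lig is None:  # sanitization failed for the sdf, fall back to mol2 as read_molecules() does
-#       lig = read_rdmol('1abc_ligand.mol2', sanitize=True, remove_hs=True)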
-
-def random_rotation_translation(translation_distance=5.0):
- rotation = Rotation.random(num=1)
- rotation_matrix = rotation.as_matrix().squeeze()
-
- t = np.random.randn(1, 3)
- t = t / np.sqrt( np.sum(t * t))
- length = np.random.uniform(low=0, high=translation_distance)
- t = t * length
- return torch.from_numpy(rotation_matrix.astype(np.float32)), torch.from_numpy(t.astype(np.float32))
\ No newline at end of file
diff --git a/spaces/jin-nin/artist/README.md b/spaces/jin-nin/artist/README.md
deleted file mode 100644
index 09f1a8c38ac6e541f98b42cab9aec0e9570af32d..0000000000000000000000000000000000000000
--- a/spaces/jin-nin/artist/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: DreamlikeArt-PhotoReal 2.0
-emoji: 📈
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.16.1
-app_file: app.py
-pinned: false
-duplicated_from: Duskfallcrew/DreamlikeArt-PhotoReal-2.0
----
----
-title: DreamlikeArt-PhotoReal 2.0
-emoji: 📈
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.16.1
-app_file: app.py
\ No newline at end of file
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageCms.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageCms.py
deleted file mode 100644
index 3a337f9f20993ab45ea5512d473a931396755846..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageCms.py
+++ /dev/null
@@ -1,1009 +0,0 @@
-# The Python Imaging Library.
-# $Id$
-
-# Optional color management support, based on Kevin Cazabon's PyCMS
-# library.
-
-# History:
-
-# 2009-03-08 fl Added to PIL.
-
-# Copyright (C) 2002-2003 Kevin Cazabon
-# Copyright (c) 2009 by Fredrik Lundh
-# Copyright (c) 2013 by Eric Soroos
-
-# See the README file for information on usage and redistribution. See
-# below for the original description.
-
-import sys
-from enum import IntEnum
-
-from . import Image
-
-try:
- from . import _imagingcms
-except ImportError as ex:
- # Allow error import for doc purposes, but error out when accessing
- # anything in core.
- from ._util import DeferredError
-
- _imagingcms = DeferredError(ex)
-
-DESCRIPTION = """
-pyCMS
-
- a Python / PIL interface to the littleCMS ICC Color Management System
- Copyright (C) 2002-2003 Kevin Cazabon
- kevin@cazabon.com
- https://www.cazabon.com
-
- pyCMS home page: https://www.cazabon.com/pyCMS
- littleCMS home page: https://www.littlecms.com
- (littleCMS is Copyright (C) 1998-2001 Marti Maria)
-
- Originally released under LGPL. Graciously donated to PIL in
- March 2009, for distribution under the standard PIL license
-
- The pyCMS.py module provides a "clean" interface between Python/PIL and
- pyCMSdll, taking care of some of the more complex handling of the direct
- pyCMSdll functions, as well as error-checking and making sure that all
- relevant data is kept together.
-
- While it is possible to call pyCMSdll functions directly, it's not highly
- recommended.
-
- Version History:
-
- 1.0.0 pil Oct 2013 Port to LCMS 2.
-
- 0.1.0 pil mod March 10, 2009
-
- Renamed display profile to proof profile. The proof
- profile is the profile of the device that is being
- simulated, not the profile of the device which is
- actually used to display/print the final simulation
- (that'd be the output profile) - also see LCMSAPI.txt
- input colorspace -> using 'renderingIntent' -> proof
- colorspace -> using 'proofRenderingIntent' -> output
- colorspace
-
- Added LCMS FLAGS support.
- Added FLAGS["SOFTPROOFING"] as default flag for
- buildProofTransform (otherwise the proof profile/intent
- would be ignored).
-
- 0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms
-
- 0.0.2 alpha Jan 6, 2002
-
- Added try/except statements around type() checks of
- potential CObjects... Python won't let you use type()
- on them, and raises a TypeError (stupid, if you ask
- me!)
-
- Added buildProofTransformFromOpenProfiles() function.
- Additional fixes in DLL, see DLL code for details.
-
- 0.0.1 alpha first public release, Dec. 26, 2002
-
- Known to-do list with current version (of Python interface, not pyCMSdll):
-
- none
-
-"""
-
-VERSION = "1.0.0 pil"
-
-# --------------------------------------------------------------------.
-
-core = _imagingcms
-
-#
-# intent/direction values
-
-
-class Intent(IntEnum):
- PERCEPTUAL = 0
- RELATIVE_COLORIMETRIC = 1
- SATURATION = 2
- ABSOLUTE_COLORIMETRIC = 3
-
-
-class Direction(IntEnum):
- INPUT = 0
- OUTPUT = 1
- PROOF = 2
-
-
-#
-# flags
-
-FLAGS = {
- "MATRIXINPUT": 1,
- "MATRIXOUTPUT": 2,
- "MATRIXONLY": (1 | 2),
- "NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot
- # Don't create prelinearization tables on precalculated transforms
- # (internal use):
- "NOPRELINEARIZATION": 16,
- "GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink)
- "NOTCACHE": 64, # Inhibit 1-pixel cache
- "NOTPRECALC": 256,
- "NULLTRANSFORM": 512, # Don't transform anyway
- "HIGHRESPRECALC": 1024, # Use more memory to give better accuracy
- "LOWRESPRECALC": 2048, # Use less memory to minimize resources
- "WHITEBLACKCOMPENSATION": 8192,
- "BLACKPOINTCOMPENSATION": 8192,
- "GAMUTCHECK": 4096, # Out of Gamut alarm
- "SOFTPROOFING": 16384, # Do softproofing
- "PRESERVEBLACK": 32768, # Black preservation
- "NODEFAULTRESOURCEDEF": 16777216, # CRD special
- "GRIDPOINTS": lambda n: (n & 0xFF) << 16, # Gridpoints
-}
-
-_MAX_FLAG = 0
-for flag in FLAGS.values():
- if isinstance(flag, int):
- _MAX_FLAG = _MAX_FLAG | flag
-
-
-# --------------------------------------------------------------------.
-# Experimental PIL-level API
-# --------------------------------------------------------------------.
-
-##
-# Profile.
-
-
-class ImageCmsProfile:
- def __init__(self, profile):
- """
- :param profile: Either a string representing a filename,
- a file like object containing a profile or a
- low-level profile object
-
- """
-
- if isinstance(profile, str):
- if sys.platform == "win32":
- profile_bytes_path = profile.encode()
- try:
- profile_bytes_path.decode("ascii")
- except UnicodeDecodeError:
- with open(profile, "rb") as f:
- self._set(core.profile_frombytes(f.read()))
- return
- self._set(core.profile_open(profile), profile)
- elif hasattr(profile, "read"):
- self._set(core.profile_frombytes(profile.read()))
- elif isinstance(profile, _imagingcms.CmsProfile):
- self._set(profile)
- else:
- msg = "Invalid type for Profile"
- raise TypeError(msg)
-
- def _set(self, profile, filename=None):
- self.profile = profile
- self.filename = filename
- self.product_name = None # profile.product_name
- self.product_info = None # profile.product_info
-
- def tobytes(self):
- """
- Returns the profile in a format suitable for embedding in
- saved images.
-
- :returns: a bytes object containing the ICC profile.
- """
-
- return core.profile_tobytes(self.profile)
-
-
-class ImageCmsTransform(Image.ImagePointHandler):
-
- """
- Transform. This can be used with the procedural API, or with the standard
- :py:func:`~PIL.Image.Image.point` method.
-
- Will return the output profile in the ``output.info['icc_profile']``.
- """
-
- def __init__(
- self,
- input,
- output,
- input_mode,
- output_mode,
- intent=Intent.PERCEPTUAL,
- proof=None,
- proof_intent=Intent.ABSOLUTE_COLORIMETRIC,
- flags=0,
- ):
- if proof is None:
- self.transform = core.buildTransform(
- input.profile, output.profile, input_mode, output_mode, intent, flags
- )
- else:
- self.transform = core.buildProofTransform(
- input.profile,
- output.profile,
- proof.profile,
- input_mode,
- output_mode,
- intent,
- proof_intent,
- flags,
- )
- # Note: inputMode and outputMode are for pyCMS compatibility only
- self.input_mode = self.inputMode = input_mode
- self.output_mode = self.outputMode = output_mode
-
- self.output_profile = output
-
- def point(self, im):
- return self.apply(im)
-
- def apply(self, im, imOut=None):
- im.load()
- if imOut is None:
- imOut = Image.new(self.output_mode, im.size, None)
- self.transform.apply(im.im.id, imOut.im.id)
- imOut.info["icc_profile"] = self.output_profile.tobytes()
- return imOut
-
- def apply_in_place(self, im):
- im.load()
- if im.mode != self.output_mode:
- msg = "mode mismatch"
- raise ValueError(msg) # wrong output mode
- self.transform.apply(im.im.id, im.im.id)
- im.info["icc_profile"] = self.output_profile.tobytes()
- return im
-
-
-def get_display_profile(handle=None):
- """
- (experimental) Fetches the profile for the current display device.
-
- :returns: ``None`` if the profile is not known.
- """
-
- if sys.platform != "win32":
- return None
-
- from . import ImageWin
-
- if isinstance(handle, ImageWin.HDC):
- profile = core.get_display_profile_win32(handle, 1)
- else:
- profile = core.get_display_profile_win32(handle or 0)
- if profile is None:
- return None
- return ImageCmsProfile(profile)
-
-
-# --------------------------------------------------------------------.
-# pyCMS compatible layer
-# --------------------------------------------------------------------.
-
-
-class PyCMSError(Exception):
-
- """(pyCMS) Exception class.
- This is used for all errors in the pyCMS API."""
-
- pass
-
-
-def profileToProfile(
- im,
- inputProfile,
- outputProfile,
- renderingIntent=Intent.PERCEPTUAL,
- outputMode=None,
- inPlace=False,
- flags=0,
-):
- """
- (pyCMS) Applies an ICC transformation to a given image, mapping from
- ``inputProfile`` to ``outputProfile``.
-
- If the input or output profiles specified are not valid filenames, a
- :exc:`PyCMSError` will be raised. If ``inPlace`` is ``True`` and
- ``outputMode != im.mode``, a :exc:`PyCMSError` will be raised.
- If an error occurs during application of the profiles,
- a :exc:`PyCMSError` will be raised.
- If ``outputMode`` is not a mode supported by the ``outputProfile`` (or by pyCMS),
- a :exc:`PyCMSError` will be raised.
-
- This function applies an ICC transformation to im from ``inputProfile``'s
- color space to ``outputProfile``'s color space using the specified rendering
- intent to decide how to handle out-of-gamut colors.
-
- ``outputMode`` can be used to specify that a color mode conversion is to
- be done using these profiles, but the specified profiles must be able
- to handle that mode. I.e., if converting im from RGB to CMYK using
- profiles, the input profile must handle RGB data, and the output
- profile must handle CMYK data.
-
- :param im: An open :py:class:`~PIL.Image.Image` object (i.e. Image.new(...)
- or Image.open(...), etc.)
- :param inputProfile: String, as a valid filename path to the ICC input
- profile you wish to use for this image, or a profile object
- :param outputProfile: String, as a valid filename path to the ICC output
- profile you wish to use for this image, or a profile object
- :param renderingIntent: Integer (0-3) specifying the rendering intent you
- wish to use for the transform
-
- ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
- ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
- ImageCms.Intent.SATURATION = 2
- ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
-
- see the pyCMS documentation for details on rendering intents and what
- they do.
- :param outputMode: A valid PIL mode for the output image (i.e. "RGB",
- "CMYK", etc.). Note: if rendering the image "inPlace", outputMode
- MUST be the same mode as the input, or omitted completely. If
- omitted, the outputMode will be the same as the mode of the input
- image (im.mode)
- :param inPlace: Boolean. If ``True``, the original image is modified in-place,
- and ``None`` is returned. If ``False`` (default), a new
- :py:class:`~PIL.Image.Image` object is returned with the transform applied.
- :param flags: Integer (0-...) specifying additional flags
- :returns: Either None or a new :py:class:`~PIL.Image.Image` object, depending on
- the value of ``inPlace``
- :exception PyCMSError:
- """
-
- if outputMode is None:
- outputMode = im.mode
-
- if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
- msg = "renderingIntent must be an integer between 0 and 3"
- raise PyCMSError(msg)
-
- if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
- msg = f"flags must be an integer between 0 and {_MAX_FLAG}"
- raise PyCMSError(msg)
-
- try:
- if not isinstance(inputProfile, ImageCmsProfile):
- inputProfile = ImageCmsProfile(inputProfile)
- if not isinstance(outputProfile, ImageCmsProfile):
- outputProfile = ImageCmsProfile(outputProfile)
- transform = ImageCmsTransform(
- inputProfile,
- outputProfile,
- im.mode,
- outputMode,
- renderingIntent,
- flags=flags,
- )
- if inPlace:
- transform.apply_in_place(im)
- imOut = None
- else:
- imOut = transform.apply(im)
- except (OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
- return imOut
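-
-# Hedged usage sketch (the profile paths are placeholders; any valid ICC profiles would do):
-#
-#   im = Image.open("photo.jpg")
-#   cmyk = profileToProfile(im, "sRGB.icc", "USWebCoatedSWOP.icc", outputMode="CMYK")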
-
-
-def getOpenProfile(profileFilename):
- """
- (pyCMS) Opens an ICC profile file.
-
- The PyCMSProfile object can be passed back into pyCMS for use in creating
- transforms and such (as in ImageCms.buildTransformFromOpenProfiles()).
-
- If ``profileFilename`` is not a valid filename for an ICC profile,
- a :exc:`PyCMSError` will be raised.
-
- :param profileFilename: String, as a valid filename path to the ICC profile
- you wish to open, or a file-like object.
- :returns: A CmsProfile class object.
- :exception PyCMSError:
- """
-
- try:
- return ImageCmsProfile(profileFilename)
- except (OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
-def buildTransform(
- inputProfile,
- outputProfile,
- inMode,
- outMode,
- renderingIntent=Intent.PERCEPTUAL,
- flags=0,
-):
- """
- (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the
- ``outputProfile``. Use applyTransform to apply the transform to a given
- image.
-
- If the input or output profiles specified are not valid filenames, a
- :exc:`PyCMSError` will be raised. If an error occurs during creation
- of the transform, a :exc:`PyCMSError` will be raised.
-
- If ``inMode`` or ``outMode`` are not a mode supported by the ``outputProfile``
- (or by pyCMS), a :exc:`PyCMSError` will be raised.
-
- This function builds and returns an ICC transform from the ``inputProfile``
- to the ``outputProfile`` using the ``renderingIntent`` to determine what to do
- with out-of-gamut colors. It will ONLY work for converting images that
- are in ``inMode`` to images that are in ``outMode`` color format (PIL mode,
- i.e. "RGB", "RGBA", "CMYK", etc.).
-
- Building the transform is a fair part of the overhead in
- ImageCms.profileToProfile(), so if you're planning on converting multiple
- images using the same input/output settings, this can save you time.
- Once you have a transform object, it can be used with
-    ImageCms.applyTransform() to convert images without the need to re-compute
- the lookup table for the transform.
-
- The reason pyCMS returns a class object rather than a handle directly
- to the transform is that it needs to keep track of the PIL input/output
- modes that the transform is meant for. These attributes are stored in
- the ``inMode`` and ``outMode`` attributes of the object (which can be
- manually overridden if you really want to, but I don't know of any
- time that would be of use, or would even work).
-
- :param inputProfile: String, as a valid filename path to the ICC input
- profile you wish to use for this transform, or a profile object
- :param outputProfile: String, as a valid filename path to the ICC output
- profile you wish to use for this transform, or a profile object
- :param inMode: String, as a valid PIL mode that the appropriate profile
- also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
- :param outMode: String, as a valid PIL mode that the appropriate profile
- also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
- :param renderingIntent: Integer (0-3) specifying the rendering intent you
- wish to use for the transform
-
- ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
- ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
- ImageCms.Intent.SATURATION = 2
- ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
-
- see the pyCMS documentation for details on rendering intents and what
- they do.
- :param flags: Integer (0-...) specifying additional flags
- :returns: A CmsTransform class object.
- :exception PyCMSError:
- """
-
- if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
- msg = "renderingIntent must be an integer between 0 and 3"
- raise PyCMSError(msg)
-
- if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
-        msg = f"flags must be an integer between 0 and {_MAX_FLAG}"
- raise PyCMSError(msg)
-
- try:
- if not isinstance(inputProfile, ImageCmsProfile):
- inputProfile = ImageCmsProfile(inputProfile)
- if not isinstance(outputProfile, ImageCmsProfile):
- outputProfile = ImageCmsProfile(outputProfile)
- return ImageCmsTransform(
- inputProfile, outputProfile, inMode, outMode, renderingIntent, flags=flags
- )
- except (OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
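
As the docstring above points out, the expensive part is building the transform, so it pays to build it once and reuse it. A short sketch under the same placeholder profile assumptions as above:

```python
from PIL import Image, ImageCms

# Build the RGB -> CMYK transform once...
xform = ImageCms.buildTransform("sRGB.icc", "USWebCoatedSWOP.icc", "RGB", "CMYK")

# ...then apply it to many images without recomputing the lookup tables.
for name in ("a.jpg", "b.jpg", "c.jpg"):  # placeholder file names
    out = ImageCms.applyTransform(Image.open(name), xform)
    out.save(name.replace(".jpg", "_cmyk.tif"))
```
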
-def buildProofTransform(
- inputProfile,
- outputProfile,
- proofProfile,
- inMode,
- outMode,
- renderingIntent=Intent.PERCEPTUAL,
- proofRenderingIntent=Intent.ABSOLUTE_COLORIMETRIC,
- flags=FLAGS["SOFTPROOFING"],
-):
- """
- (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the
- ``outputProfile``, but tries to simulate the result that would be
- obtained on the ``proofProfile`` device.
-
- If the input, output, or proof profiles specified are not valid
- filenames, a :exc:`PyCMSError` will be raised.
-
- If an error occurs during creation of the transform,
- a :exc:`PyCMSError` will be raised.
-
- If ``inMode`` or ``outMode`` are not a mode supported by the ``outputProfile``
- (or by pyCMS), a :exc:`PyCMSError` will be raised.
-
- This function builds and returns an ICC transform from the ``inputProfile``
- to the ``outputProfile``, but tries to simulate the result that would be
- obtained on the ``proofProfile`` device using ``renderingIntent`` and
- ``proofRenderingIntent`` to determine what to do with out-of-gamut
- colors. This is known as "soft-proofing". It will ONLY work for
-    converting images that are in ``inMode`` to images that are in ``outMode``
- color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.).
-
- Usage of the resulting transform object is exactly the same as with
- ImageCms.buildTransform().
-
- Proof profiling is generally used when using an output device to get a
- good idea of what the final printed/displayed image would look like on
- the ``proofProfile`` device when it's quicker and easier to use the
- output device for judging color. Generally, this means that the
- output device is a monitor, or a dye-sub printer (etc.), and the simulated
- device is something more expensive, complicated, or time consuming
- (making it difficult to make a real print for color judgement purposes).
-
- Soft-proofing basically functions by adjusting the colors on the
- output device to match the colors of the device being simulated. However,
- when the simulated device has a much wider gamut than the output
- device, you may obtain marginal results.
-
- :param inputProfile: String, as a valid filename path to the ICC input
- profile you wish to use for this transform, or a profile object
- :param outputProfile: String, as a valid filename path to the ICC output
- (monitor, usually) profile you wish to use for this transform, or a
- profile object
- :param proofProfile: String, as a valid filename path to the ICC proof
- profile you wish to use for this transform, or a profile object
- :param inMode: String, as a valid PIL mode that the appropriate profile
- also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
- :param outMode: String, as a valid PIL mode that the appropriate profile
- also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
- :param renderingIntent: Integer (0-3) specifying the rendering intent you
- wish to use for the input->proof (simulated) transform
-
- ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
- ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
- ImageCms.Intent.SATURATION = 2
- ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
-
- see the pyCMS documentation for details on rendering intents and what
- they do.
- :param proofRenderingIntent: Integer (0-3) specifying the rendering intent
- you wish to use for proof->output transform
-
- ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
- ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
- ImageCms.Intent.SATURATION = 2
- ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
-
- see the pyCMS documentation for details on rendering intents and what
- they do.
- :param flags: Integer (0-...) specifying additional flags
- :returns: A CmsTransform class object.
- :exception PyCMSError:
- """
-
- if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
- msg = "renderingIntent must be an integer between 0 and 3"
- raise PyCMSError(msg)
-
- if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
-        msg = f"flags must be an integer between 0 and {_MAX_FLAG}"
- raise PyCMSError(msg)
-
- try:
- if not isinstance(inputProfile, ImageCmsProfile):
- inputProfile = ImageCmsProfile(inputProfile)
- if not isinstance(outputProfile, ImageCmsProfile):
- outputProfile = ImageCmsProfile(outputProfile)
- if not isinstance(proofProfile, ImageCmsProfile):
- proofProfile = ImageCmsProfile(proofProfile)
- return ImageCmsTransform(
- inputProfile,
- outputProfile,
- inMode,
- outMode,
- renderingIntent,
- proofProfile,
- proofRenderingIntent,
- flags,
- )
- except (OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
-buildTransformFromOpenProfiles = buildTransform
-buildProofTransformFromOpenProfiles = buildProofTransform
-
-
-def applyTransform(im, transform, inPlace=False):
- """
- (pyCMS) Applies a transform to a given image.
-
- If ``im.mode != transform.inMode``, a :exc:`PyCMSError` is raised.
-
- If ``inPlace`` is ``True`` and ``transform.inMode != transform.outMode``, a
- :exc:`PyCMSError` is raised.
-
- If ``im.mode``, ``transform.inMode`` or ``transform.outMode`` is not
- supported by pyCMSdll or the profiles you used for the transform, a
- :exc:`PyCMSError` is raised.
-
- If an error occurs while the transform is being applied,
- a :exc:`PyCMSError` is raised.
-
- This function applies a pre-calculated transform (from
- ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles())
- to an image. The transform can be used for multiple images, saving
- considerable calculation time if doing the same conversion multiple times.
-
- If you want to modify im in-place instead of receiving a new image as
- the return value, set ``inPlace`` to ``True``. This can only be done if
- ``transform.inMode`` and ``transform.outMode`` are the same, because we can't
- change the mode in-place (the buffer sizes for some modes are
- different). The default behavior is to return a new :py:class:`~PIL.Image.Image`
- object of the same dimensions in mode ``transform.outMode``.
-
- :param im: An :py:class:`~PIL.Image.Image` object, and im.mode must be the same
- as the ``inMode`` supported by the transform.
- :param transform: A valid CmsTransform class object
- :param inPlace: Bool. If ``True``, ``im`` is modified in place and ``None`` is
- returned, if ``False``, a new :py:class:`~PIL.Image.Image` object with the
- transform applied is returned (and ``im`` is not changed). The default is
- ``False``.
- :returns: Either ``None``, or a new :py:class:`~PIL.Image.Image` object,
- depending on the value of ``inPlace``. The profile will be returned in
- the image's ``info['icc_profile']``.
- :exception PyCMSError:
- """
-
- try:
- if inPlace:
- transform.apply_in_place(im)
- imOut = None
- else:
- imOut = transform.apply(im)
- except (TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
- return imOut
-
-
-def createProfile(colorSpace, colorTemp=-1):
- """
- (pyCMS) Creates a profile.
-
- If colorSpace not in ``["LAB", "XYZ", "sRGB"]``,
- a :exc:`PyCMSError` is raised.
-
- If using LAB and ``colorTemp`` is not a positive integer,
- a :exc:`PyCMSError` is raised.
-
- If an error occurs while creating the profile,
- a :exc:`PyCMSError` is raised.
-
- Use this function to create common profiles on-the-fly instead of
- having to supply a profile on disk and knowing the path to it. It
- returns a normal CmsProfile object that can be passed to
- ImageCms.buildTransformFromOpenProfiles() to create a transform to apply
- to images.
-
- :param colorSpace: String, the color space of the profile you wish to
- create.
- Currently only "LAB", "XYZ", and "sRGB" are supported.
- :param colorTemp: Positive integer for the white point for the profile, in
- degrees Kelvin (i.e. 5000, 6500, 9600, etc.). The default is for D50
- illuminant if omitted (5000k). colorTemp is ONLY applied to LAB
- profiles, and is ignored for XYZ and sRGB.
- :returns: A CmsProfile class object
- :exception PyCMSError:
- """
-
- if colorSpace not in ["LAB", "XYZ", "sRGB"]:
- msg = (
- f"Color space not supported for on-the-fly profile creation ({colorSpace})"
- )
- raise PyCMSError(msg)
-
- if colorSpace == "LAB":
- try:
- colorTemp = float(colorTemp)
- except (TypeError, ValueError) as e:
- msg = f'Color temperature must be numeric, "{colorTemp}" not valid'
- raise PyCMSError(msg) from e
-
- try:
- return core.createProfile(colorSpace, colorTemp)
- except (TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
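
A sketch of the on-the-fly profile creation described above, paired with `buildTransformFromOpenProfiles` (an alias of `buildTransform` in this module); the input image path is a placeholder.

```python
from PIL import Image, ImageCms

srgb = ImageCms.createProfile("sRGB")
lab = ImageCms.createProfile("LAB", colorTemp=6500)  # colorTemp only applies to LAB

rgb_to_lab = ImageCms.buildTransformFromOpenProfiles(srgb, lab, "RGB", "LAB")
lab_im = ImageCms.applyTransform(Image.open("photo.jpg"), rgb_to_lab)
```
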
-def getProfileName(profile):
- """
-
- (pyCMS) Gets the internal product name for the given profile.
-
- If ``profile`` isn't a valid CmsProfile object or filename to a profile,
-    a :exc:`PyCMSError` is raised. If an error occurs while trying
- to obtain the name tag, a :exc:`PyCMSError` is raised.
-
- Use this function to obtain the INTERNAL name of the profile (stored
- in an ICC tag in the profile itself), usually the one used when the
- profile was originally created. Sometimes this tag also contains
- additional information supplied by the creator.
-
- :param profile: EITHER a valid CmsProfile object, OR a string of the
- filename of an ICC profile.
- :returns: A string containing the internal name of the profile as stored
- in an ICC tag.
- :exception PyCMSError:
- """
-
- try:
- # add an extra newline to preserve pyCMS compatibility
- if not isinstance(profile, ImageCmsProfile):
- profile = ImageCmsProfile(profile)
- # do it in python, not c.
- # // name was "%s - %s" (model, manufacturer) || Description ,
- # // but if the Model and Manufacturer were the same or the model
- # // was long, Just the model, in 1.x
- model = profile.profile.model
- manufacturer = profile.profile.manufacturer
-
- if not (model or manufacturer):
- return (profile.profile.profile_description or "") + "\n"
- if not manufacturer or len(model) > 30:
- return model + "\n"
- return f"{model} - {manufacturer}\n"
-
- except (AttributeError, OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
-def getProfileInfo(profile):
- """
- (pyCMS) Gets the internal product information for the given profile.
-
- If ``profile`` isn't a valid CmsProfile object or filename to a profile,
- a :exc:`PyCMSError` is raised.
-
- If an error occurs while trying to obtain the info tag,
- a :exc:`PyCMSError` is raised.
-
- Use this function to obtain the information stored in the profile's
- info tag. This often contains details about the profile, and how it
- was created, as supplied by the creator.
-
- :param profile: EITHER a valid CmsProfile object, OR a string of the
- filename of an ICC profile.
- :returns: A string containing the internal profile information stored in
- an ICC tag.
- :exception PyCMSError:
- """
-
- try:
- if not isinstance(profile, ImageCmsProfile):
- profile = ImageCmsProfile(profile)
- # add an extra newline to preserve pyCMS compatibility
- # Python, not C. the white point bits weren't working well,
- # so skipping.
- # info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint
- description = profile.profile.profile_description
- cpright = profile.profile.copyright
- arr = []
- for elt in (description, cpright):
- if elt:
- arr.append(elt)
- return "\r\n\r\n".join(arr) + "\r\n\r\n"
-
- except (AttributeError, OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
-def getProfileCopyright(profile):
- """
- (pyCMS) Gets the copyright for the given profile.
-
- If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
- :exc:`PyCMSError` is raised.
-
- If an error occurs while trying to obtain the copyright tag,
- a :exc:`PyCMSError` is raised.
-
- Use this function to obtain the information stored in the profile's
- copyright tag.
-
- :param profile: EITHER a valid CmsProfile object, OR a string of the
- filename of an ICC profile.
- :returns: A string containing the internal profile information stored in
- an ICC tag.
- :exception PyCMSError:
- """
- try:
- # add an extra newline to preserve pyCMS compatibility
- if not isinstance(profile, ImageCmsProfile):
- profile = ImageCmsProfile(profile)
- return (profile.profile.copyright or "") + "\n"
- except (AttributeError, OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
-def getProfileManufacturer(profile):
- """
- (pyCMS) Gets the manufacturer for the given profile.
-
- If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
- :exc:`PyCMSError` is raised.
-
- If an error occurs while trying to obtain the manufacturer tag, a
- :exc:`PyCMSError` is raised.
-
- Use this function to obtain the information stored in the profile's
- manufacturer tag.
-
- :param profile: EITHER a valid CmsProfile object, OR a string of the
- filename of an ICC profile.
- :returns: A string containing the internal profile information stored in
- an ICC tag.
- :exception PyCMSError:
- """
- try:
- # add an extra newline to preserve pyCMS compatibility
- if not isinstance(profile, ImageCmsProfile):
- profile = ImageCmsProfile(profile)
- return (profile.profile.manufacturer or "") + "\n"
- except (AttributeError, OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
-def getProfileModel(profile):
- """
- (pyCMS) Gets the model for the given profile.
-
- If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
- :exc:`PyCMSError` is raised.
-
- If an error occurs while trying to obtain the model tag,
- a :exc:`PyCMSError` is raised.
-
- Use this function to obtain the information stored in the profile's
- model tag.
-
- :param profile: EITHER a valid CmsProfile object, OR a string of the
- filename of an ICC profile.
- :returns: A string containing the internal profile information stored in
- an ICC tag.
- :exception PyCMSError:
- """
-
- try:
- # add an extra newline to preserve pyCMS compatibility
- if not isinstance(profile, ImageCmsProfile):
- profile = ImageCmsProfile(profile)
- return (profile.profile.model or "") + "\n"
- except (AttributeError, OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
-def getProfileDescription(profile):
- """
- (pyCMS) Gets the description for the given profile.
-
- If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
- :exc:`PyCMSError` is raised.
-
- If an error occurs while trying to obtain the description tag,
- a :exc:`PyCMSError` is raised.
-
- Use this function to obtain the information stored in the profile's
- description tag.
-
- :param profile: EITHER a valid CmsProfile object, OR a string of the
- filename of an ICC profile.
- :returns: A string containing the internal profile information stored in an
- ICC tag.
- :exception PyCMSError:
- """
-
- try:
- # add an extra newline to preserve pyCMS compatibility
- if not isinstance(profile, ImageCmsProfile):
- profile = ImageCmsProfile(profile)
- return (profile.profile.profile_description or "") + "\n"
- except (AttributeError, OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
-def getDefaultIntent(profile):
- """
- (pyCMS) Gets the default intent name for the given profile.
-
- If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
- :exc:`PyCMSError` is raised.
-
- If an error occurs while trying to obtain the default intent, a
- :exc:`PyCMSError` is raised.
-
- Use this function to determine the default (and usually best optimized)
- rendering intent for this profile. Most profiles support multiple
- rendering intents, but are intended mostly for one type of conversion.
- If you wish to use a different intent than returned, use
- ImageCms.isIntentSupported() to verify it will work first.
-
- :param profile: EITHER a valid CmsProfile object, OR a string of the
- filename of an ICC profile.
- :returns: Integer 0-3 specifying the default rendering intent for this
- profile.
-
- ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
- ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
- ImageCms.Intent.SATURATION = 2
- ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
-
- see the pyCMS documentation for details on rendering intents and what
- they do.
- :exception PyCMSError:
- """
-
- try:
- if not isinstance(profile, ImageCmsProfile):
- profile = ImageCmsProfile(profile)
- return profile.profile.rendering_intent
- except (AttributeError, OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
-def isIntentSupported(profile, intent, direction):
- """
- (pyCMS) Checks if a given intent is supported.
-
- Use this function to verify that you can use your desired
- ``intent`` with ``profile``, and that ``profile`` can be used for the
- input/output/proof profile as you desire.
-
-    Some profiles are created specifically for one "direction", and cannot
- be used for others. Some profiles can only be used for certain
- rendering intents, so it's best to either verify this before trying
- to create a transform with them (using this function), or catch the
- potential :exc:`PyCMSError` that will occur if they don't
- support the modes you select.
-
- :param profile: EITHER a valid CmsProfile object, OR a string of the
- filename of an ICC profile.
- :param intent: Integer (0-3) specifying the rendering intent you wish to
- use with this profile
-
- ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
- ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
- ImageCms.Intent.SATURATION = 2
- ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
-
- see the pyCMS documentation for details on rendering intents and what
- they do.
- :param direction: Integer specifying if the profile is to be used for
- input, output, or proof
-
- INPUT = 0 (or use ImageCms.Direction.INPUT)
- OUTPUT = 1 (or use ImageCms.Direction.OUTPUT)
- PROOF = 2 (or use ImageCms.Direction.PROOF)
-
- :returns: 1 if the intent/direction are supported, -1 if they are not.
- :exception PyCMSError:
- """
-
- try:
- if not isinstance(profile, ImageCmsProfile):
- profile = ImageCmsProfile(profile)
- # FIXME: I get different results for the same data w. different
- # compilers. Bug in LittleCMS or in the binding?
- if profile.profile.is_intent_supported(intent, direction):
- return 1
- else:
- return -1
- except (AttributeError, OSError, TypeError, ValueError) as v:
- raise PyCMSError(v) from v
-
-
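
A small sketch tying together `getOpenProfile`, `getDefaultIntent` and `isIntentSupported` as described above; the profile paths are placeholders, and the saturation intent is only an example of an intent worth probing before use.

```python
from PIL import ImageCms

printer = ImageCms.getOpenProfile("printer.icc")  # placeholder path
intent = ImageCms.getDefaultIntent(printer)

# isIntentSupported returns 1 when the intent/direction pair is usable, -1 otherwise.
if ImageCms.isIntentSupported(printer, ImageCms.Intent.SATURATION,
                              ImageCms.Direction.OUTPUT) == 1:
    intent = ImageCms.Intent.SATURATION

xform = ImageCms.buildTransform("sRGB.icc", printer, "RGB", "CMYK",
                                renderingIntent=intent)
```
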
-def versions():
- """
- (pyCMS) Fetches versions.
- """
-
- return VERSION, core.littlecms_version, sys.version.split()[0], Image.__version__
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/utils/_importers.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/utils/_importers.py
deleted file mode 100644
index ed83b1a72dfb172841df85f2bccff5ce88a744d3..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/utils/_importers.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from types import ModuleType
-from packaging.version import Version
-from importlib.metadata import version as importlib_version
-
-
-def import_vegafusion() -> ModuleType:
- min_version = "1.4.0"
- try:
- version = importlib_version("vegafusion")
- if Version(version) < Version(min_version):
- raise ImportError(
- f"The vegafusion package must be version {min_version} or greater. "
- f"Found version {version}"
- )
- import vegafusion as vf # type: ignore
-
- return vf
- except ImportError as err:
- raise ImportError(
- 'The "vegafusion" data transformer and chart.transformed_data feature requires\n'
- f"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\n"
- "These can be installed with pip using:\n"
- f' pip install "vegafusion[embed]>={min_version}"\n'
- "Or with conda using:\n"
- f' conda install -c conda-forge "vegafusion-python-embed>={min_version}" '
- f'"vegafusion>={min_version}"\n\n'
- f"ImportError: {err.args[0]}"
- ) from err
-
-
-def import_vl_convert() -> ModuleType:
- min_version = "0.13.0"
- try:
- version = importlib_version("vl-convert-python")
- if Version(version) < Version(min_version):
- raise ImportError(
- f"The vl-convert-python package must be version {min_version} or greater. "
- f"Found version {version}"
- )
- import vl_convert as vlc
-
- return vlc
- except ImportError as err:
- raise ImportError(
- f"The vl-convert Vega-Lite compiler and image export feature requires\n"
- f"version {min_version} or greater of the 'vl-convert-python' package. \n"
- f"This can be installed with pip using:\n"
- f' pip install "vl-convert-python>={min_version}"\n'
- "or conda:\n"
- f' conda install -c conda-forge "vl-convert-python>={min_version}"\n\n'
- f"ImportError: {err.args[0]}"
- ) from err
-
-
-def import_pyarrow_interchange() -> ModuleType:
- min_version = "11.0.0"
- try:
- version = importlib_version("pyarrow")
-
- if Version(version) < Version(min_version):
- raise ImportError(
- f"The pyarrow package must be version {min_version} or greater. "
- f"Found version {version}"
- )
- import pyarrow.interchange as pi
-
- return pi
- except ImportError as err:
- raise ImportError(
- f"Usage of the DataFrame Interchange Protocol requires\n"
- f"version {min_version} or greater of the pyarrow package. \n"
- f"This can be installed with pip using:\n"
- f' pip install "pyarrow>={min_version}"\n'
- "or conda:\n"
- f' conda install -c conda-forge "pyarrow>={min_version}"\n\n'
- f"ImportError: {err.args[0]}"
- ) from err
-
-
-def pyarrow_available() -> bool:
- try:
- import_pyarrow_interchange()
- return True
- except ImportError:
- return False
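
The three helpers above share one pattern: check the installed distribution's version via `importlib.metadata` before importing the module, and raise a descriptive `ImportError` otherwise. A generic sketch of that pattern (the `import_optional` helper is an illustration, not part of the package):

```python
from types import ModuleType
from importlib import import_module
from importlib.metadata import version as importlib_version

from packaging.version import Version


def import_optional(dist_name: str, module_name: str, min_version: str) -> ModuleType:
    """Import ``module_name`` only if ``dist_name`` is installed at ``min_version`` or newer."""
    found = importlib_version(dist_name)  # raises PackageNotFoundError if not installed
    if Version(found) < Version(min_version):
        raise ImportError(f"{dist_name}>={min_version} is required, found {found}")
    return import_module(module_name)


# e.g. vlc = import_optional("vl-convert-python", "vl_convert", "0.13.0")
```
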
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/utils/core.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/utils/core.py
deleted file mode 100644
index f2ce1d4bc0741e67d967879427aad7b42baed44e..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/utils/core.py
+++ /dev/null
@@ -1,841 +0,0 @@
-"""
-Utility routines
-"""
-from collections.abc import Mapping, MutableMapping
-from copy import deepcopy
-import json
-import itertools
-import re
-import sys
-import traceback
-import warnings
-from typing import (
- Callable,
- TypeVar,
- Any,
- Union,
- Dict,
- Optional,
- Tuple,
- Sequence,
- Type,
- cast,
-)
-from types import ModuleType
-
-import jsonschema
-import pandas as pd
-import numpy as np
-from pandas.api.types import infer_dtype
-
-from altair.utils.schemapi import SchemaBase
-from altair.utils._dfi_types import Column, DtypeKind, DataFrame as DfiDataFrame
-
-if sys.version_info >= (3, 10):
- from typing import ParamSpec
-else:
- from typing_extensions import ParamSpec
-
-from typing import Literal, Protocol, TYPE_CHECKING
-
-if TYPE_CHECKING:
- from pandas.core.interchange.dataframe_protocol import Column as PandasColumn
-
-_V = TypeVar("_V")
-_P = ParamSpec("_P")
-
-
-class _DataFrameLike(Protocol):
- def __dataframe__(self, *args, **kwargs) -> DfiDataFrame:
- ...
-
-
-TYPECODE_MAP = {
- "ordinal": "O",
- "nominal": "N",
- "quantitative": "Q",
- "temporal": "T",
- "geojson": "G",
-}
-
-INV_TYPECODE_MAP = {v: k for k, v in TYPECODE_MAP.items()}
-
-
-# aggregates from vega-lite version 4.6.0
-AGGREGATES = [
- "argmax",
- "argmin",
- "average",
- "count",
- "distinct",
- "max",
- "mean",
- "median",
- "min",
- "missing",
- "product",
- "q1",
- "q3",
- "ci0",
- "ci1",
- "stderr",
- "stdev",
- "stdevp",
- "sum",
- "valid",
- "values",
- "variance",
- "variancep",
-]
-
-# window aggregates from vega-lite version 4.6.0
-WINDOW_AGGREGATES = [
- "row_number",
- "rank",
- "dense_rank",
- "percent_rank",
- "cume_dist",
- "ntile",
- "lag",
- "lead",
- "first_value",
- "last_value",
- "nth_value",
-]
-
-# timeUnits from vega-lite version 4.17.0
-TIMEUNITS = [
- "year",
- "quarter",
- "month",
- "week",
- "day",
- "dayofyear",
- "date",
- "hours",
- "minutes",
- "seconds",
- "milliseconds",
- "yearquarter",
- "yearquartermonth",
- "yearmonth",
- "yearmonthdate",
- "yearmonthdatehours",
- "yearmonthdatehoursminutes",
- "yearmonthdatehoursminutesseconds",
- "yearweek",
- "yearweekday",
- "yearweekdayhours",
- "yearweekdayhoursminutes",
- "yearweekdayhoursminutesseconds",
- "yeardayofyear",
- "quartermonth",
- "monthdate",
- "monthdatehours",
- "monthdatehoursminutes",
- "monthdatehoursminutesseconds",
- "weekday",
- "weeksdayhours",
- "weekdayhoursminutes",
- "weekdayhoursminutesseconds",
- "dayhours",
- "dayhoursminutes",
- "dayhoursminutesseconds",
- "hoursminutes",
- "hoursminutesseconds",
- "minutesseconds",
- "secondsmilliseconds",
- "utcyear",
- "utcquarter",
- "utcmonth",
- "utcweek",
- "utcday",
- "utcdayofyear",
- "utcdate",
- "utchours",
- "utcminutes",
- "utcseconds",
- "utcmilliseconds",
- "utcyearquarter",
- "utcyearquartermonth",
- "utcyearmonth",
- "utcyearmonthdate",
- "utcyearmonthdatehours",
- "utcyearmonthdatehoursminutes",
- "utcyearmonthdatehoursminutesseconds",
- "utcyearweek",
- "utcyearweekday",
- "utcyearweekdayhours",
- "utcyearweekdayhoursminutes",
- "utcyearweekdayhoursminutesseconds",
- "utcyeardayofyear",
- "utcquartermonth",
- "utcmonthdate",
- "utcmonthdatehours",
- "utcmonthdatehoursminutes",
- "utcmonthdatehoursminutesseconds",
- "utcweekday",
- "utcweeksdayhours",
- "utcweekdayhoursminutes",
- "utcweekdayhoursminutesseconds",
- "utcdayhours",
- "utcdayhoursminutes",
- "utcdayhoursminutesseconds",
- "utchoursminutes",
- "utchoursminutesseconds",
- "utcminutesseconds",
- "utcsecondsmilliseconds",
-]
-
-
-_InferredVegaLiteType = Literal["ordinal", "nominal", "quantitative", "temporal"]
-
-
-def infer_vegalite_type(
- data: object,
-) -> Union[_InferredVegaLiteType, Tuple[_InferredVegaLiteType, list]]:
- """
- From an array-like input, infer the correct vega typecode
- ('ordinal', 'nominal', 'quantitative', or 'temporal')
-
- Parameters
- ----------
- data: object
- """
- typ = infer_dtype(data, skipna=False)
-
- if typ in [
- "floating",
- "mixed-integer-float",
- "integer",
- "mixed-integer",
- "complex",
- ]:
- return "quantitative"
- elif typ == "categorical" and hasattr(data, "cat") and data.cat.ordered:
- return ("ordinal", data.cat.categories.tolist())
- elif typ in ["string", "bytes", "categorical", "boolean", "mixed", "unicode"]:
- return "nominal"
- elif typ in [
- "datetime",
- "datetime64",
- "timedelta",
- "timedelta64",
- "date",
- "time",
- "period",
- ]:
- return "temporal"
- else:
- warnings.warn(
- "I don't know how to infer vegalite type from '{}'. "
- "Defaulting to nominal.".format(typ),
- stacklevel=1,
- )
- return "nominal"
-
-
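
A few illustrative calls against the rules implemented above (expected return values in the comments):

```python
import pandas as pd

from altair.utils.core import infer_vegalite_type

infer_vegalite_type(pd.Series([1, 2.5, 3]))                     # 'quantitative'
infer_vegalite_type(pd.Series(["a", "b", "a"]))                 # 'nominal'
infer_vegalite_type(pd.to_datetime(pd.Series(["2021-01-01"])))  # 'temporal'

ordered = pd.Series(pd.Categorical(["lo", "hi"], categories=["lo", "hi"], ordered=True))
infer_vegalite_type(ordered)  # ('ordinal', ['lo', 'hi'])
```
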
-def merge_props_geom(feat: dict) -> dict:
- """
- Merge properties with geometry
- * Overwrites 'type' and 'geometry' entries if existing
- """
-
- geom = {k: feat[k] for k in ("type", "geometry")}
- try:
- feat["properties"].update(geom)
- props_geom = feat["properties"]
- except (AttributeError, KeyError):
- # AttributeError when 'properties' equals None
- # KeyError when 'properties' is non-existing
- props_geom = geom
-
- return props_geom
-
-
-def sanitize_geo_interface(geo: MutableMapping) -> dict:
- """Santize a geo_interface to prepare it for serialization.
-
- * Make a copy
- * Convert type array or _Array to list
- * Convert tuples to lists (using json.loads/dumps)
- * Merge properties with geometry
- """
-
- geo = deepcopy(geo)
-
- # convert type _Array or array to list
- for key in geo.keys():
- if str(type(geo[key]).__name__).startswith(("_Array", "array")):
- geo[key] = geo[key].tolist()
-
- # convert (nested) tuples to lists
- geo_dct: dict = json.loads(json.dumps(geo))
-
- # sanitize features
- if geo_dct["type"] == "FeatureCollection":
- geo_dct = geo_dct["features"]
- if len(geo_dct) > 0:
- for idx, feat in enumerate(geo_dct):
- geo_dct[idx] = merge_props_geom(feat)
- elif geo_dct["type"] == "Feature":
- geo_dct = merge_props_geom(geo_dct)
- else:
- geo_dct = {"type": "Feature", "geometry": geo_dct}
-
- return geo_dct
-
-
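
A small illustration of the two helpers above on a minimal FeatureCollection-shaped mapping (values are made up):

```python
from altair.utils.core import sanitize_geo_interface

geo = {
    "type": "FeatureCollection",
    "features": [
        {
            "type": "Feature",
            "geometry": {"type": "Point", "coordinates": (0.0, 1.0)},
            "properties": {"name": "origin"},
        }
    ],
}

sanitize_geo_interface(geo)
# [{'name': 'origin', 'type': 'Feature',
#   'geometry': {'type': 'Point', 'coordinates': [0.0, 1.0]}}]
```
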
-def numpy_is_subtype(dtype: Any, subtype: Any) -> bool:
- try:
- return np.issubdtype(dtype, subtype)
- except (NotImplementedError, TypeError):
- return False
-
-
-def sanitize_dataframe(df: pd.DataFrame) -> pd.DataFrame: # noqa: C901
- """Sanitize a DataFrame to prepare it for serialization.
-
- * Make a copy
- * Convert RangeIndex columns to strings
- * Raise ValueError if column names are not strings
- * Raise ValueError if it has a hierarchical index.
- * Convert categoricals to strings.
- * Convert np.bool_ dtypes to Python bool objects
- * Convert np.int dtypes to Python int objects
- * Convert floats to objects and replace NaNs/infs with None.
- * Convert DateTime dtypes into appropriate string representations
- * Convert Nullable integers to objects and replace NaN with None
- * Convert Nullable boolean to objects and replace NaN with None
- * convert dedicated string column to objects and replace NaN with None
- * Raise a ValueError for TimeDelta dtypes
- """
- df = df.copy()
-
- if isinstance(df.columns, pd.RangeIndex):
- df.columns = df.columns.astype(str)
-
- for col_name in df.columns:
- if not isinstance(col_name, str):
- raise ValueError(
- "Dataframe contains invalid column name: {0!r}. "
- "Column names must be strings".format(col_name)
- )
-
- if isinstance(df.index, pd.MultiIndex):
- raise ValueError("Hierarchical indices not supported")
- if isinstance(df.columns, pd.MultiIndex):
- raise ValueError("Hierarchical indices not supported")
-
- def to_list_if_array(val):
- if isinstance(val, np.ndarray):
- return val.tolist()
- else:
- return val
-
- for dtype_item in df.dtypes.items():
- # We know that the column names are strings from the isinstance check
- # further above but mypy thinks it is of type Hashable and therefore does not
- # let us assign it to the col_name variable which is already of type str.
- col_name = cast(str, dtype_item[0])
- dtype = dtype_item[1]
- dtype_name = str(dtype)
- if dtype_name == "category":
- # Work around bug in to_json for categorical types in older versions
- # of pandas as they do not properly convert NaN values to null in to_json.
- # We can probably remove this part once we require Pandas >= 1.0
- col = df[col_name].astype(object)
- df[col_name] = col.where(col.notnull(), None)
- elif dtype_name == "string":
- # dedicated string datatype (since 1.0)
- # https://pandas.pydata.org/pandas-docs/version/1.0.0/whatsnew/v1.0.0.html#dedicated-string-data-type
- col = df[col_name].astype(object)
- df[col_name] = col.where(col.notnull(), None)
- elif dtype_name == "bool":
- # convert numpy bools to objects; np.bool is not JSON serializable
- df[col_name] = df[col_name].astype(object)
- elif dtype_name == "boolean":
- # dedicated boolean datatype (since 1.0)
- # https://pandas.io/docs/user_guide/boolean.html
- col = df[col_name].astype(object)
- df[col_name] = col.where(col.notnull(), None)
- elif dtype_name.startswith("datetime") or dtype_name.startswith("timestamp"):
- # Convert datetimes to strings. This needs to be a full ISO string
- # with time, which is why we cannot use ``col.astype(str)``.
- # This is because Javascript parses date-only times in UTC, but
- # parses full ISO-8601 dates as local time, and dates in Vega and
- # Vega-Lite are displayed in local time by default.
- # (see https://github.com/altair-viz/altair/issues/1027)
- df[col_name] = (
- df[col_name].apply(lambda x: x.isoformat()).replace("NaT", "")
- )
- elif dtype_name.startswith("timedelta"):
- raise ValueError(
- 'Field "{col_name}" has type "{dtype}" which is '
- "not supported by Altair. Please convert to "
- "either a timestamp or a numerical value."
- "".format(col_name=col_name, dtype=dtype)
- )
- elif dtype_name.startswith("geometry"):
- # geopandas >=0.6.1 uses the dtype geometry. Continue here
- # otherwise it will give an error on np.issubdtype(dtype, np.integer)
- continue
- elif dtype_name in {
- "Int8",
- "Int16",
- "Int32",
- "Int64",
- "UInt8",
- "UInt16",
- "UInt32",
- "UInt64",
- "Float32",
- "Float64",
-        }:  # nullable integer datatypes (since pandas 0.24) and nullable float datatypes (since pandas 1.2.0)
- # https://pandas.pydata.org/pandas-docs/version/0.25/whatsnew/v0.24.0.html#optional-integer-na-support
- col = df[col_name].astype(object)
- df[col_name] = col.where(col.notnull(), None)
- elif numpy_is_subtype(dtype, np.integer):
- # convert integers to objects; np.int is not JSON serializable
- df[col_name] = df[col_name].astype(object)
- elif numpy_is_subtype(dtype, np.floating):
- # For floats, convert to Python float: np.float is not JSON serializable
- # Also convert NaN/inf values to null, as they are not JSON serializable
- col = df[col_name]
- bad_values = col.isnull() | np.isinf(col)
- df[col_name] = col.astype(object).where(~bad_values, None)
- elif dtype == object:
- # Convert numpy arrays saved as objects to lists
- # Arrays are not JSON serializable
- col = df[col_name].apply(to_list_if_array, convert_dtype=False)
- df[col_name] = col.where(col.notnull(), None)
- return df
-
-
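
A compact example of the conversions listed in the docstring above:

```python
import numpy as np
import pandas as pd

from altair.utils.core import sanitize_dataframe

df = pd.DataFrame(
    {
        "num": [1.5, np.nan, np.inf],             # floats: NaN/inf become None
        "cat": pd.Categorical(["a", None, "b"]),  # categoricals become objects, NaN -> None
        "when": pd.to_datetime(["2021-01-01", "2021-06-01", "2021-12-31"]),
    }
)

clean = sanitize_dataframe(df)
clean["num"].tolist()   # [1.5, None, None]
clean["cat"].tolist()   # ['a', None, 'b']
clean["when"].tolist()  # ['2021-01-01T00:00:00', '2021-06-01T00:00:00', '2021-12-31T00:00:00']
```
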
-def sanitize_arrow_table(pa_table):
- """Sanitize arrow table for JSON serialization"""
- import pyarrow as pa
- import pyarrow.compute as pc
-
- arrays = []
- schema = pa_table.schema
- for name in schema.names:
- array = pa_table[name]
- dtype = schema.field(name).type
- if str(dtype).startswith("timestamp"):
- arrays.append(pc.strftime(array))
- elif str(dtype).startswith("duration"):
- raise ValueError(
- 'Field "{col_name}" has type "{dtype}" which is '
- "not supported by Altair. Please convert to "
- "either a timestamp or a numerical value."
- "".format(col_name=name, dtype=dtype)
- )
- else:
- arrays.append(array)
-
- return pa.Table.from_arrays(arrays, names=schema.names)
-
-
-def parse_shorthand(
- shorthand: Union[Dict[str, Any], str],
- data: Optional[Union[pd.DataFrame, _DataFrameLike]] = None,
- parse_aggregates: bool = True,
- parse_window_ops: bool = False,
- parse_timeunits: bool = True,
- parse_types: bool = True,
-) -> Dict[str, Any]:
- """General tool to parse shorthand values
-
- These are of the form:
-
- - "col_name"
- - "col_name:O"
- - "average(col_name)"
- - "average(col_name):O"
-
- Optionally, a dataframe may be supplied, from which the type
- will be inferred if not specified in the shorthand.
-
- Parameters
- ----------
- shorthand : dict or string
- The shorthand representation to be parsed
- data : DataFrame, optional
- If specified and of type DataFrame, then use these values to infer the
- column type if not provided by the shorthand.
- parse_aggregates : boolean
- If True (default), then parse aggregate functions within the shorthand.
- parse_window_ops : boolean
- If True then parse window operations within the shorthand (default:False)
- parse_timeunits : boolean
- If True (default), then parse timeUnits from within the shorthand
- parse_types : boolean
- If True (default), then parse typecodes within the shorthand
-
- Returns
- -------
- attrs : dict
- a dictionary of attributes extracted from the shorthand
-
- Examples
- --------
- >>> data = pd.DataFrame({'foo': ['A', 'B', 'A', 'B'],
- ... 'bar': [1, 2, 3, 4]})
-
- >>> parse_shorthand('name') == {'field': 'name'}
- True
-
- >>> parse_shorthand('name:Q') == {'field': 'name', 'type': 'quantitative'}
- True
-
- >>> parse_shorthand('average(col)') == {'aggregate': 'average', 'field': 'col'}
- True
-
- >>> parse_shorthand('foo:O') == {'field': 'foo', 'type': 'ordinal'}
- True
-
- >>> parse_shorthand('min(foo):Q') == {'aggregate': 'min', 'field': 'foo', 'type': 'quantitative'}
- True
-
- >>> parse_shorthand('month(col)') == {'field': 'col', 'timeUnit': 'month', 'type': 'temporal'}
- True
-
- >>> parse_shorthand('year(col):O') == {'field': 'col', 'timeUnit': 'year', 'type': 'ordinal'}
- True
-
- >>> parse_shorthand('foo', data) == {'field': 'foo', 'type': 'nominal'}
- True
-
- >>> parse_shorthand('bar', data) == {'field': 'bar', 'type': 'quantitative'}
- True
-
- >>> parse_shorthand('bar:O', data) == {'field': 'bar', 'type': 'ordinal'}
- True
-
- >>> parse_shorthand('sum(bar)', data) == {'aggregate': 'sum', 'field': 'bar', 'type': 'quantitative'}
- True
-
- >>> parse_shorthand('count()', data) == {'aggregate': 'count', 'type': 'quantitative'}
- True
- """
- from altair.utils._importers import pyarrow_available
-
- if not shorthand:
- return {}
-
- valid_typecodes = list(TYPECODE_MAP) + list(INV_TYPECODE_MAP)
-
- units = {
-        "field": "(?P<field>.*)",
".join(result)
- if json_response.get('ok', None):
- for model in json_response['ocurrences']:
- if cont < 20:
- model_name = str(model.get('name', 'N/A')).strip()
- model_url = model.get('url', 'N/A')
- epoch = model.get('epoch', 'N/A')
- sr = model.get('sr', 'N/A')
- line = f"""|{model_name}|{model_url}|{epoch}|{sr}|
- """
- result.append(line)
- yield "".join(result)
- cont += 1
-
-def update_tts_methods_voice(select_value):
- if select_value == "Edge-tts":
- return gr.Dropdown.update(choices=EDGE_VOICES, visible=True, value="es-CO-GonzaloNeural-Male"), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False),gr.Radio.update(visible=False)
- elif select_value == "Bark-tts":
- return gr.Dropdown.update(choices=BARK_VOICES, visible=True), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False),gr.Radio.update(visible=False)
- elif select_value == 'ElevenLabs':
- return gr.Dropdown.update(choices=ELEVENLABS_VOICES_NAMES, visible=True, value="Bella"), gr.Markdown.update(visible=True), gr.Textbox.update(visible=True), gr.Radio.update(visible=False)
- elif select_value == 'CoquiTTS':
- return gr.Dropdown.update(visible=False), gr.Markdown.update(visible=False), gr.Textbox.update(visible=False), gr.Radio.update(visible=True)
diff --git a/spaces/jvcanavarro/emotion-recognition/app.py b/spaces/jvcanavarro/emotion-recognition/app.py
deleted file mode 100644
index 87f7a3af6c7e31aff218671a7ef7c4b0f2364eab..0000000000000000000000000000000000000000
--- a/spaces/jvcanavarro/emotion-recognition/app.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import gradio as gr
-import glob
-
-from src.dnn import LSTM, CNN
-from src.utilities import get_feature_vector_from_mfcc
-
-CLASS_LABELS = ("Neutral", "Angry", "Happy", "Sad")
-INPUT_SHAPE = (198, 39)
-FILES = glob.glob("egs/*")
-EXAMPLES = [[f, "LSTM"] for f in FILES] + [[f, "CNN"] for f in FILES]
-
-
-def handler(audio_path, model_type):
- params = dict(input_shape=INPUT_SHAPE, num_classes=len(CLASS_LABELS))
-
- arch, weights = (LSTM, "lstm.h5") if model_type == 0 else (CNN, "cnn.h5")
- model = arch(**params)
- model.restore_model(f"weights/{weights}")
-
- pred = model.predict_one(get_feature_vector_from_mfcc(audio_path, flatten=False))
- return f"Detected Emotion: {CLASS_LABELS[pred]}"
-
-
-params = dict(
- description="Predicts user's emotion from recorded **speech** using `Deep Neural Network` models (`LSTM` or `CNN`) trained on the [EMODB Dataset](http://www.emodb.bilderbar.info/download/).",
- article="⭐ `ARCHITECTURES` -- **LSTM**: Long Short Term Memory. **CNN**: Convolutional Neural Network.",
-)
-
-first = gr.Interface(
- fn=handler,
- inputs=[
- gr.Audio(label="Speech Audio", type="filepath"),
- gr.Dropdown(
- ["LSTM", "CNN"], value="LSTM", label="Model Architecture", type="index"
- ),
- ],
- outputs=gr.Text(label="Emotion", value="..."),
- examples=EXAMPLES,
- **params,
-)
-
-second = gr.Interface(
- fn=handler,
- inputs=[
- gr.Audio(label="Microphone Input", source="microphone", type="filepath"),
- gr.Dropdown(
- ["LSTM", "CNN"], value="LSTM", label="Model Architecture", type="index"
- ),
- ],
- outputs=gr.Text(label="Emotion", value="..."),
- **params,
-)
-
-app = gr.TabbedInterface(
- [first, second],
- title="Speech Emotion Recognition 🗣️🎤",
- tab_names=["Audio Upload", "Microphone"],
-)
-app.launch()
diff --git a/spaces/k1ngtai/MMS/vits/README.md b/spaces/k1ngtai/MMS/vits/README.md
deleted file mode 100644
index f7883f8c5badbece0887d48e41436a32e64c5935..0000000000000000000000000000000000000000
--- a/spaces/k1ngtai/MMS/vits/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech
-
-### Jaehyeon Kim, Jungil Kong, and Juhee Son
-
-In our recent [paper](https://arxiv.org/abs/2106.06103), we propose VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech.
-
-Several recent end-to-end text-to-speech (TTS) models enabling single-stage training and parallel sampling have been proposed, but their sample quality does not match that of two-stage TTS systems. In this work, we present a parallel end-to-end TTS method that generates more natural sounding audio than current two-stage models. Our method adopts variational inference augmented with normalizing flows and an adversarial training process, which improves the expressive power of generative modeling. We also propose a stochastic duration predictor to synthesize speech with diverse rhythms from input text. With the uncertainty modeling over latent variables and the stochastic duration predictor, our method expresses the natural one-to-many relationship in which a text input can be spoken in multiple ways with different pitches and rhythms. A subjective human evaluation (mean opinion score, or MOS) on the LJ Speech, a single speaker dataset, shows that our method outperforms the best publicly available TTS systems and achieves a MOS comparable to ground truth.
-
-Visit our [demo](https://jaywalnut310.github.io/vits-demo/index.html) for audio samples.
-
-We also provide the [pretrained models](https://drive.google.com/drive/folders/1ksarh-cJf3F5eKJjLVWY0X1j1qsQqiS2?usp=sharing).
-
-** Update note: Thanks to [Rishikesh (ऋषिकेश)](https://github.com/jaywalnut310/vits/issues/1), our interactive TTS demo is now available on [Colab Notebook](https://colab.research.google.com/drive/1CO61pZizDj7en71NQG_aqqKdGaA_SaBf?usp=sharing).
-
-*Figure: VITS at training / VITS at inference.*
-
-## Pre-requisites
-0. Python >= 3.6
-0. Clone this repository
-0. Install python requirements. Please refer [requirements.txt](requirements.txt)
- 1. You may need to install espeak first: `apt-get install espeak`
-0. Download datasets
- 1. Download and extract the LJ Speech dataset, then rename or create a link to the dataset folder: `ln -s /path/to/LJSpeech-1.1/wavs DUMMY1`
-    1. For multi-speaker setting, download and extract the VCTK dataset, and downsample wav files to 22050 Hz. Then rename or create a link to the dataset folder: `ln -s /path/to/VCTK-Corpus/downsampled_wavs DUMMY2`
-0. Build Monotonic Alignment Search and run preprocessing if you use your own datasets.
-```sh
-# Cython-version Monotonic Alignment Search
-cd monotonic_align
-python setup.py build_ext --inplace
-
-# Preprocessing (g2p) for your own datasets. Preprocessed phonemes for LJ Speech and VCTK have been already provided.
-# python preprocess.py --text_index 1 --filelists filelists/ljs_audio_text_train_filelist.txt filelists/ljs_audio_text_val_filelist.txt filelists/ljs_audio_text_test_filelist.txt
-# python preprocess.py --text_index 2 --filelists filelists/vctk_audio_sid_text_train_filelist.txt filelists/vctk_audio_sid_text_val_filelist.txt filelists/vctk_audio_sid_text_test_filelist.txt
-```
-
-
-## Training Example
-```sh
-# LJ Speech
-python train.py -c configs/ljs_base.json -m ljs_base
-
-# VCTK
-python train_ms.py -c configs/vctk_base.json -m vctk_base
-```
-
-
-## Inference Example
-See [inference.ipynb](inference.ipynb)
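
For convenience, a condensed sketch of what the referenced notebook does for single-speaker inference; the checkpoint path and input text are placeholders, and the exact calls should be taken from inference.ipynb rather than from this summary.

```python
import torch
import commons, utils
from models import SynthesizerTrn
from text import text_to_sequence
from text.symbols import symbols

hps = utils.get_hparams_from_file("configs/ljs_base.json")
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model,
).cuda().eval()
utils.load_checkpoint("pretrained_ljs.pth", net_g, None)  # placeholder checkpoint

seq = text_to_sequence("VITS is awesome!", hps.data.text_cleaners)
if hps.data.add_blank:
    seq = commons.intersperse(seq, 0)
x = torch.LongTensor(seq).unsqueeze(0).cuda()
x_lengths = torch.LongTensor([x.size(1)]).cuda()
with torch.no_grad():
    audio = net_g.infer(x, x_lengths, noise_scale=0.667, length_scale=1.0)[0][0, 0].cpu().numpy()
```
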
diff --git a/spaces/kaushikdatta/generate-webslides/README.md b/spaces/kaushikdatta/generate-webslides/README.md
deleted file mode 100644
index 7de70c6029e05a2b670b422cb3a1be440e239138..0000000000000000000000000000000000000000
--- a/spaces/kaushikdatta/generate-webslides/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Generate Webslides
-emoji: 🎙
-colorFrom: yellow
-colorTo: gray
-sdk: gradio
-app_file: app.py
-pinned: false
-duplicated_from: kaushikdatta/new-car-inventory
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/keras-io/english-speaker-accent-recognition-using-transfer-learning/app.py b/spaces/keras-io/english-speaker-accent-recognition-using-transfer-learning/app.py
deleted file mode 100644
index 8b93280bdfcddee780747b783704c9a0dba78a01..0000000000000000000000000000000000000000
--- a/spaces/keras-io/english-speaker-accent-recognition-using-transfer-learning/app.py
+++ /dev/null
@@ -1,198 +0,0 @@
-import os
-import io
-import csv
-import gradio as gr
-import numpy as np
-import tensorflow as tf
-import tensorflow_hub as hub
-import tensorflow_io as tfio
-import matplotlib.pyplot as plt
-from tensorflow import keras
-from huggingface_hub import from_pretrained_keras
-
-# Configuration
-class_names = [
- "Irish",
- "Midlands",
- "Northern",
- "Scottish",
- "Southern",
- "Welsh",
- "Not a speech",
-]
-
-# Download Yamnet model from TF Hub
-yamnet_model = hub.load("https://tfhub.dev/google/yamnet/1")
-
-# Download dense model from HF Hub
-model = from_pretrained_keras(
- pretrained_model_name_or_path="fbadine/uk_ireland_accent_classification"
-)
-
-# Function that reads a wav audio file and resamples it to 16000 Hz
-# This function is copied from the tutorial:
-# https://www.tensorflow.org/tutorials/audio/transfer_learning_audio
-def load_16k_audio_wav(filename):
- # Read file content
- file_content = tf.io.read_file(filename)
-
- # Decode audio wave
- audio_wav, sample_rate = tf.audio.decode_wav(file_content, desired_channels=1)
- audio_wav = tf.squeeze(audio_wav, axis=-1)
- sample_rate = tf.cast(sample_rate, dtype=tf.int64)
-
- # Resample to 16k
- audio_wav = tfio.audio.resample(audio_wav, rate_in=sample_rate, rate_out=16000)
-
- return audio_wav
-
-
-# Function that takes the audio file produced by gr.Audio(source="microphone") and
-# returns a tensor applying the following transformations:
-# - Resample to 16000 Hz
-# - Normalize
-# - Reshape to [1, -1]
-def mic_to_tensor(recorded_audio_file):
- sample_rate, audio = recorded_audio_file
-
- audio_wav = tf.constant(audio, dtype=tf.float32)
- if tf.rank(audio_wav) > 1:
- audio_wav = tf.reduce_mean(audio_wav, axis=1)
- audio_wav = tfio.audio.resample(audio_wav, rate_in=sample_rate, rate_out=16000)
-
- audio_wav = tf.divide(audio_wav, tf.reduce_max(tf.abs(audio_wav)))
-
- return audio_wav
-
-
-# Function that takes a tensor and applies the following:
-# - Pass it through Yamnet model to get the embeddings which are the input of the dense model
-# - Pass the embeddings through the dense model to get the predictions
-def tensor_to_predictions(audio_tensor):
- # Get audio embeddings & scores.
- scores, embeddings, mel_spectrogram = yamnet_model(audio_tensor)
-
- # Predict the output of the accent recognition model with embeddings as input
- predictions = model.predict(embeddings)
-
- return predictions, mel_spectrogram
-
-
-# Function that is called when the user clicks the "Predict" button. It does the following:
-# - Calls tensor_to_predictions() to get the predictions
-# - Generates the top scoring labels
-# - Generates the top scoring plot
-def predict_accent(recorded_audio_file, uploaded_audio_file):
- # Transform input to tensor
- if recorded_audio_file:
- audio_tensor = mic_to_tensor(recorded_audio_file)
- else:
- audio_tensor = load_16k_audio_wav(uploaded_audio_file)
-
- # Model Inference
- predictions, mel_spectrogram = tensor_to_predictions(audio_tensor)
-
-    # Get the inferred class
- infered_class = class_names[predictions.mean(axis=0).argmax()]
-
- # Generate Output 1 - Accents
- top_scoring_labels_output = {
- class_names[i]: float(predictions.mean(axis=0)[i])
- for i in range(len(class_names))
- }
-
- # Generate Output 2
- top_scoring_plot_output = generate_top_scoring_plot(predictions)
-
- return [top_scoring_labels_output, top_scoring_plot_output]
-
-
-# Clears all inputs and outputs when the user clicks "Clear" button
-def clear_inputs_and_outputs():
- return [None, None, None, None]
-
-
-# Function that generates the top scoring plot
-# This function is copied from the tutorial and adjusted to our needs
-# https://keras.io/examples/audio/uk_ireland_accent_recognition/
-def generate_top_scoring_plot(predictions):
- # Plot and label the model output scores for the top-scoring classes.
- mean_predictions = np.mean(predictions, axis=0)
-
- top_class_indices = np.argsort(mean_predictions)[::-1]
- fig = plt.figure(figsize=(10, 2))
- plt.imshow(
- predictions[:, top_class_indices].T,
- aspect="auto",
- interpolation="nearest",
- cmap="gray_r",
- )
-
- # patch_padding = (PATCH_WINDOW_SECONDS / 2) / PATCH_HOP_SECONDS
- # values from the model documentation
- patch_padding = (0.025 / 2) / 0.01
- plt.xlim([-patch_padding - 0.5, predictions.shape[0] + patch_padding - 0.5])
- # Label the top_N classes.
- yticks = range(0, len(class_names), 1)
- plt.yticks(yticks, [class_names[top_class_indices[x]] for x in yticks])
- _ = plt.ylim(-0.5 + np.array([len(class_names), 0]))
-
- return fig
-
-
-# Main function
-if __name__ == "__main__":
- demo = gr.Blocks()
-
- with demo:
- gr.Markdown(
-            """
-            English speaker accent recognition using Transfer Learning
-
-            In this space, you can record your voice or upload a wav file and the model will predict the English accent spoken in the audio.
-            """
- )
- with gr.Row():
- ## Input
- with gr.Column():
- mic_input = gr.Audio(source="microphone", label="Record your own voice")
- upl_input = gr.Audio(
- source="upload", type="filepath", label="Upload a wav file"
- )
-
- with gr.Row():
- clr_btn = gr.Button(value="Clear", variant="secondary")
- prd_btn = gr.Button(value="Predict")
-
- # Outputs
- with gr.Column():
- lbl_output = gr.Label(label="Top Predictions")
-            with gr.Group():
-                # Plot output referenced by the Predict/Clear callbacks below
-                plt_output = gr.Plot(label="Prediction per time slot", show_label=False)
-            with gr.Group():
-                gr.Markdown(
-                    """
-                    Credits:
-                    Author: Fadi Badine.
-                    Based on the Keras example "English speaker accent recognition using Transfer Learning" by Fadi Badine.
-                    Check out the model here.
-                    """
- )
-
- clr_btn.click(
- fn=clear_inputs_and_outputs,
- inputs=[],
- outputs=[mic_input, upl_input, lbl_output, plt_output],
- )
- prd_btn.click(
- fn=predict_accent,
- inputs=[mic_input, upl_input],
- outputs=[lbl_output, plt_output],
- )
-
- demo.launch(debug=True, share=True)
diff --git a/spaces/keras-io/what-convnets-learn/README.md b/spaces/keras-io/what-convnets-learn/README.md
deleted file mode 100644
index b6e5e7557bde0ac7a636c44894b4f07300c7a368..0000000000000000000000000000000000000000
--- a/spaces/keras-io/what-convnets-learn/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Visualizing What ConvNets Learn
-emoji: 👀
-colorFrom: pink
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.9.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/kevinwang676/FreeVC/mel_processing.py b/spaces/kevinwang676/FreeVC/mel_processing.py
deleted file mode 100644
index 99c5b35beb83f3b288af0fac5b49ebf2c69f062c..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/FreeVC/mel_processing.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import math
-import os
-import random
-import torch
-from torch import nn
-import torch.nn.functional as F
-import torch.utils.data
-import numpy as np
-import librosa
-import librosa.util as librosa_util
-from librosa.util import normalize, pad_center, tiny
-from scipy.signal import get_window
-from scipy.io.wavfile import read
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
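
For reference, a minimal sketch of how the mel helpers above are typically driven; the STFT/mel settings below are illustrative assumptions, not values taken from this repository:

import torch
from mel_processing import mel_spectrogram_torch  # the module removed above

# One second of dummy audio in [-1, 1]; shape (batch, samples).
y = torch.rand(1, 22050) * 2 - 1

# Hypothetical 22.05 kHz settings; real values come from the model's JSON config.
mel = mel_spectrogram_torch(
    y, n_fft=1024, num_mels=80, sampling_rate=22050,
    hop_size=256, win_size=1024, fmin=0, fmax=None, center=False,
)
print(mel.shape)  # (1, 80, n_frames)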
diff --git a/spaces/kevinwang676/SadTalker/src/face3d/extract_kp_videos_safe.py b/spaces/kevinwang676/SadTalker/src/face3d/extract_kp_videos_safe.py
deleted file mode 100644
index 5141ba3adfdd62b6205909dca519d66271c425ad..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/SadTalker/src/face3d/extract_kp_videos_safe.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import os
-import cv2
-import time
-import glob
-import argparse
-import numpy as np
-from PIL import Image
-import torch
-from tqdm import tqdm
-from itertools import cycle
-from torch.multiprocessing import Pool, Process, set_start_method
-
-from facexlib.alignment import landmark_98_to_68
-from facexlib.detection import init_detection_model
-
-from facexlib.utils import load_file_from_url
-from src.face3d.util.my_awing_arch import FAN
-
-def init_alignment_model(model_name, half=False, device='cuda', model_rootpath=None):
- if model_name == 'awing_fan':
- model = FAN(num_modules=4, num_landmarks=98, device=device)
- model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/alignment_WFLW_4HG.pth'
- else:
- raise NotImplementedError(f'{model_name} is not implemented.')
-
- model_path = load_file_from_url(
- url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
- model.load_state_dict(torch.load(model_path, map_location=device)['state_dict'], strict=True)
- model.eval()
- model = model.to(device)
- return model
-
-
-class KeypointExtractor():
- def __init__(self, device='cuda'):
-
- ### gfpgan/weights
- try:
- import webui # in webui
- root_path = 'extensions/SadTalker/gfpgan/weights'
-
- except:
- root_path = 'gfpgan/weights'
-
- self.detector = init_alignment_model('awing_fan',device=device, model_rootpath=root_path)
- self.det_net = init_detection_model('retinaface_resnet50', half=False,device=device, model_rootpath=root_path)
-
- def extract_keypoint(self, images, name=None, info=True):
- if isinstance(images, list):
- keypoints = []
- if info:
- i_range = tqdm(images,desc='landmark Det:')
- else:
- i_range = images
-
- for image in i_range:
- current_kp = self.extract_keypoint(image)
- # current_kp = self.detector.get_landmarks(np.array(image))
- if np.mean(current_kp) == -1 and keypoints:
- keypoints.append(keypoints[-1])
- else:
- keypoints.append(current_kp[None])
-
- keypoints = np.concatenate(keypoints, 0)
- np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1))
- return keypoints
- else:
- while True:
- try:
- with torch.no_grad():
- # face detection -> face alignment.
- img = np.array(images)
- bboxes = self.det_net.detect_faces(images, 0.97)
-
- bboxes = bboxes[0]
- img = img[int(bboxes[1]):int(bboxes[3]), int(bboxes[0]):int(bboxes[2]), :]
-
- keypoints = landmark_98_to_68(self.detector.get_landmarks(img)) # [0]
-
- #### keypoints to the original location
- keypoints[:,0] += int(bboxes[0])
- keypoints[:,1] += int(bboxes[1])
-
- break
- except RuntimeError as e:
- if str(e).startswith('CUDA'):
- print("Warning: out of memory, sleep for 1s")
- time.sleep(1)
- else:
- print(e)
- break
- except TypeError:
- print('No face detected in this image')
- shape = [68, 2]
- keypoints = -1. * np.ones(shape)
- break
- if name is not None:
- np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1))
- return keypoints
-
-def read_video(filename):
- frames = []
- cap = cv2.VideoCapture(filename)
- while cap.isOpened():
- ret, frame = cap.read()
- if ret:
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- frame = Image.fromarray(frame)
- frames.append(frame)
- else:
- break
- cap.release()
- return frames
-
-def run(data):
- filename, opt, device = data
- os.environ['CUDA_VISIBLE_DEVICES'] = device
- kp_extractor = KeypointExtractor()
- images = read_video(filename)
- name = filename.split('/')[-2:]
- os.makedirs(os.path.join(opt.output_dir, name[-2]), exist_ok=True)
- kp_extractor.extract_keypoint(
- images,
- name=os.path.join(opt.output_dir, name[-2], name[-1])
- )
-
-if __name__ == '__main__':
- set_start_method('spawn')
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('--input_dir', type=str, help='the folder of the input files')
- parser.add_argument('--output_dir', type=str, help='the folder of the output files')
- parser.add_argument('--device_ids', type=str, default='0,1')
- parser.add_argument('--workers', type=int, default=4)
-
- opt = parser.parse_args()
- filenames = list()
- VIDEO_EXTENSIONS_LOWERCASE = {'mp4'}
- VIDEO_EXTENSIONS = VIDEO_EXTENSIONS_LOWERCASE.union({f.upper() for f in VIDEO_EXTENSIONS_LOWERCASE})
- extensions = VIDEO_EXTENSIONS
-
-    for ext in extensions:
-        print(f'{opt.input_dir}/*.{ext}')
-        # collect matches for every extension instead of overwriting the list on each pass
-        filenames += sorted(glob.glob(f'{opt.input_dir}/*.{ext}'))
-    filenames = sorted(filenames)
- print('Total number of videos:', len(filenames))
- pool = Pool(opt.workers)
- args_list = cycle([opt])
- device_ids = opt.device_ids.split(",")
- device_ids = cycle(device_ids)
- for data in tqdm(pool.imap_unordered(run, zip(filenames, args_list, device_ids))):
-        pass  # just drain the iterator; run() writes its results to disk
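
A rough sketch of how the extractor above is used on a single frame; the image path and device are illustrative, and the facexlib weights are downloaded on first use:

from PIL import Image
from src.face3d.extract_kp_videos_safe import KeypointExtractor  # the module removed above

extractor = KeypointExtractor(device='cuda')
frame = Image.open('face.png').convert('RGB')   # hypothetical input image
landmarks = extractor.extract_keypoint(frame)   # (68, 2) array, or all -1 if no face is found
print(landmarks.shape)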
diff --git a/spaces/kevinwang676/SadTalker/src/face3d/util/generate_list.py b/spaces/kevinwang676/SadTalker/src/face3d/util/generate_list.py
deleted file mode 100644
index 943d906781063c3584a7e5b5c784f8aac0694985..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/SadTalker/src/face3d/util/generate_list.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""This script is to generate training list files for Deep3DFaceRecon_pytorch
-"""
-
-import os
-
-# save path to training data
-def write_list(lms_list, imgs_list, msks_list, mode='train',save_folder='datalist', save_name=''):
- save_path = os.path.join(save_folder, mode)
- if not os.path.isdir(save_path):
- os.makedirs(save_path)
- with open(os.path.join(save_path, save_name + 'landmarks.txt'), 'w') as fd:
- fd.writelines([i + '\n' for i in lms_list])
-
- with open(os.path.join(save_path, save_name + 'images.txt'), 'w') as fd:
- fd.writelines([i + '\n' for i in imgs_list])
-
- with open(os.path.join(save_path, save_name + 'masks.txt'), 'w') as fd:
- fd.writelines([i + '\n' for i in msks_list])
-
-# check if the path is valid
-def check_list(rlms_list, rimgs_list, rmsks_list):
- lms_list, imgs_list, msks_list = [], [], []
- for i in range(len(rlms_list)):
- flag = 'false'
- lm_path = rlms_list[i]
- im_path = rimgs_list[i]
- msk_path = rmsks_list[i]
- if os.path.isfile(lm_path) and os.path.isfile(im_path) and os.path.isfile(msk_path):
- flag = 'true'
- lms_list.append(rlms_list[i])
- imgs_list.append(rimgs_list[i])
- msks_list.append(rmsks_list[i])
- print(i, rlms_list[i], flag)
- return lms_list, imgs_list, msks_list
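
A short usage sketch of the two helpers above, with hypothetical paths; check_list drops any triple whose files are missing before write_list materialises the three text files:

from src.face3d.util.generate_list import check_list, write_list  # the module removed above

lms = ['data/lm/0001.txt', 'data/lm/0002.txt']     # hypothetical landmark files
imgs = ['data/img/0001.png', 'data/img/0002.png']  # matching images
msks = ['data/msk/0001.png', 'data/msk/0002.png']  # matching masks

lms, imgs, msks = check_list(lms, imgs, msks)
write_list(lms, imgs, msks, mode='train', save_folder='datalist', save_name='example_')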
diff --git a/spaces/kevinwang676/VITS2-Mandarin/text/cleaners.py b/spaces/kevinwang676/VITS2-Mandarin/text/cleaners.py
deleted file mode 100644
index 13481001a5e55004f59fccd04d1c047aac25a39e..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VITS2-Mandarin/text/cleaners.py
+++ /dev/null
@@ -1,235 +0,0 @@
-import re
-from text.japanese import japanese_to_romaji_with_accent, japanese_to_ipa, japanese_to_ipa2, japanese_to_ipa3
-from text.korean import latin_to_hangul, number_to_hangul, divide_hangul, korean_to_lazy_ipa, korean_to_ipa
-from g2pk2 import G2p
-from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo, chinese_to_romaji, chinese_to_lazy_ipa, chinese_to_ipa, chinese_to_ipa2
-#from text.sanskrit import devanagari_to_ipa
-from text.english import english_to_ipa, english_to_lazy_ipa, english_to_ipa2, english_to_lazy_ipa2
-#from text.thai import num_to_thai, latin_to_thai
-#from text.shanghainese import shanghainese_to_ipa
-#from text.cantonese import cantonese_to_ipa
-#from text.ngu_dialect import ngu_dialect_to_ipa
-from unidecode import unidecode
-from phonemizer import phonemize
-
-
-_whitespace_re = re.compile(r'\s+')
-
-# Regular expression matching Japanese without punctuation marks:
-_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# Regular expression matching non-Japanese characters or punctuation marks:
-_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
- ('mrs', 'misess'),
- ('mr', 'mister'),
- ('dr', 'doctor'),
- ('st', 'saint'),
- ('co', 'company'),
- ('jr', 'junior'),
- ('maj', 'major'),
- ('gen', 'general'),
- ('drs', 'doctors'),
- ('rev', 'reverend'),
- ('lt', 'lieutenant'),
- ('hon', 'honorable'),
- ('sgt', 'sergeant'),
- ('capt', 'captain'),
- ('esq', 'esquire'),
- ('ltd', 'limited'),
- ('col', 'colonel'),
- ('ft', 'fort'),
-]]
-
-
-def expand_abbreviations(text):
- for regex, replacement in _abbreviations:
- text = re.sub(regex, replacement, text)
- return text
-
-def collapse_whitespace(text):
- return re.sub(_whitespace_re, ' ', text)
-
-
-def convert_to_ascii(text):
- return unidecode(text)
-
-#- For replication of https://github.com/FENRlR/MB-iSTFT-VITS2/issues/2
-# you may need to replace the symbol to Russian one
-def basic_cleaners(text):
- '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
- text = text.lower()
- text = collapse_whitespace(text)
- return text
-
-
-def fix_g2pk2_error(text):
- new_text = ""
- i = 0
- while i < len(text) - 4:
- if (text[i:i+3] == 'ㅇㅡㄹ' or text[i:i+3] == 'ㄹㅡㄹ') and text[i+3] == ' ' and text[i+4] == 'ㄹ':
- new_text += text[i:i+3] + ' ' + 'ㄴ'
- i += 5
- else:
- new_text += text[i]
- i += 1
-
- new_text += text[i:]
- return new_text
-
-
-def english_cleaners(text):
- return english_to_ipa(text)
-
-
-def english_cleaners2(text):
- return english_to_ipa2(text)
-
-
-def english_cleaners3(text): # needs espeak - apt-get install espeak
- text = convert_to_ascii(text)
- text = expand_abbreviations(text.lower())
- phonemes = phonemize(text, language='en-us', backend='espeak', strip=True, preserve_punctuation=True,with_stress=True)
- phonemes = collapse_whitespace(phonemes)
- return phonemes
-
-
-def japanese_cleaners(text):
- text = japanese_to_romaji_with_accent(text)
- text = re.sub(r'([A-Za-z])$', r'\1.', text)
- return text
-
-
-def japanese_cleaners2(text):
- return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
-
-
-def korean_cleaners(text):
- '''Pipeline for Korean text'''
- text = latin_to_hangul(text)
- g2p = G2p()
- text = g2p(text)
- text = divide_hangul(text)
- text = fix_g2pk2_error(text)
- text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
- return text
-
-
-def korean_cleaners2(text): # KO part from cjke
- '''Pipeline for Korean text'''
-    text = korean_to_ipa(text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-
-def chinese_cleaners(text):
- '''Pipeline for Chinese text'''
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
- return text
-
-
-def zh_ja_mixture_cleaners(text):
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
- lambda x: chinese_to_romaji(x.group(1))+' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
- x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')+' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-
-def sanskrit_cleaners(text):
- text = text.replace('॥', '।').replace('ॐ', 'ओम्')
- text = re.sub(r'([^।])$', r'\1।', text)
- return text
-
-
-def kej_cleaners(text):
- text = re.sub(r'\[KO\](.*?)\[KO\]',
- lambda x: korean_to_ipa(x.group(1))+' ', text)
- text = re.sub(r'\[EN\](.*?)\[EN\]',
- lambda x: english_to_ipa2(x.group(1)) + ' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]',
- lambda x: japanese_to_ipa2(x.group(1)) + ' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-
-def cjks_cleaners(text):
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
- lambda x: chinese_to_lazy_ipa(x.group(1))+' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]',
- lambda x: japanese_to_ipa(x.group(1))+' ', text)
- text = re.sub(r'\[KO\](.*?)\[KO\]',
- lambda x: korean_to_lazy_ipa(x.group(1))+' ', text)
- #text = re.sub(r'\[SA\](.*?)\[SA\]',
- # lambda x: devanagari_to_ipa(x.group(1))+' ', text)
- text = re.sub(r'\[EN\](.*?)\[EN\]',
- lambda x: english_to_lazy_ipa(x.group(1))+' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-def cjke_cleaners(text):
- text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
- 'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')+' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
- 'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz')+' ', text)
- text = re.sub(r'\[KO\](.*?)\[KO\]',
- lambda x: korean_to_ipa(x.group(1))+' ', text)
- text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
- 'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u')+' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-def cjke_cleaners2(text):
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
- lambda x: chinese_to_ipa(x.group(1))+' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]',
- lambda x: japanese_to_ipa2(x.group(1))+' ', text)
- text = re.sub(r'\[KO\](.*?)\[KO\]',
- lambda x: korean_to_ipa(x.group(1))+' ', text)
- text = re.sub(r'\[EN\](.*?)\[EN\]',
- lambda x: english_to_ipa2(x.group(1))+' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-'''
-def thai_cleaners(text):
- text = num_to_thai(text)
- text = latin_to_thai(text)
- return text
-
-
-def shanghainese_cleaners(text):
- text = shanghainese_to_ipa(text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-
-
-def chinese_dialect_cleaners(text):
- text = re.sub(r'\[ZH\](.*?)\[ZH\]',
- lambda x: chinese_to_ipa2(x.group(1))+' ', text)
- text = re.sub(r'\[JA\](.*?)\[JA\]',
- lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text)
- text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
- '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text)
- text = re.sub(r'\[GD\](.*?)\[GD\]',
- lambda x: cantonese_to_ipa(x.group(1))+' ', text)
- text = re.sub(r'\[EN\](.*?)\[EN\]',
- lambda x: english_to_lazy_ipa2(x.group(1))+' ', text)
- text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
- 1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text)
- text = re.sub(r'\s+$', '', text)
- text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
- return text
-'''
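
As an illustration of the tagging convention the multilingual cleaners above rely on, each span is wrapped in a language tag and converted by the matching g2p front end; a minimal sketch, assuming the text.* dependencies are installed:

from text.cleaners import cjke_cleaners2  # the module removed above

tagged = '[ZH]你好世界[ZH] [EN]Hello world[EN]'
print(cjke_cleaners2(tagged))   # IPA string, one segment per tagged span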
diff --git a/spaces/kevinwang676/vits-fast-finetuning-pcr/text/ngu_dialect.py b/spaces/kevinwang676/vits-fast-finetuning-pcr/text/ngu_dialect.py
deleted file mode 100644
index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/vits-fast-finetuning-pcr/text/ngu_dialect.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import re
-import opencc
-
-
-dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou',
- 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing',
- 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang',
- 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan',
- 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen',
- 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'}
-
-converters = {}
-
-for dialect in dialects.values():
- try:
- converters[dialect] = opencc.OpenCC(dialect)
- except:
- pass
-
-
-def ngu_dialect_to_ipa(text, dialect):
- dialect = dialects[dialect]
- text = converters[dialect].convert(text).replace('-','').replace('$',' ')
-    text = re.sub(r'[、；：]', '，', text)
-    text = re.sub(r'\s*，\s*', ', ', text)
-    text = re.sub(r'\s*。\s*', '. ', text)
-    text = re.sub(r'\s*？\s*', '? ', text)
-    text = re.sub(r'\s*！\s*', '! ', text)
- text = re.sub(r'\s*$', '', text)
- return text
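
For context, a sketch of how the converter above is meant to be called; the two-letter key selects an entry from `dialects`, and the corresponding OpenCC conversion data must be available (the try/except above silently skips dialects whose data is missing):

from text.ngu_dialect import ngu_dialect_to_ipa  # the module removed above

print(ngu_dialect_to_ipa('侬好', 'SZ'))   # 'SZ' -> Suzhou converter, assuming its OpenCC data is installed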
diff --git a/spaces/kohrisatou-infinity/KIP_01_beta/vdecoder/hifigan/utils.py b/spaces/kohrisatou-infinity/KIP_01_beta/vdecoder/hifigan/utils.py
deleted file mode 100644
index 84bff024f4d2e2de194b2a88ee7bbe5f0d33f67c..0000000000000000000000000000000000000000
--- a/spaces/kohrisatou-infinity/KIP_01_beta/vdecoder/hifigan/utils.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import glob
-import os
-import matplotlib
-import torch
-from torch.nn.utils import weight_norm
-matplotlib.use("Agg")
-import matplotlib.pylab as plt
-
-
-def plot_spectrogram(spectrogram):
- fig, ax = plt.subplots(figsize=(10, 2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
-
- fig.canvas.draw()
- plt.close()
-
- return fig
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def apply_weight_norm(m):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- weight_norm(m)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
-
-def load_checkpoint(filepath, device):
- assert os.path.isfile(filepath)
- print("Loading '{}'".format(filepath))
- checkpoint_dict = torch.load(filepath, map_location=device)
- print("Complete.")
- return checkpoint_dict
-
-
-def save_checkpoint(filepath, obj):
- print("Saving checkpoint to {}".format(filepath))
- torch.save(obj, filepath)
- print("Complete.")
-
-
-def del_old_checkpoints(cp_dir, prefix, n_models=2):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern) # get checkpoint paths
-    cp_list = sorted(cp_list)  # sort by iteration
-    if len(cp_list) > n_models:  # if more than n_models checkpoints are found
-        for cp in cp_list[:-n_models]:  # delete all but the latest n_models checkpoints
-            open(cp, 'w').close()  # empty the file contents first
-            os.unlink(cp)  # then delete the file (moves to trash when using Colab)
-
-
-def scan_checkpoint(cp_dir, prefix):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern)
- if len(cp_list) == 0:
- return None
- return sorted(cp_list)[-1]
-
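
A brief sketch of the checkpoint helpers above; the directory, prefix and state-dict key are assumptions for illustration:

import torch
from vdecoder.hifigan.utils import scan_checkpoint, load_checkpoint  # the module removed above

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ckpt_path = scan_checkpoint('checkpoints/', 'g_')   # newest file matching g_????????, or None
if ckpt_path is not None:
    state = load_checkpoint(ckpt_path, device)
    # generator.load_state_dict(state['generator'])  # key depends on how the checkpoint was saved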
diff --git a/spaces/kornia/kornia-resize-antialias/app.py b/spaces/kornia/kornia-resize-antialias/app.py
deleted file mode 100644
index 5b152e50750bf26d08ef74cc4cac9e23c262a81f..0000000000000000000000000000000000000000
--- a/spaces/kornia/kornia-resize-antialias/app.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import gradio as gr
-
-import kornia as K
-from kornia.core import concatenate, Tensor
-
-def rescale_aa(file, height, width):
-
- img: Tensor = K.io.load_image(file.name, K.io.ImageLoadType.RGB32)
- img = img[None]
-
- img_rescale: Tensor = K.geometry.rescale(img, (float(height),float(width)),antialias=False)
- img_rescale_aa: Tensor = K.geometry.rescale(img, (float(height),float(width)),antialias=True)
- img_out = concatenate([img_rescale, img_rescale_aa], -1)
-
-    # With antialiasing enabled, some values slightly exceed 1 (e.g. 1.00001), which breaks image display, so clamp the output to [0, 1]
- return K.utils.tensor_to_image(img_out.clamp_(0, 1))
-
-examples = [
- ["examples/a.png",0.25,0.25],
- ["examples/iron_man.jpeg",0.25,0.25],
-]
-
-kornia_resizing_demo = gr.Interface(
- rescale_aa,
- [
- gr.inputs.Image(type="file"),
- gr.inputs.Slider(minimum=0.005, maximum=2, step=0.005, default=0.25, label="Height"),
- gr.inputs.Slider(minimum=0.005, maximum=2, step=0.005, default=0.25, label="Width")
- ],
- "image",
- examples=examples,
- live=False,
- enable_queue = True,
- allow_flagging = "never"
-)
-
-kornia_resizing_demo.launch()
\ No newline at end of file
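
For reference, the core of the demo above reduces to the following comparison; a random tensor stands in for a decoded image, and the shapes and factors are illustrative:

import torch
import kornia as K

img = torch.rand(1, 3, 256, 256)   # any RGB tensor in [0, 1], shape (B, C, H, W)

down = K.geometry.rescale(img, (0.25, 0.25), antialias=False)    # plain resampling, prone to aliasing
down_aa = K.geometry.rescale(img, (0.25, 0.25), antialias=True)  # low-pass filtered before resampling
print(down.shape, down_aa.shape)   # both torch.Size([1, 3, 64, 64])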
diff --git a/spaces/kquote03/lama-video-watermark-remover/bin/side_by_side.py b/spaces/kquote03/lama-video-watermark-remover/bin/side_by_side.py
deleted file mode 100644
index 8ba7a42a3b8597552b8002d1eb245d5776aff7f7..0000000000000000000000000000000000000000
--- a/spaces/kquote03/lama-video-watermark-remover/bin/side_by_side.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/env python3
-import os
-import random
-
-import cv2
-import numpy as np
-
-from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset
-from saicinpainting.evaluation.utils import load_yaml
-from saicinpainting.training.visualizers.base import visualize_mask_and_images
-
-
-def main(args):
- config = load_yaml(args.config)
-
- datasets = [PrecomputedInpaintingResultsDataset(args.datadir, cur_predictdir, **config.dataset_kwargs)
- for cur_predictdir in args.predictdirs]
- assert len({len(ds) for ds in datasets}) == 1
- len_first = len(datasets[0])
-
- indices = list(range(len_first))
- if len_first > args.max_n:
- indices = sorted(random.sample(indices, args.max_n))
-
- os.makedirs(args.outpath, exist_ok=True)
-
- filename2i = {}
-
- keys = ['image'] + [i for i in range(len(datasets))]
- for img_i in indices:
- try:
- mask_fname = os.path.basename(datasets[0].mask_filenames[img_i])
- if mask_fname in filename2i:
- filename2i[mask_fname] += 1
- idx = filename2i[mask_fname]
-                mask_fname_only, ext = os.path.splitext(mask_fname)  # split off the extension, not the directory
- mask_fname = f'{mask_fname_only}_{idx}{ext}'
- else:
- filename2i[mask_fname] = 1
-
- cur_vis_dict = datasets[0][img_i]
- for ds_i, ds in enumerate(datasets):
- cur_vis_dict[ds_i] = ds[img_i]['inpainted']
-
- vis_img = visualize_mask_and_images(cur_vis_dict, keys,
- last_without_mask=False,
- mask_only_first=True,
- black_mask=args.black)
- vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8')
-
- out_fname = os.path.join(args.outpath, mask_fname)
-
- vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR)
- cv2.imwrite(out_fname, vis_img)
- except Exception as ex:
- print(f'Could not process {img_i} due to {ex}')
-
-
-if __name__ == '__main__':
- import argparse
-
- aparser = argparse.ArgumentParser()
- aparser.add_argument('--max-n', type=int, default=100, help='Maximum number of images to print')
- aparser.add_argument('--black', action='store_true', help='Whether to fill mask on GT with black')
- aparser.add_argument('config', type=str, help='Path to evaluation config (e.g. configs/eval1.yaml)')
- aparser.add_argument('outpath', type=str, help='Where to put results')
- aparser.add_argument('datadir', type=str,
- help='Path to folder with images and masks')
- aparser.add_argument('predictdirs', type=str,
- nargs='+',
- help='Path to folders with predicts')
-
-
- main(aparser.parse_args())
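
The script above is meant to be run from the command line; a programmatic equivalent that calls the main() defined above, with hypothetical paths, looks roughly like this:

import argparse

args = argparse.Namespace(
    max_n=20,
    black=False,
    config='configs/eval2_gpu.yaml',          # assumed evaluation config
    outpath='out/side_by_side',
    datadir='data/val',                       # folder with images and masks
    predictdirs=['preds/model_a', 'preds/model_b'],
)
main(args)   # writes one side-by-side comparison image per sampled mask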
diff --git a/spaces/kquote03/lama-video-watermark-remover/models/ade20k/resnet.py b/spaces/kquote03/lama-video-watermark-remover/models/ade20k/resnet.py
deleted file mode 100644
index 3e1d521f171c984cf6a7ff3dcebd96f8c5faf908..0000000000000000000000000000000000000000
--- a/spaces/kquote03/lama-video-watermark-remover/models/ade20k/resnet.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch"""
-
-import math
-
-import torch.nn as nn
-from torch.nn import BatchNorm2d
-
-from .utils import load_url
-
-__all__ = ['ResNet', 'resnet50']
-
-
-model_urls = {
- 'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth',
-}
-
-
-def conv3x3(in_planes, out_planes, stride=1):
- "3x3 convolution with padding"
- return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
- padding=1, bias=False)
-
-
-class BasicBlock(nn.Module):
- expansion = 1
-
- def __init__(self, inplanes, planes, stride=1, downsample=None):
- super(BasicBlock, self).__init__()
- self.conv1 = conv3x3(inplanes, planes, stride)
- self.bn1 = BatchNorm2d(planes)
- self.relu = nn.ReLU(inplace=True)
- self.conv2 = conv3x3(planes, planes)
- self.bn2 = BatchNorm2d(planes)
- self.downsample = downsample
- self.stride = stride
-
- def forward(self, x):
- residual = x
-
- out = self.conv1(x)
- out = self.bn1(out)
- out = self.relu(out)
-
- out = self.conv2(out)
- out = self.bn2(out)
-
- if self.downsample is not None:
- residual = self.downsample(x)
-
- out += residual
- out = self.relu(out)
-
- return out
-
-
-class Bottleneck(nn.Module):
- expansion = 4
-
- def __init__(self, inplanes, planes, stride=1, downsample=None):
- super(Bottleneck, self).__init__()
- self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
- self.bn1 = BatchNorm2d(planes)
- self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
- padding=1, bias=False)
- self.bn2 = BatchNorm2d(planes)
- self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
- self.bn3 = BatchNorm2d(planes * 4)
- self.relu = nn.ReLU(inplace=True)
- self.downsample = downsample
- self.stride = stride
-
- def forward(self, x):
- residual = x
-
- out = self.conv1(x)
- out = self.bn1(out)
- out = self.relu(out)
-
- out = self.conv2(out)
- out = self.bn2(out)
- out = self.relu(out)
-
- out = self.conv3(out)
- out = self.bn3(out)
-
- if self.downsample is not None:
- residual = self.downsample(x)
-
- out += residual
- out = self.relu(out)
-
- return out
-
-
-class ResNet(nn.Module):
-
- def __init__(self, block, layers, num_classes=1000):
- self.inplanes = 128
- super(ResNet, self).__init__()
- self.conv1 = conv3x3(3, 64, stride=2)
- self.bn1 = BatchNorm2d(64)
- self.relu1 = nn.ReLU(inplace=True)
- self.conv2 = conv3x3(64, 64)
- self.bn2 = BatchNorm2d(64)
- self.relu2 = nn.ReLU(inplace=True)
- self.conv3 = conv3x3(64, 128)
- self.bn3 = BatchNorm2d(128)
- self.relu3 = nn.ReLU(inplace=True)
- self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
-
- self.layer1 = self._make_layer(block, 64, layers[0])
- self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
- self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
- self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
- self.avgpool = nn.AvgPool2d(7, stride=1)
- self.fc = nn.Linear(512 * block.expansion, num_classes)
-
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
- m.weight.data.normal_(0, math.sqrt(2. / n))
- elif isinstance(m, BatchNorm2d):
- m.weight.data.fill_(1)
- m.bias.data.zero_()
-
- def _make_layer(self, block, planes, blocks, stride=1):
- downsample = None
- if stride != 1 or self.inplanes != planes * block.expansion:
- downsample = nn.Sequential(
- nn.Conv2d(self.inplanes, planes * block.expansion,
- kernel_size=1, stride=stride, bias=False),
- BatchNorm2d(planes * block.expansion),
- )
-
- layers = []
- layers.append(block(self.inplanes, planes, stride, downsample))
- self.inplanes = planes * block.expansion
- for i in range(1, blocks):
- layers.append(block(self.inplanes, planes))
-
- return nn.Sequential(*layers)
-
- def forward(self, x):
- x = self.relu1(self.bn1(self.conv1(x)))
- x = self.relu2(self.bn2(self.conv2(x)))
- x = self.relu3(self.bn3(self.conv3(x)))
- x = self.maxpool(x)
-
- x = self.layer1(x)
- x = self.layer2(x)
- x = self.layer3(x)
- x = self.layer4(x)
-
- x = self.avgpool(x)
- x = x.view(x.size(0), -1)
- x = self.fc(x)
-
- return x
-
-
-def resnet50(pretrained=False, **kwargs):
- """Constructs a ResNet-50 model.
-
- Args:
- pretrained (bool): If True, returns a model pre-trained on ImageNet
- """
- model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
- if pretrained:
- model.load_state_dict(load_url(model_urls['resnet50']), strict=False)
- return model
-
-
-def resnet18(pretrained=False, **kwargs):
- """Constructs a ResNet-18 model.
- Args:
- pretrained (bool): If True, returns a model pre-trained on ImageNet
- """
- model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
- if pretrained:
- model.load_state_dict(load_url(model_urls['resnet18']))
- return model
\ No newline at end of file
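
A quick sanity check of the backbone above on a random input; set pretrained=True only if the MIT Scene Parsing weights URL is reachable:

import torch
from models.ade20k.resnet import resnet50  # the module removed above

model = resnet50(pretrained=False)
x = torch.randn(1, 3, 224, 224)
logits = model(x)
print(logits.shape)   # torch.Size([1, 1000])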
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_T_A_T_.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_T_A_T_.py
deleted file mode 100644
index 1769de91b5f0416354e040b52e3615c6824fd2f9..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_T_A_T_.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .otBase import BaseTTXConverter
-
-
-class table_S_T_A_T_(BaseTTXConverter):
- pass
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-52a22f96.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-52a22f96.js
deleted file mode 100644
index e3fcc726d0a0b673b9d845d6f9c28e29a745f70e..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-52a22f96.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as g,i as v,s as d,V as q,G as r,C as o,af as h,M as f,g as b,X as w,Y as C,Z as R,p as j,t as S,q as G}from"./index-8c3da1d9.js";function M(i){let e,_,s;const u=i[6].default,t=q(u,i,i[5],null);return{c(){e=r("div"),t&&t.c(),o(e,"id",i[1]),o(e,"class",_=h(i[2].join(" "))+" svelte-15lo0d8"),f(e,"compact",i[4]==="compact"),f(e,"panel",i[4]==="panel"),f(e,"unequal-height",i[0].equal_height===!1),f(e,"stretch",i[0].equal_height),f(e,"hide",!i[3])},m(l,a){b(l,e,a),t&&t.m(e,null),s=!0},p(l,[a]){t&&t.p&&(!s||a&32)&&w(t,u,l,l[5],s?R(u,l[5],a,null):C(l[5]),null),(!s||a&2)&&o(e,"id",l[1]),(!s||a&4&&_!==(_=h(l[2].join(" "))+" svelte-15lo0d8"))&&o(e,"class",_),(!s||a&20)&&f(e,"compact",l[4]==="compact"),(!s||a&20)&&f(e,"panel",l[4]==="panel"),(!s||a&5)&&f(e,"unequal-height",l[0].equal_height===!1),(!s||a&5)&&f(e,"stretch",l[0].equal_height),(!s||a&12)&&f(e,"hide",!l[3])},i(l){s||(j(t,l),s=!0)},o(l){S(t,l),s=!1},d(l){l&&G(e),t&&t.d(l)}}}function V(i,e,_){let{$$slots:s={},$$scope:u}=e,{style:t={}}=e,{elem_id:l}=e,{elem_classes:a=[]}=e,{visible:m=!0}=e,{variant:c="default"}=e;return i.$$set=n=>{"style"in n&&_(0,t=n.style),"elem_id"in n&&_(1,l=n.elem_id),"elem_classes"in n&&_(2,a=n.elem_classes),"visible"in n&&_(3,m=n.visible),"variant"in n&&_(4,c=n.variant),"$$scope"in n&&_(5,u=n.$$scope)},[t,l,a,m,c,u,s]}class X extends g{constructor(e){super(),v(this,e,V,M,d,{style:0,elem_id:1,elem_classes:2,visible:3,variant:4})}}const Z=X,k=["static"];export{Z as Component,k as modes};
-//# sourceMappingURL=index-52a22f96.js.map
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/httpx/_transports/mock.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/httpx/_transports/mock.py
deleted file mode 100644
index 82043da2d908f7575097f14b08c1a8a60fa1f8a4..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/httpx/_transports/mock.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import typing
-
-from .._models import Request, Response
-from .base import AsyncBaseTransport, BaseTransport
-
-SyncHandler = typing.Callable[[Request], Response]
-AsyncHandler = typing.Callable[[Request], typing.Coroutine[None, None, Response]]
-
-
-class MockTransport(AsyncBaseTransport, BaseTransport):
- def __init__(self, handler: typing.Union[SyncHandler, AsyncHandler]) -> None:
- self.handler = handler
-
- def handle_request(
- self,
- request: Request,
- ) -> Response:
- request.read()
- response = self.handler(request)
- if not isinstance(response, Response): # pragma: no cover
- raise TypeError("Cannot use an async handler in a sync Client")
- return response
-
- async def handle_async_request(
- self,
- request: Request,
- ) -> Response:
- await request.aread()
- response = self.handler(request)
-
- # Allow handler to *optionally* be an `async` function.
- # If it is, then the `response` variable need to be awaited to actually
- # return the result.
-
- if not isinstance(response, Response):
- response = await response
-
- return response
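
For context, MockTransport is the documented way to exercise httpx clients without any network access; a minimal example:

import httpx

def handler(request: httpx.Request) -> httpx.Response:
    # Every request is answered locally; nothing touches the network.
    return httpx.Response(200, json={"echo": str(request.url)})

client = httpx.Client(transport=httpx.MockTransport(handler))
print(client.get("https://example.org/ping").json())   # {'echo': 'https://example.org/ping'}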
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/repository.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/repository.py
deleted file mode 100644
index 757995870b202b1a18a83df61e55cacdd7b21439..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/repository.py
+++ /dev/null
@@ -1,1461 +0,0 @@
-import atexit
-import os
-import re
-import subprocess
-import threading
-import time
-from contextlib import contextmanager
-from pathlib import Path
-from typing import Callable, Dict, Iterator, List, Optional, Tuple, Union
-from urllib.parse import urlparse
-
-from huggingface_hub.constants import REPO_TYPES_URL_PREFIXES, REPOCARD_NAME
-from huggingface_hub.repocard import metadata_load, metadata_save
-
-from .hf_api import HfApi, repo_type_and_id_from_hf_id
-from .lfs import LFS_MULTIPART_UPLOAD_COMMAND
-from .utils import (
- HfFolder,
- SoftTemporaryDirectory,
- logging,
- run_subprocess,
- tqdm,
- validate_hf_hub_args,
-)
-from .utils._typing import TypedDict
-
-
-logger = logging.get_logger(__name__)
-
-
-class CommandInProgress:
- """
- Utility to follow commands launched asynchronously.
- """
-
- def __init__(
- self,
- title: str,
- is_done_method: Callable,
- status_method: Callable,
- process: subprocess.Popen,
- post_method: Optional[Callable] = None,
- ):
- self.title = title
- self._is_done = is_done_method
- self._status = status_method
- self._process = process
- self._stderr = ""
- self._stdout = ""
- self._post_method = post_method
-
- @property
- def is_done(self) -> bool:
- """
- Whether the process is done.
- """
- result = self._is_done()
-
- if result and self._post_method is not None:
- self._post_method()
- self._post_method = None
-
- return result
-
- @property
- def status(self) -> int:
- """
- The exit code/status of the current action. Will return `0` if the
- command has completed successfully, and a number between 1 and 255 if
- the process errored-out.
-
- Will return -1 if the command is still ongoing.
- """
- return self._status()
-
- @property
- def failed(self) -> bool:
- """
- Whether the process errored-out.
- """
- return self.status > 0
-
- @property
- def stderr(self) -> str:
- """
- The current output message on the standard error.
- """
- if self._process.stderr is not None:
- self._stderr += self._process.stderr.read()
- return self._stderr
-
- @property
- def stdout(self) -> str:
- """
- The current output message on the standard output.
- """
- if self._process.stdout is not None:
- self._stdout += self._process.stdout.read()
- return self._stdout
-
- def __repr__(self):
- status = self.status
-
- if status == -1:
- status = "running"
-
- return (
- f"[{self.title} command, status code: {status},"
- f" {'in progress.' if not self.is_done else 'finished.'} PID:"
- f" {self._process.pid}]"
- )
-
-
-def is_git_repo(folder: Union[str, Path]) -> bool:
- """
- Check if the folder is the root or part of a git repository
-
- Args:
- folder (`str`):
- The folder in which to run the command.
-
- Returns:
-        `bool`: `True` if the folder is (the root of or inside) a git repository,
-        `False` otherwise.
- """
- folder_exists = os.path.exists(os.path.join(folder, ".git"))
- git_branch = subprocess.run("git branch".split(), cwd=folder, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- return folder_exists and git_branch.returncode == 0
-
-
-def is_local_clone(folder: Union[str, Path], remote_url: str) -> bool:
- """
- Check if the folder is a local clone of the remote_url
-
- Args:
- folder (`str` or `Path`):
- The folder in which to run the command.
- remote_url (`str`):
- The url of a git repository.
-
- Returns:
- `bool`: `True` if the repository is a local clone of the remote
- repository specified, `False` otherwise.
- """
- if not is_git_repo(folder):
- return False
-
- remotes = run_subprocess("git remote -v", folder).stdout
-
- # Remove token for the test with remotes.
- remote_url = re.sub(r"https://.*@", "https://", remote_url)
- remotes = [re.sub(r"https://.*@", "https://", remote) for remote in remotes.split()]
- return remote_url in remotes
-
-
-def is_tracked_with_lfs(filename: Union[str, Path]) -> bool:
- """
- Check if the file passed is tracked with git-lfs.
-
- Args:
- filename (`str` or `Path`):
- The filename to check.
-
- Returns:
- `bool`: `True` if the file passed is tracked with git-lfs, `False`
- otherwise.
- """
- folder = Path(filename).parent
- filename = Path(filename).name
-
- try:
- p = run_subprocess("git check-attr -a".split() + [filename], folder)
- attributes = p.stdout.strip()
- except subprocess.CalledProcessError as exc:
- if not is_git_repo(folder):
- return False
- else:
- raise OSError(exc.stderr)
-
- if len(attributes) == 0:
- return False
-
- found_lfs_tag = {"diff": False, "merge": False, "filter": False}
-
- for attribute in attributes.split("\n"):
- for tag in found_lfs_tag.keys():
- if tag in attribute and "lfs" in attribute:
- found_lfs_tag[tag] = True
-
- return all(found_lfs_tag.values())
-
-
-def is_git_ignored(filename: Union[str, Path]) -> bool:
- """
- Check if file is git-ignored. Supports nested .gitignore files.
-
- Args:
- filename (`str` or `Path`):
- The filename to check.
-
- Returns:
- `bool`: `True` if the file passed is ignored by `git`, `False`
- otherwise.
- """
- folder = Path(filename).parent
- filename = Path(filename).name
-
- try:
- p = run_subprocess("git check-ignore".split() + [filename], folder, check=False)
- # Will return exit code 1 if not gitignored
- is_ignored = not bool(p.returncode)
- except subprocess.CalledProcessError as exc:
- raise OSError(exc.stderr)
-
- return is_ignored
-
-
-def is_binary_file(filename: Union[str, Path]) -> bool:
- """
- Check if file is a binary file.
-
- Args:
- filename (`str` or `Path`):
- The filename to check.
-
- Returns:
- `bool`: `True` if the file passed is a binary file, `False` otherwise.
- """
- try:
- with open(filename, "rb") as f:
- content = f.read(10 * (1024**2)) # Read a maximum of 10MB
-
- # Code sample taken from the following stack overflow thread
- # https://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python/7392391#7392391
- text_chars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F})
- return bool(content.translate(None, text_chars))
- except UnicodeDecodeError:
- return True
-
-
-def files_to_be_staged(pattern: str = ".", folder: Union[str, Path, None] = None) -> List[str]:
- """
- Returns a list of filenames that are to be staged.
-
- Args:
- pattern (`str` or `Path`):
- The pattern of filenames to check. Put `.` to get all files.
- folder (`str` or `Path`):
- The folder in which to run the command.
-
- Returns:
- `List[str]`: List of files that are to be staged.
- """
- try:
- p = run_subprocess("git ls-files --exclude-standard -mo".split() + [pattern], folder)
- if len(p.stdout.strip()):
- files = p.stdout.strip().split("\n")
- else:
- files = []
- except subprocess.CalledProcessError as exc:
- raise EnvironmentError(exc.stderr)
-
- return files
-
-
-def is_tracked_upstream(folder: Union[str, Path]) -> bool:
- """
- Check if the current checked-out branch is tracked upstream.
-
- Args:
- folder (`str` or `Path`):
- The folder in which to run the command.
-
- Returns:
- `bool`: `True` if the current checked-out branch is tracked upstream,
- `False` otherwise.
- """
- try:
- run_subprocess("git rev-parse --symbolic-full-name --abbrev-ref @{u}", folder)
- return True
- except subprocess.CalledProcessError as exc:
- if "HEAD" in exc.stderr:
- raise OSError("No branch checked out")
-
- return False
-
-
-def commits_to_push(folder: Union[str, Path], upstream: Optional[str] = None) -> int:
- """
- Check the number of commits that would be pushed upstream
-
- Args:
- folder (`str` or `Path`):
- The folder in which to run the command.
- upstream (`str`, *optional*):
- The name of the upstream repository with which the comparison should be
- made.
-
- Returns:
- `int`: Number of commits that would be pushed upstream were a `git
- push` to proceed.
- """
- try:
- result = run_subprocess(f"git cherry -v {upstream or ''}", folder)
- return len(result.stdout.split("\n")) - 1
- except subprocess.CalledProcessError as exc:
- raise EnvironmentError(exc.stderr)
-
-
-class PbarT(TypedDict):
- # Used to store an opened progress bar in `_lfs_log_progress`
- bar: tqdm
- past_bytes: int
-
-
-@contextmanager
-def _lfs_log_progress():
- """
- This is a context manager that will log the Git LFS progress of cleaning,
- smudging, pulling and pushing.
- """
-
- if logger.getEffectiveLevel() >= logging.ERROR:
- try:
- yield
- except Exception:
- pass
- return
-
- def output_progress(stopping_event: threading.Event):
- """
- To be launched as a separate thread with an event meaning it should stop
- the tail.
- """
- # Key is tuple(state, filename), value is a dict(tqdm bar and a previous value)
- pbars: Dict[Tuple[str, str], PbarT] = {}
-
- def close_pbars():
- for pbar in pbars.values():
- pbar["bar"].update(pbar["bar"].total - pbar["past_bytes"])
- pbar["bar"].refresh()
- pbar["bar"].close()
-
- def tail_file(filename) -> Iterator[str]:
- """
- Creates a generator to be iterated through, which will return each
- line one by one. Will stop tailing the file if the stopping_event is
- set.
- """
- with open(filename, "r") as file:
- current_line = ""
- while True:
- if stopping_event.is_set():
- close_pbars()
- break
-
- line_bit = file.readline()
- if line_bit is not None and not len(line_bit.strip()) == 0:
- current_line += line_bit
- if current_line.endswith("\n"):
- yield current_line
- current_line = ""
- else:
- time.sleep(1)
-
- # If the file isn't created yet, wait for a few seconds before trying again.
- # Can be interrupted with the stopping_event.
- while not os.path.exists(os.environ["GIT_LFS_PROGRESS"]):
- if stopping_event.is_set():
- close_pbars()
- return
-
- time.sleep(2)
-
- for line in tail_file(os.environ["GIT_LFS_PROGRESS"]):
- try:
- state, file_progress, byte_progress, filename = line.split()
- except ValueError as error:
- # Try/except to ease debugging. See https://github.com/huggingface/huggingface_hub/issues/1373.
- raise ValueError(f"Cannot unpack LFS progress line:\n{line}") from error
- description = f"{state.capitalize()} file {filename}"
-
- current_bytes, total_bytes = byte_progress.split("/")
- current_bytes_int = int(current_bytes)
- total_bytes_int = int(total_bytes)
-
- pbar = pbars.get((state, filename))
- if pbar is None:
- # Initialize progress bar
- pbars[(state, filename)] = {
- "bar": tqdm(
- desc=description,
- initial=current_bytes_int,
- total=total_bytes_int,
- unit="B",
- unit_scale=True,
- unit_divisor=1024,
- ),
- "past_bytes": int(current_bytes),
- }
- else:
- # Update progress bar
- pbar["bar"].update(current_bytes_int - pbar["past_bytes"])
- pbar["past_bytes"] = current_bytes_int
-
- current_lfs_progress_value = os.environ.get("GIT_LFS_PROGRESS", "")
-
- with SoftTemporaryDirectory() as tmpdir:
- os.environ["GIT_LFS_PROGRESS"] = os.path.join(tmpdir, "lfs_progress")
- logger.debug(f"Following progress in {os.environ['GIT_LFS_PROGRESS']}")
-
- exit_event = threading.Event()
- x = threading.Thread(target=output_progress, args=(exit_event,), daemon=True)
- x.start()
-
- try:
- yield
- finally:
- exit_event.set()
- x.join()
-
- os.environ["GIT_LFS_PROGRESS"] = current_lfs_progress_value
-
-
-class Repository:
- """
- Helper class to wrap the git and git-lfs commands.
-
- The aim is to facilitate interacting with huggingface.co hosted model or
- dataset repos, though not a lot here (if any) is actually specific to
- huggingface.co.
- """
-
- command_queue: List[CommandInProgress]
-
- @validate_hf_hub_args
- def __init__(
- self,
- local_dir: Union[str, Path],
- clone_from: Optional[str] = None,
- repo_type: Optional[str] = None,
- token: Union[bool, str] = True,
- git_user: Optional[str] = None,
- git_email: Optional[str] = None,
- revision: Optional[str] = None,
- skip_lfs_files: bool = False,
- client: Optional[HfApi] = None,
- ):
- """
- Instantiate a local clone of a git repo.
-
- If `clone_from` is set, the repo will be cloned from an existing remote repository.
- If the remote repo does not exist, a `EnvironmentError` exception will be thrown.
- Please create the remote repo first using [`create_repo`].
-
- `Repository` uses the local git credentials by default. If explicitly set, the `token`
- or the `git_user`/`git_email` pair will be used instead.
-
- Args:
- local_dir (`str` or `Path`):
- path (e.g. `'my_trained_model/'`) to the local directory, where
- the `Repository` will be initialized.
- clone_from (`str`, *optional*):
- Either a repository url or `repo_id`.
- Example:
- - `"https://huggingface.co/philschmid/playground-tests"`
- - `"philschmid/playground-tests"`
- repo_type (`str`, *optional*):
- To set when cloning a repo from a repo_id. Default is model.
- token (`bool` or `str`, *optional*):
- A valid authentication token (see https://huggingface.co/settings/token).
- If `None` or `True` and machine is logged in (through `huggingface-cli login`
- or [`~huggingface_hub.login`]), token will be retrieved from the cache.
- If `False`, token is not sent in the request header.
- git_user (`str`, *optional*):
- will override the `git config user.name` for committing and
- pushing files to the hub.
- git_email (`str`, *optional*):
- will override the `git config user.email` for committing and
- pushing files to the hub.
- revision (`str`, *optional*):
- Revision to checkout after initializing the repository. If the
- revision doesn't exist, a branch will be created with that
- revision name from the default branch's current HEAD.
- skip_lfs_files (`bool`, *optional*, defaults to `False`):
- whether to skip git-LFS files or not.
- client (`HfApi`, *optional*):
- Instance of [`HfApi`] to use when calling the HF Hub API. A new
- instance will be created if this is left to `None`.
-
- Raises:
- - [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
- if the remote repository set in `clone_from` does not exist.
- """
- if isinstance(local_dir, Path):
- local_dir = str(local_dir)
- os.makedirs(local_dir, exist_ok=True)
- self.local_dir = os.path.join(os.getcwd(), local_dir)
- self._repo_type = repo_type
- self.command_queue = []
- self.skip_lfs_files = skip_lfs_files
- self.client = client if client is not None else HfApi()
-
- self.check_git_versions()
-
- if isinstance(token, str):
- self.huggingface_token: Optional[str] = token
- elif token is False:
- self.huggingface_token = None
- else:
- # if `True` -> explicit use of the cached token
- # if `None` -> implicit use of the cached token
- self.huggingface_token = HfFolder.get_token()
-
- if clone_from is not None:
- self.clone_from(repo_url=clone_from)
- else:
- if is_git_repo(self.local_dir):
- logger.debug("[Repository] is a valid git repo")
- else:
- raise ValueError("If not specifying `clone_from`, you need to pass Repository a valid git clone.")
-
- if self.huggingface_token is not None and (git_email is None or git_user is None):
- user = self.client.whoami(self.huggingface_token)
-
- if git_email is None:
- git_email = user["email"]
-
- if git_user is None:
- git_user = user["fullname"]
-
- if git_user is not None or git_email is not None:
- self.git_config_username_and_email(git_user, git_email)
-
- self.lfs_enable_largefiles()
- self.git_credential_helper_store()
-
- if revision is not None:
- self.git_checkout(revision, create_branch_ok=True)
-
- # This ensures that all commands exit before exiting the Python runtime.
- # This will ensure all pushes register on the hub, even if other errors happen in subsequent operations.
- atexit.register(self.wait_for_commands)
-
- @property
- def current_branch(self) -> str:
- """
- Returns the current checked out branch.
-
- Returns:
- `str`: Current checked out branch.
- """
- try:
- result = run_subprocess("git rev-parse --abbrev-ref HEAD", self.local_dir).stdout.strip()
- except subprocess.CalledProcessError as exc:
- raise EnvironmentError(exc.stderr)
-
- return result
-
- def check_git_versions(self):
- """
- Checks that `git` and `git-lfs` can be run.
-
- Raises:
- - [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
- if `git` or `git-lfs` are not installed.
- """
- try:
- git_version = run_subprocess("git --version", self.local_dir).stdout.strip()
- except FileNotFoundError:
- raise EnvironmentError("Looks like you do not have git installed, please install.")
-
- try:
- lfs_version = run_subprocess("git-lfs --version", self.local_dir).stdout.strip()
- except FileNotFoundError:
- raise EnvironmentError(
- "Looks like you do not have git-lfs installed, please install."
- " You can install from https://git-lfs.github.com/."
- " Then run `git lfs install` (you only have to do this once)."
- )
- logger.info(git_version + "\n" + lfs_version)
-
- @validate_hf_hub_args
- def clone_from(self, repo_url: str, token: Union[bool, str, None] = None):
- """
- Clone from a remote. If the folder already exists, will try to clone the
- repository within it.
-
- If this folder is a git repository with linked history, will try to
- update the repository.
-
- Args:
- repo_url (`str`):
- The URL from which to clone the repository
- token (`Union[str, bool]`, *optional*):
- Whether to use the authentication token. It can be:
- - a string which is the token itself
- - `False`, which would not use the authentication token
- - `True`, which would fetch the authentication token from the
- local folder and use it (you should be logged in for this to
- work).
- - `None`, which would retrieve the value of
- `self.huggingface_token`.
-
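
For context, the classic Repository workflow that the class above implements looks roughly like this; the repo id and commit message are hypothetical, and the remote repo must already exist:

from huggingface_hub import Repository

repo = Repository(local_dir="my-model", clone_from="username/my-model")
repo.git_pull()
# ... write or update files inside my-model/ ...
repo.push_to_hub(commit_message="update weights")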
diff --git a/spaces/lojban/text-to-speech/vits/commons.py b/spaces/lojban/text-to-speech/vits/commons.py
deleted file mode 100644
index efb2d00b0d107e9a7e11158063e2fe561b2c49c5..0000000000000000000000000000000000000000
--- a/spaces/lojban/text-to-speech/vits/commons.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def intersperse(lst, item):
- result = [item] * (len(lst) * 2 + 1)
- result[1::2] = lst
- return result
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
-def get_timing_signal_1d(
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = (
- math.log(float(max_timescale) / float(min_timescale)) /
- (num_timescales - 1))
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2,3) * mask
- return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1. / norm_type)
- return total_norm
\ No newline at end of file
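
Two of the helpers above in isolation, to show the intended shapes; the values are illustrative:

import torch
from vits.commons import sequence_mask, intersperse  # the module removed above

lengths = torch.tensor([3, 5])
print(sequence_mask(lengths))     # (2, 5) boolean mask, True inside each sequence
print(intersperse([7, 8, 9], 0))  # [0, 7, 0, 8, 0, 9, 0] - a blank token between symbols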
diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/allocator/malloc_allocator.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/allocator/malloc_allocator.h
deleted file mode 100644
index 2c01c66bd0a8e5f6f689c580e6d79df5d4e3a45c..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/thrust/thrust/detail/allocator/malloc_allocator.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include
-
-
-> Tero Karras (NVIDIA), Samuli Laine (NVIDIA), Timo Aila (NVIDIA)
-> https://arxiv.org/abs/1812.04948
->
-> **Abstract:** *We propose an alternative generator architecture for generative adversarial networks, borrowing from style transfer literature. The new architecture leads to an automatically learned, unsupervised separation of high-level attributes (e.g., pose and identity when trained on human faces) and stochastic variation in the generated images (e.g., freckles, hair), and it enables intuitive, scale-specific control of the synthesis. The new generator improves the state-of-the-art in terms of traditional distribution quality metrics, leads to demonstrably better interpolation properties, and also better disentangles the latent factors of variation. To quantify interpolation quality and disentanglement, we propose two new, automated methods that are applicable to any generator architecture. Finally, we introduce a new, highly varied and high-quality dataset of human faces.*
-
-For business inquiries, please contact [researchinquiries@nvidia.com](mailto:researchinquiries@nvidia.com)
-For press and other inquiries, please contact Hector Marinez at [hmarinez@nvidia.com](mailto:hmarinez@nvidia.com)
-
-**★★★ NEW: StyleGAN2 is available at [https://github.com/NVlabs/stylegan2](https://github.com/NVlabs/stylegan2) ★★★**
-
-## Resources
-
-Material related to our paper is available via the following links:
-
-- Paper: https://arxiv.org/abs/1812.04948
-- Video: https://youtu.be/kSLJriaOumA
-- Code: https://github.com/NVlabs/stylegan
-- FFHQ: https://github.com/NVlabs/ffhq-dataset
-
-Additional material can be found on Google Drive:
-
-| Path | Description
-| :--- | :----------
-| [StyleGAN](https://drive.google.com/open?id=1uka3a1noXHAydRPRbknqwKVGODvnmUBX) | Main folder.
-| ├ [stylegan-paper.pdf](https://drive.google.com/open?id=1v-HkF3Ehrpon7wVIx4r5DLcko_U_V6Lt) | High-quality version of the paper PDF.
-| ├ [stylegan-video.mp4](https://drive.google.com/open?id=1uzwkZHQX_9pYg1i0d1Nbe3D9xPO8-qBf) | High-quality version of the result video.
-| ├ [images](https://drive.google.com/open?id=1-l46akONUWF6LCpDoeq63H53rD7MeiTd) | Example images produced using our generator.
-| │ ├ [representative-images](https://drive.google.com/open?id=1ToY5P4Vvf5_c3TyUizQ8fckFFoFtBvD8) | High-quality images to be used in articles, blog posts, etc.
-| │ └ [100k-generated-images](https://drive.google.com/open?id=100DJ0QXyG89HZzB4w2Cbyf4xjNK54cQ1) | 100,000 generated images for different amounts of truncation.
-| │ ├ [ffhq-1024x1024](https://drive.google.com/open?id=14lm8VRN1pr4g_KVe6_LvyDX1PObst6d4) | Generated using Flickr-Faces-HQ dataset at 1024×1024.
-| │ ├ [bedrooms-256x256](https://drive.google.com/open?id=1Vxz9fksw4kgjiHrvHkX4Hze4dyThFW6t) | Generated using LSUN Bedroom dataset at 256×256.
-| │ ├ [cars-512x384](https://drive.google.com/open?id=1MFCvOMdLE2_mpeLPTiDw5dxc2CRuKkzS) | Generated using LSUN Car dataset at 512×384.
-| │ └ [cats-256x256](https://drive.google.com/open?id=1gq-Gj3GRFiyghTPKhp8uDMA9HV_0ZFWQ) | Generated using LSUN Cat dataset at 256×256.
-| ├ [videos](https://drive.google.com/open?id=1N8pOd_Bf8v89NGUaROdbD8-ayLPgyRRo) | Example videos produced using our generator.
-| │ └ [high-quality-video-clips](https://drive.google.com/open?id=1NFO7_vH0t98J13ckJYFd7kuaTkyeRJ86) | Individual segments of the result video as high-quality MP4.
-| ├ [ffhq-dataset](https://drive.google.com/open?id=1u2xu7bSrWxrbUxk-dT-UvEJq8IjdmNTP) | Raw data for the [Flickr-Faces-HQ dataset](https://github.com/NVlabs/ffhq-dataset).
-| └ [networks](https://drive.google.com/open?id=1MASQyN5m0voPcx7-9K0r5gObhvvPups7) | Pre-trained networks as pickled instances of [dnnlib.tflib.Network](./dnnlib/tflib/network.py).
-| ├ [stylegan-ffhq-1024x1024.pkl](https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ) | StyleGAN trained with Flickr-Faces-HQ dataset at 1024×1024.
-| ├ [stylegan-celebahq-1024x1024.pkl](https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf) | StyleGAN trained with CelebA-HQ dataset at 1024×1024.
-| ├ [stylegan-bedrooms-256x256.pkl](https://drive.google.com/uc?id=1MOSKeGF0FJcivpBI7s63V9YHloUTORiF) | StyleGAN trained with LSUN Bedroom dataset at 256×256.
-| ├ [stylegan-cars-512x384.pkl](https://drive.google.com/uc?id=1MJ6iCfNtMIRicihwRorsM3b7mmtmK9c3) | StyleGAN trained with LSUN Car dataset at 512×384.
-| ├ [stylegan-cats-256x256.pkl](https://drive.google.com/uc?id=1MQywl0FNt6lHu8E_EUqnRbviagS7fbiJ) | StyleGAN trained with LSUN Cat dataset at 256×256.
-| └ [metrics](https://drive.google.com/open?id=1MvYdWCBuMfnoYGptRH-AgKLbPTsIQLhl) | Auxiliary networks for the quality and disentanglement metrics.
-| ├ [inception_v3_features.pkl](https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn) | Standard [Inception-v3](https://arxiv.org/abs/1512.00567) classifier that outputs a raw feature vector.
-| ├ [vgg16_zhang_perceptual.pkl](https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2) | Standard [LPIPS](https://arxiv.org/abs/1801.03924) metric to estimate perceptual similarity.
-| ├ [celebahq-classifier-00-male.pkl](https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX) | Binary classifier trained to detect a single attribute of CelebA-HQ.
-| └ ⋯ | Please see the file listing for remaining networks.
-
-## Licenses
-
-All material, excluding the Flickr-Faces-HQ dataset, is made available under [Creative Commons BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/) license by NVIDIA Corporation. You can **use, redistribute, and adapt** the material for **non-commercial purposes**, as long as you give appropriate credit by **citing our paper** and **indicating any changes** that you've made.
-
-For license information regarding the FFHQ dataset, please refer to the [Flickr-Faces-HQ repository](https://github.com/NVlabs/ffhq-dataset).
-
-`inception_v3_features.pkl` and `inception_v3_softmax.pkl` are derived from the pre-trained [Inception-v3](https://arxiv.org/abs/1512.00567) network by Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna. The network was originally shared under [Apache 2.0](https://github.com/tensorflow/models/blob/master/LICENSE) license on the [TensorFlow Models](https://github.com/tensorflow/models) repository.
-
-`vgg16.pkl` and `vgg16_zhang_perceptual.pkl` are derived from the pre-trained [VGG-16](https://arxiv.org/abs/1409.1556) network by Karen Simonyan and Andrew Zisserman. The network was originally shared under [Creative Commons BY 4.0](https://creativecommons.org/licenses/by/4.0/) license on the [Very Deep Convolutional Networks for Large-Scale Visual Recognition](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) project page.
-
-`vgg16_zhang_perceptual.pkl` is further derived from the pre-trained [LPIPS](https://arxiv.org/abs/1801.03924) weights by Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The weights were originally shared under [BSD 2-Clause "Simplified" License](https://github.com/richzhang/PerceptualSimilarity/blob/master/LICENSE) on the [PerceptualSimilarity](https://github.com/richzhang/PerceptualSimilarity) repository.
-
-## System requirements
-
-* Both Linux and Windows are supported, but we strongly recommend Linux for performance and compatibility reasons.
-* 64-bit Python 3.6 installation. We recommend Anaconda3 with numpy 1.14.3 or newer.
-* TensorFlow 1.10.0 or newer with GPU support.
-* One or more high-end NVIDIA GPUs with at least 11GB of DRAM. We recommend NVIDIA DGX-1 with 8 Tesla V100 GPUs.
-* NVIDIA driver 391.35 or newer, CUDA toolkit 9.0 or newer, cuDNN 7.3.1 or newer.
-
-## Using pre-trained networks
-
-A minimal example of using a pre-trained StyleGAN generator is given in [pretrained_example.py](./pretrained_example.py). When executed, the script downloads a pre-trained StyleGAN generator from Google Drive and uses it to generate an image:
-
-```
-> python pretrained_example.py
-Downloading https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ .... done
-
-Gs Params OutputShape WeightShape
---- --- --- ---
-latents_in - (?, 512) -
-...
-images_out - (?, 3, 1024, 1024) -
---- --- --- ---
-Total 26219627
-
-> ls results
-example.png # https://drive.google.com/uc?id=1UDLT_zb-rof9kKH0GwiJW_bS9MoZi8oP
-```
-
-A more advanced example is given in [generate_figures.py](./generate_figures.py). The script reproduces the figures from our paper in order to illustrate style mixing, noise inputs, and truncation:
-```
-> python generate_figures.py
-results/figure02-uncurated-ffhq.png # https://drive.google.com/uc?id=1U3r1xgcD7o-Fd0SBRpq8PXYajm7_30cu
-results/figure03-style-mixing.png # https://drive.google.com/uc?id=1U-nlMDtpnf1RcYkaFQtbh5oxnhA97hy6
-results/figure04-noise-detail.png # https://drive.google.com/uc?id=1UX3m39u_DTU6eLnEW6MqGzbwPFt2R9cG
-results/figure05-noise-components.png # https://drive.google.com/uc?id=1UQKPcvYVeWMRccGMbs2pPD9PVv1QDyp_
-results/figure08-truncation-trick.png # https://drive.google.com/uc?id=1ULea0C12zGlxdDQFNLXOWZCHi3QNfk_v
-results/figure10-uncurated-bedrooms.png # https://drive.google.com/uc?id=1UEBnms1XMfj78OHj3_cx80mUf_m9DUJr
-results/figure11-uncurated-cars.png # https://drive.google.com/uc?id=1UO-4JtAs64Kun5vIj10UXqAJ1d5Ir1Ke
-results/figure12-uncurated-cats.png # https://drive.google.com/uc?id=1USnJc14prlu3QAYxstrtlfXC9sDWPA-W
-```
-
-The pre-trained networks are stored as standard pickle files on Google Drive:
-
-```
-# Load pre-trained network.
-url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl
-with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
- _G, _D, Gs = pickle.load(f)
- # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
- # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
- # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.
-```
-
-The above code downloads the file and unpickles it to yield 3 instances of [dnnlib.tflib.Network](./dnnlib/tflib/network.py). To generate images, you will typically want to use `Gs` – the other two networks are provided for completeness. In order for `pickle.load()` to work, you will need to have the `dnnlib` source directory in your PYTHONPATH and a `tf.Session` set as default. The session can be initialized by calling `dnnlib.tflib.init_tf()`.
-
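-A minimal setup sketch (assuming the repository layout described above) that satisfies these requirements before running the snippet:
-
-```
-import pickle
-
-import dnnlib
-import dnnlib.tflib as tflib
-import config  # repository-local config module that provides cache_dir
-
-tflib.init_tf()  # creates a tf.Session and sets it as the default
-```
-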
-There are three ways to use the pre-trained generator:
-
-1. Use `Gs.run()` for immediate-mode operation where the inputs and outputs are numpy arrays:
- ```
- # Pick latent vector.
- rnd = np.random.RandomState(5)
- latents = rnd.randn(1, Gs.input_shape[1])
-
- # Generate image.
- fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
- images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
- ```
- The first argument is a batch of latent vectors of shape `[num, 512]`. The second argument is reserved for class labels (not used by StyleGAN). The remaining keyword arguments are optional and can be used to further modify the operation (see below). The output is a batch of images, whose format is dictated by the `output_transform` argument.
-
-2. Use `Gs.get_output_for()` to incorporate the generator as a part of a larger TensorFlow expression:
- ```
- latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
- images = Gs_clone.get_output_for(latents, None, is_validation=True, randomize_noise=True)
- images = tflib.convert_images_to_uint8(images)
- result_expr.append(inception_clone.get_output_for(images))
- ```
- The above code is from [metrics/frechet_inception_distance.py](./metrics/frechet_inception_distance.py). It generates a batch of random images and feeds them directly to the [Inception-v3](https://arxiv.org/abs/1512.00567) network without having to convert the data to numpy arrays in between.
-
-3. Look up `Gs.components.mapping` and `Gs.components.synthesis` to access individual sub-networks of the generator. Similar to `Gs`, the sub-networks are represented as independent instances of [dnnlib.tflib.Network](./dnnlib/tflib/network.py):
- ```
- src_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in src_seeds)
- src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component]
- src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs)
- ```
- The above code is from [generate_figures.py](./generate_figures.py). It first transforms a batch of latent vectors into the intermediate *W* space using the mapping network and then turns these vectors into a batch of images using the synthesis network. The `dlatents` array stores a separate copy of the same *w* vector for each layer of the synthesis network to facilitate style mixing.
-
-The exact details of the generator are defined in [training/networks_stylegan.py](./training/networks_stylegan.py) (see `G_style`, `G_mapping`, and `G_synthesis`). The following keyword arguments can be specified to modify the behavior when calling `run()` and `get_output_for()`:
-
-* `truncation_psi` and `truncation_cutoff` control the truncation trick that is performed by default when using `Gs` (ψ=0.7, cutoff=8). It can be disabled by setting `truncation_psi=1` or `is_validation=True`, and the image quality can be further improved at the cost of variation by setting e.g. `truncation_psi=0.5`. Note that truncation is always disabled when using the sub-networks directly. The average *w* needed to manually perform the truncation trick can be looked up using `Gs.get_var('dlatent_avg')`; a manual-truncation sketch is given after this list.
-
-* `randomize_noise` determines whether to re-randomize the noise inputs for each generated image (`True`, default) or whether to use specific noise values for the entire minibatch (`False`). The specific values can be accessed via the `tf.Variable` instances that are found using `[var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]`.
-
-* When using the mapping network directly, you can specify `dlatent_broadcast=None` to disable the automatic duplication of `dlatents` over the layers of the synthesis network.
-
-* Runtime performance can be fine-tuned via `structure='fixed'` and `dtype='float16'`. The former disables support for progressive growing, which is not needed for a fully-trained generator, and the latter performs all computation using half-precision floating point arithmetic.
-
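-A minimal sketch (not part of the original scripts) of performing the truncation manually when driving the sub-networks directly; it assumes `Gs`, `latents`, and `fmt` are defined as in the earlier snippets:
-
-```
-dlatent_avg = Gs.get_var('dlatent_avg')                  # average w, shape [512]
-dlatents = Gs.components.mapping.run(latents, None)      # [batch, layer, 512]
-psi = 0.7
-dlatents = dlatent_avg + psi * (dlatents - dlatent_avg)  # interpolate toward the average w
-images = Gs.components.synthesis.run(dlatents, randomize_noise=False, output_transform=fmt)
-```
-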
-## Preparing datasets for training
-
-The training and evaluation scripts operate on datasets stored as multi-resolution TFRecords. Each dataset is represented by a directory containing the same image data in several resolutions to enable efficient streaming. There is a separate *.tfrecords file for each resolution, and if the dataset contains labels, they are stored in a separate file as well. By default, the scripts expect to find the datasets at `datasets/
-w: 3.7447 | Linear Separability in *Z* and *W*.
-
-Please note that the exact results may vary from run to run due to the non-deterministic nature of TensorFlow.
-
-## Acknowledgements
-
-We thank Jaakko Lehtinen, David Luebke, and Tuomas Kynkäänniemi for in-depth discussions and helpful comments; Janne Hellsten, Tero Kuosmanen, and Pekka Jänis for compute infrastructure and help with the code release.
diff --git a/spaces/michaljunczyk/pl-asr-bigos-workspace/test.md b/spaces/michaljunczyk/pl-asr-bigos-workspace/test.md
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/mikonvergence/theaTRON/src/ControlNetInpaint/src/pipeline_stable_diffusion_controlnet_inpaint.py b/spaces/mikonvergence/theaTRON/src/ControlNetInpaint/src/pipeline_stable_diffusion_controlnet_inpaint.py
deleted file mode 100644
index 1bd75774366cf0e913bae98a333581501b628e73..0000000000000000000000000000000000000000
--- a/spaces/mikonvergence/theaTRON/src/ControlNetInpaint/src/pipeline_stable_diffusion_controlnet_inpaint.py
+++ /dev/null
@@ -1,507 +0,0 @@
-import torch
-import PIL.Image
-import numpy as np
-
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import *
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```py
- >>> # !pip install opencv-python transformers accelerate
- >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, UniPCMultistepScheduler
- >>> from diffusers.utils import load_image
- >>> import numpy as np
- >>> import torch
-
- >>> import cv2
- >>> from PIL import Image
- >>> # download an image
- >>> image = load_image(
- ... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
- ... )
- >>> image = np.array(image)
- >>> mask_image = load_image(
- ... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
- ... )
- >>> mask_image = np.array(mask_image)
- >>> # get canny image
- >>> canny_image = cv2.Canny(image, 100, 200)
- >>> canny_image = canny_image[:, :, None]
- >>> canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2)
- >>> canny_image = Image.fromarray(canny_image)
-
- >>> # load control net and stable diffusion v1-5
- >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
- >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
- ... "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16
- ... )
-
- >>> # speed up diffusion process with faster scheduler and memory optimization
- >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
- >>> # remove following line if xformers is not installed
- >>> pipe.enable_xformers_memory_efficient_attention()
-
- >>> pipe.enable_model_cpu_offload()
-
- >>> # generate image
- >>> generator = torch.manual_seed(0)
- >>> image = pipe(
- ... "futuristic-looking doggo",
- ... num_inference_steps=20,
- ... generator=generator,
- ... image=image,
- ... control_image=canny_image,
- ... mask_image=mask_image
- ... ).images[0]
- ```
-"""
-
-
-def prepare_mask_and_masked_image(image, mask):
- """
- Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
- converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
- ``image`` and ``1`` for the ``mask``.
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
- Args:
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
- mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
- Raises:
- ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
- should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
- TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
- (or the other way around).
- Returns:
- tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
- dimensions: ``batch x channels x height x width``.
- """
- if isinstance(image, torch.Tensor):
- if not isinstance(mask, torch.Tensor):
- raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)}) is not")
-
- # Batch single image
- if image.ndim == 3:
- assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
- image = image.unsqueeze(0)
-
- # Batch and add channel dim for single mask
- if mask.ndim == 2:
- mask = mask.unsqueeze(0).unsqueeze(0)
-
- # Batch single mask or add channel dim
- if mask.ndim == 3:
- # Single batched mask, no channel dim or single mask not batched but channel dim
- if mask.shape[0] == 1:
- mask = mask.unsqueeze(0)
-
- # Batched masks no channel dim
- else:
- mask = mask.unsqueeze(1)
-
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
- assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
-
- # Check image is in [-1, 1]
- if image.min() < -1 or image.max() > 1:
- raise ValueError("Image should be in [-1, 1] range")
-
- # Check mask is in [0, 1]
- if mask.min() < 0 or mask.max() > 1:
- raise ValueError("Mask should be in [0, 1] range")
-
- # Binarize mask
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
-
- # Image as float32
- image = image.to(dtype=torch.float32)
- elif isinstance(mask, torch.Tensor):
- raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not")
- else:
- # preprocess image
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
- image = [image]
-
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
- image = np.concatenate(image, axis=0)
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
- image = np.concatenate([i[None, :] for i in image], axis=0)
-
- image = image.transpose(0, 3, 1, 2)
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
- # preprocess mask
- if isinstance(mask, (PIL.Image.Image, np.ndarray)):
- mask = [mask]
-
- if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
- mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
- mask = mask.astype(np.float32) / 255.0
- elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
- mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
-
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
- mask = torch.from_numpy(mask)
-
- masked_image = image * (mask < 0.5)
-
- return mask, masked_image
-
-class StableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetPipeline):
- r"""
- Pipeline for text-guided image inpainting using Stable Diffusion with ControlNet guidance.
-
- This model inherits from [`StableDiffusionControlNetPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
- text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder. Stable Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- controlnet ([`ControlNetModel`]):
- Provides additional conditioning to the unet during the denoising process
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- safety_checker ([`StableDiffusionSafetyChecker`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
- feature_extractor ([`CLIPFeatureExtractor`]):
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
- """
-
- def prepare_mask_latents(
- self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
- ):
- # resize the mask to latents shape as we concatenate the mask to the latents
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
- # and half precision
- mask = torch.nn.functional.interpolate(
- mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
- )
- mask = mask.to(device=device, dtype=dtype)
-
- masked_image = masked_image.to(device=device, dtype=dtype)
-
- # encode the mask image into latents space so we can concatenate it to the latents
- if isinstance(generator, list):
- masked_image_latents = [
- self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
- for i in range(batch_size)
- ]
- masked_image_latents = torch.cat(masked_image_latents, dim=0)
- else:
- masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
- masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
-
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
- if mask.shape[0] < batch_size:
- if not batch_size % mask.shape[0] == 0:
- raise ValueError(
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
- f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
- " of masks that you pass is divisible by the total requested batch size."
- )
- mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
- if masked_image_latents.shape[0] < batch_size:
- if not batch_size % masked_image_latents.shape[0] == 0:
- raise ValueError(
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
- " Make sure the number of images that you pass is divisible by the total requested batch size."
- )
- masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
-
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
- masked_image_latents = (
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
- )
-
- # aligning device to prevent device errors when concating it with the latent model input
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
- return mask, masked_image_latents
-
- @torch.no_grad()
- @replace_example_docstring(EXAMPLE_DOC_STRING)
- def __call__(
- self,
- prompt: Union[str, List[str]] = None,
- image: Union[torch.FloatTensor, PIL.Image.Image] = None,
- control_image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] = None,
- mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
- height: Optional[int] = None,
- width: Optional[int] = None,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: int = 1,
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- controlnet_conditioning_scale: float = 1.0,
- ):
- r"""
- Function invoked when calling the pipeline for generation.
- Args:
- prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
- image (`PIL.Image.Image`):
- `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
- be masked out with `mask_image` and repainted according to `prompt`.
- control_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
- The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
- the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
- also be accepted as an image. The control image is automatically resized to fit the output image.
- mask_image (`PIL.Image.Image`):
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
- repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
- to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
- instead of 3, so the expected shape would be `(B, H, W, 1)`.
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The width in pixels of the generated image.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
- `negative_prompt_embeds` instead.
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
- [`schedulers.DDIMScheduler`], will be ignored for others.
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
- to make generation deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will be generated by sampling using the supplied random `generator`.
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
- argument.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generate image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
- plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
- cross_attention_kwargs (`dict`, *optional*):
- A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
- `self.processor` in
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
- controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
- to the residual in the original unet.
- Examples:
- Returns:
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
- When returning a tuple, the first element is a list with the generated images, and the second element is a
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
- (nsfw) content, according to the `safety_checker`.
- """
- # 0. Default height and width to unet
- height, width = self._default_height_width(height, width, control_image)
-
- # 1. Check inputs. Raise error if not correct
- self.check_inputs(
- prompt, control_image, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
- )
-
- # 2. Define call parameters
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- device = self._execution_device
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # 3. Encode input prompt
- prompt_embeds = self._encode_prompt(
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt,
- prompt_embeds=prompt_embeds,
- negative_prompt_embeds=negative_prompt_embeds,
- )
-
- # 4. Prepare image
- control_image = self.prepare_image(
- control_image,
- width,
- height,
- batch_size * num_images_per_prompt,
- num_images_per_prompt,
- device,
- self.controlnet.dtype,
- )
-
- if do_classifier_free_guidance:
- control_image = torch.cat([control_image] * 2)
-
- # 5. Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps = self.scheduler.timesteps
-
- # 6. Prepare latent variables
- num_channels_latents = self.controlnet.config.in_channels
- latents = self.prepare_latents(
- batch_size * num_images_per_prompt,
- num_channels_latents,
- height,
- width,
- prompt_embeds.dtype,
- device,
- generator,
- latents,
- )
-
- # EXTRA: prepare mask latents
- mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
- mask, masked_image_latents = self.prepare_mask_latents(
- mask,
- masked_image,
- batch_size * num_images_per_prompt,
- height,
- width,
- prompt_embeds.dtype,
- device,
- generator,
- do_classifier_free_guidance,
- )
-
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 8. Denoising loop
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- down_block_res_samples, mid_block_res_sample = self.controlnet(
- latent_model_input,
- t,
- encoder_hidden_states=prompt_embeds,
- controlnet_cond=control_image,
- return_dict=False,
- )
-
- down_block_res_samples = [
- down_block_res_sample * controlnet_conditioning_scale
- for down_block_res_sample in down_block_res_samples
- ]
- mid_block_res_sample *= controlnet_conditioning_scale
-
- # predict the noise residual
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
- noise_pred = self.unet(
- latent_model_input,
- t,
- encoder_hidden_states=prompt_embeds,
- cross_attention_kwargs=cross_attention_kwargs,
- down_block_additional_residuals=down_block_res_samples,
- mid_block_additional_residual=mid_block_res_sample,
- ).sample
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # call the callback, if provided
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
- progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- # If we do sequential model offloading, let's offload unet and controlnet
- # manually for max memory savings
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.unet.to("cpu")
- self.controlnet.to("cpu")
- torch.cuda.empty_cache()
-
- if output_type == "latent":
- image = latents
- has_nsfw_concept = None
- elif output_type == "pil":
- # 8. Post-processing
- image = self.decode_latents(latents)
-
- # 9. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-
- # 10. Convert to PIL
- image = self.numpy_to_pil(image)
- else:
- # 8. Post-processing
- image = self.decode_latents(latents)
-
- # 9. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-
- # Offload last model to CPU
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.final_offload_hook.offload()
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diff --git a/spaces/mithril-security/NonSuspiciousImageDecoder/app.py b/spaces/mithril-security/NonSuspiciousImageDecoder/app.py
deleted file mode 100644
index aa79b4766b241cc1c4f2105b78fedbe8710c2bb4..0000000000000000000000000000000000000000
--- a/spaces/mithril-security/NonSuspiciousImageDecoder/app.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import gradio as gr
-import cv2
-import io
-import pandas as pd
-
-from LSBSteg import LSBSteg
-
-
-def convert(file):
- print(f"Converting file {file}")
- in_img = cv2.imread(file, cv2.IMREAD_UNCHANGED)
- lsbsteg = LSBSteg(in_img)
- data = lsbsteg.decode_binary()
-    buffer = io.BytesIO(data)
-    dataframe = pd.read_parquet(buffer)
-
- # dataframe.to_csv('output.csv')
- return dataframe.head(20)
-
-
-with gr.Blocks() as demo:
- gr.Markdown("""
- ## Non-Suspicious image decoder
-
-    This tool shows the extraction of a dataframe hidden inside an image.
-
- There are a few ways to hide data in a PNG file, notably:
- * adding it after the end of the file (after the PNG IEND chunk), so that it gets
- ignored by image viewers
- * adding it as comments in the PNG file (tEXt chunks)
-
- These methods are kind of easy to spot! Also, a lot of software, browsers, image upload
-    websites, etc. often just strip them.
-
- So, here, we have a different, more thoughtful (and arguably cooler) method.
-
- This class hides the data using a basic kind of **[steganography](https://en.wikipedia.org/wiki/Steganography)**:
- it hides it in the
- *least significant bits* of the raw (uncompressed) picture: tiny differences in the red, green and blue
- channel of the image encodes the data we're interested in.
-
- This means the resulting picture
-    looks **very close to the original image**, and for the data we hide here, it is **imperceptible
- to the naked eye**.
-
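-    To make this concrete, here is a toy sketch (it does not use the `LSBSteg` class from this app)
-    of writing a single byte into the least significant bits of eight pixel values and reading it back:
-
-    ```python
-    import numpy as np
-
-    pixels = np.array([10, 200, 33, 47, 90, 180, 5, 255], dtype=np.uint8)
-    secret = 0b10110010
-    bits = np.array([(secret >> (7 - i)) & 1 for i in range(8)], dtype=np.uint8)
-    stego = (pixels & 0xFE) | bits              # overwrite each pixel's last bit
-    recovered = 0
-    for b in (stego & 1):                       # read the LSBs back, MSB first
-        recovered = (recovered << 1) | int(b)
-    assert recovered == secret                  # pixel values change by at most 1
-    ```
-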
-    The resulting PNG file will probably get a little bigger as a result, since PNG compression
-    has a harder time once our stolen data is injected into the image. This is
-    not much of a problem since the overhead stays under 100 KB, so it's not that noticeable.
-
- """)
- with gr.Row():
- im = gr.Image(label="Input image file", type="filepath")
-
- def preprocess(encoding: str) -> str:
- # We do our own preprocessing because gradio's deletes PNG metadata :(
- import tempfile
- import base64
-
- content = encoding.split(";")[1]
- image_encoded = content.split(",")[1]
- png_content = base64.b64decode(image_encoded)
- file_obj = tempfile.NamedTemporaryFile(
- delete=False,
- suffix=".input.png",
- )
- file_obj.write(png_content)
- return file_obj.name
-
- im.preprocess = preprocess
- df_out = gr.Dataframe(
- label="Output dataframe (first 20 rows)", max_rows=20, overflow_row_behaviour="paginate"
- )
- gr.Markdown("Click on the example below to get the data from the associated colab notebook :)")
- gr.Examples(
- examples=["sample-picture.png"],
- inputs=[im],
- outputs=[df_out],
- fn=convert,
- cache_examples=True,
- )
- # file_out = gr.File(label="Full output CSV file")
- btn = gr.Button(value="Extract")
- # demo = gr.Interface(convert, im, im_2)
- btn.click(convert, inputs=[im], outputs=[df_out])
-
- # example_img = os.path.join(os.path.dirname(__file__), "example-picture.png")
-
-if __name__ == "__main__":
- demo.launch()
diff --git a/spaces/miyaaa666/bingo/src/components/learn-more.tsx b/spaces/miyaaa666/bingo/src/components/learn-more.tsx
deleted file mode 100644
index a64459ee7900a612292e117a6bda96ee9260990f..0000000000000000000000000000000000000000
--- a/spaces/miyaaa666/bingo/src/components/learn-more.tsx
+++ /dev/null
@@ -1,39 +0,0 @@
-import React from 'react'
-import { SourceAttribution } from '@/lib/bots/bing/types'
-
-export interface LearnMoreProps {
- sourceAttributions?: SourceAttribution[]
-}
-
-export function LearnMore({ sourceAttributions }: LearnMoreProps) {
- if (!sourceAttributions?.length) {
- return null
- }
-
- return (
-
-Champion Jack Dupree: Blues from the Gutter - A Classic Album of Piano Blues
-Champion Jack Dupree-Blues from the Gutter full album zip
-
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dragon Ball Z Battle Of Gods Tscam.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dragon Ball Z Battle Of Gods Tscam.md
deleted file mode 100644
index cdf76932614ea73ec64219074dc162ccf9f779b3..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dragon Ball Z Battle Of Gods Tscam.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-Dragon Ball Z: Battle of Gods - The Movie That Shook the Anime World
-Dragon ball z battle of gods tscam
-
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Kush Audio Clariphonic Crack Cocainel.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Kush Audio Clariphonic Crack Cocainel.md
deleted file mode 100644
index 36b485a24db0c0b1c68f49e61494101ab56ec7b9..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Kush Audio Clariphonic Crack Cocainel.md
+++ /dev/null
@@ -1,109 +0,0 @@
-
-Kush Audio Clariphonic Crack Cocaine: What You Need to Know
-Kush Audio Clariphonic: A Parallel High-Frequency Equalizer
-Kush Audio Clariphonic Crack Cocainel
-
-
-
-
-Crack Cocaine: A Highly Addictive Stimulant Drug
-
-
-
-
- Kush Audio Clariphonic Crack Cocaine: A Dangerous Combination
-Conclusion
-
-
-
-
- FAQs
-
-
b2dd77e56b
-
-
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/RealFlight Expansion Packs Add-Ons Mod.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/RealFlight Expansion Packs Add-Ons Mod.md
deleted file mode 100644
index 109d965666767e2c5723e4e1134af9855effc443..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/RealFlight Expansion Packs Add-Ons Mod.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-RealFlight Expansion Packs Add-Ons Mod: How to Enhance Your Flight Simulation Experience
- RealFlight Expansion Packs Add-Ons mod
- What is RealFlight and why is it popular among RC enthusiasts?
- RealFlight is a realistic and immersive flight simulator for RC aircraft
- RealFlight offers a variety of features and options to customize your flying experience
- What are expansion packs and add-ons and how do they work with RealFlight?
- Expansion packs are official software updates that add new aircraft and flying sites to RealFlight
- Add-ons are unofficial software modifications that enhance or alter the existing features of RealFlight
- What are some of the best expansion packs and add-ons for RealFlight?
- Expansion Pack 6: The most value-packed expansion pack with 18 aircraft and 3 flying sites
- Expansion Pack 4: The most diverse expansion pack with 16 aircraft and 4 flying sites
- Add-Ons Volume 5: The most comprehensive add-on with 24 flying sites and 70 aircraft
- How to install and use expansion packs and add-ons for RealFlight?
- Installing expansion packs is easy and straightforward with the RealFlight Launcher
-
-
- Installing add-ons requires some manual steps and precautions to avoid errors or conflicts
-
-
- Conclusion: RealFlight expansion packs and add-ons are a great way to expand your flight simulation horizons
- FAQs
- Can I use expansion packs and add-ons from older versions of RealFlight with the latest version?
- Can I create my own expansion packs or add-ons for RealFlight?
-Yes, you can create your own expansion packs or add-ons for RealFlight if you have the skills and tools to do so. RealFlight provides some editors and utilities that allow you to create your own aircraft or flying sites. You can also use other software programs or tools to modify the graphics, physics, sound, or other aspects of RealFlight. However, creating your own expansion packs or add-ons requires a lot of time, effort, and knowledge. You also need to respect the intellectual property rights of Knife Edge Software, Great Planes Model Manufacturing, and other developers or users.
- Where can I find more expansion packs or add-ons for RealFlight?
-
-
- How can I uninstall or disable expansion packs or add-ons for RealFlight?
-
-
- Are expansion packs or add-ons compatible with multiplayer mode?
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/docs/README.md b/spaces/nikitaPDL2023/assignment4/detectron2/docs/README.md
deleted file mode 100644
index 8531cafd4d1aae0267f4fc5e7212f7db5ed90686..0000000000000000000000000000000000000000
--- a/spaces/nikitaPDL2023/assignment4/detectron2/docs/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Read the docs:
-
-The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/).
-Documents in this directory are not meant to be read on github.
-
-# Build the docs:
-
-1. Install detectron2 according to [INSTALL.md](../INSTALL.md).
-2. Install additional libraries required to build docs:
- - docutils==0.16
- - Sphinx==3.2.0
- - recommonmark==0.6.0
- - sphinx_rtd_theme
-
-3. Run `make html` from this directory.
diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/vis/bounding_box.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/vis/bounding_box.py
deleted file mode 100644
index 4f83957221f4503e707f2270a20e8d3829a299af..0000000000000000000000000000000000000000
--- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/vis/bounding_box.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from .base import RectangleVisualizer, TextVisualizer
-
-
-class BoundingBoxVisualizer(object):
- def __init__(self):
- self.rectangle_visualizer = RectangleVisualizer()
-
- def visualize(self, image_bgr, boxes_xywh):
- for bbox_xywh in boxes_xywh:
- image_bgr = self.rectangle_visualizer.visualize(image_bgr, bbox_xywh)
- return image_bgr
-
-
-class ScoredBoundingBoxVisualizer(object):
- def __init__(self, bbox_visualizer_params=None, score_visualizer_params=None, **kwargs):
- if bbox_visualizer_params is None:
- bbox_visualizer_params = {}
- if score_visualizer_params is None:
- score_visualizer_params = {}
- self.visualizer_bbox = RectangleVisualizer(**bbox_visualizer_params)
- self.visualizer_score = TextVisualizer(**score_visualizer_params)
-
- def visualize(self, image_bgr, scored_bboxes):
- boxes_xywh, box_scores = scored_bboxes
- assert len(boxes_xywh) == len(
- box_scores
- ), "Number of bounding boxes {} should be equal to the number of scores {}".format(
- len(boxes_xywh), len(box_scores)
- )
- for i, box_xywh in enumerate(boxes_xywh):
- score_i = box_scores[i]
- image_bgr = self.visualizer_bbox.visualize(image_bgr, box_xywh)
- score_txt = "{0:6.4f}".format(score_i)
- topleft_xy = box_xywh[0], box_xywh[1]
- image_bgr = self.visualizer_score.visualize(image_bgr, score_txt, topleft_xy)
- return image_bgr
diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_b_in21k_50ep.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_b_in21k_50ep.py
deleted file mode 100644
index d18c925f7349b42e52adb9c7b4e5461e1a25657f..0000000000000000000000000000000000000000
--- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_b_in21k_50ep.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from detectron2.config.lazy import LazyCall as L
-from detectron2.data.detection_utils import get_fed_loss_cls_weights
-from detectron2.data.samplers import RepeatFactorTrainingSampler
-from detectron2.evaluation.lvis_evaluation import LVISEvaluator
-
-from ..COCO.cascade_mask_rcnn_swin_b_in21k_50ep import (
- dataloader,
- model,
- train,
- lr_multiplier,
- optimizer,
-)
-
-dataloader.train.dataset.names = "lvis_v1_train"
-dataloader.train.sampler = L(RepeatFactorTrainingSampler)(
- repeat_factors=L(RepeatFactorTrainingSampler.repeat_factors_from_category_frequency)(
- dataset_dicts="${dataloader.train.dataset}", repeat_thresh=0.001
- )
-)
-dataloader.test.dataset.names = "lvis_v1_val"
-dataloader.evaluator = L(LVISEvaluator)(
- dataset_name="${..test.dataset.names}",
- max_dets_per_image=300,
-)
-
-model.backbone.bottom_up.drop_path_rate = 0.3
-
-model.roi_heads.num_classes = 1203
-for i in range(3):
- model.roi_heads.box_predictors[i].test_score_thresh = 0.02
- model.roi_heads.box_predictors[i].test_topk_per_image = 300
- model.roi_heads.box_predictors[i].use_sigmoid_ce = True
- model.roi_heads.box_predictors[i].use_fed_loss = True
- model.roi_heads.box_predictors[i].get_fed_loss_cls_weights = lambda: get_fed_loss_cls_weights(
- dataloader.train.dataset.names, 0.5
- )
-
-# Schedule
-# 100 ep = 156250 iters * 64 images/iter / 100000 images/ep
-# 100 ep -> 50 ep as the model achieves better performance with 50 epochs
-train.max_iter = 156250 // 2
-train.eval_period = 30000
-
-lr_multiplier.scheduler.milestones = [milestone // 2 for milestone in [138889, 150463]]
-lr_multiplier.scheduler.num_updates = train.max_iter
-lr_multiplier.warmup_length = 250 / train.max_iter
-
-# Optimized hyperparams
-optimizer.lr = 1e-4
diff --git a/spaces/nota-ai/compressed-wav2lip/demo.py b/spaces/nota-ai/compressed-wav2lip/demo.py
deleted file mode 100644
index ffc1720318277a60274828e1a8be85d10e18a46a..0000000000000000000000000000000000000000
--- a/spaces/nota-ai/compressed-wav2lip/demo.py
+++ /dev/null
@@ -1,302 +0,0 @@
-from pathlib import Path
-import time
-import argparse
-import json
-import subprocess
-import platform
-
-import numpy as np
-import cv2
-from tqdm import tqdm
-import torch
-
-from models import Wav2Lip, Wav2Lip_noRes
-import audio
-
-# device = 'cuda' if torch.cuda.is_available() else 'cpu'
-device = 'cpu'
-print('Using {} for inference.'.format(device))
-
-BATCH_SIZE = 1
-WAV2LIP_BATCH_SIZE = 1
-IMG_SIZE = 96
-VIDEO_FPS = 25
-FRAME_H = 224
-FRAME_W = 224
-MEL_STEP_SIZE = 16
-SAMPLING_RATE = 16000
-ORIGINAL_CHECKPOINT_PATH = "checkpoints/lrs3_e16a32d32.pth"
-COMPRESSED_CHECKPOINT_PATH = "checkpoints/lrs3_e4a8d8_noRes.pth"
-
-
-def _load(checkpoint_path):
- if device == 'cuda':
- checkpoint = torch.load(checkpoint_path)
- else:
- checkpoint = torch.load(checkpoint_path,
- map_location=lambda storage, loc: storage)
- return checkpoint
-
-
-def load_model_nota(cls, path, **kwargs):
- model = cls(**kwargs)
- print("Load checkpoint from: {}".format(path))
- checkpoint = _load(path)
- model.load_state_dict(checkpoint)
- model = model.to(device)
- return model.eval()
-
-def count_params(model):
- return sum(p.numel() for p in model.parameters())
-
-
-class VideoSlicer:
- def __init__(self, args, frame_dir, bbox_path, video_path=None):
- self.args = args
- self.fps = VIDEO_FPS
- self.frame_dir = frame_dir
- self.frame_path_list = sorted(list(Path(self.frame_dir).glob("*.jpg")))
- self.frame_array_list = [cv2.imread(str(image)) for image in self.frame_path_list]
-
- with open(bbox_path, 'r') as f:
- metadata = json.load(f)
- self.bbox = [metadata['bbox'][key] for key in sorted(metadata['bbox'].keys())]
- self.bbox_format = metadata['format']
- assert len(self.bbox) == len(self.frame_array_list)
- self._video_path = video_path
-
- @property
- def video_path(self):
- return self._video_path
-
- def __len__(self):
- return len(self.frame_array_list)
-
- def __getitem__(self, idx):
- bbox = self.bbox[idx]
- frame_original = self.frame_array_list[idx]
- # return frame_original[bbox[0]:bbox[1], bbox[2]:bbox[3], :]
- return frame_original, bbox
-
-
-class AudioSlicer:
- def __init__(self, args, audio_path):
- self.args = args
- self.fps = VIDEO_FPS
- self.mel_chunks = self._audio_chunk_generator(audio_path)
- self._audio_path = audio_path
-
- @property
- def audio_path(self):
- return self._audio_path
-
- def __len__(self):
- return len(self.mel_chunks)
-
- def _audio_chunk_generator(self, audio_path):
- wav = audio.load_wav(audio_path, SAMPLING_RATE)
- mel = audio.melspectrogram(wav)
-
- if np.isnan(mel.reshape(-1)).sum() > 0:
- raise ValueError('Mel contains nan! Using a TTS voice? Add a small epsilon noise to the wav file and try again')
-
- mel_chunks = []
- mel_idx_multiplier = 80. / self.fps
-
- i = 0
- while True:
- start_idx = int(i * mel_idx_multiplier)
- if start_idx + MEL_STEP_SIZE > len(mel[0]):
- mel_chunks.append(mel[:, len(mel[0]) - MEL_STEP_SIZE:])
- return mel_chunks
- mel_chunks.append(mel[:, start_idx: start_idx + MEL_STEP_SIZE])
- i += 1
-
- def __getitem__(self, idx):
- return self.mel_chunks[idx]
-
-
-class Wav2LipCompressionDemo:
- def __init__(self, args, result_dir='./temp') -> None:
- self.args = args
- self.video_dict = {}
- self.audio_dict = {}
- self.model_original = load_model_nota(Wav2Lip, ORIGINAL_CHECKPOINT_PATH)
- self.model_compressed = load_model_nota(Wav2Lip_noRes, COMPRESSED_CHECKPOINT_PATH, nef=4, naf=8, ndf=8)
-
- self.params_original = f"{(count_params(self.model_original)/1e6):.1f}M"
- self.params_compressed = f"{(count_params(self.model_compressed)/1e6):.1f}M"
-
- self.result_dir = Path(result_dir)
- self.result_dir.mkdir(exist_ok=True)
-
- def update_audio(self, audio_path, name=None):
- _name = name if name is not None else Path(audio_path).stem
- self.audio_dict.update(
- {_name: AudioSlicer(self.args, audio_path)}
- )
-
- def update_video(self, frame_dir_path, bbox_path, video_path=None, name=None):
- _name = name if name is not None else Path(frame_dir_path).stem
- self.video_dict.update(
- {_name: VideoSlicer(self.args, frame_dir_path, bbox_path, video_path=video_path)}
- )
-
- @staticmethod
- def _paired_data_iterator(audio_iterable, video_iterable):
- img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
-
- for i, m in enumerate(audio_iterable):
- idx = i % len(video_iterable)
- _frame_to_save, coords = video_iterable[idx]
- frame_to_save = _frame_to_save.copy()
- face = frame_to_save[coords[0]:coords[1], coords[2]:coords[3]].copy()
-
- face = cv2.resize(face, (IMG_SIZE, IMG_SIZE))
-
- img_batch.append(face)
- mel_batch.append(m)
- frame_batch.append(frame_to_save)
- coords_batch.append(coords)
-
- if len(img_batch) >= WAV2LIP_BATCH_SIZE:
- img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
-
- img_masked = img_batch.copy()
- img_masked[:, IMG_SIZE//2:] = 0
-
- img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
- mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
-
- yield img_batch, mel_batch, frame_batch, coords_batch
- img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
-
- if len(img_batch) > 0:
- img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
-
- img_masked = img_batch.copy()
- img_masked[:, IMG_SIZE//2:] = 0
-
- img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
- mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
-
- yield img_batch, mel_batch, frame_batch, coords_batch
-
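A small standalone sketch of the masking step used above: the lower half of each resized face crop is zeroed and concatenated channel-wise with the unmasked crop, so the generator receives a 6-channel input in which the mouth region must be synthesized from the audio. The random array merely stands in for a real face crop.

import numpy as np

IMG_SIZE = 96
face = np.random.randint(0, 255, (1, IMG_SIZE, IMG_SIZE, 3), dtype=np.uint8)  # dummy crop batch

masked = face.copy()
masked[:, IMG_SIZE // 2:] = 0                          # zero out the lower half (mouth region)
model_input = np.concatenate((masked, face), axis=3) / 255.

print(model_input.shape)                               # (1, 96, 96, 6): masked + reference channels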
- def _infer(self, audio_name, video_name, model_type='original'):
- audio_iterable = self.audio_dict[audio_name]
- video_iterable = self.video_dict[video_name]
- data_iterator = self._paired_data_iterator(audio_iterable, video_iterable)
-
- for (img_batch, mel_batch, frames, coords) in tqdm(data_iterator,
- total=int(np.ceil(float(len(audio_iterable)) / WAV2LIP_BATCH_SIZE))):
-
- img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)
- mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(device)
-
- with torch.no_grad():
- if model_type == 'original':
- preds = self.model_original(mel_batch, img_batch)
- elif model_type == 'compressed':
- preds = self.model_compressed(mel_batch, img_batch)
- else:
- raise ValueError(f"`model_type` should be either `original` or `compressed`!")
-
- preds = preds.cpu().numpy().transpose(0, 2, 3, 1) * 255.
- for pred, frame, coord in zip(preds, frames, coords):
- y1, y2, x1, x2 = coord
- pred = cv2.resize(pred.astype(np.uint8), (x2 - x1, y2 - y1))
-
- frame[y1:y2, x1:x2] = pred
- yield frame
-
- def save_as_video(self, audio_name, video_name, model_type):
-
- output_video_path = self.result_dir / 'original_voice.mp4'
- frame_only_video_path = self.result_dir / 'original.mp4'
- audio_path = self.audio_dict[audio_name].audio_path
-
- out = cv2.VideoWriter(str(frame_only_video_path),
- cv2.VideoWriter_fourcc(*'mp4v'), VIDEO_FPS, (FRAME_W, FRAME_H))
- start = time.time()
- for frame in self._infer(audio_name=audio_name, video_name=video_name, model_type=model_type):
- out.write(frame)
- inference_time = time.time() - start
- out.release()
-
- command = f"ffmpeg -hide_banner -loglevel error -y -i {audio_path} -i {frame_only_video_path} -strict -2 -q:v 1 {output_video_path}"
- subprocess.call(command, shell=platform.system() != 'Windows')
-
-        # Number of frames in the generated video
- video_frames_num = len(self.audio_dict[audio_name])
- inference_fps = video_frames_num / inference_time
-
- return output_video_path, inference_time, inference_fps
-
-
-def get_parsed_args():
-
- parser = argparse.ArgumentParser(description='Inference code to lip-sync videos in the wild using Wav2Lip models')
-
- parser.add_argument('--resize_factor', default=1, type=int,
- help='Reduce the resolution by this factor. Sometimes, best results are obtained at 480p or 720p')
-
- parser.add_argument('--crop', nargs='+', type=int, default=[0, -1, 0, -1],
- help='Crop video to a smaller region (top, bottom, left, right). Applied after resize_factor and rotate arg. '
-                             'Useful if multiple faces are present. -1 implies the value will be auto-inferred based on the height and width.')
-
- parser.add_argument('--box', nargs='+', type=int, default=[-1, -1, -1, -1],
-                        help='Specify a constant bounding box for the face. Use only as a last resort if the face is not detected. '
-                             'Also, it might work only if the face is not moving around much. Syntax: (top, bottom, left, right).')
-
- parser.add_argument('--rotate', default=False, action='store_true',
-                        help='Sometimes videos taken from a phone can be rotated by 90 degrees. If set, the video will be rotated right by 90 degrees. '
-                             'Use this if you get a flipped result despite feeding a normal-looking video.')
-
- parser.add_argument('--nosmooth', default=False, action='store_true',
- help='Prevent smoothing face detections over a short temporal window')
-
- args = parser.parse_args()
-
- return args
-
-
-def main():
- args = get_parsed_args()
-
- demo_generator = Wav2LipCompressionDemo(args)
- demo_generator.update_audio("sample/1673_orig.wav", name="1673")
- demo_generator.update_audio("sample/4598_orig.wav", name="4598")
- demo_generator.update_video("sample/2145_orig", "sample/2145_orig.json", name="2145")
- demo_generator.update_video("sample/2942_orig", "sample/2942_orig.json", name="2942")
-
- processed_time = []
- for i in range(5):
- start = time.time()
- out = cv2.VideoWriter('temp/original.mp4',
- cv2.VideoWriter_fourcc(*'mp4v'), VIDEO_FPS, (FRAME_W, FRAME_H))
-        for frame in demo_generator._infer(audio_name="4598", video_name="2145", model_type="original"):
- out.write(frame)
- out.release()
- processed_time.append(time.time() - start)
-
- command = f"ffmpeg -hide_banner -loglevel error -y -i {'sample/4598_orig.wav'} -i {'temp/original.mp4'} -strict -2 -q:v 1 {'temp/original_voice.mp4'}"
- subprocess.call(command, shell=platform.system() != 'Windows')
- print(f"Processed time: {np.mean(processed_time)}")
-
- processed_time = []
- for i in range(5):
- start = time.time()
- out = cv2.VideoWriter('temp/compressed.mp4',
- cv2.VideoWriter_fourcc(*'mp4v'), VIDEO_FPS, (FRAME_W, FRAME_H))
-        for frame in demo_generator._infer(audio_name="4598", video_name="2145", model_type="compressed"):
- out.write(frame)
- out.release()
- processed_time.append(time.time() - start)
-
- command = f"ffmpeg -hide_banner -loglevel error -y -i {'sample/4598_orig.wav'} -i {'temp/compressed.mp4'} -strict -2 -q:v 1 {'temp/compressed_voice.mp4'}"
- subprocess.call(command, shell=platform.system() != 'Windows')
- print(f"Processed time: {np.mean(processed_time)}")
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/olimpa/Agenda-Inter/README.md b/spaces/olimpa/Agenda-Inter/README.md
deleted file mode 100644
index 796112f857101bbff5f341adfcb7965fd68a997d..0000000000000000000000000000000000000000
--- a/spaces/olimpa/Agenda-Inter/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Agenda Inter
-emoji: 🔥
-colorFrom: indigo
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/omlab/vlchecklist_demo/utils/colormap.py b/spaces/omlab/vlchecklist_demo/utils/colormap.py
deleted file mode 100644
index de6c8a15f52ff5f37885b24d6603982c3de95db6..0000000000000000000000000000000000000000
--- a/spaces/omlab/vlchecklist_demo/utils/colormap.py
+++ /dev/null
@@ -1,178 +0,0 @@
-import numpy as np
-
-
-def colormap(rgb=False):
- color_list = np.array(
- [
- 0.000, 0.447, 0.741,
- 0.850, 0.325, 0.098,
- 0.929, 0.694, 0.125,
- 0.494, 0.184, 0.556,
- 0.466, 0.674, 0.188,
- 0.301, 0.745, 0.933,
- 0.635, 0.078, 0.184,
- 0.300, 0.300, 0.300,
- 0.600, 0.600, 0.600,
- 1.000, 0.000, 0.000,
- 1.000, 0.500, 0.000,
- 0.749, 0.749, 0.000,
- 0.000, 1.000, 0.000,
- 0.000, 0.000, 1.000,
- 0.667, 0.000, 1.000,
- 0.333, 0.333, 0.000,
- 0.333, 0.667, 0.000,
- 0.333, 1.000, 0.000,
- 0.667, 0.333, 0.000,
- 0.667, 0.667, 0.000,
- 0.667, 1.000, 0.000,
- 1.000, 0.333, 0.000,
- 1.000, 0.667, 0.000,
- 1.000, 1.000, 0.000,
- 0.000, 0.333, 0.500,
- 0.000, 0.667, 0.500,
- 0.000, 1.000, 0.500,
- 0.333, 0.000, 0.500,
- 0.333, 0.333, 0.500,
- 0.333, 0.667, 0.500,
- 0.333, 1.000, 0.500,
- 0.667, 0.000, 0.500,
- 0.667, 0.333, 0.500,
- 0.667, 0.667, 0.500,
- 0.667, 1.000, 0.500,
- 1.000, 0.000, 0.500,
- 1.000, 0.333, 0.500,
- 1.000, 0.667, 0.500,
- 1.000, 1.000, 0.500,
- 0.000, 0.333, 1.000,
- 0.000, 0.667, 1.000,
- 0.000, 1.000, 1.000,
- 0.333, 0.000, 1.000,
- 0.333, 0.333, 1.000,
- 0.333, 0.667, 1.000,
- 0.333, 1.000, 1.000,
- 0.667, 0.000, 1.000,
- 0.667, 0.333, 1.000,
- 0.667, 0.667, 1.000,
- 0.667, 1.000, 1.000,
- 1.000, 0.000, 1.000,
- 1.000, 0.333, 1.000,
- 1.000, 0.667, 1.000,
- 0.167, 0.000, 0.000,
- 0.333, 0.000, 0.000,
- 0.500, 0.000, 0.000,
- 0.667, 0.000, 0.000,
- 0.833, 0.000, 0.000,
- 1.000, 0.000, 0.000,
- 0.000, 0.167, 0.000,
- 0.000, 0.333, 0.000,
- 0.000, 0.500, 0.000,
- 0.000, 0.667, 0.000,
- 0.000, 0.833, 0.000,
- 0.000, 1.000, 0.000,
- 0.000, 0.000, 0.167,
- 0.000, 0.000, 0.333,
- 0.000, 0.000, 0.500,
- 0.000, 0.000, 0.667,
- 0.000, 0.000, 0.833,
- 0.000, 0.000, 1.000,
- 0.000, 0.000, 0.000,
- 0.143, 0.143, 0.143,
- 0.286, 0.286, 0.286,
- 0.429, 0.429, 0.429,
- 0.571, 0.571, 0.571,
- 0.714, 0.714, 0.714,
- 0.857, 0.857, 0.857,
- 1.000, 1.000, 1.000
- ]
- ).astype(np.float32)
- color_list = color_list.reshape((-1, 3)) * 255
- if not rgb:
- color_list = color_list[:, ::-1]
- return color_list
-
-
-def category():
-
- category = [
- "person",
- "bicycle",
- "car",
- "motorbike",
- "aeroplane",
- "bus",
- "train",
- "truck",
- "boat",
- "traffic light",
- "fire hydrant",
- "stop sign",
- "parking meter",
- "bench",
- "bird",
- "cat",
- "dog",
- "horse",
- "sheep",
- "cow",
- "elephant",
- "bear",
- "zebra",
- "giraffe",
- "backpack",
- "umbrella",
- "handbag",
- "tie",
- "suitcase",
- "frisbee",
- "skis",
- "snowboard",
- "sports ball",
- "kite",
- "baseball bat",
- "baseball glove",
- "skateboard",
- "surfboard",
- "tennis racket",
- "bottle",
- "wine glass",
- "cup",
- "fork",
- "knife",
- "spoon",
- "bowl",
- "banana",
- "apple",
- "sandwich",
- "orange",
- "broccoli",
- "carrot",
- "hot dog",
- "pizza",
- "donut",
- "cake",
- "chair",
- "sofa",
- "pottedplant",
- "bed",
- "diningtable",
- "toilet",
- "tvmonitor",
- "laptop",
- "mouse",
- "remote",
- "keyboard",
- "cell phone",
- "microwave",
- "oven",
- "toaster",
- "sink",
- "refrigerator",
- "book",
- "clock",
- "vase",
- "scissors",
- "teddy bear",
- "hair drier",
- "toothbrush"]
-
- return category
\ No newline at end of file
diff --git a/spaces/openMUSE/MUSE-vs-SD.1.5/swin_ir_2.py b/spaces/openMUSE/MUSE-vs-SD.1.5/swin_ir_2.py
deleted file mode 100644
index 21fb0d2d5e15f6f678f47eee6e8b988f962b4907..0000000000000000000000000000000000000000
--- a/spaces/openMUSE/MUSE-vs-SD.1.5/swin_ir_2.py
+++ /dev/null
@@ -1,1326 +0,0 @@
-# -----------------------------------------------------------------------------------
-# Swin2SR: Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration, https://arxiv.org/abs/2209.11345
-# Written by Conde and Choi et al.
-# -----------------------------------------------------------------------------------
-
-import os
-import math
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-import PIL.Image
-import requests
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
-
-class Mlp(nn.Module):
- def __init__(
- self,
- in_features,
- hidden_features=None,
- out_features=None,
- act_layer=nn.GELU,
- drop=0.0,
- ):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-def window_partition(x, window_size):
- """
- Args:
- x: (B, H, W, C)
- window_size (int): window size
- Returns:
- windows: (num_windows*B, window_size, window_size, C)
- """
- B, H, W, C = x.shape
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
- windows = (
- x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
- )
- return windows
-
-
-def window_reverse(windows, window_size, H, W):
- """
- Args:
- windows: (num_windows*B, window_size, window_size, C)
- window_size (int): Window size
- H (int): Height of image
- W (int): Width of image
- Returns:
- x: (B, H, W, C)
- """
- B = int(windows.shape[0] / (H * W / window_size / window_size))
- x = windows.view(
- B, H // window_size, W // window_size, window_size, window_size, -1
- )
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
- return x
-
-
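A quick sanity check, assuming `window_partition` and `window_reverse` exactly as defined above: for spatial sizes divisible by the window size, the two functions are inverses of each other.

import torch

x = torch.arange(2 * 16 * 16 * 3, dtype=torch.float32).view(2, 16, 16, 3)  # B H W C
windows = window_partition(x, window_size=8)             # (2 * 2 * 2, 8, 8, 3)
restored = window_reverse(windows, 8, 16, 16)            # back to (2, 16, 16, 3)
print(torch.equal(x, restored))                          # True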
-class WindowAttention(nn.Module):
- r"""Window based multi-head self attention (W-MSA) module with relative position bias.
- It supports both of shifted and non-shifted window.
- Args:
- dim (int): Number of input channels.
- window_size (tuple[int]): The height and width of the window.
- num_heads (int): Number of attention heads.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
- proj_drop (float, optional): Dropout ratio of output. Default: 0.0
- pretrained_window_size (tuple[int]): The height and width of the window in pre-training.
- """
-
- def __init__(
- self,
- dim,
- window_size,
- num_heads,
- qkv_bias=True,
- attn_drop=0.0,
- proj_drop=0.0,
- pretrained_window_size=[0, 0],
- ):
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.pretrained_window_size = pretrained_window_size
- self.num_heads = num_heads
-
- self.logit_scale = nn.Parameter(
- torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True
- )
-
- # mlp to generate continuous relative position bias
- self.cpb_mlp = nn.Sequential(
- nn.Linear(2, 512, bias=True),
- nn.ReLU(inplace=True),
- nn.Linear(512, num_heads, bias=False),
- )
-
- # get relative_coords_table
- relative_coords_h = torch.arange(
- -(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32
- )
- relative_coords_w = torch.arange(
- -(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32
- )
- relative_coords_table = (
- torch.stack(torch.meshgrid([relative_coords_h, relative_coords_w]))
- .permute(1, 2, 0)
- .contiguous()
- .unsqueeze(0)
- ) # 1, 2*Wh-1, 2*Ww-1, 2
- if pretrained_window_size[0] > 0:
- relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1
- relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1
- else:
- relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1
- relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1
- relative_coords_table *= 8 # normalize to -8, 8
- relative_coords_table = (
- torch.sign(relative_coords_table)
- * torch.log2(torch.abs(relative_coords_table) + 1.0)
- / np.log2(8)
- )
-
- self.register_buffer("relative_coords_table", relative_coords_table)
-
- # get pair-wise relative position index for each token inside the window
- coords_h = torch.arange(self.window_size[0])
- coords_w = torch.arange(self.window_size[1])
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
- relative_coords = (
- coords_flatten[:, :, None] - coords_flatten[:, None, :]
- ) # 2, Wh*Ww, Wh*Ww
- relative_coords = relative_coords.permute(
- 1, 2, 0
- ).contiguous() # Wh*Ww, Wh*Ww, 2
- relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
- relative_coords[:, :, 1] += self.window_size[1] - 1
- relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
- relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
- self.register_buffer("relative_position_index", relative_position_index)
-
- self.qkv = nn.Linear(dim, dim * 3, bias=False)
- if qkv_bias:
- self.q_bias = nn.Parameter(torch.zeros(dim))
- self.v_bias = nn.Parameter(torch.zeros(dim))
- else:
- self.q_bias = None
- self.v_bias = None
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x, mask=None):
- """
- Args:
- x: input features with shape of (num_windows*B, N, C)
- mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
- """
- B_, N, C = x.shape
- qkv_bias = None
- if self.q_bias is not None:
- qkv_bias = torch.cat(
- (
- self.q_bias,
- torch.zeros_like(self.v_bias, requires_grad=False),
- self.v_bias,
- )
- )
- qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
- qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
- q, k, v = (
- qkv[0],
- qkv[1],
- qkv[2],
- ) # make torchscript happy (cannot use tensor as tuple)
-
- # cosine attention
- attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)
- logit_scale = torch.clamp(
- self.logit_scale,
- max=torch.log(torch.tensor(1.0 / 0.01)).to(self.logit_scale.device),
- ).exp()
- attn = attn * logit_scale
-
- relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(
- -1, self.num_heads
- )
- relative_position_bias = relative_position_bias_table[
- self.relative_position_index.view(-1)
- ].view(
- self.window_size[0] * self.window_size[1],
- self.window_size[0] * self.window_size[1],
- -1,
- ) # Wh*Ww,Wh*Ww,nH
- relative_position_bias = relative_position_bias.permute(
- 2, 0, 1
- ).contiguous() # nH, Wh*Ww, Wh*Ww
- relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
- attn = attn + relative_position_bias.unsqueeze(0)
-
- if mask is not None:
- nW = mask.shape[0]
- attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
- 1
- ).unsqueeze(0)
- attn = attn.view(-1, self.num_heads, N, N)
- attn = self.softmax(attn)
- else:
- attn = self.softmax(attn)
-
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
- def extra_repr(self) -> str:
- return (
- f"dim={self.dim}, window_size={self.window_size}, "
- f"pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}"
- )
-
- def flops(self, N):
- # calculate flops for 1 window with token length of N
- flops = 0
- # qkv = self.qkv(x)
- flops += N * self.dim * 3 * self.dim
- # attn = (q @ k.transpose(-2, -1))
- flops += self.num_heads * N * (self.dim // self.num_heads) * N
- # x = (attn @ v)
- flops += self.num_heads * N * N * (self.dim // self.num_heads)
- # x = self.proj(x)
- flops += N * self.dim * self.dim
- return flops
-
-
-class SwinTransformerBlock(nn.Module):
- r"""Swin Transformer Block.
- Args:
- dim (int): Number of input channels.
-        input_resolution (tuple[int]): Input resolution.
- num_heads (int): Number of attention heads.
- window_size (int): Window size.
- shift_size (int): Shift size for SW-MSA.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- pretrained_window_size (int): Window size in pre-training.
- """
-
- def __init__(
- self,
- dim,
- input_resolution,
- num_heads,
- window_size=7,
- shift_size=0,
- mlp_ratio=4.0,
- qkv_bias=True,
- drop=0.0,
- attn_drop=0.0,
- drop_path=0.0,
- act_layer=nn.GELU,
- norm_layer=nn.LayerNorm,
- pretrained_window_size=0,
- ):
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
- if min(self.input_resolution) <= self.window_size:
- # if window size is larger than input resolution, we don't partition windows
- self.shift_size = 0
- self.window_size = min(self.input_resolution)
- assert (
- 0 <= self.shift_size < self.window_size
-        ), "shift_size must be in the range [0, window_size)"
-
- self.norm1 = norm_layer(dim)
- self.attn = WindowAttention(
- dim,
- window_size=to_2tuple(self.window_size),
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- attn_drop=attn_drop,
- proj_drop=drop,
- pretrained_window_size=to_2tuple(pretrained_window_size),
- )
-
- self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(
- in_features=dim,
- hidden_features=mlp_hidden_dim,
- act_layer=act_layer,
- drop=drop,
- )
-
- if self.shift_size > 0:
- attn_mask = self.calculate_mask(self.input_resolution)
- else:
- attn_mask = None
-
- self.register_buffer("attn_mask", attn_mask)
-
- def calculate_mask(self, x_size):
- # calculate attention mask for SW-MSA
- H, W = x_size
- img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
- h_slices = (
- slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None),
- )
- w_slices = (
- slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None),
- )
- cnt = 0
- for h in h_slices:
- for w in w_slices:
- img_mask[:, h, w, :] = cnt
- cnt += 1
-
- mask_windows = window_partition(
- img_mask, self.window_size
- ) # nW, window_size, window_size, 1
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
- attn_mask == 0, float(0.0)
- )
-
- return attn_mask
-
- def forward(self, x, x_size):
- H, W = x_size
- B, L, C = x.shape
- # assert L == H * W, "input feature has wrong size"
-
- shortcut = x
- x = x.view(B, H, W, C)
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(
- x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
- )
- else:
- shifted_x = x
-
- # partition windows
- x_windows = window_partition(
- shifted_x, self.window_size
- ) # nW*B, window_size, window_size, C
- x_windows = x_windows.view(
- -1, self.window_size * self.window_size, C
- ) # nW*B, window_size*window_size, C
-
-        # W-MSA/SW-MSA (to be compatible with testing on images whose shapes are multiples of the window size)
- if self.input_resolution == x_size:
- attn_windows = self.attn(
- x_windows, mask=self.attn_mask
- ) # nW*B, window_size*window_size, C
- else:
- attn_windows = self.attn(
- x_windows, mask=self.calculate_mask(x_size).to(x.device)
- )
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
- shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
-
- # reverse cyclic shift
- if self.shift_size > 0:
- x = torch.roll(
- shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
- )
- else:
- x = shifted_x
- x = x.view(B, H * W, C)
- x = shortcut + self.drop_path(self.norm1(x))
-
- # FFN
- x = x + self.drop_path(self.norm2(self.mlp(x)))
-
- return x
-
- def extra_repr(self) -> str:
- return (
- f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
- f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
- )
-
- def flops(self):
- flops = 0
- H, W = self.input_resolution
- # norm1
- flops += self.dim * H * W
- # W-MSA/SW-MSA
- nW = H * W / self.window_size / self.window_size
- flops += nW * self.attn.flops(self.window_size * self.window_size)
- # mlp
- flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
- # norm2
- flops += self.dim * H * W
- return flops
-
-
-class PatchMerging(nn.Module):
- r"""Patch Merging Layer.
- Args:
- input_resolution (tuple[int]): Resolution of input feature.
- dim (int): Number of input channels.
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
- super().__init__()
- self.input_resolution = input_resolution
- self.dim = dim
- self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
- self.norm = norm_layer(2 * dim)
-
- def forward(self, x):
- """
- x: B, H*W, C
- """
- H, W = self.input_resolution
- B, L, C = x.shape
- assert L == H * W, "input feature has wrong size"
-        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}): H and W must both be even."
-
- x = x.view(B, H, W, C)
-
- x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
- x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
- x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
- x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
- x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
- x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
-
- x = self.reduction(x)
- x = self.norm(x)
-
- return x
-
- def extra_repr(self) -> str:
- return f"input_resolution={self.input_resolution}, dim={self.dim}"
-
- def flops(self):
- H, W = self.input_resolution
- flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
- flops += H * W * self.dim // 2
- return flops
-
-
-class BasicLayer(nn.Module):
- """A basic Swin Transformer layer for one stage.
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- pretrained_window_size (int): Local window size in pre-training.
- """
-
- def __init__(
- self,
- dim,
- input_resolution,
- depth,
- num_heads,
- window_size,
- mlp_ratio=4.0,
- qkv_bias=True,
- drop=0.0,
- attn_drop=0.0,
- drop_path=0.0,
- norm_layer=nn.LayerNorm,
- downsample=None,
- use_checkpoint=False,
- pretrained_window_size=0,
- ):
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.depth = depth
- self.use_checkpoint = use_checkpoint
-
- # build blocks
- self.blocks = nn.ModuleList(
- [
- SwinTransformerBlock(
- dim=dim,
- input_resolution=input_resolution,
- num_heads=num_heads,
- window_size=window_size,
- shift_size=0 if (i % 2 == 0) else window_size // 2,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- drop=drop,
- attn_drop=attn_drop,
- drop_path=drop_path[i]
- if isinstance(drop_path, list)
- else drop_path,
- norm_layer=norm_layer,
- pretrained_window_size=pretrained_window_size,
- )
- for i in range(depth)
- ]
- )
-
- # patch merging layer
- if downsample is not None:
- self.downsample = downsample(
- input_resolution, dim=dim, norm_layer=norm_layer
- )
- else:
- self.downsample = None
-
- def forward(self, x, x_size):
- for blk in self.blocks:
- if self.use_checkpoint:
- x = checkpoint.checkpoint(blk, x, x_size)
- else:
- x = blk(x, x_size)
- if self.downsample is not None:
- x = self.downsample(x)
- return x
-
- def extra_repr(self) -> str:
- return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
-
- def flops(self):
- flops = 0
- for blk in self.blocks:
- flops += blk.flops()
- if self.downsample is not None:
- flops += self.downsample.flops()
- return flops
-
- def _init_respostnorm(self):
- for blk in self.blocks:
- nn.init.constant_(blk.norm1.bias, 0)
- nn.init.constant_(blk.norm1.weight, 0)
- nn.init.constant_(blk.norm2.bias, 0)
- nn.init.constant_(blk.norm2.weight, 0)
-
-
-class PatchEmbed(nn.Module):
- r"""Image to Patch Embedding
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(
- self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None
- ):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [
- img_size[0] // patch_size[0],
- img_size[1] // patch_size[1],
- ]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- self.proj = nn.Conv2d(
- in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
- )
- if norm_layer is not None:
- self.norm = norm_layer(embed_dim)
- else:
- self.norm = None
-
- def forward(self, x):
- B, C, H, W = x.shape
- # FIXME look at relaxing size constraints
- # assert H == self.img_size[0] and W == self.img_size[1],
- # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
- x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
- if self.norm is not None:
- x = self.norm(x)
- return x
-
- def flops(self):
- Ho, Wo = self.patches_resolution
- flops = (
- Ho
- * Wo
- * self.embed_dim
- * self.in_chans
- * (self.patch_size[0] * self.patch_size[1])
- )
- if self.norm is not None:
- flops += Ho * Wo * self.embed_dim
- return flops
-
-
-class RSTB(nn.Module):
- """Residual Swin Transformer Block (RSTB).
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- img_size: Input image size.
- patch_size: Patch size.
- resi_connection: The convolutional block before residual connection.
- """
-
- def __init__(
- self,
- dim,
- input_resolution,
- depth,
- num_heads,
- window_size,
- mlp_ratio=4.0,
- qkv_bias=True,
- drop=0.0,
- attn_drop=0.0,
- drop_path=0.0,
- norm_layer=nn.LayerNorm,
- downsample=None,
- use_checkpoint=False,
- img_size=224,
- patch_size=4,
- resi_connection="1conv",
- ):
- super(RSTB, self).__init__()
-
- self.dim = dim
- self.input_resolution = input_resolution
-
- self.residual_group = BasicLayer(
- dim=dim,
- input_resolution=input_resolution,
- depth=depth,
- num_heads=num_heads,
- window_size=window_size,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- drop=drop,
- attn_drop=attn_drop,
- drop_path=drop_path,
- norm_layer=norm_layer,
- downsample=downsample,
- use_checkpoint=use_checkpoint,
- )
-
- if resi_connection == "1conv":
- self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
- elif resi_connection == "3conv":
- # to save parameters and memory
- self.conv = nn.Sequential(
- nn.Conv2d(dim, dim // 4, 3, 1, 1),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(dim // 4, dim, 3, 1, 1),
- )
-
- self.patch_embed = PatchEmbed(
- img_size=img_size,
- patch_size=patch_size,
- in_chans=dim,
- embed_dim=dim,
- norm_layer=None,
- )
-
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size,
- patch_size=patch_size,
- in_chans=dim,
- embed_dim=dim,
- norm_layer=None,
- )
-
- def forward(self, x, x_size):
- return (
- self.patch_embed(
- self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))
- )
- + x
- )
-
- def flops(self):
- flops = 0
- flops += self.residual_group.flops()
- H, W = self.input_resolution
- flops += H * W * self.dim * self.dim * 9
- flops += self.patch_embed.flops()
- flops += self.patch_unembed.flops()
-
- return flops
-
-
-class PatchUnEmbed(nn.Module):
- r"""Image to Patch Unembedding
-
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(
- self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None
- ):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [
- img_size[0] // patch_size[0],
- img_size[1] // patch_size[1],
- ]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- def forward(self, x, x_size):
- B, HW, C = x.shape
-        x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1])  # B C Ph Pw
- return x
-
- def flops(self):
- flops = 0
- return flops
-
-
-class Upsample(nn.Sequential):
- """Upsample module.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
- """
-
- def __init__(self, scale, num_feat):
- m = []
- if (scale & (scale - 1)) == 0: # scale = 2^n
- for _ in range(int(math.log(scale, 2))):
- m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(2))
- elif scale == 3:
- m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(3))
- else:
- raise ValueError(
- f"scale {scale} is not supported. " "Supported scales: 2^n and 3."
- )
- super(Upsample, self).__init__(*m)
-
-
-class Upsample_hf(nn.Sequential):
- """Upsample module.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
- """
-
- def __init__(self, scale, num_feat):
- m = []
- if (scale & (scale - 1)) == 0: # scale = 2^n
- for _ in range(int(math.log(scale, 2))):
- m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(2))
- elif scale == 3:
- m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(3))
- else:
- raise ValueError(
- f"scale {scale} is not supported. " "Supported scales: 2^n and 3."
- )
- super(Upsample_hf, self).__init__(*m)
-
-
-class UpsampleOneStep(nn.Sequential):
- """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
- Used in lightweight SR to save parameters.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
-
- """
-
- def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
- self.num_feat = num_feat
- self.input_resolution = input_resolution
- m = []
- m.append(nn.Conv2d(num_feat, (scale**2) * num_out_ch, 3, 1, 1))
- m.append(nn.PixelShuffle(scale))
- super(UpsampleOneStep, self).__init__(*m)
-
- def flops(self):
- H, W = self.input_resolution
- flops = H * W * self.num_feat * 3 * 9
- return flops
-
-
-class Swin2SR(nn.Module):
- r"""Swin2SR
-    A PyTorch implementation of `Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration`.
-
- Args:
- img_size (int | tuple(int)): Input image size. Default 64
- patch_size (int | tuple(int)): Patch size. Default: 1
- in_chans (int): Number of input image channels. Default: 3
- embed_dim (int): Patch embedding dimension. Default: 96
- depths (tuple(int)): Depth of each Swin Transformer layer.
- num_heads (tuple(int)): Number of attention heads in different layers.
- window_size (int): Window size. Default: 7
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
- qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
- drop_rate (float): Dropout rate. Default: 0
- attn_drop_rate (float): Attention dropout rate. Default: 0
- drop_path_rate (float): Stochastic depth rate. Default: 0.1
- norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
- ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
- patch_norm (bool): If True, add normalization after patch embedding. Default: True
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
-        upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compression artifact reduction
-        img_range: Image range. 1. or 255.
-        upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
- resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
- """
-
- def __init__(
- self,
- img_size=64,
- patch_size=1,
- in_chans=3,
- embed_dim=96,
- depths=[6, 6, 6, 6],
- num_heads=[6, 6, 6, 6],
- window_size=7,
- mlp_ratio=4.0,
- qkv_bias=True,
- drop_rate=0.0,
- attn_drop_rate=0.0,
- drop_path_rate=0.1,
- norm_layer=nn.LayerNorm,
- ape=False,
- patch_norm=True,
- use_checkpoint=False,
- upscale=2,
- img_range=1.0,
- upsampler="",
- resi_connection="1conv",
- **kwargs,
- ):
- super(Swin2SR, self).__init__()
- num_in_ch = in_chans
- num_out_ch = in_chans
- num_feat = 64
- self.img_range = img_range
- if in_chans == 3:
- rgb_mean = (0.4488, 0.4371, 0.4040)
- self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
- else:
- self.mean = torch.zeros(1, 1, 1, 1)
- self.upscale = upscale
- self.upsampler = upsampler
- self.window_size = window_size
-
- #####################################################################################################
- ################################### 1, shallow feature extraction ###################################
- self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
-
- #####################################################################################################
- ################################### 2, deep feature extraction ######################################
- self.num_layers = len(depths)
- self.embed_dim = embed_dim
- self.ape = ape
- self.patch_norm = patch_norm
- self.num_features = embed_dim
- self.mlp_ratio = mlp_ratio
-
- # split image into non-overlapping patches
- self.patch_embed = PatchEmbed(
- img_size=img_size,
- patch_size=patch_size,
- in_chans=embed_dim,
- embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None,
- )
- num_patches = self.patch_embed.num_patches
- patches_resolution = self.patch_embed.patches_resolution
- self.patches_resolution = patches_resolution
-
- # merge non-overlapping patches into image
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size,
- patch_size=patch_size,
- in_chans=embed_dim,
- embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None,
- )
-
- # absolute position embedding
- if self.ape:
- self.absolute_pos_embed = nn.Parameter(
- torch.zeros(1, num_patches, embed_dim)
- )
- trunc_normal_(self.absolute_pos_embed, std=0.02)
-
- self.pos_drop = nn.Dropout(p=drop_rate)
-
- # stochastic depth
- dpr = [
- x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
- ] # stochastic depth decay rule
-
- # build Residual Swin Transformer blocks (RSTB)
- self.layers = nn.ModuleList()
- for i_layer in range(self.num_layers):
- layer = RSTB(
- dim=embed_dim,
- input_resolution=(patches_resolution[0], patches_resolution[1]),
- depth=depths[i_layer],
- num_heads=num_heads[i_layer],
- window_size=window_size,
- mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[
- sum(depths[:i_layer]) : sum(depths[: i_layer + 1])
- ], # no impact on SR results
- norm_layer=norm_layer,
- downsample=None,
- use_checkpoint=use_checkpoint,
- img_size=img_size,
- patch_size=patch_size,
- resi_connection=resi_connection,
- )
- self.layers.append(layer)
-
- if self.upsampler == "pixelshuffle_hf":
- self.layers_hf = nn.ModuleList()
- for i_layer in range(self.num_layers):
- layer = RSTB(
- dim=embed_dim,
- input_resolution=(patches_resolution[0], patches_resolution[1]),
- depth=depths[i_layer],
- num_heads=num_heads[i_layer],
- window_size=window_size,
- mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[
- sum(depths[:i_layer]) : sum(depths[: i_layer + 1])
- ], # no impact on SR results
- norm_layer=norm_layer,
- downsample=None,
- use_checkpoint=use_checkpoint,
- img_size=img_size,
- patch_size=patch_size,
- resi_connection=resi_connection,
- )
- self.layers_hf.append(layer)
-
- self.norm = norm_layer(self.num_features)
-
- # build the last conv layer in deep feature extraction
- if resi_connection == "1conv":
- self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
- elif resi_connection == "3conv":
- # to save parameters and memory
- self.conv_after_body = nn.Sequential(
- nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1),
- )
-
- #####################################################################################################
- ################################ 3, high quality image reconstruction ################################
- if self.upsampler == "pixelshuffle":
- # for classical SR
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
- )
- self.upsample = Upsample(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- elif self.upsampler == "pixelshuffle_aux":
- self.conv_bicubic = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
- )
- self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- self.conv_after_aux = nn.Sequential(
- nn.Conv2d(3, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
- )
- self.upsample = Upsample(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
- elif self.upsampler == "pixelshuffle_hf":
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
- )
- self.upsample = Upsample(upscale, num_feat)
- self.upsample_hf = Upsample_hf(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- self.conv_first_hf = nn.Sequential(
- nn.Conv2d(num_feat, embed_dim, 3, 1, 1), nn.LeakyReLU(inplace=True)
- )
- self.conv_after_body_hf = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
- self.conv_before_upsample_hf = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
- )
- self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
- elif self.upsampler == "pixelshuffledirect":
- # for lightweight SR (to save parameters)
- self.upsample = UpsampleOneStep(
- upscale,
- embed_dim,
- num_out_ch,
- (patches_resolution[0], patches_resolution[1]),
- )
- elif self.upsampler == "nearest+conv":
- # for real-world SR (less artifacts)
- assert self.upscale == 4, "only support x4 now."
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)
- )
- self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
- else:
- # for image denoising and JPEG compression artifact reduction
- self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
-
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=0.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- @torch.jit.ignore
- def no_weight_decay(self):
- return {"absolute_pos_embed"}
-
- @torch.jit.ignore
- def no_weight_decay_keywords(self):
- return {"relative_position_bias_table"}
-
- def check_image_size(self, x):
- _, _, h, w = x.size()
- mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
- mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
- x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), "reflect")
- return x
-
- def forward_features(self, x):
- x_size = (x.shape[2], x.shape[3])
- x = self.patch_embed(x)
- if self.ape:
- x = x + self.absolute_pos_embed
- x = self.pos_drop(x)
-
- for layer in self.layers:
- x = layer(x, x_size)
-
- x = self.norm(x) # B L C
- x = self.patch_unembed(x, x_size)
-
- return x
-
- def forward_features_hf(self, x):
- x_size = (x.shape[2], x.shape[3])
- x = self.patch_embed(x)
- if self.ape:
- x = x + self.absolute_pos_embed
- x = self.pos_drop(x)
-
- for layer in self.layers_hf:
- x = layer(x, x_size)
-
- x = self.norm(x) # B L C
- x = self.patch_unembed(x, x_size)
-
- return x
-
- def forward(self, x):
- H, W = x.shape[2:]
- x = self.check_image_size(x)
-
- self.mean = self.mean.type_as(x)
- x = (x - self.mean) * self.img_range
-
- if self.upsampler == "pixelshuffle":
- # for classical SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.conv_last(self.upsample(x))
- elif self.upsampler == "pixelshuffle_aux":
- bicubic = F.interpolate(
- x,
- size=(H * self.upscale, W * self.upscale),
- mode="bicubic",
- align_corners=False,
- )
- bicubic = self.conv_bicubic(bicubic)
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- aux = self.conv_aux(x) # b, 3, LR_H, LR_W
- x = self.conv_after_aux(aux)
- x = (
- self.upsample(x)[:, :, : H * self.upscale, : W * self.upscale]
- + bicubic[:, :, : H * self.upscale, : W * self.upscale]
- )
- x = self.conv_last(x)
- aux = aux / self.img_range + self.mean
- elif self.upsampler == "pixelshuffle_hf":
- # for classical SR with HF
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x_before = self.conv_before_upsample(x)
- x_out = self.conv_last(self.upsample(x_before))
-
- x_hf = self.conv_first_hf(x_before)
- x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf
- x_hf = self.conv_before_upsample_hf(x_hf)
- x_hf = self.conv_last_hf(self.upsample_hf(x_hf))
- x = x_out + x_hf
- x_hf = x_hf / self.img_range + self.mean
-
- elif self.upsampler == "pixelshuffledirect":
- # for lightweight SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.upsample(x)
- elif self.upsampler == "nearest+conv":
- # for real-world SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.lrelu(
- self.conv_up1(
- torch.nn.functional.interpolate(x, scale_factor=2, mode="nearest")
- )
- )
- x = self.lrelu(
- self.conv_up2(
- torch.nn.functional.interpolate(x, scale_factor=2, mode="nearest")
- )
- )
- x = self.conv_last(self.lrelu(self.conv_hr(x)))
- else:
- # for image denoising and JPEG compression artifact reduction
- x_first = self.conv_first(x)
- res = self.conv_after_body(self.forward_features(x_first)) + x_first
- x = x + self.conv_last(res)
-
- x = x / self.img_range + self.mean
- if self.upsampler == "pixelshuffle_aux":
- return x[:, :, : H * self.upscale, : W * self.upscale], aux
-
- elif self.upsampler == "pixelshuffle_hf":
- x_out = x_out / self.img_range + self.mean
- return (
- x_out[:, :, : H * self.upscale, : W * self.upscale],
- x[:, :, : H * self.upscale, : W * self.upscale],
- x_hf[:, :, : H * self.upscale, : W * self.upscale],
- )
-
- else:
- return x[:, :, : H * self.upscale, : W * self.upscale]
-
- def flops(self):
- flops = 0
- H, W = self.patches_resolution
- flops += H * W * 3 * self.embed_dim * 9
- flops += self.patch_embed.flops()
- for i, layer in enumerate(self.layers):
- flops += layer.flops()
- flops += H * W * 3 * self.embed_dim * self.embed_dim
- flops += self.upsample.flops()
- return flops
-
-
-MODEL_PATH = "model_zoo/swin2sr/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth"
-PARAM_KEY_G = "params_ema"
-SCALE = 4
-WINDOW_SIZE = 8
-
-
-
-def download_model_weights() -> None:
- os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
- url = "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/{}".format(
- os.path.basename(MODEL_PATH)
- )
- r = requests.get(url, allow_redirects=True)
- with open(MODEL_PATH, "wb") as f:
- f.write(r.content)
-
-
-def load_model() -> torch.nn.Module:
- if not os.path.exists(MODEL_PATH):
- download_model_weights()
- model = Swin2SR(
- upscale=SCALE,
- in_chans=3,
- img_size=64,
- window_size=8,
- img_range=1.0,
- depths=[6, 6, 6, 6, 6, 6],
- embed_dim=180,
- num_heads=[6, 6, 6, 6, 6, 6],
- mlp_ratio=2,
- upsampler="nearest+conv",
- resi_connection="1conv",
- )
- pretrained_model = torch.load(MODEL_PATH)
- model.load_state_dict(
- pretrained_model[PARAM_KEY_G]
- if PARAM_KEY_G in pretrained_model.keys()
- else pretrained_model,
- strict=True,
- )
- return model
-
-def preprocesss_image(image: PIL.Image.Image) -> torch.FloatTensor:
- image = np.array(image).astype("float32") / 255.0
- image = np.transpose(image, (2, 0, 1)) # HWC -> CHW
- img_lq = torch.from_numpy(image).float().unsqueeze(0)
-
- _, _, h_old, w_old = img_lq.size()
- h_pad = (h_old // WINDOW_SIZE + 1) * WINDOW_SIZE - h_old
- w_pad = (w_old // WINDOW_SIZE + 1) * WINDOW_SIZE - w_old
- img_lq = torch.cat([img_lq, torch.flip(img_lq, [2])], 2)[:, :, : h_old + h_pad, :]
- img_lq = torch.cat([img_lq, torch.flip(img_lq, [3])], 3)[:, :, :, : w_old + w_pad]
- return img_lq
-
-
-def postprocess_image(output: torch.Tensor) -> PIL.Image.Image:
- output = output.data.float().cpu().clamp_(0, 1).numpy()
- output = (output * 255).round().astype("uint8")
- output = output.transpose(1, 2, 0)
- return PIL.Image.fromarray(output)
\ No newline at end of file
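A standalone sketch of the size handling above, assuming the same WINDOW_SIZE = 8: `check_image_size` reflect-pads the spatial dimensions up to a multiple of the window size so the image splits into whole windows (`preprocesss_image` achieves the same effect by concatenating flipped copies).

import torch
import torch.nn.functional as F

WINDOW_SIZE = 8
x = torch.rand(1, 3, 30, 45)                             # toy low-resolution input

_, _, h, w = x.shape
pad_h = (WINDOW_SIZE - h % WINDOW_SIZE) % WINDOW_SIZE
pad_w = (WINDOW_SIZE - w % WINDOW_SIZE) % WINDOW_SIZE
x = F.pad(x, (0, pad_w, 0, pad_h), "reflect")            # pad right and bottom by reflection

print(x.shape)                                           # torch.Size([1, 3, 32, 48])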
diff --git a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/plotutil.py b/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/plotutil.py
deleted file mode 100644
index 187bcb9d5615c8ec51a43148b011c06b8ed6aff7..0000000000000000000000000000000000000000
--- a/spaces/paulengstler/interpretable-vertebral-fracture-diagnosis/netdissect/plotutil.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import matplotlib.pyplot as plt
-import numpy
-
-def plot_tensor_images(data, **kwargs):
- data = ((data + 1) / 2 * 255).permute(0, 2, 3, 1).byte().cpu().numpy()
- width = int(numpy.ceil(numpy.sqrt(data.shape[0])))
- height = int(numpy.ceil(data.shape[0] / float(width)))
- kwargs = dict(kwargs)
- margin = 0.01
- if 'figsize' not in kwargs:
- # Size figure to one display pixel per data pixel
- dpi = plt.rcParams['figure.dpi']
- kwargs['figsize'] = (
- (1 + margin) * (width * data.shape[2] / dpi),
- (1 + margin) * (height * data.shape[1] / dpi))
- f, axarr = plt.subplots(height, width, **kwargs)
- if len(numpy.shape(axarr)) == 0:
- axarr = numpy.array([[axarr]])
- if len(numpy.shape(axarr)) == 1:
- axarr = axarr[None,:]
- for i, im in enumerate(data):
- ax = axarr[i // width, i % width]
- ax.imshow(data[i])
- ax.axis('off')
- for i in range(i, width * height):
- ax = axarr[i // width, i % width]
- ax.axis('off')
- plt.subplots_adjust(wspace=margin, hspace=margin,
- left=0, right=1, bottom=0, top=1)
- plt.show()
-
-def plot_max_heatmap(data, shape=None, **kwargs):
- if shape is None:
- shape = data.shape[2:]
- data = data.max(1)[0].cpu().numpy()
- vmin = data.min()
- vmax = data.max()
- width = int(numpy.ceil(numpy.sqrt(data.shape[0])))
- height = int(numpy.ceil(data.shape[0] / float(width)))
- kwargs = dict(kwargs)
- margin = 0.01
- if 'figsize' not in kwargs:
- # Size figure to one display pixel per data pixel
- dpi = plt.rcParams['figure.dpi']
- kwargs['figsize'] = (
- width * shape[1] / dpi, height * shape[0] / dpi)
- f, axarr = plt.subplots(height, width, **kwargs)
- if len(numpy.shape(axarr)) == 0:
- axarr = numpy.array([[axarr]])
- if len(numpy.shape(axarr)) == 1:
- axarr = axarr[None,:]
- for i, im in enumerate(data):
- ax = axarr[i // width, i % width]
- img = ax.imshow(data[i], vmin=vmin, vmax=vmax, cmap='hot')
- ax.axis('off')
- for i in range(i, width * height):
- ax = axarr[i // width, i % width]
- ax.axis('off')
- plt.subplots_adjust(wspace=margin, hspace=margin,
- left=0, right=1, bottom=0, top=1)
- plt.show()
diff --git a/spaces/paultay/image_generator/README.md b/spaces/paultay/image_generator/README.md
deleted file mode 100644
index 9ab4cff0d0f41e5004f686f84230c1ea6e506a7c..0000000000000000000000000000000000000000
--- a/spaces/paultay/image_generator/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Image_generator
-emoji: 🦀
-colorFrom: red
-colorTo: yellow
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/version.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/version.py
deleted file mode 100644
index c7c8bb6ff4f8ed84e466a66cac6b953b901626ea..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/version.py
+++ /dev/null
@@ -1,739 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012-2017 The Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-"""
-Implementation of a flexible versioning scheme providing support for PEP-440,
-setuptools-compatible and semantic versioning.
-"""
-
-import logging
-import re
-
-from .compat import string_types
-from .util import parse_requirement
-
-__all__ = ['NormalizedVersion', 'NormalizedMatcher',
- 'LegacyVersion', 'LegacyMatcher',
- 'SemanticVersion', 'SemanticMatcher',
- 'UnsupportedVersionError', 'get_scheme']
-
-logger = logging.getLogger(__name__)
-
-
-class UnsupportedVersionError(ValueError):
- """This is an unsupported version."""
- pass
-
-
-class Version(object):
- def __init__(self, s):
- self._string = s = s.strip()
- self._parts = parts = self.parse(s)
- assert isinstance(parts, tuple)
- assert len(parts) > 0
-
- def parse(self, s):
- raise NotImplementedError('please implement in a subclass')
-
- def _check_compatible(self, other):
- if type(self) != type(other):
- raise TypeError('cannot compare %r and %r' % (self, other))
-
- def __eq__(self, other):
- self._check_compatible(other)
- return self._parts == other._parts
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __lt__(self, other):
- self._check_compatible(other)
- return self._parts < other._parts
-
- def __gt__(self, other):
- return not (self.__lt__(other) or self.__eq__(other))
-
- def __le__(self, other):
- return self.__lt__(other) or self.__eq__(other)
-
- def __ge__(self, other):
- return self.__gt__(other) or self.__eq__(other)
-
- # See http://docs.python.org/reference/datamodel#object.__hash__
- def __hash__(self):
- return hash(self._parts)
-
- def __repr__(self):
- return "%s('%s')" % (self.__class__.__name__, self._string)
-
- def __str__(self):
- return self._string
-
- @property
- def is_prerelease(self):
- raise NotImplementedError('Please implement in subclasses.')
-
-
-class Matcher(object):
- version_class = None
-
- # value is either a callable or the name of a method
- _operators = {
- '<': lambda v, c, p: v < c,
- '>': lambda v, c, p: v > c,
- '<=': lambda v, c, p: v == c or v < c,
- '>=': lambda v, c, p: v == c or v > c,
- '==': lambda v, c, p: v == c,
- '===': lambda v, c, p: v == c,
- # by default, compatible => >=.
- '~=': lambda v, c, p: v == c or v > c,
- '!=': lambda v, c, p: v != c,
- }
-
- # this is a method only to support alternative implementations
- # via overriding
- def parse_requirement(self, s):
- return parse_requirement(s)
-
- def __init__(self, s):
- if self.version_class is None:
- raise ValueError('Please specify a version class')
- self._string = s = s.strip()
- r = self.parse_requirement(s)
- if not r:
- raise ValueError('Not valid: %r' % s)
- self.name = r.name
- self.key = self.name.lower() # for case-insensitive comparisons
- clist = []
- if r.constraints:
- # import pdb; pdb.set_trace()
- for op, s in r.constraints:
- if s.endswith('.*'):
- if op not in ('==', '!='):
- raise ValueError('\'.*\' not allowed for '
- '%r constraints' % op)
- # Could be a partial version (e.g. for '2.*') which
- # won't parse as a version, so keep it as a string
- vn, prefix = s[:-2], True
- # Just to check that vn is a valid version
- self.version_class(vn)
- else:
- # Should parse as a version, so we can create an
- # instance for the comparison
- vn, prefix = self.version_class(s), False
- clist.append((op, vn, prefix))
- self._parts = tuple(clist)
-
- def match(self, version):
- """
- Check if the provided version matches the constraints.
-
- :param version: The version to match against this instance.
- :type version: String or :class:`Version` instance.
- """
- if isinstance(version, string_types):
- version = self.version_class(version)
- for operator, constraint, prefix in self._parts:
- f = self._operators.get(operator)
- if isinstance(f, string_types):
- f = getattr(self, f)
- if not f:
- msg = ('%r not implemented '
- 'for %s' % (operator, self.__class__.__name__))
- raise NotImplementedError(msg)
- if not f(version, constraint, prefix):
- return False
- return True
-
- @property
- def exact_version(self):
- result = None
- if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
- result = self._parts[0][1]
- return result
-
- def _check_compatible(self, other):
- if type(self) != type(other) or self.name != other.name:
- raise TypeError('cannot compare %s and %s' % (self, other))
-
- def __eq__(self, other):
- self._check_compatible(other)
- return self.key == other.key and self._parts == other._parts
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- # See http://docs.python.org/reference/datamodel#object.__hash__
- def __hash__(self):
- return hash(self.key) + hash(self._parts)
-
- def __repr__(self):
- return "%s(%r)" % (self.__class__.__name__, self._string)
-
- def __str__(self):
- return self._string
-
-
-PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
- r'(\.(post)(\d+))?(\.(dev)(\d+))?'
- r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
-
-
-def _pep_440_key(s):
- s = s.strip()
- m = PEP440_VERSION_RE.match(s)
- if not m:
- raise UnsupportedVersionError('Not a valid version: %s' % s)
- groups = m.groups()
- nums = tuple(int(v) for v in groups[1].split('.'))
- while len(nums) > 1 and nums[-1] == 0:
- nums = nums[:-1]
-
- if not groups[0]:
- epoch = 0
- else:
- epoch = int(groups[0][:-1])
- pre = groups[4:6]
- post = groups[7:9]
- dev = groups[10:12]
- local = groups[13]
- if pre == (None, None):
- pre = ()
- else:
- pre = pre[0], int(pre[1])
- if post == (None, None):
- post = ()
- else:
- post = post[0], int(post[1])
- if dev == (None, None):
- dev = ()
- else:
- dev = dev[0], int(dev[1])
- if local is None:
- local = ()
- else:
- parts = []
- for part in local.split('.'):
- # to ensure that numeric compares as > lexicographic, avoid
- # comparing them directly, but encode a tuple which ensures
- # correct sorting
- if part.isdigit():
- part = (1, int(part))
- else:
- part = (0, part)
- parts.append(part)
- local = tuple(parts)
- if not pre:
- # either before pre-release, or final release and after
- if not post and dev:
- # before pre-release
- pre = ('a', -1) # to sort before a0
- else:
- pre = ('z',) # to sort after all pre-releases
- # now look at the state of post and dev.
- if not post:
- post = ('_',) # sort before 'a'
- if not dev:
- dev = ('final',)
-
- #print('%s -> %s' % (s, m.groups()))
- return epoch, nums, pre, post, dev, local
-
-
-_normalized_key = _pep_440_key
-
-
-class NormalizedVersion(Version):
- """A rational version.
-
- Good:
- 1.2 # equivalent to "1.2.0"
- 1.2.0
- 1.2a1
- 1.2.3a2
- 1.2.3b1
- 1.2.3c1
- 1.2.3.4
- TODO: fill this out
-
- Bad:
- 1 # minimum two numbers
- 1.2a # release level must have a release serial
- 1.2.3b
- """
- def parse(self, s):
- result = _normalized_key(s)
- # _normalized_key loses trailing zeroes in the release
- # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
- # However, PEP 440 prefix matching needs it: for example,
- # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
- m = PEP440_VERSION_RE.match(s) # must succeed
- groups = m.groups()
- self._release_clause = tuple(int(v) for v in groups[1].split('.'))
- return result
-
- PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
-
- @property
- def is_prerelease(self):
- return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
-
-
-def _match_prefix(x, y):
- x = str(x)
- y = str(y)
- if x == y:
- return True
- if not x.startswith(y):
- return False
- n = len(y)
- return x[n] == '.'
-
-
-class NormalizedMatcher(Matcher):
- version_class = NormalizedVersion
-
- # value is either a callable or the name of a method
- _operators = {
- '~=': '_match_compatible',
- '<': '_match_lt',
- '>': '_match_gt',
- '<=': '_match_le',
- '>=': '_match_ge',
- '==': '_match_eq',
- '===': '_match_arbitrary',
- '!=': '_match_ne',
- }
-
- def _adjust_local(self, version, constraint, prefix):
- if prefix:
- strip_local = '+' not in constraint and version._parts[-1]
- else:
- # both constraint and version are
- # NormalizedVersion instances.
- # If constraint does not have a local component,
- # ensure the version doesn't, either.
- strip_local = not constraint._parts[-1] and version._parts[-1]
- if strip_local:
- s = version._string.split('+', 1)[0]
- version = self.version_class(s)
- return version, constraint
-
- def _match_lt(self, version, constraint, prefix):
- version, constraint = self._adjust_local(version, constraint, prefix)
- if version >= constraint:
- return False
- release_clause = constraint._release_clause
- pfx = '.'.join([str(i) for i in release_clause])
- return not _match_prefix(version, pfx)
-
- def _match_gt(self, version, constraint, prefix):
- version, constraint = self._adjust_local(version, constraint, prefix)
- if version <= constraint:
- return False
- release_clause = constraint._release_clause
- pfx = '.'.join([str(i) for i in release_clause])
- return not _match_prefix(version, pfx)
-
- def _match_le(self, version, constraint, prefix):
- version, constraint = self._adjust_local(version, constraint, prefix)
- return version <= constraint
-
- def _match_ge(self, version, constraint, prefix):
- version, constraint = self._adjust_local(version, constraint, prefix)
- return version >= constraint
-
- def _match_eq(self, version, constraint, prefix):
- version, constraint = self._adjust_local(version, constraint, prefix)
- if not prefix:
- result = (version == constraint)
- else:
- result = _match_prefix(version, constraint)
- return result
-
- def _match_arbitrary(self, version, constraint, prefix):
- return str(version) == str(constraint)
-
- def _match_ne(self, version, constraint, prefix):
- version, constraint = self._adjust_local(version, constraint, prefix)
- if not prefix:
- result = (version != constraint)
- else:
- result = not _match_prefix(version, constraint)
- return result
-
- def _match_compatible(self, version, constraint, prefix):
- version, constraint = self._adjust_local(version, constraint, prefix)
- if version == constraint:
- return True
- if version < constraint:
- return False
-# if not prefix:
-# return True
- release_clause = constraint._release_clause
- if len(release_clause) > 1:
- release_clause = release_clause[:-1]
- pfx = '.'.join([str(i) for i in release_clause])
- return _match_prefix(version, pfx)
-
-_REPLACEMENTS = (
- (re.compile('[.+-]$'), ''), # remove trailing puncts
- (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
- (re.compile('^[.-]'), ''), # remove leading puncts
- (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
- (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
- (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading r(ev)
- (re.compile('[.]{2,}'), '.'), # multiple runs of '.'
- (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
- (re.compile(r'\b(pre-alpha|prealpha)\b'),
- 'pre.alpha'), # standardise
- (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
-)
-
-_SUFFIX_REPLACEMENTS = (
- (re.compile('^[:~._+-]+'), ''), # remove leading puncts
- (re.compile('[,*")([\\]]'), ''), # remove unwanted chars
- (re.compile('[~:+_ -]'), '.'), # replace illegal chars
- (re.compile('[.]{2,}'), '.'), # multiple runs of '.'
- (re.compile(r'\.$'), ''), # trailing '.'
-)
-
-_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
-
-
-def _suggest_semantic_version(s):
- """
- Try to suggest a semantic form for a version for which
- _suggest_normalized_version couldn't come up with anything.
- """
- result = s.strip().lower()
- for pat, repl in _REPLACEMENTS:
- result = pat.sub(repl, result)
- if not result:
- result = '0.0.0'
-
- # Now look for numeric prefix, and separate it out from
- # the rest.
- #import pdb; pdb.set_trace()
- m = _NUMERIC_PREFIX.match(result)
- if not m:
- prefix = '0.0.0'
- suffix = result
- else:
- prefix = m.groups()[0].split('.')
- prefix = [int(i) for i in prefix]
- while len(prefix) < 3:
- prefix.append(0)
- if len(prefix) == 3:
- suffix = result[m.end():]
- else:
- suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
- prefix = prefix[:3]
- prefix = '.'.join([str(i) for i in prefix])
- suffix = suffix.strip()
- if suffix:
- #import pdb; pdb.set_trace()
- # massage the suffix.
- for pat, repl in _SUFFIX_REPLACEMENTS:
- suffix = pat.sub(repl, suffix)
-
- if not suffix:
- result = prefix
- else:
- sep = '-' if 'dev' in suffix else '+'
- result = prefix + sep + suffix
- if not is_semver(result):
- result = None
- return result
-
-
-def _suggest_normalized_version(s):
- """Suggest a normalized version close to the given version string.
-
- If you have a version string that isn't rational (i.e. NormalizedVersion
- doesn't like it) then you might be able to get an equivalent (or close)
- rational version from this function.
-
- This does a number of simple normalizations to the given string, based
- on observation of versions currently in use on PyPI. Given a dump of
- those versions during PyCon 2009, 4287 of them:
- - 2312 (53.93%) match NormalizedVersion without change
- with the automatic suggestion
- - 3474 (81.04%) match when using this suggestion method
-
- @param s {str} An irrational version string.
- @returns A rational version string, or None, if couldn't determine one.
- """
- try:
- _normalized_key(s)
- return s # already rational
- except UnsupportedVersionError:
- pass
-
- rs = s.lower()
-
- # part of this could use maketrans
- for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
- ('beta', 'b'), ('rc', 'c'), ('-final', ''),
- ('-pre', 'c'),
- ('-release', ''), ('.release', ''), ('-stable', ''),
- ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
- ('final', '')):
- rs = rs.replace(orig, repl)
-
- # if something ends with dev or pre, we add a 0
- rs = re.sub(r"pre$", r"pre0", rs)
- rs = re.sub(r"dev$", r"dev0", rs)
-
- # if we have something like "b-2" or "a.2" at the end of the
- # version, that is probably beta, alpha, etc
- # let's remove the dash or dot
- rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
-
- # 1.0-dev-r371 -> 1.0.dev371
- # 0.1-dev-r79 -> 0.1.dev79
- rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
-
- # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
- rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
-
- # Clean: v0.3, v1.0
- if rs.startswith('v'):
- rs = rs[1:]
-
- # Clean leading '0's on numbers.
- #TODO: unintended side-effect on, e.g., "2003.05.09"
- # PyPI stats: 77 (~2%) better
- rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
-
- # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
- # zero.
- # PyPI stats: 245 (7.56%) better
- rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
-
- # the 'dev-rNNN' tag is a dev tag
- rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
-
- # clean the - when used as a pre delimiter
- rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
-
- # a terminal "dev" or "devel" can be changed into ".dev0"
- rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
-
- # a terminal "dev" can be changed into ".dev0"
- rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
-
- # a terminal "final" or "stable" can be removed
- rs = re.sub(r"(final|stable)$", "", rs)
-
- # The 'r' and the '-' tags are post release tags
- # 0.4a1.r10 -> 0.4a1.post10
- # 0.9.33-17222 -> 0.9.33.post17222
- # 0.9.33-r17222 -> 0.9.33.post17222
- rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
-
- # Clean 'r' instead of 'dev' usage:
- # 0.9.33+r17222 -> 0.9.33.dev17222
- # 1.0dev123 -> 1.0.dev123
- # 1.0.git123 -> 1.0.dev123
- # 1.0.bzr123 -> 1.0.dev123
- # 0.1a0dev.123 -> 0.1a0.dev123
- # PyPI stats: ~150 (~4%) better
- rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
-
- # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
- # 0.2.pre1 -> 0.2c1
- # 0.2-c1 -> 0.2c1
- # 1.0preview123 -> 1.0c123
- # PyPI stats: ~21 (0.62%) better
- rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
-
- # Tcl/Tk uses "px" for their post release markers
- rs = re.sub(r"p(\d+)$", r".post\1", rs)
-
- try:
- _normalized_key(rs)
- except UnsupportedVersionError:
- rs = None
- return rs
-
-#
-# Legacy version processing (distribute-compatible)
-#
-
-_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
-_VERSION_REPLACE = {
- 'pre': 'c',
- 'preview': 'c',
- '-': 'final-',
- 'rc': 'c',
- 'dev': '@',
- '': None,
- '.': None,
-}
-
-
-def _legacy_key(s):
- def get_parts(s):
- result = []
- for p in _VERSION_PART.split(s.lower()):
- p = _VERSION_REPLACE.get(p, p)
- if p:
- if '0' <= p[:1] <= '9':
- p = p.zfill(8)
- else:
- p = '*' + p
- result.append(p)
- result.append('*final')
- return result
-
- result = []
- for p in get_parts(s):
- if p.startswith('*'):
- if p < '*final':
- while result and result[-1] == '*final-':
- result.pop()
- while result and result[-1] == '00000000':
- result.pop()
- result.append(p)
- return tuple(result)
-
-
-class LegacyVersion(Version):
- def parse(self, s):
- return _legacy_key(s)
-
- @property
- def is_prerelease(self):
- result = False
- for x in self._parts:
- if (isinstance(x, string_types) and x.startswith('*') and
- x < '*final'):
- result = True
- break
- return result
-
-
-class LegacyMatcher(Matcher):
- version_class = LegacyVersion
-
- _operators = dict(Matcher._operators)
- _operators['~='] = '_match_compatible'
-
- numeric_re = re.compile(r'^(\d+(\.\d+)*)')
-
- def _match_compatible(self, version, constraint, prefix):
- if version < constraint:
- return False
- m = self.numeric_re.match(str(constraint))
- if not m:
- logger.warning('Cannot compute compatible match for version %s '
- 'and constraint %s', version, constraint)
- return True
- s = m.groups()[0]
- if '.' in s:
- s = s.rsplit('.', 1)[0]
- return _match_prefix(version, s)
-
-#
-# Semantic versioning
-#
-
-_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
- r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
- r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
-
-
-def is_semver(s):
- return _SEMVER_RE.match(s)
-
-
-def _semantic_key(s):
- def make_tuple(s, absent):
- if s is None:
- result = (absent,)
- else:
- parts = s[1:].split('.')
- # We can't compare ints and strings on Python 3, so fudge it
- # by zero-filling numeric values so simulate a numeric comparison
- result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
- return result
-
- m = is_semver(s)
- if not m:
- raise UnsupportedVersionError(s)
- groups = m.groups()
- major, minor, patch = [int(i) for i in groups[:3]]
- # choose the '|' and '*' so that versions sort correctly
- pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
- return (major, minor, patch), pre, build
-
-
-class SemanticVersion(Version):
- def parse(self, s):
- return _semantic_key(s)
-
- @property
- def is_prerelease(self):
- return self._parts[1][0] != '|'
-
-
-class SemanticMatcher(Matcher):
- version_class = SemanticVersion
-
-
-class VersionScheme(object):
- def __init__(self, key, matcher, suggester=None):
- self.key = key
- self.matcher = matcher
- self.suggester = suggester
-
- def is_valid_version(self, s):
- try:
- self.matcher.version_class(s)
- result = True
- except UnsupportedVersionError:
- result = False
- return result
-
- def is_valid_matcher(self, s):
- try:
- self.matcher(s)
- result = True
- except UnsupportedVersionError:
- result = False
- return result
-
- def is_valid_constraint_list(self, s):
- """
- Used for processing some metadata fields
- """
- # See issue #140. Be tolerant of a single trailing comma.
- if s.endswith(','):
- s = s[:-1]
- return self.is_valid_matcher('dummy_name (%s)' % s)
-
- def suggest(self, s):
- if self.suggester is None:
- result = None
- else:
- result = self.suggester(s)
- return result
-
-_SCHEMES = {
- 'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
- _suggest_normalized_version),
- 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
- 'semantic': VersionScheme(_semantic_key, SemanticMatcher,
- _suggest_semantic_version),
-}
-
-_SCHEMES['default'] = _SCHEMES['normalized']
-
-
-def get_scheme(name):
- if name not in _SCHEMES:
- raise ValueError('unknown scheme name: %r' % name)
- return _SCHEMES[name]
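
Since the deleted module is self-describing but long, a short usage sketch may help: it exercises the `NormalizedVersion`, `NormalizedMatcher`, and `get_scheme` entry points exactly as defined above (the import path is the deleted file's vendored location; the requirement string and versions are arbitrary examples).

```python
# Usage sketch for the vendored distlib version module deleted above.
from pip._vendor.distlib.version import (
    NormalizedVersion, NormalizedMatcher, get_scheme,
)

# PEP 440 versions: trailing zeros are insignificant, pre-releases are flagged.
assert NormalizedVersion("1.2") == NormalizedVersion("1.2.0")
assert NormalizedVersion("1.2.3a1").is_prerelease

# Matchers parse "name (constraints)" and test a version against every clause.
m = NormalizedMatcher("requests (>= 2.0, != 2.3.*, < 3.0)")
assert m.match("2.31.0")        # satisfies all three constraints
assert not m.match("2.3.1")     # excluded by the != 2.3.* prefix clause

# A scheme bundles the key function, matcher class and suggester together.
scheme = get_scheme("normalized")
assert scheme.is_valid_version("1.0.post1")
assert scheme.suggest("1.0-final") == "1.0"   # suggester normalizes legacy spellings
```
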
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/errors.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/errors.py
deleted file mode 100644
index 626254c321fb31033c54fed7ff57a0df5eaaa608..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/errors.py
+++ /dev/null
@@ -1,127 +0,0 @@
-"""distutils.errors
-
-Provides exceptions used by the Distutils modules. Note that Distutils
-modules may raise standard exceptions; in particular, SystemExit is
-usually raised for errors that are obviously the end-user's fault
-(eg. bad command-line arguments).
-
-This module is safe to use in "from ... import *" mode; it only exports
-symbols whose names start with "Distutils" and end with "Error"."""
-
-
-class DistutilsError(Exception):
- """The root of all Distutils evil."""
-
- pass
-
-
-class DistutilsModuleError(DistutilsError):
- """Unable to load an expected module, or to find an expected class
- within some module (in particular, command modules and classes)."""
-
- pass
-
-
-class DistutilsClassError(DistutilsError):
- """Some command class (or possibly distribution class, if anyone
- feels a need to subclass Distribution) is found not to be holding
- up its end of the bargain, ie. implementing some part of the
- "command "interface."""
-
- pass
-
-
-class DistutilsGetoptError(DistutilsError):
- """The option table provided to 'fancy_getopt()' is bogus."""
-
- pass
-
-
-class DistutilsArgError(DistutilsError):
- """Raised by fancy_getopt in response to getopt.error -- ie. an
- error in the command line usage."""
-
- pass
-
-
-class DistutilsFileError(DistutilsError):
- """Any problems in the filesystem: expected file not found, etc.
- Typically this is for problems that we detect before OSError
- could be raised."""
-
- pass
-
-
-class DistutilsOptionError(DistutilsError):
- """Syntactic/semantic errors in command options, such as use of
- mutually conflicting options, or inconsistent options,
- badly-spelled values, etc. No distinction is made between option
- values originating in the setup script, the command line, config
- files, or what-have-you -- but if we *know* something originated in
- the setup script, we'll raise DistutilsSetupError instead."""
-
- pass
-
-
-class DistutilsSetupError(DistutilsError):
- """For errors that can be definitely blamed on the setup script,
- such as invalid keyword arguments to 'setup()'."""
-
- pass
-
-
-class DistutilsPlatformError(DistutilsError):
- """We don't know how to do something on the current platform (but
- we do know how to do it on some platform) -- eg. trying to compile
- C files on a platform not supported by a CCompiler subclass."""
-
- pass
-
-
-class DistutilsExecError(DistutilsError):
- """Any problems executing an external program (such as the C
- compiler, when compiling C files)."""
-
- pass
-
-
-class DistutilsInternalError(DistutilsError):
- """Internal inconsistencies or impossibilities (obviously, this
- should never be seen if the code is working!)."""
-
- pass
-
-
-class DistutilsTemplateError(DistutilsError):
- """Syntax error in a file list template."""
-
-
-class DistutilsByteCompileError(DistutilsError):
- """Byte compile error."""
-
-
-# Exception classes used by the CCompiler implementation classes
-class CCompilerError(Exception):
- """Some compile/link operation failed."""
-
-
-class PreprocessError(CCompilerError):
- """Failure to preprocess one or more C/C++ files."""
-
-
-class CompileError(CCompilerError):
- """Failure to compile one or more C/C++ source files."""
-
-
-class LibError(CCompilerError):
- """Failure to create a static library from one or more C/C++ object
- files."""
-
-
-class LinkError(CCompilerError):
- """Failure to link one or more C/C++ object files into an executable
- or shared library file."""
-
-
-class UnknownFileError(CCompilerError):
- """Attempt to process an unknown file type."""
diff --git a/spaces/prerna9811/Chord/portaudio/src/os/unix/pa_unix_util.h b/spaces/prerna9811/Chord/portaudio/src/os/unix/pa_unix_util.h
deleted file mode 100644
index 2228cb331d924d6cfcd84824d61ca7bc7c129a99..0000000000000000000000000000000000000000
--- a/spaces/prerna9811/Chord/portaudio/src/os/unix/pa_unix_util.h
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * $Id$
- * Portable Audio I/O Library
- * UNIX platform-specific support functions
- *
- * Based on the Open Source API proposed by Ross Bencina
- * Copyright (c) 1999-2000 Ross Bencina
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files
- * (the "Software"), to deal in the Software without restriction,
- * including without limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * The text above constitutes the entire PortAudio license; however,
- * the PortAudio community also makes the following non-binding requests:
- *
- * Any person wishing to distribute modifications to the Software is
- * requested to send the modifications to the original developer so that
- * they can be incorporated into the canonical version. It is also
- * requested that these non-binding requests be included along with the
- * license above.
- */
-
-/** @file
- @ingroup unix_src
-*/
-
-#ifndef PA_UNIX_UTIL_H
-#define PA_UNIX_UTIL_H
-
-#include "pa_cpuload.h"
-#include