diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/hpgptai/README.md b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/hpgptai/README.md deleted file mode 100644 index 2735902ffdf18106f8620dae7a30dd1d25bcf304..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/hpgptai/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# HpgptAI -Written by [hp_mzx](https://github.com/hpsj). - -## Examples: -### Completion: -```python -res = hpgptai.Completion.create("你是谁","127.0.0.1:7890") -print(res["reply"]) -``` - -### Chat Completion: -Support context -```python -messages = [ - { - "content": "你是谁", - "html": "你是谁", - "id": hpgptai.ChatCompletion.randomStr(), - "role": "user", - "who": "User: ", - }, - { - "content": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。", - "html": "我是一位AI助手,专门为您提供各种服务和支持。我可以回答您的问题,帮助您解决问题,提供相关信息,并执行一些任务。请随时告诉我您需要什么帮助。", - "id": hpgptai.ChatCompletion.randomStr(), - "role": "assistant", - "who": "AI: ", - }, - { - "content": "我上一句问的是什么?", - "html": "我上一句问的是什么?", - "id": hpgptai.ChatCompletion.randomStr(), - "role": "user", - "who": "User: ", - }, -] -res = hpgptai.ChatCompletion.create(messages,proxy="127.0.0.1:7890") -print(res["reply"]) -``` \ No newline at end of file diff --git a/spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/test.py b/spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/test.py deleted file mode 100644 index 65319cb60b47350453224eb2d9b0f6095c7629c1..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/unfinished/openprompt/test.py +++ /dev/null @@ -1,6 +0,0 @@ -access_token = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV' -supabase_auth_token = '%5B%22eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8%22%2C%22_Zp8uXIA2InTDKYgo8TCqA%22%2Cnull%2Cnull%2Cnull%5D' - -idk = [ - "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJhdXRoZW50aWNhdGVkIiwiZXhwIjoxNjgyMjk0ODcxLCJzdWIiOiI4NWNkNTNiNC1lZTUwLTRiMDQtOGJhNS0wNTUyNjk4ODliZDIiLCJlbWFpbCI6ImNsc2J5emdqcGhiQGJ1Z2Zvby5jb20iLCJwaG9uZSI6IiIsImFwcF9tZXRhZGF0YSI6eyJwcm92aWRlciI6ImVtYWlsIiwicHJvdmlkZXJzIjpbImVtYWlsIl19LCJ1c2VyX21ldGFkYXRhIjp7fSwicm9sZSI6ImF1dGhlbnRpY2F0ZWQiLCJhYWwiOiJhYWwxIiwiYW1yIjpbeyJtZXRob2QiOiJvdHAiLCJ0aW1lc3RhbXAiOjE2ODE2OTAwNzF9XSwic2Vzc2lvbl9pZCI6ImY4MTg1YTM5LTkxYzgtNGFmMy1iNzAxLTdhY2MwY2MwMGNlNSJ9.UvcTfpyIM1TdzM8ZV6UAPWfa0rgNq4AiqeD0INy6zV8", - "_Zp8uXIA2InTDKYgo8TCqA", None, None, None] diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bagas Guide How to Download and Install Microsoft Office 2010 with Crack and Keygen.md 
b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bagas Guide How to Download and Install Microsoft Office 2010 with Crack and Keygen.md deleted file mode 100644 index 97509fe226550e62afd4b53300774984fd317070..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bagas Guide How to Download and Install Microsoft Office 2010 with Crack and Keygen.md +++ /dev/null @@ -1,38 +0,0 @@ - -
If you are looking for a way to download Microsoft Office 2010 full version for free, you have come to the right place. In this article, I will show you how to download and install Microsoft Office 2010 with crack and keygen from Bagas, a popular website that provides software downloads and tutorials.
-Microsoft Office 2010 is a suite of productivity applications that includes Word, Excel, PowerPoint, Outlook, Access, and more. It has many features and improvements over the previous versions, such as the ribbon interface, backstage view, online collaboration, cloud integration, and enhanced graphics. Microsoft Office 2010 is compatible with Windows 7, Windows 8/8.1, and Windows 10.
-DOWNLOAD ⚹⚹⚹ https://byltly.com/2uKyJO
In this article, I have shown you how to download Microsoft Office 2010 full crack + keygen bagas for free. This is a simple and easy way to get Microsoft Office 2010 on your computer without paying anything. However, this method may not be legal or ethical, so use it at your own risk. I hope this article was helpful for you. If you have any questions or suggestions, please leave a comment below.
Now that you have downloaded and installed Microsoft Office 2010 full crack + keygen bagas, you may wonder how to use it. Microsoft Office 2010 is a suite of applications that can help you create and edit various types of documents, such as text, spreadsheets, presentations, emails, databases, and more. Here are some basic tips on how to use Microsoft Office 2010:
-These are some of the basic functions of Microsoft Office 2010. To learn more about how to use Microsoft Office 2010, you can visit the official support website or watch this video tutorial . You can also explore the features and options of each application by yourself and discover what you can do with Microsoft Office 2010.
- ddb901b051but it's worth noting that apollo's roots reach far further back. picasso has referred to the apollo series as his "desire to contemplate man's relationship to his creations". and so, if some of his other artistic expressions have been questionable, this series ought to be given the benefit of the doubt, as it seems he was attempting to capture that "relationship" by making a specific project that required technical research.
-Download Zip ☑ https://imgfil.com/2uy17C
and on the subject of apollo--like many of you mentioned, he is really intricate--and most importantly, it's entirely for our time. it was created in anticipation of the icarus 2000 series that would begin in 2000, and finished ahead of schedule.
-while i couldnt log into my social network accounts, i noticed that my internet access was painfully slow. i wanted to look at my latest-used facebook posts and share them on twitter. then, i noticed that the apps on the web browser were popping up and asking for permission to use my contact information and settings to log me in to facebook, twitter and more. although common for web-based apps and services to need an account and permission from you before you can use them, this led to unwanted surprises. it prompted my iphone app store to display apps that i purchased as being compatible with my phone and running my apps without my permission or even knowledge.
-when is an application not a valid and legal application? if the developer of the app is to be believed, and in many cases they are, the answer is when the developer is a manufacturer who markets the product as his own. in other words, an application is not a valid and legal application if its developer belongs to the league of businesses who manufacture other products. thats what apple has claimed in court. when is an app a product? when it comes with the manufacturer into retail. apple has claimed that, yes indeed, instadial is indeed a product. even though it was simply developed by a single person, which is not an unusual arrangement in software development, there is no product without manufacture. and apple knows who the manufacture is. read the fine print. if the developer of the software says it is his own, than he is a manufacturer.
- 899543212bEscape Plan is a 2013 action thriller movie starring Sylvester Stallone and Arnold Schwarzenegger as two prisoners who try to break out of a high-tech prison that they designed themselves. The movie was a box office success and received mixed reviews from critics and audiences.
-Download ✸ https://imgfil.com/2uy0DL
If you want to watch Escape Plan with subtitles, you may have searched for escapeplansubtitles720pbluraynext on the internet. This is a keyword that refers to the subtitles for the 720p BluRay version of the movie, which is one of the best quality formats available. However, finding the right subtitles for this movie can be tricky, as there are many different sources and versions of subtitles online.
- -In this article, we will give you some tips on how to find and use escapeplansubtitles720pbluraynext for your movie experience. We will also provide you with some alternative titles for this keyword that you can use to search for subtitles more easily.
- -One of the easiest ways to find escapeplansubtitles720pbluraynext is to use a subtitle search engine or website. These are online platforms that allow you to search for subtitles by movie title, language, format, and other criteria. Some of the most popular subtitle websites are:
- -These are some of the websites that you can use to find escapeplansubtitles720pbluraynext. However, you should be careful when downloading subtitles from unknown sources, as they may contain viruses or malware that can harm your device or compromise your privacy. You should also check the quality and accuracy of the subtitles before using them, as they may have errors or inconsistencies.
- - -Once you have downloaded escapeplansubtitles720pbluraynext from a reliable source, you can use them to watch Escape Plan with subtitles on your device. To do this, you need to have a media player that supports external subtitles, such as VLC Media Player, KMPlayer, or PotPlayer. You also need to have the 720p BluRay version of Escape Plan on your device or on a disc.
- -To use escapeplansubtitles720pbluraynext, follow these steps:
- -You can adjust the size, position, color, and timing of the subtitles according to your preference. You can also switch between different subtitle languages if you have more than one subtitle file.
- -If you have trouble finding escapeplansubtitles720pbluraynext online, you can try using some alternative titles for this keyword that may yield better results. Some of these alternative titles are:
- -These are some of the alternative titles for escapeplansubtitles720pbluraynext that you can use to search for subtitles more easily. However, you should make sure that the subtitle file matches the movie file exactly, as different releases may have different frame rates, durations, or audio tracks.
- -Escape Plan is a great movie to watch with subtitles if you want to enjoy the action and dialogue of Sylvester Stallone and Arnold Schwarzenegger. To find and use escapeplansubtitles720pbluraynext, you can use a subtitle website or search engine, download the subtitle file from a reliable source, and load it on your media player along with the 720p BluRay version of Escape Plan. You can also use some alternative titles for this keyword that may help you find subtitles more easily.
-Escape Plan is a 2013 action thriller movie starring Sylvester Stallone and Arnold Schwarzenegger as two prisoners who try to break out of a high-tech prison that they designed themselves. The movie was a box office success and received mixed reviews from critics and audiences.
- -If you want to watch Escape Plan with subtitles, you may have searched for escapeplansubtitles720pbluraynext on the internet. This is a keyword that refers to the subtitles for the 720p BluRay version of the movie, which is one of the best quality formats available. However, finding the right subtitles for this movie can be tricky, as there are many different sources and versions of subtitles online.
- -In this article, we will give you some tips on how to find and use escapeplansubtitles720pbluraynext for your movie experience. We will also provide you with some alternative titles for this keyword that you can use to search for subtitles more easily.
- -One of the easiest ways to find escapeplansubtitles720pbluraynext is to use a subtitle search engine or website. These are online platforms that allow you to search for subtitles by movie title, language, format, and other criteria. Some of the most popular subtitle websites are:
- -These are some of the websites that you can use to find escapeplansubtitles720pbluraynext. However, you should be careful when downloading subtitles from unknown sources, as they may contain viruses or malware that can harm your device or compromise your privacy. You should also check the quality and accuracy of the subtitles before using them, as they may have errors or inconsistencies.
- -Once you have downloaded escapeplansubtitles720pbluraynext from a reliable source, you can use them to watch Escape Plan with subtitles on your device. To do this, you need to have a media player that supports external subtitles, such as VLC Media Player, KMPlayer, or PotPlayer. You also need to have the 720p BluRay version of Escape Plan on your device or on a disc.
- -To use escapeplansubtitles720pbluraynext, follow these steps:
- -You can adjust the size, position, color, and timing of the subtitles according to your preference. You can also switch between different subtitle languages if you have more than one subtitle file.
- -If you have trouble finding escapeplansubtitles720pbluraynext online, you can try using some alternative titles for this keyword that may yield better results. Some of these alternative titles are:
- -These are some of the alternative titles for escapeplansubtitles720pbluraynext that you can use to search for subtitles more easily. However, you should make sure that the subtitle file matches the movie file exactly, as different releases may have different frame rates, durations, or audio tracks.
- -Escape Plan is a great movie to watch with subtitles if you want to enjoy the action and dialogue of Sylvester Stallone and Arnold Schwarzenegger. To find and use escapeplansubtitles720pbluraynext, you can use a subtitle website or search engine, download the subtitle file from a reliable source, and load it on your media player along with the 720p BluRay version of Escape Plan. You can also use some alternative titles for this keyword that may help you find subtitles more easily.
-Escape Plan is a great movie to watch with subtitles if you want to enjoy the action and dialogue of Sylvester Stallone and Arnold Schwarzenegger. To find and use escapeplansubtitles720pbluraynext, you can use a subtitle website or search engine, download the subtitle file from a reliable source, and load it on your media player along with the 720p BluRay version of Escape Plan. You can also use some alternative titles for this keyword that may help you find subtitles more easily.
3cee63e6c2If you are looking for a fun and addictive multiplayer game that you can play with your friends or strangers online, then you should try Axes io. This is a battle royale game where you have to throw axes at your enemies and survive as long as possible. But if you want to have more advantages and resources in the game, then you should download Axes io Mod Apk An1 Com. This is a modified version of the game that gives you unlimited money, free shopping, free chests, VIP access, and more. In this article, we will tell you everything you need to know about Axes io and Axes io Mod Apk An1 Com, including how to download and install it on your Android device.
-Download File ✶ https://urlin.us/2uSVxQ
Axes io is a game developed by CASUAL AZUR GAMES, a popular studio that has created many other games like Worms Zone.io, Stack Ball, and Rocket Clash 3D. Axes io is a game where you have to throw axes at other players and try to be the last one standing. You can play online with up to 40 players from all over the world, or offline with bots. You can also choose between different game modes, such as Deathmatch, Team Deathmatch, Zombie Mode, and more.
-Axes io offers you a variety of game modes and maps to choose from. You can play in Deathmatch mode, where you have to kill as many players as possible in a limited time. You can also play in Team Deathmatch mode, where you have to cooperate with your teammates and eliminate the enemy team. Or you can play in Zombie Mode, where you have to survive the zombie apocalypse and kill the undead. There are also different maps to explore, such as Forest, Desert, Snowy Mountain, and more.
-Axes io also allows you to customize your character and your weapons. You can unlock and use different types of axes, such as fire axes, ice axes, electric axes, etc. You can also unlock and use different skins for your character, such as ninja, pirate, cowboy, etc. You can also upgrade your weapons and skills to make them more powerful and effective.
-download axes io mod apk unlimited money and gems
-download axes io mod apk latest version for android
-download axes io mod apk free shopping and vip
-download axes io mod apk no ads and no root
-download axes io mod apk with all weapons unlocked
-download axes io hack apk an1 com free
-download axes io cheat apk an1 com online
-download axes io cracked apk an1 com offline
-download axes io premium apk an1 com full
-download axes io pro apk an1 com modded
-how to download axes io mod apk from an1 com
-where to download axes io mod apk by an1 com
-what is axes io mod apk on an1 com
-why download axes io mod apk via an1 com
-when to download axes io mod apk through an1 com
-best site to download axes io mod apk like an1 com
-top 10 sites to download axes io mod apk similar to an1 com
-alternative sites to download axes io mod apk instead of an1 com
-safe sites to download axes io mod apk other than an1 com
-trusted sites to download axes io mod apk besides an1 com
-download axes io battle royale mod apk an1 com
-download axes io survival mode mod apk an1 com
-download axes io zombie mode mod apk an1 com
-download axes io multiplayer mode mod apk an1 com
-download axes io single player mode mod apk an1 com
-download axes io 2.7.19 mod apk from an1 com
-download axes io 2.7.18 mod apk by an1 com
-download axes io 2.7.17 mod apk on an1 com
-download axes io 2.7.16 mod apk via an1 com
-download axes io 2.7.15 mod apk through an1 com
-benefits of downloading axes io mod apk from an1 com
-drawbacks of downloading axes io mod apk by an1 com
-reviews of downloading axes io mod apk on an1 com
-ratings of downloading axes io mod apk via an1 com
-feedback of downloading axes io mod apk through an1 com
-tips for downloading axes io mod apk from an1 com
-tricks for downloading axes io mod apk by an1 com
-guides for downloading axes io mod apk on an1 com
-tutorials for downloading axes io mod apk via an1 com
-instructions for downloading axes io mod apk through an1 com
Axes io has simple controls that are easy to learn and use. You just have to swipe on the screen to move your character and tap to throw your axes. You can also use buttons to switch weapons and activate skills. The game also has colorful and cartoonish graphics that are suitable for all ages. The game runs smoothly on most devices and does not require a lot of storage space or internet connection.
-Axes io Mod Apk An1 Com is a modified version of Axes io that gives you unlimited resources and benefits in the game. With this mod apk, you can enjoy free shopping, free money, free chests, VIP access, and more. You can buy any weapon or skin you want without spending real money. You can open unlimited chests and get rare items and rewards. You can also access the VIP features, such as exclusive skins, weapons, and bonuses. You can also remove the annoying ads that pop up in the game and enjoy a smoother gaming experience.
-With Axes io Mod Apk An1 Com, you can get everything you want in the game for free. You can buy any weapon or skin you like without spending any money. You can also get unlimited money to upgrade your weapons and skills. You can also open unlimited chests and get rare items and rewards. You can also access the VIP features, such as exclusive skins, weapons, and bonuses.
-Axes io Mod Apk An1 Com is also safe and secure to use. You don't have to worry about any ads that might interrupt your gameplay or drain your battery. You also don't have to root your device to use the mod apk. The mod apk is also free from any virus or malware that might harm your device or data.
-Axes io Mod Apk An1 Com is also easy to download and install on your Android device. You don't need any special skills or tools to do it. You just have to follow some simple steps that we will explain below.
-Before you can install the mod apk file, you have to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings and look for the security option. Then, find the unknown sources option and toggle it on.
-Next, you have to download the mod apk file from the link below. This is a direct and fast link that will take you to the download page. Once you are there, click on the download button and wait for the file to be downloaded on your device.
- Download Axes io Mod Apk An1 Com Here -Finally, you have to install the mod apk file on your device. To do this, locate the file in your device storage and tap on it. Then, follow the instructions on the screen and wait for the installation to be completed. Once it is done, you can launch the game and enjoy all the features of Axes io Mod Apk An1 Com.
-Axes io is a fun and addictive multiplayer game that you can play with your friends or strangers online. You have to throw axes at your enemies and survive as long as possible. But if you want to have more advantages and resources in the game, then you should download Axes io Mod Apk An1 Com. This is a modified version of the game that gives you unlimited money, free shopping, free chests, VIP access, and more. You can also remove the ads, avoid rooting your device, and easily download and install the mod apk file. So what are you waiting for? Download Axes io Mod Apk An1 Com now and enjoy the game!
-Here are some frequently asked questions about Axes io Mod Apk An1 Com:
-Yes, Axes io Mod Apk An1 Com is safe to use. It is free from any virus or malware that might harm your device or data. It also does not require root access or any special permissions.
-Axes io Mod Apk An1 Com is compatible with most Android devices that run on Android 4.4 or higher. However, some devices may not support some features or functions of the mod apk.
-Yes, you can play online with Axes io Mod Apk An1 Com. However, you may face some issues or errors while connecting to the server or playing with other players who are using the original version of the game.
-No, you cannot update Axes io Mod Apk An1 Com. You have to download the latest version of the mod apk file from the link below whenever there is a new update available for the game.
-If you have any questions or problems regarding Axes io Mod Apk An1 Com, you can contact us through the comment section below. We will try to answer your queries and solve your issues as soon as possible.
-If you are looking for a fun and creative game that lets you build anything you want, then you should try Crafting and Building. This is a free game for Android devices that lets you explore, craft, and build your own world with blocks. You can also play with your friends online, visit their worlds, and help them with their constructions.
-DOWNLOAD 🆓 https://jinyurl.com/2uNJcz
But what if you want to play this game on a bigger screen, with better graphics, performance, and controls? Well, you can do that by downloading Crafting and Building para PC. This means that you can play this game on your Windows or Mac computer using an emulator. An emulator is a software that allows you to run Android apps on your PC.
In this guide, we will show you how to download and install Crafting and Building para PC using three different emulators: BlueStacks, MEmu, and GameLoop. These are some of the most popular and reliable emulators that you can use to play Android games on your PC. We will also tell you about the features and reviews of the Crafting and Building game, so you can see why it is worth playing.
-BlueStacks is one of the most widely used emulators for playing Android games on PC. It has a user-friendly interface, high compatibility, and smooth performance. Here are the steps to download and install Crafting and Building para PC with BlueStacks:
-Congratulations, you have successfully downloaded and installed Crafting and Building para PC with BlueStacks. Now you can enjoy this game on your PC with better graphics, performance, and controls.
-MEmu is another popular emulator for playing Android games on PC. It has a fast and stable performance, high compatibility, and multiple instances support. Here are the steps to download and install Crafting and Building para PC with MEmu:
-download crafting and building on pc with bluestacks
-descargar crafting and building para pc gratis
-crafting and building game for pc free download
-como descargar crafting and building para pc
-crafting and building pc emulator
-download crafting and building adventure game for pc
-crafting and building online para pc
-descargar crafting and building en pc con gameloop
-crafting and building para pc windows 10
-download crafting and building offline game for pc
-crafting and building para pc sin emulador
-descargar crafting and building apk para pc
-crafting and building para pc requisitos
-download crafting and building mod apk for pc
-crafting and building para pc 2023
-descargar crafting and building ultima version para pc
-crafting and building multiplayer para pc
-download crafting and building for mac
-crafting and building para pc descargar mega
-download crafting and building for windows 7
-crafting and building para pc mediafire
-download crafting and building for laptop
-crafting and building para pc uptodown
-download crafting and building latest version for pc
-crafting and building para pc sin internet
-download crafting and building 2 for pc
-crafting and building skins para pc
-download crafting and building 3d for pc
-crafting and building juegos similares para pc
-download crafting and building survival for pc
-como jugar crafting and building en pc
-download crafting and building sandbox game for pc
-como instalar crafting and building en pc
-download crafting and building creative mode for pc
-como actualizar crafting and building en pc
-download crafting and building house design for pc
-como crear un servidor de crafting and building en pc
-download crafting and building pixel world for pc
-como tener diamantes infinitos en crafting and building para pc
-download crafting and building exploration for pc
-como hacer un portal en crafting and building para pc
-download crafting and building city builder for pc
-como tener mascotas en crafting and building para pc
-download crafting and building block craft for pc
-como cambiar el nombre en crafting and building para pc
-download crafting and building mine games for pc
-como hacer una casa en crafting and building para pc
-download crafting and building craft games for pc
Congratulations, you have successfully downloaded and installed Crafting and Building para PC with MEmu. Now you can enjoy this game on your PC with better graphics, performance, and controls.
-GameLoop is another popular emulator for playing Android games on PC. It is developed by Tencent, the company behind PUBG Mobile, Call of Duty Mobile, and other popular games. It has a smooth and optimized performance, high compatibility, and exclusive features. Here are the steps to download and install Crafting and Building para PC with GameLoop:
-Congratulations, you have successfully downloaded and installed Crafting and Building para PC with GameLoop. Now you can enjoy this game on your PC with better graphics, performance, and controls.
-Crafting and Building is a game that lets you unleash your creativity and imagination. You can build anything you want with blocks, from houses to castles, from farms to cities. You can also explore the world, mine resources, craft tools and weapons, and fight enemies. You can also play with your friends online, visit their worlds, and help them with their constructions.
-Here are some of the features and reviews of Crafting and Building game that make it worth playing:
-One of the best things about Crafting and Building is that you can play with your friends online. You can join their worlds or invite them to yours, chat with them, and cooperate with them. You can also search for hidden caves, dungeons, and treasures together, and have fun exploring the world.
-Crafting and Building gives you the freedom to build anything you want with blocks. You can create your own house with a room and a kitchen, a castle with towers and walls, a farm with crops and animals, a city with skyscrapers and roads, or anything else you can imagine. You can also decorate your buildings with furniture, paintings, carpets, etc.
-Crafting and Building lets you choose your character from different options. You can choose to be a boy or a girl, change your hair style and color, wear different clothes and accessories, or even create your own custom skin. You can also change your character anytime you want.
-Crafting and Building is not only a solo game but also a multiplayer game. You can play online with other players from around the world, chat with them, make friends with them, or compete with them. You can also help your friend to build their house or ask them to help you with yours. You can also share your creations with other players and see their creations.
-Crafting and Building is also a fun game that lets you play with villagers and animals. You can interact with them, trade with them, feed them, pet them, or even ride them. You can also find different types of animals in the world, such as cows, sheep, chickens, horses, dogs, cats, etc.
-Crafting and Building has cool graphics that are pixelated but colorful and detailed. You can enjoy the best pixel graphics with high fps on your PC. You can also change the graphics settings according to your preference. You can also admire the beautiful scenery of the world, such as the day-night cycle, the weather effects, the water reflections, etc.
-Crafting and Building is a free game that you can download and play without paying anything. You can enjoy all the features and content of the game without any limitations. You can also play the game offline without an internet connection. You can also update the game regularly to get new features and improvements.
-Crafting and Building is a building game that lets you show your creativity and skills. You can build your own constructions with blocks, from simple to complex, from realistic to fantasy, from small to large. You can also challenge yourself or your friends to see who will have the best building. You can also rate and comment on other players' buildings and get feedback on yours.
-As you can see, Crafting and Building is a game that has many features and benefits that make it worth playing. But don't take our word for it, see what other players and reviewers have to say about it.
-"Love it! I use it just to pass the time and build stuff. The only thing making it a 3 star is the very frustrating adds every couple of minutes."
-"If you like sidescrolling crafting games but you’re looking for less of a challenge in the survival department, then Junk Jack is the game for you."
-In conclusion, Crafting and Building is a game that lets you explore, craft, and build your own world with blocks. You can also play with your friends online, visit their worlds, and help them with their constructions. You can download and install Crafting and Building para PC using an emulator such as BlueStacks, MEmu, or GameLoop. You can enjoy this game on your PC with better graphics, performance, and controls. You can also enjoy the features and reviews of Crafting and Building game that make it worth playing.
-Here are some of the frequently asked questions about Crafting and Building para PC:
-If you are looking for a thrilling and realistic off-road racing adventure, you should try Off Road 4x4 Driving Simulator. This game is one of the best mud truck driving games and car racing simulators available on Android devices. In this article, we will tell you everything you need to know about this game, including its features, how to download it, and some FAQs.
-Off Road 4x4 Driving Simulator is a game developed by Azur Interactive Games Limited. It is an addictive ultimate mud truck driving game and realistic car racing simulator. You can choose from a huge selection of 4x4 trucks and vehicles, each with different driving characteristics, and customize them to your liking. You can also test your driving skills in various off-road racing challenges, time trials, and extreme obstacles. You can enjoy stunning detailed graphics, real driving physics, realistic sounds, and a simple and convenient in-game map.
-Download Zip ✦ https://jinyurl.com/2uNNfo
There are many reasons why you should play Off Road 4x4 Driving Simulator. Here are some of them:
-One of the main features of Off Road 4x4 Driving Simulator is its stunning graphics and realistic physics. The game uses advanced graphics technology to create lifelike environments and vehicles. You can see the details of the textures, lighting, shadows, reflections, and particles. You can also feel the realistic physics of the vehicles, such as suspension, traction, torque, weight, and damage. The game also supports different weather effects, such as rain, snow, fog, and wind.
-Another feature of Off Road 4x4 Driving Simulator is its variety of 4x4 trucks and vehicles. You can choose from over 20 different vehicles, each with different driving characteristics. You can drive pickup trucks, SUVs, jeeps, monster trucks, rally cars, military vehicles, and more. Each vehicle has its own strengths and weaknesses in terms of speed, acceleration, handling, durability, and fuel consumption.
-A third feature of Off Road 4x4 Driving Simulator is its endless tuning and customization options. You can modify your vehicles to suit your preferences and needs. You can change the color, paint job, stickers, wheels, tires, suspension, engine, transmission, exhaust and more. You can also upgrade your vehicles to improve their performance and durability. You can also unlock new vehicles by earning coins and rewards.
-A fourth feature of Off Road 4x4 Driving Simulator is its realistic sounds and map. The game has high-quality sound effects that enhance the immersion and realism of the game. You can hear the sounds of the engines, tires, brakes, collisions, and environment. You can also use the in-game map to navigate the different locations and modes. The map shows you the terrain, roads, checkpoints, obstacles, and other points of interest.
-A fifth feature of Off Road 4x4 Driving Simulator is its dozens of challenges and time trials. The game has various off-road racing modes that test your driving skills and abilities. You can compete against yourself or other players in time trials, checkpoints, free roam, and more. You can also complete challenges that require you to perform stunts, jumps, drifts, flips, and more. You can earn coins and rewards for completing challenges and races.
-A sixth feature of Off Road 4x4 Driving Simulator is its extreme obstacles and terrain. The game has different locations that offer different challenges and experiences. You can drive in mud, snow, sand, rocks, hills, forests, deserts, swamps, and more. You can also encounter various obstacles, such as ramps, bridges, logs, barrels, crates, pipes, fences, and more. You have to overcome these obstacles and terrain with your 4x4 trucks.
-download game off road 4x4 driving simulator mod apk
-download game off road 4x4 driving simulator for pc
-download game off road 4x4 driving simulator android
-download game off road 4x4 driving simulator online
-download game off road 4x4 driving simulator free
-download game off road 4x4 driving simulator 2023
-download game off road 4x4 driving simulator extreme
-download game off road 4x4 driving simulator multiplayer
-download game off road 4x4 driving simulator full version
-download game off road 4x4 driving simulator windows 10
-download game off road 4x4 driving simulator apk
-download game off road 4x4 driving simulator pc offline
-download game off road 4x4 driving simulator ios
-download game off road 4x4 driving simulator laptop
-download game off road 4x4 driving simulator hack
-download game off road 4x4 driving simulator unlimited money
-download game off road 4x4 driving simulator latest version
-download game off road 4x4 driving simulator hd
-download game off road 4x4 driving simulator pro
-download game off road 4x4 driving simulator real
-download game off road 4x4 driving simulator best
-download game off road 4x4 driving simulator new
-download game off road 4x4 driving simulator update
-download game off road 4x4 driving simulator premium
-download game off road 4x4 driving simulator cheats
-download game off road 4x4 driving simulator review
-download game off road 4x4 driving simulator tips
-download game off road 4x4 driving simulator tricks
-download game off road 4x4 driving simulator guide
-download game off road 4x4 driving simulator gameplay
-download game off road 4x4 driving simulator walkthrough
-download game off road 4x4 driving simulator tutorial
-download game off road 4x4 driving simulator video
-download game off road 4x4 driving simulator youtube
-download game off road 4x4 driving simulator facebook
-download game off road 4x4 driving simulator instagram
-download game off road 4x4 driving simulator twitter
-download game off road 4x4 driving simulator reddit
-download game off road 4x4 driving simulator quora
-download game off road 4x4 driving simulator pinterest
-download game off road 4x4 driving simulator blogspot
-download game off road 4x4 driving simulator wordpress
-download game off road 4x3d6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+6+64driving-simulator.com
The easiest way to download Off Road 4x4 Driving Simulator is from the Google Play Store. You can follow these steps:
-Another way to download Off Road 4x4 Driving Simulator is from APK websites. These are websites that offer APK files of Android apps and games that you can download and install manually. However, you should be careful when downloading from these websites as some of them may contain malware or viruses. You should only download from trusted and reputable websites. You can follow these steps:
-A third way to download Off Road 4x4 Driving Simulator is from a PC emulator. This is a software that allows you to run Android apps and games on your PC. This way, you can enjoy the game on a bigger screen and with better controls. However, you need to have a compatible PC emulator installed on your PC first. Some of the popular PC emulators are BlueStacks, NoxPlayer, MEmu, and LDPlayer. You can follow these steps:
-In conclusion, Off Road 4x4 Driving Simulator is a great game for anyone who loves off-road racing and driving. It has amazing graphics, physics, sounds, and map. It has a wide range of 4x4 trucks and vehicles that you can tune and customize. It has various challenges and time trials that you can compete in. It has extreme obstacles and terrain that you can explore. You can download the game from the Google Play Store, APK websites, or PC emulator. We hope you enjoyed this article and found it helpful. If you have any questions or feedback, please let us know in the comments section below.
-Here are some frequently asked questions about Off Road 4x4 Driving Simulator:
-If you are a fan of driving games and simulation games, you might have heard of Euro Truck Simulator 2, or ETS2 for short. This game lets you travel across Europe as a truck driver, delivering cargo and exploring different cities and landscapes. But how can you get ETS2 download for free? In this article, we will show you how to download Euro Truck Simulator 2 demo, full version, and mods from various sources. But first, let's take a look at what this game is all about.
-DOWNLOAD ✒ https://jinyurl.com/2uNNNG
Euro Truck Simulator 2 is a truck driving simulation game developed and published by SCS Software in 2012. It is the sequel to the original Euro Truck Simulator, which was released in 2008. The game features licensed trucks from various manufacturers, such as MAN, Scania, Volvo, DAF, Renault, and more. You can customize your truck with different parts, paint jobs, accessories, and decals. You can also run your own trucking business, hire drivers, buy garages, and manage your finances.
-Some of the main features of Euro Truck Simulator 2 are:
-To run Euro Truck Simulator 2 on your PC, you need to meet the following system requirements:
-Minimum | Recommended |
---|---|
OS: Windows 7 CPU: Dual core CPU 2.4 GHz RAM: 4 GB GPU: GeForce GTS 450-class (Intel HD 4000) HDD: 7 GB available space DirectX: DirectX 9.0c | OS: Windows 7/8.1/10 64-bit CPU: Quad core CPU 3.0 GHz RAM: 6 GB GPU: GeForce GTX 760-class (2 GB) HDD: 7 GB available space DirectX: DirectX 9.0c |
Euro Truck Simulator 2 has received overwhelmingly positive reviews from critics and players alike. The game has a score of 96% on Steam, based on over 470,000 user reviews. The game has also won several awards, such as the “I Thought This Game Was Cool Before It Won An Award” Award and the “Sit Back and Relax” Award from Steam Awards. Some of the praises for the game are:
-"Euro Truck Simulator 2 is that rare thing, a strong sim tethered to a strong game. Where other vehicle-obsessed devs seem to take player motivation for granted, Czech studio SCS understand that a pleasingly modelled steed needs a pleasingly modelled environment to shine." - PC Gamer-
"Euro Truck Simulator 2 reviews are mostly positive, praising the game as the best simulation game period and the best heavy vehicle simulator ever made. The game offers a realistic and varied environment of Europe and a pleasingly modelled steed to drive. The game is also praised for its modding community, which adds new content and features to the game." - Game Rant-
"Euro Truck Simulator 2 is a deep and rewarding game, and it was met with favorable reviews when it released back in 2012. It's maintained popularity with fans, who continue to produce mods that add new vehicles, maps, and more to the game. It's not often that a simulator game can appeal to a wide audience, but Euro Truck Simulator 2 does just that." - Screen Rant-
Now that you know what Euro Truck Simulator 2 is and why it is so popular, you might be wondering how to get ETS2 download for free. There are several ways to do that, depending on what you want to play. Here are some of the options:
-ets2 download free trial
-ets2 download steam key
-ets2 download full version
-ets2 download latest version
-ets2 download for pc
-ets2 download for mac
-ets2 download for linux
-ets2 download mods
-ets2 download map
-ets2 download crack
-ets2 download torrent
-ets2 download apk
-ets2 download android
-ets2 download ios
-ets2 download online
-ets2 download multiplayer
-ets2 download demo
-ets2 download patch
-ets2 download update
-ets2 download dlc
-ets2 download going east
-ets2 download scandinavia
-ets2 download vive la france
-ets2 download italia
-ets2 download road to the black sea
-ets2 download beyond the baltic sea
-ets2 download heart of russia
-ets2 download promods
-ets2 download truckersmp
-ets2 download truck skins
-ets2 download trailer skins
-ets2 download sound mods
-ets2 download traffic mods
-ets2 download weather mods
-ets2 download graphics mods
-ets2 download realistic mods
-ets2 download tuning mods
-ets2 download bus mods
-ets2 download car mods
-ets2 download save game
-ets2 download profile editor
-ets2 download cheat engine
-ets2 download money hack
-ets2 download level hack
-ets2 download console commands
-ets2 download radio stations
-ets2 download custom music
-ets2 download world of trucks account
If you want to try out the game before buying it, you can download the Euro Truck Simulator 2 demo from the official website. The demo version allows you to play for one hour with one of the basic trucks. You can also visit several cities in Germany, Austria, Switzerland, and Italy. The demo is compatible with Windows 7 or later, and requires about 4 GB of disk space.
-If you want to play the full version of the game, you can buy it from Steam, the popular digital distribution platform. The game costs $19.99 USD, but it often goes on sale for up to 75% off. You can also buy various DLCs (downloadable content) that add new maps, trucks, cargoes, and more to the game. Some of the most popular DLCs are:
-If you want to enhance your gameplay experience with custom content created by other players, you can download Euro Truck Simulator 2 mods from Steam Workshop or other websites. Mods are modifications that change or add new features to the game, such as new trucks, trailers, skins, sounds, maps, traffic, weather, and more. You can browse through thousands of mods and choose the ones that suit your preferences. To install mods from Steam Workshop, you need to subscribe to them and enable them in the game's mod manager. To install mods from other websites, you need to download them and copy them to the "mod" folder in your game directory.
-Euro Truck Simulator 2 is a fun and realistic truck driving simulation game that lets you explore Europe as a truck driver. You can download ETS2 for free by using the demo version from the official website, or by buying the full version from Steam. You can also download ETS2 mods from Steam Workshop or other websites to customize your game with new content and features. Whether you want to relax and enjoy the scenery, or challenge yourself with different cargoes and routes, Euro Truck Simulator 2 has something for everyone.
-Si eres un fan de los RPG de mundo abierto, es posible que hayas oído hablar de Genshin Impact, uno de los juegos más populares y aclamados de 2020. Genshin Impact es un MMORPG desarrollado por miHoYo Limited, la misma compañía detrás del exitoso juego de estilo anime Honkai Impact 3rd. En este artículo, te mostraremos cómo descargar y jugar Genshin Impact en tu dispositivo Android usando QooApp, una tienda de aplicaciones de terceros que ofrece una amplia gama de juegos de Asia. Pero primero, echemos un vistazo a lo que es Genshin Impact y por qué deberías jugarlo.
-Download –––––>>> https://bltlly.com/2v6KAx
Genshin Impact es un juego que tiene lugar en un mundo de fantasía llamado Teyvat, donde siete naciones son gobernadas por siete dioses de diferentes elementos. Juegas como un viajero que ha perdido a su hermano en un misterioso incidente, y te embarcas en una búsqueda para encontrarlos y descubrir los secretos de este mundo. En el camino, conocerás a varios personajes que se unirán a ti como compañeros, cada uno con sus propias personalidades, habilidades e historias únicas.
-Genshin Impact es un juego que ofrece mucha libertad y exploración. Puedes correr, escalar, nadar, deslizarte y volar a través de un vasto mundo abierto que está lleno de impresionantes paisajes, tesoros ocultos, rompecabezas, desafíos, enemigos y sorpresas. También puedes cambiar entre diferentes personajes en cualquier momento, y usar sus poderes elementales para crear varios combos y reacciones que pueden ayudarte en combate o exploración. También puedes personalizar tus personajes con diferentes armas, artefactos, talentos y constelaciones que se adapten a tu estilo de juego.
-Jugar Genshin Impact en Android tiene algunas ventajas sobre otras plataformas. Por un lado, puede utilizar controles táctiles que son intuitivos y fáciles de usar. También puede ajustar la configuración del juego para optimizar su rendimiento y duración de la batería de acuerdo con las especificaciones de su dispositivo. Además, puedes usar algunas funciones del juego que son exclusivas para dispositivos móviles, como tomar capturas de pantalla o grabar vídeos con un solo toque.
-QooApp es una tienda de aplicaciones de terceros que se especializa en juegos de Asia, especialmente Japón, Corea, China y Taiwán. Ofrece una gran selección de juegos de varios géneros, como juegos de rol, juegos de acción, juegos de simulación, juegos de cartas y mucho más. También puedes encontrar algunos juegos que no están disponibles en la tienda oficial de Google Play, como Genshin Impact, Fate/Grand Order, Honkai Impact 3rd y más.
- -QooApp es una tienda de aplicaciones segura y confiable que no requiere que rootee su dispositivo o use una VPN. Puedes descargar e instalar juegos desde QooApp sin problemas, y también puedes actualizarlos de forma automática o manual. QooApp también tiene una interfaz fácil de usar que le permite navegar, buscar y filtrar juegos por categorías, regiones, idiomas, calificaciones y popularidad. También puedes leer reseñas de juegos, noticias, guías y consejos de otros usuarios y personal de QooApp.
-Una de las razones por las que es posible que desee utilizar QooApp para descargar Genshin Impact es que es más rápido y más fácil que usar el sitio web oficial. No tienes que pasar por la molestia de escanear un código QR o introducir un código de verificación para descargar el juego. Simplemente puede buscar Genshin Impact en QooApp y toque en el botón de descarga. QooApp también le notificará cuando haya una nueva actualización para el juego, para que siempre pueda mantener su juego actualizado.
- -El primer paso para descargar Genshin Impact de QooApp es descargar e instalar QooApp en su dispositivo Android. Puedes hacer esto siguiendo estos sencillos pasos:
-El siguiente paso para descargar Genshin Impact de QooApp es buscar Genshin Impact en QooApp y toque en el botón de descarga. Puedes hacer esto siguiendo estos sencillos pasos:
-El tercer paso para descargar Genshin Impact de QooApp es esperar a que la descarga termine y toque en el botón de instalación. Puedes hacer esto siguiendo estos sencillos pasos:
-El paso final para descargar Genshin Impact de QooApp es lanzar Genshin Impact y disfrutar del juego. Puedes hacer esto siguiendo estos sencillos pasos:
-Como mencionamos antes, uno de los beneficios de jugar Genshin Impact en Android es que puedes vincular tus datos a través de diferentes plataformas utilizando tu cuenta miHoYo. Esto significa que puedes jugar el mismo juego con el mismo progreso, personajes, elementos y ajustes en diferentes dispositivos como PC, PS4, PS5, iOS y Android. Para hacer esto, debes seguir estos sencillos pasos:
-El último beneficio de jugar a Genshin Impact en Android es que puedes usar algunas características y funciones del juego que son exclusivas para dispositivos móviles o más convenientes en dispositivos móviles. Estas características y funciones pueden ayudarle a mejorar su experiencia de juego y hacer su vida más fácil. Aquí hay algunos ejemplos de estas características y funciones:
-Genshin Impact es un juego que deberías probar si te gustan los juegos de rol de mundo abierto. Es un juego que ofrece mucha libertad, exploración, aventura y diversión. También es un juego que puedes jugar en diferentes plataformas, incluyendo dispositivos Android. Sin embargo, si desea jugar Genshin Impact en dispositivos Android, es posible que desee utilizar QooApp para descargarlo en lugar de usar el sitio web oficial. QooApp es una tienda de aplicaciones de terceros que ofrece una forma más rápida y fácil de descargar Genshin Impact, así como algunos contenidos y eventos exclusivos que solo están disponibles para ciertas regiones. Para descargar Genshin Impact de QooApp, solo tiene que seguir cuatro sencillos pasos: descargar e instalar QooApp en su dispositivo, buscar Genshin Impact en QooApp y toque en el botón de descarga, espere a que la descarga termine y toque en el botón de instalación, y lanzar Genshin Impact y disfrutar del juego. También puedes optimizar la configuración de tu juego para mejorar el rendimiento y la duración de la batería, y usar algunas funciones y características del juego que son exclusivas o convenientes para dispositivos móviles.
-Si estás interesado en jugar Genshin Impact en dispositivos Android usando QooApp, no lo dudes más. ¡Descarga QooApp ahora y comienza tu viaje en Teyvat hoy mismo!
-Sí, Genshin Impact es gratis para jugar. No tienes que pagar nada para descargarlo o jugarlo. Sin embargo, tiene algunas compras opcionales en el juego que pueden ayudarte a progresar más rápido u obtener más artículos.
-Sí, QooApp es legal de usar. No viola ninguna ley o reglamento que prohíba la distribución o el consumo de juegos de diferentes regiones. Tampoco modifica ni hackea los juegos que ofrece. Sin embargo, siempre debes revisar los términos y condiciones de los juegos que descargues de QooApp, y asegurarte de no violar ninguna regla o acuerdo que tengan.
-Sí, puedes jugar a Genshin Impact con tus amigos. Genshin Impact tiene un modo cooperativo que te permite formar equipo con hasta otros tres jugadores y explorar el mundo, completar misiones, luchar contra enemigos y más. También puede unirse o crear una lista de amigos que le permite chatear, enviar regalos e invitarse entre sí al modo cooperativo. Para acceder al modo cooperativo o a la lista de amigos, primero debes alcanzar el rango de aventura 16.
-Los requisitos mínimos para jugar Genshin Impact en dispositivos Android son los siguientes:
-{utils.format_directory(OUTPUT_DIR)}
- """, every=3, elem_id="files"
- )
- download_btn = gr.Button("Download All Files")
-
- chat_history = gr.State([[None, None]])
- api = gr.State(None)
-
- def start(open_ai_key, ai_name, ai_role, top_5_goals):
- auto_api = AutoAPI(open_ai_key, ai_name, ai_role, top_5_goals)
- return gr.Column.update(visible=False), gr.Column.update(visible=True), auto_api
-
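-    # Stream the agent's reply: collect each message from the API, show the partial text with a trailing "..." while more is coming, then yield the final text.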
- def bot_response(chat, api):
- messages = []
- for message in api.get_chatbot_response():
- messages.append(message)
- chat[-1][1] = "\n".join(messages) + "..."
- yield chat
- chat[-1][1] = "\n".join(messages)
- yield chat
-
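-    # Forward a user message (or the auto-approval "Y", repeated `count` times) to the agent and stream each reply back into the chat history.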
- def send_message(count, chat, api, message="Y"):
- if message != "Y":
- count = 1
- for i in range(count):
- chat.append([message, None])
- yield chat, count - i
- api.send_message(message)
- for updated_chat in bot_response(chat, api):
- yield updated_chat, count - i
-
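-    # Enable or disable the approval button, the consecutive-yes slider and the custom-response box so they cannot be used while the agent is generating.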
- def activate_inputs():
- return {
- yes_btn: gr.Button.update(interactive=True),
- consecutive_yes: gr.Slider.update(interactive=True),
- custom_response: gr.Textbox.update(interactive=True),
- }
-
- def deactivate_inputs():
- return {
- yes_btn: gr.Button.update(interactive=False),
- consecutive_yes: gr.Slider.update(interactive=False),
- custom_response: gr.Textbox.update(interactive=False),
- }
-
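-    # Wire up the UI events: start the agent, approve steps with the "Yes" button, or submit custom feedback, disabling the inputs while a response streams.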
- start_btn.click(
- start,
- [open_ai_key, ai_name, ai_role, top_5_goals],
- [setup_pane, main_pane, api],
- ).then(bot_response, [chat_history, api], chatbot).then(
- activate_inputs, None, [yes_btn, consecutive_yes, custom_response]
- )
-
- yes_btn.click(
- deactivate_inputs, None, [yes_btn, consecutive_yes, custom_response]
- ).then(
- send_message, [consecutive_yes, chat_history, api], [chatbot, consecutive_yes]
- ).then(
- activate_inputs, None, [yes_btn, consecutive_yes, custom_response]
- )
- custom_response.submit(
- deactivate_inputs, None, [yes_btn, consecutive_yes, custom_response]
- ).then(
- send_message,
- [consecutive_yes, chat_history, api, custom_response],
- [chatbot, consecutive_yes],
- ).then(
- activate_inputs, None, [yes_btn, consecutive_yes, custom_response]
- )
-
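-    # Zip everything the agent wrote to OUTPUT_DIR so it can be downloaded from the browser in one file.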
- def download_all_files():
- shutil.make_archive("outputs", "zip", OUTPUT_DIR)
-
- download_btn.click(download_all_files).then(None, _js=utils.DOWNLOAD_OUTPUTS_JS)
-
-app.queue(concurrency_count=20).launch(file_directories=[OUTPUT_DIR])
diff --git a/spaces/Dimentian/LLMs-Stable-Vicuna-13B/README.md b/spaces/Dimentian/LLMs-Stable-Vicuna-13B/README.md
deleted file mode 100644
index d738fde6770df1012f79ab6811e4f420c75294ac..0000000000000000000000000000000000000000
--- a/spaces/Dimentian/LLMs-Stable-Vicuna-13B/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: LLMs Stable Vicuna 13B
-emoji: ⚡
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.28.3
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/DpNaze/webui-docker/on_start.sh b/spaces/DpNaze/webui-docker/on_start.sh
deleted file mode 100644
index 7d57b117b5cc42f7090998cecadefdcba9a2cc26..0000000000000000000000000000000000000000
--- a/spaces/DpNaze/webui-docker/on_start.sh
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/bin/bash
-set -euo pipefail
-
-function download-model() {
- local _option=$1
- local _filename=$2
- local _url=$3
- local _dir
-
- ! [ $# -eq 3 ] && (echo "usage: "; for o in checkpoint lora vae control-net embedding; do echo " \$ download-model --$o
-AI Dad Jokes is an AI system that writes humorous content inspired by images. Whether that's crafting memes, sharing light-hearted yet amiable jests, or playfully witty remarks, AI Dad Jokes assists you in creating delightful jokes!
-AI Dad Jokes is powered by IDEFICS, an open-access large visual language model developed by Hugging Face. Like GPT-4, the multimodal model accepts arbitrary sequences of image and text inputs and produces text outputs. IDEFICS can answer questions about images, describe visual content, create stories grounded in multiple images, etc.
-
-⛔️ Intended uses and limitations: This demo is provided as a research artifact to the community, showcasing IDEFICS' capabilities. We detail misuses and out-of-scope uses here. In particular, the system should not be used to engage in harassment, abuse and bullying. The model can produce factually incorrect texts, hallucinate facts (with or without an image) and will struggle with small details in images. While the system will tend to refuse answering questionable user requests, it can produce problematic outputs (including racist, stereotypical, and disrespectful texts), in particular when prompted to do so.
- """) - - with gr.Row(elem_id="model_selector_row"): - model_selector = gr.Dropdown( - choices=MODELS, - value="HuggingFaceM4/idefics-80b-instruct", - interactive=True, - show_label=False, - container=False, - label="Model", - visible=False, - ) - - with gr.Row(): - with gr.Column(): - imagebox = gr.Image(type="filepath", label="Image input", visible=True) - with gr.Group(): - with gr.Row(): - textbox.render() - submit_btn = gr.Button(value="▶️ Submit", visible=True) - with gr.Row(): - clear_btn = gr.ClearButton([textbox, imagebox, chatbot], value="🧹 Clear") - regenerate_btn = gr.Button(value="🔄 Regenerate", visible=True) - upload_btn = gr.UploadButton("📁 Upload image", file_types=["image"],visible=False) - with gr.Accordion("Advanced settings", open=False, visible=True) as parameter_row: - system_prompt = gr.Textbox( - value=SYSTEM_PROMPT, - visible=False, - lines=20, - max_lines=50, - interactive=True, - ) - max_new_tokens = gr.Slider( - minimum=8, - maximum=256, - value=128, - step=1, - interactive=True, - label="Maximum number of new tokens to generate", - ) - repetition_penalty = gr.Slider( - minimum=0.0, - maximum=5.0, - value=1.2, - step=0.01, - interactive=True, - label="Repetition penalty", - info="1.0 is equivalent to no penalty", - ) - decoding_strategy = gr.Radio( - [ - "Greedy", - "Top P Sampling", - ], - value="Top P Sampling", - label="Decoding strategy", - interactive=True, - info="Higher values is equivalent to sampling more low-probability tokens.", - ) - temperature = gr.Slider( - minimum=0.0, - maximum=5.0, - value=0.6, - step=0.1, - interactive=True, - visible=True, - label="Sampling temperature", - info="Higher values will produce more diverse outputs.", - ) - decoding_strategy.change( - fn=lambda selection: gr.Slider.update( - visible=( - selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"] - ) - ), - inputs=decoding_strategy, - outputs=temperature, - ) - top_p = gr.Slider( - minimum=0.01, - maximum=0.99, - value=0.8, - step=0.01, - interactive=True, - visible=True, - label="Top P", - info="Higher values is equivalent to sampling more low-probability tokens.", - ) - decoding_strategy.change( - fn=lambda selection: gr.Slider.update(visible=(selection in ["Top P Sampling"])), - inputs=decoding_strategy, - outputs=top_p, - ) - with gr.Column(): - chatbot.render() - - def model_inference( - model_selector, - system_prompt, - user_prompt_str, - chat_history, - image, - decoding_strategy, - temperature, - max_new_tokens, - repetition_penalty, - top_p, - ): - if user_prompt_str.strip() == "" and image is None: - return "", None, chat_history - - system_prompt = ast.literal_eval(system_prompt) - formated_prompt_list, user_prompt_list = format_user_prompt_with_im_history_and_system_conditioning( - system_prompt=system_prompt, - current_user_prompt_str=user_prompt_str.strip(), - current_image=image, - history=chat_history, - ) - - client_endpoint = API_PATHS[model_selector] - client = Client( - base_url=client_endpoint, - headers={"x-use-cache": "0", "Authorization": f"Bearer {API_TOKEN}"}, - ) - - # Common parameters to all decoding strategies - # This documentation is useful to read: https://huggingface.co/docs/transformers/main/en/generation_strategies - generation_args = { - "max_new_tokens": max_new_tokens, - "repetition_penalty": repetition_penalty, - "stop_sequences": EOS_STRINGS, - } - - assert decoding_strategy in [ - "Greedy", - "Top P Sampling", - ] - if decoding_strategy == "Greedy": - generation_args["do_sample"] = False - 
elif decoding_strategy == "Top P Sampling": - generation_args["temperature"] = temperature - generation_args["do_sample"] = True - generation_args["top_p"] = top_p - - if image is None: - # Case where there is no image OR the image is passed as `- Example images from Whatisnewyork. -
-
-* **Convolutional Neural Networks (CNN)**
-  + [Language Modeling with Gated Convolutional Networks (Dauphin et al., 2017)](examples/language_model/conv_lm/README.md)
-  + [Convolutional Sequence to Sequence Learning (Gehring et al., 2017)](examples/conv_seq2seq/README.md)
-  + [Classical Structured Prediction Losses for Sequence to Sequence Learning (Edunov et al., 2018)](https://github.com/pytorch/fairseq/tree/classic_seqlevel)
-  + [Hierarchical Neural Story Generation (Fan et al., 2018)](examples/stories/README.md)
-  + [wav2vec: Unsupervised Pre-training for Speech Recognition (Schneider et al., 2019)](examples/wav2vec/README.md)
-* **LightConv and DynamicConv models**
-  + [Pay Less Attention with Lightweight and Dynamic Convolutions (Wu et al., 2019)](examples/pay_less_attention_paper/README.md)
-* **Long Short-Term Memory (LSTM) networks**
-  + Effective Approaches to Attention-based Neural Machine Translation (Luong et al., 2015)
-* **Transformer (self-attention) networks**
-  + Attention Is All You Need (Vaswani et al., 2017)
-  + [Scaling Neural Machine Translation (Ott et al., 2018)](examples/scaling_nmt/README.md)
-  + [Understanding Back-Translation at Scale (Edunov et al., 2018)](examples/backtranslation/README.md)
-  + [Adaptive Input Representations for Neural Language Modeling (Baevski and Auli, 2018)](examples/language_model/README.adaptive_inputs.md)
-  + [Lexically constrained decoding with dynamic beam allocation (Post & Vilar, 2018)](examples/constrained_decoding/README.md)
-  + [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context (Dai et al., 2019)](examples/truncated_bptt/README.md)
-  + [Adaptive Attention Span in Transformers (Sukhbaatar et al., 2019)](examples/adaptive_span/README.md)
-  + [Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)](examples/translation_moe/README.md)
-  + [RoBERTa: A Robustly Optimized BERT Pretraining Approach (Liu et al., 2019)](examples/roberta/README.md)
-  + [Facebook FAIR's WMT19 News Translation Task Submission (Ng et al., 2019)](examples/wmt19/README.md)
-  + [Jointly Learning to Align and Translate with Transformer Models (Garg et al., 2019)](examples/joint_alignment_translation/README.md)
-  + [Multilingual Denoising Pre-training for Neural Machine Translation (Liu et al., 2020)](examples/mbart/README.md)
-  + [Neural Machine Translation with Byte-Level Subwords (Wang et al., 2020)](examples/byte_level_bpe/README.md)
-  + [Unsupervised Quality Estimation for Neural Machine Translation (Fomicheva et al., 2020)](examples/unsupervised_quality_estimation/README.md)
-  + [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations (Baevski et al., 2020)](examples/wav2vec/README.md)
-  + [Generating Medical Reports from Patient-Doctor Conversations Using Sequence-to-Sequence Models (Enarvi et al., 2020)](examples/pointer_generator/README.md)
-  + [Linformer: Self-Attention with Linear Complexity (Wang et al., 2020)](examples/linformer/README.md)
-  + [Cross-lingual Retrieval for Iterative Self-Supervised Training (Tran et al., 2020)](examples/criss/README.md)
-  + [Deep Transformers with Latent Depth (Li et al., 2020)](examples/latent_depth/README.md)
-  + [Unsupervised Cross-lingual Representation Learning for Speech Recognition (Conneau et al., 2020)](https://arxiv.org/abs/2006.13979)
-  + [Robust wav2vec 2.0: Analyzing Domain Shift in Self-Supervised Pre-Training (Hsu, et al., 2021)](https://arxiv.org/abs/2104.01027)
-  + [Unsupervised Speech Recognition (Baevski, et al., 2021)](https://arxiv.org/abs/2105.11084)
-* **Non-autoregressive Transformers**
-  + Non-Autoregressive Neural Machine Translation (Gu et al., 2017)
-  + Deterministic Non-Autoregressive Neural Sequence Modeling by Iterative Refinement (Lee et al. 2018)
-  + Insertion Transformer: Flexible Sequence Generation via Insertion Operations (Stern et al. 2019)
-  + Mask-Predict: Parallel Decoding of Conditional Masked Language Models (Ghazvininejad et al., 2019)
-  + [Levenshtein Transformer (Gu et al., 2019)](examples/nonautoregressive_translation/README.md)
-* **Finetuning**
-  + [Better Fine-Tuning by Reducing Representational Collapse (Aghajanyan et al. 2020)](examples/rxf/README.md)
-
-* September 2020: [Added Linformer code](examples/linformer/README.md)
-* September 2020: [Added pointer-generator networks](examples/pointer_generator/README.md)
-* August 2020: [Added lexically constrained decoding](examples/constrained_decoding/README.md)
-* August 2020: [wav2vec2 models and code released](examples/wav2vec/README.md)
-* July 2020: [Unsupervised Quality Estimation code released](examples/unsupervised_quality_estimation/README.md)
-* May 2020: [Follow fairseq on Twitter](https://twitter.com/fairseq)
-* April 2020: [Monotonic Multihead Attention code released](examples/simultaneous_translation/README.md)
-* April 2020: [Quant-Noise code released](examples/quant_noise/README.md)
-* April 2020: [Initial model parallel support and 11B parameters unidirectional LM released](examples/megatron_11b/README.md)
-* March 2020: [Byte-level BPE code released](examples/byte_level_bpe/README.md)
-* February 2020: [mBART model and code released](examples/mbart/README.md)
-* February 2020: [Added tutorial for back-translation](https://github.com/pytorch/fairseq/tree/main/examples/backtranslation#training-your-own-model-wmt18-english-german)
-* December 2019: [fairseq 0.9.0 released](https://github.com/pytorch/fairseq/releases/tag/v0.9.0)
-* November 2019: [VizSeq released (a visual analysis toolkit for evaluating fairseq models)](https://facebookresearch.github.io/vizseq/docs/getting_started/fairseq_example)
-* November 2019: [CamemBERT model and code released](examples/camembert/README.md)
-* November 2019: [BART model and code released](examples/bart/README.md)
-* November 2019: [XLM-R models and code released](examples/xlmr/README.md)
-* September 2019: [Nonautoregressive translation code released](examples/nonautoregressive_translation/README.md)
-* August 2019: [WMT'19 models released](examples/wmt19/README.md)
-* July 2019: fairseq relicensed under MIT license
-* July 2019: [RoBERTa models and code released](examples/roberta/README.md)
-* June 2019: [wav2vec models and code released](examples/wav2vec/README.md)
-
''' + book_base.brace_matcher(10) + r''') }
- )''',
-
- # Accept both
- # \begin[options]{lilypond}
- # and
- # \begin{lilypond}[options]
- 'lilypond_block':
- r'''(?smx)
- ^ [^%\n]*?
- (?P
- \\begin
- \s*
- (?P {lilypond} \s* )?
- ( \[ \s* (?P [^\[\]]*? ) \s* \] )?
- (?(env) | \s* {lilypond} )
- (?P .*? )
- ^ [^%\n]*?
- \\end \s* {lilypond}
- )''',
-
- 'lilypond_file':
- r'''(?smx)
- ^ [^%\n]*?
- (?P
- \\lilypondfile
- \s*
- ( \[ \s* (?P [^\[\]]*? ) \s* \] )?
- \s*
- { (?P \S+? ) }
- )''',
-
- 'musicxml_file':
- r'''(?smx)
- ^ [^%\n]*?
- (?P
- \\musicxmlfile
- \s*
- ( \[ \s* (?P [^\[\]]*? ) \s* \] )?
- \s*
- { (?P \S+? ) }
- )''',
-
- 'singleline_comment':
- r'''(?mx)
- ^.*?
- (?P
- (?P
- %.*$\n+))''',
-
- 'verb':
- r'''(?mx)
- ^[^%\n]*?
- (?P
- (?P
- \\verb(?P.)
- .*?
- (?P=del)))''',
-
- 'verbatim':
- r'''(?msx)
- ^[^%\n]*?
- (?P
- (?P
- \\begin\s*{verbatim}
- .*?
- \\end\s*{verbatim}))''',
-
- 'lilypondversion':
- r'''(?smx)
- (?P
- \\lilypondversion)[^a-zA-Z]''',
-}
-
-Latex_output = {
- book_snippets.FILTER: r'''\begin{lilypond}[%(options)s]
-%(code)s
-\end{lilypond}''',
-
- book_snippets.OUTPUT: r'''{%%
-\parindent 0pt
-\noindent
-\ifx\preLilyPondExample \undefined
-\else
- \expandafter\preLilyPondExample
-\fi
-\def\lilypondbook{}%%
-\input{%(base)s-systems.tex}%%
-\ifx\postLilyPondExample \undefined
-\else
- \expandafter\postLilyPondExample
-\fi
-}''',
-
- book_snippets.PRINTFILENAME: r'''\texttt{%(filename)s}
-\linebreak
-''',
-
- book_snippets.QUOTE: r'''\begin{quote}
-%(str)s
-\end{quote}''',
-
- book_snippets.VERBATIM: r'''\noindent
-\begin{verbatim}%(verb)s\end{verbatim}
-''',
-
- book_snippets.VERSION: r'''%(program_version)s''',
-}
-
-
-###
-# Retrieve dimensions from LaTeX
-LATEX_INSPECTION_DOCUMENT = r'''
-\nonstopmode
-%(preamble)s
-\begin{document}
-\typeout{textwidth=\the\textwidth}
-\typeout{columnsep=\the\columnsep}
-\makeatletter\if@twocolumn\typeout{columns=2}\fi\makeatother
-\end{document}
-'''
-
-# Do we need anything else besides `textwidth'?
-
-
-def get_latex_textwidth(source, global_options):
- # default value
- textwidth = 550.0
-
- m = re.search(r'''(?P\\begin\s*{document})''', source)
- if m is None:
- ly.warning(_("cannot find \\begin{document} in LaTeX document"))
- return textwidth
-
- preamble = source[:m.start(0)]
- latex_document = LATEX_INSPECTION_DOCUMENT % {'preamble': preamble}
-
- (handle, tmpfile) = tempfile.mkstemp('.tex')
- tmpfileroot = os.path.splitext(tmpfile)[0]
- tmpfileroot = os.path.split(tmpfileroot)[1]
- auxfile = tmpfileroot + '.aux'
- logfile = tmpfileroot + '.log'
-
- tmp_handle = os.fdopen(handle, 'w')
- tmp_handle.write(latex_document)
- tmp_handle.close()
-
- ly.progress(_("Running `%s' on file `%s' to detect default page settings.\n")
- % (global_options.latex_program, tmpfile))
- cmd = '%s %s' % (global_options.latex_program, tmpfile)
- ly.debug_output("Executing: %s\n" % cmd)
- run_env = os.environ.copy()
- run_env['LC_ALL'] = 'C'
- run_env['TEXINPUTS'] = os.path.pathsep.join(
- (global_options.input_dir,
- run_env.get('TEXINPUTS', '')))
-
- # unknown why this is necessary
- universal_newlines = True
- if sys.platform == 'mingw32':
- universal_newlines = False
- # use os.system to avoid weird sleep() problems on
- # GUB's python 2.4.2 on mingw
- # make file to write to
- output_dir = tempfile.mkdtemp()
- output_filename = os.path.join(output_dir, 'output.txt')
- # call command
- cmd += " > %s" % output_filename
- oldtexinputs = os.environ.get('TEXINPUTS')
- os.environ['TEXINPUTS'] = run_env['TEXINPUTS']
- returncode = os.system(cmd)
- if oldtexinputs:
- os.environ['TEXINPUTS'] = oldtexinputs
- else:
- del os.environ['TEXINPUTS']
- parameter_string = open(output_filename, encoding="utf8").read()
- if returncode != 0:
- ly.warning(_("Unable to auto-detect default settings:\n"))
- # clean up
- os.remove(output_filename)
- os.rmdir(output_dir)
- else:
- proc = subprocess.Popen(cmd,
- env=run_env,
- universal_newlines=universal_newlines,
- shell=True,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (parameter_string, error_string) = proc.communicate()
- if proc.returncode != 0:
- ly.warning(_("Unable to auto-detect default settings:\n%s")
- % error_string)
- os.unlink(tmpfile)
- if os.path.exists(auxfile):
- os.unlink(auxfile)
- if os.path.exists(logfile):
- parameter_string = open(logfile, encoding="utf8").read()
- os.unlink(logfile)
-
- columns = 0
- m = re.search('columns=([0-9.]+)', parameter_string)
- if m:
- columns = int(m.group(1))
-
- columnsep = 0
- m = re.search('columnsep=([0-9.]+)pt', parameter_string)
- if m:
- columnsep = float(m.group(1))
-
- m = re.search('textwidth=([0-9.]+)pt', parameter_string)
- if m:
- textwidth = float(m.group(1))
- else:
- ly.warning(_("cannot detect textwidth from LaTeX"))
- return textwidth
-
- ly.debug_output('Detected values:')
- ly.debug_output(' columns = %s' % columns)
- ly.debug_output(' columnsep = %s' % columnsep)
- ly.debug_output(' textwidth = %s' % textwidth)
-
- if m and columns:
- textwidth = (textwidth - columnsep) / columns
- ly.debug_output('Adjusted value:')
- ly.debug_output(' textwidth = %s' % textwidth)
-
- return textwidth
-
-
-def modify_preamble(chunk):
- s = chunk.replacement_text()
- if (re.search(r"\\begin *{document}", s)
- and not re.search("{graphic[sx]", s)):
- s = re.sub(r"\\begin{document}",
- r"\\usepackage{graphics}" + '\n'
- + r"\\begin{document}",
- s)
- chunk.override_text = s
-
-
-class BookLatexOutputFormat (book_base.BookOutputFormat):
- def __init__(self):
- book_base.BookOutputFormat.__init__(self)
- self.format = "latex"
- self.default_extension = ".tex"
- self.snippet_res = Latex_snippet_res
- self.output = Latex_output
- self.handled_extensions = ['.latex', '.lytex', '.tex']
- self.image_formats = "ps"
- self.snippet_option_separator = r'\s*,\s*'
-
- def process_options(self, global_options):
- self.process_options_pdfnotdefault(global_options)
-
- def get_line_width(self, source):
- textwidth = get_latex_textwidth(source, self.global_options)
- return '%.0f\\pt' % textwidth
-
- def input_fullname(self, input_filename):
- # Use kpsewhich if available, otherwise fall back to the default:
- try:
- input_fullname = subprocess.run(['kpsewhich', input_filename],
- check=True,
- encoding='utf-8',
- stdout=subprocess.PIPE,
- universal_newlines=True).stdout
-
- input_fullname = input_fullname.strip("\n")
-
- except (subprocess.CalledProcessError, FileNotFoundError):
- input_fullname = book_base.BookOutputFormat.input_fullname(
- self,
- input_filename)
-
- return input_fullname
-
- def process_chunks(self, chunks):
- for c in chunks:
- if (c.is_plain() and
- re.search(r"\\begin *{document}", c.replacement_text())):
- modify_preamble(c)
- break
- return chunks
-
- def snippet_output(self, basename, snippet):
- s = ''
- rep = snippet.get_replacements()
- rep['base'] = basename.replace('\\', '/')
- rep['filename'] = os.path.basename(snippet.filename).replace('\\', '/')
- rep['ext'] = snippet.ext
- if book_snippets.PRINTFILENAME in snippet.option_dict:
- s += self.output[book_snippets.PRINTFILENAME] % rep
- if book_snippets.VERBATIM in snippet.option_dict:
- rep['verb'] = snippet.verb_ly()
- s += self.output[book_snippets.VERBATIM] % rep
-
- s += self.output[book_snippets.OUTPUT] % rep
-
- # todo: maintain breaks
- if 0:
- breaks = snippet.ly().count("\n")
- s += "".ljust(breaks, "\n").replace("\n", "%\n")
-
- if book_snippets.QUOTE in snippet.option_dict:
- s = self.output[book_snippets.QUOTE] % {'str': s}
-
- return s
-
-
-book_base.register_format(BookLatexOutputFormat())
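The two-column adjustment at the end of `get_latex_textwidth` is worth a quick illustration. The sketch below restates it with made-up numbers; nothing here is parsed from a real LaTeX log.

```python
# Stand-alone sketch of the column adjustment in get_latex_textwidth.
# The point values are hypothetical; a real run reads them from the
# \typeout lines emitted by the LaTeX inspection document.
def adjusted_textwidth(textwidth_pt: float, columns: int, columnsep_pt: float) -> float:
    """Usable line width of a single column, in TeX points."""
    if columns > 1:
        return (textwidth_pt - columnsep_pt) / columns
    return textwidth_pt


print(adjusted_textwidth(455.0, 2, 10.0))  # 222.5 -> each column gets roughly half
print(adjusted_textwidth(345.0, 1, 10.0))  # 345.0 -> single-column layout unchanged
```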
diff --git a/spaces/RMXK/RVC_HFF/lib/uvr5_pack/lib_v5/layers_537238KB.py b/spaces/RMXK/RVC_HFF/lib/uvr5_pack/lib_v5/layers_537238KB.py
deleted file mode 100644
index a38b7bb3ae3136b07eadfc2db445fef4c2de186b..0000000000000000000000000000000000000000
--- a/spaces/RMXK/RVC_HFF/lib/uvr5_pack/lib_v5/layers_537238KB.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from . import spec_utils
-
-
-class Conv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(Conv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nout,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- bias=False,
- ),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class SeperableConv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(SeperableConv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nin,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- groups=nin,
- bias=False,
- ),
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class Encoder(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
- super(Encoder, self).__init__()
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
- def __call__(self, x):
- skip = self.conv1(x)
- h = self.conv2(skip)
-
- return h, skip
-
-
-class Decoder(nn.Module):
- def __init__(
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
- ):
- super(Decoder, self).__init__()
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.dropout = nn.Dropout2d(0.1) if dropout else None
-
- def __call__(self, x, skip=None):
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
- if skip is not None:
- skip = spec_utils.crop_center(skip, x)
- x = torch.cat([x, skip], dim=1)
- h = self.conv(x)
-
- if self.dropout is not None:
- h = self.dropout(h)
-
- return h
-
-
-class ASPPModule(nn.Module):
- def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
- super(ASPPModule, self).__init__()
- self.conv1 = nn.Sequential(
- nn.AdaptiveAvgPool2d((1, None)),
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
- )
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
- self.conv3 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
- )
- self.conv4 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
- )
- self.conv5 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.conv6 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.conv7 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.bottleneck = nn.Sequential(
- Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
- )
-
- def forward(self, x):
- _, _, h, w = x.size()
- feat1 = F.interpolate(
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
- )
- feat2 = self.conv2(x)
- feat3 = self.conv3(x)
- feat4 = self.conv4(x)
- feat5 = self.conv5(x)
- feat6 = self.conv6(x)
- feat7 = self.conv7(x)
- out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
- bottle = self.bottleneck(out)
- return bottle
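For orientation, here is a small shape check of the blocks above on random input. It assumes the classes are importable from this module and that `spec_utils.crop_center` leaves the skip tensor untouched when both tensors already share a size.

```python
import torch

# Random spectrogram-like input: (batch, channels, frequency bins, frames).
x = torch.randn(1, 16, 64, 128)

enc = Encoder(16, 32, ksize=3, stride=2, pad=1)
h, skip = enc(x)            # h: (1, 32, 32, 64) after the stride-2 conv; skip: (1, 32, 64, 128)

aspp = ASPPModule(32, 64)
bottleneck = aspp(h)        # (1, 64, 32, 64): seven branches concatenated, then a 1x1 bottleneck

dec = Decoder(64 + 32, 16)  # nin must equal upsampled channels + skip channels
out = dec(bottleneck, skip=skip)
print(out.shape)            # (1, 16, 64, 128)
```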
diff --git a/spaces/Realcat/image-matching-webui/hloc/utils/geometry.py b/spaces/Realcat/image-matching-webui/hloc/utils/geometry.py
deleted file mode 100644
index 1f33e2e3a61e35ca5e4b457ca7b4d75ca4c9ccb8..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/hloc/utils/geometry.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import numpy as np
-import pycolmap
-
-
-def to_homogeneous(p):
- return np.pad(p, ((0, 0),) * (p.ndim - 1) + ((0, 1),), constant_values=1)
-
-
-def vector_to_cross_product_matrix(v):
- return np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
-
-
-def compute_epipolar_errors(qvec_r2t, tvec_r2t, p2d_r, p2d_t):
- T_r2t = pose_matrix_from_qvec_tvec(qvec_r2t, tvec_r2t)
- # Compute errors in normalized plane to avoid distortion.
- E = vector_to_cross_product_matrix(T_r2t[:3, -1]) @ T_r2t[:3, :3]
- l2d_r2t = (E @ to_homogeneous(p2d_r).T).T
- l2d_t2r = (E.T @ to_homogeneous(p2d_t).T).T
- errors_r = np.abs(
- np.sum(to_homogeneous(p2d_r) * l2d_t2r, axis=1)
- ) / np.linalg.norm(l2d_t2r[:, :2], axis=1)
- errors_t = np.abs(
- np.sum(to_homogeneous(p2d_t) * l2d_r2t, axis=1)
- ) / np.linalg.norm(l2d_r2t[:, :2], axis=1)
- return E, errors_r, errors_t
-
-
-def pose_matrix_from_qvec_tvec(qvec, tvec):
- pose = np.zeros((4, 4))
- pose[:3, :3] = pycolmap.qvec_to_rotmat(qvec)
- pose[:3, -1] = tvec
- pose[-1, -1] = 1
- return pose
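A minimal usage sketch for the helpers above, with an identity rotation, a unit baseline, and random points on the normalized image plane; the import path is an assumption based on this file's location.

```python
import numpy as np
from hloc.utils.geometry import compute_epipolar_errors  # assumed import path

qvec = np.array([1.0, 0.0, 0.0, 0.0])   # identity rotation as (w, x, y, z)
tvec = np.array([1.0, 0.0, 0.0])        # unit baseline along x
p2d_r = np.random.rand(50, 2)           # keypoints in the reference image (normalized plane)
p2d_t = np.random.rand(50, 2)           # keypoints in the target image (normalized plane)

E, errors_r, errors_t = compute_epipolar_errors(qvec, tvec, p2d_r, p2d_t)
print(E.shape, errors_r.shape, errors_t.shape)  # (3, 3) (50,) (50,)
```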
diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/utils/plotting.py b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/utils/plotting.py
deleted file mode 100644
index 0ca3ef0a336a652e7ca910a5584227da043ac019..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/utils/plotting.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import bisect
-import numpy as np
-import matplotlib.pyplot as plt
-import matplotlib
-from copy import deepcopy
-
-
-def _compute_conf_thresh(data):
- dataset_name = data["dataset_name"][0].lower()
- if dataset_name == "scannet":
- thr = 5e-4
- elif dataset_name == "megadepth" or dataset_name == "gl3d":
- thr = 1e-4
- else:
- raise ValueError(f"Unknown dataset: {dataset_name}")
- return thr
-
-
-# --- VISUALIZATION --- #
-
-
-def make_matching_figure(
- img0,
- img1,
- mkpts0,
- mkpts1,
- color,
- kpts0=None,
- kpts1=None,
- text=[],
- dpi=75,
- path=None,
-):
- # draw image pair
- assert (
- mkpts0.shape[0] == mkpts1.shape[0]
- ), f"mkpts0: {mkpts0.shape[0]} v.s. mkpts1: {mkpts1.shape[0]}"
- fig, axes = plt.subplots(1, 2, figsize=(10, 6), dpi=dpi)
- axes[0].imshow(img0, cmap="gray")
- axes[1].imshow(img1, cmap="gray")
- for i in range(2): # clear all frames
- axes[i].get_yaxis().set_ticks([])
- axes[i].get_xaxis().set_ticks([])
- for spine in axes[i].spines.values():
- spine.set_visible(False)
- plt.tight_layout(pad=1)
-
- if kpts0 is not None:
- assert kpts1 is not None
- axes[0].scatter(kpts0[:, 0], kpts0[:, 1], c="w", s=2)
- axes[1].scatter(kpts1[:, 0], kpts1[:, 1], c="w", s=2)
-
- # draw matches
- if mkpts0.shape[0] != 0 and mkpts1.shape[0] != 0:
- fig.canvas.draw()
- transFigure = fig.transFigure.inverted()
- fkpts0 = transFigure.transform(axes[0].transData.transform(mkpts0))
- fkpts1 = transFigure.transform(axes[1].transData.transform(mkpts1))
- fig.lines = [
- matplotlib.lines.Line2D(
- (fkpts0[i, 0], fkpts1[i, 0]),
- (fkpts0[i, 1], fkpts1[i, 1]),
- transform=fig.transFigure,
- c=color[i],
- linewidth=1,
- )
- for i in range(len(mkpts0))
- ]
-
- axes[0].scatter(mkpts0[:, 0], mkpts0[:, 1], c=color, s=4)
- axes[1].scatter(mkpts1[:, 0], mkpts1[:, 1], c=color, s=4)
-
- # put txts
- txt_color = "k" if img0[:100, :200].mean() > 200 else "w"
- fig.text(
- 0.01,
- 0.99,
- "\n".join(text),
- transform=fig.axes[0].transAxes,
- fontsize=15,
- va="top",
- ha="left",
- color=txt_color,
- )
-
- # save or return figure
- if path:
- plt.savefig(str(path), bbox_inches="tight", pad_inches=0)
- plt.close()
- else:
- return fig
-
-
-def _make_evaluation_figure(data, b_id, alpha="dynamic"):
- b_mask = data["m_bids"] == b_id
- conf_thr = _compute_conf_thresh(data)
-
- img0 = (data["image0"][b_id][0].cpu().numpy() * 255).round().astype(np.int32)
- img1 = (data["image1"][b_id][0].cpu().numpy() * 255).round().astype(np.int32)
- kpts0 = data["mkpts0_f"][b_mask].cpu().numpy()
- kpts1 = data["mkpts1_f"][b_mask].cpu().numpy()
-
- # for megadepth, we visualize matches on the resized image
- if "scale0" in data:
- kpts0 = kpts0 / data["scale0"][b_id].cpu().numpy()[[1, 0]]
- kpts1 = kpts1 / data["scale1"][b_id].cpu().numpy()[[1, 0]]
- epi_errs = data["epi_errs"][b_mask].cpu().numpy()
- correct_mask = epi_errs < conf_thr
- precision = np.mean(correct_mask) if len(correct_mask) > 0 else 0
- n_correct = np.sum(correct_mask)
- n_gt_matches = int(data["conf_matrix_gt"][b_id].sum().cpu())
- recall = 0 if n_gt_matches == 0 else n_correct / (n_gt_matches)
- # recall might be larger than 1, since the calculation of conf_matrix_gt
- # uses groundtruth depths and camera poses, but epipolar distance is used here.
-
- # matching info
- if alpha == "dynamic":
- alpha = dynamic_alpha(len(correct_mask))
- color = error_colormap(epi_errs, conf_thr, alpha=alpha)
-
- text = [
- f"#Matches {len(kpts0)}",
- f"Precision({conf_thr:.2e}) ({100 * precision:.1f}%): {n_correct}/{len(kpts0)}",
- f"Recall({conf_thr:.2e}) ({100 * recall:.1f}%): {n_correct}/{n_gt_matches}",
- ]
-
- # make the figure
- figure = make_matching_figure(img0, img1, kpts0, kpts1, color, text=text)
- return figure
-
-
-def _make_evaluation_figure_offset(data, b_id, alpha="dynamic", side=""):
- layer_num = data["predict_flow"][0].shape[0]
-
- b_mask = data["offset_bids" + side] == b_id
- conf_thr = 2e-3 # hardcode for scannet(coarse level)
- img0 = (data["image0"][b_id][0].cpu().numpy() * 255).round().astype(np.int32)
- img1 = (data["image1"][b_id][0].cpu().numpy() * 255).round().astype(np.int32)
-
- figure_list = []
- # draw offset matches in different layers
- for layer_index in range(layer_num):
- l_mask = data["offset_lids" + side] == layer_index
- mask = l_mask & b_mask
- kpts0 = data["offset_kpts0_f" + side][mask].cpu().numpy()
- kpts1 = data["offset_kpts1_f" + side][mask].cpu().numpy()
-
- epi_errs = data["epi_errs_offset" + side][mask].cpu().numpy()
- correct_mask = epi_errs < conf_thr
-
- precision = np.mean(correct_mask) if len(correct_mask) > 0 else 0
- n_correct = np.sum(correct_mask)
- n_gt_matches = int(data["conf_matrix_gt"][b_id].sum().cpu())
- recall = 0 if n_gt_matches == 0 else n_correct / (n_gt_matches)
- # recall might be larger than 1, since the calculation of conf_matrix_gt
- # uses groundtruth depths and camera poses, but epipolar distance is used here.
-
- # matching info
- if alpha == "dynamic":
- alpha = dynamic_alpha(len(correct_mask))
- color = error_colormap(epi_errs, conf_thr, alpha=alpha)
-
- text = [
- f"#Matches {len(kpts0)}",
- f"Precision({conf_thr:.2e}) ({100 * precision:.1f}%): {n_correct}/{len(kpts0)}",
- f"Recall({conf_thr:.2e}) ({100 * recall:.1f}%): {n_correct}/{n_gt_matches}",
- ]
-
- # make the figure
- # import pdb;pdb.set_trace()
- figure = make_matching_figure(
- deepcopy(img0), deepcopy(img1), kpts0, kpts1, color, text=text
- )
- figure_list.append(figure)
- return figure
-
-
-def _make_confidence_figure(data, b_id):
- # TODO: Implement confidence figure
- raise NotImplementedError()
-
-
-def make_matching_figures(data, config, mode="evaluation"):
- """Make matching figures for a batch.
-
- Args:
- data (Dict): a batch updated by PL_LoFTR.
- config (Dict): matcher config
- Returns:
-        figures (Dict[str, List[plt.figure]])
- """
- assert mode in ["evaluation", "confidence"] # 'confidence'
- figures = {mode: []}
- for b_id in range(data["image0"].size(0)):
- if mode == "evaluation":
- fig = _make_evaluation_figure(
- data, b_id, alpha=config.TRAINER.PLOT_MATCHES_ALPHA
- )
- elif mode == "confidence":
- fig = _make_confidence_figure(data, b_id)
- else:
- raise ValueError(f"Unknown plot mode: {mode}")
- figures[mode].append(fig)
- return figures
-
-
-def make_matching_figures_offset(data, config, mode="evaluation", side=""):
- """Make matching figures for a batch.
-
- Args:
- data (Dict): a batch updated by PL_LoFTR.
- config (Dict): matcher config
- Returns:
-        figures (Dict[str, List[plt.figure]])
- """
- assert mode in ["evaluation", "confidence"] # 'confidence'
- figures = {mode: []}
- for b_id in range(data["image0"].size(0)):
- if mode == "evaluation":
- fig = _make_evaluation_figure_offset(
- data, b_id, alpha=config.TRAINER.PLOT_MATCHES_ALPHA, side=side
- )
- elif mode == "confidence":
- fig = _make_evaluation_figure_offset(data, b_id)
- else:
- raise ValueError(f"Unknown plot mode: {mode}")
- figures[mode].append(fig)
- return figures
-
-
-def dynamic_alpha(
- n_matches, milestones=[0, 300, 1000, 2000], alphas=[1.0, 0.8, 0.4, 0.2]
-):
- if n_matches == 0:
- return 1.0
- ranges = list(zip(alphas, alphas[1:] + [None]))
- loc = bisect.bisect_right(milestones, n_matches) - 1
- _range = ranges[loc]
- if _range[1] is None:
- return _range[0]
- return _range[1] + (milestones[loc + 1] - n_matches) / (
- milestones[loc + 1] - milestones[loc]
- ) * (_range[0] - _range[1])
-
-
-def error_colormap(err, thr, alpha=1.0):
-    assert alpha <= 1.0 and alpha > 0, f"Invalid alpha value: {alpha}"
- x = 1 - np.clip(err / (thr * 2), 0, 1)
- return np.clip(
- np.stack([2 - x * 2, x * 2, np.zeros_like(x), np.ones_like(x) * alpha], -1),
- 0,
- 1,
- )
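To make the two small helpers above concrete, here is a worked example; the import path is assumed from this file's location in the repository.

```python
import numpy as np
from src.utils.plotting import dynamic_alpha, error_colormap  # assumed import path

# dynamic_alpha interpolates linearly between the alpha milestones:
print(dynamic_alpha(150))   # 0.9 -> halfway between milestones 0 and 300
print(dynamic_alpha(650))   # 0.6 -> halfway between milestones 300 and 1000
print(dynamic_alpha(5000))  # 0.2 -> beyond the last milestone, the final alpha is used

# error_colormap maps small epipolar errors to green and errors >= 2*thr to red:
errs = np.array([0.0, 5e-4, 1e-3])
print(error_colormap(errs, thr=5e-4, alpha=1.0))
# rows are RGBA: roughly [0, 1, 0, 1] (green), [1, 1, 0, 1] (yellow), [1, 0, 0, 1] (red)
```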
diff --git a/spaces/Redgon/bingo/src/components/chat-suggestions.tsx b/spaces/Redgon/bingo/src/components/chat-suggestions.tsx
deleted file mode 100644
index 00c2fee295c9e010946046eb71705a5e131f7a5a..0000000000000000000000000000000000000000
--- a/spaces/Redgon/bingo/src/components/chat-suggestions.tsx
+++ /dev/null
@@ -1,45 +0,0 @@
-import React, { useMemo } from 'react'
-import Image from 'next/image'
-import HelpIcon from '@/assets/images/help.svg'
-import { SuggestedResponse } from '@/lib/bots/bing/types'
-import { useBing } from '@/lib/hooks/use-bing'
-import { atom, useAtom } from 'jotai'
-
-type Suggestions = SuggestedResponse[]
-const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资迅', '必应如何使用 AI?'].map((text) => ({ text }))
-const suggestionsAtom = atom<Suggestions>([])
-
-type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick<ReturnType<typeof useBing>, 'setInput'> & { suggestions?: Suggestions }
-
-export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) {
- const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom)
- const toggleSuggestions = (() => {
- if (currentSuggestions === helpSuggestions) {
- setSuggestions(suggestions)
- } else {
- setSuggestions(helpSuggestions)
- }
- })
-
- useMemo(() => {
- setSuggestions(suggestions)
- window.scrollBy(0, 2000)
- }, [suggestions.length])
-
- return currentSuggestions?.length ? (
-
-
-
- {
- currentSuggestions.map(suggestion => (
-
- ))
- }
-
-
- ) : null
-}
diff --git a/spaces/Riksarkivet/htr_demo/helper/text/overview/htrflow/htrflow_row1.md b/spaces/Riksarkivet/htr_demo/helper/text/overview/htrflow/htrflow_row1.md
deleted file mode 100644
index 2c843d67260070c26f313a933b62efce4af2ed57..0000000000000000000000000000000000000000
--- a/spaces/Riksarkivet/htr_demo/helper/text/overview/htrflow/htrflow_row1.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## The Pipeline in Overview
-
-The steps in the pipeline can be seen below as follows:
diff --git a/spaces/RobLi/ControlNet-v1-1/cv_utils.py b/spaces/RobLi/ControlNet-v1-1/cv_utils.py
deleted file mode 100644
index d81177c5eee306107966132fd54695545a61a898..0000000000000000000000000000000000000000
--- a/spaces/RobLi/ControlNet-v1-1/cv_utils.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import cv2
-import numpy as np
-
-
-def resize_image(input_image, resolution, interpolation=None):
- H, W, C = input_image.shape
- H = float(H)
- W = float(W)
- k = float(resolution) / max(H, W)
- H *= k
- W *= k
- H = int(np.round(H / 64.0)) * 64
- W = int(np.round(W / 64.0)) * 64
- if interpolation is None:
- interpolation = cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA
- img = cv2.resize(input_image, (W, H), interpolation=interpolation)
- return img
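A short usage sketch, assuming the helper is importable as `cv_utils.resize_image`: the longer side is scaled to the requested resolution, both sides are then snapped to multiples of 64, and `INTER_AREA` is chosen automatically when downscaling.

```python
import numpy as np
from cv_utils import resize_image  # assumed import path

img = np.zeros((768, 1024, 3), dtype=np.uint8)
out = resize_image(img, resolution=512)
# k = 512 / 1024 = 0.5, so H -> 384 and W -> 512 (both already multiples of 64).
print(out.shape)  # (384, 512, 3)
```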
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/datasets/wider_face.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/datasets/wider_face.py
deleted file mode 100644
index 3a13907db87a9986a7d701837259a0b712fc9dca..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/datasets/wider_face.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import os.path as osp
-import xml.etree.ElementTree as ET
-
-import mmcv
-
-from .builder import DATASETS
-from .xml_style import XMLDataset
-
-
-@DATASETS.register_module()
-class WIDERFaceDataset(XMLDataset):
- """Reader for the WIDER Face dataset in PASCAL VOC format.
-
- Conversion scripts can be found in
- https://github.com/sovrasov/wider-face-pascal-voc-annotations
- """
- CLASSES = ('face', )
-
- def __init__(self, **kwargs):
- super(WIDERFaceDataset, self).__init__(**kwargs)
-
- def load_annotations(self, ann_file):
- """Load annotation from WIDERFace XML style annotation file.
-
- Args:
- ann_file (str): Path of XML file.
-
- Returns:
- list[dict]: Annotation info from XML file.
- """
-
- data_infos = []
- img_ids = mmcv.list_from_file(ann_file)
- for img_id in img_ids:
- filename = f'{img_id}.jpg'
- xml_path = osp.join(self.img_prefix, 'Annotations',
- f'{img_id}.xml')
- tree = ET.parse(xml_path)
- root = tree.getroot()
- size = root.find('size')
- width = int(size.find('width').text)
- height = int(size.find('height').text)
- folder = root.find('folder').text
- data_infos.append(
- dict(
- id=img_id,
- filename=osp.join(folder, filename),
- width=width,
- height=height))
-
- return data_infos
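The XML fields that `load_annotations` actually reads are few, so the parsing step can be reproduced in isolation. The sketch below uses a toy annotation and a hypothetical image id, not real WIDER Face data.

```python
import xml.etree.ElementTree as ET

# Toy PASCAL VOC-style annotation with only the fields the reader touches.
xml_str = """
<annotation>
  <folder>0--Parade</folder>
  <size><width>1024</width><height>768</height></size>
</annotation>
"""
root = ET.fromstring(xml_str)
size = root.find('size')
img_id = '0_Parade_marchingband_1_5'  # hypothetical id taken from ann_file
info = dict(
    id=img_id,
    filename=f"{root.find('folder').text}/{img_id}.jpg",
    width=int(size.find('width').text),
    height=int(size.find('height').text),
)
print(info)  # {'id': ..., 'filename': '0--Parade/0_Parade_marchingband_1_5.jpg', 'width': 1024, 'height': 768}
```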
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/losses/mse_loss.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/losses/mse_loss.py
deleted file mode 100644
index 68d05752a245548862f4c9919448d4fb8dc1b8ca..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/losses/mse_loss.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..builder import LOSSES
-from .utils import weighted_loss
-
-
-@weighted_loss
-def mse_loss(pred, target):
- """Warpper of mse loss."""
- return F.mse_loss(pred, target, reduction='none')
-
-
-@LOSSES.register_module()
-class MSELoss(nn.Module):
- """MSELoss.
-
- Args:
- reduction (str, optional): The method that reduces the loss to a
- scalar. Options are "none", "mean" and "sum".
- loss_weight (float, optional): The weight of the loss. Defaults to 1.0
- """
-
- def __init__(self, reduction='mean', loss_weight=1.0):
- super().__init__()
- self.reduction = reduction
- self.loss_weight = loss_weight
-
- def forward(self, pred, target, weight=None, avg_factor=None):
- """Forward function of loss.
-
- Args:
- pred (torch.Tensor): The prediction.
- target (torch.Tensor): The learning target of the prediction.
- weight (torch.Tensor, optional): Weight of the loss for each
- prediction. Defaults to None.
- avg_factor (int, optional): Average factor that is used to average
- the loss. Defaults to None.
-
- Returns:
- torch.Tensor: The calculated loss
- """
- loss = self.loss_weight * mse_loss(
- pred,
- target,
- weight,
- reduction=self.reduction,
- avg_factor=avg_factor)
- return loss
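Because `MSELoss` is a plain `nn.Module`, it can be exercised without the registry. A minimal sketch, assuming the class above is importable:

```python
import torch
import torch.nn.functional as F

criterion = MSELoss(reduction='mean', loss_weight=0.5)

pred = torch.randn(4, 10)
target = torch.randn(4, 10)

loss = criterion(pred, target)          # weight and avg_factor are optional
print(loss)
print(0.5 * F.mse_loss(pred, target))   # same value when no weighting is applied
```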
diff --git a/spaces/Roblox-organization1ol/README/README.md b/spaces/Roblox-organization1ol/README/README.md
deleted file mode 100644
index 6ec955c2b610fbe2f9d1af661472f691626f393b..0000000000000000000000000000000000000000
--- a/spaces/Roblox-organization1ol/README/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: README
-emoji: 👀
-colorFrom: gray
-colorTo: indigo
-sdk: static
-pinned: false
----
-
-Edit this `README.md` markdown file to author your organization card.
diff --git a/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/models/__init__.py b/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/models/__init__.py
deleted file mode 100644
index 4803ba6b2a0afc8022e756ae5b3f4c7403c3c1bd..0000000000000000000000000000000000000000
--- a/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/models/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .melgan import * # NOQA
-from .parallel_wavegan import * # NOQA
diff --git a/spaces/SMD00/Image_Colorization/README.md b/spaces/SMD00/Image_Colorization/README.md
deleted file mode 100644
index 70cdbeabf14f14ebf235cebf1fa195079787b186..0000000000000000000000000000000000000000
--- a/spaces/SMD00/Image_Colorization/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Image Colorization
-emoji: 📊
-colorFrom: yellow
-colorTo: red
-sdk: gradio
-sdk_version: 3.32.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/text/sanskrit.py b/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/text/sanskrit.py
deleted file mode 100644
index 0223aaac384a2f850f5bc20651fc18eb964607d0..0000000000000000000000000000000000000000
--- a/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/text/sanskrit.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import re
-from indic_transliteration import sanscript
-
-
-# List of (iast, ipa) pairs:
-_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('a', 'ə'),
- ('ā', 'aː'),
- ('ī', 'iː'),
- ('ū', 'uː'),
- ('ṛ', 'ɹ`'),
- ('ṝ', 'ɹ`ː'),
- ('ḷ', 'l`'),
- ('ḹ', 'l`ː'),
- ('e', 'eː'),
- ('o', 'oː'),
- ('k', 'k⁼'),
- ('k⁼h', 'kʰ'),
- ('g', 'g⁼'),
- ('g⁼h', 'gʰ'),
- ('ṅ', 'ŋ'),
- ('c', 'ʧ⁼'),
- ('ʧ⁼h', 'ʧʰ'),
- ('j', 'ʥ⁼'),
- ('ʥ⁼h', 'ʥʰ'),
- ('ñ', 'n^'),
- ('ṭ', 't`⁼'),
- ('t`⁼h', 't`ʰ'),
- ('ḍ', 'd`⁼'),
- ('d`⁼h', 'd`ʰ'),
- ('ṇ', 'n`'),
- ('t', 't⁼'),
- ('t⁼h', 'tʰ'),
- ('d', 'd⁼'),
- ('d⁼h', 'dʰ'),
- ('p', 'p⁼'),
- ('p⁼h', 'pʰ'),
- ('b', 'b⁼'),
- ('b⁼h', 'bʰ'),
- ('y', 'j'),
- ('ś', 'ʃ'),
- ('ṣ', 's`'),
- ('r', 'ɾ'),
- ('l̤', 'l`'),
- ('h', 'ɦ'),
- ("'", ''),
- ('~', '^'),
- ('ṃ', '^')
-]]
-
-
-def devanagari_to_ipa(text):
- text = text.replace('ॐ', 'ओम्')
- text = re.sub(r'\s*।\s*$', '.', text)
- text = re.sub(r'\s*।\s*', ', ', text)
- text = re.sub(r'\s*॥', '.', text)
- text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST)
- for regex, replacement in _iast_to_ipa:
- text = re.sub(regex, replacement, text)
- text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)
- [:-1]+'h'+x.group(1)+'*', text)
- return text
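A short usage sketch; the exact IPA strings depend on the `indic_transliteration` package, so no output is asserted here.

```python
# Assumes the module above is importable and indic_transliteration is installed.
print(devanagari_to_ipa('नमस्ते'))
print(devanagari_to_ipa('ॐ शान्तिः शान्तिः शान्तिः॥'))
```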
diff --git a/spaces/Shawn37/UTR_LM/esm/model/esm2.py b/spaces/Shawn37/UTR_LM/esm/model/esm2.py
deleted file mode 100644
index cbb023c53542024d84ddaa1dc0a214ec29e09d8a..0000000000000000000000000000000000000000
--- a/spaces/Shawn37/UTR_LM/esm/model/esm2.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Union
-import torch
-import torch.nn as nn
-
-import esm
-from esm.modules import ContactPredictionHead, ESM1bLayerNorm, RobertaLMHead, TransformerLayer
-
-
-class ESM2(nn.Module):
- def __init__(
- self,
- num_layers: int = 33,
- embed_dim: int = 1280,
- attention_heads: int = 20,
- alphabet: Union[esm.data.Alphabet, str] = "ESM-1b",
- token_dropout: bool = True,
- ):
- super().__init__()
- self.num_layers = num_layers
- self.embed_dim = embed_dim
- self.attention_heads = attention_heads
- if not isinstance(alphabet, esm.data.Alphabet):
- alphabet = esm.data.Alphabet.from_architecture(alphabet)
- self.alphabet = alphabet
- self.alphabet_size = len(alphabet)
- self.padding_idx = alphabet.padding_idx
- self.mask_idx = alphabet.mask_idx
- self.cls_idx = alphabet.cls_idx
- self.eos_idx = alphabet.eos_idx
- self.prepend_bos = alphabet.prepend_bos
- self.append_eos = alphabet.append_eos
- self.token_dropout = token_dropout
-
- self._init_submodules()
-
- def _init_submodules(self):
- self.embed_scale = 1
- self.embed_tokens = nn.Embedding(
- self.alphabet_size,
- self.embed_dim,
- padding_idx=self.padding_idx,
- )
-
- self.layers = nn.ModuleList(
- [
- TransformerLayer(
- self.embed_dim,
- 4 * self.embed_dim,
- self.attention_heads,
- add_bias_kv=False,
- use_esm1b_layer_norm=True,
- use_rotary_embeddings=True,
- )
- for _ in range(self.num_layers)
- ]
- )
-
- self.contact_head = ContactPredictionHead(
- self.num_layers * self.attention_heads,
- self.prepend_bos,
- self.append_eos,
- eos_idx=self.eos_idx,
- )
- self.emb_layer_norm_after = ESM1bLayerNorm(self.embed_dim)
-
- self.lm_head = RobertaLMHead(
- embed_dim=self.embed_dim,
- output_dim=self.alphabet_size,
- weight=self.embed_tokens.weight,
- )
-
- def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False, return_representation=False):
- if return_contacts:
- need_head_weights = True
-
- assert tokens.ndim == 2
- padding_mask = tokens.eq(self.padding_idx) # B, T
-
- x = self.embed_scale * self.embed_tokens(tokens)
-
- if self.token_dropout:
- x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0)
- # x: B x T x C
- mask_ratio_train = 0.15 * 0.8
- src_lengths = (~padding_mask).sum(-1)
- mask_ratio_observed = (tokens == self.mask_idx).sum(-1).to(x.dtype) / src_lengths
- x = x * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
-
- if padding_mask is not None:
- x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
-
- repr_layers = set(repr_layers)
- hidden_representations = {}
- if 0 in repr_layers:
- hidden_representations[0] = x
-
- if need_head_weights:
- attn_weights = []
-
- # (B, T, E) => (T, B, E)
- x = x.transpose(0, 1)
-
- if not padding_mask.any():
- padding_mask = None
-
- for layer_idx, layer in enumerate(self.layers):
- x, attn = layer(
- x,
- self_attn_padding_mask=padding_mask,
- need_head_weights=need_head_weights,
- )
- if (layer_idx + 1) in repr_layers:
- hidden_representations[layer_idx + 1] = x.transpose(0, 1)
- if need_head_weights:
- # (H, B, T, T) => (B, H, T, T)
- attn_weights.append(attn.transpose(1, 0))
-# print(x.shape) # 73, 2, 1280
- x = self.emb_layer_norm_after(x)
- x = x.transpose(0, 1) # (T, B, E) => (B, T, E)
-
- # last hidden representation should have layer norm applied
- if (layer_idx + 1) in repr_layers:
- hidden_representations[layer_idx + 1] = x
- x = self.lm_head(x)
-
- if return_representation:
- result = {"logits": x, "representations": hidden_representations}
- else:
- result = {"logits": x}
- if need_head_weights:
- # attentions: B x L x H x T x T
- attentions = torch.stack(attn_weights, 1)
- if padding_mask is not None:
- attention_mask = 1 - padding_mask.type_as(attentions)
- attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2)
- attentions = attentions * attention_mask[:, None, None, :, :]
- result["attentions"] = attentions
- if return_contacts:
- attentions_symm, contacts = self.contact_head(tokens, attentions)
- result["contacts"] = contacts
- result["attentions_symm"] = attentions_symm
-
- return result
-
- def predict_contacts(self, tokens):
- return self(tokens, return_contacts=True)["contacts"]
-
- def predict_symmetric_attentions(self, tokens):
- return self(tokens, return_contacts=True)["attentions_symm"]
-
- def predict_attentions(self, tokens):
- return self(tokens, need_head_weights=True)["attentions"]
-
- def predict_representations(self, tokens):
- return self(tokens, return_representation=True)['representations']
-
- def predict_logits(self, tokens):
- return self(tokens)['logits']
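A smoke-test sketch for the class above with randomly initialised weights (no checkpoint is loaded); it assumes the fair-esm package's `Alphabet`/batch-converter API is available, as the imports at the top of this file suggest.

```python
import torch

# Small, randomly initialised configuration; real checkpoints use larger settings.
model = ESM2(num_layers=6, embed_dim=320, attention_heads=20, alphabet="ESM-1b")
model.eval()

batch_converter = model.alphabet.get_batch_converter()
data = [("seq1", "MKTAYIAKQR"), ("seq2", "GATTACA")]
_, _, tokens = batch_converter(data)

with torch.no_grad():
    out = model(tokens, repr_layers=[6], return_representation=True)

print(out["logits"].shape)              # (batch, tokens, alphabet_size)
print(out["representations"][6].shape)  # (batch, tokens, 320)
```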
diff --git a/spaces/ShrapTy/text_generation/README.md b/spaces/ShrapTy/text_generation/README.md
deleted file mode 100644
index 00d298966cb8fb17b5ed36020922a6bfe8c5466b..0000000000000000000000000000000000000000
--- a/spaces/ShrapTy/text_generation/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
----
-title: text_generation
-emoji: 🔥
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.48.0
-app_file: run.py
-pinned: false
-hf_oauth: true
----
diff --git a/spaces/Smotto/Vocal-Isolator/src/infer.py b/spaces/Smotto/Vocal-Isolator/src/infer.py
deleted file mode 100644
index 68fc0d8ea04478939f4dc5869bfbf0c8989ef0be..0000000000000000000000000000000000000000
--- a/spaces/Smotto/Vocal-Isolator/src/infer.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Standard Library Imports
-import os
-import subprocess
-
-# Third Party Imports
-import torch
-import onnxruntime as ort
-
-# Local Imports
-from models.MDX_net.mdx_net import Conv_TDF_net_trimm
-from loader import Loader
-
-vocal_path = "./datasets/output/vocals.wav"
-
-# Global Variables
-COMPUTATION_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
-
-
-def main():
-    print(COMPUTATION_DEVICE)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/data/sound_dataset.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/data/sound_dataset.py
deleted file mode 100644
index 8b88cbe8016b4bd28c2de749177c9af29f7755fc..0000000000000000000000000000000000000000
--- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/data/sound_dataset.py
+++ /dev/null
@@ -1,330 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Dataset of audio with a simple description.
-"""
-
-from dataclasses import dataclass, fields, replace
-import json
-from pathlib import Path
-import random
-import typing as tp
-
-import numpy as np
-import torch
-
-from .info_audio_dataset import (
- InfoAudioDataset,
- get_keyword_or_keyword_list
-)
-from ..modules.conditioners import (
- ConditioningAttributes,
- SegmentWithAttributes,
- WavCondition,
-)
-
-
-EPS = torch.finfo(torch.float32).eps
-TARGET_LEVEL_LOWER = -35
-TARGET_LEVEL_UPPER = -15
-
-
-@dataclass
-class SoundInfo(SegmentWithAttributes):
- """Segment info augmented with Sound metadata.
- """
- description: tp.Optional[str] = None
- self_wav: tp.Optional[torch.Tensor] = None
-
- @property
- def has_sound_meta(self) -> bool:
- return self.description is not None
-
- def to_condition_attributes(self) -> ConditioningAttributes:
- out = ConditioningAttributes()
-
- for _field in fields(self):
- key, value = _field.name, getattr(self, _field.name)
- if key == 'self_wav':
- out.wav[key] = value
- else:
- out.text[key] = value
- return out
-
- @staticmethod
- def attribute_getter(attribute):
- if attribute == 'description':
- preprocess_func = get_keyword_or_keyword_list
- else:
- preprocess_func = None
- return preprocess_func
-
- @classmethod
- def from_dict(cls, dictionary: dict, fields_required: bool = False):
- _dictionary: tp.Dict[str, tp.Any] = {}
-
- # allow a subset of attributes to not be loaded from the dictionary
- # these attributes may be populated later
- post_init_attributes = ['self_wav']
-
- for _field in fields(cls):
- if _field.name in post_init_attributes:
- continue
- elif _field.name not in dictionary:
- if fields_required:
- raise KeyError(f"Unexpected missing key: {_field.name}")
- else:
- preprocess_func: tp.Optional[tp.Callable] = cls.attribute_getter(_field.name)
- value = dictionary[_field.name]
- if preprocess_func:
- value = preprocess_func(value)
- _dictionary[_field.name] = value
- return cls(**_dictionary)
-
-
-class SoundDataset(InfoAudioDataset):
- """Sound audio dataset: Audio dataset with environmental sound-specific metadata.
-
- Args:
- info_fields_required (bool): Whether all the mandatory metadata fields should be in the loaded metadata.
- external_metadata_source (tp.Optional[str]): Folder containing JSON metadata for the corresponding dataset.
- The metadata files contained in this folder are expected to match the stem of the audio file with
- a json extension.
- aug_p (float): Probability of performing audio mixing augmentation on the batch.
- mix_p (float): Proportion of batch items that are mixed together when applying audio mixing augmentation.
- mix_snr_low (int): Lowerbound for SNR value sampled for mixing augmentation.
- mix_snr_high (int): Upperbound for SNR value sampled for mixing augmentation.
- mix_min_overlap (float): Minimum overlap between audio files when performing mixing augmentation.
- kwargs: Additional arguments for AudioDataset.
-
- See `audiocraft.data.info_audio_dataset.InfoAudioDataset` for full initialization arguments.
- """
- def __init__(
- self,
- *args,
- info_fields_required: bool = True,
- external_metadata_source: tp.Optional[str] = None,
- aug_p: float = 0.,
- mix_p: float = 0.,
- mix_snr_low: int = -5,
- mix_snr_high: int = 5,
- mix_min_overlap: float = 0.5,
- **kwargs
- ):
- kwargs['return_info'] = True # We require the info for each song of the dataset.
- super().__init__(*args, **kwargs)
- self.info_fields_required = info_fields_required
- self.external_metadata_source = external_metadata_source
- self.aug_p = aug_p
- self.mix_p = mix_p
- if self.aug_p > 0:
- assert self.mix_p > 0, "Expecting some mixing proportion mix_p if aug_p > 0"
- assert self.channels == 1, "SoundDataset with audio mixing considers only monophonic audio"
- self.mix_snr_low = mix_snr_low
- self.mix_snr_high = mix_snr_high
- self.mix_min_overlap = mix_min_overlap
-
- def _get_info_path(self, path: tp.Union[str, Path]) -> Path:
- """Get path of JSON with metadata (description, etc.).
- If there exists a JSON with the same name as 'path.name', then it will be used.
- Else, such JSON will be searched for in an external json source folder if it exists.
- """
- info_path = Path(path).with_suffix('.json')
- if Path(info_path).exists():
- return info_path
- elif self.external_metadata_source and (Path(self.external_metadata_source) / info_path.name).exists():
- return Path(self.external_metadata_source) / info_path.name
- else:
- raise Exception(f"Unable to find a metadata JSON for path: {path}")
-
- def __getitem__(self, index):
- wav, info = super().__getitem__(index)
- info_data = info.to_dict()
- info_path = self._get_info_path(info.meta.path)
- if Path(info_path).exists():
- with open(info_path, 'r') as json_file:
- sound_data = json.load(json_file)
- sound_data.update(info_data)
- sound_info = SoundInfo.from_dict(sound_data, fields_required=self.info_fields_required)
- # if there are multiple descriptions, sample one randomly
- if isinstance(sound_info.description, list):
- sound_info.description = random.choice(sound_info.description)
- else:
- sound_info = SoundInfo.from_dict(info_data, fields_required=False)
-
- sound_info.self_wav = WavCondition(
- wav=wav[None], length=torch.tensor([info.n_frames]),
- sample_rate=[sound_info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time])
-
- return wav, sound_info
-
- def collater(self, samples):
- # when training, audio mixing is performed in the collate function
- wav, sound_info = super().collater(samples) # SoundDataset always returns infos
- if self.aug_p > 0:
- wav, sound_info = mix_samples(wav, sound_info, self.aug_p, self.mix_p,
- snr_low=self.mix_snr_low, snr_high=self.mix_snr_high,
- min_overlap=self.mix_min_overlap)
- return wav, sound_info
-
-
-def rms_f(x: torch.Tensor) -> torch.Tensor:
- return (x ** 2).mean(1).pow(0.5)
-
-
-def normalize(audio: torch.Tensor, target_level: int = -25) -> torch.Tensor:
- """Normalize the signal to the target level."""
- rms = rms_f(audio)
- scalar = 10 ** (target_level / 20) / (rms + EPS)
- audio = audio * scalar.unsqueeze(1)
- return audio
-
-
-def is_clipped(audio: torch.Tensor, clipping_threshold: float = 0.99) -> torch.Tensor:
- return (abs(audio) > clipping_threshold).any(1)
-
-
-def mix_pair(src: torch.Tensor, dst: torch.Tensor, min_overlap: float) -> torch.Tensor:
- start = random.randint(0, int(src.shape[1] * (1 - min_overlap)))
- remainder = src.shape[1] - start
- if dst.shape[1] > remainder:
- src[:, start:] = src[:, start:] + dst[:, :remainder]
- else:
- src[:, start:start+dst.shape[1]] = src[:, start:start+dst.shape[1]] + dst
- return src
-
-
-def snr_mixer(clean: torch.Tensor, noise: torch.Tensor, snr: int, min_overlap: float,
- target_level: int = -25, clipping_threshold: float = 0.99) -> torch.Tensor:
- """Function to mix clean speech and noise at various SNR levels.
-
- Args:
- clean (torch.Tensor): Clean audio source to mix, of shape [B, T].
- noise (torch.Tensor): Noise audio source to mix, of shape [B, T].
- snr (int): SNR level when mixing.
- min_overlap (float): Minimum overlap between the two mixed sources.
- target_level (int): Gain level in dB.
- clipping_threshold (float): Threshold for clipping the audio.
- Returns:
- torch.Tensor: The mixed audio, of shape [B, T].
- """
- if clean.shape[1] > noise.shape[1]:
- noise = torch.nn.functional.pad(noise, (0, clean.shape[1] - noise.shape[1]))
- else:
- noise = noise[:, :clean.shape[1]]
-
- # normalizing to -25 dB FS
- clean = clean / (clean.max(1)[0].abs().unsqueeze(1) + EPS)
- clean = normalize(clean, target_level)
- rmsclean = rms_f(clean)
-
- noise = noise / (noise.max(1)[0].abs().unsqueeze(1) + EPS)
- noise = normalize(noise, target_level)
- rmsnoise = rms_f(noise)
-
- # set the noise level for a given SNR
- noisescalar = (rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS)).unsqueeze(1)
- noisenewlevel = noise * noisescalar
-
- # mix noise and clean speech
- noisyspeech = mix_pair(clean, noisenewlevel, min_overlap)
-
- # randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value
- # there is a chance of clipping that might happen with very less probability, which is not a major issue.
- noisy_rms_level = np.random.randint(TARGET_LEVEL_LOWER, TARGET_LEVEL_UPPER)
- rmsnoisy = rms_f(noisyspeech)
- scalarnoisy = (10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS)).unsqueeze(1)
- noisyspeech = noisyspeech * scalarnoisy
- clean = clean * scalarnoisy
- noisenewlevel = noisenewlevel * scalarnoisy
-
- # final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
- clipped = is_clipped(noisyspeech)
- if clipped.any():
- noisyspeech_maxamplevel = noisyspeech[clipped].max(1)[0].abs().unsqueeze(1) / (clipping_threshold - EPS)
- noisyspeech[clipped] = noisyspeech[clipped] / noisyspeech_maxamplevel
-
- return noisyspeech
-
-
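The core of `snr_mixer` above is the RMS-based rescaling of the noise branch. A minimal, self-contained sketch of that arithmetic (illustrative only: the shapes, the 10 dB target, and the `EPS` constant are assumptions, not this file's API):

```python
import torch

EPS = 1e-8  # assumed small constant to avoid division by zero

def rms(x: torch.Tensor) -> torch.Tensor:
    # root-mean-square per item, x has shape [B, T]
    return (x ** 2).mean(dim=1).pow(0.5)

clean = torch.randn(1, 16000) * 0.1   # stand-in "clean" source
noise = torch.randn(1, 16000) * 0.3   # stand-in "noise" source
snr_db = 10                           # requested SNR in dB

# choose a scale so that rms(clean) / rms(scale * noise) == 10 ** (snr_db / 20)
scale = rms(clean) / (10 ** (snr_db / 20)) / (rms(noise) + EPS)
mixed = clean + noise * scale.unsqueeze(1)

achieved = 20 * torch.log10(rms(clean) / rms(noise * scale.unsqueeze(1)))
print(f"requested {snr_db} dB, achieved {achieved.item():.2f} dB")
```

The library code additionally normalizes both sources to a target dBFS level, rescales the sum to a randomly drawn RMS level, and then performs the final clipping check.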
-def snr_mix(src: torch.Tensor, dst: torch.Tensor, snr_low: int, snr_high: int, min_overlap: float):
- if snr_low == snr_high:
- snr = snr_low
- else:
- snr = np.random.randint(snr_low, snr_high)
- mix = snr_mixer(src, dst, snr, min_overlap)
- return mix
-
-
-def mix_text(src_text: str, dst_text: str):
- """Mix text from different sources by concatenating them."""
- if src_text == dst_text:
- return src_text
- return src_text + " " + dst_text
-
-
-def mix_samples(wavs: torch.Tensor, infos: tp.List[SoundInfo], aug_p: float, mix_p: float,
- snr_low: int, snr_high: int, min_overlap: float):
- """Mix samples within a batch, summing the waveforms and concatenating the text infos.
-
- Args:
- wavs (torch.Tensor): Audio tensors of shape [B, C, T].
- infos (list[SoundInfo]): List of SoundInfo items corresponding to the audio.
- aug_p (float): Augmentation probability.
- mix_p (float): Proportion of items in the batch to mix (and merge) together.
- snr_low (int): Lowerbound for sampling SNR.
- snr_high (int): Upperbound for sampling SNR.
- min_overlap (float): Minimum overlap between mixed samples.
- Returns:
- tuple[torch.Tensor, list[SoundInfo]]: A tuple containing the mixed wavs
- and mixed SoundInfo for the given batch.
- """
- # no mixing to perform within the batch
- if mix_p == 0:
- return wavs, infos
-
- if random.uniform(0, 1) < aug_p:
- # perform all augmentations on waveforms as [B, T]
- # randomly picking pairs of audio to mix
- assert wavs.size(1) == 1, f"Mix samples requires monophonic audio but C={wavs.size(1)}"
- wavs = wavs.mean(dim=1, keepdim=False)
- B, T = wavs.shape
- k = int(mix_p * B)
- mixed_sources_idx = torch.randperm(B)[:k]
- mixed_targets_idx = torch.randperm(B)[:k]
- aug_wavs = snr_mix(
- wavs[mixed_sources_idx],
- wavs[mixed_targets_idx],
- snr_low,
- snr_high,
- min_overlap,
- )
- # mixing textual descriptions in metadata
- descriptions = [info.description for info in infos]
- aug_infos = []
- for i, j in zip(mixed_sources_idx, mixed_targets_idx):
- text = mix_text(descriptions[i], descriptions[j])
- m = replace(infos[i])
- m.description = text
- aug_infos.append(m)
-
- # back to [B, C, T]
- aug_wavs = aug_wavs.unsqueeze(1)
- assert aug_wavs.shape[0] > 0, "Samples mixing returned empty batch."
- assert aug_wavs.dim() == 3, f"Returned wav should be [B, C, T] but dim = {aug_wavs.dim()}"
- assert aug_wavs.shape[0] == len(aug_infos), "Mismatch between number of wavs and infos in the batch"
-
- return aug_wavs, aug_infos # [B, C, T]
- else:
- # randomly pick samples in the batch to match
- # the batch size when performing audio mixing
- B, C, T = wavs.shape
- k = int(mix_p * B)
- wav_idx = torch.randperm(B)[:k]
- wavs = wavs[wav_idx]
- infos = [infos[i] for i in wav_idx]
- assert wavs.shape[0] == len(infos), "Mismatch between number of wavs and infos in the batch"
-
- return wavs, infos # [B, C, T]
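To make the batch-level flow of `mix_samples` above concrete, here is a standalone sketch of the selection-and-merge logic. It is a simplification under stated assumptions: plain strings stand in for `SoundInfo` descriptions, and the selected waveforms are summed directly instead of going through `snr_mix`.

```python
import torch

B, T = 8, 16000
wavs = torch.randn(B, 1, T)                       # [B, C=1, T] batch from the collater
descriptions = [f"sound {i}" for i in range(B)]   # stand-ins for SoundInfo.description

mix_p = 0.5
k = int(mix_p * B)                                # how many mixed items to produce
src_idx = torch.randperm(B)[:k]
dst_idx = torch.randperm(B)[:k]

mono = wavs.mean(dim=1)                           # [B, T], as in the aug_p branch above
mixed = mono[src_idx] + mono[dst_idx]             # simplified: the real code mixes at a sampled SNR
mixed_desc = [f"{descriptions[i]} {descriptions[j]}"
              for i, j in zip(src_idx.tolist(), dst_idx.tolist())]

mixed = mixed.unsqueeze(1)                        # back to [k, C=1, T]
print(mixed.shape, mixed_desc[0])
```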
diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/optim/ema.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/optim/ema.py
deleted file mode 100644
index 4337eaff066a8ca124dca3e3e63ee36e417c055c..0000000000000000000000000000000000000000
--- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/optim/ema.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-# ModelEMA implementation is taken from
-# https://github.com/facebookresearch/demucs
-
-from collections import defaultdict
-import typing as tp
-
-import torch
-import torch.nn as nn
-
-
-def _get_all_non_persistent_buffers_set(module: nn.Module, root: str = "") -> set:
- names: set = set()
- for (name, sub_module) in module.named_modules():
- if name == '':
- buffer_names = module._non_persistent_buffers_set
- buffer_names = {f"{root}.{buff_name}" if len(root) > 0 else buff_name
- for buff_name in buffer_names}
- names.update(buffer_names)
- else:
- sub_name = f"{root}.{name}" if len(root) > 0 else name
- sub_buffer_names = _get_all_non_persistent_buffers_set(sub_module, sub_name)
- names.update(sub_buffer_names)
- return names
-
-
-def _get_named_tensors(module: nn.Module):
- non_persistent_buffers_set = _get_all_non_persistent_buffers_set(module)
- named_buffers = [(name, buffer) for (name, buffer) in module.named_buffers()
- if name not in non_persistent_buffers_set]
- named_parameters = list(module.named_parameters())
- return named_parameters + named_buffers
-
-
-class ModuleDictEMA:
- """Exponential Moving Average over a nn.ModuleDict.
-
- You can switch to the EMA weights temporarily.
- """
- def __init__(self, module_dict: nn.ModuleDict, decay: float = 0.999,
- unbias: bool = True, device: tp.Union[torch.device, str] = 'cpu'):
- self.decay = decay
- self.module_dict = module_dict
- self.state: dict = defaultdict(dict)
- self.count = 0
- self.device = device
- self.unbias = unbias
- self._init()
-
- def _init(self):
- for module_name, module in self.module_dict.items():
- for key, val in _get_named_tensors(module):
- if not val.is_floating_point():
- continue
- device = self.device or val.device
- if key not in self.state[module_name]:
- self.state[module_name][key] = val.detach().to(device, copy=True)
-
- def step(self):
- if self.unbias:
- self.count = self.count * self.decay + 1
- w = 1 / self.count
- else:
- w = 1 - self.decay
- for module_name, module in self.module_dict.items():
- for key, val in _get_named_tensors(module):
- if not val.is_floating_point():
- continue
- device = self.device or val.device
- self.state[module_name][key].mul_(1 - w)
- self.state[module_name][key].add_(val.detach().to(device), alpha=w)
-
- def state_dict(self):
- return {'state': self.state, 'count': self.count}
-
- def load_state_dict(self, state):
- self.count = state['count']
- for module_name, module in state['state'].items():
- for key, val in module.items():
- self.state[module_name][key].copy_(val)
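The update rule in `ModuleDictEMA.step` is a standard unbiased exponential moving average. A rough sketch of how such a wrapper is typically driven inside a training loop, re-implemented inline so it runs without the surrounding package (the model, optimizer, and loop below are made up for illustration):

```python
import torch
import torch.nn as nn

models = nn.ModuleDict({"model": nn.Linear(4, 2)})
opt = torch.optim.Adam(models.parameters(), lr=1e-3)

# shadow copy of the parameters, mirroring ModuleDictEMA's per-module state
ema_state = {k: v.detach().clone() for k, v in models.named_parameters()}
decay, count = 0.999, 0.0

for step in range(10):
    x, y = torch.randn(16, 4), torch.randn(16, 2)
    loss = ((models["model"](x) - y) ** 2).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()

    # unbiased EMA weight, as in ModuleDictEMA.step with unbias=True
    count = count * decay + 1
    w = 1 / count
    for k, v in models.named_parameters():
        ema_state[k].mul_(1 - w).add_(v.detach(), alpha=w)

print({k: tuple(t.shape) for k, t in ema_state.items()})
```

Early in training `w` is close to 1, so the shadow weights track the live weights closely; as `count` grows, `w` approaches `1 - decay` and the average becomes slower.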
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/web_urldispatcher.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/web_urldispatcher.py
deleted file mode 100644
index 5942e355e019aaca9b16f95dfbc26b7275fccdaa..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/web_urldispatcher.py
+++ /dev/null
@@ -1,1220 +0,0 @@
-import abc
-import asyncio
-import base64
-import hashlib
-import inspect
-import keyword
-import os
-import re
-import warnings
-from contextlib import contextmanager
-from functools import wraps
-from pathlib import Path
-from types import MappingProxyType
-from typing import (
- TYPE_CHECKING,
- Any,
- Awaitable,
- Callable,
- Container,
- Dict,
- Generator,
- Iterable,
- Iterator,
- List,
- Mapping,
- Optional,
- Pattern,
- Set,
- Sized,
- Tuple,
- Type,
- Union,
- cast,
-)
-
-from yarl import URL, __version__ as yarl_version # type: ignore[attr-defined]
-
-from . import hdrs
-from .abc import AbstractMatchInfo, AbstractRouter, AbstractView
-from .helpers import DEBUG
-from .http import HttpVersion11
-from .typedefs import Final, Handler, PathLike, TypedDict
-from .web_exceptions import (
- HTTPException,
- HTTPExpectationFailed,
- HTTPForbidden,
- HTTPMethodNotAllowed,
- HTTPNotFound,
-)
-from .web_fileresponse import FileResponse
-from .web_request import Request
-from .web_response import Response, StreamResponse
-from .web_routedef import AbstractRouteDef
-
-__all__ = (
- "UrlDispatcher",
- "UrlMappingMatchInfo",
- "AbstractResource",
- "Resource",
- "PlainResource",
- "DynamicResource",
- "AbstractRoute",
- "ResourceRoute",
- "StaticResource",
- "View",
-)
-
-
-if TYPE_CHECKING: # pragma: no cover
- from .web_app import Application
-
- BaseDict = Dict[str, str]
-else:
- BaseDict = dict
-
-YARL_VERSION: Final[Tuple[int, ...]] = tuple(map(int, yarl_version.split(".")[:2]))
-
-HTTP_METHOD_RE: Final[Pattern[str]] = re.compile(
- r"^[0-9A-Za-z!#\$%&'\*\+\-\.\^_`\|~]+$"
-)
-ROUTE_RE: Final[Pattern[str]] = re.compile(
- r"(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})"
-)
-PATH_SEP: Final[str] = re.escape("/")
-
-
-_ExpectHandler = Callable[[Request], Awaitable[None]]
-_Resolve = Tuple[Optional["UrlMappingMatchInfo"], Set[str]]
-
-
-class _InfoDict(TypedDict, total=False):
- path: str
-
- formatter: str
- pattern: Pattern[str]
-
- directory: Path
- prefix: str
- routes: Mapping[str, "AbstractRoute"]
-
- app: "Application"
-
- domain: str
-
- rule: "AbstractRuleMatching"
-
- http_exception: HTTPException
-
-
-class AbstractResource(Sized, Iterable["AbstractRoute"]):
- def __init__(self, *, name: Optional[str] = None) -> None:
- self._name = name
-
- @property
- def name(self) -> Optional[str]:
- return self._name
-
- @property
- @abc.abstractmethod
- def canonical(self) -> str:
- """Exposes the resource's canonical path.
-
- For example '/foo/bar/{name}'
-
- """
-
- @abc.abstractmethod # pragma: no branch
- def url_for(self, **kwargs: str) -> URL:
- """Construct url for resource with additional params."""
-
- @abc.abstractmethod # pragma: no branch
- async def resolve(self, request: Request) -> _Resolve:
- """Resolve resource.
-
- Return (UrlMappingMatchInfo, allowed_methods) pair.
- """
-
- @abc.abstractmethod
- def add_prefix(self, prefix: str) -> None:
- """Add a prefix to processed URLs.
-
- Required for subapplications support.
- """
-
- @abc.abstractmethod
- def get_info(self) -> _InfoDict:
- """Return a dict with additional info useful for introspection"""
-
- def freeze(self) -> None:
- pass
-
- @abc.abstractmethod
- def raw_match(self, path: str) -> bool:
- """Perform a raw match against path"""
-
-
-class AbstractRoute(abc.ABC):
- def __init__(
- self,
- method: str,
- handler: Union[Handler, Type[AbstractView]],
- *,
- expect_handler: Optional[_ExpectHandler] = None,
- resource: Optional[AbstractResource] = None,
- ) -> None:
-
- if expect_handler is None:
- expect_handler = _default_expect_handler
-
- assert asyncio.iscoroutinefunction(
- expect_handler
- ), f"Coroutine is expected, got {expect_handler!r}"
-
- method = method.upper()
- if not HTTP_METHOD_RE.match(method):
- raise ValueError(f"{method} is not allowed HTTP method")
-
- assert callable(handler), handler
- if asyncio.iscoroutinefunction(handler):
- pass
- elif inspect.isgeneratorfunction(handler):
- warnings.warn(
- "Bare generators are deprecated, " "use @coroutine wrapper",
- DeprecationWarning,
- )
- elif isinstance(handler, type) and issubclass(handler, AbstractView):
- pass
- else:
- warnings.warn(
- "Bare functions are deprecated, " "use async ones", DeprecationWarning
- )
-
- @wraps(handler)
- async def handler_wrapper(request: Request) -> StreamResponse:
- result = old_handler(request)
- if asyncio.iscoroutine(result):
- return await result
- return result # type: ignore[return-value]
-
- old_handler = handler
- handler = handler_wrapper
-
- self._method = method
- self._handler = handler
- self._expect_handler = expect_handler
- self._resource = resource
-
- @property
- def method(self) -> str:
- return self._method
-
- @property
- def handler(self) -> Handler:
- return self._handler
-
- @property
- @abc.abstractmethod
- def name(self) -> Optional[str]:
- """Optional route's name, always equals to resource's name."""
-
- @property
- def resource(self) -> Optional[AbstractResource]:
- return self._resource
-
- @abc.abstractmethod
- def get_info(self) -> _InfoDict:
- """Return a dict with additional info useful for introspection"""
-
- @abc.abstractmethod # pragma: no branch
- def url_for(self, *args: str, **kwargs: str) -> URL:
- """Construct url for route with additional params."""
-
- async def handle_expect_header(self, request: Request) -> None:
- await self._expect_handler(request)
-
-
-class UrlMappingMatchInfo(BaseDict, AbstractMatchInfo):
- def __init__(self, match_dict: Dict[str, str], route: AbstractRoute):
- super().__init__(match_dict)
- self._route = route
- self._apps: List[Application] = []
- self._current_app: Optional[Application] = None
- self._frozen = False
-
- @property
- def handler(self) -> Handler:
- return self._route.handler
-
- @property
- def route(self) -> AbstractRoute:
- return self._route
-
- @property
- def expect_handler(self) -> _ExpectHandler:
- return self._route.handle_expect_header
-
- @property
- def http_exception(self) -> Optional[HTTPException]:
- return None
-
- def get_info(self) -> _InfoDict: # type: ignore[override]
- return self._route.get_info()
-
- @property
- def apps(self) -> Tuple["Application", ...]:
- return tuple(self._apps)
-
- def add_app(self, app: "Application") -> None:
- if self._frozen:
- raise RuntimeError("Cannot change apps stack after .freeze() call")
- if self._current_app is None:
- self._current_app = app
- self._apps.insert(0, app)
-
- @property
- def current_app(self) -> "Application":
- app = self._current_app
- assert app is not None
- return app
-
- @contextmanager
- def set_current_app(self, app: "Application") -> Generator[None, None, None]:
- if DEBUG: # pragma: no cover
- if app not in self._apps:
- raise RuntimeError(
- "Expected one of the following apps {!r}, got {!r}".format(
- self._apps, app
- )
- )
- prev = self._current_app
- self._current_app = app
- try:
- yield
- finally:
- self._current_app = prev
-
- def freeze(self) -> None:
- self._frozen = True
-
- def __repr__(self) -> str:
- return f""
-
-
-class MatchInfoError(UrlMappingMatchInfo):
- def __init__(self, http_exception: HTTPException) -> None:
- self._exception = http_exception
- super().__init__({}, SystemRoute(self._exception))
-
- @property
- def http_exception(self) -> HTTPException:
- return self._exception
-
- def __repr__(self) -> str:
- return "".format(
- self._exception.status, self._exception.reason
- )
-
-
-async def _default_expect_handler(request: Request) -> None:
- """Default handler for Expect header.
-
- Just send "100 Continue" to client.
- raise HTTPExpectationFailed if value of header is not "100-continue"
- """
- expect = request.headers.get(hdrs.EXPECT, "")
- if request.version == HttpVersion11:
- if expect.lower() == "100-continue":
- await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n")
- else:
- raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect)
-
-
-class Resource(AbstractResource):
- def __init__(self, *, name: Optional[str] = None) -> None:
- super().__init__(name=name)
- self._routes: List[ResourceRoute] = []
-
- def add_route(
- self,
- method: str,
- handler: Union[Type[AbstractView], Handler],
- *,
- expect_handler: Optional[_ExpectHandler] = None,
- ) -> "ResourceRoute":
-
- for route_obj in self._routes:
- if route_obj.method == method or route_obj.method == hdrs.METH_ANY:
- raise RuntimeError(
- "Added route will never be executed, "
- "method {route.method} is already "
- "registered".format(route=route_obj)
- )
-
- route_obj = ResourceRoute(method, handler, self, expect_handler=expect_handler)
- self.register_route(route_obj)
- return route_obj
-
- def register_route(self, route: "ResourceRoute") -> None:
- assert isinstance(
- route, ResourceRoute
- ), f"Instance of Route class is required, got {route!r}"
- self._routes.append(route)
-
- async def resolve(self, request: Request) -> _Resolve:
- allowed_methods: Set[str] = set()
-
- match_dict = self._match(request.rel_url.raw_path)
- if match_dict is None:
- return None, allowed_methods
-
- for route_obj in self._routes:
- route_method = route_obj.method
- allowed_methods.add(route_method)
-
- if route_method == request.method or route_method == hdrs.METH_ANY:
- return (UrlMappingMatchInfo(match_dict, route_obj), allowed_methods)
- else:
- return None, allowed_methods
-
- @abc.abstractmethod
- def _match(self, path: str) -> Optional[Dict[str, str]]:
- pass # pragma: no cover
-
- def __len__(self) -> int:
- return len(self._routes)
-
- def __iter__(self) -> Iterator[AbstractRoute]:
- return iter(self._routes)
-
- # TODO: implement all abstract methods
-
-
-class PlainResource(Resource):
- def __init__(self, path: str, *, name: Optional[str] = None) -> None:
- super().__init__(name=name)
- assert not path or path.startswith("/")
- self._path = path
-
- @property
- def canonical(self) -> str:
- return self._path
-
- def freeze(self) -> None:
- if not self._path:
- self._path = "/"
-
- def add_prefix(self, prefix: str) -> None:
- assert prefix.startswith("/")
- assert not prefix.endswith("/")
- assert len(prefix) > 1
- self._path = prefix + self._path
-
- def _match(self, path: str) -> Optional[Dict[str, str]]:
- # string comparison is about 10 times faster than regexp matching
- if self._path == path:
- return {}
- else:
- return None
-
- def raw_match(self, path: str) -> bool:
- return self._path == path
-
- def get_info(self) -> _InfoDict:
- return {"path": self._path}
-
- def url_for(self) -> URL: # type: ignore[override]
- return URL.build(path=self._path, encoded=True)
-
- def __repr__(self) -> str:
- name = "'" + self.name + "' " if self.name is not None else ""
- return f""
-
-
-class DynamicResource(Resource):
-
- DYN = re.compile(r"\{(?P[_a-zA-Z][_a-zA-Z0-9]*)\}")
- DYN_WITH_RE = re.compile(r"\{(?P[_a-zA-Z][_a-zA-Z0-9]*):(?P.+)\}")
- GOOD = r"[^{}/]+"
-
- def __init__(self, path: str, *, name: Optional[str] = None) -> None:
- super().__init__(name=name)
- pattern = ""
- formatter = ""
- for part in ROUTE_RE.split(path):
- match = self.DYN.fullmatch(part)
- if match:
- pattern += "(?P<{}>{})".format(match.group("var"), self.GOOD)
- formatter += "{" + match.group("var") + "}"
- continue
-
- match = self.DYN_WITH_RE.fullmatch(part)
- if match:
- pattern += "(?P<{var}>{re})".format(**match.groupdict())
- formatter += "{" + match.group("var") + "}"
- continue
-
- if "{" in part or "}" in part:
- raise ValueError(f"Invalid path '{path}'['{part}']")
-
- part = _requote_path(part)
- formatter += part
- pattern += re.escape(part)
-
- try:
- compiled = re.compile(pattern)
- except re.error as exc:
- raise ValueError(f"Bad pattern '{pattern}': {exc}") from None
- assert compiled.pattern.startswith(PATH_SEP)
- assert formatter.startswith("/")
- self._pattern = compiled
- self._formatter = formatter
-
- @property
- def canonical(self) -> str:
- return self._formatter
-
- def add_prefix(self, prefix: str) -> None:
- assert prefix.startswith("/")
- assert not prefix.endswith("/")
- assert len(prefix) > 1
- self._pattern = re.compile(re.escape(prefix) + self._pattern.pattern)
- self._formatter = prefix + self._formatter
-
- def _match(self, path: str) -> Optional[Dict[str, str]]:
- match = self._pattern.fullmatch(path)
- if match is None:
- return None
- else:
- return {
- key: _unquote_path(value) for key, value in match.groupdict().items()
- }
-
- def raw_match(self, path: str) -> bool:
- return self._formatter == path
-
- def get_info(self) -> _InfoDict:
- return {"formatter": self._formatter, "pattern": self._pattern}
-
- def url_for(self, **parts: str) -> URL:
- url = self._formatter.format_map({k: _quote_path(v) for k, v in parts.items()})
- return URL.build(path=url, encoded=True)
-
- def __repr__(self) -> str:
- name = "'" + self.name + "' " if self.name is not None else ""
- return "".format(
- name=name, formatter=self._formatter
- )
-
-
-class PrefixResource(AbstractResource):
- def __init__(self, prefix: str, *, name: Optional[str] = None) -> None:
- assert not prefix or prefix.startswith("/"), prefix
- assert prefix in ("", "/") or not prefix.endswith("/"), prefix
- super().__init__(name=name)
- self._prefix = _requote_path(prefix)
- self._prefix2 = self._prefix + "/"
-
- @property
- def canonical(self) -> str:
- return self._prefix
-
- def add_prefix(self, prefix: str) -> None:
- assert prefix.startswith("/")
- assert not prefix.endswith("/")
- assert len(prefix) > 1
- self._prefix = prefix + self._prefix
- self._prefix2 = self._prefix + "/"
-
- def raw_match(self, prefix: str) -> bool:
- return False
-
- # TODO: impl missing abstract methods
-
-
-class StaticResource(PrefixResource):
- VERSION_KEY = "v"
-
- def __init__(
- self,
- prefix: str,
- directory: PathLike,
- *,
- name: Optional[str] = None,
- expect_handler: Optional[_ExpectHandler] = None,
- chunk_size: int = 256 * 1024,
- show_index: bool = False,
- follow_symlinks: bool = False,
- append_version: bool = False,
- ) -> None:
- super().__init__(prefix, name=name)
- try:
- directory = Path(directory)
- if str(directory).startswith("~"):
- directory = Path(os.path.expanduser(str(directory)))
- directory = directory.resolve()
- if not directory.is_dir():
- raise ValueError("Not a directory")
- except (FileNotFoundError, ValueError) as error:
- raise ValueError(f"No directory exists at '{directory}'") from error
- self._directory = directory
- self._show_index = show_index
- self._chunk_size = chunk_size
- self._follow_symlinks = follow_symlinks
- self._expect_handler = expect_handler
- self._append_version = append_version
-
- self._routes = {
- "GET": ResourceRoute(
- "GET", self._handle, self, expect_handler=expect_handler
- ),
- "HEAD": ResourceRoute(
- "HEAD", self._handle, self, expect_handler=expect_handler
- ),
- }
-
- def url_for( # type: ignore[override]
- self,
- *,
- filename: Union[str, Path],
- append_version: Optional[bool] = None,
- ) -> URL:
- if append_version is None:
- append_version = self._append_version
- if isinstance(filename, Path):
- filename = str(filename)
- filename = filename.lstrip("/")
-
- url = URL.build(path=self._prefix, encoded=True)
- # filename is not encoded
- if YARL_VERSION < (1, 6):
- url = url / filename.replace("%", "%25")
- else:
- url = url / filename
-
- if append_version:
- try:
- filepath = self._directory.joinpath(filename).resolve()
- if not self._follow_symlinks:
- filepath.relative_to(self._directory)
- except (ValueError, FileNotFoundError):
- # ValueError for case when path point to symlink
- # with follow_symlinks is False
- return url # relatively safe
- if filepath.is_file():
- # TODO cache file content
- # with file watcher for cache invalidation
- with filepath.open("rb") as f:
- file_bytes = f.read()
- h = self._get_file_hash(file_bytes)
- url = url.with_query({self.VERSION_KEY: h})
- return url
- return url
-
- @staticmethod
- def _get_file_hash(byte_array: bytes) -> str:
- m = hashlib.sha256() # todo sha256 can be configurable param
- m.update(byte_array)
- b64 = base64.urlsafe_b64encode(m.digest())
- return b64.decode("ascii")
-
- def get_info(self) -> _InfoDict:
- return {
- "directory": self._directory,
- "prefix": self._prefix,
- "routes": self._routes,
- }
-
- def set_options_route(self, handler: Handler) -> None:
- if "OPTIONS" in self._routes:
- raise RuntimeError("OPTIONS route was set already")
- self._routes["OPTIONS"] = ResourceRoute(
- "OPTIONS", handler, self, expect_handler=self._expect_handler
- )
-
- async def resolve(self, request: Request) -> _Resolve:
- path = request.rel_url.raw_path
- method = request.method
- allowed_methods = set(self._routes)
- if not path.startswith(self._prefix2) and path != self._prefix:
- return None, set()
-
- if method not in allowed_methods:
- return None, allowed_methods
-
- match_dict = {"filename": _unquote_path(path[len(self._prefix) + 1 :])}
- return (UrlMappingMatchInfo(match_dict, self._routes[method]), allowed_methods)
-
- def __len__(self) -> int:
- return len(self._routes)
-
- def __iter__(self) -> Iterator[AbstractRoute]:
- return iter(self._routes.values())
-
- async def _handle(self, request: Request) -> StreamResponse:
- rel_url = request.match_info["filename"]
- try:
- filename = Path(rel_url)
- if filename.anchor:
- # rel_url is an absolute name like
- # /static/\\machine_name\c$ or /static/D:\path
- # where the static dir is totally different
- raise HTTPForbidden()
- filepath = self._directory.joinpath(filename).resolve()
- if not self._follow_symlinks:
- filepath.relative_to(self._directory)
- except (ValueError, FileNotFoundError) as error:
- # relatively safe
- raise HTTPNotFound() from error
- except HTTPForbidden:
- raise
- except Exception as error:
- # perm error or other kind!
- request.app.logger.exception(error)
- raise HTTPNotFound() from error
-
- # on opening a dir, load its contents if allowed
- if filepath.is_dir():
- if self._show_index:
- try:
- return Response(
- text=self._directory_as_html(filepath), content_type="text/html"
- )
- except PermissionError:
- raise HTTPForbidden()
- else:
- raise HTTPForbidden()
- elif filepath.is_file():
- return FileResponse(filepath, chunk_size=self._chunk_size)
- else:
- raise HTTPNotFound
-
- def _directory_as_html(self, filepath: Path) -> str:
- # returns directory's index as html
-
- # sanity check
- assert filepath.is_dir()
-
- relative_path_to_dir = filepath.relative_to(self._directory).as_posix()
- index_of = f"Index of /{relative_path_to_dir}"
- h1 = f"{index_of}
"
-
- index_list = []
- dir_index = filepath.iterdir()
- for _file in sorted(dir_index):
- # show file url as relative to static path
- rel_path = _file.relative_to(self._directory).as_posix()
- file_url = self._prefix + "/" + rel_path
-
- # if file is a directory, add '/' to the end of the name
- if _file.is_dir():
- file_name = f"{_file.name}/"
- else:
- file_name = _file.name
-
- index_list.append(
- '<li><a href="{url}">{name}</a></li>'.format(
- url=file_url, name=file_name
- )
- )
- ul = "\n{}\n
".format("\n".join(index_list))
- body = f"\n{h1}\n{ul}\n"
-
- head_str = f"\n{index_of} \n"
- html = f"\n{head_str}\n{body}\n"
-
- return html
-
- def __repr__(self) -> str:
- name = "'" + self.name + "'" if self.name is not None else ""
- return " {directory!r}>".format(
- name=name, path=self._prefix, directory=self._directory
- )
-
-
-class PrefixedSubAppResource(PrefixResource):
- def __init__(self, prefix: str, app: "Application") -> None:
- super().__init__(prefix)
- self._app = app
- for resource in app.router.resources():
- resource.add_prefix(prefix)
-
- def add_prefix(self, prefix: str) -> None:
- super().add_prefix(prefix)
- for resource in self._app.router.resources():
- resource.add_prefix(prefix)
-
- def url_for(self, *args: str, **kwargs: str) -> URL:
- raise RuntimeError(".url_for() is not supported " "by sub-application root")
-
- def get_info(self) -> _InfoDict:
- return {"app": self._app, "prefix": self._prefix}
-
- async def resolve(self, request: Request) -> _Resolve:
- if (
- not request.url.raw_path.startswith(self._prefix2)
- and request.url.raw_path != self._prefix
- ):
- return None, set()
- match_info = await self._app.router.resolve(request)
- match_info.add_app(self._app)
- if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
- methods = match_info.http_exception.allowed_methods
- else:
- methods = set()
- return match_info, methods
-
- def __len__(self) -> int:
- return len(self._app.router.routes())
-
- def __iter__(self) -> Iterator[AbstractRoute]:
- return iter(self._app.router.routes())
-
- def __repr__(self) -> str:
- return " {app!r}>".format(
- prefix=self._prefix, app=self._app
- )
-
-
-class AbstractRuleMatching(abc.ABC):
- @abc.abstractmethod # pragma: no branch
- async def match(self, request: Request) -> bool:
- """Return bool if the request satisfies the criteria"""
-
- @abc.abstractmethod # pragma: no branch
- def get_info(self) -> _InfoDict:
- """Return a dict with additional info useful for introspection"""
-
- @property
- @abc.abstractmethod # pragma: no branch
- def canonical(self) -> str:
- """Return a str"""
-
-
-class Domain(AbstractRuleMatching):
- re_part = re.compile(r"(?!-)[a-z\d-]{1,63}(? None:
- super().__init__()
- self._domain = self.validation(domain)
-
- @property
- def canonical(self) -> str:
- return self._domain
-
- def validation(self, domain: str) -> str:
- if not isinstance(domain, str):
- raise TypeError("Domain must be str")
- domain = domain.rstrip(".").lower()
- if not domain:
- raise ValueError("Domain cannot be empty")
- elif "://" in domain:
- raise ValueError("Scheme not supported")
- url = URL("http://" + domain)
- assert url.raw_host is not None
- if not all(self.re_part.fullmatch(x) for x in url.raw_host.split(".")):
- raise ValueError("Domain not valid")
- if url.port == 80:
- return url.raw_host
- return f"{url.raw_host}:{url.port}"
-
- async def match(self, request: Request) -> bool:
- host = request.headers.get(hdrs.HOST)
- if not host:
- return False
- return self.match_domain(host)
-
- def match_domain(self, host: str) -> bool:
- return host.lower() == self._domain
-
- def get_info(self) -> _InfoDict:
- return {"domain": self._domain}
-
-
-class MaskDomain(Domain):
- re_part = re.compile(r"(?!-)[a-z\d\*-]{1,63}(? None:
- super().__init__(domain)
- mask = self._domain.replace(".", r"\.").replace("*", ".*")
- self._mask = re.compile(mask)
-
- @property
- def canonical(self) -> str:
- return self._mask.pattern
-
- def match_domain(self, host: str) -> bool:
- return self._mask.fullmatch(host) is not None
-
-
-class MatchedSubAppResource(PrefixedSubAppResource):
- def __init__(self, rule: AbstractRuleMatching, app: "Application") -> None:
- AbstractResource.__init__(self)
- self._prefix = ""
- self._app = app
- self._rule = rule
-
- @property
- def canonical(self) -> str:
- return self._rule.canonical
-
- def get_info(self) -> _InfoDict:
- return {"app": self._app, "rule": self._rule}
-
- async def resolve(self, request: Request) -> _Resolve:
- if not await self._rule.match(request):
- return None, set()
- match_info = await self._app.router.resolve(request)
- match_info.add_app(self._app)
- if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
- methods = match_info.http_exception.allowed_methods
- else:
- methods = set()
- return match_info, methods
-
- def __repr__(self) -> str:
- return " {app!r}>" "".format(app=self._app)
-
-
-class ResourceRoute(AbstractRoute):
- """A route with resource"""
-
- def __init__(
- self,
- method: str,
- handler: Union[Handler, Type[AbstractView]],
- resource: AbstractResource,
- *,
- expect_handler: Optional[_ExpectHandler] = None,
- ) -> None:
- super().__init__(
- method, handler, expect_handler=expect_handler, resource=resource
- )
-
- def __repr__(self) -> str:
- return " {handler!r}".format(
- method=self.method, resource=self._resource, handler=self.handler
- )
-
- @property
- def name(self) -> Optional[str]:
- if self._resource is None:
- return None
- return self._resource.name
-
- def url_for(self, *args: str, **kwargs: str) -> URL:
- """Construct url for route with additional params."""
- assert self._resource is not None
- return self._resource.url_for(*args, **kwargs)
-
- def get_info(self) -> _InfoDict:
- assert self._resource is not None
- return self._resource.get_info()
-
-
-class SystemRoute(AbstractRoute):
- def __init__(self, http_exception: HTTPException) -> None:
- super().__init__(hdrs.METH_ANY, self._handle)
- self._http_exception = http_exception
-
- def url_for(self, *args: str, **kwargs: str) -> URL:
- raise RuntimeError(".url_for() is not allowed for SystemRoute")
-
- @property
- def name(self) -> Optional[str]:
- return None
-
- def get_info(self) -> _InfoDict:
- return {"http_exception": self._http_exception}
-
- async def _handle(self, request: Request) -> StreamResponse:
- raise self._http_exception
-
- @property
- def status(self) -> int:
- return self._http_exception.status
-
- @property
- def reason(self) -> str:
- return self._http_exception.reason
-
- def __repr__(self) -> str:
- return "".format(self=self)
-
-
-class View(AbstractView):
- async def _iter(self) -> StreamResponse:
- if self.request.method not in hdrs.METH_ALL:
- self._raise_allowed_methods()
- method: Callable[[], Awaitable[StreamResponse]] = getattr(
- self, self.request.method.lower(), None
- )
- if method is None:
- self._raise_allowed_methods()
- resp = await method()
- return resp
-
- def __await__(self) -> Generator[Any, None, StreamResponse]:
- return self._iter().__await__()
-
- def _raise_allowed_methods(self) -> None:
- allowed_methods = {m for m in hdrs.METH_ALL if hasattr(self, m.lower())}
- raise HTTPMethodNotAllowed(self.request.method, allowed_methods)
-
-
-class ResourcesView(Sized, Iterable[AbstractResource], Container[AbstractResource]):
- def __init__(self, resources: List[AbstractResource]) -> None:
- self._resources = resources
-
- def __len__(self) -> int:
- return len(self._resources)
-
- def __iter__(self) -> Iterator[AbstractResource]:
- yield from self._resources
-
- def __contains__(self, resource: object) -> bool:
- return resource in self._resources
-
-
-class RoutesView(Sized, Iterable[AbstractRoute], Container[AbstractRoute]):
- def __init__(self, resources: List[AbstractResource]):
- self._routes: List[AbstractRoute] = []
- for resource in resources:
- for route in resource:
- self._routes.append(route)
-
- def __len__(self) -> int:
- return len(self._routes)
-
- def __iter__(self) -> Iterator[AbstractRoute]:
- yield from self._routes
-
- def __contains__(self, route: object) -> bool:
- return route in self._routes
-
-
-class UrlDispatcher(AbstractRouter, Mapping[str, AbstractResource]):
-
- NAME_SPLIT_RE = re.compile(r"[.:-]")
-
- def __init__(self) -> None:
- super().__init__()
- self._resources: List[AbstractResource] = []
- self._named_resources: Dict[str, AbstractResource] = {}
-
- async def resolve(self, request: Request) -> UrlMappingMatchInfo:
- method = request.method
- allowed_methods: Set[str] = set()
-
- for resource in self._resources:
- match_dict, allowed = await resource.resolve(request)
- if match_dict is not None:
- return match_dict
- else:
- allowed_methods |= allowed
-
- if allowed_methods:
- return MatchInfoError(HTTPMethodNotAllowed(method, allowed_methods))
- else:
- return MatchInfoError(HTTPNotFound())
-
- def __iter__(self) -> Iterator[str]:
- return iter(self._named_resources)
-
- def __len__(self) -> int:
- return len(self._named_resources)
-
- def __contains__(self, resource: object) -> bool:
- return resource in self._named_resources
-
- def __getitem__(self, name: str) -> AbstractResource:
- return self._named_resources[name]
-
- def resources(self) -> ResourcesView:
- return ResourcesView(self._resources)
-
- def routes(self) -> RoutesView:
- return RoutesView(self._resources)
-
- def named_resources(self) -> Mapping[str, AbstractResource]:
- return MappingProxyType(self._named_resources)
-
- def register_resource(self, resource: AbstractResource) -> None:
- assert isinstance(
- resource, AbstractResource
- ), f"Instance of AbstractResource class is required, got {resource!r}"
- if self.frozen:
- raise RuntimeError("Cannot register a resource into frozen router.")
-
- name = resource.name
-
- if name is not None:
- parts = self.NAME_SPLIT_RE.split(name)
- for part in parts:
- if keyword.iskeyword(part):
- raise ValueError(
- f"Incorrect route name {name!r}, "
- "python keywords cannot be used "
- "for route name"
- )
- if not part.isidentifier():
- raise ValueError(
- "Incorrect route name {!r}, "
- "the name should be a sequence of "
- "python identifiers separated "
- "by dash, dot or column".format(name)
- )
- if name in self._named_resources:
- raise ValueError(
- "Duplicate {!r}, "
- "already handled by {!r}".format(name, self._named_resources[name])
- )
- self._named_resources[name] = resource
- self._resources.append(resource)
-
- def add_resource(self, path: str, *, name: Optional[str] = None) -> Resource:
- if path and not path.startswith("/"):
- raise ValueError("path should be started with / or be empty")
- # Reuse last added resource if path and name are the same
- if self._resources:
- resource = self._resources[-1]
- if resource.name == name and resource.raw_match(path):
- return cast(Resource, resource)
- if not ("{" in path or "}" in path or ROUTE_RE.search(path)):
- resource = PlainResource(_requote_path(path), name=name)
- self.register_resource(resource)
- return resource
- resource = DynamicResource(path, name=name)
- self.register_resource(resource)
- return resource
-
- def add_route(
- self,
- method: str,
- path: str,
- handler: Union[Handler, Type[AbstractView]],
- *,
- name: Optional[str] = None,
- expect_handler: Optional[_ExpectHandler] = None,
- ) -> AbstractRoute:
- resource = self.add_resource(path, name=name)
- return resource.add_route(method, handler, expect_handler=expect_handler)
-
- def add_static(
- self,
- prefix: str,
- path: PathLike,
- *,
- name: Optional[str] = None,
- expect_handler: Optional[_ExpectHandler] = None,
- chunk_size: int = 256 * 1024,
- show_index: bool = False,
- follow_symlinks: bool = False,
- append_version: bool = False,
- ) -> AbstractResource:
- """Add static files view.
-
- prefix - url prefix
- path - folder with files
-
- """
- assert prefix.startswith("/")
- if prefix.endswith("/"):
- prefix = prefix[:-1]
- resource = StaticResource(
- prefix,
- path,
- name=name,
- expect_handler=expect_handler,
- chunk_size=chunk_size,
- show_index=show_index,
- follow_symlinks=follow_symlinks,
- append_version=append_version,
- )
- self.register_resource(resource)
- return resource
-
- def add_head(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
- """Shortcut for add_route with method HEAD."""
- return self.add_route(hdrs.METH_HEAD, path, handler, **kwargs)
-
- def add_options(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
- """Shortcut for add_route with method OPTIONS."""
- return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs)
-
- def add_get(
- self,
- path: str,
- handler: Handler,
- *,
- name: Optional[str] = None,
- allow_head: bool = True,
- **kwargs: Any,
- ) -> AbstractRoute:
- """Shortcut for add_route with method GET.
-
- If allow_head is true, another
- route is added allowing head requests to the same endpoint.
- """
- resource = self.add_resource(path, name=name)
- if allow_head:
- resource.add_route(hdrs.METH_HEAD, handler, **kwargs)
- return resource.add_route(hdrs.METH_GET, handler, **kwargs)
-
- def add_post(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
- """Shortcut for add_route with method POST."""
- return self.add_route(hdrs.METH_POST, path, handler, **kwargs)
-
- def add_put(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
- """Shortcut for add_route with method PUT."""
- return self.add_route(hdrs.METH_PUT, path, handler, **kwargs)
-
- def add_patch(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
- """Shortcut for add_route with method PATCH."""
- return self.add_route(hdrs.METH_PATCH, path, handler, **kwargs)
-
- def add_delete(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
- """Shortcut for add_route with method DELETE."""
- return self.add_route(hdrs.METH_DELETE, path, handler, **kwargs)
-
- def add_view(
- self, path: str, handler: Type[AbstractView], **kwargs: Any
- ) -> AbstractRoute:
- """Shortcut for add_route with ANY methods for a class-based view."""
- return self.add_route(hdrs.METH_ANY, path, handler, **kwargs)
-
- def freeze(self) -> None:
- super().freeze()
- for resource in self._resources:
- resource.freeze()
-
- def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]:
- """Append routes to route table.
-
- Parameter should be a sequence of RouteDef objects.
-
- Returns a list of registered AbstractRoute instances.
- """
- registered_routes = []
- for route_def in routes:
- registered_routes.extend(route_def.register(self))
- return registered_routes
-
-
-def _quote_path(value: str) -> str:
- if YARL_VERSION < (1, 6):
- value = value.replace("%", "%25")
- return URL.build(path=value, encoded=False).raw_path
-
-
-def _unquote_path(value: str) -> str:
- return URL.build(path=value, encoded=True).path
-
-
-def _requote_path(value: str) -> str:
- # Quote non-ascii characters and other characters which must be quoted,
- # but preserve existing %-sequences.
- result = _quote_path(value)
- if "%" in value:
- result = result.replace("%25", "%")
- return result
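In normal aiohttp usage the `UrlDispatcher` above is reached through `Application.router`: `add_get` creates a `PlainResource` or `DynamicResource` behind the scenes, and `add_static` creates a `StaticResource`. A small sketch of that flow (the handlers and paths are invented, and `add_static` will raise if the `./public` directory does not exist):

```python
from aiohttp import web

async def hello(request: web.Request) -> web.Response:
    return web.Response(text="hello")

async def user(request: web.Request) -> web.Response:
    # {name} is captured by a DynamicResource and exposed via match_info
    return web.Response(text=f"user {request.match_info['name']}")

app = web.Application()
app.router.add_get("/", hello, name="index")             # PlainResource
app.router.add_get("/users/{name}", user, name="user")   # DynamicResource
app.router.add_static("/static/", "./public", show_index=True)  # StaticResource

# named resources support reverse URL construction
print(app.router["user"].url_for(name="alice"))  # /users/alice

if __name__ == "__main__":
    web.run_app(app)
```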
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/formatters/other.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/formatters/other.py
deleted file mode 100644
index 990ead480218fdc7ca01ed6d146e47205987b72e..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/formatters/other.py
+++ /dev/null
@@ -1,161 +0,0 @@
-"""
- pygments.formatters.other
- ~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Other formatters: NullFormatter, RawTokenFormatter.
-
- :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from pip._vendor.pygments.formatter import Formatter
-from pip._vendor.pygments.util import get_choice_opt
-from pip._vendor.pygments.token import Token
-from pip._vendor.pygments.console import colorize
-
-__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
-
-
-class NullFormatter(Formatter):
- """
- Output the text unchanged without any formatting.
- """
- name = 'Text only'
- aliases = ['text', 'null']
- filenames = ['*.txt']
-
- def format(self, tokensource, outfile):
- enc = self.encoding
- for ttype, value in tokensource:
- if enc:
- outfile.write(value.encode(enc))
- else:
- outfile.write(value)
-
-
-class RawTokenFormatter(Formatter):
- r"""
- Format tokens as a raw representation for storing token streams.
-
- The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
- be converted to a token stream with the `RawTokenLexer`, described in the
- :doc:`lexer list <lexers>`.
-
- Only two options are accepted:
-
- `compress`
- If set to ``'gz'`` or ``'bz2'``, compress the output with the given
- compression algorithm after encoding (default: ``''``).
- `error_color`
- If set to a color name, highlight error tokens using that color. If
- set but with no value, defaults to ``'red'``.
-
- .. versionadded:: 0.11
-
- """
- name = 'Raw tokens'
- aliases = ['raw', 'tokens']
- filenames = ['*.raw']
-
- unicodeoutput = False
-
- def __init__(self, **options):
- Formatter.__init__(self, **options)
- # We ignore self.encoding if it is set, since it gets set for lexer
- # and formatter if given with -Oencoding on the command line.
- # The RawTokenFormatter outputs only ASCII. Override here.
- self.encoding = 'ascii' # let pygments.format() do the right thing
- self.compress = get_choice_opt(options, 'compress',
- ['', 'none', 'gz', 'bz2'], '')
- self.error_color = options.get('error_color', None)
- if self.error_color is True:
- self.error_color = 'red'
- if self.error_color is not None:
- try:
- colorize(self.error_color, '')
- except KeyError:
- raise ValueError("Invalid color %r specified" %
- self.error_color)
-
- def format(self, tokensource, outfile):
- try:
- outfile.write(b'')
- except TypeError:
- raise TypeError('The raw tokens formatter needs a binary '
- 'output file')
- if self.compress == 'gz':
- import gzip
- outfile = gzip.GzipFile('', 'wb', 9, outfile)
-
- write = outfile.write
- flush = outfile.close
- elif self.compress == 'bz2':
- import bz2
- compressor = bz2.BZ2Compressor(9)
-
- def write(text):
- outfile.write(compressor.compress(text))
-
- def flush():
- outfile.write(compressor.flush())
- outfile.flush()
- else:
- write = outfile.write
- flush = outfile.flush
-
- if self.error_color:
- for ttype, value in tokensource:
- line = b"%r\t%r\n" % (ttype, value)
- if ttype is Token.Error:
- write(colorize(self.error_color, line))
- else:
- write(line)
- else:
- for ttype, value in tokensource:
- write(b"%r\t%r\n" % (ttype, value))
- flush()
-
-
-TESTCASE_BEFORE = '''\
- def testNeedsName(lexer):
- fragment = %r
- tokens = [
-'''
-TESTCASE_AFTER = '''\
- ]
- assert list(lexer.get_tokens(fragment)) == tokens
-'''
-
-
-class TestcaseFormatter(Formatter):
- """
- Format tokens as appropriate for a new testcase.
-
- .. versionadded:: 2.0
- """
- name = 'Testcase'
- aliases = ['testcase']
-
- def __init__(self, **options):
- Formatter.__init__(self, **options)
- if self.encoding is not None and self.encoding != 'utf-8':
- raise ValueError("Only None and utf-8 are allowed encodings.")
-
- def format(self, tokensource, outfile):
- indentation = ' ' * 12
- rawbuf = []
- outbuf = []
- for ttype, value in tokensource:
- rawbuf.append(value)
- outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
-
- before = TESTCASE_BEFORE % (''.join(rawbuf),)
- during = ''.join(outbuf)
- after = TESTCASE_AFTER
- if self.encoding is None:
- outfile.write(before + during + after)
- else:
- outfile.write(before.encode('utf-8'))
- outfile.write(during.encode('utf-8'))
- outfile.write(after.encode('utf-8'))
- outfile.flush()
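These formatters are normally used through Pygments' `highlight` helper. A short sketch, assuming a regular `pygments` install rather than pip's vendored copy (the sample source string is arbitrary):

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import NullFormatter, RawTokenFormatter

source = "print('hi')\n"

# NullFormatter: the text comes back unchanged
print(highlight(source, PythonLexer(), NullFormatter()), end="")

# RawTokenFormatter is binary: each line has the form b"Token.Type\t'value'\n"
raw = highlight(source, PythonLexer(), RawTokenFormatter())
for line in raw.splitlines()[:3]:
    print(line)
```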
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/styled.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/styled.py
deleted file mode 100644
index 91cd0db31c14e30d4c1e2e9f36382b7a5e022870..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/styled.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from typing import TYPE_CHECKING
-
-from .measure import Measurement
-from .segment import Segment
-from .style import StyleType
-
-if TYPE_CHECKING:
- from .console import Console, ConsoleOptions, RenderResult, RenderableType
-
-
-class Styled:
- """Apply a style to a renderable.
-
- Args:
- renderable (RenderableType): Any renderable.
- style (StyleType): A style to apply across the entire renderable.
- """
-
- def __init__(self, renderable: "RenderableType", style: "StyleType") -> None:
- self.renderable = renderable
- self.style = style
-
- def __rich_console__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "RenderResult":
- style = console.get_style(self.style)
- rendered_segments = console.render(self.renderable, options)
- segments = Segment.apply_style(rendered_segments, style)
- return segments
-
- def __rich_measure__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> Measurement:
- return Measurement.get(console, options, self.renderable)
-
-
-if __name__ == "__main__": # pragma: no cover
- from pip._vendor.rich import print
- from pip._vendor.rich.panel import Panel
-
- panel = Styled(Panel("hello"), "on blue")
- print(panel)
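Beyond the built-in `__main__` demo, `Styled` composes with any renderable through a `Console`. A brief sketch, assuming a regular `rich` install rather than pip's vendored copy:

```python
from rich.console import Console
from rich.panel import Panel
from rich.styled import Styled

console = Console()
# the style is applied to every segment produced by the inner renderable
console.print(Styled(Panel("hello"), "bold white on blue"))
```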
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py
deleted file mode 100644
index c466378ceba69a335d2beb4d3af92703d52b3831..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/jaraco/text/__init__.py
+++ /dev/null
@@ -1,599 +0,0 @@
-import re
-import itertools
-import textwrap
-import functools
-
-try:
- from importlib.resources import files # type: ignore
-except ImportError: # pragma: nocover
- from pkg_resources.extern.importlib_resources import files # type: ignore
-
-from pkg_resources.extern.jaraco.functools import compose, method_cache
-from pkg_resources.extern.jaraco.context import ExceptionTrap
-
-
-def substitution(old, new):
- """
- Return a function that will perform a substitution on a string
- """
- return lambda s: s.replace(old, new)
-
-
-def multi_substitution(*substitutions):
- """
- Take a sequence of pairs specifying substitutions, and create
- a function that performs those substitutions.
-
- >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
- 'baz'
- """
- substitutions = itertools.starmap(substitution, substitutions)
- # compose function applies last function first, so reverse the
- # substitutions to get the expected order.
- substitutions = reversed(tuple(substitutions))
- return compose(*substitutions)
-
-
-class FoldedCase(str):
- """
- A case insensitive string class; behaves just like str
- except compares equal when the only variation is case.
-
- >>> s = FoldedCase('hello world')
-
- >>> s == 'Hello World'
- True
-
- >>> 'Hello World' == s
- True
-
- >>> s != 'Hello World'
- False
-
- >>> s.index('O')
- 4
-
- >>> s.split('O')
- ['hell', ' w', 'rld']
-
- >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
- ['alpha', 'Beta', 'GAMMA']
-
- Sequence membership is straightforward.
-
- >>> "Hello World" in [s]
- True
- >>> s in ["Hello World"]
- True
-
- You may test for set inclusion, but candidate and elements
- must both be folded.
-
- >>> FoldedCase("Hello World") in {s}
- True
- >>> s in {FoldedCase("Hello World")}
- True
-
- String inclusion works as long as the FoldedCase object
- is on the right.
-
- >>> "hello" in FoldedCase("Hello World")
- True
-
- But not if the FoldedCase object is on the left:
-
- >>> FoldedCase('hello') in 'Hello World'
- False
-
- In that case, use ``in_``:
-
- >>> FoldedCase('hello').in_('Hello World')
- True
-
- >>> FoldedCase('hello') > FoldedCase('Hello')
- False
- """
-
- def __lt__(self, other):
- return self.lower() < other.lower()
-
- def __gt__(self, other):
- return self.lower() > other.lower()
-
- def __eq__(self, other):
- return self.lower() == other.lower()
-
- def __ne__(self, other):
- return self.lower() != other.lower()
-
- def __hash__(self):
- return hash(self.lower())
-
- def __contains__(self, other):
- return super().lower().__contains__(other.lower())
-
- def in_(self, other):
- "Does self appear in other?"
- return self in FoldedCase(other)
-
- # cache lower since it's likely to be called frequently.
- @method_cache
- def lower(self):
- return super().lower()
-
- def index(self, sub):
- return self.lower().index(sub.lower())
-
- def split(self, splitter=' ', maxsplit=0):
- pattern = re.compile(re.escape(splitter), re.I)
- return pattern.split(self, maxsplit)
-
-
-# Python 3.8 compatibility
-_unicode_trap = ExceptionTrap(UnicodeDecodeError)
-
-
-@_unicode_trap.passes
-def is_decodable(value):
- r"""
- Return True if the supplied value is decodable (using the default
- encoding).
-
- >>> is_decodable(b'\xff')
- False
- >>> is_decodable(b'\x32')
- True
- """
- value.decode()
-
-
-def is_binary(value):
- r"""
- Return True if the value appears to be binary (that is, it's a byte
- string and isn't decodable).
-
- >>> is_binary(b'\xff')
- True
- >>> is_binary('\xff')
- False
- """
- return isinstance(value, bytes) and not is_decodable(value)
-
-
-def trim(s):
- r"""
- Trim something like a docstring to remove the whitespace that
- is common due to indentation and formatting.
-
- >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
- 'foo = bar\n\tbar = baz'
- """
- return textwrap.dedent(s).strip()
-
-
-def wrap(s):
- """
- Wrap lines of text, retaining existing newlines as
- paragraph markers.
-
- >>> print(wrap(lorem_ipsum))
- Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
- eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
- minim veniam, quis nostrud exercitation ullamco laboris nisi ut
- aliquip ex ea commodo consequat. Duis aute irure dolor in
- reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
- pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
- culpa qui officia deserunt mollit anim id est laborum.
-
- Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
- varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
- magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
- gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
- risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
- eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
- fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
- a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
- neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
- sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
- nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
- quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
- molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
- """
- paragraphs = s.splitlines()
- wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
- return '\n\n'.join(wrapped)
-
-
-def unwrap(s):
- r"""
- Given a multi-line string, return an unwrapped version.
-
- >>> wrapped = wrap(lorem_ipsum)
- >>> wrapped.count('\n')
- 20
- >>> unwrapped = unwrap(wrapped)
- >>> unwrapped.count('\n')
- 1
- >>> print(unwrapped)
- Lorem ipsum dolor sit amet, consectetur adipiscing ...
- Curabitur pretium tincidunt lacus. Nulla gravida orci ...
-
- """
- paragraphs = re.split(r'\n\n+', s)
- cleaned = (para.replace('\n', ' ') for para in paragraphs)
- return '\n'.join(cleaned)
-
-
-lorem_ipsum: str = files(__name__).joinpath('Lorem ipsum.txt').read_text()
-
-
-class Splitter(object):
- """object that will split a string with the given arguments for each call
-
- >>> s = Splitter(',')
- >>> s('hello, world, this is your, master calling')
- ['hello', ' world', ' this is your', ' master calling']
- """
-
- def __init__(self, *args):
- self.args = args
-
- def __call__(self, s):
- return s.split(*self.args)
-
-
-def indent(string, prefix=' ' * 4):
- """
- >>> indent('foo')
- ' foo'
- """
- return prefix + string
-
-
-class WordSet(tuple):
- """
- Given an identifier, return the words that identifier represents,
- whether in camel case, underscore-separated, etc.
-
- >>> WordSet.parse("camelCase")
- ('camel', 'Case')
-
- >>> WordSet.parse("under_sep")
- ('under', 'sep')
-
- Acronyms should be retained
-
- >>> WordSet.parse("firstSNL")
- ('first', 'SNL')
-
- >>> WordSet.parse("you_and_I")
- ('you', 'and', 'I')
-
- >>> WordSet.parse("A simple test")
- ('A', 'simple', 'test')
-
- Multiple caps should not interfere with the first cap of another word.
-
- >>> WordSet.parse("myABCClass")
- ('my', 'ABC', 'Class')
-
- The result is a WordSet, so you can get the form you need.
-
- >>> WordSet.parse("myABCClass").underscore_separated()
- 'my_ABC_Class'
-
- >>> WordSet.parse('a-command').camel_case()
- 'ACommand'
-
- >>> WordSet.parse('someIdentifier').lowered().space_separated()
- 'some identifier'
-
- Slices of the result should return another WordSet.
-
- >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
- 'out_of_context'
-
- >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
- 'word set'
-
- >>> example = WordSet.parse('figured it out')
- >>> example.headless_camel_case()
- 'figuredItOut'
- >>> example.dash_separated()
- 'figured-it-out'
-
- """
-
- _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
-
- def capitalized(self):
- return WordSet(word.capitalize() for word in self)
-
- def lowered(self):
- return WordSet(word.lower() for word in self)
-
- def camel_case(self):
- return ''.join(self.capitalized())
-
- def headless_camel_case(self):
- words = iter(self)
- first = next(words).lower()
- new_words = itertools.chain((first,), WordSet(words).camel_case())
- return ''.join(new_words)
-
- def underscore_separated(self):
- return '_'.join(self)
-
- def dash_separated(self):
- return '-'.join(self)
-
- def space_separated(self):
- return ' '.join(self)
-
- def trim_right(self, item):
- """
- Remove the item from the end of the set.
-
- >>> WordSet.parse('foo bar').trim_right('foo')
- ('foo', 'bar')
- >>> WordSet.parse('foo bar').trim_right('bar')
- ('foo',)
- >>> WordSet.parse('').trim_right('bar')
- ()
- """
- return self[:-1] if self and self[-1] == item else self
-
- def trim_left(self, item):
- """
- Remove the item from the beginning of the set.
-
- >>> WordSet.parse('foo bar').trim_left('foo')
- ('bar',)
- >>> WordSet.parse('foo bar').trim_left('bar')
- ('foo', 'bar')
- >>> WordSet.parse('').trim_left('bar')
- ()
- """
- return self[1:] if self and self[0] == item else self
-
- def trim(self, item):
- """
- >>> WordSet.parse('foo bar').trim('foo')
- ('bar',)
- """
- return self.trim_left(item).trim_right(item)
-
- def __getitem__(self, item):
- result = super(WordSet, self).__getitem__(item)
- if isinstance(item, slice):
- result = WordSet(result)
- return result
-
- @classmethod
- def parse(cls, identifier):
- matches = cls._pattern.finditer(identifier)
- return WordSet(match.group(0) for match in matches)
-
- @classmethod
- def from_class_name(cls, subject):
- return cls.parse(subject.__class__.__name__)
-
-
-# for backward compatibility
-words = WordSet.parse
-
-
-def simple_html_strip(s):
- r"""
- Remove HTML from the string `s`.
-
- >>> str(simple_html_strip(''))
- ''
-
- >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
- A stormy day in paradise
-
- >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
- Somebody  tell the truth.
-
- >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
- What about
- multiple lines?
- """
- html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
- texts = (match.group(3) or '' for match in html_stripper.finditer(s))
- return ''.join(texts)
-
-
-class SeparatedValues(str):
- """
- A string separated by a separator. Overrides __iter__ for getting
- the values.
-
- >>> list(SeparatedValues('a,b,c'))
- ['a', 'b', 'c']
-
- Whitespace is stripped and empty values are discarded.
-
- >>> list(SeparatedValues(' a, b , c, '))
- ['a', 'b', 'c']
- """
-
- separator = ','
-
- def __iter__(self):
- parts = self.split(self.separator)
- return filter(None, (part.strip() for part in parts))
-
-
-class Stripper:
- r"""
- Given a series of lines, find the common prefix and strip it from them.
-
- >>> lines = [
- ... 'abcdefg\n',
- ... 'abc\n',
- ... 'abcde\n',
- ... ]
- >>> res = Stripper.strip_prefix(lines)
- >>> res.prefix
- 'abc'
- >>> list(res.lines)
- ['defg\n', '\n', 'de\n']
-
- If no prefix is common, nothing should be stripped.
-
- >>> lines = [
- ... 'abcd\n',
- ... '1234\n',
- ... ]
- >>> res = Stripper.strip_prefix(lines)
- >>> res.prefix
- ''
- >>> list(res.lines)
- ['abcd\n', '1234\n']
- """
-
- def __init__(self, prefix, lines):
- self.prefix = prefix
- self.lines = map(self, lines)
-
- @classmethod
- def strip_prefix(cls, lines):
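- # tee the iterator: fold common_prefix over one copy to find the shared prefix, keep the other copy for streaming the lines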
- prefix_lines, lines = itertools.tee(lines)
- prefix = functools.reduce(cls.common_prefix, prefix_lines)
- return cls(prefix, lines)
-
- def __call__(self, line):
- if not self.prefix:
- return line
- null, prefix, rest = line.partition(self.prefix)
- return rest
-
- @staticmethod
- def common_prefix(s1, s2):
- """
- Return the common prefix of two lines.
- """
- index = min(len(s1), len(s2))
- while s1[:index] != s2[:index]:
- index -= 1
- return s1[:index]
-
-
-def remove_prefix(text, prefix):
- """
- Remove the prefix from the text if it exists.
-
- >>> remove_prefix('underwhelming performance', 'underwhelming ')
- 'performance'
-
- >>> remove_prefix('something special', 'sample')
- 'something special'
- """
- null, prefix, rest = text.rpartition(prefix)
- return rest
-
-
-def remove_suffix(text, suffix):
- """
- Remove the suffix from the text if it exists.
-
- >>> remove_suffix('name.git', '.git')
- 'name'
-
- >>> remove_suffix('something special', 'sample')
- 'something special'
- """
- rest, suffix, null = text.partition(suffix)
- return rest
-
-
-def normalize_newlines(text):
- r"""
- Replace alternate newlines with the canonical newline.
-
- >>> normalize_newlines('Lorem Ipsum\u2029')
- 'Lorem Ipsum\n'
- >>> normalize_newlines('Lorem Ipsum\r\n')
- 'Lorem Ipsum\n'
- >>> normalize_newlines('Lorem Ipsum\x85')
- 'Lorem Ipsum\n'
- """
- newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
- pattern = '|'.join(newlines)
- return re.sub(pattern, '\n', text)
-
-
-def _nonblank(str):
- return str and not str.startswith('#')
-
-
-@functools.singledispatch
-def yield_lines(iterable):
- r"""
- Yield valid lines of a string or iterable.
-
- >>> list(yield_lines(''))
- []
- >>> list(yield_lines(['foo', 'bar']))
- ['foo', 'bar']
- >>> list(yield_lines('foo\nbar'))
- ['foo', 'bar']
- >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
- ['foo', 'baz #comment']
- >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
- ['foo', 'bar', 'baz', 'bing']
- """
- return itertools.chain.from_iterable(map(yield_lines, iterable))
-
-
-@yield_lines.register(str)
-def _(text):
- return filter(_nonblank, map(str.strip, text.splitlines()))
-
-
-def drop_comment(line):
- """
- Drop comments.
-
- >>> drop_comment('foo # bar')
- 'foo'
-
- A hash without a space may be in a URL.
-
- >>> drop_comment('http://example.com/foo#bar')
- 'http://example.com/foo#bar'
- """
- return line.partition(' #')[0]
-
-
-def join_continuation(lines):
- r"""
- Join lines continued by a trailing backslash.
-
- >>> list(join_continuation(['foo \\', 'bar', 'baz']))
- ['foobar', 'baz']
- >>> list(join_continuation(['foo \\', 'bar', 'baz']))
- ['foobar', 'baz']
- >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
- ['foobarbaz']
-
- Not sure why, but...
- The character preceding the backslash is also elided.
-
- >>> list(join_continuation(['goo\\', 'dly']))
- ['godly']
-
- A terrible idea, but...
- If no line is available to continue, suppress the lines.
-
- >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
- ['foo']
- """
- lines = iter(lines)
- for item in lines:
- while item.endswith('\\'):
- try:
- item = item[:-2].strip() + next(lines)
- except StopIteration:
- return
- yield item
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/config.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/config.py
deleted file mode 100644
index 9a4044adaf876f57befa8cf37c5c23f8840a99f4..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/config.py
+++ /dev/null
@@ -1,139 +0,0 @@
-"""distutils.pypirc
-
-Provides the PyPIRCCommand class, the base class for the command classes
-that use .pypirc in the distutils.command package.
-"""
-import os
-from configparser import RawConfigParser
-
-from .cmd import Command
-
-DEFAULT_PYPIRC = """\
-[distutils]
-index-servers =
- pypi
-
-[pypi]
-username:%s
-password:%s
-"""
-
-
-class PyPIRCCommand(Command):
- """Base command that knows how to handle the .pypirc file"""
-
- DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
- DEFAULT_REALM = 'pypi'
- repository = None
- realm = None
-
- user_options = [
- ('repository=', 'r', "url of repository [default: %s]" % DEFAULT_REPOSITORY),
- ('show-response', None, 'display full response text from server'),
- ]
-
- boolean_options = ['show-response']
-
- def _get_rc_file(self):
- """Returns rc file path."""
- return os.path.join(os.path.expanduser('~'), '.pypirc')
-
- def _store_pypirc(self, username, password):
- """Creates a default .pypirc file."""
- rc = self._get_rc_file()
- with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
- f.write(DEFAULT_PYPIRC % (username, password))
-
- def _read_pypirc(self): # noqa: C901
- """Reads the .pypirc file."""
- rc = self._get_rc_file()
- if os.path.exists(rc):
- self.announce('Using PyPI login from %s' % rc)
- repository = self.repository or self.DEFAULT_REPOSITORY
-
- config = RawConfigParser()
- config.read(rc)
- sections = config.sections()
- if 'distutils' in sections:
- # let's get the list of servers
- index_servers = config.get('distutils', 'index-servers')
- _servers = [
- server.strip()
- for server in index_servers.split('\n')
- if server.strip() != ''
- ]
- if _servers == []:
- # nothing set, let's try to get the default pypi
- if 'pypi' in sections:
- _servers = ['pypi']
- else:
- # the file is not properly defined, returning
- # an empty dict
- return {}
- for server in _servers:
- current = {'server': server}
- current['username'] = config.get(server, 'username')
-
- # optional params
- for key, default in (
- ('repository', self.DEFAULT_REPOSITORY),
- ('realm', self.DEFAULT_REALM),
- ('password', None),
- ):
- if config.has_option(server, key):
- current[key] = config.get(server, key)
- else:
- current[key] = default
-
- # work around people having "repository" for the "pypi"
- # section of their config set to the HTTP (rather than
- # HTTPS) URL
- if server == 'pypi' and repository in (
- self.DEFAULT_REPOSITORY,
- 'pypi',
- ):
- current['repository'] = self.DEFAULT_REPOSITORY
- return current
-
- if (
- current['server'] == repository
- or current['repository'] == repository
- ):
- return current
- elif 'server-login' in sections:
- # old format
- server = 'server-login'
- if config.has_option(server, 'repository'):
- repository = config.get(server, 'repository')
- else:
- repository = self.DEFAULT_REPOSITORY
- return {
- 'username': config.get(server, 'username'),
- 'password': config.get(server, 'password'),
- 'repository': repository,
- 'server': server,
- 'realm': self.DEFAULT_REALM,
- }
-
- return {}
-
- def _read_pypi_response(self, response):
- """Read and decode a PyPI HTTP response."""
- import cgi
-
- content_type = response.getheader('content-type', 'text/plain')
- encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii')
- return response.read().decode(encoding)
-
- def initialize_options(self):
- """Initialize options."""
- self.repository = None
- self.realm = None
- self.show_response = 0
-
- def finalize_options(self):
- """Finalizes options."""
- if self.repository is None:
- self.repository = self.DEFAULT_REPOSITORY
- if self.realm is None:
- self.realm = self.DEFAULT_REALM
diff --git a/spaces/TencentARC/VLog/models/grit_src/grit/modeling/text/load_text_token.py b/spaces/TencentARC/VLog/models/grit_src/grit/modeling/text/load_text_token.py
deleted file mode 100644
index 8491021bf5d7d23d7f3826395f270dccad30df36..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/grit/modeling/text/load_text_token.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import torch
-
-
-class LoadTextTokens(object):
- def __init__(self, tokenizer, max_text_len=40, padding='do_not_pad'):
- self.tokenizer = tokenizer
- self.max_text_len = max_text_len
- self.padding = padding
-
- def descriptions_to_text_tokens(self, target, begin_token):
- target_encoding = self.tokenizer(
- target, padding=self.padding,
- add_special_tokens=False,
- truncation=True, max_length=self.max_text_len)
-
- need_predict = [1] * len(target_encoding['input_ids'])
- payload = target_encoding['input_ids']
- if len(payload) > self.max_text_len - 2:
- payload = payload[-(self.max_text_len - 2):]
- need_predict = need_predict[-(self.max_text_len - 2):]
-
- input_ids = [begin_token] + payload + [self.tokenizer.sep_token_id]
-
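- # need_predict: 0 for the begin token (not a prediction target), 1 for each caption token and the trailing SEP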
- need_predict = [0] + need_predict + [1]
- data = {
- 'text_tokens': torch.tensor(input_ids),
- 'text_lengths': len(input_ids),
- 'need_predict': torch.tensor(need_predict),
- }
-
- return data
-
- def __call__(self, object_descriptions, box_features, begin_token):
- text_tokens = []
- text_lengths = []
- need_predict = []
- for description in object_descriptions:
- tokens = self.descriptions_to_text_tokens(description, begin_token)
- text_tokens.append(tokens['text_tokens'])
- text_lengths.append(tokens['text_lengths'])
- need_predict.append(tokens['need_predict'])
-
- text_tokens = torch.cat(self.collate(text_tokens), dim=0).to(box_features.device)
- text_lengths = torch.tensor(text_lengths).to(box_features.device)
- need_predict = torch.cat(self.collate(need_predict), dim=0).to(box_features.device)
-
- assert text_tokens.dim() == 2 and need_predict.dim() == 2
- data = {'text_tokens': text_tokens,
- 'text_lengths': text_lengths,
- 'need_predict': need_predict}
-
- return data
-
- def collate(self, batch):
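- # If tensor shapes differ, zero-pad each tensor to the per-dimension max shape of the batch; every tensor then gets a leading batch axis for the caller to concatenate.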
- if all(isinstance(b, torch.Tensor) for b in batch) and len(batch) > 0:
- if not all(b.shape == batch[0].shape for b in batch[1:]):
- assert all(len(b.shape) == len(batch[0].shape) for b in batch[1:])
- shape = torch.tensor([b.shape for b in batch])
- max_shape = tuple(shape.max(dim=0)[0].tolist())
- batch2 = []
- for b in batch:
- if any(c < m for c, m in zip(b.shape, max_shape)):
- b2 = torch.zeros(max_shape, dtype=b.dtype, device=b.device)
- if b.dim() == 1:
- b2[:b.shape[0]] = b
- elif b.dim() == 2:
- b2[:b.shape[0], :b.shape[1]] = b
- elif b.dim() == 3:
- b2[:b.shape[0], :b.shape[1], :b.shape[2]] = b
- else:
- raise NotImplementedError
- b = b2
- batch2.append(b[None, ...])
- else:
- batch2 = []
- for b in batch:
- batch2.append(b[None, ...])
- return batch2
- else:
- raise NotImplementedError
diff --git a/spaces/Thaweewat/ControlNet-Architecture/ldm/models/diffusion/ddpm.py b/spaces/Thaweewat/ControlNet-Architecture/ldm/models/diffusion/ddpm.py
deleted file mode 100644
index 394d44ad33c9738cc4aea490d080b77e63be6220..0000000000000000000000000000000000000000
--- a/spaces/Thaweewat/ControlNet-Architecture/ldm/models/diffusion/ddpm.py
+++ /dev/null
@@ -1,1797 +0,0 @@
-"""
-wild mixture of
-https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
-https://github.com/CompVis/taming-transformers
--- merci
-"""
-
-import torch
-import torch.nn as nn
-import numpy as np
-import pytorch_lightning as pl
-from torch.optim.lr_scheduler import LambdaLR
-from einops import rearrange, repeat
-from contextlib import contextmanager, nullcontext
-from functools import partial
-import itertools
-from tqdm import tqdm
-from torchvision.utils import make_grid
-from pytorch_lightning.utilities.distributed import rank_zero_only
-from omegaconf import ListConfig
-
-from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
-from ldm.modules.ema import LitEma
-from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
-from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
-from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
-from ldm.models.diffusion.ddim import DDIMSampler
-
-
-__conditioning_keys__ = {'concat': 'c_concat',
- 'crossattn': 'c_crossattn',
- 'adm': 'y'}
-
-
-def disabled_train(self, mode=True):
- """Overwrite model.train with this function to make sure train/eval mode
- does not change anymore."""
- return self
-
-
-def uniform_on_device(r1, r2, shape, device):
- return (r1 - r2) * torch.rand(*shape, device=device) + r2
-
-
-class DDPM(pl.LightningModule):
- # classic DDPM with Gaussian diffusion, in image space
- def __init__(self,
- unet_config,
- timesteps=1000,
- beta_schedule="linear",
- loss_type="l2",
- ckpt_path=None,
- ignore_keys=[],
- load_only_unet=False,
- monitor="val/loss",
- use_ema=True,
- first_stage_key="image",
- image_size=256,
- channels=3,
- log_every_t=100,
- clip_denoised=True,
- linear_start=1e-4,
- linear_end=2e-2,
- cosine_s=8e-3,
- given_betas=None,
- original_elbo_weight=0.,
- v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
- l_simple_weight=1.,
- conditioning_key=None,
- parameterization="eps", # all assuming fixed variance schedules
- scheduler_config=None,
- use_positional_encodings=False,
- learn_logvar=False,
- logvar_init=0.,
- make_it_fit=False,
- ucg_training=None,
- reset_ema=False,
- reset_num_ema_updates=False,
- ):
- super().__init__()
- assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
- self.parameterization = parameterization
- print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
- self.cond_stage_model = None
- self.clip_denoised = clip_denoised
- self.log_every_t = log_every_t
- self.first_stage_key = first_stage_key
- self.image_size = image_size # try conv?
- self.channels = channels
- self.use_positional_encodings = use_positional_encodings
- self.model = DiffusionWrapper(unet_config, conditioning_key)
- count_params(self.model, verbose=True)
- self.use_ema = use_ema
- if self.use_ema:
- self.model_ema = LitEma(self.model)
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
- self.use_scheduler = scheduler_config is not None
- if self.use_scheduler:
- self.scheduler_config = scheduler_config
-
- self.v_posterior = v_posterior
- self.original_elbo_weight = original_elbo_weight
- self.l_simple_weight = l_simple_weight
-
- if monitor is not None:
- self.monitor = monitor
- self.make_it_fit = make_it_fit
- if reset_ema: assert exists(ckpt_path)
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
- if reset_ema:
- assert self.use_ema
- print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
- self.model_ema = LitEma(self.model)
- if reset_num_ema_updates:
- print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
- assert self.use_ema
- self.model_ema.reset_num_updates()
-
- self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
- linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
-
- self.loss_type = loss_type
-
- self.learn_logvar = learn_logvar
- logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
- if self.learn_logvar:
- self.logvar = nn.Parameter(logvar, requires_grad=True)
- else:
- self.register_buffer('logvar', logvar)
-
- self.ucg_training = ucg_training or dict()
- if self.ucg_training:
- self.ucg_prng = np.random.RandomState()
-
- def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- if exists(given_betas):
- betas = given_betas
- else:
- betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
- cosine_s=cosine_s)
- alphas = 1. - betas
- alphas_cumprod = np.cumprod(alphas, axis=0)
- alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
- timesteps, = betas.shape
- self.num_timesteps = int(timesteps)
- self.linear_start = linear_start
- self.linear_end = linear_end
- assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
-
- to_torch = partial(torch.tensor, dtype=torch.float32)
-
- self.register_buffer('betas', to_torch(betas))
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
- self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
-
- # calculations for posterior q(x_{t-1} | x_t, x_0)
- posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
- 1. - alphas_cumprod) + self.v_posterior * betas
- # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
- self.register_buffer('posterior_variance', to_torch(posterior_variance))
- # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
- self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
- self.register_buffer('posterior_mean_coef1', to_torch(
- betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
- self.register_buffer('posterior_mean_coef2', to_torch(
- (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
-
- if self.parameterization == "eps":
- lvlb_weights = self.betas ** 2 / (
- 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
- elif self.parameterization == "x0":
- lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
- elif self.parameterization == "v":
- lvlb_weights = torch.ones_like(self.betas ** 2 / (
- 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
- else:
- raise NotImplementedError("mu not supported")
- lvlb_weights[0] = lvlb_weights[1]
- self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
- assert not torch.isnan(self.lvlb_weights).all()
-
- @contextmanager
- def ema_scope(self, context=None):
- if self.use_ema:
- self.model_ema.store(self.model.parameters())
- self.model_ema.copy_to(self.model)
- if context is not None:
- print(f"{context}: Switched to EMA weights")
- try:
- yield None
- finally:
- if self.use_ema:
- self.model_ema.restore(self.model.parameters())
- if context is not None:
- print(f"{context}: Restored training weights")
-
- @torch.no_grad()
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
- sd = torch.load(path, map_location="cuda")
- if "state_dict" in list(sd.keys()):
- sd = sd["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- if self.make_it_fit:
- n_params = len([name for name, _ in
- itertools.chain(self.named_parameters(),
- self.named_buffers())])
- for name, param in tqdm(
- itertools.chain(self.named_parameters(),
- self.named_buffers()),
- desc="Fitting old weights to new weights",
- total=n_params
- ):
- if not name in sd:
- continue
- old_shape = sd[name].shape
- new_shape = param.shape
- assert len(old_shape) == len(new_shape)
- if len(new_shape) > 2:
- # we only modify first two axes
- assert new_shape[2:] == old_shape[2:]
- # assumes first axis corresponds to output dim
- if not new_shape == old_shape:
- new_param = param.clone()
- old_param = sd[name]
- if len(new_shape) == 1:
- for i in range(new_param.shape[0]):
- new_param[i] = old_param[i % old_shape[0]]
- elif len(new_shape) >= 2:
- for i in range(new_param.shape[0]):
- for j in range(new_param.shape[1]):
- new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]
-
- n_used_old = torch.ones(old_shape[1])
- for j in range(new_param.shape[1]):
- n_used_old[j % old_shape[1]] += 1
- n_used_new = torch.zeros(new_shape[1])
- for j in range(new_param.shape[1]):
- n_used_new[j] = n_used_old[j % old_shape[1]]
-
- n_used_new = n_used_new[None, :]
- while len(n_used_new.shape) < len(new_shape):
- n_used_new = n_used_new.unsqueeze(-1)
- new_param /= n_used_new
-
- sd[name] = new_param
-
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
- sd, strict=False)
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
- print(f"Missing Keys:\n {missing}")
- if len(unexpected) > 0:
- print(f"\nUnexpected Keys:\n {unexpected}")
-
- def q_mean_variance(self, x_start, t):
- """
- Get the distribution q(x_t | x_0).
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
- """
- mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
- variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
- log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
- return mean, variance, log_variance
-
- def predict_start_from_noise(self, x_t, t, noise):
- return (
- extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
- )
-
- def predict_start_from_z_and_v(self, x_t, t, v):
- # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
- # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
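- # v-parameterization: v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0, hence x_0 = sqrt(alpha_bar_t) * x_t - sqrt(1 - alpha_bar_t) * v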
- return (
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
- )
-
- def predict_eps_from_z_and_v(self, x_t, t, v):
- return (
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
- )
-
- def q_posterior(self, x_start, x_t, t):
- posterior_mean = (
- extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
- extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
- )
- posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
- posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
- def p_mean_variance(self, x, t, clip_denoised: bool):
- model_out = self.model(x, t)
- if self.parameterization == "eps":
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
- elif self.parameterization == "x0":
- x_recon = model_out
- if clip_denoised:
- x_recon.clamp_(-1., 1.)
-
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
- return model_mean, posterior_variance, posterior_log_variance
-
- @torch.no_grad()
- def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
- b, *_, device = *x.shape, x.device
- model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
- noise = noise_like(x.shape, device, repeat_noise)
- # no noise when t == 0
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
- @torch.no_grad()
- def p_sample_loop(self, shape, return_intermediates=False):
- device = self.betas.device
- b = shape[0]
- img = torch.randn(shape, device=device)
- intermediates = [img]
- for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
- img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
- clip_denoised=self.clip_denoised)
- if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
- intermediates.append(img)
- if return_intermediates:
- return img, intermediates
- return img
-
- @torch.no_grad()
- def sample(self, batch_size=16, return_intermediates=False):
- image_size = self.image_size
- channels = self.channels
- return self.p_sample_loop((batch_size, channels, image_size, image_size),
- return_intermediates=return_intermediates)
-
- def q_sample(self, x_start, t, noise=None):
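- # forward process sample: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise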
- noise = default(noise, lambda: torch.randn_like(x_start))
- return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
-
- def get_v(self, x, noise, t):
- return (
- extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
- )
-
- def get_loss(self, pred, target, mean=True):
- if self.loss_type == 'l1':
- loss = (target - pred).abs()
- if mean:
- loss = loss.mean()
- elif self.loss_type == 'l2':
- if mean:
- loss = torch.nn.functional.mse_loss(target, pred)
- else:
- loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
- else:
- raise NotImplementedError("unknown loss type '{loss_type}'")
-
- return loss
-
- def p_losses(self, x_start, t, noise=None):
- noise = default(noise, lambda: torch.randn_like(x_start))
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- model_out = self.model(x_noisy, t)
-
- loss_dict = {}
- if self.parameterization == "eps":
- target = noise
- elif self.parameterization == "x0":
- target = x_start
- elif self.parameterization == "v":
- target = self.get_v(x_start, noise, t)
- else:
- raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
-
- loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
-
- log_prefix = 'train' if self.training else 'val'
-
- loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
- loss_simple = loss.mean() * self.l_simple_weight
-
- loss_vlb = (self.lvlb_weights[t] * loss).mean()
- loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
-
- loss = loss_simple + self.original_elbo_weight * loss_vlb
-
- loss_dict.update({f'{log_prefix}/loss': loss})
-
- return loss, loss_dict
-
- def forward(self, x, *args, **kwargs):
- # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
- # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
- return self.p_losses(x, t, *args, **kwargs)
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = rearrange(x, 'b h w c -> b c h w')
- x = x.to(memory_format=torch.contiguous_format).float()
- return x
-
- def shared_step(self, batch):
- x = self.get_input(batch, self.first_stage_key)
- loss, loss_dict = self(x)
- return loss, loss_dict
-
- def training_step(self, batch, batch_idx):
- for k in self.ucg_training:
- p = self.ucg_training[k]["p"]
- val = self.ucg_training[k]["val"]
- if val is None:
- val = ""
- for i in range(len(batch[k])):
- if self.ucg_prng.choice(2, p=[1 - p, p]):
- batch[k][i] = val
-
- loss, loss_dict = self.shared_step(batch)
-
- self.log_dict(loss_dict, prog_bar=True,
- logger=True, on_step=True, on_epoch=True)
-
- self.log("global_step", self.global_step,
- prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
- if self.use_scheduler:
- lr = self.optimizers().param_groups[0]['lr']
- self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
- return loss
-
- @torch.no_grad()
- def validation_step(self, batch, batch_idx):
- _, loss_dict_no_ema = self.shared_step(batch)
- with self.ema_scope():
- _, loss_dict_ema = self.shared_step(batch)
- loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
- self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
- self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
-
- def on_train_batch_end(self, *args, **kwargs):
- if self.use_ema:
- self.model_ema(self.model)
-
- def _get_rows_from_list(self, samples):
- n_imgs_per_row = len(samples)
- denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
- return denoise_grid
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
- log = dict()
- x = self.get_input(batch, self.first_stage_key)
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- x = x.to(self.device)[:N]
- log["inputs"] = x
-
- # get diffusion row
- diffusion_row = list()
- x_start = x[:n_row]
-
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(x_start)
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- diffusion_row.append(x_noisy)
-
- log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
-
- if sample:
- # get denoise row
- with self.ema_scope("Plotting"):
- samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
-
- log["samples"] = samples
- log["denoise_row"] = self._get_rows_from_list(denoise_row)
-
- if return_keys:
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
- return log
- else:
- return {key: log[key] for key in return_keys}
- return log
-
- def configure_optimizers(self):
- lr = self.learning_rate
- params = list(self.model.parameters())
- if self.learn_logvar:
- params = params + [self.logvar]
- opt = torch.optim.AdamW(params, lr=lr)
- return opt
-
-
-class LatentDiffusion(DDPM):
- """main class"""
-
- def __init__(self,
- first_stage_config,
- cond_stage_config,
- num_timesteps_cond=None,
- cond_stage_key="image",
- cond_stage_trainable=False,
- concat_mode=True,
- cond_stage_forward=None,
- conditioning_key=None,
- scale_factor=1.0,
- scale_by_std=False,
- force_null_conditioning=False,
- *args, **kwargs):
- self.force_null_conditioning = force_null_conditioning
- self.num_timesteps_cond = default(num_timesteps_cond, 1)
- self.scale_by_std = scale_by_std
- assert self.num_timesteps_cond <= kwargs['timesteps']
- # for backwards compatibility after implementation of DiffusionWrapper
- if conditioning_key is None:
- conditioning_key = 'concat' if concat_mode else 'crossattn'
- if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:
- conditioning_key = None
- ckpt_path = kwargs.pop("ckpt_path", None)
- reset_ema = kwargs.pop("reset_ema", False)
- reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
- ignore_keys = kwargs.pop("ignore_keys", [])
- super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
- self.concat_mode = concat_mode
- self.cond_stage_trainable = cond_stage_trainable
- self.cond_stage_key = cond_stage_key
- try:
- self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
- except:
- self.num_downs = 0
- if not scale_by_std:
- self.scale_factor = scale_factor
- else:
- self.register_buffer('scale_factor', torch.tensor(scale_factor))
- self.instantiate_first_stage(first_stage_config)
- self.instantiate_cond_stage(cond_stage_config)
- self.cond_stage_forward = cond_stage_forward
- self.clip_denoised = False
- self.bbox_tokenizer = None
-
- self.restarted_from_ckpt = False
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys)
- self.restarted_from_ckpt = True
- if reset_ema:
- assert self.use_ema
- print(
- f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
- self.model_ema = LitEma(self.model)
- if reset_num_ema_updates:
- print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
- assert self.use_ema
- self.model_ema.reset_num_updates()
-
- def make_cond_schedule(self, ):
- self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
- ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
- self.cond_ids[:self.num_timesteps_cond] = ids
-
- @rank_zero_only
- @torch.no_grad()
- def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
- # only for very first batch
- if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
- assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
- # set rescale weight to 1./std of encodings
- print("### USING STD-RESCALING ###")
- x = super().get_input(batch, self.first_stage_key)
- x = x.to(self.device)
- encoder_posterior = self.encode_first_stage(x)
- z = self.get_first_stage_encoding(encoder_posterior).detach()
- del self.scale_factor
- self.register_buffer('scale_factor', 1. / z.flatten().std())
- print(f"setting self.scale_factor to {self.scale_factor}")
- print("### USING STD-RESCALING ###")
-
- def register_schedule(self,
- given_betas=None, beta_schedule="linear", timesteps=1000,
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
-
- self.shorten_cond_schedule = self.num_timesteps_cond > 1
- if self.shorten_cond_schedule:
- self.make_cond_schedule()
-
- def instantiate_first_stage(self, config):
- model = instantiate_from_config(config)
- self.first_stage_model = model.eval()
- self.first_stage_model.train = disabled_train
- for param in self.first_stage_model.parameters():
- param.requires_grad = False
-
- def instantiate_cond_stage(self, config):
- if not self.cond_stage_trainable:
- if config == "__is_first_stage__":
- print("Using first stage also as cond stage.")
- self.cond_stage_model = self.first_stage_model
- elif config == "__is_unconditional__":
- print(f"Training {self.__class__.__name__} as an unconditional model.")
- self.cond_stage_model = None
- # self.be_unconditional = True
- else:
- model = instantiate_from_config(config)
- self.cond_stage_model = model.eval()
- self.cond_stage_model.train = disabled_train
- for param in self.cond_stage_model.parameters():
- param.requires_grad = False
- else:
- assert config != '__is_first_stage__'
- assert config != '__is_unconditional__'
- model = instantiate_from_config(config)
- self.cond_stage_model = model
-
- def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
- denoise_row = []
- for zd in tqdm(samples, desc=desc):
- denoise_row.append(self.decode_first_stage(zd.to(self.device),
- force_not_quantize=force_no_decoder_quantization))
- n_imgs_per_row = len(denoise_row)
- denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
- denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
- return denoise_grid
-
- def get_first_stage_encoding(self, encoder_posterior):
- if isinstance(encoder_posterior, DiagonalGaussianDistribution):
- z = encoder_posterior.sample()
- elif isinstance(encoder_posterior, torch.Tensor):
- z = encoder_posterior
- else:
- raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
- return self.scale_factor * z
-
- def get_learned_conditioning(self, c):
- if self.cond_stage_forward is None:
- if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
- c = self.cond_stage_model.encode(c)
- if isinstance(c, DiagonalGaussianDistribution):
- c = c.mode()
- else:
- c = self.cond_stage_model(c)
- else:
- assert hasattr(self.cond_stage_model, self.cond_stage_forward)
- c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
- return c
-
- def meshgrid(self, h, w):
- y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
- x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
-
- arr = torch.cat([y, x], dim=-1)
- return arr
-
- def delta_border(self, h, w):
- """
- :param h: height
- :param w: width
- :return: normalized distance to image border,
- with min distance = 0 at border and max dist = 0.5 at image center
- """
- lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
- arr = self.meshgrid(h, w) / lower_right_corner
- dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
- dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
- edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
- return edge_dist
-
- def get_weighting(self, h, w, Ly, Lx, device):
- weighting = self.delta_border(h, w)
- weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
- self.split_input_params["clip_max_weight"], )
- weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
-
- if self.split_input_params["tie_braker"]:
- L_weighting = self.delta_border(Ly, Lx)
- L_weighting = torch.clip(L_weighting,
- self.split_input_params["clip_min_tie_weight"],
- self.split_input_params["clip_max_tie_weight"])
-
- L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
- weighting = weighting * L_weighting
- return weighting
-
- def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
- """
- :param x: img of size (bs, c, h, w)
- :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
- """
- bs, nc, h, w = x.shape
-
- # number of crops in image
- Ly = (h - kernel_size[0]) // stride[0] + 1
- Lx = (w - kernel_size[1]) // stride[1] + 1
-
- if uf == 1 and df == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
-
- weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
-
- elif uf > 1 and df == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
- dilation=1, padding=0,
- stride=(stride[0] * uf, stride[1] * uf))
- fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
-
- weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
-
- elif df > 1 and uf == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
- dilation=1, padding=0,
- stride=(stride[0] // df, stride[1] // df))
- fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
-
- weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
-
- else:
- raise NotImplementedError
-
- return fold, unfold, normalization, weighting
-
- @torch.no_grad()
- def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
- cond_key=None, return_original_cond=False, bs=None, return_x=False):
- x = super().get_input(batch, k)
- if bs is not None:
- x = x[:bs]
- x = x.to(self.device)
- encoder_posterior = self.encode_first_stage(x)
- z = self.get_first_stage_encoding(encoder_posterior).detach()
-
- if self.model.conditioning_key is not None and not self.force_null_conditioning:
- if cond_key is None:
- cond_key = self.cond_stage_key
- if cond_key != self.first_stage_key:
- if cond_key in ['caption', 'coordinates_bbox', "txt"]:
- xc = batch[cond_key]
- elif cond_key in ['class_label', 'cls']:
- xc = batch
- else:
- xc = super().get_input(batch, cond_key).to(self.device)
- else:
- xc = x
- if not self.cond_stage_trainable or force_c_encode:
- if isinstance(xc, dict) or isinstance(xc, list):
- c = self.get_learned_conditioning(xc)
- else:
- c = self.get_learned_conditioning(xc.to(self.device))
- else:
- c = xc
- if bs is not None:
- c = c[:bs]
-
- if self.use_positional_encodings:
- pos_x, pos_y = self.compute_latent_shifts(batch)
- ckey = __conditioning_keys__[self.model.conditioning_key]
- c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
-
- else:
- c = None
- xc = None
- if self.use_positional_encodings:
- pos_x, pos_y = self.compute_latent_shifts(batch)
- c = {'pos_x': pos_x, 'pos_y': pos_y}
- out = [z, c]
- if return_first_stage_outputs:
- xrec = self.decode_first_stage(z)
- out.extend([x, xrec])
- if return_x:
- out.extend([x])
- if return_original_cond:
- out.append(xc)
- return out
-
- @torch.no_grad()
- def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
- if predict_cids:
- if z.dim() == 4:
- z = torch.argmax(z.exp(), dim=1).long()
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
- z = 1. / self.scale_factor * z
- return self.first_stage_model.decode(z)
-
- @torch.no_grad()
- def encode_first_stage(self, x):
- return self.first_stage_model.encode(x)
-
- def shared_step(self, batch, **kwargs):
- x, c = self.get_input(batch, self.first_stage_key)
- loss = self(x, c)
- return loss
-
- def forward(self, x, c, *args, **kwargs):
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
- if self.model.conditioning_key is not None:
- assert c is not None
- if self.cond_stage_trainable:
- c = self.get_learned_conditioning(c)
- if self.shorten_cond_schedule: # TODO: drop this option
- tc = self.cond_ids[t].to(self.device)
- c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
- return self.p_losses(x, c, t, *args, **kwargs)
-
- def apply_model(self, x_noisy, t, cond, return_ids=False):
- if isinstance(cond, dict):
- # hybrid case, cond is expected to be a dict
- pass
- else:
- if not isinstance(cond, list):
- cond = [cond]
- key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
- cond = {key: cond}
-
- x_recon = self.model(x_noisy, t, **cond)
-
- if isinstance(x_recon, tuple) and not return_ids:
- return x_recon[0]
- else:
- return x_recon
-
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
- return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
-
- def _prior_bpd(self, x_start):
- """
- Get the prior KL term for the variational lower-bound, measured in
- bits-per-dim.
- This term can't be optimized, as it only depends on the encoder.
- :param x_start: the [N x C x ...] tensor of inputs.
- :return: a batch of [N] KL values (in bits), one per batch element.
- """
- batch_size = x_start.shape[0]
- t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
- kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
- return mean_flat(kl_prior) / np.log(2.0)
-
- def p_losses(self, x_start, cond, t, noise=None):
- noise = default(noise, lambda: torch.randn_like(x_start))
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- model_output = self.apply_model(x_noisy, t, cond)
-
- loss_dict = {}
- prefix = 'train' if self.training else 'val'
-
- if self.parameterization == "x0":
- target = x_start
- elif self.parameterization == "eps":
- target = noise
- elif self.parameterization == "v":
- target = self.get_v(x_start, noise, t)
- else:
- raise NotImplementedError()
-
- loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
- loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
-
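- # per-timestep uncertainty weighting: divide by exp(logvar_t) and add logvar_t (constant at logvar_init unless learn_logvar is set)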
- logvar_t = self.logvar[t].to(self.device)
- loss = loss_simple / torch.exp(logvar_t) + logvar_t
- # loss = loss_simple / torch.exp(self.logvar) + self.logvar
- if self.learn_logvar:
- loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
- loss_dict.update({'logvar': self.logvar.data.mean()})
-
- loss = self.l_simple_weight * loss.mean()
-
- loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
- loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
- loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
- loss += (self.original_elbo_weight * loss_vlb)
- loss_dict.update({f'{prefix}/loss': loss})
-
- return loss, loss_dict
-
- def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
- return_x0=False, score_corrector=None, corrector_kwargs=None):
- t_in = t
- model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
-
- if score_corrector is not None:
- assert self.parameterization == "eps"
- model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
-
- if return_codebook_ids:
- model_out, logits = model_out
-
- if self.parameterization == "eps":
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
- elif self.parameterization == "x0":
- x_recon = model_out
- else:
- raise NotImplementedError()
-
- if clip_denoised:
- x_recon.clamp_(-1., 1.)
- if quantize_denoised:
- x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
- if return_codebook_ids:
- return model_mean, posterior_variance, posterior_log_variance, logits
- elif return_x0:
- return model_mean, posterior_variance, posterior_log_variance, x_recon
- else:
- return model_mean, posterior_variance, posterior_log_variance
-
- @torch.no_grad()
- def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
- return_codebook_ids=False, quantize_denoised=False, return_x0=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
- b, *_, device = *x.shape, x.device
- outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
- return_codebook_ids=return_codebook_ids,
- quantize_denoised=quantize_denoised,
- return_x0=return_x0,
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
- if return_codebook_ids:
- raise DeprecationWarning("Support dropped.")
- model_mean, _, model_log_variance, logits = outputs
- elif return_x0:
- model_mean, _, model_log_variance, x0 = outputs
- else:
- model_mean, _, model_log_variance = outputs
-
- noise = noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- # no noise when t == 0
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-
- if return_codebook_ids:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
- if return_x0:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
- else:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
- @torch.no_grad()
- def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
- img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
- score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
- log_every_t=None):
- if not log_every_t:
- log_every_t = self.log_every_t
- timesteps = self.num_timesteps
- if batch_size is not None:
- b = batch_size if batch_size is not None else shape[0]
- shape = [batch_size] + list(shape)
- else:
- b = batch_size = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=self.device)
- else:
- img = x_T
- intermediates = []
- if cond is not None:
- if isinstance(cond, dict):
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
- else:
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-
- if start_T is not None:
- timesteps = min(timesteps, start_T)
- iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
- total=timesteps) if verbose else reversed(
- range(0, timesteps))
- if type(temperature) == float:
- temperature = [temperature] * timesteps
-
- for i in iterator:
- ts = torch.full((b,), i, device=self.device, dtype=torch.long)
- if self.shorten_cond_schedule:
- assert self.model.conditioning_key != 'hybrid'
- tc = self.cond_ids[ts].to(cond.device)
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
- img, x0_partial = self.p_sample(img, cond, ts,
- clip_denoised=self.clip_denoised,
- quantize_denoised=quantize_denoised, return_x0=True,
- temperature=temperature[i], noise_dropout=noise_dropout,
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
- if mask is not None:
- assert x0 is not None
- img_orig = self.q_sample(x0, ts)
- img = img_orig * mask + (1. - mask) * img
-
- if i % log_every_t == 0 or i == timesteps - 1:
- intermediates.append(x0_partial)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
- return img, intermediates
-
- @torch.no_grad()
- def p_sample_loop(self, cond, shape, return_intermediates=False,
- x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, img_callback=None, start_T=None,
- log_every_t=None):
-
- if not log_every_t:
- log_every_t = self.log_every_t
- device = self.betas.device
- b = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=device)
- else:
- img = x_T
-
- intermediates = [img]
- if timesteps is None:
- timesteps = self.num_timesteps
-
- if start_T is not None:
- timesteps = min(timesteps, start_T)
- iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
- range(0, timesteps))
-
- if mask is not None:
- assert x0 is not None
- assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
-
- for i in iterator:
- ts = torch.full((b,), i, device=device, dtype=torch.long)
- if self.shorten_cond_schedule:
- assert self.model.conditioning_key != 'hybrid'
- tc = self.cond_ids[ts].to(cond.device)
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
- img = self.p_sample(img, cond, ts,
- clip_denoised=self.clip_denoised,
- quantize_denoised=quantize_denoised)
- if mask is not None:
- img_orig = self.q_sample(x0, ts)
- img = img_orig * mask + (1. - mask) * img
-
- if i % log_every_t == 0 or i == timesteps - 1:
- intermediates.append(img)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
-
- if return_intermediates:
- return img, intermediates
- return img
-
- @torch.no_grad()
- def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
- verbose=True, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, shape=None, **kwargs):
- if shape is None:
- shape = (batch_size, self.channels, self.image_size, self.image_size)
- if cond is not None:
- if isinstance(cond, dict):
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
- else:
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
- return self.p_sample_loop(cond,
- shape,
- return_intermediates=return_intermediates, x_T=x_T,
- verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
- mask=mask, x0=x0)
-
- @torch.no_grad()
- def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
- if ddim:
- ddim_sampler = DDIMSampler(self)
- shape = (self.channels, self.image_size, self.image_size)
- samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
- shape, cond, verbose=False, **kwargs)
-
- else:
- samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
- return_intermediates=True, **kwargs)
-
- return samples, intermediates
-
- @torch.no_grad()
- def get_unconditional_conditioning(self, batch_size, null_label=None):
- if null_label is not None:
- xc = null_label
- if isinstance(xc, ListConfig):
- xc = list(xc)
- if isinstance(xc, dict) or isinstance(xc, list):
- c = self.get_learned_conditioning(xc)
- else:
- if hasattr(xc, "to"):
- xc = xc.to(self.device)
- c = self.get_learned_conditioning(xc)
- else:
- if self.cond_stage_key in ["class_label", "cls"]:
- xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
- return self.get_learned_conditioning(xc)
- else:
- raise NotImplementedError("todo")
- if isinstance(c, list): # in case the encoder gives us a list
- for i in range(len(c)):
- c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
- else:
- c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
- return c
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
- use_ema_scope=True,
- **kwargs):
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
- use_ddim = ddim_steps is not None
-
- log = dict()
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
- return_first_stage_outputs=True,
- force_c_encode=True,
- return_original_cond=True,
- bs=N)
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- log["inputs"] = x
- log["reconstruction"] = xrec
- if self.model.conditioning_key is not None:
- if hasattr(self.cond_stage_model, "decode"):
- xc = self.cond_stage_model.decode(c)
- log["conditioning"] = xc
- elif self.cond_stage_key in ["caption", "txt"]:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
- log["conditioning"] = xc
- elif self.cond_stage_key in ['class_label', "cls"]:
- try:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
- log['conditioning'] = xc
- except KeyError:
- # probably no "human_label" in batch
- pass
- elif isimage(xc):
- log["conditioning"] = xc
- if ismap(xc):
- log["original_conditioning"] = self.to_rgb(xc)
-
- if plot_diffusion_rows:
- # get diffusion row
- diffusion_row = list()
- z_start = z[:n_row]
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(z_start)
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
- diffusion_row.append(self.decode_first_stage(z_noisy))
-
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
- log["diffusion_row"] = diffusion_grid
-
- if sample:
- # get denoise row
- with ema_scope("Sampling"):
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
- x_samples = self.decode_first_stage(samples)
- log["samples"] = x_samples
- if plot_denoise_rows:
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
- log["denoise_row"] = denoise_grid
-
- if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
- self.first_stage_model, IdentityFirstStage):
- # also display when quantizing x0 while sampling
- with ema_scope("Plotting Quantized Denoised"):
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta,
- quantize_denoised=True)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
- # quantize_denoised=True)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_x0_quantized"] = x_samples
-
- if unconditional_guidance_scale > 1.0:
- uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
- if self.model.conditioning_key == "crossattn-adm":
- uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
- with ema_scope("Sampling with classifier-free guidance"):
- samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=uc,
- )
- x_samples_cfg = self.decode_first_stage(samples_cfg)
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
- if inpaint:
- # make a simple center square
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
- mask = torch.ones(N, h, w).to(self.device)
- # zeros will be filled in
- mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
- mask = mask[:, None, ...]
- with ema_scope("Plotting Inpaint"):
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_inpainting"] = x_samples
- log["mask"] = mask
-
- # outpaint
- mask = 1. - mask
- with ema_scope("Plotting Outpaint"):
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_outpainting"] = x_samples
-
- if plot_progressive_rows:
- with ema_scope("Plotting Progressives"):
- img, progressives = self.progressive_denoising(c,
- shape=(self.channels, self.image_size, self.image_size),
- batch_size=N)
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
- log["progressive_row"] = prog_row
-
- if return_keys:
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
- return log
- else:
- return {key: log[key] for key in return_keys}
- return log
-
- def configure_optimizers(self):
- lr = self.learning_rate
- params = list(self.model.parameters())
- if self.cond_stage_trainable:
- print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
- params = params + list(self.cond_stage_model.parameters())
- if self.learn_logvar:
- print('Diffusion model optimizing logvar')
- params.append(self.logvar)
- opt = torch.optim.AdamW(params, lr=lr)
- if self.use_scheduler:
- assert 'target' in self.scheduler_config
- scheduler = instantiate_from_config(self.scheduler_config)
-
- print("Setting up LambdaLR scheduler...")
- scheduler = [
- {
- 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- }]
- return [opt], scheduler
- return opt
-
- @torch.no_grad()
- def to_rgb(self, x):
- x = x.float()
- if not hasattr(self, "colorize"):
- self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
- x = nn.functional.conv2d(x, weight=self.colorize)
- x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
- return x
-
-
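-# How `unconditional_guidance_scale` in `log_images` above is typically consumed by the
-# sampler. Illustrative sketch only, assuming the standard classifier-free guidance rule;
-# the actual mixing happens inside DDIMSampler, which is defined outside this file:
-#
-#   e_uncond = unet(x_noisy, t, context=uc)
-#   e_cond   = unet(x_noisy, t, context=c)
-#   e_hat    = e_uncond + unconditional_guidance_scale * (e_cond - e_uncond)
-
-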
-class DiffusionWrapper(pl.LightningModule):
- def __init__(self, diff_model_config, conditioning_key):
- super().__init__()
- self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False)
- self.diffusion_model = instantiate_from_config(diff_model_config)
- self.conditioning_key = conditioning_key
- assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']
-
- def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None):
- if self.conditioning_key is None:
- out = self.diffusion_model(x, t)
- elif self.conditioning_key == 'concat':
- xc = torch.cat([x] + c_concat, dim=1)
- out = self.diffusion_model(xc, t)
- elif self.conditioning_key == 'crossattn':
- if not self.sequential_cross_attn:
- cc = torch.cat(c_crossattn, 1)
- else:
- cc = c_crossattn
- out = self.diffusion_model(x, t, context=cc)
- elif self.conditioning_key == 'hybrid':
- xc = torch.cat([x] + c_concat, dim=1)
- cc = torch.cat(c_crossattn, 1)
- out = self.diffusion_model(xc, t, context=cc)
- elif self.conditioning_key == 'hybrid-adm':
- assert c_adm is not None
- xc = torch.cat([x] + c_concat, dim=1)
- cc = torch.cat(c_crossattn, 1)
- out = self.diffusion_model(xc, t, context=cc, y=c_adm)
- elif self.conditioning_key == 'crossattn-adm':
- assert c_adm is not None
- cc = torch.cat(c_crossattn, 1)
- out = self.diffusion_model(x, t, context=cc, y=c_adm)
- elif self.conditioning_key == 'adm':
- cc = c_crossattn[0]
- out = self.diffusion_model(x, t, y=cc)
- else:
- raise NotImplementedError()
-
- return out
-
-
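-# Shape of the conditioning interface above (illustrative only): callers pass the
-# conditioning dict as keyword arguments, e.g. for a 'hybrid' model
-#
-#   cond = {"c_concat": [masked_image_latent, mask], "c_crossattn": [text_embedding]}
-#   eps = diffusion_wrapper(x_noisy, t, **cond)
-#
-# so the `c_concat` entries are concatenated with x_noisy along the channel axis and the
-# `c_crossattn` entries become the cross-attention context.
-
-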
-class LatentUpscaleDiffusion(LatentDiffusion):
- def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs):
- super().__init__(*args, **kwargs)
- # assumes that neither the cond_stage nor the low_scale_model contain trainable params
- assert not self.cond_stage_trainable
- self.instantiate_low_stage(low_scale_config)
- self.low_scale_key = low_scale_key
- self.noise_level_key = noise_level_key
-
- def instantiate_low_stage(self, config):
- model = instantiate_from_config(config)
- self.low_scale_model = model.eval()
- self.low_scale_model.train = disabled_train
- for param in self.low_scale_model.parameters():
- param.requires_grad = False
-
- @torch.no_grad()
- def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False):
- if not log_mode:
- z, c = super().get_input(batch, k, force_c_encode=True, bs=bs)
- else:
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
- force_c_encode=True, return_original_cond=True, bs=bs)
- x_low = batch[self.low_scale_key][:bs]
- x_low = rearrange(x_low, 'b h w c -> b c h w')
- x_low = x_low.to(memory_format=torch.contiguous_format).float()
- zx, noise_level = self.low_scale_model(x_low)
- if self.noise_level_key is not None:
- # get noise level from batch instead, e.g. when extracting a custom noise level for bsr
- raise NotImplementedError('TODO')
-
- all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
- if log_mode:
- # TODO: maybe disable if too expensive
- x_low_rec = self.low_scale_model.decode(zx)
- return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level
- return z, all_conds
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
- plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True,
- unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True,
- **kwargs):
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
- use_ddim = ddim_steps is not None
-
- log = dict()
- z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N,
- log_mode=True)
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- log["inputs"] = x
- log["reconstruction"] = xrec
- log["x_lr"] = x_low
- log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec
- if self.model.conditioning_key is not None:
- if hasattr(self.cond_stage_model, "decode"):
- xc = self.cond_stage_model.decode(c)
- log["conditioning"] = xc
- elif self.cond_stage_key in ["caption", "txt"]:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
- log["conditioning"] = xc
- elif self.cond_stage_key in ['class_label', 'cls']:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
- log['conditioning'] = xc
- elif isimage(xc):
- log["conditioning"] = xc
- if ismap(xc):
- log["original_conditioning"] = self.to_rgb(xc)
-
- if plot_diffusion_rows:
- # get diffusion row
- diffusion_row = list()
- z_start = z[:n_row]
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(z_start)
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
- diffusion_row.append(self.decode_first_stage(z_noisy))
-
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
- log["diffusion_row"] = diffusion_grid
-
- if sample:
- # get denoise row
- with ema_scope("Sampling"):
- samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
- x_samples = self.decode_first_stage(samples)
- log["samples"] = x_samples
- if plot_denoise_rows:
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
- log["denoise_row"] = denoise_grid
-
- if unconditional_guidance_scale > 1.0:
- uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label)
- # TODO explore better "unconditional" choices for the other keys
- # maybe guide away from empty text label and highest noise level and maximally degraded zx?
- uc = dict()
- for k in c:
- if k == "c_crossattn":
- assert isinstance(c[k], list) and len(c[k]) == 1
- uc[k] = [uc_tmp]
- elif k == "c_adm": # todo: only run with text-based guidance?
- assert isinstance(c[k], torch.Tensor)
- #uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
- uc[k] = c[k]
- elif isinstance(c[k], list):
- uc[k] = [c[k][i] for i in range(len(c[k]))]
- else:
- uc[k] = c[k]
-
- with ema_scope("Sampling with classifier-free guidance"):
- samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=uc,
- )
- x_samples_cfg = self.decode_first_stage(samples_cfg)
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
- if plot_progressive_rows:
- with ema_scope("Plotting Progressives"):
- img, progressives = self.progressive_denoising(c,
- shape=(self.channels, self.image_size, self.image_size),
- batch_size=N)
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
- log["progressive_row"] = prog_row
-
- return log
-
-
-class LatentFinetuneDiffusion(LatentDiffusion):
- """
- Basis for different fine-tunings, such as inpainting or depth2image.
- To disable finetuning mode, set finetune_keys to None
- """
-
- def __init__(self,
- concat_keys: tuple,
- finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
- "model_ema.diffusion_modelinput_blocks00weight"
- ),
- keep_finetune_dims=4,
- # if model was trained without concat mode before and we would like to keep these channels
- c_concat_log_start=None, # to log reconstruction of c_concat codes
- c_concat_log_end=None,
- *args, **kwargs
- ):
- ckpt_path = kwargs.pop("ckpt_path", None)
- ignore_keys = kwargs.pop("ignore_keys", list())
- super().__init__(*args, **kwargs)
- self.finetune_keys = finetune_keys
- self.concat_keys = concat_keys
- self.keep_dims = keep_finetune_dims
- self.c_concat_log_start = c_concat_log_start
- self.c_concat_log_end = c_concat_log_end
- if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
- if exists(ckpt_path):
- self.init_from_ckpt(ckpt_path, ignore_keys)
-
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
- sd = torch.load(path, map_location="cpu")
- if "state_dict" in list(sd.keys()):
- sd = sd["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
-
- # make it explicit, finetune by including extra input channels
- if exists(self.finetune_keys) and k in self.finetune_keys:
- new_entry = None
- for name, param in self.named_parameters():
- if name in self.finetune_keys:
- print(
- f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
- new_entry = torch.zeros_like(param) # zero init
- assert exists(new_entry), 'did not find matching parameter to modify'
- new_entry[:, :self.keep_dims, ...] = sd[k]
- sd[k] = new_entry
-
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
- sd, strict=False)
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
- print(f"Missing Keys: {missing}")
- if len(unexpected) > 0:
- print(f"Unexpected Keys: {unexpected}")
-
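- # The channel-extension trick in `init_from_ckpt` above, in isolation (illustrative
- # numbers only): when the finetuned UNet takes, e.g., 9 input channels but the checkpoint
- # was trained with 4, the first `keep_finetune_dims` channels are copied over and the
- # newly added channels stay zero-initialized:
- #
- #   old_w = sd["model.diffusion_model.input_blocks.0.0.weight"]  # e.g. (320, 4, 3, 3)
- #   new_w = torch.zeros_like(param)                              # e.g. (320, 9, 3, 3)
- #   new_w[:, :self.keep_dims, ...] = old_w
-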
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
- use_ema_scope=True,
- **kwargs):
- ema_scope = self.ema_scope if use_ema_scope else nullcontext
- use_ddim = ddim_steps is not None
-
- log = dict()
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
- c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- log["inputs"] = x
- log["reconstruction"] = xrec
- if self.model.conditioning_key is not None:
- if hasattr(self.cond_stage_model, "decode"):
- xc = self.cond_stage_model.decode(c)
- log["conditioning"] = xc
- elif self.cond_stage_key in ["caption", "txt"]:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
- log["conditioning"] = xc
- elif self.cond_stage_key in ['class_label', 'cls']:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
- log['conditioning'] = xc
- elif isimage(xc):
- log["conditioning"] = xc
- if ismap(xc):
- log["original_conditioning"] = self.to_rgb(xc)
-
- if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
- log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])
-
- if plot_diffusion_rows:
- # get diffusion row
- diffusion_row = list()
- z_start = z[:n_row]
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(z_start)
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
- diffusion_row.append(self.decode_first_stage(z_noisy))
-
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
- log["diffusion_row"] = diffusion_grid
-
- if sample:
- # get denoise row
- with ema_scope("Sampling"):
- samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
- batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
- x_samples = self.decode_first_stage(samples)
- log["samples"] = x_samples
- if plot_denoise_rows:
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
- log["denoise_row"] = denoise_grid
-
- if unconditional_guidance_scale > 1.0:
- uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
- uc_cat = c_cat
- uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
- with ema_scope("Sampling with classifier-free guidance"):
- samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
- batch_size=N, ddim=use_ddim,
- ddim_steps=ddim_steps, eta=ddim_eta,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=uc_full,
- )
- x_samples_cfg = self.decode_first_stage(samples_cfg)
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
- return log
-
-
-class LatentInpaintDiffusion(LatentFinetuneDiffusion):
- """
- can either run as pure inpainting model (only concat mode) or with mixed conditionings,
- e.g. mask as concat and text via cross-attn.
- To disable finetuning mode, set finetune_keys to None
- """
-
- def __init__(self,
- concat_keys=("mask", "masked_image"),
- masked_image_key="masked_image",
- *args, **kwargs
- ):
- super().__init__(concat_keys, *args, **kwargs)
- self.masked_image_key = masked_image_key
- assert self.masked_image_key in concat_keys
-
- @torch.no_grad()
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
- # note: restricted to non-trainable encoders currently
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
- force_c_encode=True, return_original_cond=True, bs=bs)
-
- assert exists(self.concat_keys)
- c_cat = list()
- for ck in self.concat_keys:
- cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
- if bs is not None:
- cc = cc[:bs]
- cc = cc.to(self.device)
- bchw = z.shape
- if ck != self.masked_image_key:
- cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
- else:
- cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
- c_cat.append(cc)
- c_cat = torch.cat(c_cat, dim=1)
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
- if return_first_stage_outputs:
- return z, all_conds, x, xrec, xc
- return z, all_conds
-
- @torch.no_grad()
- def log_images(self, *args, **kwargs):
- log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
- log["masked_image"] = rearrange(args[0]["masked_image"],
- 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
- return log
-
-
-class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion):
- """
- condition on monocular depth estimation
- """
-
- def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs):
- super().__init__(concat_keys=concat_keys, *args, **kwargs)
- self.depth_model = instantiate_from_config(depth_stage_config)
- self.depth_stage_key = concat_keys[0]
-
- @torch.no_grad()
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
- # note: restricted to non-trainable encoders currently
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img'
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
- force_c_encode=True, return_original_cond=True, bs=bs)
-
- assert exists(self.concat_keys)
- assert len(self.concat_keys) == 1
- c_cat = list()
- for ck in self.concat_keys:
- cc = batch[ck]
- if bs is not None:
- cc = cc[:bs]
- cc = cc.to(self.device)
- cc = self.depth_model(cc)
- cc = torch.nn.functional.interpolate(
- cc,
- size=z.shape[2:],
- mode="bicubic",
- align_corners=False,
- )
-
- depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
- keepdim=True)
- cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.
- c_cat.append(cc)
- c_cat = torch.cat(c_cat, dim=1)
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
- if return_first_stage_outputs:
- return z, all_conds, x, xrec, xc
- return z, all_conds
-
- @torch.no_grad()
- def log_images(self, *args, **kwargs):
- log = super().log_images(*args, **kwargs)
- depth = self.depth_model(args[0][self.depth_stage_key])
- depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \
- torch.amax(depth, dim=[1, 2, 3], keepdim=True)
- log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1.
- return log
-
-
-class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion):
- """
- condition on low-res image (and optionally on some spatial noise augmentation)
- """
- def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None,
- low_scale_config=None, low_scale_key=None, *args, **kwargs):
- super().__init__(concat_keys=concat_keys, *args, **kwargs)
- self.reshuffle_patch_size = reshuffle_patch_size
- self.low_scale_model = None
- if low_scale_config is not None:
- print("Initializing a low-scale model")
- assert exists(low_scale_key)
- self.instantiate_low_stage(low_scale_config)
- self.low_scale_key = low_scale_key
-
- def instantiate_low_stage(self, config):
- model = instantiate_from_config(config)
- self.low_scale_model = model.eval()
- self.low_scale_model.train = disabled_train
- for param in self.low_scale_model.parameters():
- param.requires_grad = False
-
- @torch.no_grad()
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
- # note: restricted to non-trainable encoders currently
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft'
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
- force_c_encode=True, return_original_cond=True, bs=bs)
-
- assert exists(self.concat_keys)
- assert len(self.concat_keys) == 1
- # optionally make spatial noise_level here
- c_cat = list()
- noise_level = None
- for ck in self.concat_keys:
- cc = batch[ck]
- cc = rearrange(cc, 'b h w c -> b c h w')
- if exists(self.reshuffle_patch_size):
- assert isinstance(self.reshuffle_patch_size, int)
- cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w',
- p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size)
- if bs is not None:
- cc = cc[:bs]
- cc = cc.to(self.device)
- if exists(self.low_scale_model) and ck == self.low_scale_key:
- cc, noise_level = self.low_scale_model(cc)
- c_cat.append(cc)
- c_cat = torch.cat(c_cat, dim=1)
- if exists(noise_level):
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level}
- else:
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
- if return_first_stage_outputs:
- return z, all_conds, x, xrec, xc
- return z, all_conds
-
- @torch.no_grad()
- def log_images(self, *args, **kwargs):
- log = super().log_images(*args, **kwargs)
- log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w')
- return log
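-
-
-# The `reshuffle_patch_size` rearrange in `get_input` above is a space-to-depth
-# ("pixel unshuffle") step. Quick illustration with made-up shapes:
-#
-#   x = torch.randn(1, 3, 8, 8)
-#   y = rearrange(x, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w', p1=2, p2=2)
-#   y.shape  # torch.Size([1, 12, 4, 4])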
diff --git a/spaces/Theivaprakasham/yolov6/tools/quantization/tensorrt/post_training/Calibrator.py b/spaces/Theivaprakasham/yolov6/tools/quantization/tensorrt/post_training/Calibrator.py
deleted file mode 100644
index e73e4187ba19f3e27496171ae00296ad8dc0dc79..0000000000000000000000000000000000000000
--- a/spaces/Theivaprakasham/yolov6/tools/quantization/tensorrt/post_training/Calibrator.py
+++ /dev/null
@@ -1,210 +0,0 @@
-#
-# Modified by Meituan
-# 2022.6.24
-#
-
-# Copyright 2019 NVIDIA Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-import glob
-import random
-import logging
-import cv2
-
-import numpy as np
-from PIL import Image
-import tensorrt as trt
-import pycuda.driver as cuda
-import pycuda.autoinit
-
-logging.basicConfig(level=logging.DEBUG,
- format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
- datefmt="%Y-%m-%d %H:%M:%S")
-logger = logging.getLogger(__name__)
-
-def preprocess_yolov6(image, channels=3, height=224, width=224):
- """Pre-processing for YOLOv6-based Object Detection Models
-
- Parameters
- ----------
- image: PIL.Image
- The image resulting from PIL.Image.open(filename) to preprocess
- channels: int
- The number of channels the image has (Usually 1 or 3)
- height: int
- The desired height of the image (usually 640)
- width: int
- The desired width of the image (usually 640)
-
- Returns
- -------
- img_data: numpy array
- The preprocessed image data in the form of a numpy array
-
- """
- # Get the image in CHW format
- resized_image = image.resize((width, height), Image.BILINEAR)
- img_data = np.asarray(resized_image).astype(np.float32)
-
- if len(img_data.shape) == 2:
- # For images without a channel dimension, we stack
- img_data = np.stack([img_data] * 3)
- logger.debug("Received grayscale image. Reshaped to {:}".format(img_data.shape))
- else:
- img_data = img_data.transpose([2, 0, 1])
-
- mean_vec = np.array([0.0, 0.0, 0.0])
- stddev_vec = np.array([1.0, 1.0, 1.0])
- assert img_data.shape[0] == channels
-
- for i in range(img_data.shape[0]):
- # Scale each pixel to [0, 1] and normalize per channel.
- img_data[i, :, :] = (img_data[i, :, :] / 255.0 - mean_vec[i]) / stddev_vec[i]
-
- return img_data
-
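-# Example use of the pre-processing helper above (illustrative; "sample.jpg" is a
-# placeholder path, and 640x640 matches the usual YOLOv6 input size):
-#
-#   from PIL import Image
-#   img = Image.open("sample.jpg")
-#   chw = preprocess_yolov6(img, channels=3, height=640, width=640)  # (3, 640, 640) float32
-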
-def get_int8_calibrator(calib_cache, calib_data, max_calib_size, calib_batch_size):
- # Use calibration cache if it exists
- if os.path.exists(calib_cache):
- logger.info("Skipping calibration files, using calibration cache: {:}".format(calib_cache))
- calib_files = []
- # Use calibration files from validation dataset if no cache exists
- else:
- if not calib_data:
- raise ValueError("ERROR: Int8 mode requested, but no calibration data provided. Please provide --calibration-data /path/to/calibration/files")
-
- calib_files = get_calibration_files(calib_data, max_calib_size)
-
- # Choose pre-processing function for INT8 calibration
- preprocess_func = preprocess_yolov6
-
- int8_calibrator = ImageCalibrator(calibration_files=calib_files,
- batch_size=calib_batch_size,
- cache_file=calib_cache)
- return int8_calibrator
-
-
-def get_calibration_files(calibration_data, max_calibration_size=None, allowed_extensions=(".jpeg", ".jpg", ".png")):
- """Returns a list of all filenames ending with `allowed_extensions` found in the `calibration_data` directory.
-
- Parameters
- ----------
- calibration_data: str
- Path to directory containing desired files.
- max_calibration_size: int
- Max number of files to use for calibration. If calibration_data contains more than this number,
- a random sample of size max_calibration_size will be returned instead. If None, all samples will be used.
-
- Returns
- -------
- calibration_files: List[str]
- List of filenames contained in the `calibration_data` directory ending with `allowed_extensions`.
- """
-
- logger.info("Collecting calibration files from: {:}".format(calibration_data))
- calibration_files = [path for path in glob.iglob(os.path.join(calibration_data, "**"), recursive=True)
- if os.path.isfile(path) and path.lower().endswith(allowed_extensions)]
- logger.info("Number of Calibration Files found: {:}".format(len(calibration_files)))
-
- if len(calibration_files) == 0:
- raise Exception("ERROR: Calibration data path [{:}] contains no files!".format(calibration_data))
-
- if max_calibration_size:
- if len(calibration_files) > max_calibration_size:
- logger.warning("Capping number of calibration images to max_calibration_size: {:}".format(max_calibration_size))
- random.seed(42) # Set seed for reproducibility
- calibration_files = random.sample(calibration_files, max_calibration_size)
-
- return calibration_files
-
-
-# https://docs.nvidia.com/deeplearning/sdk/tensorrt-api/python_api/infer/Int8/EntropyCalibrator2.html
-class ImageCalibrator(trt.IInt8EntropyCalibrator2):
- """INT8 Calibrator Class for Imagenet-based Image Classification Models.
-
- Parameters
- ----------
- calibration_files: List[str]
- List of image filenames to use for INT8 Calibration
- batch_size: int
- Number of images to pass through in one batch during calibration
- input_shape: Tuple[int]
- Tuple of integers defining the shape of input to the model (Default: (3, 224, 224))
- cache_file: str
- Name of file to read/write calibration cache from/to.
- preprocess_func: function -> numpy.ndarray
- Pre-processing function run on calibration data; in this implementation it is fixed to
- `preprocess_yolov6` (set as an attribute, not passed to the constructor). It should match
- the pre-processing done at inference time and return a numpy array of shape `input_shape`.
- """
-
- def __init__(self, calibration_files=[], batch_size=32, input_shape=(3, 224, 224),
- cache_file="calibration.cache", use_cv2=False):
- super().__init__()
- self.input_shape = input_shape
- self.cache_file = cache_file
- self.batch_size = batch_size
- self.batch = np.zeros((self.batch_size, *self.input_shape), dtype=np.float32)
- self.device_input = cuda.mem_alloc(self.batch.nbytes)
-
- self.files = calibration_files
- self.use_cv2 = use_cv2
- # Pad the list so it is a multiple of batch_size
- if len(self.files) % self.batch_size != 0:
- logger.info("Padding # calibration files to be a multiple of batch_size {:}".format(self.batch_size))
- self.files += calibration_files[(len(calibration_files) % self.batch_size):self.batch_size]
-
- self.batches = self.load_batches()
- self.preprocess_func = preprocess_yolov6
-
- def load_batches(self):
- # Populates a persistent self.batch buffer with images.
- for index in range(0, len(self.files), self.batch_size):
- for offset in range(self.batch_size):
- if self.use_cv2:
- # note: preprocess_yolov6 expects a PIL.Image, so keep use_cv2=False unless a
- # cv2-compatible pre-processing function is substituted
- image = cv2.imread(self.files[index + offset])
- else:
- image = Image.open(self.files[index + offset])
- self.batch[offset] = self.preprocess_func(image, *self.input_shape)
- logger.info("Calibration images pre-processed: {:}/{:}".format(index+self.batch_size, len(self.files)))
- yield self.batch
-
- def get_batch_size(self):
- return self.batch_size
-
- def get_batch(self, names):
- try:
- # Assume self.batches is a generator that provides batch data.
- batch = next(self.batches)
- # Assume that self.device_input is a device buffer allocated by the constructor.
- cuda.memcpy_htod(self.device_input, batch)
- return [int(self.device_input)]
- except StopIteration:
- # When we're out of batches, we return either [] or None.
- # This signals to TensorRT that there is no calibration data remaining.
- return None
-
- def read_calibration_cache(self):
- # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
- if os.path.exists(self.cache_file):
- with open(self.cache_file, "rb") as f:
- logger.info("Using calibration cache to save time: {:}".format(self.cache_file))
- return f.read()
-
- def write_calibration_cache(self, cache):
- with open(self.cache_file, "wb") as f:
- logger.info("Caching calibration data for future use: {:}".format(self.cache_file))
- f.write(cache)
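-
-
-# Typical wiring of this calibrator into a TensorRT engine build. Sketch only: the exact
-# builder calls depend on the TensorRT version (newer releases prefer
-# build_serialized_network), and "model.onnx" / "calib_data/" are placeholder paths:
-#
-#   trt_logger = trt.Logger(trt.Logger.INFO)
-#   builder = trt.Builder(trt_logger)
-#   network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
-#   parser = trt.OnnxParser(network, trt_logger)
-#   parser.parse(open("model.onnx", "rb").read())
-#   config = builder.create_builder_config()
-#   config.set_flag(trt.BuilderFlag.INT8)
-#   config.int8_calibrator = get_int8_calibrator("calibration.cache", "calib_data/", 1000, 8)
-#   engine = builder.build_engine(network, config)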
diff --git a/spaces/ThomasSimonini/SB3_Atari/README.md b/spaces/ThomasSimonini/SB3_Atari/README.md
deleted file mode 100644
index e00a87c1675f7dd27e1d8b5fda9b56cca7692ba7..0000000000000000000000000000000000000000
--- a/spaces/ThomasSimonini/SB3_Atari/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: SB3 Atari
-emoji: 🏢
-colorFrom: red
-colorTo: purple
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/Tj/starcoder-playground/README.md b/spaces/Tj/starcoder-playground/README.md
deleted file mode 100644
index bc6646d1929597847c89d473505d711e37181177..0000000000000000000000000000000000000000
--- a/spaces/Tj/starcoder-playground/README.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: StarCoder Demo
-emoji: 💫
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 3.28.3
-app_file: app.py
-pinned: true
-duplicated_from: Fisharp/starcoder-playground
----
-
-
-# ⭐StarCoder Demo💫
-
-## Code-Completion Playground 💻 with ⭐StarCoder Models
-
-This is a demo playground to generate code with the power of ⭐[StarCoder](https://huggingface.co/bigcode/starcoder) a **15B** parameter model for code generation in **80+** programming languages.
-
-ℹ️ This is not an instruction model but just a code completion tool.
-
-🗣️For instruction and chatting you can chat with a prompted version of the model directly at the [HuggingFace🤗Chat💬(hf.co/chat)](https://huggingface.co/chat/?model=starcoder)
-
----
-
-**Intended Use**: this app and its [supporting model](https://huggingface.co/bigcode/starcoder) are provided for demonstration purposes only; not to serve as a replacement for human expertise. For more details on the model's limitations in terms of factuality and biases, please refer to the source [model card](hf.co/bigcode)
-
-⚠️ Any use or sharing of this demo constitutes your acceptance of the BigCode [OpenRAIL-M](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) License Agreement and the use restrictions included within.
-
----
-
-## Model Formats
-
-The model is pretrained on code and is formatted with special tokens in addition to the pure code data,\
-such as prefixes specifying the source of the file or tokens separating code from a commit message.\
-Use these templates to explore the model's capacities:
-
-### 1. Prefixes 🏷️
-
-For pure code files, use any combination of the following prefixes:
-
-```xml
-<reponame>REPONAME<filename>FILENAME<gh_stars>STARS\ncode<|endoftext|>
-```
-
-STARS can be one of: 0, 1-10, 10-100, 100-1000, 1000+
-
-### 2. Commits 💾
-
-The commits data is formatted as follows:
-
-```xml
-<commit_before>code<commit_msg>text<commit_after>code<|endoftext|>
-```
-
-### 3. Jupyter Notebooks 📓
-
-The model is trained on Jupyter notebooks as Python scripts and structured formats like:
-
-```xml
-<jupyter_text>text<jupyter_code>code<jupyter_output>output
-```
-
-### 4. Issues 🐛
-
-We also trained on GitHub issues using the following formatting:
-
-```xml
-<issue_start><issue_comment>text<issue_comment>...<issue_closed>
-```
-
-### 5. Fill-in-the-middle 🧩
-
-Fill in the middle requires rearranging the model inputs. The playground handles this for you - all you need is to specify where to fill:
-
-```xml
-<fim_prefix>code before<fim_suffix>code after<fim_middle>
-```
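-
-To try the same fill-in-the-middle format outside the playground, a minimal 🤗 Transformers sketch along these lines should work (the checkpoint is gated, so you need to accept the license and use an access token; `device_map="auto"` additionally assumes `accelerate` is installed):
-
-```python
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-checkpoint = "bigcode/starcoder"
-tokenizer = AutoTokenizer.from_pretrained(checkpoint)
-model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")
-
-prompt = "<fim_prefix>def fibonacci(n):\n    <fim_suffix>\n    return fibonacci(n - 1) + fibonacci(n - 2)<fim_middle>"
-inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-out = model.generate(**inputs, max_new_tokens=48)
-print(tokenizer.decode(out[0]))
-```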
diff --git a/spaces/Viswa934746/AIBALA/README.md b/spaces/Viswa934746/AIBALA/README.md
deleted file mode 100644
index cfe252659d0906d4002fe2c517462920a17fb543..0000000000000000000000000000000000000000
--- a/spaces/Viswa934746/AIBALA/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AIBALA
-emoji: 👁
-colorFrom: green
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-duplicated_from: Bala2-03-2003/AIBALA
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Vrk/SkimLit/README.md b/spaces/Vrk/SkimLit/README.md
deleted file mode 100644
index d6ef3a29c1bf59f858a03317a1ed1b6bad954398..0000000000000000000000000000000000000000
--- a/spaces/Vrk/SkimLit/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: SkimLit
-emoji: 💩
-colorFrom: indigo
-colorTo: green
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`models`: _List[string]_
-HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`datasets`: _List[string]_
-HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/Wander1ngW1nd/EdControl/README.md b/spaces/Wander1ngW1nd/EdControl/README.md
deleted file mode 100644
index d668d87b9cc83ccf136c6055c1327602d1612579..0000000000000000000000000000000000000000
--- a/spaces/Wander1ngW1nd/EdControl/README.md
+++ /dev/null
@@ -1,147 +0,0 @@
----
-title: EdControl
-emoji: 👨🏼🎓
-colorFrom: blue
-colorTo: green
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: app.py
-pinned: false
----
-
-# EdControl
-
-Automated analytics service that tracks students' emotional responses during online classes.
-
-## Problem
-Today there are many platforms for online learning, from large online schools to smaller ones. Because of the large flow of clients they have to recruit many teachers, and managers cannot always judge how competent a teacher is in terms of soft skills when delivering material. Aggressive teachers who cannot control their emotions therefore need to be identified in time to protect the company's reputation.
-
-## Solving the problem
-Our product addresses this assessment problem: it saves managers the time of manually reviewing teachers' video lessons and improves the company's business metrics by identifying incompetent teachers at an early stage. It works by recognizing negative emotions during an online lesson with a teacher. You upload a recording of the lesson to our service and receive a dashboard with information and analytics for the whole lesson; whenever a negative situation is detected, the dashboard also shows the exact timestamp of the emotion and how strong it was.
-
-## Implementation
-Currently implemented:
-- Emotional assessment of a person's condition
-- Data analytics and visualization for convenient video lesson analysis
-- Recommendations to the teacher for subsequent lessons, if any problems are found
-
-The CV model determines the emotion that a person is experiencing at a given time and displays the depth of emotion on a scale from 0 to 100:
-
-
-
-
-
-
-## The appearance of the service
-The appearance of the service is intuitive for users and its main page looks like this:
-

-
-After successfully processing the uploaded video, you can get analytics and recommendations:
-
-
-## Installation
-
-1\. Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) and [docker engine](https://docs.docker.com/engine/install/)
-
-2\. Clone the project:
-
-```bash
-git clone https://github.com/Wander1ngW1nd/EdControl
-```
-
-3\. Build an image:
-
-```bash
-docker build -t edcontrol_image EdControl
-```
-
-4\. Run application container:
-
-```bash
-docker run --name edcontrol_app -dp 8501 edcontrol_image
-```
-
-5\. Figure out which port was assigned to your application:
-
-```bash
-docker port edcontrol_app
-```
-You will see output similar to the following:
-
-```
-8501/tcp -> 0.0.0.0:
-```
-
-6\. Go to:
-```
-http://0.0.0.0:
-```
-
-Now you can use the app!
-
-
-## Development
-
-### Dependencies Management
-
-Project’s dependencies are managed by [poetry](https://python-poetry.org/). So, all the dependencies and configuration parameters are listed in [pyproject.toml](pyproject.toml).
-
-To install the dependencies, follow these steps:
-
-1\. Install [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) and [poetry](https://python-poetry.org/docs/#installation)
-
-2\. Clone the project and go to the corresponding directory:
-
-```bash
-git clone https://github.com/Wander1ngW1nd/EdControl
-cd EdControl
-```
-
-3\. (Optional) If your python version does not match the requirements specified in [pyproject.toml](pyproject.toml), [install one of the matching versions](https://realpython.com/installing-python)
-
-4\. Create virtual environment and activate it
-
-```bash
-poetry shell
-```
-
-5\. Install dependencies
-
-```bash
-poetry lock --no-update
-poetry install
-```
-
-## Road Map
-At the moment the product is in working condition and ready for use. Our EdControl team sees a clear path for further development: adding new features and expanding the target audience.
-
-- Cutting out a window with students
-- Adding speech recognition (text) and intonation (audio) to improve the accuracy of determining the emotional state
-- Adding recognition of ban words and gestures
-- Adding face identification function
-- Adding the ability to recognize the emotional state in group calls and conferences
-- Integration into LMS systems of various platforms
\ No newline at end of file
diff --git a/spaces/XzJosh/Jianmo-Bert-VITS2/transforms.py b/spaces/XzJosh/Jianmo-Bert-VITS2/transforms.py
deleted file mode 100644
index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Jianmo-Bert-VITS2/transforms.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
-
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {
- 'tails': tails,
- 'tail_bound': tail_bound
- }
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(
- inputs[..., None] >= bin_locations,
- dim=-1
- ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails='linear',
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == 'linear':
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError('{} tails are not implemented.'.format(tails))
-
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative
- )
-
- return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0., right=1., bottom=0., top=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError('Input to a transform is not within its domain')
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError('Minimal bin width too large for the number of bins')
- if min_bin_height * num_bins > 1.0:
- raise ValueError('Minimal bin height too large for the number of bins')
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (((inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta)
- + input_heights * (input_delta - input_derivatives)))
- b = (input_heights * input_derivatives
- - (inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta))
- c = - input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (input_delta * theta.pow(2)
- + input_derivatives * theta_one_minus_theta)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
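-
-
-# Minimal round-trip check (illustrative only, not part of the original module): with
-# 'linear' tails the transform should invert itself up to numerical error. The shapes are
-# an assumption: one K-bin spline per element, with K - 1 derivative params before padding.
-if __name__ == "__main__":
-    torch.manual_seed(0)
-    B, T, K = 2, 5, 8
-    x = torch.randn(B, T)
-    w, h = torch.randn(B, T, K), torch.randn(B, T, K)
-    d = torch.randn(B, T, K - 1)
-    y, logdet = piecewise_rational_quadratic_transform(
-        x, w, h, d, inverse=False, tails='linear', tail_bound=3.0)
-    x_rec, inv_logdet = piecewise_rational_quadratic_transform(
-        y, w, h, d, inverse=True, tails='linear', tail_bound=3.0)
-    print(torch.allclose(x, x_rec, atol=1e-4), torch.allclose(logdet, -inv_logdet, atol=1e-4))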
diff --git a/spaces/XzJosh/LittleTaffy-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md b/spaces/XzJosh/LittleTaffy-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md
deleted file mode 100644
index 7bce039b7f81ee328fdf8efe3f14409200aacbef..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/LittleTaffy-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-language:
-- zh
-tags:
-- bert
-license: "apache-2.0"
----
-
-# Please use 'Bert' related functions to load this model!
-
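-A minimal loading example with 🤗 Transformers (assuming the upstream hub id `hfl/chinese-roberta-wwm-ext-large`; point `from_pretrained` at this local directory instead if you use the bundled copy):
-
-```python
-from transformers import BertTokenizer, BertModel
-
-tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
-model = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext-large")
-```
-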
-## Chinese BERT with Whole Word Masking
-To further accelerate Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**.
-
-**[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)**
-Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu
-
-This repository is developed based on: https://github.com/google-research/bert
-
-You may also be interested in:
-- Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm
-- Chinese MacBERT: https://github.com/ymcui/MacBERT
-- Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
-- Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
-- Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
-
-More resources by HFL: https://github.com/ymcui/HFL-Anthology
-
-## Citation
-If you find the technical report or resources useful, please cite the following technical report in your paper.
-- Primary: https://arxiv.org/abs/2004.13922
-```
-@inproceedings{cui-etal-2020-revisiting,
- title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing",
- author = "Cui, Yiming and
- Che, Wanxiang and
- Liu, Ting and
- Qin, Bing and
- Wang, Shijin and
- Hu, Guoping",
- booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings",
- month = nov,
- year = "2020",
- address = "Online",
- publisher = "Association for Computational Linguistics",
- url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58",
- pages = "657--668",
-}
-```
-- Secondary: https://arxiv.org/abs/1906.08101
-```
-@article{chinese-bert-wwm,
- title={Pre-Training with Whole Word Masking for Chinese BERT},
- author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping},
- journal={arXiv preprint arXiv:1906.08101},
- year={2019}
- }
-```
\ No newline at end of file
diff --git a/spaces/YONG627/456123/yolov5-code-main/utils/loggers/comet/comet_utils.py b/spaces/YONG627/456123/yolov5-code-main/utils/loggers/comet/comet_utils.py
deleted file mode 100644
index 27600761ad2843a6ab66aa22ad06782bb4b7eea7..0000000000000000000000000000000000000000
--- a/spaces/YONG627/456123/yolov5-code-main/utils/loggers/comet/comet_utils.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import logging
-import os
-from urllib.parse import urlparse
-
-try:
- import comet_ml
-except (ModuleNotFoundError, ImportError):
- comet_ml = None
-
-import yaml
-
-logger = logging.getLogger(__name__)
-
-COMET_PREFIX = 'comet://'
-COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5')
-COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt')
-
-
-def download_model_checkpoint(opt, experiment):
- model_dir = f'{opt.project}/{experiment.name}'
- os.makedirs(model_dir, exist_ok=True)
-
- model_name = COMET_MODEL_NAME
- model_asset_list = experiment.get_model_asset_list(model_name)
-
- if len(model_asset_list) == 0:
- logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}')
- return
-
- model_asset_list = sorted(
- model_asset_list,
- key=lambda x: x['step'],
- reverse=True,
- )
- logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list}
-
- resource_url = urlparse(opt.weights)
- checkpoint_filename = resource_url.query
-
- if checkpoint_filename:
- asset_id = logged_checkpoint_map.get(checkpoint_filename)
- else:
- asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME)
- checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME
-
- if asset_id is None:
- logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment')
- return
-
- try:
- logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}')
- asset_filename = checkpoint_filename
-
- model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False)
- model_download_path = f'{model_dir}/{asset_filename}'
- with open(model_download_path, 'wb') as f:
- f.write(model_binary)
-
- opt.weights = model_download_path
-
- except Exception as e:
- logger.warning('COMET WARNING: Unable to download checkpoint from Comet')
- logger.exception(e)
-
-
-def set_opt_parameters(opt, experiment):
- """Update the opts Namespace with parameters
- from Comet's ExistingExperiment when resuming a run
-
- Args:
- opt (argparse.Namespace): Namespace of command line options
- experiment (comet_ml.APIExperiment): Comet API Experiment object
- """
- asset_list = experiment.get_asset_list()
- resume_string = opt.resume
-
- for asset in asset_list:
- if asset['fileName'] == 'opt.yaml':
- asset_id = asset['assetId']
- asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False)
- opt_dict = yaml.safe_load(asset_binary)
- for key, value in opt_dict.items():
- setattr(opt, key, value)
- opt.resume = resume_string
-
- # Save hyperparameters to YAML file
- # Necessary to pass checks in training script
- save_dir = f'{opt.project}/{experiment.name}'
- os.makedirs(save_dir, exist_ok=True)
-
- hyp_yaml_path = f'{save_dir}/hyp.yaml'
- with open(hyp_yaml_path, 'w') as f:
- yaml.dump(opt.hyp, f)
- opt.hyp = hyp_yaml_path
-
-
-def check_comet_weights(opt):
- """Downloads model weights from Comet and updates the
- weights path to point to saved weights location
-
- Args:
- opt (argparse.Namespace): Command Line arguments passed
- to YOLOv5 training script
-
- Returns:
- None/bool: Return True if weights are successfully downloaded
- else return None
- """
- if comet_ml is None:
- return
-
- if isinstance(opt.weights, str):
- if opt.weights.startswith(COMET_PREFIX):
- api = comet_ml.API()
- resource = urlparse(opt.weights)
- experiment_path = f'{resource.netloc}{resource.path}'
- experiment = api.get(experiment_path)
- download_model_checkpoint(opt, experiment)
- return True
-
- return None
-
-
-def check_comet_resume(opt):
- """Restores run parameters to its original state based on the model checkpoint
- and logged Experiment parameters.
-
- Args:
- opt (argparse.Namespace): Command Line arguments passed
- to YOLOv5 training script
-
- Returns:
- None/bool: Return True if the run is restored successfully
- else return None
- """
- if comet_ml is None:
- return
-
- if isinstance(opt.resume, str):
- if opt.resume.startswith(COMET_PREFIX):
- api = comet_ml.API()
- resource = urlparse(opt.resume)
- experiment_path = f'{resource.netloc}{resource.path}'
- experiment = api.get(experiment_path)
- set_opt_parameters(opt, experiment)
- download_model_checkpoint(opt, experiment)
-
- return True
-
- return None
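The `comet://` handling above leans entirely on `urllib.parse.urlparse`: the netloc and path become the Comet experiment path, and the optional query string names a specific checkpoint file. A minimal sketch of that split (workspace, project and experiment names here are hypothetical):

```python
from urllib.parse import urlparse

# Hypothetical value for opt.weights / opt.resume in the comet:// scheme used above.
weights = "comet://my-workspace/yolov5-project/exp-1234?best.pt"

resource = urlparse(weights)
experiment_path = f"{resource.netloc}{resource.path}"  # "my-workspace/yolov5-project/exp-1234"
checkpoint_filename = resource.query or "last.pt"      # falls back to COMET_DEFAULT_CHECKPOINT_FILENAME

print(experiment_path, checkpoint_filename)
```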
diff --git a/spaces/Yan233th/so-vits-svc-models/vdecoder/hifigan/nvSTFT.py b/spaces/Yan233th/so-vits-svc-models/vdecoder/hifigan/nvSTFT.py
deleted file mode 100644
index 88597d62a505715091f9ba62d38bf0a85a31b95a..0000000000000000000000000000000000000000
--- a/spaces/Yan233th/so-vits-svc-models/vdecoder/hifigan/nvSTFT.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import math
-import os
-os.environ["LRU_CACHE_CAPACITY"] = "3"
-import random
-import torch
-import torch.utils.data
-import numpy as np
-import librosa
-from librosa.util import normalize
-from librosa.filters import mel as librosa_mel_fn
-from scipy.io.wavfile import read
-import soundfile as sf
-
-def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
- sampling_rate = None
- try:
- data, sampling_rate = sf.read(full_path, always_2d=True)  # load audio with soundfile
- except Exception as ex:
- print(f"'{full_path}' failed to load.\nException:")
- print(ex)
- if return_empty_on_exception:
- return [], sampling_rate or target_sr or 32000
- else:
- raise Exception(ex)
-
- if len(data.shape) > 1:
- data = data[:, 0]
- assert len(data) > 2  # check that the audio file is longer than 2 samples (otherwise the channel slice above was applied along the wrong dimension)
-
- if np.issubdtype(data.dtype, np.integer): # if audio data is type int
- max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
- else: # if audio data is type fp32
- max_mag = max(np.amax(data), -np.amin(data))
- max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
-
- data = torch.FloatTensor(data.astype(np.float32))/max_mag
-
- if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
- return [], sampling_rate or target_sr or 32000
- if target_sr is not None and sampling_rate != target_sr:
- data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
- sampling_rate = target_sr
-
- return data, sampling_rate
-
-def dynamic_range_compression(x, C=1, clip_val=1e-5):
- return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
-
-def dynamic_range_decompression(x, C=1):
- return np.exp(x) / C
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-def dynamic_range_decompression_torch(x, C=1):
- return torch.exp(x) / C
-
-class STFT():
- def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
- self.target_sr = sr
-
- self.n_mels = n_mels
- self.n_fft = n_fft
- self.win_size = win_size
- self.hop_length = hop_length
- self.fmin = fmin
- self.fmax = fmax
- self.clip_val = clip_val
- self.mel_basis = {}
- self.hann_window = {}
-
- def get_mel(self, y, center=False):
- sampling_rate = self.target_sr
- n_mels = self.n_mels
- n_fft = self.n_fft
- win_size = self.win_size
- hop_length = self.hop_length
- fmin = self.fmin
- fmax = self.fmax
- clip_val = self.clip_val
-
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- if str(fmax)+'_'+str(y.device) not in self.mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
- self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
- self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)],
- center=center, pad_mode='reflect', normalized=False, onesided=True)
- # print(111,spec)
- spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
- # print(222,spec)
- spec = torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec)
- # print(333,spec)
- spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
- # print(444,spec)
- return spec
-
- def __call__(self, audiopath):
- audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
- spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
- return spect
-
-stft = STFT()
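The module-level `stft` object is the piece the vocoder code calls: given a wav path it resamples to `target_sr`, reflection-pads, and returns a log-compressed mel spectrogram. A usage sketch, assuming the module above is importable and a local `example.wav` exists:

```python
# example.wav is a placeholder path; multi-channel files are reduced to their first channel.
mel = stft("example.wav")  # torch.FloatTensor of shape (n_mels, n_frames), i.e. (80, ...) with the defaults above
print(mel.shape, float(mel.min()), float(mel.max()))
```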
diff --git a/spaces/YazawaSunrise/so-vits-svc-LoveLive/utils.py b/spaces/YazawaSunrise/so-vits-svc-LoveLive/utils.py
deleted file mode 100644
index 27a3e07fbeb03c96e6073d3dd9ba5b0696d7cc69..0000000000000000000000000000000000000000
--- a/spaces/YazawaSunrise/so-vits-svc-LoveLive/utils.py
+++ /dev/null
@@ -1,355 +0,0 @@
-import os
-import glob
-import re
-import sys
-import argparse
-import logging
-import json
-import subprocess
-
-import librosa
-import numpy as np
-import torchaudio
-from scipy.io.wavfile import read
-import torch
-import torchvision
-from torch.nn import functional as F
-from commons import sequence_mask
-from hubert import hubert_model
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-f0_bin = 256
-f0_max = 1100.0
-f0_min = 50.0
-f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
-def f0_to_coarse(f0):
- is_torch = isinstance(f0, torch.Tensor)
- f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
-
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
- f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int)
- assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
- return f0_coarse
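`f0_to_coarse` quantizes F0 values in Hz onto 255 mel-spaced bins between `f0_min` and `f0_max`; frames with `f0 == 0` (unvoiced) end up in bin 1. A quick check of the NumPy path with toy values:

```python
import numpy as np

f0 = np.array([0.0, 100.0, 220.0, 440.0, 1000.0])  # Hz, made-up contour
print(f0_to_coarse(f0))  # the unvoiced frame maps to 1; higher pitches map to higher bins, capped at 255
```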
-
-
-def get_hubert_model(rank=None):
-
- hubert_soft = hubert_model.hubert_soft("hubert/hubert-soft-0d54a1f4.pt")
- if rank is not None:
- hubert_soft = hubert_soft.cuda(rank)
- return hubert_soft
-
-def get_hubert_content(hmodel, y=None, path=None):
- if path is not None:
- source, sr = torchaudio.load(path)
- source = torchaudio.functional.resample(source, sr, 16000)
- if len(source.shape) == 2 and source.shape[1] >= 2:
- source = torch.mean(source, dim=0).unsqueeze(0)
- else:
- source = y
- source = source.unsqueeze(0)
- with torch.inference_mode():
- units = hmodel.units(source)
- return units.transpose(1,2)
-
-
-def get_content(cmodel, y):
- with torch.no_grad():
- c = cmodel.extract_features(y.squeeze(1))[0]
- c = c.transpose(1, 2)
- return c
-
-
-
-def transform(mel, height): # 68-92
- #r = np.random.random()
- #rate = r * 0.3 + 0.85 # 0.85-1.15
- #height = int(mel.size(-2) * rate)
- tgt = torchvision.transforms.functional.resize(mel, (height, mel.size(-1)))
- if height >= mel.size(-2):
- return tgt[:, :mel.size(-2), :]
- else:
- silence = tgt[:,-1:,:].repeat(1,mel.size(-2)-height,1)
- silence += torch.randn_like(silence) / 10
- return torch.cat((tgt, silence), 1)
-
-
-def stretch(mel, width): # 0.5-2
- return torchvision.transforms.functional.resize(mel, (mel.size(-2), width))
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if iteration is None:
- iteration = 1
- if learning_rate is None:
- learning_rate = 0.0002
- if optimizer is not None and checkpoint_dict['optimizer'] is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict= {}
- for k, v in state_dict.items():
- try:
- new_state_dict[k] = saved_state_dict[k]
- except:
- logger.info("%s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
- iteration, checkpoint_path))
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- torch.save({'model': state_dict,
- 'iteration': iteration,
- 'optimizer': optimizer.state_dict(),
- 'learning_rate': learning_rate}, checkpoint_path)
- clean_ckpt = False
- if clean_ckpt:
- clean_checkpoints(path_to_models='logs/32k/', n_ckpts_to_keep=3, sort_by_time=True)
-
-def clean_checkpoints(path_to_models='logs/48k/', n_ckpts_to_keep=2, sort_by_time=True):
- """Freeing up space by deleting saved ckpts
-
- Arguments:
- path_to_models -- Path to the model directory
- n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
- sort_by_time -- True -> chronologically delete ckpts
- False -> lexicographically delete ckpts
- """
- ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))]
- name_key = (lambda _f: int(re.compile(r'._(\d+)\.pth').match(_f).group(1)))
- time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)))
- sort_key = time_key if sort_by_time else name_key
- x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], key=sort_key)
- to_del = [os.path.join(path_to_models, fn) for fn in
- (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
- del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
- del_routine = lambda x: [os.remove(x), del_info(x)]
- rs = [del_routine(fn) for fn in to_del]
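With `sort_by_time=False`, `name_key` orders checkpoints by the step number embedded in names like `G_12000.pth` rather than by modification time. A small illustration of that sort key (filenames are made up):

```python
import re

name_key = lambda _f: int(re.compile(r'._(\d+)\.pth').match(_f).group(1))
print(sorted(['G_8000.pth', 'G_12000.pth', 'G_400.pth'], key=name_key))
# ['G_400.pth', 'G_8000.pth', 'G_12000.pth'] -- numeric order, not lexicographic
```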
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
- for k, v in scalars.items():
- writer.add_scalar(k, v, global_step)
- for k, v in histograms.items():
- writer.add_histogram(k, v, global_step)
- for k, v in images.items():
- writer.add_image(k, v, global_step, dataformats='HWC')
- for k, v in audios.items():
- writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
- f_list = glob.glob(os.path.join(dir_path, regex))
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
- x = f_list[-1]
- print(x)
- return x
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10,2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_wav_to_torch(full_path):
- sampling_rate, data = read(full_path)
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, required=True,
- help='Model name')
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
- logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
- logger.warning("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
-
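`HParams` recursively wraps nested dicts, so a JSON config loaded by `get_hparams_from_file` can be read with either attribute or key access. A sketch with a made-up config:

```python
# Hypothetical config shaped like the JSON files this module consumes.
hps = HParams(**{
    "train": {"batch_size": 16, "learning_rate": 2e-4},
    "data": {"sampling_rate": 32000},
})

print(hps.train.batch_size)           # 16 -- nested dicts become nested HParams
print(hps["data"]["sampling_rate"])   # 32000 -- dict-style access works too
print("train" in hps, len(hps))       # True 2
```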
diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py
deleted file mode 100644
index 333599d7ecf8b68827bdde55a37fa96c213c013a..0000000000000000000000000000000000000000
--- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py
+++ /dev/null
@@ -1,335 +0,0 @@
-# Copyright 2022 Microsoft and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Callable, List, Optional, Tuple, Union
-
-import torch
-
-from diffusers import Transformer2DModel, VQModel
-from diffusers.schedulers.scheduling_vq_diffusion import VQDiffusionScheduler
-from transformers import CLIPTextModel, CLIPTokenizer
-
-from ...configuration_utils import ConfigMixin, register_to_config
-from ...modeling_utils import ModelMixin
-from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-from ...utils import logging
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
- """
- Utility class for storing learned text embeddings for classifier free sampling
- """
-
- @register_to_config
- def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
- super().__init__()
-
- self.learnable = learnable
-
- if self.learnable:
- assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
- assert length is not None, "learnable=True requires `length` to be set"
-
- embeddings = torch.zeros(length, hidden_size)
- else:
- embeddings = None
-
- self.embeddings = torch.nn.Parameter(embeddings)
-
-
-class VQDiffusionPipeline(DiffusionPipeline):
- r"""
- Pipeline for text-to-image generation using VQ Diffusion
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- vqvae ([`VQModel`]):
- Vector Quantized Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent
- representations.
- text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder. VQ Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
- the [clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) variant.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- transformer ([`Transformer2DModel`]):
- Conditional transformer to denoise the encoded image latents.
- scheduler ([`VQDiffusionScheduler`]):
- A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
- """
-
- vqvae: VQModel
- text_encoder: CLIPTextModel
- tokenizer: CLIPTokenizer
- transformer: Transformer2DModel
- learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
- scheduler: VQDiffusionScheduler
-
- def __init__(
- self,
- vqvae: VQModel,
- text_encoder: CLIPTextModel,
- tokenizer: CLIPTokenizer,
- transformer: Transformer2DModel,
- scheduler: VQDiffusionScheduler,
- learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
- ):
- super().__init__()
-
- self.register_modules(
- vqvae=vqvae,
- transformer=transformer,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- scheduler=scheduler,
- learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
- )
-
- def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
- batch_size = len(prompt) if isinstance(prompt, list) else 1
-
- # get prompt text embeddings
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
-
- if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
- removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
- text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
- text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
-
- # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
- # While CLIP does normalize the pooled output of the text transformer when combining
- # the image and text embeddings, CLIP does not directly normalize the last hidden state.
- #
- # CLIP normalizing the pooled output.
- # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
- text_embeddings = text_embeddings / text_embeddings.norm(dim=-1, keepdim=True)
-
- # duplicate text embeddings for each generation per prompt
- text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
-
- if do_classifier_free_guidance:
- if self.learned_classifier_free_sampling_embeddings.learnable:
- uncond_embeddings = self.learned_classifier_free_sampling_embeddings.embeddings
- uncond_embeddings = uncond_embeddings.unsqueeze(0).repeat(batch_size, 1, 1)
- else:
- uncond_tokens = [""] * batch_size
-
- max_length = text_input_ids.shape[-1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pt",
- )
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
- # See comment for normalizing text embeddings
- uncond_embeddings = uncond_embeddings / uncond_embeddings.norm(dim=-1, keepdim=True)
-
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = uncond_embeddings.shape[1]
- uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
- uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- return text_embeddings
-
- @torch.no_grad()
- def __call__(
- self,
- prompt: Union[str, List[str]],
- num_inference_steps: int = 100,
- guidance_scale: float = 5.0,
- truncation_rate: float = 1.0,
- num_images_per_prompt: int = 1,
- generator: Optional[torch.Generator] = None,
- latents: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: Optional[int] = 1,
- ) -> Union[ImagePipelineOutput, Tuple]:
- """
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`):
- The prompt or prompts to guide the image generation.
- num_inference_steps (`int`, *optional*, defaults to 100):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 5.0):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- truncation_rate (`float`, *optional*, defaults to 1.0 (equivalent to no truncation)):
- Used to "truncate" the predicted classes for x_0 such that the cumulative probability for a pixel is at
- most `truncation_rate`. The lowest probabilities that would increase the cumulative probability above
- `truncation_rate` are set to zero.
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- generator (`torch.Generator`, *optional*):
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
- deterministic.
- latents (`torch.FloatTensor` of shape (batch), *optional*):
- Pre-generated noisy latents to be used as inputs for image generation. Must be valid embedding indices.
- Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will
- be generated of completely masked latent pixels.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generated image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
-
- Returns:
- [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~ pipeline_utils.ImagePipelineOutput `] if
- `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the
- generated images.
- """
- if isinstance(prompt, str):
- batch_size = 1
- elif isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- batch_size = batch_size * num_images_per_prompt
-
- do_classifier_free_guidance = guidance_scale > 1.0
-
- text_embeddings = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- # get the initial completely masked latents unless the user supplied it
-
- latents_shape = (batch_size, self.transformer.num_latent_pixels)
- if latents is None:
- mask_class = self.transformer.num_vector_embeds - 1
- latents = torch.full(latents_shape, mask_class).to(self.device)
- else:
- if latents.shape != latents_shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
- if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
- raise ValueError(
- "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
- f" {self.transformer.num_vector_embeds - 1} (inclusive)."
- )
- latents = latents.to(self.device)
-
- # set timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=self.device)
-
- timesteps_tensor = self.scheduler.timesteps.to(self.device)
-
- sample = latents
-
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
- # expand the sample if we are doing classifier free guidance
- latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
-
- # predict the un-noised image
- # model_output == `log_p_x_0`
- model_output = self.transformer(
- latent_model_input, encoder_hidden_states=text_embeddings, timestep=t
- ).sample
-
- if do_classifier_free_guidance:
- model_output_uncond, model_output_text = model_output.chunk(2)
- model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
- model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
-
- model_output = self.truncate(model_output, truncation_rate)
-
- # remove `log(0)`'s (`-inf`s)
- model_output = model_output.clamp(-70)
-
- # compute the previous noisy sample x_t -> x_t-1
- sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample
-
- # call the callback, if provided
- if callback is not None and i % callback_steps == 0:
- callback(i, t, sample)
-
- embedding_channels = self.vqvae.config.vq_embed_dim
- embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
- embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
- image = self.vqvae.decode(embeddings, force_not_quantize=True).sample
-
- image = (image / 2 + 0.5).clamp(0, 1)
- image = image.cpu().permute(0, 2, 3, 1).numpy()
-
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image,)
-
- return ImagePipelineOutput(images=image)
-
- def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
- """
- Truncates log_p_x_0 such that for each column vector, the total cumulative probability is `truncation_rate` The
- lowest probabilities that would increase the cumulative probability above `truncation_rate` are set to zero.
- """
- sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
- sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
- keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
-
- # Ensure that at least the largest probability is not zeroed out
- all_true = torch.full_like(keep_mask[:, 0:1, :], True)
- keep_mask = torch.cat((all_true, keep_mask), dim=1)
- keep_mask = keep_mask[:, :-1, :]
-
- keep_mask = keep_mask.gather(1, indices.argsort(1))
-
- rv = log_p_x_0.clone()
-
- rv[~keep_mask] = -torch.inf # -inf = log(0)
-
- return rv
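The effect of `truncate` is easiest to see on a toy distribution: per latent position it keeps the most probable classes up to and including the first one that pushes the cumulative probability to or past `truncation_rate`, and sends everything else to `log(0)`. A sketch using the method unbound (`self` is not used inside it):

```python
import torch

# One batch element, four classes, one latent pixel: probabilities 0.5, 0.3, 0.15, 0.05.
log_p = torch.log(torch.tensor([[[0.5], [0.3], [0.15], [0.05]]]))

out = VQDiffusionPipeline.truncate(None, log_p, truncation_rate=0.8)
print(out.squeeze())  # [log 0.5, log 0.3, -inf, -inf]: the two least likely classes are zeroed out
```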
diff --git a/spaces/Yntec/ToyWorldXL/app.py b/spaces/Yntec/ToyWorldXL/app.py
deleted file mode 100644
index 873ef9ba8035d9961ebfc78dd6001557565df9be..0000000000000000000000000000000000000000
--- a/spaces/Yntec/ToyWorldXL/app.py
+++ /dev/null
@@ -1,172 +0,0 @@
-import gradio as gr
-import os
-import sys
-from pathlib import Path
-
-models = [
- "Lykon/dreamshaper-xl-1-0",
- "nerijs/pixel-art-xl",
- "Linaqruf/animagine-xl",
- "stabilityai/stable-diffusion-xl-base-1.0",
-]
-current_model = models[0]
-
-text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
-
-models2=[
- gr.Interface.load(f"models/{models[0]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[1]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[2]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[3]}",live=True,preprocess=False)
-]
-
-
-def text_it1(inputs,text_gen1=text_gen1):
- go_t1=text_gen1(inputs)
- return(go_t1)
-
-def set_model(current_model):
- current_model = models[current_model]
- return gr.update(label=(f"{current_model}"))
-
-
-def send_it1(inputs, model_choice):
- proc1=models2[model_choice]
- output1=proc1(inputs)
- return(output1)
-css=""""""
-
-
-with gr.Blocks(css=css) as myface:
- gr.HTML("""
-
-
-
-
- ToyWorld XL
-
-
-
-
-
SDXL models for your enjoyment!
-
-
Try out more than 420 Stable Diffusion models at Toy World by clicking here!
-
- """)
- with gr.Row():
- with gr.Column(scale=100):
- #Model selection dropdown
- model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
- with gr.Row():
- with gr.Column(scale=100):
- magic1=gr.Textbox(label="Your Prompt", lines=4)
- #align-items: center !important;
- #appearance: none !important;
-
-
- #border-style: none !important;
- #box-shadow: rgba(0, 0, 0, .2) 0 3px 5px -1px,rgba(0, 0, 0, .14) 0 6px 10px 0,rgba(0, 0, 0, .12) 0 1px 18px 0 !important;
- #box-sizing: border-box !important;
-
- #cursor: pointer !important;
- #display: inline-flex !important;
- #fill: currentcolor !important;
- #font-family: "Google Sans",Roboto,Arial,sans-serif !important;
- #font-size: 14px !important;
- #font-weight: 500 !important;
- #height: 48px !important;
- #justify-content: center !important;
- #letter-spacing: .25px !important;
- #line-height: normal !important;
- #max-width: 100% !important;
- #overflow: visible !important;
- #padding: 2px 24px !important;
- #position: relative !important;
- #text-align: center !important;
- #text-transform: none !important;
- #transition: box-shadow 280ms cubic-bezier(.4, 0, .2, 1),opacity 15ms linear 30ms,transform 270ms cubic-bezier(0, 0, .2, 1) 0ms !important;
- #user-select: none !important;
- #-webkit-user-select: none !important;
- #touch-action: manipulation !important;
- #width: auto !important;
- #will-change: transform,opacity !important;
- #z-index: 0 !important;
- gr.HTML("""""")
- run=gr.Button("Generate Image")
- with gr.Row():
- with gr.Column(style="width=800px"):
- output1=gr.Image(label=(f"{current_model}"))
-
-
- with gr.Row():
- with gr.Column(scale=50):
- input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea",lines=2)
- use_short=gr.Button("Use Short Prompt")
- see_prompts=gr.Button("Extend Idea")
-
-
- def short_prompt(inputs):
- return(inputs)
-
- model_name1.change(set_model,inputs=model_name1,outputs=[output1])
-
- run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
-
- use_short.click(short_prompt,inputs=[input_text],outputs=magic1)
-
- see_prompts.click(text_it1,inputs=[input_text],outputs=magic1)
-
-myface.queue(concurrency_count=200)
-myface.launch(inline=True, show_api=False, max_threads=400)
\ No newline at end of file
diff --git a/spaces/YueMafighting/FollowYourPose/style.css b/spaces/YueMafighting/FollowYourPose/style.css
deleted file mode 100644
index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000
--- a/spaces/YueMafighting/FollowYourPose/style.css
+++ /dev/null
@@ -1,3 +0,0 @@
-h1 {
- text-align: center;
-}
diff --git a/spaces/abhishek/scikit-learn-tabular-playground/app.py b/spaces/abhishek/scikit-learn-tabular-playground/app.py
deleted file mode 100644
index 60fb6a4c604e8cd539131c2e462b766c0d182393..0000000000000000000000000000000000000000
--- a/spaces/abhishek/scikit-learn-tabular-playground/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/scikit-learn/tabular-playground").launch()
\ No newline at end of file
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/__init__.py
deleted file mode 100644
index 915af28cefab14a14c1188ed861161080fd138a3..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .checkpoint import CheckpointHook
-from .closure import ClosureHook
-from .ema import EMAHook
-from .evaluation import DistEvalHook, EvalHook
-from .hook import HOOKS, Hook
-from .iter_timer import IterTimerHook
-from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook,
- NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook,
- TextLoggerHook, WandbLoggerHook)
-from .lr_updater import LrUpdaterHook
-from .memory import EmptyCacheHook
-from .momentum_updater import MomentumUpdaterHook
-from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
- GradientCumulativeOptimizerHook, OptimizerHook)
-from .profiler import ProfilerHook
-from .sampler_seed import DistSamplerSeedHook
-from .sync_buffer import SyncBuffersHook
-
-__all__ = [
- 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
- 'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook',
- 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook',
- 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
- 'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook',
- 'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook',
- 'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook',
- 'GradientCumulativeFp16OptimizerHook'
-]
diff --git a/spaces/abyildirim/inst-inpaint/app.py b/spaces/abyildirim/inst-inpaint/app.py
deleted file mode 100644
index 260e6878ac8d4d894f9e8439cd84bf627517a8b2..0000000000000000000000000000000000000000
--- a/spaces/abyildirim/inst-inpaint/app.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import argparse
-import gradio as gr
-import numpy as np
-import torch
-from PIL import Image
-import constants
-import utils
-from ldm.util import instantiate_from_config
-from omegaconf import OmegaConf
-from zipfile import ZipFile
-import os
-import requests
-import shutil
-
-def download_model(url):
- os.makedirs("models", exist_ok=True)
- local_filename = url.split('/')[-1]
- with requests.get(url, stream=True) as r:
- with open(os.path.join("models", local_filename), 'wb') as file:
- shutil.copyfileobj(r.raw, file)
- with ZipFile("models/gqa_inpaint.zip", 'r') as zObject:
- zObject.extractall(path="models/")
- os.remove("models/gqa_inpaint.zip")
-
-MODEL = None
-
-def inference(image: np.ndarray, instruction: str, center_crop: bool):
- if not instruction.lower().startswith("remove the"):
- raise gr.Error("Instruction should start with 'Remove the' !")
- image = Image.fromarray(image)
- cropped_image, image = utils.preprocess_image(image, center_crop=center_crop)
- output_image = MODEL.inpaint(image, instruction, num_steps=int(os.environ["NUM_STEPS"]), device="cuda", return_pil=True, seed=int(os.environ["SEED"]))
- return cropped_image, output_image
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--config",
- type=str,
- default="configs/latent-diffusion/gqa-inpaint-ldm-vq-f8-256x256.yaml",
- help="Path of the model config file",
- )
- parser.add_argument(
- "--checkpoint",
- type=str,
- default="models/gqa_inpaint/ldm/model.ckpt",
- help="Path of the model checkpoint file",
- )
- args = parser.parse_args()
-
- print("## Downloading the model file")
- download_model("https://huggingface.co/abyildirim/inst-inpaint-models/resolve/main/gqa_inpaint.zip")
- print("## Download is completed")
-
- print("## Running the demo")
- parsed_config = OmegaConf.load(args.config)
- MODEL = instantiate_from_config(parsed_config["model"])
- model_state_dict = torch.load(args.checkpoint, map_location="cpu")["state_dict"]
- MODEL.load_state_dict(model_state_dict)
- MODEL.eval()
- MODEL.to("cuda")
-
- sample_image, sample_instruction, sample_step = constants.EXAMPLES[3]
-
- gr.Interface(
- fn=inference,
- inputs=[
- gr.Image(type="numpy", value=sample_image, label="Source Image").style(
- height=256
- ),
- gr.Textbox(
- label="Instruction",
- lines=1,
- value=sample_instruction,
- ),
- gr.Checkbox(value=True, label="Center Crop", interactive=False),
- ],
- outputs=[
- gr.Image(type="pil", label="Cropped Image").style(height=256),
- gr.Image(type="pil", label="Output Image").style(height=256),
- ],
- allow_flagging="never",
- examples=constants.EXAMPLES,
- cache_examples=True,
- title=constants.TITLE,
- description=constants.DESCRIPTION,
- ).launch()
diff --git a/spaces/adhirk/ARKs_Contextual_Chronicle/my_functions.py b/spaces/adhirk/ARKs_Contextual_Chronicle/my_functions.py
deleted file mode 100644
index 7371bb6359af3617a701ea8b7fad8054aa12288e..0000000000000000000000000000000000000000
--- a/spaces/adhirk/ARKs_Contextual_Chronicle/my_functions.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Importing necessary libraries and packages
-
-import numpy as np
-import pandas as pd
-import matplotlib.pyplot as plt
-import seaborn as sns
-import plotly.express as px
-import string
-import re
-from sklearn.model_selection import train_test_split
-from sklearn.linear_model import LogisticRegression
-from sklearn.metrics import accuracy_score
-from sklearn.metrics import classification_report
-from sklearn.metrics import confusion_matrix
-from sklearn.feature_extraction.text import TfidfVectorizer
-
-import nltk
-from string import punctuation
-from sklearn.feature_extraction.text import CountVectorizer
-import pandas as pd
-from sklearn.pipeline import Pipeline
-from sklearn.model_selection import GridSearchCV
-
-from sklearn.utils.class_weight import compute_sample_weight
-from sklearn.svm import LinearSVC
-from sklearn.svm import SVC
-from sklearn.metrics.pairwise import cosine_similarity
-from imblearn.over_sampling import SMOTE
-
-import gensim
-import numpy as np
-from gensim.utils import simple_preprocess
-import gensim.downloader
-
-
-# Download the NLTK wordnet
-nltk.download('wordnet')
-
-# Instantiate a function for lemmatizing the text data
-def lem_tokenizer(s):
-
- # remove punctuation from the string using string.punctuation
- for char in s:
- if char in punctuation:
- s = s.replace(char, '')
-
- # make the entire string lowercase
- s = s.lower()
-
- # split the string at each space to make the list of tokens (uncleaned)
- tokens = s.split()
-
- # save NLTK stop words to a variable
- stop_words = nltk.corpus.stopwords.words('english')
-
- # use list comprehension to create a list of the tokens that are NOT stop words
- tokens_new = [token for token in tokens if token not in stop_words]
-
- # create WordNetLemmatizer object
- wnl = nltk.stem.WordNetLemmatizer()
-
- # list of part-of-speech tags
- pos_tags = ['v', 'n', 'a']
-
- # initiate empty list to collect lemmatized tokens
- tokens_lem = list()
-
- # loop through each token
- for token in tokens_new:
-
- # loop through each part-of-speech tag
- for pos_tag in pos_tags:
-
- # lemmatize each token using each part-of-speech tag
- token = wnl.lemmatize(word=token, pos=pos_tag)
-
- # append the lemmatized token to the new list
- tokens_lem.append(token)
-
- return tokens_lem
-
-
-###############################################################################
-
-
-# Instantiate a function for Stemming the text data
-
-stemmer = nltk.stem.PorterStemmer()
-
-# import the nltk stopwords
-nltk.download('stopwords')
-from nltk.corpus import stopwords
-
-ENGLISH_STOP_WORDS = stopwords.words('english')
-
-def remove_html_tags(text):
- pattern = re.compile(r'<.*?>')
- return pattern.sub('', text)
-
-
-def stem_tokenizer(sentence):
- # remove punctuation and set to lower case
- for punctuation_mark in string.punctuation:
- sentence = sentence.replace(punctuation_mark,'').lower()
-
- # remove digits using list comprehension
- sentence = ''.join([char for char in sentence if not char.isdigit()])
-
- # remove html tags
- sentence = remove_html_tags(sentence)
-
- # split sentence into words
- listofwords = sentence.split(' ')
- listofstemmed_words = []
-
- # remove stopwords and any tokens that are just empty strings
- for word in listofwords:
- if (not word in ENGLISH_STOP_WORDS) and (word!=''):
- # Stem words
- stemmed_word = stemmer.stem(word)
- listofstemmed_words.append(stemmed_word)
-
- return listofstemmed_words
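Both tokenizers return plain lists of normalized tokens, which is what the `CountVectorizer`/`TfidfVectorizer` pipelines expect from a custom `tokenizer`. A quick, hedged check of the stemming path (the sentence is made up; the NLTK stopwords download above must have completed):

```python
sample = "Running faster than 100 other runners was surprisingly easy!"
print(stem_tokenizer(sample))
# roughly ['run', 'faster', 'runner', 'surprisingli', 'easi']:
# punctuation, digits and NLTK stopwords are stripped before Porter stemming
```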
-
-###############################################################################
-
-# Instantiate a function for Sentence2Vec using Word2Vec
-
-# Download the "word2vec-google-news-300" model
-word2vec_model = gensim.downloader.load("word2vec-google-news-300")
-
-# Access the model's word vectors
-model = word2vec_model.vectors
-
-def sentence2vec(text):
- """
- Embed a sentence by averaging the word vectors of the tokenized text. Out-of-vocabulary words are replaced by the zero-vector.
- -----
-
- Input: text (string)
- Output: embedding vector (np.array)
- """
- tokenized = simple_preprocess(text)
-
- word_embeddings = []
- for word in tokenized:
- # if the word is in the model then embed
- if word in word2vec_model:
- vector = word2vec_model[word]
- # add zeros for out-of-vocab words
- else:
- vector = np.zeros(300)
-
- word_embeddings.append(vector)
-
- # average the word vectors
- sentence_embedding = np.mean(word_embeddings, axis=0)
-
- return sentence_embedding
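The averaged vectors are what any downstream cosine-similarity comparison operates on. A usage sketch with made-up sentences (the word2vec model loaded above is a large, roughly 1.6 GB download and must be available):

```python
from sklearn.metrics.pairwise import cosine_similarity

a = sentence2vec("The movie had great acting and a clever plot.")
b = sentence2vec("An excellent film with smart writing.")

print(a.shape)  # (300,), the dimensionality of word2vec-google-news-300
print(cosine_similarity(a.reshape(1, -1), b.reshape(1, -1))[0, 0])  # similarity in [-1, 1]
```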
diff --git a/spaces/airus/img-to-music/app.py b/spaces/airus/img-to-music/app.py
deleted file mode 100644
index 4fc5b287065d355b088095b6c6cda4378bce6022..0000000000000000000000000000000000000000
--- a/spaces/airus/img-to-music/app.py
+++ /dev/null
@@ -1,194 +0,0 @@
-import time
-import base64
-import gradio as gr
-from sentence_transformers import SentenceTransformer
-
-import httpx
-import json
-
-import os
-import requests
-import urllib.request
-
-from os import path
-from pydub import AudioSegment
-
-#img_to_text = gr.Blocks.load(name="spaces/pharma/CLIP-Interrogator")
-img_to_text = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")
-
-from share_btn import community_icon_html, loading_icon_html, share_js
-
-def get_prompts(uploaded_image, track_duration, gen_intensity, gen_mode):
- print("calling clip interrogator")
- #prompt = img_to_text(uploaded_image, "ViT-L (best for Stable Diffusion 1.*)", "fast", fn_index=1)[0]
- prompt = img_to_text(uploaded_image, 'fast', 4, fn_index=1)[0]
- print(prompt)
- music_result = generate_track_by_prompt(prompt, track_duration, gen_intensity, gen_mode)
- print(music_result)
- return music_result[0], gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
-
-from utils import get_tags_for_prompts, get_mubert_tags_embeddings, get_pat
-
-minilm = SentenceTransformer('all-MiniLM-L6-v2')
-mubert_tags_embeddings = get_mubert_tags_embeddings(minilm)
-
-
-def get_track_by_tags(tags, pat, duration, gen_intensity, gen_mode, maxit=20):
-
- r = httpx.post('https://api-b2b.mubert.com/v2/RecordTrackTTM',
- json={
- "method": "RecordTrackTTM",
- "params": {
- "pat": pat,
- "duration": duration,
- "format": "wav",
- "intensity":gen_intensity,
- "tags": tags,
- "mode": gen_mode
- }
- })
-
- rdata = json.loads(r.text)
- assert rdata['status'] == 1, rdata['error']['text']
- trackurl = rdata['data']['tasks'][0]['download_link']
-
- print('Generating track ', end='')
- for i in range(maxit):
- r = httpx.get(trackurl)
- if r.status_code == 200:
- return trackurl
- time.sleep(1)
-
-
-def generate_track_by_prompt(prompt, duration, gen_intensity, gen_mode):
- try:
- pat = get_pat("prodia@prodia.com")
- _, tags = get_tags_for_prompts(minilm, mubert_tags_embeddings, [prompt, ])[0]
- result = get_track_by_tags(tags, pat, int(duration), gen_intensity, gen_mode)
- print(result)
- return result, ",".join(tags), "Success"
- except Exception as e:
- return None, "", str(e)
-
-def convert_mp3_to_wav(mp3_filepath):
-
- url = mp3_filepath
- save_as = "file.mp3"
-
- data = urllib.request.urlopen(url)
-
- f = open(save_as,'wb')
- f.write(data.read())
- f.close()
-
- wave_file="file.wav"
-
- sound = AudioSegment.from_mp3(save_as)
- sound.export(wave_file, format="wav")
-
- return wave_file
-
-css = """
-#col-container {max-width: 580px; margin-left: auto; margin-right: auto;}
-a {text-decoration-line: underline; font-weight: 600;}
-.footer {
- margin-bottom: 45px;
- margin-top: 10px;
- text-align: center;
- border-bottom: 1px solid #e5e5e5;
- }
- .footer>p {
- font-size: .8rem;
- display: inline-block;
- padding: 0 10px;
- transform: translateY(10px);
- background: white;
- }
- .dark .footer {
- border-color: #303030;
- }
- .dark .footer>p {
- background: #0b0f19;
- }
-.animate-spin {
- animation: spin 1s linear infinite;
-}
-@keyframes spin {
- from {
- transform: rotate(0deg);
- }
- to {
- transform: rotate(360deg);
- }
-}
-#share-btn-container {
- display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
-}
-#share-btn {
- all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
-}
-#share-btn * {
- all: unset;
-}
-#share-btn-container div:nth-child(-n+2){
- width: auto !important;
- min-height: 0px !important;
-}
-#share-btn-container .wrap {
- display: none !important;
-}
-"""
-
-article = """
-
-
-
-"""
-
-with gr.Blocks(css="style.css") as demo:
- with gr.Column(elem_id="col-container"):
- gr.HTML("""
-
-
- Image to Music
-
-
-
- Sends an image in to CLIP Interrogator
- to generate a text prompt which is then run through
- Mubert text-to-music to generate music from the input image!
-
- """)
-
-
- input_img = gr.Image(type="filepath", elem_id="input-img")
- with gr.Row():
- track_duration = gr.Slider(minimum=20, maximum=120, value=30, step=5, label="Track duration", elem_id="duration-inp")
- gen_intensity = gr.Dropdown(choices=["low", "medium", "high"], value="high", label="Complexity")
- gen_mode = gr.Radio(label="mode", choices=["track", "loop"], value="track")
- generate = gr.Button("Generate Music from Image")
-
- music_output = gr.Audio(label="Result", type="filepath", elem_id="music-output")
-
- with gr.Group(elem_id="share-btn-container"):
- community_icon = gr.HTML(community_icon_html, visible=False)
- loading_icon = gr.HTML(loading_icon_html, visible=False)
- share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
-
- gr.HTML(article)
- generate.click(get_prompts, inputs=[input_img,track_duration,gen_intensity,gen_mode], outputs=[music_output, share_button, community_icon, loading_icon], api_name="i2m")
- share_button.click(None, [], [], _js=share_js)
-
-demo.queue(max_size=32, concurrency_count=20).launch()
\ No newline at end of file
diff --git a/spaces/akoksal/LongForm-OPT-125M/app.py b/spaces/akoksal/LongForm-OPT-125M/app.py
deleted file mode 100644
index 311e6083c9801ecb0231e6e12ef5652067d15d53..0000000000000000000000000000000000000000
--- a/spaces/akoksal/LongForm-OPT-125M/app.py
+++ /dev/null
@@ -1,103 +0,0 @@
-examples = [
- "Write an essay about meditation. [EOI]",
- "Give me 5 steps to clean my room. [EOI]",
- "How are the continents formed? [EOI]",
- "Prompt: A man draws a gun in a dark alley and asks for your wallet. You begrudgingly obey. He throws it on the ground, shoots it till it screeches, and turns to you; 'you are safe now'. Write a story about given prompt. [EOI]",
- "Write directions of a cooking recipe with these ingredients: chicken breast, carrots, green peas, celery, butter, onion, flour, salt, black pepper, celery seed, chicken broth, milk, unbaked pie crusts? [EOI]",
-]
-
-
-import gradio as gr
-from transformers import AutoTokenizer, pipeline, AutoModelForCausalLM
-
-
-
-tokenizer = AutoTokenizer.from_pretrained("akoksal/LongForm-OPT-125M")
-generate = pipeline('text-generation', model="akoksal/LongForm-OPT-125M", tokenizer=tokenizer)
-
-
-def predict(instruction, topp, max_length, temperature):
- if "[EOI]" not in instruction:
- instruction = instruction + " [EOI]"
- x = generate(instruction,
- do_sample=True,
- top_p=topp,
- num_return_sequences=1,
- max_length=max_length,
- temperature=temperature
- )[0]["generated_text"]
-
- return x[len(instruction):]
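`predict` only appends the `[EOI]` marker when it is missing and strips the prompt prefix from the pipeline output, so it can also be called directly. The sampling values below mirror the slider defaults further down:

```python
# Instruction without the [EOI] marker; predict adds it automatically.
answer = predict("Give me 5 steps to clean my room.", topp=0.9, max_length=64, temperature=1.0)
print(answer)
```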
-
-def process_example(args):
- # run the example instruction with the same sampling defaults as the sliders below
- return predict(args, topp=0.9, max_length=64, temperature=1.0)
-
-
-with gr.Blocks() as demo:
- with gr.Column():
- gr.Markdown("""# 📜LongForm
-
-The LongForm dataset is created by leveraging English corpus examples with augmented instructions. We select a diverse set of human-written documents from existing corpora such as C4 and Wikipedia and generate instructions for the given documents via LLMs. Then, we extend these examples with structured corpora examples such as Stack Exchange and WikiHow and task examples such as question answering, email writing, grammar error correction, story/poem generation, and text summarization.
-
-**Paper**: https://arxiv.org/abs/2304.08460
-
-**Dataset and Models**: https://github.com/akoksal/LongForm
-
-**Tips**:
-
-1. Use the "[EOI]" token at the end of the instruction for OPT models. This demo adds [EOI] automatically if you forget it.
-
-2. The LongForm dataset and models mainly focus on long text generation and have limitations regarding structured prediction tasks in NLP.
-"""
- )
- with gr.Row():
- with gr.Column(scale=3):
- instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")
- with gr.Box():
- gr.Markdown("**Answer**")
- output = gr.Markdown(elem_id="q-output")
- submit = gr.Button("Generate", variant="primary")
- gr.Examples(
- examples=examples,
- inputs=[instruction],
- cache_examples=False,
- fn=process_example,
- outputs=[output],
- )
-
- with gr.Column(scale=1):
- top_p = gr.Slider(
- label="Top-p (nucleus sampling)",
- value=0.90,
- minimum=0.0,
- maximum=1,
- step=0.05,
- interactive=True,
- info="Higher values sample low-probability tokens",
- )
- max_length = gr.Slider(
- label="Max length",
- value=64,
- minimum=1,
- maximum=512,
- step=4,
- interactive=True,
- info="The maximum length of the output",
- )
- temperature = gr.Slider(
- label="Temperature",
- value=1.0,
- minimum=0.0,
- maximum=2.0,
- step=0.1,
- interactive=True,
- info="Higher values sample more diverse outputs",
- )
-
- submit.click(predict, inputs=[instruction, top_p, max_length, temperature], outputs=[output])
-
-
-demo.queue(concurrency_count=4)
-demo.launch()
\ No newline at end of file
diff --git a/spaces/alcanodi/stabilityai-stable-diffusion-xl-base-1.0/README.md b/spaces/alcanodi/stabilityai-stable-diffusion-xl-base-1.0/README.md
deleted file mode 100644
index 900ac616b5791e2030281a51ff742e70b0e06470..0000000000000000000000000000000000000000
--- a/spaces/alcanodi/stabilityai-stable-diffusion-xl-base-1.0/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Stabilityai Stable Diffusion Xl Base 1.0
-emoji: 🌖
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/alexat/TextToVoiceEn/app.py b/spaces/alexat/TextToVoiceEn/app.py
deleted file mode 100644
index c487c2f8d1936e065891d5ad61ca2b0fb0478f9c..0000000000000000000000000000000000000000
--- a/spaces/alexat/TextToVoiceEn/app.py
+++ /dev/null
@@ -1,182 +0,0 @@
-import numpy as np
-import gradio as gr
-import os
-os.environ["SUNO_OFFLOAD_CPU"] = 'True'
-os.environ["SUNO_USE_SMALL_MODELS"] = 'True'
-from bark import SAMPLE_RATE, generate_audio, preload_models
-from bark.generation import SUPPORTED_LANGS
-
-DEBUG_MODE = False
-
-if not DEBUG_MODE:
- _ = preload_models()
-
-AVAILABLE_PROMPTS = ["Unconditional", "Announcer"]
-PROMPT_LOOKUP = {}
-for _, lang in SUPPORTED_LANGS:
- for n in range(10):
- label = f"Speaker {n} ({lang})"
- AVAILABLE_PROMPTS.append(label)
- PROMPT_LOOKUP[label] = f"{lang}_speaker_{n}"
-PROMPT_LOOKUP["Unconditional"] = None
-PROMPT_LOOKUP["Announcer"] = "announcer"
-
-default_text = "Empowering the people, providing upward mobility. We deliver knowledge, skills, and tools with world class developers and educators."
-
-title = "# AI Voice Generator"
-
-description = """
-Voice Generator can generate highly realistic, multilingual speech as well as other audio - including music, background noise and simple sound effects. \
-"""
-
-article = """
-
-## 🌎 Foreign Language
-
-Voice Generator supports various languages out-of-the-box and automatically determines language from input text. \
-When prompted with code-switched text, Voice Generator will even attempt to employ the native accent for the respective languages in the same voice.
-
-Try the prompt:
-
-```
-Buenos días Miguel. Tu colega piensa que tu alemán es extremadamente malo. But I suppose your english isn't terrible.
-```
-
-## 🤭 Non-Speech Sounds
-
-Below is a list of some known non-speech sounds; we keep updating our library. \
-
-* [laughter]
-* [laughs]
-* [sighs]
-* [music]
-* [gasps]
-* [clears throat]
-* — or ... for hesitations
-* ♪ for song lyrics
-* capitalization for emphasis of a word
-* MAN/WOMAN: for bias towards speaker
-
-Try the prompt:
-
-```
-" [clears throat] Hello, my name is Krystal. And, uh — and I like pizza. [laughs] But I also have other interests such as... ♪ singing ♪."
-```
-
-## 🎶 Music
-Voice Generator can generate all types of audio, and, in principle, doesn't see a difference between speech and music. \
-Sometimes it chooses to generate text as music, but you can help it out by adding music notes around your lyrics.
-
-Try the prompt:
-
-```
-♪ In the jungle, the mighty jungle, the lion barks tonight ♪
-```
-
-## 🧬 Voice Cloning
-
-Voice Generator has the capability to fully clone voices - including tone, pitch, emotion and prosody. \
-The model also attempts to preserve music, ambient noise, etc. from input audio. \
-However, to mitigate misuse of this technology, we restrict the audio history prompts to a small set of Krystal-provided, fully synthetic options.
-
-## 👥 Speaker Prompts
-
-You can provide certain speaker prompts such as NARRATOR, MAN, WOMAN, etc. \
-Please note that these are not always respected, especially if a conflicting audio history prompt is given.
-
-Try the prompt:
-
-```
-WOMAN: I would like an oatmilk latte please.
-MAN: Wow, that's expensive!
-```
-
-"""
-
-examples = [
- ["Please surprise me and speak in whatever voice you enjoy. Vielen Dank und Gesundheit!",
- "Unconditional"], # , 0.7, 0.7],
- ["Hello, my name is Krystal. And, uh — and I like pizza. [laughs] But I also have other interests such as playing tic tac toe.",
- "Speaker 1 (en)"], # , 0.7, 0.7],
- ["Buenos días Miguel. Tu colega piensa que tu alemán es extremadamente malo. But I suppose your english isn't terrible.",
- "Speaker 0 (es)"], # , 0.7, 0.7],
-]
-
-
-def gen_tts(text, history_prompt): # , temp_semantic, temp_waveform):
- history_prompt = PROMPT_LOOKUP[history_prompt]
- if DEBUG_MODE:
- audio_arr = np.zeros(SAMPLE_RATE)
- else:
- # , text_temp=temp_semantic, waveform_temp=temp_waveform)
- audio_arr = generate_audio(text, history_prompt=history_prompt)
- audio_arr = (audio_arr * 32767).astype(np.int16)
- return (SAMPLE_RATE, audio_arr)
-
-
-css = """
- #share-btn-container {
- display: none;
- padding-left: 0.5rem !important;
- padding-right: 0.5rem !important;
- background-color: #000000;
- justify-content: center;
- align-items: center;
- border-radius: 9999px !important;
- width: 13rem;
- margin-top: 10px;
- margin-left: auto;
- flex: unset !important;
- }
- #share-btn {
- all: initial;
- color: #ffffff;
- font-weight: 600;
- cursor: pointer;
- font-family: 'IBM Plex Sans', sans-serif;
- margin-left: 0.5rem !important;
- padding-top: 0.25rem !important;
- padding-bottom: 0.25rem !important;
- right:0;
- }
- #share-btn * {
- all: unset !important;
- }
- #share-btn-container div:nth-child(-n+2){
- width: auto !important;
- min-height: 0px !important;
- }
- #share-btn-container .wrap {
- display: none !important;
- }
-
- footer {
- visibility: hidden;
- }
-
-"""
-with gr.Blocks(css=css) as block:
- gr.Markdown(title)
- gr.Markdown(description)
- with gr.Row():
- with gr.Column():
- input_text = gr.Textbox(
- label="Input Text", lines=2, value=default_text, elem_id="input_text")
- options = gr.Dropdown(
- AVAILABLE_PROMPTS, value="Speaker 1 (en)", label="Acoustic Prompt", elem_id="speaker_option")
- run_button = gr.Button("Generate Audio")
- with gr.Column():
- audio_out = gr.Audio(label="Generated Audio",
- type="numpy", elem_id="audio_out")
-
- inputs = [input_text, options]
- outputs = [audio_out]
- gr.Examples(examples=examples, fn=gen_tts, inputs=inputs,
- outputs=outputs, cache_examples=True)
- gr.Markdown(article)
- run_button.click(fn=lambda: gr.update(visible=False), inputs=None, outputs=None, queue=False).then(
- fn=gen_tts, inputs=inputs, outputs=outputs, queue=True).then(
- fn=lambda: gr.update(visible=True), inputs=None, outputs=None, queue=False)
-
-block.queue()
-block.launch()
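For reference, the deleted Space above just wraps Bark's `generate_audio` in a Gradio UI. A minimal sketch of driving the same pipeline without the UI (assuming the `bark` and `scipy` packages are installed; the output filename is illustrative):

```python
# Minimal sketch: call Bark directly, outside Gradio (assumes `bark` and `scipy` are installed).
import numpy as np
from scipy.io.wavfile import write as write_wav
from bark import SAMPLE_RATE, generate_audio, preload_models

preload_models()  # downloads/caches the text, coarse and fine models on first use

# "en_speaker_1" mirrors the PROMPT_LOOKUP entries built above (f"{lang}_speaker_{n}")
audio = generate_audio("Hello, my name is Krystal.", history_prompt="en_speaker_1")

# Bark returns float32 audio in [-1, 1]; convert to 16-bit PCM before writing, as the app does
write_wav("out.wav", SAMPLE_RATE, (audio * 32767).astype(np.int16))
```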
diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcharsetprober.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcharsetprober.py
deleted file mode 100644
index 46ba835c66c9f4c3b15b0a0671447d33b3b240d1..0000000000000000000000000000000000000000
--- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sbcharsetprober.py
+++ /dev/null
@@ -1,145 +0,0 @@
-######################## BEGIN LICENSE BLOCK ########################
-# The Original Code is Mozilla Universal charset detector code.
-#
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 2001
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):
-# Mark Pilgrim - port to Python
-# Shy Shalom - original C code
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
-# 02110-1301 USA
-######################### END LICENSE BLOCK #########################
-
-from collections import namedtuple
-
-from .charsetprober import CharSetProber
-from .enums import CharacterCategory, ProbingState, SequenceLikelihood
-
-
-SingleByteCharSetModel = namedtuple('SingleByteCharSetModel',
- ['charset_name',
- 'language',
- 'char_to_order_map',
- 'language_model',
- 'typical_positive_ratio',
- 'keep_ascii_letters',
- 'alphabet'])
-
-
-class SingleByteCharSetProber(CharSetProber):
- SAMPLE_SIZE = 64
- SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2
- POSITIVE_SHORTCUT_THRESHOLD = 0.95
- NEGATIVE_SHORTCUT_THRESHOLD = 0.05
-
- def __init__(self, model, reversed=False, name_prober=None):
- super(SingleByteCharSetProber, self).__init__()
- self._model = model
- # TRUE if we need to reverse every pair in the model lookup
- self._reversed = reversed
- # Optional auxiliary prober for name decision
- self._name_prober = name_prober
- self._last_order = None
- self._seq_counters = None
- self._total_seqs = None
- self._total_char = None
- self._freq_char = None
- self.reset()
-
- def reset(self):
- super(SingleByteCharSetProber, self).reset()
- # char order of last character
- self._last_order = 255
- self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
- self._total_seqs = 0
- self._total_char = 0
- # characters that fall in our sampling range
- self._freq_char = 0
-
- @property
- def charset_name(self):
- if self._name_prober:
- return self._name_prober.charset_name
- else:
- return self._model.charset_name
-
- @property
- def language(self):
- if self._name_prober:
- return self._name_prober.language
- else:
- return self._model.language
-
- def feed(self, byte_str):
- # TODO: Make filter_international_words keep things in self.alphabet
- if not self._model.keep_ascii_letters:
- byte_str = self.filter_international_words(byte_str)
- if not byte_str:
- return self.state
- char_to_order_map = self._model.char_to_order_map
- language_model = self._model.language_model
- for char in byte_str:
- order = char_to_order_map.get(char, CharacterCategory.UNDEFINED)
- # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
- # CharacterCategory.SYMBOL is actually 253, so we use CONTROL
- # to make it closer to the original intent. The only difference
- # is whether or not we count digits and control characters for
- # _total_char purposes.
- if order < CharacterCategory.CONTROL:
- self._total_char += 1
- # TODO: Follow uchardet's lead and discount confidence for frequent
- # control characters.
- # See https://github.com/BYVoid/uchardet/commit/55b4f23971db61
- if order < self.SAMPLE_SIZE:
- self._freq_char += 1
- if self._last_order < self.SAMPLE_SIZE:
- self._total_seqs += 1
- if not self._reversed:
- lm_cat = language_model[self._last_order][order]
- else:
- lm_cat = language_model[order][self._last_order]
- self._seq_counters[lm_cat] += 1
- self._last_order = order
-
- charset_name = self._model.charset_name
- if self.state == ProbingState.DETECTING:
- if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
- confidence = self.get_confidence()
- if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
- self.logger.debug('%s confidence = %s, we have a winner',
- charset_name, confidence)
- self._state = ProbingState.FOUND_IT
- elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
- self.logger.debug('%s confidence = %s, below negative '
- 'shortcut threshold %s', charset_name,
- confidence,
- self.NEGATIVE_SHORTCUT_THRESHOLD)
- self._state = ProbingState.NOT_ME
-
- return self.state
-
- def get_confidence(self):
- r = 0.01
- if self._total_seqs > 0:
- r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) /
- self._total_seqs / self._model.typical_positive_ratio)
- r = r * self._freq_char / self._total_char
- if r >= 1.0:
- r = 0.99
- return r
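The prober above is an internal building block of chardet; callers normally go through the top-level API. A small sketch of the usual entry point (assuming the standalone `chardet` package rather than pip's vendored copy):

```python
# Sketch of the public chardet API that drives probers like SingleByteCharSetProber.
import chardet

raw = "Привет, мир".encode("windows-1251")  # single-byte Cyrillic sample
result = chardet.detect(raw)
# result is a dict such as {'encoding': 'windows-1251', 'confidence': 0.9..., 'language': 'Russian'}
print(result["encoding"], result["confidence"])
```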
diff --git a/spaces/aliceoq/vozes-da-loirinha/lib/infer_pack/models_onnx.py b/spaces/aliceoq/vozes-da-loirinha/lib/infer_pack/models_onnx.py
deleted file mode 100644
index 963e67b29f828e9fdd096397952054fe77cf3d10..0000000000000000000000000000000000000000
--- a/spaces/aliceoq/vozes-da-loirinha/lib/infer_pack/models_onnx.py
+++ /dev/null
@@ -1,819 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from lib.infer_pack import modules
-from lib.infer_pack import attentions
-from lib.infer_pack import commons
-from lib.infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from lib.infer_pack.commons import init_weights
-import numpy as np
-from lib.infer_pack import commons
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder768(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(768, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
- sine_amp: amplitude of sine waveform (default 0.1)
- noise_std: std of Gaussian noise (default 0.003)
- voiced_threshold: F0 threshold for U/V classification (default 0)
- flag_for_pulse: this SineGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
- rad_values = (f0_buf / self.sampling_rate) % 1  # taking % 1 here means the n_har products cannot be optimized away in post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
- tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # applying % 1 here would prevent the later cumsum from being optimized
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(
- 2, 1
- ) #######
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonic above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that amplitude of noise in unvoiced is decided
- by sine_amp
- voiced_threshold: threshold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length, 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMsNSFsidM(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- version,
- **kwargs
- ):
- super().__init__()
- if isinstance(sr, str):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- if version == "v1":
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- else:
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- self.speaker_map = None
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def construct_spkmixmap(self, n_speaker):
- self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
- for i in range(n_speaker):
- self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
- self.speaker_map = self.speaker_map.unsqueeze(0)
-
- def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
- if self.speaker_map is not None: # [N, S] * [S, B, 1, H]
- g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
- g = g * self.speaker_map # [N, S, B, 1, H]
- g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
- g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
- else:
- g = g.unsqueeze(0)
- g = self.emb_g(g).transpose(1, 2)
-
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class MultiPeriodDiscriminatorV2(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminatorV2, self).__init__()
- # periods = [2, 3, 5, 7, 11, 17]
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
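The `SineGen` docstring above describes the NSF excitation signal; here is a small sketch of exercising it in isolation (the sample rate, upsampling factor and F0 values are illustrative assumptions, not taken from a real RVC config, and it assumes the Space's `lib.infer_pack` package is importable):

```python
# Sketch: generate an NSF sine excitation with SineGen alone (illustrative shapes/values).
import torch
from lib.infer_pack.models_onnx import SineGen  # path as used by this (deleted) Space

sine_gen = SineGen(samp_rate=40000, harmonic_num=0)

f0 = torch.zeros(1, 200)      # (batch, frames); 0 Hz marks unvoiced frames
f0[:, 50:150] = 220.0         # a voiced stretch at 220 Hz

# upp is the total upsampling factor (np.prod(upsample_rates) in GeneratorNSF); 400 is assumed here
sine, uv, noise = sine_gen(f0, upp=400)
print(sine.shape)             # torch.Size([1, 80000, 1]) -> frames * upp samples, harmonic_num + 1 dims
```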
diff --git a/spaces/alimeituan/gpt2/README.md b/spaces/alimeituan/gpt2/README.md
deleted file mode 100644
index 6f7160544295304b846beaa24314a53254a78930..0000000000000000000000000000000000000000
--- a/spaces/alimeituan/gpt2/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Gpt2
-emoji: 📊
-colorFrom: pink
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/allknowingroger/Image-Models-Test106/README.md b/spaces/allknowingroger/Image-Models-Test106/README.md
deleted file mode 100644
index 98533d322f9b146b96635822b2ea5b2c3aef1801..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test106/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-duplicated_from: allknowingroger/Image-Models-Test104
----
-
-
\ No newline at end of file
diff --git a/spaces/allknowingroger/Image-Models-Test153/README.md b/spaces/allknowingroger/Image-Models-Test153/README.md
deleted file mode 100644
index a3a43bf672ca727d8113068aed4ea790c9de9309..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test153/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-duplicated_from: allknowingroger/Image-Models-Test142
----
-
-
\ No newline at end of file
diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_dsound_surround.c b/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_dsound_surround.c
deleted file mode 100644
index b7c887dff9e9e8bfbe22a240ce19cc8f50419158..0000000000000000000000000000000000000000
--- a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_dsound_surround.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * $Id: $
- * Portable Audio I/O Library
- * Windows DirectSound surround sound output test
- *
- * Copyright (c) 2007 Ross Bencina
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files
- * (the "Software"), to deal in the Software without restriction,
- * including without limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * The text above constitutes the entire PortAudio license; however,
- * the PortAudio community also makes the following non-binding requests:
- *
- * Any person wishing to distribute modifications to the Software is
- * requested to send the modifications to the original developer so that
- * they can be incorporated into the canonical version. It is also
- * requested that these non-binding requests be included along with the
- * license above.
- */
-
-#include <stdio.h>
-#include <math.h>
-
-#include <windows.h> /* required when using pa_win_wmme.h */
-#include <mmsystem.h> /* required when using pa_win_wmme.h */
-
-#include "portaudio.h"
-#include "pa_win_ds.h"
-
-#define NUM_SECONDS (12)
-#define SAMPLE_RATE (44100)
-#define FRAMES_PER_BUFFER (64)
-
-#ifndef M_PI
-#define M_PI (3.14159265)
-#endif
-
-#define TABLE_SIZE (100)
-
-#define CHANNEL_COUNT (6)
-
-
-
-typedef struct
-{
- float sine[TABLE_SIZE];
- int phase;
- int currentChannel;
- int cycleCount;
-}
-paTestData;
-
-/* This routine will be called by the PortAudio engine when audio is needed.
-** It may be called at interrupt level on some machines so don't do anything
-** that could mess up the system like calling malloc() or free().
-*/
-static int patestCallback( const void *inputBuffer, void *outputBuffer,
- unsigned long framesPerBuffer,
- const PaStreamCallbackTimeInfo* timeInfo,
- PaStreamCallbackFlags statusFlags,
- void *userData )
-{
- paTestData *data = (paTestData*)userData;
- float *out = (float*)outputBuffer;
- unsigned long i,j;
-
- (void) timeInfo; /* Prevent unused variable warnings. */
- (void) statusFlags;
- (void) inputBuffer;
-
- for( i=0; i<framesPerBuffer; i++ )
- {
- for( j = 0; j < CHANNEL_COUNT; ++j ){
- if( j == data->currentChannel && data->cycleCount < 4410 ){
- *out++ = data->sine[data->phase];
- data->phase += 1 + j; // play each channel at a different pitch so they can be distinguished
- if( data->phase >= TABLE_SIZE ){
- data->phase -= TABLE_SIZE;
- }
- }else{
- *out++ = 0;
- }
- }
-
- data->cycleCount++;
- if( data->cycleCount > 44100 ){
- data->cycleCount = 0;
-
- ++data->currentChannel;
- if( data->currentChannel >= CHANNEL_COUNT )
- data->currentChannel -= CHANNEL_COUNT;
- }
- }
-
- return paContinue;
-}
-
-/*******************************************************************/
-int main(int argc, char* argv[])
-{
- PaStreamParameters outputParameters;
- PaWinDirectSoundStreamInfo directSoundStreamInfo;
- PaStream *stream;
- PaError err;
- paTestData data;
- int i;
- int deviceIndex;
-
- printf("PortAudio Test: output a sine blip on each channel. SR = %d, BufSize = %d, Chans = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER, CHANNEL_COUNT);
-
- err = Pa_Initialize();
- if( err != paNoError ) goto error;
-
- deviceIndex = Pa_GetHostApiInfo( Pa_HostApiTypeIdToHostApiIndex( paDirectSound ) )->defaultOutputDevice;
- if( argc == 2 ){
- sscanf( argv[1], "%d", &deviceIndex );
- }
-
- printf( "using device id %d (%s)\n", deviceIndex, Pa_GetDeviceInfo(deviceIndex)->name );
-
- /* initialise sinusoidal wavetable */
- for( i=0; i<TABLE_SIZE; i++ )
- {
- data.sine[i] = (float) sin( ((double)i/(double)TABLE_SIZE) * M_PI * 2. );
- }
- data.phase = 0;
- data.currentChannel = 0;
- data.cycleCount = 0;
-
- outputParameters.device = deviceIndex;
- outputParameters.channelCount = CHANNEL_COUNT;
- outputParameters.sampleFormat = paFloat32; /* 32 bit floating point output */
- outputParameters.suggestedLatency = Pa_GetDeviceInfo(deviceIndex)->defaultLowOutputLatency;
- outputParameters.hostApiSpecificStreamInfo = NULL;
-
- /* it's not strictly necessary to provide a channelMask for surround sound
- output. But if you want to be sure which channel mask PortAudio will use
- then you should supply one */
- directSoundStreamInfo.size = sizeof(PaWinDirectSoundStreamInfo);
- directSoundStreamInfo.hostApiType = paDirectSound;
- directSoundStreamInfo.version = 1;
- directSoundStreamInfo.flags = paWinDirectSoundUseChannelMask;
- directSoundStreamInfo.channelMask = PAWIN_SPEAKER_5POINT1; /* request 5.1 output format */
- outputParameters.hostApiSpecificStreamInfo = &directSoundStreamInfo;
-
- if( Pa_IsFormatSupported( 0, &outputParameters, SAMPLE_RATE ) == paFormatIsSupported ){
- printf( "Pa_IsFormatSupported reports device will support %d channels.\n", CHANNEL_COUNT );
- }else{
- printf( "Pa_IsFormatSupported reports device will not support %d channels.\n", CHANNEL_COUNT );
- }
-
- err = Pa_OpenStream(
- &stream,
- NULL, /* no input */
- &outputParameters,
- SAMPLE_RATE,
- FRAMES_PER_BUFFER,
- paClipOff, /* we won't output out of range samples so don't bother clipping them */
- patestCallback,
- &data );
- if( err != paNoError ) goto error;
-
- err = Pa_StartStream( stream );
- if( err != paNoError ) goto error;
-
- printf("Play for %d seconds.\n", NUM_SECONDS );
- Pa_Sleep( NUM_SECONDS * 1000 );
-
- err = Pa_StopStream( stream );
- if( err != paNoError ) goto error;
-
- err = Pa_CloseStream( stream );
- if( err != paNoError ) goto error;
-
- Pa_Terminate();
- printf("Test finished.\n");
-
- return err;
-error:
- Pa_Terminate();
- fprintf( stderr, "An error occurred while using the portaudio stream\n" );
- fprintf( stderr, "Error number: %d\n", err );
- fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
- return err;
-}
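The callback above writes interleaved frames: for every frame it emits CHANNEL_COUNT samples and only the currently selected channel carries the sine blip. A rough Python/NumPy rendering of that buffer layout (purely illustrative, not part of PortAudio):

```python
# Sketch of the interleaved 5.1 buffer the C callback builds: frame-major, channel-minor.
import numpy as np

CHANNEL_COUNT = 6
frames = 8
current_channel = 2          # the channel currently playing the blip

table = np.sin(2 * np.pi * np.arange(100) / 100).astype(np.float32)
phase = 0

buf = np.zeros(frames * CHANNEL_COUNT, dtype=np.float32)
for i in range(frames):
    for j in range(CHANNEL_COUNT):
        if j == current_channel:
            buf[i * CHANNEL_COUNT + j] = table[phase]
            phase = (phase + 1 + j) % len(table)
# buf now holds [frame0_ch0, frame0_ch1, ..., frame0_ch5, frame1_ch0, ...]
```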
diff --git a/spaces/ammarnasr/Sem-GAN-Bird-Image-Generator/discriminator.py b/spaces/ammarnasr/Sem-GAN-Bird-Image-Generator/discriminator.py
deleted file mode 100644
index 688343b6b621c3dc5d6fba0c7fdedf594c6d48be..0000000000000000000000000000000000000000
--- a/spaces/ammarnasr/Sem-GAN-Bird-Image-Generator/discriminator.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import torch
-import torch.nn as nn
-from models import downBlock, Block3x3_leakRelu
-
-
-def encode_image_by_16times(ndf):
- encode_img = nn.Sequential(
- nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
- nn.LeakyReLU(0.2, inplace=True),
- nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
- nn.BatchNorm2d(ndf * 2),
- nn.LeakyReLU(0.2, inplace=True),
- nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
- nn.BatchNorm2d(ndf * 4),
- nn.LeakyReLU(0.2, inplace=True),
- nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
- nn.BatchNorm2d(ndf * 8),
- nn.LeakyReLU(0.2, inplace=True)
- )
- return encode_img
-
-class D_NET256(nn.Module):
- def __init__(self,DF_DIM, EMBEDDING_DIM ,b_jcu=True):
- super(D_NET256, self).__init__()
- ndf = DF_DIM
- nef = EMBEDDING_DIM
- self.img_code_s16 = encode_image_by_16times(ndf)
- self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
- self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
- self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)
- self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)
- if b_jcu:
- self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
- else:
- self.UNCOND_DNET = None
- self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
-
- def forward(self, x_var):
- x_code16 = self.img_code_s16(x_var)
- x_code8 = self.img_code_s32(x_code16)
- x_code4 = self.img_code_s64(x_code8)
- x_code4 = self.img_code_s64_1(x_code4)
- x_code4 = self.img_code_s64_2(x_code4)
- return x_code4
-
-
-
-class D_GET_LOGITS(nn.Module):
- def __init__(self, ndf, nef, bcondition=False):
- super(D_GET_LOGITS, self).__init__()
- self.df_dim = ndf
- self.ef_dim = nef
- self.bcondition = bcondition
- if self.bcondition:
- self.jointConv = Block3x3_leakRelu(ndf * 8 + nef, ndf * 8)
-
- self.outlogits = nn.Sequential(
- nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
- nn.Sigmoid())
-
- def forward(self, h_code, c_code=None):
- if self.bcondition and c_code is not None:
- c_code = c_code.view(-1, self.ef_dim, 1, 1)
- c_code = c_code.repeat(1, 1, 4, 4)
- h_c_code = torch.cat((h_code, c_code), 1)
- h_c_code = self.jointConv(h_c_code)
- else:
- h_c_code = h_code
-
- output = self.outlogits(h_c_code)
- return output.view(-1)
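A quick shape check for the discriminator above. This is a sketch only: it assumes the companion `models` module with `downBlock`/`Block3x3_leakRelu` is importable, and the 64/256 dimensions are typical AttnGAN-style settings rather than values confirmed by this repo:

```python
# Sketch: run a dummy 256x256 image through D_NET256 and its conditional head.
import torch
from discriminator import D_NET256  # requires the repo's `models` helpers on the path

netD = D_NET256(DF_DIM=64, EMBEDDING_DIM=256)

img = torch.randn(1, 3, 256, 256)          # fake RGB image
h_code = netD(img)                         # expected shape (1, 64 * 8, 4, 4)

c_code = torch.randn(1, 256)               # sentence embedding
logits = netD.COND_DNET(h_code, c_code)    # conditional real/fake score, shape (1,)
print(h_code.shape, logits.shape)
```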
diff --git a/spaces/aniket/gradsflow-text-classification/app.py b/spaces/aniket/gradsflow-text-classification/app.py
deleted file mode 100644
index a8122d77c2d548ba0f791e1dec876df0e066992d..0000000000000000000000000000000000000000
--- a/spaces/aniket/gradsflow-text-classification/app.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
-
-from chitra.serve.app import GradioApp
-
-tokenizer = AutoTokenizer.from_pretrained(
- "finiteautomata/beto-sentiment-analysis")
-model = AutoModelForSequenceClassification.from_pretrained(
- "finiteautomata/beto-sentiment-analysis")
-classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
-
-
-app = GradioApp(
- "text-classification",
- model=classifier,
- desc="🤗 HuggingFace Sentiment Analysis Example with CHITRA"
-)
-app.run()
diff --git a/spaces/animesh651/ChatAPT_v1/app.py b/spaces/animesh651/ChatAPT_v1/app.py
deleted file mode 100644
index d93a7e00faa6faa04adefbf022b3a75a2baa15f2..0000000000000000000000000000000000000000
--- a/spaces/animesh651/ChatAPT_v1/app.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import os
-import openai
-import gradio as gr
-openai.api_key = os.getenv("OPENAI_API_KEY")  # read the key from the environment instead of hard-coding a secret
-
-start_sequence = "\nAI:"
-restart_sequence = "\nHuman: "
-
-prompt = "The following is a conversation with an AI assistant created by Animesh Bhatt The assistant is helpful, creative, clever, and very friendly.\n\nHuman: Hello, who the fuck are you?\nAI: I am an AI created by Animesh Bhatt. How can I help you today?\nHuman: "
-
-def openai_create(prompt):
-
- response = openai.Completion.create(
- model="text-davinci-003",
- prompt=prompt,
- temperature=0.9,
- max_tokens=150,
- top_p=1,
- frequency_penalty=0,
- presence_penalty=0.6,
- stop=[" Human:", " AI:"]
- )
-
- return response.choices[0].text
-
-
-
-def chatgpt_clone(input, history):
- history = history or []
- s = list(sum(history, ()))
- s.append(input)
- inp = ' '.join(s)
- output = openai_create(inp)
- history.append((input, output))
- return history, history
-
-
-block = gr.Blocks()
-
-
-with block:
- gr.Markdown("""Ask anything to Animesh (chatgpt-clone1)
- """)
- chatbot = gr.Chatbot()
- message = gr.Textbox(placeholder=prompt)
- state = gr.State()
- submit = gr.Button("SEND")
- submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])
-
-block.launch(inline = True)
-
diff --git a/spaces/aodianyun/ChatGLM-6B/THUDM/chatglm-6b/quantization.py b/spaces/aodianyun/ChatGLM-6B/THUDM/chatglm-6b/quantization.py
deleted file mode 100644
index 6f469f6a25a8233fe881608168daeba0bc809540..0000000000000000000000000000000000000000
--- a/spaces/aodianyun/ChatGLM-6B/THUDM/chatglm-6b/quantization.py
+++ /dev/null
@@ -1,201 +0,0 @@
-from torch.nn import Linear
-from torch.nn.parameter import Parameter
-
-import bz2
-import torch
-import base64
-import ctypes
-from transformers.utils import logging
-
-from typing import List
-from functools import partial
-
-logger = logging.get_logger(__name__)
-
-try:
- from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
-
- class Kernel:
- def __init__(self, code: bytes, function_names: List[str]):
- self.code = code
- self._function_names = function_names
- self._cmodule = LazyKernelCModule(self.code)
-
- for name in self._function_names:
- setattr(self, name, KernelFunction(self._cmodule, name))
-
- quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+t
e+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyH
pix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ"
-
- kernels = Kernel(
- bz2.decompress(base64.b64decode(quantization_code)),
- [
- "int4WeightCompression",
- "int4WeightExtractionFloat",
- "int4WeightExtractionHalf",
- "int8WeightExtractionFloat",
- "int8WeightExtractionHalf",
- ],
- )
-except Exception as exception:
- kernels = None
- logger.warning("Failed to load cpm_kernels:" + str(exception))
-
-
-class W8A16Linear(torch.autograd.Function):
- @staticmethod
- def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
- ctx.inp_shape = inp.size()
- ctx.weight_bit_width = weight_bit_width
- out_features = quant_w.size(0)
- inp = inp.contiguous().view(-1, inp.size(-1))
- weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
- ctx.weight_shape = weight.size()
- output = inp.mm(weight.t())
- ctx.save_for_backward(inp, quant_w, scale_w)
- return output.view(*(ctx.inp_shape[:-1] + (out_features,)))
-
- @staticmethod
- def backward(ctx, grad_output: torch.Tensor):
- inp, quant_w, scale_w = ctx.saved_tensors
- weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)
- grad_output = grad_output.contiguous().view(-1, weight.size(0))
- grad_input = grad_output.mm(weight)
- grad_weight = grad_output.t().mm(inp)
- return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
-
-
-def compress_int4_weight(weight: torch.Tensor): # (n, m)
- with torch.cuda.device(weight.device):
- n, m = weight.size(0), weight.size(1)
- assert m % 2 == 0
- m = m // 2
- out = torch.empty(n, m, dtype=torch.int8, device="cuda")
- stream = torch.cuda.current_stream()
-
- gridDim = (n, 1, 1)
- blockDim = (min(round_up(m, 32), 1024), 1, 1)
-
- kernels.int4WeightCompression(
- gridDim,
- blockDim,
- 0,
- stream,
- [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],
- )
- return out
-
-
-def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
- if source_bit_width == 8:
- func = kernels.int8WeightExtractionHalf
- elif source_bit_width == 4:
- func = kernels.int4WeightExtractionHalf
- else:
- assert False, "Unsupported bit-width"
-
- with torch.cuda.device(weight.device):
- n, m = weight.size(0), weight.size(1)
- out = torch.empty(n, m * (8 // source_bit_width), dtype=torch.half, device="cuda")
- stream = torch.cuda.current_stream()
-
- gridDim = (n, 1, 1)
- blockDim = (min(round_up(m, 32), 1024), 1, 1)
-
- func(
- gridDim,
- blockDim,
- 0,
- stream,
- [
- ctypes.c_void_p(weight.data_ptr()),
- ctypes.c_void_p(scale_list.data_ptr()),
- ctypes.c_void_p(out.data_ptr()),
- ctypes.c_int32(n),
- ctypes.c_int32(m),
- ],
- )
- return out
-
-
-class QuantizedLinear(Linear):
- def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, empty_init=False, *args, **kwargs):
- super(QuantizedLinear, self).__init__(*args, **kwargs)
- self.weight_bit_width = weight_bit_width
-
- shape = self.weight.shape
- del self.weight
-
- if weight_tensor is None or empty_init:
- self.weight = torch.empty(
- shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs["device"]
- )
- self.weight_scale = torch.empty(shape[0], dtype=kwargs["dtype"], device=kwargs["device"])
- else:
- self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half()
- self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8)
- if weight_bit_width == 4:
- self.weight = compress_int4_weight(self.weight)
-
- self.weight = Parameter(self.weight.to(kwargs["device"]), requires_grad=False)
- self.weight_scale = Parameter(self.weight_scale.to(kwargs["device"]), requires_grad=False)
- if bias_tensor is not None:
- self.bias = Parameter(bias_tensor.to(kwargs["device"]), requires_grad=False)
- else:
- self.bias = None
-
- def forward(self, input):
- output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
- if self.bias is not None:
- output = output + self.bias
- return output
-
-
-def quantize(model, weight_bit_width, empty_init=False, **kwargs):
- """Replace fp16 linear with quantized linear"""
-
- for layer in model.layers:
- layer.attention.query_key_value = QuantizedLinear(
- weight_bit_width=weight_bit_width,
- weight_tensor=layer.attention.query_key_value.weight.to(torch.cuda.current_device()),
- bias_tensor=layer.attention.query_key_value.bias,
- in_features=layer.attention.query_key_value.in_features,
- out_features=layer.attention.query_key_value.out_features,
- bias=True,
- dtype=torch.half,
- device=layer.attention.query_key_value.weight.device,
- empty_init=empty_init
- )
- layer.attention.dense = QuantizedLinear(
- weight_bit_width=weight_bit_width,
- weight_tensor=layer.attention.dense.weight.to(torch.cuda.current_device()),
- bias_tensor=layer.attention.dense.bias,
- in_features=layer.attention.dense.in_features,
- out_features=layer.attention.dense.out_features,
- bias=True,
- dtype=torch.half,
- device=layer.attention.dense.weight.device,
- empty_init=empty_init
- )
- layer.mlp.dense_h_to_4h = QuantizedLinear(
- weight_bit_width=weight_bit_width,
- weight_tensor=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),
- bias_tensor=layer.mlp.dense_h_to_4h.bias,
- in_features=layer.mlp.dense_h_to_4h.in_features,
- out_features=layer.mlp.dense_h_to_4h.out_features,
- bias=True,
- dtype=torch.half,
- device=layer.mlp.dense_h_to_4h.weight.device,
- empty_init=empty_init
- )
- layer.mlp.dense_4h_to_h = QuantizedLinear(
- weight_bit_width=weight_bit_width,
- weight_tensor=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),
- bias_tensor=layer.mlp.dense_4h_to_h.bias,
- in_features=layer.mlp.dense_4h_to_h.in_features,
- out_features=layer.mlp.dense_4h_to_h.out_features,
- bias=True,
- dtype=torch.half,
- device=layer.mlp.dense_4h_to_h.weight.device,
- empty_init=empty_init
- )
- return model
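For reference, a minimal usage sketch of the `quantize()` helper above, assuming a ChatGLM-style fp16 model whose transformer blocks are exposed as `model.layers` (the attribute the loop iterates over). The loader name, hidden size, and input tensor are illustrative placeholders, not part of the deleted file.

```python
import torch

# Hypothetical driver: replace every attention/MLP Linear in a half-precision
# model with a QuantizedLinear holding int4 weights and per-row fp16 scales.
model = load_chatglm_fp16().cuda()            # placeholder loader, not a real API
model = quantize(model, weight_bit_width=4)   # 4-bit weights, fp16 activations

with torch.no_grad():
    hidden = torch.randn(1, 8, 4096, dtype=torch.half, device="cuda")
    out = model.layers[0].attention.query_key_value(hidden)  # goes through W8A16Linear
```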
diff --git a/spaces/ardha27/rvc-hololive/infer_pack/transforms.py b/spaces/ardha27/rvc-hololive/infer_pack/transforms.py
deleted file mode 100644
index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000
--- a/spaces/ardha27/rvc-hololive/infer_pack/transforms.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
-
-
-def unconstrained_rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails="linear",
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == "linear":
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError("{} tails are not implemented.".format(tails))
-
- (
- outputs[inside_interval_mask],
- logabsdet[inside_interval_mask],
- ) = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound,
- right=tail_bound,
- bottom=-tail_bound,
- top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- )
-
- return outputs, logabsdet
-
-
-def rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0.0,
- right=1.0,
- bottom=0.0,
- top=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError("Input to a transform is not within its domain")
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError("Minimal bin width too large for the number of bins")
- if min_bin_height * num_bins > 1.0:
- raise ValueError("Minimal bin height too large for the number of bins")
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- ) + input_heights * (input_delta - input_derivatives)
- b = input_heights * input_derivatives - (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- )
- c = -input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (
- input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
- )
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
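For orientation, a small sketch of how the spline entry point above is usually called. Shapes follow the neural-spline-flow convention implied by the code: `num_bins` unnormalized widths and heights per element, and `num_bins - 1` interior derivatives when linear tails are requested. The batch size and bin count are arbitrary choices for the example.

```python
import torch

num_bins = 10
x = torch.rand(8) * 2 - 1                 # inputs inside the tail bound [-1, 1]
w = torch.randn(8, num_bins)              # unnormalized bin widths
h = torch.randn(8, num_bins)              # unnormalized bin heights
d = torch.randn(8, num_bins - 1)          # unnormalized interior derivatives

y, logdet = piecewise_rational_quadratic_transform(
    x, w, h, d, inverse=False, tails="linear", tail_bound=1.0
)
x_back, inv_logdet = piecewise_rational_quadratic_transform(
    y, w, h, d, inverse=True, tails="linear", tail_bound=1.0
)
# x_back approximately recovers x, and logdet + inv_logdet is close to zero.
```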
diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests2/test_delightful_tts_train.py b/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests2/test_delightful_tts_train.py
deleted file mode 100644
index a917d776570c5d2077890a7bfaf624f69f6d48f6..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests2/test_delightful_tts_train.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import glob
-import json
-import os
-import shutil
-
-from trainer import get_last_checkpoint
-
-from tests import get_device_id, get_tests_output_path, run_cli
-from TTS.config.shared_configs import BaseAudioConfig
-from TTS.tts.configs.delightful_tts_config import DelightfulTTSConfig
-from TTS.tts.models.delightful_tts import DelightfulTtsArgs, DelightfulTtsAudioConfig, VocoderConfig
-
-config_path = os.path.join(get_tests_output_path(), "test_model_config.json")
-output_path = os.path.join(get_tests_output_path(), "train_outputs")
-
-audio_config = BaseAudioConfig(
- sample_rate=22050,
- do_trim_silence=True,
- trim_db=60.0,
- signal_norm=False,
- mel_fmin=0.0,
- mel_fmax=8000,
- spec_gain=1.0,
- log_func="np.log",
- ref_level_db=20,
- preemphasis=0.0,
-)
-
-audio_config = DelightfulTtsAudioConfig()
-model_args = DelightfulTtsArgs()
-
-vocoder_config = VocoderConfig()
-
-
-config = DelightfulTTSConfig(
- audio=audio_config,
- batch_size=2,
- eval_batch_size=8,
- num_loader_workers=0,
- num_eval_loader_workers=0,
- text_cleaner="english_cleaners",
- use_phonemes=True,
- phoneme_language="en-us",
- phoneme_cache_path="tests/data/ljspeech/phoneme_cache/",
- f0_cache_path="tests/data/ljspeech/f0_cache_delightful/", ## delightful f0 cache is incompatible with other models
- run_eval=True,
- test_delay_epochs=-1,
- binary_align_loss_alpha=0.0,
- epochs=1,
- print_step=1,
- use_attn_priors=False,
- print_eval=True,
- test_sentences=[
- ["Be a voice, not an echo."],
- ],
- use_speaker_embedding=False,
-)
-config.save_json(config_path)
-
-# train the model for one epoch
-command_train = (
- f"CUDA_VISIBLE_DEVICES='{'cpu'}' python TTS/bin/train_tts.py --config_path {config_path} "
- f"--coqpit.output_path {output_path} "
- "--coqpit.datasets.0.formatter ljspeech "
- "--coqpit.datasets.0.meta_file_train metadata.csv "
- "--coqpit.datasets.0.meta_file_val metadata.csv "
- "--coqpit.datasets.0.path tests/data/ljspeech "
- "--coqpit.datasets.0.meta_file_attn_mask tests/data/ljspeech/metadata_attn_mask.txt "
- "--coqpit.test_delay_epochs -1"
-)
-
-run_cli(command_train)
-
-# Find latest folder
-continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
-
-# Inference using TTS API
-continue_config_path = os.path.join(continue_path, "config.json")
-continue_restore_path, _ = get_last_checkpoint(continue_path)
-out_wav_path = os.path.join(get_tests_output_path(), "output.wav")
-
-# Check integrity of the config
-with open(continue_config_path, "r", encoding="utf-8") as f:
- config_loaded = json.load(f)
-assert config_loaded["characters"] is not None
-assert config_loaded["output_path"] in continue_path
-assert config_loaded["test_delay_epochs"] == -1
-
-# Load the model and run inference
-inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
-run_cli(inference_command)
-
-# restore the model and continue training for one more epoch
-command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path {continue_path} "
-run_cli(command_train)
-shutil.rmtree(continue_path)
-shutil.rmtree("tests/data/ljspeech/f0_cache_delightful/")
diff --git a/spaces/artqwu/gradio-demo/app.py b/spaces/artqwu/gradio-demo/app.py
deleted file mode 100644
index 62cebc7f54beb7c12ce0aa2d37ca6abd76e2234d..0000000000000000000000000000000000000000
--- a/spaces/artqwu/gradio-demo/app.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from fastai.vision.all import *
-learn = load_learner('export.pkl')
-
-labels = learn.dls.vocab
-def predict(img):
- img = PILImage.create(img)
- pred,pred_idx,probs = learn.predict(img)
- return {labels[i]: float(probs[i]) for i in range(len(labels))}
-
-examples = ['basset.jpg', 'beagle.jpg', 'siamese.jpg']
-
-import gradio as gr
-gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(512, 512)), examples=examples, outputs=gr.outputs.Label(num_top_classes=3)).launch()
\ No newline at end of file
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/xpath/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/xpath/__init__.py
deleted file mode 100644
index 216c000dc5ffc8e53cc9c596e420c1e67604d1aa..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/xpath/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__author__ = 'ericvergnaud'
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/transform_eos_concat_langpair_dataset.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/transform_eos_concat_langpair_dataset.py
deleted file mode 100644
index 638bd1a3d7dfd355c5a9b18f47d10ff3d00644e3..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/transform_eos_concat_langpair_dataset.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-
-import torch
-from torch.utils.data.dataloader import default_collate
-
-from fairseq.data import ConcatDataset
-
-logger = logging.getLogger(__name__)
-
-
-class TransformEosConcatLangPairDataset(ConcatDataset):
- """
- It is a combination of TransformEosLangPairDataset and ConcatDataset for multiple LangPairDataset datasets.
- Assume all datasets share the same src_eos, tgt_bos, left_pad_source and left_pad_target
- """
-
- def __init__(
- self,
- datasets,
- src_eos,
- tgt_bos,
- new_src_eos=None,
- new_tgt_bos=None,
- ):
- super().__init__(datasets)
- if new_src_eos is not None:
- assert len(new_src_eos) == len(datasets)
- else:
- new_src_eos = []
- if new_tgt_bos is not None:
- assert len(new_tgt_bos) == len(datasets)
- else:
- new_tgt_bos = []
- self.src_eos = src_eos
- self.tgt_bos = tgt_bos
- self.new_src_eos = (
- torch.LongTensor(new_src_eos).cpu() if len(new_src_eos) > 0 else []
- )
- self.new_tgt_bos = (
- torch.LongTensor(new_tgt_bos).cpu() if len(new_tgt_bos) > 0 else []
- )
- self.left_pad_source = self.is_left_pad_source(datasets)
- self.left_pad_target = self.is_left_pad_target(datasets)
- self.pad_idx = self.src_dict_pad()
-
- def src_dict_pad(self):
- if hasattr(self.datasets[0], "src_dict"):
- return self.datasets[0].src_dict.pad()
- if hasattr(self.datasets[0], "dataset"):
- return self.datasets[0].dataset.src_dict.pad()
- raise NotImplementedError("No src_dict is found")
-
- def __getitem__(self, idx):
- dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
- return dataset_idx, self.datasets[dataset_idx][sample_idx]
-
- def is_left_pad_source(self, datasets):
- def _left_pad_source(ds):
- if hasattr(ds, "left_pad_source"):
- return ds.left_pad_source
- if hasattr(ds, "dataset"):
- return _left_pad_source(ds.dataset)
- logger.warn(f"{type(ds)} has no left_pad_source, using default True")
- return True
-
- left_pad_source = _left_pad_source(datasets[0])
- for ds in datasets:
- if left_pad_source != _left_pad_source(ds):
- raise ValueError("Different left_pad_source setting detected!")
- return left_pad_source
-
- def is_left_pad_target(self, datasets):
- def _left_pad_target(ds):
- if hasattr(ds, "left_pad_target"):
- return ds.left_pad_target
- if hasattr(ds, "dataset"):
- return _left_pad_target(ds.dataset)
- logger.warn(f"{type(ds)} has no left_pad_target, using default False")
- return False
-
- left_pad_target = _left_pad_target(datasets[0])
- for ds in datasets:
- if left_pad_target != _left_pad_target(ds):
- raise ValueError("Different left_pad_target setting detected!")
- return left_pad_target
-
- def collater(self, samples, **extra_args):
- if len(samples) == 0:
- return samples
-
- dataset_ids = [s[0] for s in samples]
- samples = [s[1] for s in samples]
-
- if hasattr(self.datasets[0], "collater"):
- samples = self.datasets[0].collater(samples, **extra_args)
- else:
- samples = default_collate(samples, **extra_args)
-
- if len(self.new_src_eos) > 0:
- if self.left_pad_source:
- assert (
- samples["net_input"]["src_tokens"][:, -1] != self.src_eos
- ).sum() == 0
- samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos[
- dataset_ids
- ]
-
- else:
- eos_idx = samples["net_input"]["src_lengths"] - 1
- assert (
- samples["net_input"]["src_tokens"][
- torch.arange(eos_idx.size(0)), eos_idx
- ]
- != self.src_eos
- ).sum() == 0
- samples["net_input"]["src_tokens"].scatter_(
- 1, eos_idx.view(-1, 1), self.new_src_eos[dataset_ids].view(-1, 1)
- )
-
- if len(self.new_tgt_bos) > 0 and "prev_output_tokens" in samples["net_input"]:
- if self.left_pad_target:
- # TODO: support different padding direction on target side
- raise NotImplementedError(
- "TransformEosLangPairDataset does not implement --left-pad-target True option"
- )
- else:
- assert (
- samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos
- ).sum() == 0
- samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos[
- dataset_ids
- ]
-
- return samples
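As context for the docstring above, a rough construction sketch. The two LangPairDataset objects, the dictionaries, and the language-tag symbols are placeholders for illustration; the only constraint taken from the class is that each replacement list carries one symbol per wrapped dataset.

```python
# Hypothetical setup: two language-pair datasets that share padding settings,
# each getting its own replacement EOS/BOS symbol.
datasets = [de_en_dataset, fr_en_dataset]     # placeholder LangPairDataset objects
combined = TransformEosConcatLangPairDataset(
    datasets,
    src_eos=src_dict.eos(),
    tgt_bos=tgt_dict.eos(),                   # fairseq conventionally seeds the decoder with EOS
    new_src_eos=[src_dict.index("__de__"), src_dict.index("__fr__")],
    new_tgt_bos=[tgt_dict.index("__en__"), tgt_dict.index("__en__")],
)
batch = combined.collater([combined[i] for i in range(4)])  # items are (dataset_idx, sample) pairs
```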
diff --git a/spaces/aslasdlkj/Podfusion/animate.py b/spaces/aslasdlkj/Podfusion/animate.py
deleted file mode 100644
index fd0143c90d0117a3139b264c5e50c5e2a95f7ddb..0000000000000000000000000000000000000000
--- a/spaces/aslasdlkj/Podfusion/animate.py
+++ /dev/null
@@ -1,247 +0,0 @@
-import os
-import sys
-from cgi import test
-from pathlib import Path
-
-import cv2
-import mediapy
-import numpy as np
-from frame_interpolation.eval import interpolator, util
-from huggingface_hub import snapshot_download
-from image_tools.sizes import resize_and_crop
-from moviepy.editor import CompositeVideoClip, ImageClip
-from moviepy.editor import VideoFileClip as vfc
-from PIL import Image
-
-
-# get key positions at which frame needs to be generated
-def list_of_positions(num_contours, num_frames=100):
- positions = []
- for i in range(0, num_frames):
- positions.append(int(num_contours / num_frames * i))
- return positions
-
-
-def contourfinder(image1, image2, text=None, num_frames=100, output_dir=Path("temp")):
- # Create two blank pages to write into
-    # Each canvas is sized to match the corresponding input image
- blank = np.zeros(np.shape(image1), dtype="uint8")
- blank2 = np.zeros(np.shape(image2), dtype="uint8")
- # Threshold and contours for image 1 and 2
- threshold = cv2.Canny(image=image1, threshold1=100, threshold2=200)
- contours, hierarchies = cv2.findContours(
- threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
- )
-
- threshold2 = cv2.Canny(image=image2, threshold1=100, threshold2=200)
- contours2, hierarchies2 = cv2.findContours(
- threshold2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
- )
-
- # Initialize three empty videos
- vid1 = cv2.VideoWriter(
- Path(output_dir / "vid1.mp4").as_posix(),
- cv2.VideoWriter_fourcc(*"mp4v"),
- 24,
- threshold.shape,
- )
- vid2 = cv2.VideoWriter(
- Path(output_dir / "vid2.mp4").as_posix(),
- cv2.VideoWriter_fourcc(*"mp4v"),
- 24,
- threshold.shape,
- )
- text_vid = cv2.VideoWriter(
- Path(output_dir / "text_video.mp4").as_posix(),
- cv2.VideoWriter_fourcc(*"mp4v"),
- 10,
- threshold.shape,
- )
-
- # Get positions
- positions = list_of_positions((len(contours)))
- frames = []
-
- # Loop over contours adding them to blank image then writing to video
- for i in range(0, len(contours)):
- cv2.drawContours(
- blank, contours=contours, contourIdx=i, color=(125, 200, 255), thickness=1
- )
-
- if i in positions:
- frames.append(blank)
-            # Compile to video
- vid1.write(blank)
-
- vid1.release()
- clip1 = vfc(Path(output_dir / "vid1.mp4").as_posix())
- positions = list_of_positions((len(contours2)))
-
- for i in range(0, len(contours2)):
- cv2.drawContours(
- blank2, contours=contours2, contourIdx=i, color=(125, 200, 255), thickness=1
- )
- if i in positions:
- frames.append(blank2)
-
- vid2.write(blank2)
-
- vid2.release()
- clip3 = vfc(Path(output_dir / "vid2.mp4").as_posix())
-
- # Next is the text vid
-
- if text != None:
-        # Blank canvas for the text frames, matching the first input image
-        image = np.zeros(np.shape(image1), dtype="uint8")
-
- # font
- font = cv2.FONT_HERSHEY_COMPLEX
-
- # org
- org = (10, 400)
-
- # fontScale
- fontScale = 3
-
- # Blue color in BGR
- color = (186, 184, 108)
-
- # Line thickness of 2 px
- thickness = 4
-
- def text_frames(text, image, org):
- spacing = 55 # spacing between letters
- blink = image
- cv2.imwrite(Path(output_dir / "blink.png").as_posix(), blink)
- for i in range(0, len(text) - 1):
-
- text_vid.write(blink)
-
- # Using cv2.putText() method
- image = cv2.putText(
- image, text[i], org, font, fontScale, color, thickness, cv2.LINE_AA
- )
-
- # Take care of org spacing
- org = (org[0] + spacing, org[1])
- if text[i].isupper():
- org = (org[0] + spacing + 1, org[1])
- print(f"Upper {text[i]}")
- print(org)
-
- # Displaying the image
-            cv2.imwrite(Path(output_dir / f"text_im{i}.png").as_posix(), image)
-
-            # Compile to video
- text_vid.write(image)
- text_vid.release()
-
- text_frames(text, image, org)
- return clip1, clip3
-
-
-def load_model(model_name):
- model = interpolator.Interpolator(snapshot_download(repo_id=model_name), None)
- return model
-
-
-model_names = [
- "akhaliq/frame-interpolation-film-style",
- "NimaBoscarino/frame-interpolation_film_l1",
- "NimaBoscarino/frame_interpolation_film_vgg",
-]
-
-models = {model_name: load_model(model_name) for model_name in model_names}
-
-ffmpeg_path = util.get_ffmpeg_path()
-mediapy.set_ffmpeg(ffmpeg_path)
-
-
-def resize(width, img):
- basewidth = width
- img = Image.open(img)
- wpercent = basewidth / float(img.size[0])
- hsize = int((float(img.size[1]) * float(wpercent)))
- img = img.resize((basewidth, hsize), Image.ANTIALIAS)
- return img
-
-
-def resize_img(img1, img2, output_dir):
- img_target_size = Image.open(img1)
- img_to_resize = resize_and_crop(
- img2,
- (
- img_target_size.size[0],
- img_target_size.size[1],
- ), # set width and height to match cv2_images[0]
- crop_origin="middle",
- )
- img_to_resize.save(Path(output_dir / "resized_img2.png"))
-
-
-def get_video_frames(
-    images, vid_output_dir=Path("temp"), times_to_interpolate=6, model_name_index=0
-):
- frame1 = images[0]
- frame2 = images[1]
-
- model = models[model_names[model_name_index]]
- cv2_images = [cv2.imread(frame1), cv2.imread(frame2)]
-
- frame1 = resize(256, frame1)
- frame2 = resize(256, frame2)
- test_1 = Path(vid_output_dir / "test1.png")
- test_2 = Path(vid_output_dir / "test2.png")
- frame1.save(test_1)
- frame2.save(test_2)
-
- resize_img(test_1, test_2, vid_output_dir)
- input_frames = [
- Path(vid_output_dir / "test1.png").as_posix(),
- Path(vid_output_dir / "resized_img2.png").as_posix(),
- ]
-
- frames = list(
- util.interpolate_recursively_from_files(
- input_frames, times_to_interpolate, model
- )
- )
- return frames, cv2_images
-
-
-def create_mp4_with_audio(
- frames, cv2_images, duration, audio, output_path, overlay_image
-):
- vid_output_dir = output_path.parent
- temp_vid_path = Path(vid_output_dir / "TEMP.mp4")
- mediapy.write_video(temp_vid_path, frames, fps=10)
- print(
- f"TYPES....{type(cv2_images[0])},{type(cv2_images[1])} SHAPES{cv2_images[0].shape} Img {cv2_images[0]}"
- )
- clip1, clip3 = contourfinder(
- cv2_images[0], cv2_images[1], output_dir=vid_output_dir
- ) # has a third text option
-
- # Use open CV and moviepy code
- # So we move from open CV video 1 to out.mp4 to open CV video2
- clip1 = clip1
- clip2 = (
- vfc(temp_vid_path.as_posix())
- .resize(2)
- .set_start(clip1.duration - 0.5)
- .crossfadein(2)
- )
- clip3 = clip3.set_start((clip1.duration - 0.5) + (clip2.duration)).crossfadein(2)
-
- new_clip = CompositeVideoClip([clip1, clip2, clip3])
-    new_clip.audio = audio  # Naively append audio without considering the length of the video; it works in practice, so it is left as-is
- image = (
- ImageClip(overlay_image).set_duration(duration).resize(0.5).set_pos("center")
- )
-
-    new_clip = new_clip.set_duration(duration)  # set_duration returns a new clip rather than mutating in place
- # Now overlay the image with moviepy
- final_clip = CompositeVideoClip([new_clip, image])
- final_clip.write_videofile(output_path.as_posix(), audio_codec="aac")
- return output_path.as_posix()
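A possible end-to-end driver for the helpers above, assuming two still images and a narration track on disk. Every file name here is made up for illustration, and the default model index picks the first interpolation checkpoint loaded at import time.

```python
from pathlib import Path
from moviepy.editor import AudioFileClip

out_dir = Path("temp")
out_dir.mkdir(exist_ok=True)

# Interpolate between two stills, then mux the frames with the narration audio.
frames, cv2_images = get_video_frames(["cover_a.png", "cover_b.png"], vid_output_dir=out_dir)
audio = AudioFileClip("narration.mp3")        # placeholder audio file
create_mp4_with_audio(
    frames,
    cv2_images,
    duration=audio.duration,
    audio=audio,
    output_path=out_dir / "final.mp4",
    overlay_image="logo.png",                 # placeholder overlay artwork
)
```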
diff --git a/spaces/avaco/stablediffusionapi-disney-pixal-cartoon/README.md b/spaces/avaco/stablediffusionapi-disney-pixal-cartoon/README.md
deleted file mode 100644
index 8a6811adf85327cd41577e4e3aaebfbdedbd52ca..0000000000000000000000000000000000000000
--- a/spaces/avaco/stablediffusionapi-disney-pixal-cartoon/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stablediffusionapi Disney Pixal Cartoon
-emoji: 📉
-colorFrom: gray
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/ui/media/main.css b/spaces/awaawawawa/iurf7irfuyytruyyugb/ui/media/main.css
deleted file mode 100644
index 8d7401e9e6a82bf043e755fa7967746496f45056..0000000000000000000000000000000000000000
--- a/spaces/awaawawawa/iurf7irfuyytruyyugb/ui/media/main.css
+++ /dev/null
@@ -1,416 +0,0 @@
-body {
- font-family: Arial, Helvetica, sans-serif;
- font-size: 11pt;
- background-color: rgb(32, 33, 36);
- color: #eee;
-}
-a {
- color: rgb(0, 102, 204);
-}
-a:visited {
- color: rgb(0, 102, 204);
-}
-label {
- font-size: 10pt;
-}
-#prompt {
- width: 100%;
- height: 65pt;
- box-sizing: border-box;
-}
-@media screen and (max-width: 600px) {
- #prompt {
- width: 95%;
- }
-}
-.image_preview_container {
- /* display: none; */
- margin-top: 10pt;
-}
-.image_clear_btn {
- position: absolute;
- transform: translateX(-50%) translateY(-35%);
- background: black;
- color: white;
- border: 2pt solid #ccc;
- padding: 0;
- cursor: pointer;
- outline: inherit;
- border-radius: 8pt;
- width: 16pt;
- height: 16pt;
- font-family: Verdana;
- font-size: 8pt;
-}
-.settings-box ul {
- font-size: 9pt;
- margin-bottom: 5px;
- padding-left: 10px;
- list-style-type: none;
-}
-.settings-box li {
- padding-bottom: 4pt;
-}
-.editor-slider {
- vertical-align: middle;
-}
-.outputMsg {
- font-size: small;
- padding-bottom: 3pt;
-}
-#progressBar {
- font-size: small;
-}
-#footer {
- font-size: small;
- padding-left: 10pt;
- background: none;
-}
-#footer-legal {
- font-size: 8pt;
-}
-.imgSeedLabel {
- font-size: 0.8em;
- background-color: rgb(44, 45, 48);
- border-radius: 3px;
- padding: 5px;
-}
-.imgItem {
- display: inline-block;
- margin-top: 1em;
- margin-right: 1em;
-}
-.imgContainer {
- display: flex;
- justify-content: flex-end;
-}
-.imgItemInfo {
- padding-bottom: 0.5em;
- display: flex;
- align-items: flex-end;
- flex-direction: column;
- position: absolute;
- padding: 5px;
- opacity: 0;
- transition: 0.1s all;
-}
-.imgContainer:hover > .imgItemInfo {
- opacity: 1;
-}
-.imgItemInfo * {
- margin-bottom: 7px;
-}
-#container {
- width: 90%;
- margin-left: auto;
- margin-right: auto;
-}
-@media screen and (max-width: 1800px) {
- #container {
- width: 100%;
- }
-}
-#logo small {
- font-size: 11pt;
-}
-#editor {
- padding: 5px;
-}
-#editor label {
- font-weight: normal;
-}
-.settings-box label small {
- color: rgb(153, 153, 153);
-}
-#preview {
- padding: 5px;
-}
-#editor-inputs {
- margin-bottom: 20px;
-}
-#editor-inputs-prompt {
- flex: 1;
-}
-#editor-inputs .row {
- padding-bottom: 10px;
-}
-#makeImage {
- border-radius: 6px;
-}
-#editor-modifiers h5 {
- padding: 5pt 0;
- margin: 0;
-}
-#makeImage {
- flex: 0 0 70px;
- background: rgb(80, 0, 185);
- border: 2px solid rgb(40, 0, 78);
- color: rgb(255, 221, 255);
- width: 100%;
- height: 30pt;
-}
-#makeImage:hover {
- background: rgb(93, 0, 214);
-}
-#stopImage {
- flex: 0 0 70px;
- background: rgb(132, 8, 0);
- border: 2px solid rgb(122, 29, 0);
- color: rgb(255, 221, 255);
- width: 100%;
- height: 30pt;
- border-radius: 6px;
- display: none;
-}
-#stopImage:hover {
- background: rgb(177, 27, 0);
-}
-.flex-container {
- display: flex;
-}
-.col-50 {
- flex: 50%;
-}
-.col-fixed-10 {
- flex: 0 0 380pt;
-}
-.col-free {
- flex: 1;
-}
-.collapsible {
- cursor: pointer;
-}
-.collapsible-content {
- display: none;
- padding-left: 15px;
-}
-.collapsible-content h5 {
- padding: 5pt 0pt;
- margin: 0;
- font-size: 10pt;
-}
-.collapsible-handle {
- color: white;
- padding-right: 5px;
-}
-.panel-box {
- background: rgb(44, 45, 48);
- border: 1px solid rgb(47, 49, 53);
- border-radius: 7px;
- padding: 5px;
- margin-bottom: 15px;
- box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
-}
-.panel-box h4 {
- margin: 0;
- padding: 2px 0;
-}
-#editor-modifiers .editor-modifiers-leaf {
- padding-top: 10pt;
- padding-bottom: 10pt;
-}
-#preview {
- margin-left: 10pt;
-}
-img {
- box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
-}
-.line-separator {
- background: rgb(56, 56, 56);
- height: 1pt;
- margin: 15pt 0;
-}
-#editor-inputs-tags-container {
- margin-top: 5pt;
- display: none;
-}
-#server-status {
- display: inline;
- float: right;
- transform: translateY(-5pt);
-}
-#server-status-color {
- /* width: 8pt;
- height: 8pt;
- border-radius: 4pt; */
- font-size: 14pt;
- color: rgb(128, 87, 0);
- /* background-color: rgb(197, 1, 1); */
- /* transform: translateY(15%); */
- display: inline;
-}
-#server-status-msg {
- color: rgb(128, 87, 0);
- padding-left: 2pt;
- font-size: 10pt;
-}
-.preview-prompt {
- font-size: 16pt;
- margin-bottom: 10pt;
-}
-#coffeeButton {
- height: 23px;
- transform: translateY(25%);
-}
-
-#inpaintingEditor {
- width: 300pt;
- height: 300pt;
- margin-top: 5pt;
-}
-.drawing-board-canvas-wrapper {
- background-size: 100% 100%;
-}
-.drawing-board-control > button {
- background-color: #eee;
- border-radius: 3pt;
-}
-.drawing-board-control-inner {
- background-color: #eee;
- border-radius: 3pt;
-}
-#inpaintingEditor canvas {
- opacity: 0.6;
-}
-#enable_mask {
- margin-top: 8pt;
-}
-
-#top-nav {
- padding-top: 3pt;
- padding-bottom: 15pt;
-}
-#top-nav .icon {
- padding-right: 4pt;
- font-size: 14pt;
- transform: translateY(1pt);
-}
-#logo {
- display: inline;
-}
-#logo h1 {
- display: inline;
-}
-#top-nav-items {
- list-style-type: none;
- display: inline;
- float: right;
-}
-#top-nav-items > li {
- float: left;
- display: inline;
- padding-left: 20pt;
- cursor: default;
-}
-#initial-text {
- padding-top: 15pt;
- padding-left: 4pt;
-}
-.settings-subheader {
- font-size: 10pt;
- font-weight: bold;
-}
-.pl-5 {
- padding-left: 5pt;
-}
-#system-settings {
- width: 360pt;
- transform: translateX(-100%) translateX(70pt);
-
- padding-top: 10pt;
- padding-bottom: 10pt;
-}
-#system-settings ul {
- margin: 0;
- padding: 0;
-}
-#system-settings li {
- padding-left: 5pt;
-}
-#community-links {
- list-style-type: none;
- margin: 0;
- padding: 12pt;
- padding-bottom: 0pt;
- transform: translateX(-15%);
-}
-#community-links li {
- padding-bottom: 12pt;
- display: block;
- font-size: 10pt;
-}
-#community-links li .fa-fw {
- padding-right: 2pt;
-}
-#community-links li a {
- color: white;
- text-decoration: none;
-}
-.dropdown {
- overflow: hidden;
-}
-.dropdown-content {
- display: none;
- position: absolute;
- z-index: 2;
-
- background: rgb(18, 18, 19);
- border: 2px solid rgb(37, 38, 41);
- border-radius: 7px;
- padding: 5px;
- margin-bottom: 15px;
- box-shadow: 0 20px 28px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
-}
-.dropdown:hover .dropdown-content {
- display: block;
-}
-
-.imageTaskContainer {
- border: 1px solid #333;
- margin-bottom: 10pt;
- padding: 5pt;
- border-radius: 5pt;
- box-shadow: 0 20px 28px 0 rgba(0, 0, 0, 0.15), 0 6px 20px 0 rgba(0, 0, 0, 0.15);
-}
-.taskStatusLabel {
- float: left;
- font-size: 8pt;
- background:rgb(44, 45, 48);
- border: 1px solid rgb(61, 62, 66);
- padding: 2pt 4pt;
- border-radius: 2pt;
- margin-right: 5pt;
-}
-.activeTaskLabel {
- background:rgb(0, 90, 30);
- border: 1px solid rgb(0, 75, 19);
- color:rgb(204, 255, 217)
-}
-.secondaryButton {
- background: rgb(132, 8, 0);
- border: 1px solid rgb(122, 29, 0);
- color: rgb(255, 221, 255);
- padding: 3pt 6pt;
- border-radius: 5px;
-}
-.secondaryButton:hover {
- background: rgb(177, 27, 0);
-}
-.stopTask {
- float: right;
-}
-#preview-tools {
- display: none;
- padding: 4pt;
-}
-.taskConfig {
- font-size: 10pt;
- color: #aaa;
- margin-bottom: 5pt;
-}
-.img-batch {
- display: inline;
-}
-#prompt_from_file {
- display: none;
-}
\ No newline at end of file
diff --git a/spaces/awacke1/ASRSpeechRecognition1/README.md b/spaces/awacke1/ASRSpeechRecognition1/README.md
deleted file mode 100644
index 36729b8bc9508e3cafad4b9821a235bc42063d51..0000000000000000000000000000000000000000
--- a/spaces/awacke1/ASRSpeechRecognition1/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ASRSpeechRecognition1
-emoji: 👀
-colorFrom: yellow
-colorTo: green
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/awacke1/CarePlanQnAWithContext/app.py b/spaces/awacke1/CarePlanQnAWithContext/app.py
deleted file mode 100644
index 58673d06f86ab37f710b480d410de3d5104a63ad..0000000000000000000000000000000000000000
--- a/spaces/awacke1/CarePlanQnAWithContext/app.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import gradio as gr
-
-context = "What should be documented in a care plan?\n"
-context = context + "Regardless of what your preferences are, your care plan should include:\n"
-context = context + "What your assessed care needs are.\n"
-context = context + "What type of support you should receive.\n"
-context = context + "Your desired outcomes.\n"
-context = context + "Who should provide care.\n"
-context = context + "When care and support should be provided.\n"
-context = context + "Records of care provided.\n"
-context = context + "Your wishes and personal preferences.\n"
-context = context + "The costs of the services.\n"
-
-context = context + "Dimensions\n"
-context = context + "1-Ontology of Plan\n"
-context = context + "2-Problems as evidenced by Signs of Systems\n"
-context = context + "3-Assessment of Needs\n"
-context = context + "4-Questions about problems faced\n"
-context = context + "5-Goals for long and short term improvements\n"
-context = context + "6-Knowledge-Behavior-Status Quality Measures\n"
-context = context + "7-Intervention List of Options\n"
-context = context + "8-Quality Measures\n"
-context = context + "9-Pathways Available\n"
-
-with open('WritingCarePlans.txt', 'r') as file:
- context = file.read()
-
-question = "What should be documented in a care plan?"
-
-gr.Interface.load(
- "huggingface/deepset/roberta-base-squad2",
- theme="default",
- css=".footer{display:none !important}",
- inputs=[gr.inputs.Textbox(lines=12, default=context, label="Context paragraph"), gr.inputs.Textbox(lines=3, default=question, label="Question")],
- outputs=[gr.outputs.Textbox(label="Answer"), gr.outputs.Textbox(label="Score")],
- title=None,
- description="Provide your own paragraph and ask any question about the text. How well does the model answer?").launch()
diff --git a/spaces/awacke1/StreamlitGraphViz/app.py b/spaces/awacke1/StreamlitGraphViz/app.py
deleted file mode 100644
index 66a45cfca2279fb250bd37d401dba51f2f1ffa31..0000000000000000000000000000000000000000
--- a/spaces/awacke1/StreamlitGraphViz/app.py
+++ /dev/null
@@ -1,556 +0,0 @@
-
-import time
-import re
-import pandas as pd
-import numpy as np
-import torch
-import torch.nn.functional as F
-import graphviz as graphviz
-import pydeck as pdk
-import streamlit as st
-
-from transformers import AutoTokenizer, AutoModel
-from tokenizers import Tokenizer, AddedToken
-from st_click_detector import click_detector
-
-st.title('Graphviz Gallery: https://graphviz.org/gallery/')
-
-# Using code:
-
-# Create a graphviz graph object
-graph = graphviz.Digraph()
-graph.edge('Grandpa', 'Ancestors')
-graph.edge('Grandma', 'Ancestors')
-graph.edge('Uncle', 'Grandma')
-graph.edge('Aunt', 'Grandma')
-graph.edge('Mom', 'Grandma')
-graph.edge('Cousin Bob', 'Aunt')
-graph.edge('Cousin Sue', 'Aunt')
-graph.edge('Brother', 'Mom')
-graph.edge('Sister', 'Mom')
-st.graphviz_chart(graph)
-
-
-st.graphviz_chart('''
-digraph G2 {
- node [shape=plaintext];
-    struct1 [label=<
-<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
-  <TR><TD>caption</TD></TR>
-</TABLE>
->];
-}
-''')
-
-
-
-st.title('Graphviz Dot Language: https://graphviz.org/doc/info/lang.html')
-
-# Using graph language:
-st.graphviz_chart('''
-digraph G {
- rankdir=LR
- node [shape=plaintext]
-    a [
-    label=<
-<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
-  <TR><TD ROWSPAN="3">class</TD></TR>
-  <TR><TD PORT="here">qualifier</TD></TR>
-</TABLE>>
-    ]
-    b [shape=ellipse style=filled
-    label=<
-<TABLE>
-  <TR>
-    <TD COLSPAN="3">elephant</TD>
-    <TD ROWSPAN="2">two</TD>
-  </TR>
-  <TR>
-    <TD COLSPAN="2" ROWSPAN="2">
-      <TABLE>
-        <TR><TD>corn</TD></TR>
-        <TR><TD>c</TD></TR>
-        <TR><TD>f</TD></TR>
-      </TABLE>
-    </TD>
-    <TD PORT="there">penguin</TD>
-  </TR>
-  <TR>
-    <TD COLSPAN="2">4</TD>
-  </TR>
-</TABLE>>
-    ]
-    c [
-    label=<long line 1<BR/>line 2<BR/>line 3>
-    ]
- subgraph { rank=same b c }
- a:here -> b:there [dir=both arrowtail=diamond]
- c -> b
- d [shape=triangle]
-    d -> c [label=<
-<TABLE>
-  <TR>
-    <TD>Edge labels<BR/>also</TD>
-  </TR>
-</TABLE>>
-    ]
-}
-''')
-
-st.graphviz_chart('''
-digraph R {
- rankdir=LR
- node [style=rounded]
- node1 [shape=box]
- node2 [fillcolor=yellow, style="rounded,filled", shape=diamond]
- node3 [shape=record, label="{ a | b | c }"]
- node1 -> node2 -> node3
-}
-''')
-
-
-# pydeck example
-st.title('Pydeck Example: https://docs.streamlit.io/library/api-reference/charts/st.pydeck_chart')
-df = pd.DataFrame(
- np.random.randn(1000, 2) / [50, 50] + [44.9366, -93.6661],
- columns=['lat', 'lon'])
-
-# 44.9366° N, -93.6661° W : Mound MN
-st.pydeck_chart(pdk.Deck(
- map_style=None,
- initial_view_state=pdk.ViewState(
- latitude=44.9366,
- longitude=-93.6661,
- zoom=11,
- pitch=50,
- ),
- layers=[
- pdk.Layer(
- 'HexagonLayer',
- data=df,
- get_position='[lon, lat]',
- radius=200,
- elevation_scale=4,
- elevation_range=[0, 1000],
- pickable=True,
- extruded=True,
- ),
- pdk.Layer(
- 'ScatterplotLayer',
- data=df,
- get_position='[lon, lat]',
- get_color='[200, 30, 0, 160]',
- get_radius=200,
- ),
- ],
- ))
-
-st.title('Vega Lite Example: https://docs.streamlit.io/library/api-reference/charts/st.vega_lite_chart ')
-df = pd.DataFrame(
- np.random.randn(200, 3),
- columns=['a', 'b', 'c'])
-
-st.vega_lite_chart(df, {
- 'mark': {'type': 'circle', 'tooltip': True},
- 'encoding': {
- 'x': {'field': 'a', 'type': 'quantitative'},
- 'y': {'field': 'b', 'type': 'quantitative'},
- 'size': {'field': 'c', 'type': 'quantitative'},
- 'color': {'field': 'c', 'type': 'quantitative'},
- },
- })
-
-# More graph examples
-
-st.graphviz_chart('''
-digraph structs {
- node [shape=record];
- struct1 [label=" left| mid\ dle| right"];
- struct2 [label=" one| two"];
- struct3 [label="hello\nworld |{ b |{c| d|e}| f}| g | h"];
- struct1:f1 -> struct2:f0;
- struct1:f2 -> struct3:here;
-}
-''')
-
-st.graphviz_chart('''
-graph G {
- fontname="Helvetica,Arial,sans-serif"
- node [fontname="Helvetica,Arial,sans-serif"]
- edge [fontname="Helvetica,Arial,sans-serif"]
- layout=fdp
- e
- subgraph clusterA {
- a -- b;
- subgraph clusterC {
- C -- D;
- }
- }
- subgraph clusterB {
- d -- f
- }
- d -- D
- e -- clusterB
- clusterC -- clusterB
-}
-''')
-
-st.graphviz_chart('''
-graph Transparency {
- layout=neato
- start=11 // empiric value to set orientation
- bgcolor="#0000ff11"
- node [shape=circle width=2.22 label="" style=filled]
- 5 [color="#0000ff80"]
- 6 [color="#ee00ee80"]
- 1 [color="#ff000080"]
- 2 [color="#eeee0080"]
- 3 [color="#00ff0080"]
- 4 [color="#00eeee80"]
- 1 -- 2 -- 3 -- 4 -- 5 -- 6 -- 1
- }
-''')
-
-st.graphviz_chart('''
-digraph UML_Class_diagram {
- fontname="Helvetica,Arial,sans-serif"
- node [fontname="Helvetica,Arial,sans-serif"]
- edge [fontname="Helvetica,Arial,sans-serif"]
- labelloc="t"
- label="UML Class diagram demo"
- graph [splines=false]
- node [shape=record style=filled fillcolor=gray95]
- edge [arrowhead=vee style=dashed]
- Client -> Interface1 [xlabel=dependency]
- Client -> Interface2
- edge [dir=back arrowtail=empty style=""]
- Interface1 -> Class1 [xlabel=inheritance]
- Interface2 -> Class1 [dir=none]
- Interface2 [label="" xlabel="Simple\ninterface" shape=circle]
-    Interface1[label = <{«interface» I/O | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
-    Class1[label = <{I/O class | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
- edge [dir=back arrowtail=empty style=dashed]
- Class1 -> System_1 [xlabel=implementation]
-    System_1 [label = <{System | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
-    "Shared resource" [label = <{Shared resource | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
- edge [dir=back arrowtail=diamond]
- "System_1" -> Subsystem_1 [xlabel="composition"]
-    Subsystem_1[label = <{Subsystem 1 | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
-    Subsystem_2[label = <{Subsystem 2 | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
-    Subsystem_3[label = <{Subsystem 3 | + property<br align="left"/>...<br align="left"/>|+ method<br align="left"/>...<br align="left"/>}>]
- "System_1" -> Subsystem_2
- "System_1" -> Subsystem_3
- edge [xdir=back arrowtail=odiamond]
- Subsystem_1 -> "Shared resource" [xlabel=aggregation]
- {Subsystem_2 Subsystem_3 } -> "Shared resource"
-}
-''')
-
-
-
-st.graphviz_chart('''
-digraph G {
- fontname="Helvetica,Arial,sans-serif"
- node [fontname="Helvetica,Arial,sans-serif"]
- edge [fontname="Helvetica,Arial,sans-serif"]
- subgraph cluster_1 {
- node [ style=filled,shape="box",fillcolor="antiquewhite:aquamarine" ]n5;
- node [ shape="ellipse",fillcolor="bisque4:blue2" ]n4;
- node [ shape="circle",fillcolor="cadetblue1:chocolate1" ]n3;
- node [ shape="diamond",fillcolor="crimson:cyan4" ]n2;
- node [ shape="triangle",fillcolor="deepskyblue2:firebrick" ]n1;
- node [ shape="pentagon",fillcolor="gray24:gray88" ]n0;
- label = "X11 Colors";
- }
- subgraph cluster_2 {
- node [ style=filled,shape="box",fillcolor="bisque:brown" ]n11;
- node [ shape="ellipse",fillcolor="green:darkorchid" ]n10;
- node [ shape="circle",fillcolor="deepskyblue:gold" ]n9;
- node [ shape="diamond",fillcolor="lightseagreen:orangered" ]n8;
- node [ shape="triangle",fillcolor="turquoise:salmon" ]n7;
- node [ shape="pentagon",fillcolor="snow:black" ]n6;
- label = "SVG Colors";
- }
- subgraph cluster_3 {
- node [ style=filled,shape="box",fillcolor="/accent3/1:/accent3/3" ]n17;
- node [ shape="ellipse",fillcolor="/accent4/1:/accent4/4" ]n16;
- node [ shape="circle",fillcolor="/accent5/1:/accent5/5" ]n15;
- node [ shape="diamond",fillcolor="/accent6/1:/accent6/6" ]n14;
- node [ shape="triangle",fillcolor="/accent7/1:/accent7/7" ]n13;
- node [ shape="pentagon",fillcolor="/accent8/1:/accent8/8" ]n12;
- label = "Brewer - accent";
- }
- subgraph cluster_4 {
- node [ style=filled,shape="box",fillcolor="/blues3/1:/blues3/2" ]n23;
- node [ shape="ellipse",fillcolor="/blues4/1:/blues4/3" ]n22;
- node [ shape="circle",fillcolor="/blues5/1:/blues5/4" ]n21;
- node [ shape="diamond",fillcolor="/blues6/1:/blues6/5" ]n20;
- node [ shape="triangle",fillcolor="/blues7/1:/blues7/6" ]n19;
- node [ shape="pentagon",fillcolor="/blues8/1:/blues8/7" ]n18;
- label = "Brewer - blues";
- }
-n3 -> n9 -> n15 -> n21;
-}
-''')
-
-st.graphviz_chart('''
-digraph G {bgcolor="#0000FF44:#FF000044" gradientangle=90
- fontname="Helvetica,Arial,sans-serif"
- node [fontname="Helvetica,Arial,sans-serif"]
- edge [fontname="Helvetica,Arial,sans-serif"]
- subgraph cluster_0 {
- style=filled;
- color=lightgrey;
- fillcolor="darkgray:gold";
- gradientangle=0
- node [fillcolor="yellow:green" style=filled gradientangle=270] a0;
- node [fillcolor="lightgreen:red"] a1;
- node [fillcolor="lightskyblue:darkcyan"] a2;
- node [fillcolor="cyan:lightslateblue"] a3;
- a0 -> a1 -> a2 -> a3;
- label = "process #1";
- }
- subgraph cluster_1 {
- node [fillcolor="yellow:magenta"
- style=filled gradientangle=270] b0;
- node [fillcolor="violet:darkcyan"] b1;
- node [fillcolor="peachpuff:red"] b2;
- node [fillcolor="mediumpurple:purple"] b3;
- b0 -> b1 -> b2 -> b3;
- label = "process #2";
- color=blue
- fillcolor="darkgray:gold";
- gradientangle=0
- style=filled;
- }
- start -> a0;
- start -> b0;
- a1 -> b3;
- b2 -> a3;
- a3 -> a0;
- a3 -> end;
- b3 -> end;
- start [shape=Mdiamond ,
- fillcolor="pink:red",
- gradientangle=90,
- style=radial];
- end [shape=Msquare,
- fillcolor="lightyellow:orange",
- style=radial,
- gradientangle=90];
-}
-''')
-
-st.graphviz_chart('''
-graph Color_wheel {
- graph [
- layout = neato
- label = "Color wheel, 33 colors.\nNeato layout"
- labelloc = b
- fontname = "Helvetica,Arial,sans-serif"
- start = regular
- normalize = 0
- ]
- node [
- shape = circle
- style = filled
- color = "#00000088"
- fontname = "Helvetica,Arial,sans-serif"
- ]
- edge [
- len = 2.7
- color = "#00000088"
- fontname = "Helvetica,Arial,sans-serif"
- ]
- subgraph Dark {
- node [fontcolor = white width = 1.4]
- center [width = 1 style = invis shape = point]
- center -- darkred [label = "0°/360°"]
- darkred [fillcolor = darkred]
- brown [fillcolor = brown]
- brown -- center [label = "30°"]
- olive [fillcolor = olive]
- olive -- center [label = "60°"]
- darkolivegreen [fillcolor = darkolivegreen fontsize = 10]
- darkolivegreen -- center [label = "90°"]
- darkgreen [fillcolor = darkgreen]
- darkgreen -- center [label = "120°"]
- "dark hue 0.416" [color = ".416 1 .6" fontcolor = white]
- "dark hue 0.416" -- center [label = "150°"]
- darkcyan [fillcolor = darkcyan]
- darkcyan -- center [label = "180°"]
- "dark hue 0.583" [color = ".583 1 .6" fontcolor = white]
- "dark hue 0.583" -- center [label = "210°"]
- darkblue [fillcolor = darkblue]
- darkblue -- center [label = "240°"]
- "dark hue 0.750" [color = ".750 1 .6"]
- "dark hue 0.750" -- center [label = "270°"]
- darkmagenta [fillcolor = darkmagenta]
- darkmagenta -- center [label = "300°"]
- "dark hue 0.916" [color = ".916 1 .6"]
- "dark hue 0.916" -- center [label = "330°"]
- }
- subgraph Tue {
- node [width = 1.3]
- "hue 0.083" -- brown
- "hue 0.083" [color = ".083 1 1"]
- "hue 0.125" [color = ".125 1 1"]
- "hue 0.166" -- olive
- "hue 0.166" [color = ".166 1 1"]
- "hue 0.208" [color = ".208 1 1"]
- "hue 0.250" -- darkolivegreen
- "hue 0.250" [color = ".250 1 1"]
- "hue 0.291" [color = ".291 1 1"]
- "hue 0.333" -- darkgreen
- "hue 0.333" [color = ".333 1 1"]
- "hue 0.375" [color = ".375 1 1"]
- "hue 0.416" -- "dark hue 0.416"
- "hue 0.416" [color = ".416 1 1"]
- "hue 0.458" [color = ".458 1 1"]
- "hue 0.500" -- darkcyan
- "hue 0.500" [color = ".500 1 1"]
- "hue 0.541" [color = ".541 1 1"]
- node [fontcolor = white]
- "hue 0.000" [color = ".000 1 1"]
- "hue 0.000" -- darkred
- "hue 0.041" [color = ".041 1 1"]
- "hue 0.583" -- "dark hue 0.583"
- "hue 0.583" [color = ".583 1 1"]
- "hue 0.625" [color = ".625 1 1"]
- "hue 0.666" -- darkblue
- "hue 0.666" [color = ".666 1 1"]
- "hue 0.708" [color = ".708 1 1"]
- "hue 0.750" -- "dark hue 0.750"
- "hue 0.750" [color = ".750 1 1"]
- "hue 0.791" [color = ".791 1 1"]
- "hue 0.833" -- darkmagenta
- "hue 0.833" [color = ".833 1 1"]
- "hue 0.875" [color = ".875 1 1"]
- "hue 0.916" -- "dark hue 0.916"
- "hue 0.916" [color = ".916 1 1"]
- "hue 0.958" [color = ".958 1 1"]
- edge [len = 1]
- "hue 0.000" -- "hue 0.041" -- "hue 0.083" -- "hue 0.125" -- "hue 0.166" -- "hue 0.208"
- "hue 0.208" -- "hue 0.250" -- "hue 0.291" -- "hue 0.333" -- "hue 0.375" -- "hue 0.416"
- "hue 0.416" -- "hue 0.458" -- "hue 0.500" --"hue 0.541" -- "hue 0.583" -- "hue 0.625"
- "hue 0.625" -- "hue 0.666" -- "hue 0.708" -- "hue 0.750" -- "hue 0.791" -- "hue 0.833"
- "hue 0.833" -- "hue 0.875" -- "hue 0.916" -- "hue 0.958" -- "hue 0.000"
- }
- subgraph Main_colors {
- node [width = 2 fontsize = 20]
- red [fillcolor = red fontcolor = white]
- orangered [fillcolor = orangered]
- orange [fillcolor = orange]
- gold [fillcolor = gold]
- yellow [fillcolor = yellow]
- yellowgreen [fillcolor = yellowgreen]
- deeppink [fillcolor = deeppink fontcolor = white]
- fuchsia [label = "fuchsia\nmagenta" fillcolor = fuchsia fontcolor = white]
- purple [fillcolor = purple fontcolor = white]
- blue [fillcolor = blue fontcolor = white]
- cornflowerblue [fillcolor = cornflowerblue]
- deepskyblue [fillcolor = deepskyblue]
- aqua [fillcolor = aqua label = "aqua\ncyan"]
- springgreen [fillcolor = springgreen]
- green [fillcolor = green]
- purple -- fuchsia -- deeppink -- red
- cornflowerblue -- blue -- purple
- cornflowerblue -- deepskyblue -- aqua [len = 1.7]
- aqua -- springgreen -- green -- yellowgreen -- yellow
- yellow -- gold -- orange -- orangered -- red [len = 1.6]
- orange -- "hue 0.083"
- deeppink -- "hue 0.916"
- deeppink -- "hue 0.875"
- red -- "hue 0.000"
- yellowgreen -- "hue 0.250"
- blue -- "hue 0.666"
- yellow -- "hue 0.166"
- gold -- "hue 0.125"
- green -- "hue 0.333"
- springgreen -- "hue 0.416"
- aqua -- "hue 0.500"
- cornflowerblue -- "hue 0.583"
- deepskyblue -- "hue 0.541"
- purple -- "hue 0.791"
- purple -- "hue 0.750"
- fuchsia -- "hue 0.833"
- }
- subgraph Light_colors {
- node [width = 2 fontsize = 20]
- node [shape = circle width = 1.8]
- edge [len = 2.1]
- pink [fillcolor = pink]
- pink -- red
- lightyellow [fillcolor = lightyellow]
- lightyellow -- yellow
- mediumpurple [fillcolor = mediumpurple]
- mediumpurple -- purple
- violet [fillcolor = violet]
- violet -- fuchsia
- hotpink [fillcolor = hotpink]
- hotpink -- deeppink
- "light hue 0.250" [color = ".250 .2 1"]
- "light hue 0.250" -- yellowgreen
- lightcyan [fillcolor = lightcyan]
- lightcyan -- aqua
- lightslateblue [fillcolor = lightslateblue]
- lightslateblue -- blue
- lightgreen [fillcolor = lightgreen]
- lightgreen -- green
- lightskyblue [fillcolor = lightskyblue]
- lightskyblue -- deepskyblue
- peachpuff [fillcolor = peachpuff]
- peachpuff -- orange
- "light hue 0.416" [color = ".416 .2 1"]
- "light hue 0.416" -- springgreen
- }
- subgraph Tints {
- node [width = 1]
- edge [len = 2.4]
- "hue 0 tint" -- pink
- "hue 0 tint" [color = "0 .1 1"]
- "hue 0.041 tint" [color = ".041 .1 1"]
- "hue 0.083 tint" -- peachpuff
- "hue 0.083 tint" [color = ".083 .1 1"]
- "hue 0.125 tint" [color = ".125 .1 1"]
- "hue 0.166 tint" -- lightyellow
- "hue 0.166 tint" [color = ".166 .1 1"]
- "hue 0.208 tint" [color = ".208 .1 1"]
- "hue 0.250 tint" -- "light hue 0.250"
- "hue 0.250 tint" [color = ".250 .1 1"]
- "hue 0.291 tint" [color = ".291 .1 1"]
- "hue 0.333 tint" -- lightgreen
- "hue 0.333 tint" [color = ".333 .1 1"]
- "hue 0.375 tint" [color = ".375 .1 1"]
- "hue 0.416 tint" -- "light hue 0.416"
- "hue 0.416 tint" [color = ".416 .1 1"]
- "hue 0.458 tint" [color = ".458 .1 1"]
- "hue 0.5 tint" -- lightcyan
- "hue 0.5 tint" [color = ".5 .1 1"]
- "hue 0.541 tint" -- lightskyblue
- "hue 0.541 tint" [color = ".541 .1 1"]
- "hue 0.583 tint" [color = ".583 .1 1"]
- "hue 0.625 tint" [color = ".625 .1 1"]
- "hue 0.666 tint" -- lightslateblue
- "hue 0.666 tint" [color = ".666 .1 1"]
- "hue 0.708 tint" [color = ".708 .1 1"]
- "hue 0.750 tint" -- mediumpurple
- "hue 0.750 tint" [color = ".750 .1 1"]
- "hue 0.791 tint" [color = ".791 .1 1"]
- "hue 0.833 tint" -- violet
- "hue 0.833 tint" [color = ".833 .1 1"]
- "hue 0.875 tint" [color = ".875 .1 1"]
- "hue 0.916 tint" -- hotpink
- "hue 0.916 tint" [color = ".916 .1 1"]
- "hue 0.958 tint" [color = ".958 .1 1"]
- edge [len = 2]
- "hue 0 tint" -- "hue 0.041 tint" -- "hue 0.083 tint" -- "hue 0.125 tint" -- "hue 0.166 tint" -- "hue 0.208 tint"
- "hue 0.208 tint" -- "hue 0.250 tint" -- "hue 0.291 tint" -- "hue 0.333 tint" -- "hue 0.375 tint" -- "hue 0.416 tint"
- "hue 0.416 tint" -- "hue 0.458 tint" -- "hue 0.5 tint" --"hue 0.541 tint" -- "hue 0.583 tint" -- "hue 0.625 tint"
- "hue 0.625 tint" -- "hue 0.666 tint" -- "hue 0.708 tint" -- "hue 0.750 tint" -- "hue 0.791 tint" -- "hue 0.833 tint"
- "hue 0.833 tint" -- "hue 0.875 tint" -- "hue 0.916 tint" -- "hue 0.958 tint" -- "hue 0 tint"
- }
- }
-''')
\ No newline at end of file
diff --git a/spaces/awacke1/chatGPT/encoder.py b/spaces/awacke1/chatGPT/encoder.py
deleted file mode 100644
index f461b87d889b75d2239c0a9cc731fe4ad41c7c7b..0000000000000000000000000000000000000000
--- a/spaces/awacke1/chatGPT/encoder.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# This file includes code which was modified from https://github.com/openai/gpt-2
-
-import tensorflow as tf
-import os
-import json
-import regex as re
-from functools import lru_cache
-import requests
-import boto3
-import pdb
-
-
-@lru_cache()
-def bytes_to_unicode():
-
- bs = (
- list(range(ord("!"), ord("~") + 1))
- + list(range(ord("¡"), ord("¬") + 1))
- + list(range(ord("®"), ord("ÿ") + 1))
- )
- cs = bs[:]
- n = 0
- for b in range(2 ** 8):
- if b not in bs:
- bs.append(b)
- cs.append(2 ** 8 + n)
- n += 1
- cs = [chr(n) for n in cs]
- return dict(zip(bs, cs))
-
-
-def get_pairs(word):
- pairs = set()
- prev_char = word[0]
- for char in word[1:]:
- pairs.add((prev_char, char))
- prev_char = char
- return pairs
-
-
-class Encoder:
- def __init__(self, encoder, bpe_merges, errors="replace"):
- self.encoder = encoder
- self.decoder = {v: k for k, v in self.encoder.items()}
- self.errors = errors
- self.byte_encoder = bytes_to_unicode()
- self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
- self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
- self.cache = {}
- self.pat = re.compile(
- r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
- )
-
- def bpe(self, token):
- if token in self.cache:
- return self.cache[token]
- word = tuple(token)
-
- pairs = get_pairs(word)
-
- if not pairs:
- return token
-
- while True:
- bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
- if bigram not in self.bpe_ranks:
- break
- first, second = bigram
- new_word = []
- i = 0
- while i < len(word):
- try:
- j = word.index(first, i)
- new_word.extend(word[i:j])
- i = j
- except:
- new_word.extend(word[i:])
- break
-
- if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
- new_word.append(first + second)
- i += 2
- else:
- new_word.append(word[i])
- i += 1
- new_word = tuple(new_word)
- word = new_word
- if len(word) == 1:
- break
- else:
- pairs = get_pairs(word)
-
- word = " ".join(word)
- self.cache[token] = word
- return word
-
- def encode(self, text):
- bpe_tokens = []
- for token in re.findall(self.pat, text):
- token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
-
- bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" "))
- return bpe_tokens
-
- def decode(self, tokens):
- text = "".join([self.decoder[token] for token in tokens])
- text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
- return text
-
-
-def get_encoder():
- with open("encoder.json", "r") as f:
- encoder = json.load(f)
- with open("vocab.bpe", "r", encoding="utf-8") as f:
- bpe_data = f.read()
- bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]]
- return Encoder(encoder=encoder, bpe_merges=bpe_merges)
-
-# encoder = get_encoder()
-# print('encoded is ', encoder.encode('hello 👋 world 🌍 This is a long string to test whether or not the emoji issue was fixed!'))
\ No newline at end of file
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/geometries/RingGeometry.js b/spaces/banana-projects/web3d/node_modules/three/src/geometries/RingGeometry.js
deleted file mode 100644
index 8acbda18d365c85ec46de94ddde8ccf5e5e3cec2..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/geometries/RingGeometry.js
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * @author Kaleb Murphy
- * @author Mugen87 / https://github.com/Mugen87
- */
-
-import { Geometry } from '../core/Geometry.js';
-import { BufferGeometry } from '../core/BufferGeometry.js';
-import { Float32BufferAttribute } from '../core/BufferAttribute.js';
-import { Vector2 } from '../math/Vector2.js';
-import { Vector3 } from '../math/Vector3.js';
-
-// RingGeometry
-
-function RingGeometry( innerRadius, outerRadius, thetaSegments, phiSegments, thetaStart, thetaLength ) {
-
- Geometry.call( this );
-
- this.type = 'RingGeometry';
-
- this.parameters = {
- innerRadius: innerRadius,
- outerRadius: outerRadius,
- thetaSegments: thetaSegments,
- phiSegments: phiSegments,
- thetaStart: thetaStart,
- thetaLength: thetaLength
- };
-
- this.fromBufferGeometry( new RingBufferGeometry( innerRadius, outerRadius, thetaSegments, phiSegments, thetaStart, thetaLength ) );
- this.mergeVertices();
-
-}
-
-RingGeometry.prototype = Object.create( Geometry.prototype );
-RingGeometry.prototype.constructor = RingGeometry;
-
-// RingBufferGeometry
-
-function RingBufferGeometry( innerRadius, outerRadius, thetaSegments, phiSegments, thetaStart, thetaLength ) {
-
- BufferGeometry.call( this );
-
- this.type = 'RingBufferGeometry';
-
- this.parameters = {
- innerRadius: innerRadius,
- outerRadius: outerRadius,
- thetaSegments: thetaSegments,
- phiSegments: phiSegments,
- thetaStart: thetaStart,
- thetaLength: thetaLength
- };
-
- innerRadius = innerRadius || 0.5;
- outerRadius = outerRadius || 1;
-
- thetaStart = thetaStart !== undefined ? thetaStart : 0;
- thetaLength = thetaLength !== undefined ? thetaLength : Math.PI * 2;
-
- thetaSegments = thetaSegments !== undefined ? Math.max( 3, thetaSegments ) : 8;
- phiSegments = phiSegments !== undefined ? Math.max( 1, phiSegments ) : 1;
-
- // buffers
-
- var indices = [];
- var vertices = [];
- var normals = [];
- var uvs = [];
-
- // some helper variables
-
- var segment;
- var radius = innerRadius;
- var radiusStep = ( ( outerRadius - innerRadius ) / phiSegments );
- var vertex = new Vector3();
- var uv = new Vector2();
- var j, i;
-
- // generate vertices, normals and uvs
-
- for ( j = 0; j <= phiSegments; j ++ ) {
-
- for ( i = 0; i <= thetaSegments; i ++ ) {
-
-			// values are generated from the inside of the ring to the outside
-
- segment = thetaStart + i / thetaSegments * thetaLength;
-
- // vertex
-
- vertex.x = radius * Math.cos( segment );
- vertex.y = radius * Math.sin( segment );
-
- vertices.push( vertex.x, vertex.y, vertex.z );
-
- // normal
-
- normals.push( 0, 0, 1 );
-
- // uv
-
- uv.x = ( vertex.x / outerRadius + 1 ) / 2;
- uv.y = ( vertex.y / outerRadius + 1 ) / 2;
-
- uvs.push( uv.x, uv.y );
-
- }
-
- // increase the radius for next row of vertices
-
- radius += radiusStep;
-
- }
-
- // indices
-
- for ( j = 0; j < phiSegments; j ++ ) {
-
- var thetaSegmentLevel = j * ( thetaSegments + 1 );
-
- for ( i = 0; i < thetaSegments; i ++ ) {
-
- segment = i + thetaSegmentLevel;
-
- var a = segment;
- var b = segment + thetaSegments + 1;
- var c = segment + thetaSegments + 2;
- var d = segment + 1;
-
- // faces
-
- indices.push( a, b, d );
- indices.push( b, c, d );
-
- }
-
- }
-
- // build geometry
-
- this.setIndex( indices );
- this.addAttribute( 'position', new Float32BufferAttribute( vertices, 3 ) );
- this.addAttribute( 'normal', new Float32BufferAttribute( normals, 3 ) );
- this.addAttribute( 'uv', new Float32BufferAttribute( uvs, 2 ) );
-
-}
-
-RingBufferGeometry.prototype = Object.create( BufferGeometry.prototype );
-RingBufferGeometry.prototype.constructor = RingBufferGeometry;
-
-
-export { RingGeometry, RingBufferGeometry };
diff --git a/spaces/bigcode/santacoder-demo/README.md b/spaces/bigcode/santacoder-demo/README.md
deleted file mode 100644
index d0bb17b02ee1d1ae999159ff68d85589ec4c4ebf..0000000000000000000000000000000000000000
--- a/spaces/bigcode/santacoder-demo/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: SantaCoder Demo
-emoji: 🎅
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/bioriAsaeru/text-to-voice/Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Setup Free.md b/spaces/bioriAsaeru/text-to-voice/Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Setup Free.md
deleted file mode 100644
index 66a72857cf9a5a2e7d3e90eaa5931b1a67223644..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked Setup Free.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-How to Get Spotify Premium 1.1.14 Crack Full APK PC Free Download in 2023
-Spotify is one of the most popular and widely used online music streaming services in the world. It offers millions of songs, podcasts, playlists, and radio stations for users to enjoy on various devices. However, Spotify also has some limitations for free users, such as ads, shuffle mode, low audio quality, and offline listening restrictions. If you want to get rid of these annoyances and unlock all the premium features of Spotify, you may need to pay $9.99 per month for a Spotify Premium subscription.
-Adobe Acrobat Pro DC 2018.009.20050 Pre-Cracked setup free
Download File ✔ https://urloso.com/2uyPFa
-But what if you don't want to spend money on Spotify Premium? Is there any way to get Spotify Premium for free on your PC? The answer is yes. In this article, we will show you how to get Spotify Premium 1.1.14 Crack Full APK PC Free Download in 2023 with two methods: the first uses a professional Spotify music converter and downloader, and the second uses a Spotify Premium cracked version for Windows and Mac.
-Method 1: Use DRmare Spotify Music Converter and Downloader
-The first method we recommend is using DRmare Spotify Music Converter and Downloader, which is one of the best Spotify cracked versions for Windows 10, Windows 11, and Mac[^2^]. It is a powerful tool that can help you download and convert Spotify songs with a free account on your computer. You can easily get Spotify Premium features such as ad-free listening, high audio quality, offline playback, and unlimited skips with this tool.
-DRmare Spotify Music Converter and Downloader can convert Spotify songs to MP3, WAV, FLAC, AAC, M4A, and M4B formats without losing any quality. You can also customize the output parameters such as bit rate, sample rate, channel, codec, etc. according to your needs. Moreover, DRmare Spotify Music Converter and Downloader can preserve the original ID3 tags and metadata information of the songs, such as title, artist, album, genre, etc.
-With DRmare Spotify Music Converter and Downloader, you can easily get Spotify Premium 1.1.14 Crack Full APK PC Free Download in 2023 by following these steps:
-
-
-- Download and install DRmare Spotify Music Converter and Downloader on your computer from its official website.
-- Launch DRmare Spotify Music Converter and Downloader and it will automatically open the Spotify app on your computer.
-- Drag and drop the songs, playlists, albums, or podcasts that you want to download from Spotify to DRmare Spotify Music Converter and Downloader.
-- Click the menu button on the top right corner and choose Preferences. Then you can adjust the output format and other settings as you like.
-- Click the Convert button on the bottom right corner and wait for DRmare Spotify Music Converter and Downloader to download and convert your selected Spotify tracks.
-- After the conversion is done, you can find the downloaded Spotify songs in the output folder on your computer.
-- Transfer the downloaded Spotify songs to your mobile devices or other players and enjoy them offline without any limitations.
-
-Method 2: Use Spotify Premium Cracked Version for Windows and Mac
-The second method we introduce is using a Spotify Premium cracked version for Windows and Mac. This is a modified version of the official Spotify app that has been hacked or patched to bypass the premium verification and unlock all the premium features of Spotify. You can use it to listen to ad-free music, skip unlimited tracks, download songs for offline listening, etc.
-However, this method also has some risks and drawbacks. First of all, using a Spotify Premium cracked version may violate the terms of service of Spotify and cause your account to be banned or suspended. Secondly, using a Spotify Premium cracked version may expose your devices to malware or viruses that may harm your data or privacy. Thirdly, using a Spotify Premium cracked version may not guarantee the stability or compatibility of the app as it may crash or stop working at any time.
-If you still want to try this method at your own risk, you can follow these steps to get Spotify Premium 1.1.14 Crack Full APK PC Free
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Archicrypt Ultimate Ram Disk Warez 12l How to Encrypt and Mount Your Hard Disks as Memory Disks.md b/spaces/bioriAsaeru/text-to-voice/Archicrypt Ultimate Ram Disk Warez 12l How to Encrypt and Mount Your Hard Disks as Memory Disks.md
deleted file mode 100644
index d219395704ea39bf4351f07d05f02c123d385e8e..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Archicrypt Ultimate Ram Disk Warez 12l How to Encrypt and Mount Your Hard Disks as Memory Disks.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Archicrypt Ultimate Ram Disk Warez 12l
Download File --->>> https://urloso.com/2uyRbd
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/bioriAsaeru/text-to-voice/CGAxis Volume 114 Interior Lights 3D Models Collection The Ultimate Resource for Interior Lighting.md b/spaces/bioriAsaeru/text-to-voice/CGAxis Volume 114 Interior Lights 3D Models Collection The Ultimate Resource for Interior Lighting.md
deleted file mode 100644
index cee1eb88a7387b90dbdfd9b50e97f7157bd387bc..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/CGAxis Volume 114 Interior Lights 3D Models Collection The Ultimate Resource for Interior Lighting.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-Interior Lights 3D Models Collection Volume 114 is a package of 60 high-polygon 3d models of various interior lamps. You can find here ceiling hanging lamps, table lamps, desk lamps, floor lamps and halogen ceiling lights.
-CGAxis – Volume 114 – Interior Lights 3D Models Collection
DOWNLOAD - https://urloso.com/2uyPjn
-All models in this collection are prepared for 3ds max 2010 or higher (V-Ray, Mental Ray, Corona, Scanline), Cinema 4D R15 or higher (V-Ray, Advanced Renderer), Unreal Engine, FBX and OBJ file formats.
-CGAxis 3D Interiors Volume 4 is a collection of 10 highly detailed interiors with textures, materials, lighting and rendering setup for 3ds max 2011 with V-Ray and Cinema 4D R15 with V-Ray. Ready to render.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Cd Lucky Dube Discografia Torrentl.md b/spaces/bioriAsaeru/text-to-voice/Cd Lucky Dube Discografia Torrentl.md
deleted file mode 100644
index 7308d130bdc6c711323cb8d704ffc1638154ce24..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Cd Lucky Dube Discografia Torrentl.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Cd Lucky Dube Discografia Torrentl
Download File ››››› https://urloso.com/2uyRzQ
-
-... MotoGIS v2.4. Yngwie Malmsteen, Attack!! full album zip fight night 4 pc download. ... Cd Lucky Dube Discografia Torrentl · Championship ... 1fdad05405
-
-
-
diff --git a/spaces/bioriAsaeru/text-to-voice/Free Download Of Solution Manual Of Cost Accounting By Pedro Guerrero Zip Everything You Need to Know.md b/spaces/bioriAsaeru/text-to-voice/Free Download Of Solution Manual Of Cost Accounting By Pedro Guerrero Zip Everything You Need to Know.md
deleted file mode 100644
index 8c3cbe73536ca541333d7f7abe87fff40c902ae3..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Free Download Of Solution Manual Of Cost Accounting By Pedro Guerrero Zip Everything You Need to Know.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Free Download Of Solution Manual Of Cost Accounting By Pedro Guerrero Zip
Download Zip ……… https://urloso.com/2uyOsG
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/bioriAsaeru/text-to-voice/L Allieva Epub Download Books Where to Find the eBooks of the Popular Italian Crime Novels.md b/spaces/bioriAsaeru/text-to-voice/L Allieva Epub Download Books Where to Find the eBooks of the Popular Italian Crime Novels.md
deleted file mode 100644
index 2aabafe5761f0c3b8ed4a786caa78889aff7bc0b..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/L Allieva Epub Download Books Where to Find the eBooks of the Popular Italian Crime Novels.md
+++ /dev/null
@@ -1,6 +0,0 @@
-L Allieva Epub Download Books sorento efilm motivo
Download File ⚹⚹⚹ https://urloso.com/2uyP8D
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/outpainting_example1.py b/spaces/birsardar/stable-diffusion-mat-outpainting-primer/outpainting_example1.py
deleted file mode 100644
index 0e62fd7903a84a40f87cde3b380105a366c31034..0000000000000000000000000000000000000000
--- a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/outpainting_example1.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# %%
-# an example script of how to do outpainting with the diffusers inpainting pipeline
-# this is basically just the example from
-# https://huggingface.co/runwayml/stable-diffusion-inpainting
-# %%
-from PIL import Image
-import numpy as np
-import torch
-
-from diffusers import StableDiffusionInpaintPipeline
-
-pipe = StableDiffusionInpaintPipeline.from_pretrained(
- "runwayml/stable-diffusion-inpainting",
- revision="fp16",
- torch_dtype=torch.float16,
-)
-pipe.to("cuda")
-
-# load the image, extract the mask
-rgba = Image.open('primed_image_with_alpha_channel.png')
-mask_image = Image.fromarray(np.array(rgba)[:, :, 3] == 0)
-
-# run the pipeline
-prompt = "Face of a yellow cat, high resolution, sitting on a park bench."
-# image and mask_image should be PIL images.
-# The mask structure is white for outpainting and black for keeping as is
-image = pipe(
- prompt=prompt,
- image=rgba,
- mask_image=mask_image,
-).images[0]
-image
-
-# %%
-# the vae does lossy encoding, we could get better quality if we pasted the original image into our result.
-# this may yield visible edges
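-
-# %%
-# A minimal sketch of that paste-back, assuming `image` and `rgba` from above; this is one
-# possible approach, not part of the original example. The alpha channel is used as the
-# paste mask so known pixels are restored exactly, and the resize guards against the
-# pipeline returning a different resolution (e.g. 512x512) than the input.
-rgba_resized = rgba.resize(image.size)
-result = image.copy()
-result.paste(rgba_resized.convert("RGB"), mask=rgba_resized.split()[3])
-result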
diff --git a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/torch_utils/ops/conv2d_resample.py b/spaces/birsardar/stable-diffusion-mat-outpainting-primer/torch_utils/ops/conv2d_resample.py
deleted file mode 100644
index cd4750744c83354bab78704d4ef51ad1070fcc4a..0000000000000000000000000000000000000000
--- a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/torch_utils/ops/conv2d_resample.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""2D convolution with optional up/downsampling."""
-
-import torch
-
-from .. import misc
-from . import conv2d_gradfix
-from . import upfirdn2d
-from .upfirdn2d import _parse_padding
-from .upfirdn2d import _get_filter_size
-
-#----------------------------------------------------------------------------
-
-def _get_weight_shape(w):
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
- shape = [int(sz) for sz in w.shape]
- misc.assert_shape(w, shape)
- return shape
-
-#----------------------------------------------------------------------------
-
-def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
- """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.
- """
- out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
-
- # Flip weight if requested.
- if not flip_weight: # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
- w = w.flip([2, 3])
-
- # Workaround performance pitfall in cuDNN 8.0.5, triggered when using
- # 1x1 kernel + memory_format=channels_last + less than 64 channels.
- if kw == 1 and kh == 1 and stride == 1 and padding in [0, [0, 0], (0, 0)] and not transpose:
- if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64:
- if out_channels <= 4 and groups == 1:
- in_shape = x.shape
- x = w.squeeze(3).squeeze(2) @ x.reshape([in_shape[0], in_channels_per_group, -1])
- x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
- else:
- x = x.to(memory_format=torch.contiguous_format)
- w = w.to(memory_format=torch.contiguous_format)
- x = conv2d_gradfix.conv2d(x, w, groups=groups)
- return x.to(memory_format=torch.channels_last)
-
- # Otherwise => execute using conv2d_gradfix.
- op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
- return op(x, w, stride=stride, padding=padding, groups=groups)
-
-#----------------------------------------------------------------------------
-
-@misc.profiled_function
-def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
- r"""2D convolution with optional up/downsampling.
-
- Padding is performed only once at the beginning, not between the operations.
-
- Args:
- x: Input tensor of shape
- `[batch_size, in_channels, in_height, in_width]`.
- w: Weight tensor of shape
- `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
- f: Low-pass filter for up/downsampling. Must be prepared beforehand by
- calling upfirdn2d.setup_filter(). None = identity (default).
- up: Integer upsampling factor (default: 1).
- down: Integer downsampling factor (default: 1).
- padding: Padding with respect to the upsampled image. Can be a single number
- or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
- (default: 0).
- groups: Split input channels into N groups (default: 1).
- flip_weight: False = convolution, True = correlation (default: True).
- flip_filter: False = convolution, True = correlation (default: False).
-
- Returns:
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
- """
- # Validate arguments.
- assert isinstance(x, torch.Tensor) and (x.ndim == 4)
- assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
- assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
- assert isinstance(up, int) and (up >= 1)
- assert isinstance(down, int) and (down >= 1)
- assert isinstance(groups, int) and (groups >= 1)
- out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
- fw, fh = _get_filter_size(f)
- px0, px1, py0, py1 = _parse_padding(padding)
-
- # Adjust padding to account for up/downsampling.
- if up > 1:
- px0 += (fw + up - 1) // 2
- px1 += (fw - up) // 2
- py0 += (fh + up - 1) // 2
- py1 += (fh - up) // 2
- if down > 1:
- px0 += (fw - down + 1) // 2
- px1 += (fw - down) // 2
- py0 += (fh - down + 1) // 2
- py1 += (fh - down) // 2
-
- # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
- if kw == 1 and kh == 1 and (down > 1 and up == 1):
- x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
- x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
- return x
-
- # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
- if kw == 1 and kh == 1 and (up > 1 and down == 1):
- x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
- x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
- return x
-
- # Fast path: downsampling only => use strided convolution.
- if down > 1 and up == 1:
- x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
- x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
- return x
-
- # Fast path: upsampling with optional downsampling => use transpose strided convolution.
- if up > 1:
- if groups == 1:
- w = w.transpose(0, 1)
- else:
- w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
- w = w.transpose(1, 2)
- w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
- px0 -= kw - 1
- px1 -= kw - up
- py0 -= kh - 1
- py1 -= kh - up
- pxt = max(min(-px0, -px1), 0)
- pyt = max(min(-py0, -py1), 0)
- x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
- x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
- if down > 1:
- x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
- return x
-
- # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
- if up == 1 and down == 1:
- if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
- return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
-
- # Fallback: Generic reference implementation.
- x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
- x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
- if down > 1:
- x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
- return x
-
-#----------------------------------------------------------------------------
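-
-# A minimal, commented-out usage sketch, assuming this `torch_utils` package is importable
-# as in the rest of the repository; shapes and values are illustrative only. The filter is
-# prepared with `upfirdn2d.setup_filter()`, as the docstring above requires.
-#
-#   import torch
-#   from torch_utils.ops import upfirdn2d
-#   from torch_utils.ops.conv2d_resample import conv2d_resample
-#
-#   x = torch.randn(1, 16, 32, 32)                   # [batch, in_channels, H, W]
-#   w = torch.randn(16, 16, 3, 3)                    # [out_channels, in_channels, kh, kw]
-#   f = upfirdn2d.setup_filter([1, 3, 3, 1])         # low-pass filter for resampling
-#   y = conv2d_resample(x, w, f=f, up=2, padding=1)  # 2x upsampled output: [1, 16, 64, 64]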
diff --git a/spaces/boda/arabic-names-generator/README.md b/spaces/boda/arabic-names-generator/README.md
deleted file mode 100644
index 03008a049e0e59a7c7b3fd8e9be27daae90e245e..0000000000000000000000000000000000000000
--- a/spaces/boda/arabic-names-generator/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Arabic Names Generator
-emoji: 💻
-colorFrom: blue
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/bowtiedhal/essay_outline_generator/README.md b/spaces/bowtiedhal/essay_outline_generator/README.md
deleted file mode 100644
index a09e1ac7358afff1493eb3a48c9d5071655cdc27..0000000000000000000000000000000000000000
--- a/spaces/bowtiedhal/essay_outline_generator/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Essay Outline Generator
-emoji: 🐠
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.5
-app_file: app.py
-pinned: false
-license: gpl
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/bradarrML/diffuse-the-rest/build/_app/immutable/components/pages/_page.svelte-1525ec40.js b/spaces/bradarrML/diffuse-the-rest/build/_app/immutable/components/pages/_page.svelte-1525ec40.js
deleted file mode 100644
index f1269ad8d49e9e73ca29491cacd3be1af58342ae..0000000000000000000000000000000000000000
--- a/spaces/bradarrML/diffuse-the-rest/build/_app/immutable/components/pages/_page.svelte-1525ec40.js
+++ /dev/null
@@ -1,15 +0,0 @@
-import{S as Ne,i as Be,s as Re,H as be,I as ve,m as u,h as l,n,b as $,F as s,A as je,e as pt,f as J,g as Je,t as ne,d as Xe,J as xt,k as m,a as V,q as E,l as g,c as F,r as k,K as de,v as Qe,w as $e,x as et,y as tt,L as It,M as Ge,N as Dt,O as St,P as Ct,o as Pt,z as At,Q as Lt,p as mt,R as Mt,T as Ke}from"../../chunks/index-032ac624.js";function Tt(i){let e,a,t;return{c(){e=be("svg"),a=be("path"),t=be("path"),this.h()},l(r){e=ve(r,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var c=u(e);a=ve(c,"path",{d:!0,fill:!0}),u(a).forEach(l),t=ve(c,"path",{d:!0,fill:!0}),u(t).forEach(l),c.forEach(l),this.h()},h(){n(a,"d","M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z"),n(a,"fill","#FF9D00"),n(t,"d","M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 
13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z"),n(t,"fill","#FFD21E"),n(e,"class",i[0]),n(e,"xmlns","http://www.w3.org/2000/svg"),n(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),n(e,"aria-hidden","true"),n(e,"focusable","false"),n(e,"role","img"),n(e,"width","1em"),n(e,"height","1em"),n(e,"preserveAspectRatio","xMidYMid meet"),n(e,"viewBox","0 0 32 32")},m(r,c){$(r,e,c),s(e,a),s(e,t)},p(r,[c]){c&1&&n(e,"class",r[0])},i:je,o:je,d(r){r&&l(e)}}}function jt(i,e,a){let{classNames:t=""}=e;return i.$$set=r=>{"classNames"in r&&a(0,t=r.classNames)},[t]}class Nt extends Ne{constructor(e){super(),Be(this,e,jt,Tt,Re,{classNames:0})}}function Bt(i){let e,a,t;return{c(){e=be("svg"),a=be("circle"),t=be("path"),this.h()},l(r){e=ve(r,"svg",{class:!0,xmlns:!0,"xmlns:xlink":!0,"aria-hidden":!0,fill:!0,focusable:!0,role:!0,width:!0,height:!0,preserveAspectRatio:!0,viewBox:!0});var c=u(e);a=ve(c,"circle",{class:!0,cx:!0,cy:!0,r:!0,stroke:!0,"stroke-width":!0}),u(a).forEach(l),t=ve(c,"path",{class:!0,fill:!0,d:!0}),u(t).forEach(l),c.forEach(l),this.h()},h(){n(a,"class","opacity-25"),n(a,"cx","12"),n(a,"cy","12"),n(a,"r","10"),n(a,"stroke","currentColor"),n(a,"stroke-width","4"),n(t,"class","opacity-75"),n(t,"fill","currentColor"),n(t,"d","M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"),n(e,"class",i[0]),n(e,"xmlns","http://www.w3.org/2000/svg"),n(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),n(e,"aria-hidden","true"),n(e,"fill","none"),n(e,"focusable","false"),n(e,"role","img"),n(e,"width","1em"),n(e,"height","1em"),n(e,"preserveAspectRatio","xMidYMid meet"),n(e,"viewBox","0 0 24 24")},m(r,c){$(r,e,c),s(e,a),s(e,t)},p(r,[c]){c&1&&n(e,"class",r[0])},i:je,o:je,d(r){r&&l(e)}}}function Rt(i,e,a){let{classNames:t=""}=e;return i.$$set=r=>{"classNames"in r&&a(0,t=r.classNames)},[t]}class Ut extends Ne{constructor(e){super(),Be(this,e,Rt,Bt,Re,{classNames:0})}}function gt(i){let e,a,t,r,c,d,x,I,v;const h=[Ft,Vt],B=[];function j(w,_){return w[1]?0:1}return a=j(i),t=B[a]=h[a](i),{c(){e=m("div"),t.c(),r=V(),c=m("p"),d=E("Share to community"),this.h()},l(w){e=g(w,"DIV",{class:!0});var _=u(e);t.l(_),r=F(_),c=g(_,"P",{class:!0});var p=u(c);d=k(p,"Share to community"),p.forEach(l),_.forEach(l),this.h()},h(){n(c,"class","text-white font-semibold"),n(e,"class","flex items-center justify-center bg-black w-[12.5rem] px-2 py-1 gap-x-2 rounded-full cursor-pointer")},m(w,_){$(w,e,_),B[a].m(e,null),s(e,r),s(e,c),s(c,d),x=!0,I||(v=de(e,"click",i[2]),I=!0)},p(w,_){let p=a;a=j(w),a!==p&&(Je(),ne(B[p],1,1,()=>{B[p]=null}),Xe(),t=B[a],t||(t=B[a]=h[a](w),t.c()),J(t,1),t.m(e,r))},i(w){x||(J(t),x=!0)},o(w){ne(t),x=!1},d(w){w&&l(e),B[a].d(),I=!1,v()}}}function Vt(i){let e,a;return e=new Nt({}),{c(){Qe(e.$$.fragment)},l(t){$e(e.$$.fragment,t)},m(t,r){et(e,t,r),a=!0},i(t){a||(J(e.$$.fragment,t),a=!0)},o(t){ne(e.$$.fragment,t),a=!1},d(t){tt(e,t)}}}function Ft(i){let e,a;return e=new Ut({props:{classNames:"text-white animate-spin"}}),{c(){Qe(e.$$.fragment)},l(t){$e(e.$$.fragment,t)},m(t,r){et(e,t,r),a=!0},i(t){a||(J(e.$$.fragment,t),a=!0)},o(t){ne(e.$$.fragment,t),a=!1},d(t){tt(e,t)}}}function qt(i){let 
e,a,t=i[0]&>(i);return{c(){t&&t.c(),e=pt()},l(r){t&&t.l(r),e=pt()},m(r,c){t&&t.m(r,c),$(r,e,c),a=!0},p(r,[c]){r[0]?t?(t.p(r,c),c&1&&J(t,1)):(t=gt(r),t.c(),J(t,1),t.m(e.parentNode,e)):t&&(Je(),ne(t,1,1,()=>{t=null}),Xe())},i(r){a||(J(t),a=!0)},o(r){ne(t),a=!1},d(r){t&&t.d(r),r&&l(e)}}}function Ot(i,e,a){let{isVisisble:t}=e,{isUploading:r}=e;const c=xt();function d(){r||c("createCommunityPost")}return i.$$set=x=>{"isVisisble"in x&&a(0,t=x.isVisisble),"isUploading"in x&&a(1,r=x.isUploading)},[t,r,d]}class Ht extends Ne{constructor(e){super(),Be(this,e,Ot,qt,Re,{isVisisble:0,isUploading:1})}}const{document:Le,window:wt}=Lt;function bt(i){let e,a,t,r,c,d;return{c(){e=m("div"),a=m("p"),t=E("Loading\u2026"),r=V(),c=m("p"),d=E("\u2588\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592")},l(x){e=g(x,"DIV",{});var I=u(e);a=g(I,"P",{});var v=u(a);t=k(v,"Loading\u2026"),v.forEach(l),r=F(I),c=g(I,"P",{});var h=u(c);d=k(h,"\u2588\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592"),h.forEach(l),I.forEach(l)},m(x,I){$(x,e,I),s(e,a),s(a,t),s(e,r),s(e,c),s(c,d)},d(x){x&&l(e)}}}function vt(i){let e,a,t,r,c,d,x,I,v,h,B,j,w,_,p,U,q,T,y,S,O;return t=new Ht({props:{isUploading:i[2],isVisisble:i[3]}}),t.$on("createCommunityPost",i[14]),{c(){e=m("div"),a=m("div"),Qe(t.$$.fragment),r=V(),c=m("div"),d=m("span"),x=V(),I=m("button"),v=E("diffuse the f rest"),B=V(),j=m("div"),w=m("label"),_=m("input"),p=E(`
- upload img`),U=V(),q=m("p"),T=E("pro tip: upload img by dropping on the canvas"),this.h()},l(M){e=g(M,"DIV",{});var D=u(e);a=g(D,"DIV",{class:!0});var Y=u(a);$e(t.$$.fragment,Y),Y.forEach(l),r=F(D),c=g(D,"DIV",{class:!0});var Z=u(c);d=g(Z,"SPAN",{class:!0,role:!0,contenteditable:!0,style:!0,spellcheck:!0,dir:!0,maxlength:!0}),u(d).forEach(l),x=F(Z),I=g(Z,"BUTTON",{class:!0});var G=u(I);v=k(G,"diffuse the f rest"),G.forEach(l),Z.forEach(l),B=F(D),j=g(D,"DIV",{class:!0});var X=u(j);w=g(X,"LABEL",{class:!0});var ee=u(w);_=g(ee,"INPUT",{accept:!0,style:!0,type:!0}),p=k(ee,`
- upload img`),ee.forEach(l),U=F(X),q=g(X,"P",{class:!0});var se=u(q);T=k(se,"pro tip: upload img by dropping on the canvas"),se.forEach(l),X.forEach(l),D.forEach(l),this.h()},h(){n(a,"class","w-full flex justify-end"),n(d,"class","overflow-auto resize-y py-2 px-3 min-h-[42px] max-h-[500px] !w-[181px] whitespace-pre-wrap inline-block border border-gray-200 shadow-inner outline-none svelte-1wfa7x9"),n(d,"role","textbox"),n(d,"contenteditable",""),mt(d,"--placeholder","'Add prompt'"),n(d,"spellcheck","false"),n(d,"dir","auto"),n(d,"maxlength","200"),i[0]===void 0&&Mt(()=>i[17].call(d)),n(I,"class","bg-blue-500 hover:bg-blue-700 text-white font-bold py-[0.555rem] px-4"),n(c,"class",h="flex gap-x-2 mt-3 items-start justify-center "+(i[1]?"animate-pulse":"")),n(_,"accept","image/*"),mt(_,"display","none"),n(_,"type","file"),n(w,"class","inline border py-1 px-1.5 bg-slate-200 cursor-pointer"),n(q,"class","hidden desktop:inline mt-2 opacity-50"),n(j,"class","mt-4")},m(M,D){$(M,e,D),s(e,a),et(t,a,null),s(e,r),s(e,c),s(c,d),i[0]!==void 0&&(d.textContent=i[0]),s(c,x),s(c,I),s(I,v),s(e,B),s(e,j),s(j,w),s(w,_),i[18](_),s(w,p),s(j,U),s(j,q),s(q,T),y=!0,S||(O=[de(d,"input",i[17]),de(d,"keydown",i[13]),de(I,"click",i[9]),de(_,"change",i[10])],S=!0)},p(M,D){const Y={};D[0]&4&&(Y.isUploading=M[2]),D[0]&8&&(Y.isVisisble=M[3]),t.$set(Y),D[0]&1&&M[0]!==d.textContent&&(d.textContent=M[0]),(!y||D[0]&2&&h!==(h="flex gap-x-2 mt-3 items-start justify-center "+(M[1]?"animate-pulse":"")))&&n(c,"class",h)},i(M){y||(J(t.$$.fragment,M),y=!0)},o(M){ne(t.$$.fragment,M),y=!1},d(M){M&&l(e),tt(t),i[18](null),S=!1,Ct(O)}}}function zt(i){let e,a,t,r,c,d,x,I,v,h,B,j,w,_,p,U,q,T,y,S,O,M,D,Y,Z,G,X,ee,se,K,_e,te,Ce,ye,ae,Ee,ke,re,xe,o,f,b,C,P,R,L,le,W,he,pe,oe,Ie,Ue,De,Ve,Fe,ie,qe,ce,Oe,He,fe,ze,Me,ue,We,at,z=!i[4]&&bt(),N=i[4]&&vt(i);return{c(){e=m("link"),a=m("script"),r=m("script"),d=m("script"),I=V(),v=m("div"),h=m("canvas"),j=V(),w=m("div"),z&&z.c(),_=V(),p=m("div"),U=V(),N&&N.c(),T=V(),y=m("article"),S=m("div"),O=m("p"),M=E("Stable Diffusion model by "),D=m("a"),Y=E("CompVis"),Z=E(" and "),G=m("a"),X=E("Stability AI"),ee=E(" - Demo by \u{1F917} Hugging Face"),se=V(),K=m("p"),_e=E("Powered by "),te=m("a"),Ce=E("\u{1F917} Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch"),ye=E(". Based on "),ae=m("a"),Ee=E("notebook by @psuraj28"),ke=V(),re=m("p"),xe=E("Check out "),o=m("a"),f=E("Stable Diffusion Gradio demo"),b=V(),C=m("h3"),P=E("LICENSE"),R=V(),L=m("p"),le=E("The model is licensed with a "),W=m("a"),he=E("CreativeML Open RAIL-M"),pe=E(" license. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please "),oe=m("a"),Ie=E("read the license"),Ue=V(),De=m("h3"),Ve=E("Biases and content acknowledgment"),Fe=V(),ie=m("p"),qe=E("Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. 
The model was trained on the "),ce=m("a"),Oe=E("LAION-5B dataset"),He=E(", which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the "),fe=m("a"),ze=E("model card"),this.h()},l(A){const H=It('[data-svelte="svelte-bw39ln"]',Le.head);e=g(H,"LINK",{href:!0,rel:!0}),a=g(H,"SCRIPT",{src:!0});var yt=u(a);yt.forEach(l),r=g(H,"SCRIPT",{src:!0});var Et=u(r);Et.forEach(l),d=g(H,"SCRIPT",{src:!0});var kt=u(d);kt.forEach(l),H.forEach(l),I=F(A),v=g(A,"DIV",{class:!0});var Te=u(v);h=g(Te,"CANVAS",{class:!0}),u(h).forEach(l),j=F(Te),w=g(Te,"DIV",{class:!0});var me=u(w);z&&z.l(me),_=F(me),p=g(me,"DIV",{id:!0}),u(p).forEach(l),U=F(me),N&&N.l(me),me.forEach(l),Te.forEach(l),T=F(A),y=g(A,"ARTICLE",{class:!0});var Q=u(y);S=g(Q,"DIV",{class:!0});var ge=u(S);O=g(ge,"P",{});var we=u(O);M=k(we,"Stable Diffusion model by "),D=g(we,"A",{href:!0,rel:!0});var nt=u(D);Y=k(nt,"CompVis"),nt.forEach(l),Z=k(we," and "),G=g(we,"A",{href:!0,rel:!0});var st=u(G);X=k(st,"Stability AI"),st.forEach(l),ee=k(we," - Demo by \u{1F917} Hugging Face"),we.forEach(l),se=F(ge),K=g(ge,"P",{});var Se=u(K);_e=k(Se,"Powered by "),te=g(Se,"A",{href:!0,rel:!0});var rt=u(te);Ce=k(rt,"\u{1F917} Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch"),rt.forEach(l),ye=k(Se,". Based on "),ae=g(Se,"A",{href:!0,rel:!0});var ot=u(ae);Ee=k(ot,"notebook by @psuraj28"),ot.forEach(l),Se.forEach(l),ke=F(ge),re=g(ge,"P",{});var Ye=u(re);xe=k(Ye,"Check out "),o=g(Ye,"A",{href:!0,rel:!0});var it=u(o);f=k(it,"Stable Diffusion Gradio demo"),it.forEach(l),Ye.forEach(l),ge.forEach(l),b=F(Q),C=g(Q,"H3",{});var lt=u(C);P=k(lt,"LICENSE"),lt.forEach(l),R=F(Q),L=g(Q,"P",{});var Pe=u(L);le=k(Pe,"The model is licensed with a "),W=g(Pe,"A",{href:!0,rel:!0});var ct=u(W);he=k(ct,"CreativeML Open RAIL-M"),ct.forEach(l),pe=k(Pe," license. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please "),oe=g(Pe,"A",{href:!0,rel:!0});var ft=u(oe);Ie=k(ft,"read the license"),ft.forEach(l),Pe.forEach(l),Ue=F(Q),De=g(Q,"H3",{});var ut=u(De);Ve=k(ut,"Biases and content acknowledgment"),ut.forEach(l),Fe=F(Q),ie=g(Q,"P",{});var Ae=u(ie);qe=k(Ae,"Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the "),ce=g(Ae,"A",{href:!0,rel:!0});var dt=u(ce);Oe=k(dt,"LAION-5B dataset"),dt.forEach(l),He=k(Ae,", which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. 
You can read more in the "),fe=g(Ae,"A",{href:!0,rel:!0});var ht=u(fe);ze=k(ht,"model card"),ht.forEach(l),Ae.forEach(l),Q.forEach(l),this.h()},h(){n(e,"href","https://cdnjs.cloudflare.com/ajax/libs/drawingboard.js/0.4.2/drawingboard.css"),n(e,"rel","stylesheet"),Ge(a.src,t="https://code.jquery.com/jquery-1.12.4.min.js")||n(a,"src",t),Ge(r.src,c="https://cdnjs.cloudflare.com/ajax/libs/drawingboard.js/0.4.2/drawingboard.min.js")||n(r,"src",c),Ge(d.src,x="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js")||n(d,"src",x),n(h,"class",B="border-[1.2px] desktop:mt-[34px] "+(i[8]?"":"hidden")),n(p,"id","board-container"),n(w,"class",q="flex flex-col items-center "+(i[1]?"pointer-events-none":"")),n(v,"class","flex flex-wrap gap-x-4 gap-y-2 justify-center my-8"),n(D,"href","https://huggingface.co/CompVis"),n(D,"rel","nofollow"),n(G,"href","https://huggingface.co/stabilityai"),n(G,"rel","nofollow"),n(te,"href","https://github.com/huggingface/diffusers"),n(te,"rel","nofollow"),n(ae,"href","https://twitter.com/psuraj28/status/1562039265126670339"),n(ae,"rel","nofollow"),n(o,"href","https://huggingface.co/spaces/stabilityai/stable-diffusion"),n(o,"rel","nofollow"),n(S,"class","text-center"),n(W,"href","https://huggingface.co/spaces/CompVis/stable-diffusion-license"),n(W,"rel","nofollow"),n(oe,"href","https://huggingface.co/spaces/CompVis/stable-diffusion-license"),n(oe,"rel","nofollow"),n(ce,"href","https://laion.ai/blog/laion-5b/"),n(ce,"rel","nofollow"),n(fe,"href","https://huggingface.co/CompVis/stable-diffusion-v1-4"),n(fe,"rel","nofollow"),n(y,"class",Me="prose-sm px-4 md:px-12 lg:px-56 mb-8 "+(i[4]?"":"hidden"))},m(A,H){s(Le.head,e),s(Le.head,a),s(Le.head,r),s(Le.head,d),$(A,I,H),$(A,v,H),s(v,h),i[15](h),s(v,j),s(v,w),z&&z.m(w,null),s(w,_),s(w,p),i[16](p),s(w,U),N&&N.m(w,null),$(A,T,H),$(A,y,H),s(y,S),s(S,O),s(O,M),s(O,D),s(D,Y),s(O,Z),s(O,G),s(G,X),s(O,ee),s(S,se),s(S,K),s(K,_e),s(K,te),s(te,Ce),s(K,ye),s(K,ae),s(ae,Ee),s(S,ke),s(S,re),s(re,xe),s(re,o),s(o,f),s(y,b),s(y,C),s(C,P),s(y,R),s(y,L),s(L,le),s(L,W),s(W,he),s(L,pe),s(L,oe),s(oe,Ie),s(y,Ue),s(y,De),s(De,Ve),s(y,Fe),s(y,ie),s(ie,qe),s(ie,ce),s(ce,Oe),s(ie,He),s(ie,fe),s(fe,ze),ue=!0,We||(at=[de(wt,"drop",Dt(St(i[11]))),de(wt,"paste",i[12])],We=!0)},p(A,H){(!ue||H[0]&256&&B!==(B="border-[1.2px] desktop:mt-[34px] "+(A[8]?"":"hidden")))&&n(h,"class",B),A[4]?z&&(z.d(1),z=null):z||(z=bt(),z.c(),z.m(w,_)),A[4]?N?(N.p(A,H),H[0]&16&&J(N,1)):(N=vt(A),N.c(),J(N,1),N.m(w,null)):N&&(Je(),ne(N,1,1,()=>{N=null}),Xe()),(!ue||H[0]&2&&q!==(q="flex flex-col items-center "+(A[1]?"pointer-events-none":"")))&&n(w,"class",q),(!ue||H[0]&16&&Me!==(Me="prose-sm px-4 md:px-12 lg:px-56 mb-8 "+(A[4]?"":"hidden")))&&n(y,"class",Me)},i(A){ue||(J(N),ue=!0)},o(A){ne(N),ue=!1},d(A){l(e),l(a),l(r),l(d),A&&l(I),A&&l(v),i[15](null),z&&z.d(),i[16](null),N&&N.d(),A&&l(T),A&&l(y),We=!1,Ct(at)}}}const Ze=500,_t=3e3;function Wt(){window.createImageBitmap=async function(i){return new Promise((e,a)=>{const t=document.createElement("canvas"),r=t.getContext("2d");t.width=i.width,t.height=i.height,r.putImageData(i,0,0);const c=t.toDataURL(),d=document.createElement("img");d.addEventListener("load",()=>{e(d)}),d.src=c})}}function Yt(){const i=document.querySelectorAll("a");for(const e of i)e.target="_blank"}async function Gt(i){return await(await fetch("https://huggingface.co/uploads",{method:"POST",headers:{"Content-Type":i.type,"X-Requested-With":"XMLHttpRequest"},body:i})).text()}function Kt(i,e,a){let 
t="",r=!1,c=!1,d=!1,x=!1,I,v,h,B,j,w,_,p=400,U,q,T,y=!1,S=[],O;async function M(){if(!h)return;const o=h.createImageData(v.width,v.height),f=o.data;for(let P=0,R=f.length;PD(o))}async function Y(o){const f=o.toDataURL("png"),C=await(await fetch(f)).blob(),P=new File([C],"canvas shot.png",{type:"image/png"}),R=o.getContext("2d").getImageData(0,0,p,p),L=await createImageBitmap(R);return{imgFile:P,imgBitmap:L}}async function Z(){if(!t)return alert("Please add prompt");if(!v||!h)return;_&&clearInterval(_),a(1,r=!0),a(8,y=!1),a(3,x=!1),se(),B=performance.now(),M();const{imgFile:o,imgBitmap:f}=await Y(v),b=new FormData;b.append("prompt",t),b.append("strength","0.85"),b.append("image",o);try{const C=await fetch("https://sdb.pcuenca.net/i2i",{method:"POST",body:b}),P=JSON.parse(await C.text()),{images:R}=P;if(!R.length)return alert("All the results were flagged. Please try again with diffeerent sketch + prompt");S=await Promise.all(R.map(async le=>{const W=new Image;return W.src=`data:image/png;base64, ${le}`,await new Promise((he,pe)=>{W.onload=()=>he(W)}),W})),S.push(f),O={sketch:o,generations:await Promise.all(R.map(async le=>{const W=`data:image/jpeg;base64, ${le}`,pe=await(await fetch(W)).blob(),Ie=`diffuse-the-rest-${Date.now()%200}.jpeg`;return new File([pe],Ie,{type:"image/jpeg"})}))},a(8,y=!0);let L=0;j=performance.now(),D(S[L%S.length]),w=()=>{_&&clearInterval(_),j=performance.now(),L=L+1,D(S[L%S.length])},_=setInterval(()=>{L=L+1,j=performance.now(),D(S[L%S.length])},2500),d||G(),a(3,x=!0)}catch(C){console.error(C),alert("Error happened, queue might be full. Please try again in a bit :)")}finally{a(1,r=!1)}}function G(){const o=document.createElement("div");o.className="drawing-board-control";const f=document.createElement("button");f.innerHTML="\u23EF",f.onclick=w,o.append(f);const b=document.querySelector(".drawing-board-controls");b&&S.length>1&&(b.appendChild(o),d=!0,a(5,U.onclick=()=>{_&&clearInterval(_)},U))}function X(){const o=document.createElement("div");o.className="drawing-board-control";const f=document.createElement("button");f.innerHTML="\u{1F9F9}",f.onclick=()=>{h==null||h.clearRect(0,0,p,p),S=[],a(8,y=!1)},o.append(f);const b=document.querySelector(".drawing-board-controls");b&&b.appendChild(o)}function ee(){const o=document.createElement("div");o.className="drawing-board-control";const f=document.createElement("button");f.innerHTML="\u2B07\uFE0F",f.onclick=()=>{if(!v)return;const C=document.createElement("a"),P=Date.now()%200;C.download=`diffuse-the-rest-${P}.png`,C.href=v.toDataURL(),C.click()},o.append(f);const b=document.querySelector(".drawing-board-controls");b&&b.appendChild(o)}function se(){const o=T.getContext("2d");a(7,T.width=v.width,T),a(7,T.height=v.height,T),o.drawImage(v,0,0)}async function K(o){_&&clearInterval(_);const f=new Image;f.src=URL.createObjectURL(o),await new Promise((P,R)=>{f.onload=()=>P(f)});const{width:b,height:C}=f;if(b==C)h==null||h.drawImage(f,0,0,b,C,0,0,p,p);else if(b>C){const P=Math.floor(p*C/b),R=Math.floor((p-P)/2);h==null||h.drawImage(f,0,0,b,C,0,R,p,P)}else{const P=Math.floor(p*b/C),R=Math.floor((p-P)/2);h==null||h.drawImage(f,0,0,b,C,R,0,P,p)}}function _e(){var f;const o=(f=q.files)==null?void 0:f[0];o&&K(o)}function te(o){var C;if(!((C=o.dataTransfer)!=null&&C.files))return;o.preventDefault();const b=Array.from(o.dataTransfer.files)[0];K(b)}function Ce(o){if(!o.clipboardData)return;const f=Array.from(o.clipboardData.files);if(f.length===0)return;o.preventDefault();const b=f[0];K(b)}function ye(o){if(r)return 
o.preventDefault();o.code==="Enter"&&(o.preventDefault(),Z())}async function ae(){a(2,c=!0);const o=[O.sketch,...O.generations];console.log(o);const b=(await Promise.all(o.map(L=>Gt(L)))).map(L=>`
`),C=`#### Prompt:
-${t}
-
-#### Sketch:
-
-${b[0]}
-
-
-#### Generations:
-
-${b.slice(1).join(`
-`)}
-`,R=new URLSearchParams({title:t,description:C}).toString();window.open(`https://huggingface.co/spaces/huggingface-projects/diffuse-the-rest/discussions/new?${R}`,"_blank"),a(2,c=!1)}Pt(async()=>{typeof createImageBitmap>"u"&&Wt();const{innerWidth:o}=window;p=Math.min(p,Math.floor(o*.75)),a(5,U.style.width=`${p}px`,U),a(5,U.style.height=`${p}px`,U),a(7,T.style.width=`${p}px`,T),a(7,T.style.height=`${p}px`,T),await At(),I=new window.DrawingBoard.Board("board-container",{size:10,controls:["Color",{Size:{type:"dropdown"}},{DrawingMode:{filler:!1}}],webStorage:!1,enlargeYourContainer:!0}),a(4,v=I.canvas),h=v.getContext("2d"),a(4,v.ondragover=function(f){return f.preventDefault(),!1},v),X(),ee(),Yt()});function Ee(o){Ke[o?"unshift":"push"](()=>{T=o,a(7,T)})}function ke(o){Ke[o?"unshift":"push"](()=>{U=o,a(5,U)})}function re(){t=this.textContent,a(0,t)}function xe(o){Ke[o?"unshift":"push"](()=>{q=o,a(6,q)})}return[t,r,c,x,v,U,q,T,y,Z,_e,te,Ce,ye,ae,Ee,ke,re,xe]}class Jt extends Ne{constructor(e){super(),Be(this,e,Kt,zt,Re,{},null,[-1,-1])}}export{Jt as default};
diff --git a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/FliImagePlugin.py b/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/FliImagePlugin.py
deleted file mode 100644
index f4e89a03e0263bc6c1d318b379fdcfe7f61f8588..0000000000000000000000000000000000000000
--- a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/PIL/FliImagePlugin.py
+++ /dev/null
@@ -1,171 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# FLI/FLC file handling.
-#
-# History:
-# 95-09-01 fl Created
-# 97-01-03 fl Fixed parser, setup decoder tile
-# 98-07-15 fl Renamed offset attribute to avoid name clash
-#
-# Copyright (c) Secret Labs AB 1997-98.
-# Copyright (c) Fredrik Lundh 1995-97.
-#
-# See the README file for information on usage and redistribution.
-#
-
-import os
-
-from . import Image, ImageFile, ImagePalette
-from ._binary import i16le as i16
-from ._binary import i32le as i32
-from ._binary import o8
-
-#
-# decoder
-
-
-def _accept(prefix):
- return (
- len(prefix) >= 6
- and i16(prefix, 4) in [0xAF11, 0xAF12]
- and i16(prefix, 14) in [0, 3] # flags
- )
-
-
-##
-# Image plugin for the FLI/FLC animation format. Use the seek
-# method to load individual frames.
-
-
-class FliImageFile(ImageFile.ImageFile):
- format = "FLI"
- format_description = "Autodesk FLI/FLC Animation"
- _close_exclusive_fp_after_loading = False
-
- def _open(self):
- # HEAD
- s = self.fp.read(128)
- if not (_accept(s) and s[20:22] == b"\x00\x00"):
- msg = "not an FLI/FLC file"
- raise SyntaxError(msg)
-
- # frames
- self.n_frames = i16(s, 6)
- self.is_animated = self.n_frames > 1
-
- # image characteristics
- self.mode = "P"
- self._size = i16(s, 8), i16(s, 10)
-
- # animation speed
- duration = i32(s, 16)
- magic = i16(s, 4)
- if magic == 0xAF11:
- duration = (duration * 1000) // 70
- self.info["duration"] = duration
-
- # look for palette
- palette = [(a, a, a) for a in range(256)]
-
- s = self.fp.read(16)
-
- self.__offset = 128
-
- if i16(s, 4) == 0xF100:
- # prefix chunk; ignore it
- self.__offset = self.__offset + i32(s)
- s = self.fp.read(16)
-
- if i16(s, 4) == 0xF1FA:
- # look for palette chunk
- number_of_subchunks = i16(s, 6)
- chunk_size = None
- for _ in range(number_of_subchunks):
- if chunk_size is not None:
- self.fp.seek(chunk_size - 6, os.SEEK_CUR)
- s = self.fp.read(6)
- chunk_type = i16(s, 4)
- if chunk_type in (4, 11):
- self._palette(palette, 2 if chunk_type == 11 else 0)
- break
- chunk_size = i32(s)
- if not chunk_size:
- break
-
- palette = [o8(r) + o8(g) + o8(b) for (r, g, b) in palette]
- self.palette = ImagePalette.raw("RGB", b"".join(palette))
-
- # set things up to decode first frame
- self.__frame = -1
- self._fp = self.fp
- self.__rewind = self.fp.tell()
- self.seek(0)
-
- def _palette(self, palette, shift):
- # load palette
-
- i = 0
- for e in range(i16(self.fp.read(2))):
- s = self.fp.read(2)
- i = i + s[0]
- n = s[1]
- if n == 0:
- n = 256
- s = self.fp.read(n * 3)
- for n in range(0, len(s), 3):
- r = s[n] << shift
- g = s[n + 1] << shift
- b = s[n + 2] << shift
- palette[i] = (r, g, b)
- i += 1
-
- def seek(self, frame):
- if not self._seek_check(frame):
- return
- if frame < self.__frame:
- self._seek(0)
-
- for f in range(self.__frame + 1, frame + 1):
- self._seek(f)
-
- def _seek(self, frame):
- if frame == 0:
- self.__frame = -1
- self._fp.seek(self.__rewind)
- self.__offset = 128
- else:
- # ensure that the previous frame was loaded
- self.load()
-
- if frame != self.__frame + 1:
- msg = f"cannot seek to frame {frame}"
- raise ValueError(msg)
- self.__frame = frame
-
- # move to next frame
- self.fp = self._fp
- self.fp.seek(self.__offset)
-
- s = self.fp.read(4)
- if not s:
- raise EOFError
-
- framesize = i32(s)
-
- self.decodermaxblock = framesize
- self.tile = [("fli", (0, 0) + self.size, self.__offset, None)]
-
- self.__offset += framesize
-
- def tell(self):
- return self.__frame
-
-
-#
-# registry
-
-Image.register_open(FliImageFile.format, FliImageFile, _accept)
-
-Image.register_extensions(FliImageFile.format, [".fli", ".flc"])
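-
-# A short, commented-out usage sketch: once this plugin is registered, frames can be read
-# through Pillow's normal Image API. "animation.fli" is a placeholder filename.
-#
-#   from PIL import Image, ImageSequence
-#
-#   with Image.open("animation.fli") as im:
-#       print(im.n_frames, "frames,", im.info.get("duration"), "ms per frame")
-#       for frame in ImageSequence.Iterator(im):
-#           frame.convert("RGB").save(f"frame_{im.tell():03d}.png")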
diff --git a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/aiohttp/http.py b/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/aiohttp/http.py
deleted file mode 100644
index ca9dc54b215f7977970658250f23e3be137f1b3e..0000000000000000000000000000000000000000
--- a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/aiohttp/http.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import http.server
-import sys
-from typing import Mapping, Tuple
-
-from . import __version__
-from .http_exceptions import HttpProcessingError as HttpProcessingError
-from .http_parser import (
- HeadersParser as HeadersParser,
- HttpParser as HttpParser,
- HttpRequestParser as HttpRequestParser,
- HttpResponseParser as HttpResponseParser,
- RawRequestMessage as RawRequestMessage,
- RawResponseMessage as RawResponseMessage,
-)
-from .http_websocket import (
- WS_CLOSED_MESSAGE as WS_CLOSED_MESSAGE,
- WS_CLOSING_MESSAGE as WS_CLOSING_MESSAGE,
- WS_KEY as WS_KEY,
- WebSocketError as WebSocketError,
- WebSocketReader as WebSocketReader,
- WebSocketWriter as WebSocketWriter,
- WSCloseCode as WSCloseCode,
- WSMessage as WSMessage,
- WSMsgType as WSMsgType,
- ws_ext_gen as ws_ext_gen,
- ws_ext_parse as ws_ext_parse,
-)
-from .http_writer import (
- HttpVersion as HttpVersion,
- HttpVersion10 as HttpVersion10,
- HttpVersion11 as HttpVersion11,
- StreamWriter as StreamWriter,
-)
-
-__all__ = (
- "HttpProcessingError",
- "RESPONSES",
- "SERVER_SOFTWARE",
- # .http_writer
- "StreamWriter",
- "HttpVersion",
- "HttpVersion10",
- "HttpVersion11",
- # .http_parser
- "HeadersParser",
- "HttpParser",
- "HttpRequestParser",
- "HttpResponseParser",
- "RawRequestMessage",
- "RawResponseMessage",
- # .http_websocket
- "WS_CLOSED_MESSAGE",
- "WS_CLOSING_MESSAGE",
- "WS_KEY",
- "WebSocketReader",
- "WebSocketWriter",
- "ws_ext_gen",
- "ws_ext_parse",
- "WSMessage",
- "WebSocketError",
- "WSMsgType",
- "WSCloseCode",
-)
-
-
-SERVER_SOFTWARE: str = "Python/{0[0]}.{0[1]} aiohttp/{1}".format(
- sys.version_info, __version__
-)
-
-RESPONSES: Mapping[int, Tuple[str, str]] = http.server.BaseHTTPRequestHandler.responses
diff --git a/spaces/caslabs/sanity-test-midi/README.md b/spaces/caslabs/sanity-test-midi/README.md
deleted file mode 100644
index 9d7e0b1b79a19256a93b245e07849038b71904e5..0000000000000000000000000000000000000000
--- a/spaces/caslabs/sanity-test-midi/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Sanity Test Midi
-emoji: 🚀
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/charles0519/ChuanhuChatGPT/custom.css b/spaces/charles0519/ChuanhuChatGPT/custom.css
deleted file mode 100644
index 5143eb138ea2469d8c457c71cb210fd3fb7cbe15..0000000000000000000000000000000000000000
--- a/spaces/charles0519/ChuanhuChatGPT/custom.css
+++ /dev/null
@@ -1,162 +0,0 @@
-:root {
- --chatbot-color-light: #F3F3F3;
- --chatbot-color-dark: #121111;
-}
-
-/* status_display */
-#status_display {
- display: flex;
- min-height: 2.5em;
- align-items: flex-end;
- justify-content: flex-end;
-}
-#status_display p {
- font-size: .85em;
- font-family: monospace;
- color: var(--body-text-color-subdued);
-}
-
-#chuanhu_chatbot, #status_display {
- transition: all 0.6s;
-}
-/* list */
-ol:not(.options), ul:not(.options) {
- padding-inline-start: 2em !important;
-}
-
-/* light theme */
-#chuanhu_chatbot {
- background-color: var(--chatbot-color-light) !important;
-}
-[data-testid = "bot"] {
- background-color: #FFFFFF !important;
-}
-[data-testid = "user"] {
- background-color: #95EC69 !important;
-}
-/* chat bubbles */
-[class *= "message"] {
- border-radius: var(--radius-xl) !important;
- border: none;
- padding: var(--spacing-xl) !important;
- font-size: var(--text-md) !important;
- line-height: var(--line-md) !important;
- min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
- min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
-}
-[data-testid = "bot"] {
- max-width: 85%;
- border-bottom-left-radius: 0 !important;
-}
-[data-testid = "user"] {
- max-width: 85%;
- width: auto !important;
- border-bottom-right-radius: 0 !important;
-}
-/* tables */
-table {
- margin: 1em 0;
- border-collapse: collapse;
- empty-cells: show;
-}
-td,th {
- border: 1.2px solid var(--border-color-primary) !important;
- padding: 0.2em;
-}
-thead {
- background-color: rgba(175,184,193,0.2);
-}
-thead th {
- padding: .5em .2em;
-}
-/* inline code */
-code {
- display: inline;
- white-space: break-spaces;
- border-radius: 6px;
- margin: 0 2px 0 2px;
- padding: .2em .4em .1em .4em;
- background-color: rgba(175,184,193,0.2);
-}
-/* code blocks */
-pre code {
- display: block;
- overflow: auto;
- white-space: pre;
- background-color: hsla(0, 0%, 0%, 80%)!important;
- border-radius: 10px;
- padding: 1.4em 1.2em 0em 1.4em;
- margin: 1.2em 2em 1.2em 0.5em;
- color: #FFF;
- box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
-}
-/* syntax highlighting styles */
-.highlight .hll { background-color: #49483e }
-.highlight .c { color: #75715e } /* Comment */
-.highlight .err { color: #960050; background-color: #1e0010 } /* Error */
-.highlight .k { color: #66d9ef } /* Keyword */
-.highlight .l { color: #ae81ff } /* Literal */
-.highlight .n { color: #f8f8f2 } /* Name */
-.highlight .o { color: #f92672 } /* Operator */
-.highlight .p { color: #f8f8f2 } /* Punctuation */
-.highlight .ch { color: #75715e } /* Comment.Hashbang */
-.highlight .cm { color: #75715e } /* Comment.Multiline */
-.highlight .cp { color: #75715e } /* Comment.Preproc */
-.highlight .cpf { color: #75715e } /* Comment.PreprocFile */
-.highlight .c1 { color: #75715e } /* Comment.Single */
-.highlight .cs { color: #75715e } /* Comment.Special */
-.highlight .gd { color: #f92672 } /* Generic.Deleted */
-.highlight .ge { font-style: italic } /* Generic.Emph */
-.highlight .gi { color: #a6e22e } /* Generic.Inserted */
-.highlight .gs { font-weight: bold } /* Generic.Strong */
-.highlight .gu { color: #75715e } /* Generic.Subheading */
-.highlight .kc { color: #66d9ef } /* Keyword.Constant */
-.highlight .kd { color: #66d9ef } /* Keyword.Declaration */
-.highlight .kn { color: #f92672 } /* Keyword.Namespace */
-.highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
-.highlight .kr { color: #66d9ef } /* Keyword.Reserved */
-.highlight .kt { color: #66d9ef } /* Keyword.Type */
-.highlight .ld { color: #e6db74 } /* Literal.Date */
-.highlight .m { color: #ae81ff } /* Literal.Number */
-.highlight .s { color: #e6db74 } /* Literal.String */
-.highlight .na { color: #a6e22e } /* Name.Attribute */
-.highlight .nb { color: #f8f8f2 } /* Name.Builtin */
-.highlight .nc { color: #a6e22e } /* Name.Class */
-.highlight .no { color: #66d9ef } /* Name.Constant */
-.highlight .nd { color: #a6e22e } /* Name.Decorator */
-.highlight .ni { color: #f8f8f2 } /* Name.Entity */
-.highlight .ne { color: #a6e22e } /* Name.Exception */
-.highlight .nf { color: #a6e22e } /* Name.Function */
-.highlight .nl { color: #f8f8f2 } /* Name.Label */
-.highlight .nn { color: #f8f8f2 } /* Name.Namespace */
-.highlight .nx { color: #a6e22e } /* Name.Other */
-.highlight .py { color: #f8f8f2 } /* Name.Property */
-.highlight .nt { color: #f92672 } /* Name.Tag */
-.highlight .nv { color: #f8f8f2 } /* Name.Variable */
-.highlight .ow { color: #f92672 } /* Operator.Word */
-.highlight .w { color: #f8f8f2 } /* Text.Whitespace */
-.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
-.highlight .mf { color: #ae81ff } /* Literal.Number.Float */
-.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
-.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
-.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
-.highlight .sa { color: #e6db74 } /* Literal.String.Affix */
-.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
-.highlight .sc { color: #e6db74 } /* Literal.String.Char */
-.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
-.highlight .sd { color: #e6db74 } /* Literal.String.Doc */
-.highlight .s2 { color: #e6db74 } /* Literal.String.Double */
-.highlight .se { color: #ae81ff } /* Literal.String.Escape */
-.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
-.highlight .si { color: #e6db74 } /* Literal.String.Interpol */
-.highlight .sx { color: #e6db74 } /* Literal.String.Other */
-.highlight .sr { color: #e6db74 } /* Literal.String.Regex */
-.highlight .s1 { color: #e6db74 } /* Literal.String.Single */
-.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
-.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
-.highlight .fm { color: #a6e22e } /* Name.Function.Magic */
-.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
-.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
-.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
-.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
-.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
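The `.highlight` rules above style the token classes emitted by Pygments' HTML formatter (`.k` keywords, `.s` strings, `.c` comments, and so on). A minimal sketch of how such markup is typically produced, assuming the Pygments package is installed; the snippet and CSS class name are illustrative and not taken from this Space's code:

```python
# Minimal sketch: generate HTML whose spans carry the ".highlight" token classes
# styled in the stylesheet above. Requires the third-party Pygments package.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

source = 'print("hello")'
html = highlight(source, PythonLexer(), HtmlFormatter(cssclass="highlight"))
print(html)  # e.g. <div class="highlight"><pre>...<span class="nb">print</span>...</pre></div>
```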
diff --git a/spaces/chasemcdo/hf_localai/api/api.go b/spaces/chasemcdo/hf_localai/api/api.go
deleted file mode 100644
index e4aac2fb16deab3f82097719535e520a06d3b2b0..0000000000000000000000000000000000000000
--- a/spaces/chasemcdo/hf_localai/api/api.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package api
-
-import (
- "errors"
-
- "github.com/go-skynet/LocalAI/internal"
- "github.com/go-skynet/LocalAI/pkg/assets"
- "github.com/gofiber/fiber/v2"
- "github.com/gofiber/fiber/v2/middleware/cors"
- "github.com/gofiber/fiber/v2/middleware/logger"
- "github.com/gofiber/fiber/v2/middleware/recover"
- "github.com/rs/zerolog"
- "github.com/rs/zerolog/log"
-)
-
-func App(opts ...AppOption) (*fiber.App, error) {
- options := newOptions(opts...)
-
- zerolog.SetGlobalLevel(zerolog.InfoLevel)
- if options.debug {
- zerolog.SetGlobalLevel(zerolog.DebugLevel)
- }
-
- // Return errors as JSON responses
- app := fiber.New(fiber.Config{
- BodyLimit: options.uploadLimitMB * 1024 * 1024, // Fiber's default limit is 4MB; override it with the configured upload limit
- DisableStartupMessage: options.disableMessage,
- // Override default error handler
- ErrorHandler: func(ctx *fiber.Ctx, err error) error {
- // Status code defaults to 500
- code := fiber.StatusInternalServerError
-
- // Retrieve the custom status code if it's a *fiber.Error
- var e *fiber.Error
- if errors.As(err, &e) {
- code = e.Code
- }
-
- // Send custom error page
- return ctx.Status(code).JSON(
- ErrorResponse{
- Error: &APIError{Message: err.Error(), Code: code},
- },
- )
- },
- })
-
- if options.debug {
- app.Use(logger.New(logger.Config{
- Format: "[${ip}]:${port} ${status} - ${method} ${path}\n",
- }))
- }
-
- cm := NewConfigMerger()
- if err := cm.LoadConfigs(options.loader.ModelPath); err != nil {
- log.Error().Msgf("error loading config files: %s", err.Error())
- }
-
- if options.configFile != "" {
- if err := cm.LoadConfigFile(options.configFile); err != nil {
- log.Error().Msgf("error loading config file: %s", err.Error())
- }
- }
-
- if options.debug {
- for _, v := range cm.ListConfigs() {
- cfg, _ := cm.GetConfig(v)
- log.Debug().Msgf("Model: %s (config: %+v)", v, cfg)
- }
- }
-
- if options.assetsDestination != "" {
- // Extract files from the embedded FS
- err := assets.ExtractFiles(options.backendAssets, options.assetsDestination)
- if err != nil {
- log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly, like gpt4all)", err)
- }
- }
-
- // Default middleware config
- app.Use(recover.New())
-
- if options.preloadJSONModels != "" {
- if err := ApplyGalleryFromString(options.loader.ModelPath, options.preloadJSONModels, cm, options.galleries); err != nil {
- return nil, err
- }
- }
-
- if options.preloadModelsFromPath != "" {
- if err := ApplyGalleryFromFile(options.loader.ModelPath, options.preloadModelsFromPath, cm, options.galleries); err != nil {
- return nil, err
- }
- }
-
- if options.cors {
- if options.corsAllowOrigins == "" {
- app.Use(cors.New())
- } else {
- app.Use(cors.New(cors.Config{
- AllowOrigins: options.corsAllowOrigins,
- }))
- }
- }
-
- // LocalAI API endpoints
- applier := newGalleryApplier(options.loader.ModelPath)
- applier.start(options.context, cm)
-
- app.Get("/version", func(c *fiber.Ctx) error {
- return c.JSON(struct {
- Version string `json:"version"`
- }{Version: internal.PrintableVersion()})
- })
-
- app.Post("/models/apply", applyModelGallery(options.loader.ModelPath, cm, applier.C, options.galleries))
- app.Get("/models/available", listModelFromGallery(options.galleries, options.loader.ModelPath))
- app.Get("/models/jobs/:uuid", getOpStatus(applier))
-
- // OpenAI-compatible API endpoints
-
- // chat
- app.Post("/v1/chat/completions", chatEndpoint(cm, options))
- app.Post("/chat/completions", chatEndpoint(cm, options))
-
- // edit
- app.Post("/v1/edits", editEndpoint(cm, options))
- app.Post("/edits", editEndpoint(cm, options))
-
- // completion
- app.Post("/v1/completions", completionEndpoint(cm, options))
- app.Post("/completions", completionEndpoint(cm, options))
- app.Post("/v1/engines/:model/completions", completionEndpoint(cm, options))
-
- // embeddings
- app.Post("/v1/embeddings", embeddingsEndpoint(cm, options))
- app.Post("/embeddings", embeddingsEndpoint(cm, options))
- app.Post("/v1/engines/:model/embeddings", embeddingsEndpoint(cm, options))
-
- // audio
- app.Post("/v1/audio/transcriptions", transcriptEndpoint(cm, options))
- app.Post("/tts", ttsEndpoint(cm, options))
-
- // images
- app.Post("/v1/images/generations", imageEndpoint(cm, options))
-
- if options.imageDir != "" {
- app.Static("/generated-images", options.imageDir)
- }
-
- if options.audioDir != "" {
- app.Static("/generated-audio", options.audioDir)
- }
-
- ok := func(c *fiber.Ctx) error {
- return c.SendStatus(200)
- }
-
- // Kubernetes health checks
- app.Get("/healthz", ok)
- app.Get("/readyz", ok)
-
- // models
- app.Get("/v1/models", listModels(options.loader, cm))
- app.Get("/models", listModels(options.loader, cm))
-
- return app, nil
-}
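For context, a hypothetical client for the OpenAI-compatible routes registered above. The base URL, model name, and payload are assumptions for illustration only and are not defined anywhere in this repository:

```python
# Hypothetical client exercising the routes mounted by App(): /v1/chat/completions
# and the Kubernetes-style health check. Assumes a LocalAI server on localhost:8080
# with a model called "ggml-gpt4all-j" already configured; both names are made up here.
import requests

BASE = "http://localhost:8080"

resp = requests.post(
    f"{BASE}/v1/chat/completions",
    json={
        "model": "ggml-gpt4all-j",
        "messages": [{"role": "user", "content": "Hello"}],
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json())

# The health-check handlers simply return HTTP 200.
print(requests.get(f"{BASE}/healthz", timeout=5).status_code)
```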
diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/jax-projects/big_bird/bigbird_flax.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/jax-projects/big_bird/bigbird_flax.py
deleted file mode 100644
index af5e11c83a6ad2f4f2afa55f316c4e06b493b351..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/research_projects/jax-projects/big_bird/bigbird_flax.py
+++ /dev/null
@@ -1,323 +0,0 @@
-import json
-import os
-from dataclasses import dataclass
-from functools import partial
-from typing import Callable
-
-import flax.linen as nn
-import jax
-import jax.numpy as jnp
-import joblib
-import optax
-import wandb
-from flax import jax_utils, struct, traverse_util
-from flax.serialization import from_bytes, to_bytes
-from flax.training import train_state
-from flax.training.common_utils import shard
-from tqdm.auto import tqdm
-
-from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
-from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
-
-
-class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
- """
- BigBirdForQuestionAnswering with a CLS head on top for predicting the answer category
-
- Defined this way, its weights can still be loaded with FlaxBigBirdForQuestionAnswering
- """
-
- config: BigBirdConfig
- dtype: jnp.dtype = jnp.float32
- add_pooling_layer: bool = True
-
- def setup(self):
- super().setup()
- self.cls = nn.Dense(5, dtype=self.dtype)
-
- def __call__(self, *args, **kwargs):
- outputs = super().__call__(*args, **kwargs)
- cls_out = self.cls(outputs[2])
- return outputs[:2] + (cls_out,)
-
-
-class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
- module_class = FlaxBigBirdForNaturalQuestionsModule
-
-
-def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
- def cross_entropy(logits, labels, reduction=None):
- """
- Args:
- logits: bsz, seqlen, vocab_size
- labels: bsz, seqlen
- """
- vocab_size = logits.shape[-1]
- labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
- logits = jax.nn.log_softmax(logits, axis=-1)
- loss = -jnp.sum(labels * logits, axis=-1)
- if reduction is not None:
- loss = reduction(loss)
- return loss
-
- cross_entropy = partial(cross_entropy, reduction=jnp.mean)
- start_loss = cross_entropy(start_logits, start_labels)
- end_loss = cross_entropy(end_logits, end_labels)
- pooled_loss = cross_entropy(pooled_logits, pooler_labels)
- return (start_loss + end_loss + pooled_loss) / 3
-
-
-@dataclass
-class Args:
- model_id: str = "google/bigbird-roberta-base"
- logging_steps: int = 3000
- save_steps: int = 10500
-
- block_size: int = 128
- num_random_blocks: int = 3
-
- batch_size_per_device: int = 1
- max_epochs: int = 5
-
- # tx_args
- lr: float = 3e-5
- init_lr: float = 0.0
- warmup_steps: int = 20000
- weight_decay: float = 0.0095
-
- save_dir: str = "bigbird-roberta-natural-questions"
- base_dir: str = "training-expt"
- tr_data_path: str = "data/nq-training.jsonl"
- val_data_path: str = "data/nq-validation.jsonl"
-
- def __post_init__(self):
- os.makedirs(self.base_dir, exist_ok=True)
- self.save_dir = os.path.join(self.base_dir, self.save_dir)
- self.batch_size = self.batch_size_per_device * jax.device_count()
-
-
-@dataclass
-class DataCollator:
- pad_id: int
- max_length: int = 4096 # no dynamic padding on TPUs
-
- def __call__(self, batch):
- batch = self.collate_fn(batch)
- batch = jax.tree_util.tree_map(shard, batch)
- return batch
-
- def collate_fn(self, features):
- input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
- batch = {
- "input_ids": jnp.array(input_ids, dtype=jnp.int32),
- "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
- "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
- "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
- "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
- }
- return batch
-
- def fetch_inputs(self, input_ids: list):
- inputs = [self._fetch_inputs(ids) for ids in input_ids]
- return zip(*inputs)
-
- def _fetch_inputs(self, input_ids: list):
- attention_mask = [1 for _ in range(len(input_ids))]
- while len(input_ids) < self.max_length:
- input_ids.append(self.pad_id)
- attention_mask.append(0)
- return input_ids, attention_mask
-
-
-def get_batched_dataset(dataset, batch_size, seed=None):
- if seed is not None:
- dataset = dataset.shuffle(seed=seed)
- for i in range(len(dataset) // batch_size):
- batch = dataset[i * batch_size : (i + 1) * batch_size]
- yield dict(batch)
-
-
-@partial(jax.pmap, axis_name="batch")
-def train_step(state, drp_rng, **model_inputs):
- def loss_fn(params):
- start_labels = model_inputs.pop("start_labels")
- end_labels = model_inputs.pop("end_labels")
- pooled_labels = model_inputs.pop("pooled_labels")
-
- outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
- start_logits, end_logits, pooled_logits = outputs
-
- return state.loss_fn(
- start_logits,
- start_labels,
- end_logits,
- end_labels,
- pooled_logits,
- pooled_labels,
- )
-
- drp_rng, new_drp_rng = jax.random.split(drp_rng)
- grad_fn = jax.value_and_grad(loss_fn)
- loss, grads = grad_fn(state.params)
- metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
- grads = jax.lax.pmean(grads, "batch")
-
- state = state.apply_gradients(grads=grads)
- return state, metrics, new_drp_rng
-
-
-@partial(jax.pmap, axis_name="batch")
-def val_step(state, **model_inputs):
- start_labels = model_inputs.pop("start_labels")
- end_labels = model_inputs.pop("end_labels")
- pooled_labels = model_inputs.pop("pooled_labels")
-
- outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
- start_logits, end_logits, pooled_logits = outputs
-
- loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
- metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
- return metrics
-
-
-class TrainState(train_state.TrainState):
- loss_fn: Callable = struct.field(pytree_node=False)
-
-
-@dataclass
-class Trainer:
- args: Args
- data_collator: Callable
- train_step_fn: Callable
- val_step_fn: Callable
- model_save_fn: Callable
- logger: wandb
- scheduler_fn: Callable = None
-
- def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
- params = model.params
- state = TrainState.create(
- apply_fn=model.__call__,
- params=params,
- tx=tx,
- loss_fn=calculate_loss_for_nq,
- )
- if ckpt_dir is not None:
- params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
- tx_args = {
- "lr": args.lr,
- "init_lr": args.init_lr,
- "warmup_steps": args.warmup_steps,
- "num_train_steps": num_train_steps,
- "weight_decay": args.weight_decay,
- }
- tx, lr = build_tx(**tx_args)
- state = train_state.TrainState(
- step=step,
- apply_fn=model.__call__,
- params=params,
- tx=tx,
- opt_state=opt_state,
- )
- self.args = args
- self.data_collator = data_collator
- self.scheduler_fn = lr
- model.params = params
- state = jax_utils.replicate(state)
- return state
-
- def train(self, state, tr_dataset, val_dataset):
- args = self.args
- total = len(tr_dataset) // args.batch_size
-
- rng = jax.random.PRNGKey(0)
- drp_rng = jax.random.split(rng, jax.device_count())
- for epoch in range(args.max_epochs):
- running_loss = jnp.array(0, dtype=jnp.float32)
- tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
- i = 0
- for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
- batch = self.data_collator(batch)
- state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
- running_loss += jax_utils.unreplicate(metrics["loss"])
- i += 1
- if i % args.logging_steps == 0:
- state_step = jax_utils.unreplicate(state.step)
- tr_loss = running_loss.item() / i
- lr = self.scheduler_fn(state_step - 1)
-
- eval_loss = self.evaluate(state, val_dataset)
- logging_dict = {
- "step": state_step.item(),
- "eval_loss": eval_loss.item(),
- "tr_loss": tr_loss,
- "lr": lr.item(),
- }
- tqdm.write(str(logging_dict))
- self.logger.log(logging_dict, commit=True)
-
- if i % args.save_steps == 0:
- self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
-
- def evaluate(self, state, dataset):
- dataloader = get_batched_dataset(dataset, self.args.batch_size)
- total = len(dataset) // self.args.batch_size
- running_loss = jnp.array(0, dtype=jnp.float32)
- i = 0
- for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
- batch = self.data_collator(batch)
- metrics = self.val_step_fn(state, **batch)
- running_loss += jax_utils.unreplicate(metrics["loss"])
- i += 1
- return running_loss / i
-
- def save_checkpoint(self, save_dir, state):
- state = jax_utils.unreplicate(state)
- print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
- self.model_save_fn(save_dir, params=state.params)
- with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
- f.write(to_bytes(state.opt_state))
- joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
- joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
- with open(os.path.join(save_dir, "training_state.json"), "w") as f:
- json.dump({"step": state.step.item()}, f)
- print("DONE")
-
-
-def restore_checkpoint(save_dir, state):
- print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
- with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
- params = from_bytes(state.params, f.read())
-
- with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
- opt_state = from_bytes(state.opt_state, f.read())
-
- args = joblib.load(os.path.join(save_dir, "args.joblib"))
- data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
-
- with open(os.path.join(save_dir, "training_state.json"), "r") as f:
- training_state = json.load(f)
- step = training_state["step"]
-
- print("DONE")
- return params, opt_state, step, args, data_collator
-
-
-def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
- decay_steps = num_train_steps - warmup_steps
- warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
- decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
- lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
- return lr
-
-
-def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
- def weight_decay_mask(params):
- params = traverse_util.flatten_dict(params)
- mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
- return traverse_util.unflatten_dict(mask)
-
- lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
-
- tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
- return tx, lr
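The `scheduler_fn` above chains a linear warmup into a linear decay with `optax.join_schedules`. A minimal sketch of the same construction, evaluated at a few steps; the peak learning rate matches the `Args` default, but the step counts are deliberately small, illustrative values:

```python
# Sketch of the warmup-then-decay schedule built by scheduler_fn. The step counts
# (100 warmup, 1000 total) are illustrative and much smaller than the training defaults.
import optax

warmup_fn = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100)
decay_fn = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900)
lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[100])

for step in (0, 50, 100, 500, 1000):
    print(step, float(lr(step)))  # ramps up to 3e-5 by step 100, then decays toward 1e-7
```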
diff --git a/spaces/chomakov/GPT-4_PDF_summary/README.md b/spaces/chomakov/GPT-4_PDF_summary/README.md
deleted file mode 100644
index 030f0684033145eb58368f56fed50690dc24fa1f..0000000000000000000000000000000000000000
--- a/spaces/chomakov/GPT-4_PDF_summary/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: GPT-4 PDF Summary
-emoji: 💩
-colorFrom: yellow
-colorTo: indigo
-sdk: docker
-pinned: false
-license: creativeml-openrail-m
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/hazmat/primitives/cmac.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/hazmat/primitives/cmac.py
deleted file mode 100644
index 8aa1d791acdd09aefe1391a73dec960428b6892d..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/hazmat/primitives/cmac.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from __future__ import annotations
-
-import typing
-
-from cryptography import utils
-from cryptography.exceptions import AlreadyFinalized
-from cryptography.hazmat.primitives import ciphers
-
-if typing.TYPE_CHECKING:
- from cryptography.hazmat.backends.openssl.cmac import _CMACContext
-
-
-class CMAC:
- _ctx: typing.Optional[_CMACContext]
- _algorithm: ciphers.BlockCipherAlgorithm
-
- def __init__(
- self,
- algorithm: ciphers.BlockCipherAlgorithm,
- backend: typing.Any = None,
- ctx: typing.Optional[_CMACContext] = None,
- ) -> None:
- if not isinstance(algorithm, ciphers.BlockCipherAlgorithm):
- raise TypeError("Expected instance of BlockCipherAlgorithm.")
- self._algorithm = algorithm
-
- if ctx is None:
- from cryptography.hazmat.backends.openssl.backend import (
- backend as ossl,
- )
-
- self._ctx = ossl.create_cmac_ctx(self._algorithm)
- else:
- self._ctx = ctx
-
- def update(self, data: bytes) -> None:
- if self._ctx is None:
- raise AlreadyFinalized("Context was already finalized.")
-
- utils._check_bytes("data", data)
- self._ctx.update(data)
-
- def finalize(self) -> bytes:
- if self._ctx is None:
- raise AlreadyFinalized("Context was already finalized.")
- digest = self._ctx.finalize()
- self._ctx = None
- return digest
-
- def verify(self, signature: bytes) -> None:
- utils._check_bytes("signature", signature)
- if self._ctx is None:
- raise AlreadyFinalized("Context was already finalized.")
-
- ctx, self._ctx = self._ctx, None
- ctx.verify(signature)
-
- def copy(self) -> CMAC:
- if self._ctx is None:
- raise AlreadyFinalized("Context was already finalized.")
- return CMAC(self._algorithm, ctx=self._ctx.copy())
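A short usage sketch for the CMAC primitive above, going through the public `cryptography` API with AES; the key and message are dummy values:

```python
# Compute and verify an AES-CMAC tag. Key and message are placeholders.
import os

from cryptography.hazmat.primitives.cmac import CMAC
from cryptography.hazmat.primitives.ciphers import algorithms

key = os.urandom(16)                      # random AES-128 key
mac = CMAC(algorithms.AES(key))
mac.update(b"message to authenticate")
tag = mac.finalize()                      # 16-byte tag; the context is finalized afterwards

verifier = CMAC(algorithms.AES(key))
verifier.update(b"message to authenticate")
verifier.verify(tag)                      # raises InvalidSignature if the tag does not match
```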
diff --git a/spaces/cihyFjudo/fairness-paper-search/Antenna Magus Professional Download Crack [UPDATED]ed.md b/spaces/cihyFjudo/fairness-paper-search/Antenna Magus Professional Download Crack [UPDATED]ed.md
deleted file mode 100644
index 7c051795579eddea0f89f77ab057432d50bfca94..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Antenna Magus Professional Download Crack [UPDATED]ed.md
+++ /dev/null
@@ -1,67 +0,0 @@
-## Antenna Magus Professional Download Cracked
-
-
-
- ![Antenna Magus Professional Download Crack \[UPDATED\]ed](https://sjcrack.com/wp-content/uploads/2017/03/Antenna-Magus-Professional-2017-Crack-Keygen-FREE.jpg)
-
-
-
-**Antenna Magus Professional Download Cracked ✵✵✵ [https://walllowcopo.blogspot.com/?download=2twr28](https://walllowcopo.blogspot.com/?download=2twr28)**
-
-
-
-# How to Download and Use Antenna Magus Professional for Antenna Design
-
-
-
-Antenna Magus Professional is a software tool that helps you design and model antennas faster and more accurately. It has a database of over 350 validated antenna models that you can export to CST Studio Suite® for further analysis and optimization. In this article, we will show you how to download and use Antenna Magus Professional for antenna design.
-
-
-
-## Step 1: Download Antenna Magus Professional
-
-
-
-Antenna Magus Professional is a product of SIMULIA by Dassault Systèmes®. You can download it from their official website[^2^]. You will need to register and provide some information before you can access the download link. You can choose between a 14-day trial version or a full version with a license.
-
-
-
-## Step 2: Install Antenna Magus Professional
-
-
-
-After downloading the software, you can run the installer and follow the instructions on the screen. You will need to accept the terms and conditions, choose a destination folder, and enter your license information if you have one. The installation process may take some time depending on your system configuration.
-
-
-
-## Step 3: Launch Antenna Magus Professional
-
-
-
-Once the installation is complete, you can launch Antenna Magus Professional from the start menu or the desktop shortcut. You will see a welcome screen with some options to start a new project, open an existing project, or browse the antenna database. You can also access the help menu, the settings menu, and the license manager from here.
-
-
-
-## Step 4: Choose an Antenna Model
-
-
-
-Antenna Magus Professional has a user-friendly interface that allows you to search, filter, and compare different antenna models based on your specifications and requirements. You can enter parameters such as frequency range, gain, bandwidth, polarization, size, etc., and see which antennas match your criteria. You can also view detailed information about each antenna model, such as its description, performance characteristics, design equations, references, etc.
-
-
-
-## Step 5: Export to CST Studio Suite®
-
-
-
-After choosing an antenna model that suits your needs, you can export it to CST Studio Suite® for further analysis and optimization. Antenna Magus Professional will generate a parametric model of the antenna that you can import into CST Microwave Studio® or CST EM STUDIO®. You can then modify the geometry, materials, ports, boundaries, etc., and run simulations to evaluate the antenna's performance in different scenarios.
-
-
-
-## Conclusion
-
-
-
-Antenna Magus Professional is a powerful software tool that can help you design and model antennas faster and more accurately. It has a large database of validated antenna models that you can export to CST Studio Suite® for further analysis and optimization. However, downloading and using Antenna Magus Professional may not be legal if you do not have a valid license or permission from the software provider. Therefore, we do not recommend downloading cracked versions of Antenna Magus Professional from untrusted sources[^1^]. Instead, we suggest you try the trial version or purchase the full version from the official website[^2^]. We hope this article has been helpful for you.
-
- 1b8d091108
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Designated Driver Film Completo In Italiano Download [PORTABLE] Gratuito Hd 1080p.md b/spaces/cihyFjudo/fairness-paper-search/Designated Driver Film Completo In Italiano Download [PORTABLE] Gratuito Hd 1080p.md
deleted file mode 100644
index 08f580d9f112682ac7f414c39dee8d524f5590bb..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Designated Driver Film Completo In Italiano Download [PORTABLE] Gratuito Hd 1080p.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Designated Driver full movie in Italian free download HD 1080p
DOWNLOAD ✫ https://tinurli.com/2uwkud
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Ptgui Mac Download The Industry Leading Photo Stitching Software.md b/spaces/cihyFjudo/fairness-paper-search/Ptgui Mac Download The Industry Leading Photo Stitching Software.md
deleted file mode 100644
index d4f24aa85f1c8fb213d38e2caa213e74f36e58b7..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Ptgui Mac Download The Industry Leading Photo Stitching Software.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Ptgui Mac Download
Download File === https://tinurli.com/2uwkzY
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cjayic/soft-vc-widowmaker/hifigan/dataset.py b/spaces/cjayic/soft-vc-widowmaker/hifigan/dataset.py
deleted file mode 100644
index 0142cd0f4073b955a1dbc424d1eef31a76815285..0000000000000000000000000000000000000000
--- a/spaces/cjayic/soft-vc-widowmaker/hifigan/dataset.py
+++ /dev/null
@@ -1,126 +0,0 @@
-from pathlib import Path
-import math
-import random
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-from torch.utils.data import Dataset
-
-import torchaudio
-import torchaudio.transforms as transforms
-
-
-class LogMelSpectrogram(torch.nn.Module):
- def __init__(self):
- super().__init__()
- self.melspectrogram = transforms.MelSpectrogram(
- sample_rate=16000,
- n_fft=1024,
- win_length=1024,
- hop_length=160,
- center=False,
- power=1.0,
- norm="slaney",
- onesided=True,
- n_mels=128,
- mel_scale="slaney",
- )
-
- def forward(self, wav):
- wav = F.pad(wav, ((1024 - 160) // 2, (1024 - 160) // 2), "reflect")
- mel = self.melspectrogram(wav)
- logmel = torch.log(torch.clamp(mel, min=1e-5))
- return logmel
-
-
-class MelDataset(Dataset):
- def __init__(
- self,
- root: Path,
- segment_length: int,
- sample_rate: int,
- hop_length: int,
- train: bool = True,
- finetune: bool = False,
- ):
- self.wavs_dir = root / "wavs"
- self.mels_dir = root / "mels"
- self.data_dir = self.wavs_dir if not finetune else self.mels_dir
-
- self.segment_length = segment_length
- self.sample_rate = sample_rate
- self.hop_length = hop_length
- self.train = train
- self.finetune = finetune
-
- suffix = ".wav" if not finetune else ".npy"
- pattern = f"train/**/*{suffix}" if train else "dev/**/*{suffix}"
-
- self.metadata = [
- path.relative_to(self.data_dir).with_suffix("")
- for path in self.data_dir.rglob(pattern)
- ]
-
- self.logmel = LogMelSpectrogram()
-
- def __len__(self):
- return len(self.metadata)
-
- def __getitem__(self, index):
- path = self.metadata[index]
- wav_path = self.wavs_dir / path
-
- info = torchaudio.info(wav_path.with_suffix(".wav"))
- if info.sample_rate != self.sample_rate:
- raise ValueError(
- f"Sample rate {info.sample_rate} doesn't match target of {self.sample_rate}"
- )
-
- if self.finetune:
- mel_path = self.mels_dir / path
- src_logmel = torch.from_numpy(np.load(mel_path.with_suffix(".npy")))
- src_logmel = src_logmel.unsqueeze(0)
-
- mel_frames_per_segment = math.ceil(self.segment_length / self.hop_length)
- mel_diff = src_logmel.size(-1) - mel_frames_per_segment if self.train else 0
- mel_offset = random.randint(0, max(mel_diff, 0))
-
- frame_offset = self.hop_length * mel_offset
- else:
- frame_diff = info.num_frames - self.segment_length
- frame_offset = random.randint(0, max(frame_diff, 0))
-
- wav, _ = torchaudio.load(
- filepath=wav_path.with_suffix(".wav"),
- frame_offset=frame_offset if self.train else 0,
- num_frames=self.segment_length if self.train else -1,
- )
-
- if wav.size(-1) < self.segment_length:
- wav = F.pad(wav, (0, self.segment_length - wav.size(-1)))
-
- if not self.finetune and self.train:
- gain = random.random() * (0.99 - 0.4) + 0.4
- flip = -1 if random.random() > 0.5 else 1
- wav = flip * gain * wav / wav.abs().max()
-
- tgt_logmel = self.logmel(wav.unsqueeze(0)).squeeze(0)
-
- if self.finetune:
- if self.train:
- src_logmel = src_logmel[
- :, :, mel_offset : mel_offset + mel_frames_per_segment
- ]
-
- if src_logmel.size(-1) < mel_frames_per_segment:
- src_logmel = F.pad(
- src_logmel,
- (0, mel_frames_per_segment - src_logmel.size(-1)),
- "constant",
- src_logmel.min(),
- )
- else:
- src_logmel = tgt_logmel.clone()
-
- return wav, src_logmel, tgt_logmel
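As a quick sanity check of the `LogMelSpectrogram` module defined above, a minimal sketch on fake audio; the one-second, 16 kHz waveform is an assumption chosen only to make the output shape easy to read:

```python
# Run LogMelSpectrogram (defined in the file above) on a dummy 16 kHz waveform,
# mirroring the call pattern used in MelDataset.__getitem__.
import torch

logmel = LogMelSpectrogram()
wav = torch.randn(1, 16000)                 # (channels, samples), as torchaudio.load returns
mel = logmel(wav.unsqueeze(0)).squeeze(0)   # same unsqueeze/squeeze as in the dataset
print(mel.shape)                            # torch.Size([1, 128, 100]): 128 mel bins, 10 ms hop
```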
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dcaenc.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dcaenc.c
deleted file mode 100644
index c731d79381c8d7e2daacb54505b6b5e29de7ece5..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dcaenc.c
+++ /dev/null
@@ -1,1341 +0,0 @@
-/*
- * DCA encoder
- * Copyright (C) 2008-2012 Alexander E. Patrakov
- * 2010 Benjamin Larsson
- * 2011 Xiang Wang
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/avassert.h"
-#include "libavutil/channel_layout.h"
-#include "libavutil/common.h"
-#include "libavutil/ffmath.h"
-#include "libavutil/mem_internal.h"
-#include "libavutil/opt.h"
-#include "libavutil/thread.h"
-#include "libavutil/tx.h"
-#include "avcodec.h"
-#include "codec_internal.h"
-#include "dca.h"
-#include "dcaadpcm.h"
-#include "dcamath.h"
-#include "dca_core.h"
-#include "dcadata.h"
-#include "dcaenc.h"
-#include "encode.h"
-#include "put_bits.h"
-
-#define MAX_CHANNELS 6
-#define DCA_MAX_FRAME_SIZE 16384
-#define DCA_HEADER_SIZE 13
-#define DCA_LFE_SAMPLES 8
-
-#define DCAENC_SUBBANDS 32
-#define SUBFRAMES 1
-#define SUBSUBFRAMES 2
-#define SUBBAND_SAMPLES (SUBFRAMES * SUBSUBFRAMES * 8)
-#define AUBANDS 25
-
-#define COS_T(x) (c->cos_table[(x) & 2047])
-
-typedef struct CompressionOptions {
- int adpcm_mode;
-} CompressionOptions;
-
-typedef struct DCAEncContext {
- AVClass *class;
- PutBitContext pb;
- DCAADPCMEncContext adpcm_ctx;
- AVTXContext *mdct;
- av_tx_fn mdct_fn;
- CompressionOptions options;
- int frame_size;
- int frame_bits;
- int fullband_channels;
- int channels;
- int lfe_channel;
- int samplerate_index;
- int bitrate_index;
- int channel_config;
- const int32_t *band_interpolation;
- const int32_t *band_spectrum;
- int lfe_scale_factor;
- softfloat lfe_quant;
- int32_t lfe_peak_cb;
- const int8_t *channel_order_tab; ///< channel reordering table, lfe and non lfe
-
- int32_t prediction_mode[MAX_CHANNELS][DCAENC_SUBBANDS];
- int32_t adpcm_history[MAX_CHANNELS][DCAENC_SUBBANDS][DCA_ADPCM_COEFFS * 2];
- int32_t history[MAX_CHANNELS][512]; /* This is a circular buffer */
- int32_t *subband[MAX_CHANNELS][DCAENC_SUBBANDS];
- int32_t quantized[MAX_CHANNELS][DCAENC_SUBBANDS][SUBBAND_SAMPLES];
- int32_t peak_cb[MAX_CHANNELS][DCAENC_SUBBANDS];
- int32_t diff_peak_cb[MAX_CHANNELS][DCAENC_SUBBANDS]; ///< expected peak of residual signal
- int32_t downsampled_lfe[DCA_LFE_SAMPLES];
- int32_t masking_curve_cb[SUBSUBFRAMES][256];
- int32_t bit_allocation_sel[MAX_CHANNELS];
- int abits[MAX_CHANNELS][DCAENC_SUBBANDS];
- int scale_factor[MAX_CHANNELS][DCAENC_SUBBANDS];
- softfloat quant[MAX_CHANNELS][DCAENC_SUBBANDS];
- int32_t quant_index_sel[MAX_CHANNELS][DCA_CODE_BOOKS];
- int32_t eff_masking_curve_cb[256];
- int32_t band_masking_cb[32];
- int32_t worst_quantization_noise;
- int32_t worst_noise_ever;
- int consumed_bits;
- int consumed_adpcm_bits; ///< Number of bits to transmit ADPCM related info
-
- int32_t cos_table[2048];
- int32_t band_interpolation_tab[2][512];
- int32_t band_spectrum_tab[2][8];
- int32_t auf[9][AUBANDS][256];
- int32_t cb_to_add[256];
- int32_t cb_to_level[2048];
- int32_t lfe_fir_64i[512];
-} DCAEncContext;
-
-/* Transfer function of outer and middle ear, Hz -> dB */
-static double hom(double f)
-{
- double f1 = f / 1000;
-
- return -3.64 * pow(f1, -0.8)
- + 6.8 * exp(-0.6 * (f1 - 3.4) * (f1 - 3.4))
- - 6.0 * exp(-0.15 * (f1 - 8.7) * (f1 - 8.7))
- - 0.0006 * (f1 * f1) * (f1 * f1);
-}
-
-static double gammafilter(int i, double f)
-{
- double h = (f - fc[i]) / erb[i];
-
- h = 1 + h * h;
- h = 1 / (h * h);
- return 20 * log10(h);
-}
-
-static int subband_bufer_alloc(DCAEncContext *c)
-{
- int ch, band;
- int32_t *bufer = av_calloc(MAX_CHANNELS * DCAENC_SUBBANDS *
- (SUBBAND_SAMPLES + DCA_ADPCM_COEFFS),
- sizeof(int32_t));
- if (!bufer)
- return AVERROR(ENOMEM);
-
- /* we need room for DCA_ADPCM_COEFFS samples from the previous frame
- * to calculate prediction coefficients for each subband */
- for (ch = 0; ch < MAX_CHANNELS; ch++) {
- for (band = 0; band < DCAENC_SUBBANDS; band++) {
- c->subband[ch][band] = bufer +
- ch * DCAENC_SUBBANDS * (SUBBAND_SAMPLES + DCA_ADPCM_COEFFS) +
- band * (SUBBAND_SAMPLES + DCA_ADPCM_COEFFS) + DCA_ADPCM_COEFFS;
- }
- }
- return 0;
-}
-
-static void subband_bufer_free(DCAEncContext *c)
-{
- if (c->subband[0][0]) {
- int32_t *bufer = c->subband[0][0] - DCA_ADPCM_COEFFS;
- av_free(bufer);
- c->subband[0][0] = NULL;
- }
-}
-
-static uint16_t bitalloc_12_table[DCA_BITALLOC_12_COUNT][12 + 1][2];
-
-static uint16_t bitalloc_table[DCA_NUM_BITALLOC_CODES][2];
-static const uint16_t (*bitalloc_tables[DCA_CODE_BOOKS][8])[2];
-
-static av_cold void create_enc_table(uint16_t dst[][2], unsigned count,
- const uint8_t (**src_tablep)[2])
-{
- const uint8_t (*src_table)[2] = *src_tablep;
- uint16_t code = 0;
-
- for (unsigned i = 0; i < count; i++) {
- unsigned dst_idx = src_table[i][0];
-
- dst[dst_idx][0] = code >> (16 - src_table[i][1]);
- dst[dst_idx][1] = src_table[i][1];
-
- code += 1 << (16 - src_table[i][1]);
- }
- *src_tablep += count;
-}
-
-static av_cold void dcaenc_init_static_tables(void)
-{
- uint16_t (*bitalloc_dst)[2] = bitalloc_table;
- const uint8_t (*src_table)[2] = ff_dca_vlc_src_tables;
-
- for (unsigned i = 0; i < DCA_CODE_BOOKS; i++) {
- for (unsigned j = 0; j < ff_dca_quant_index_group_size[i]; j++) {
- create_enc_table(bitalloc_dst, ff_dca_bitalloc_sizes[i],
- &src_table);
- bitalloc_tables[i][j] = bitalloc_dst - ff_dca_bitalloc_offsets[i];
- bitalloc_dst += ff_dca_bitalloc_sizes[i];
- }
- }
-
- for (unsigned i = 0; i < DCA_BITALLOC_12_COUNT; i++)
- create_enc_table(&bitalloc_12_table[i][1], 12, &src_table);
-}
-
-static int encode_init(AVCodecContext *avctx)
-{
- static AVOnce init_static_once = AV_ONCE_INIT;
- DCAEncContext *c = avctx->priv_data;
- AVChannelLayout layout = avctx->ch_layout;
- int i, j, k, min_frame_bits;
- float scale = 1.0f;
- int ret;
-
- if ((ret = subband_bufer_alloc(c)) < 0)
- return ret;
-
- c->fullband_channels = c->channels = layout.nb_channels;
- c->lfe_channel = (c->channels == 3 || c->channels == 6);
- c->band_interpolation = c->band_interpolation_tab[1];
- c->band_spectrum = c->band_spectrum_tab[1];
- c->worst_quantization_noise = -2047;
- c->worst_noise_ever = -2047;
- c->consumed_adpcm_bits = 0;
-
- if (ff_dcaadpcm_init(&c->adpcm_ctx))
- return AVERROR(ENOMEM);
-
- switch (layout.nb_channels) {
- case 1: /* mono */
- c->channel_config = 0;
- break;
- case 2: /* stereo */
- c->channel_config = 2;
- break;
- case 4: /* 2.2 */
- c->channel_config = 8;
- break;
- case 5: /* 5.0 */
- c->channel_config = 9;
- break;
- case 6: /* 5.1 */
- c->channel_config = 9;
- break;
- default:
- av_assert1(!"impossible channel layout");
- }
-
- if (c->lfe_channel) {
- c->fullband_channels--;
- c->channel_order_tab = channel_reorder_lfe[c->channel_config];
- } else {
- c->channel_order_tab = channel_reorder_nolfe[c->channel_config];
- }
-
- for (i = 0; i < MAX_CHANNELS; i++) {
- for (j = 0; j < DCA_CODE_BOOKS; j++) {
- c->quant_index_sel[i][j] = ff_dca_quant_index_group_size[j];
- }
- /* 6 - no Huffman */
- c->bit_allocation_sel[i] = 6;
-
- for (j = 0; j < DCAENC_SUBBANDS; j++) {
- /* -1 - no ADPCM */
- c->prediction_mode[i][j] = -1;
- memset(c->adpcm_history[i][j], 0, sizeof(int32_t)*DCA_ADPCM_COEFFS);
- }
- }
-
- for (i = 0; i < 9; i++) {
- if (sample_rates[i] == avctx->sample_rate)
- break;
- }
- if (i == 9)
- return AVERROR(EINVAL);
- c->samplerate_index = i;
-
- if (avctx->bit_rate < 32000 || avctx->bit_rate > 3840000) {
- av_log(avctx, AV_LOG_ERROR, "Bit rate %"PRId64" not supported.", avctx->bit_rate);
- return AVERROR(EINVAL);
- }
- for (i = 0; ff_dca_bit_rates[i] < avctx->bit_rate; i++)
- ;
- c->bitrate_index = i;
- c->frame_bits = FFALIGN((avctx->bit_rate * 512 + avctx->sample_rate - 1) / avctx->sample_rate, 32);
- min_frame_bits = 132 + (493 + 28 * 32) * c->fullband_channels + c->lfe_channel * 72;
- if (c->frame_bits < min_frame_bits || c->frame_bits > (DCA_MAX_FRAME_SIZE << 3))
- return AVERROR(EINVAL);
-
- c->frame_size = (c->frame_bits + 7) / 8;
-
- avctx->frame_size = 32 * SUBBAND_SAMPLES;
-
- if ((ret = av_tx_init(&c->mdct, &c->mdct_fn, AV_TX_INT32_MDCT, 0, 256, &scale, 0)) < 0)
- return ret;
-
- /* Init all tables */
- c->cos_table[0] = 0x7fffffff;
- c->cos_table[512] = 0;
- c->cos_table[1024] = -c->cos_table[0];
- for (i = 1; i < 512; i++) {
- c->cos_table[i] = (int32_t)(0x7fffffff * cos(M_PI * i / 1024));
- c->cos_table[1024-i] = -c->cos_table[i];
- c->cos_table[1024+i] = -c->cos_table[i];
- c->cos_table[2048-i] = +c->cos_table[i];
- }
-
- for (i = 0; i < 2048; i++)
- c->cb_to_level[i] = (int32_t)(0x7fffffff * ff_exp10(-0.005 * i));
-
- for (k = 0; k < 32; k++) {
- for (j = 0; j < 8; j++) {
- c->lfe_fir_64i[64 * j + k] = (int32_t)(0xffffff800000ULL * ff_dca_lfe_fir_64[8 * k + j]);
- c->lfe_fir_64i[64 * (7-j) + (63 - k)] = (int32_t)(0xffffff800000ULL * ff_dca_lfe_fir_64[8 * k + j]);
- }
- }
-
- for (i = 0; i < 512; i++) {
- c->band_interpolation_tab[0][i] = (int32_t)(0x1000000000ULL * ff_dca_fir_32bands_perfect[i]);
- c->band_interpolation_tab[1][i] = (int32_t)(0x1000000000ULL * ff_dca_fir_32bands_nonperfect[i]);
- }
-
- for (i = 0; i < 9; i++) {
- for (j = 0; j < AUBANDS; j++) {
- for (k = 0; k < 256; k++) {
- double freq = sample_rates[i] * (k + 0.5) / 512;
-
- c->auf[i][j][k] = (int32_t)(10 * (hom(freq) + gammafilter(j, freq)));
- }
- }
- }
-
- for (i = 0; i < 256; i++) {
- double add = 1 + ff_exp10(-0.01 * i);
- c->cb_to_add[i] = (int32_t)(100 * log10(add));
- }
- for (j = 0; j < 8; j++) {
- double accum = 0;
- for (i = 0; i < 512; i++) {
- double reconst = ff_dca_fir_32bands_perfect[i] * ((i & 64) ? (-1) : 1);
- accum += reconst * cos(2 * M_PI * (i + 0.5 - 256) * (j + 0.5) / 512);
- }
- c->band_spectrum_tab[0][j] = (int32_t)(200 * log10(accum));
- }
- for (j = 0; j < 8; j++) {
- double accum = 0;
- for (i = 0; i < 512; i++) {
- double reconst = ff_dca_fir_32bands_nonperfect[i] * ((i & 64) ? (-1) : 1);
- accum += reconst * cos(2 * M_PI * (i + 0.5 - 256) * (j + 0.5) / 512);
- }
- c->band_spectrum_tab[1][j] = (int32_t)(200 * log10(accum));
- }
-
- ff_thread_once(&init_static_once, dcaenc_init_static_tables);
- return 0;
-}
-
-static av_cold int encode_close(AVCodecContext *avctx)
-{
- DCAEncContext *c = avctx->priv_data;
- av_tx_uninit(&c->mdct);
- subband_bufer_free(c);
- ff_dcaadpcm_free(&c->adpcm_ctx);
-
- return 0;
-}
-
-static void subband_transform(DCAEncContext *c, const int32_t *input)
-{
- int ch, subs, i, k, j;
-
- for (ch = 0; ch < c->fullband_channels; ch++) {
- /* History is copied because it is also needed for PSY */
- int32_t hist[512];
- int hist_start = 0;
- const int chi = c->channel_order_tab[ch];
-
- memcpy(hist, &c->history[ch][0], 512 * sizeof(int32_t));
-
- for (subs = 0; subs < SUBBAND_SAMPLES; subs++) {
- int32_t accum[64];
- int32_t resp;
- int band;
-
- /* Calculate the convolutions at once */
- memset(accum, 0, 64 * sizeof(int32_t));
-
- for (k = 0, i = hist_start, j = 0;
- i < 512; k = (k + 1) & 63, i++, j++)
- accum[k] += mul32(hist[i], c->band_interpolation[j]);
- for (i = 0; i < hist_start; k = (k + 1) & 63, i++, j++)
- accum[k] += mul32(hist[i], c->band_interpolation[j]);
-
- for (k = 16; k < 32; k++)
- accum[k] = accum[k] - accum[31 - k];
- for (k = 32; k < 48; k++)
- accum[k] = accum[k] + accum[95 - k];
-
- for (band = 0; band < 32; band++) {
- resp = 0;
- for (i = 16; i < 48; i++) {
- int s = (2 * band + 1) * (2 * (i + 16) + 1);
- resp += mul32(accum[i], COS_T(s << 3)) >> 3;
- }
-
- c->subband[ch][band][subs] = ((band + 1) & 2) ? -resp : resp;
- }
-
- /* Copy in 32 new samples from input */
- for (i = 0; i < 32; i++)
- hist[i + hist_start] = input[(subs * 32 + i) * c->channels + chi];
-
- hist_start = (hist_start + 32) & 511;
- }
- }
-}
-
-static void lfe_downsample(DCAEncContext *c, const int32_t *input)
-{
- /* FIXME: make 128x LFE downsampling possible */
- const int lfech = lfe_index[c->channel_config];
- int i, j, lfes;
- int32_t hist[512];
- int32_t accum;
- int hist_start = 0;
-
- memcpy(hist, &c->history[c->channels - 1][0], 512 * sizeof(int32_t));
-
- for (lfes = 0; lfes < DCA_LFE_SAMPLES; lfes++) {
- /* Calculate the convolution */
- accum = 0;
-
- for (i = hist_start, j = 0; i < 512; i++, j++)
- accum += mul32(hist[i], c->lfe_fir_64i[j]);
- for (i = 0; i < hist_start; i++, j++)
- accum += mul32(hist[i], c->lfe_fir_64i[j]);
-
- c->downsampled_lfe[lfes] = accum;
-
- /* Copy in 64 new samples from input */
- for (i = 0; i < 64; i++)
- hist[i + hist_start] = input[(lfes * 64 + i) * c->channels + lfech];
-
- hist_start = (hist_start + 64) & 511;
- }
-}
-
-static uint32_t dca_vlc_calc_alloc_bits(const int values[], uint8_t n, uint8_t sel)
-{
- uint32_t sum = 0;
- for (unsigned i = 0; i < n; i++)
- sum += bitalloc_12_table[sel][values[i]][1];
- return sum;
-}
-
-static void dca_vlc_enc_alloc(PutBitContext *pb, const int values[],
- uint8_t n, uint8_t sel)
-{
- for (unsigned i = 0; i < n; i++)
- put_bits(pb, bitalloc_12_table[sel][values[i]][1],
- bitalloc_12_table[sel][values[i]][0]);
-}
-
-static uint32_t dca_vlc_calc_quant_bits(const int values[], uint8_t n,
- uint8_t sel, uint8_t table)
-{
- uint32_t sum = 0;
- for (unsigned i = 0; i < n; i++)
- sum += bitalloc_tables[table][sel][values[i]][1];
- return sum;
-}
-
-static void dca_vlc_enc_quant(PutBitContext *pb, const int values[],
- uint8_t n, uint8_t sel, uint8_t table)
-{
- for (unsigned i = 0; i < n; i++)
- put_bits(pb, bitalloc_tables[table][sel][values[i]][1],
- bitalloc_tables[table][sel][values[i]][0]);
-}
-
-static int32_t get_cb(DCAEncContext *c, int32_t in)
-{
- int i, res = 0;
- in = FFABS(in);
-
- for (i = 1024; i > 0; i >>= 1) {
- if (c->cb_to_level[i + res] >= in)
- res += i;
- }
- return -res;
-}
-
-static int32_t add_cb(DCAEncContext *c, int32_t a, int32_t b)
-{
- if (a < b)
- FFSWAP(int32_t, a, b);
-
- if (a - b >= 256)
- return a;
- return a + c->cb_to_add[a - b];
-}
-
-static void calc_power(DCAEncContext *c,
- const int32_t in[2 * 256], int32_t power[256])
-{
- int i;
- LOCAL_ALIGNED_32(int32_t, data, [512]);
- LOCAL_ALIGNED_32(int32_t, coeff, [256]);
-
- for (i = 0; i < 512; i++)
- data[i] = norm__(mul32(in[i], 0x3fffffff - (COS_T(4 * i + 2) >> 1)), 4);
-
- c->mdct_fn(c->mdct, coeff, data, sizeof(int32_t));
- for (i = 0; i < 256; i++) {
- const int32_t cb = get_cb(c, coeff[i]);
- power[i] = add_cb(c, cb, cb);
- }
-}
-
-static void adjust_jnd(DCAEncContext *c,
- const int32_t in[512], int32_t out_cb[256])
-{
- int32_t power[256];
- int32_t out_cb_unnorm[256];
- int32_t denom;
- const int32_t ca_cb = -1114;
- const int32_t cs_cb = 928;
- const int samplerate_index = c->samplerate_index;
- int i, j;
-
- calc_power(c, in, power);
-
- for (j = 0; j < 256; j++)
- out_cb_unnorm[j] = -2047; /* and can only grow */
-
- for (i = 0; i < AUBANDS; i++) {
- denom = ca_cb; /* and can only grow */
- for (j = 0; j < 256; j++)
- denom = add_cb(c, denom, power[j] + c->auf[samplerate_index][i][j]);
- for (j = 0; j < 256; j++)
- out_cb_unnorm[j] = add_cb(c, out_cb_unnorm[j],
- -denom + c->auf[samplerate_index][i][j]);
- }
-
- for (j = 0; j < 256; j++)
- out_cb[j] = add_cb(c, out_cb[j], -out_cb_unnorm[j] - ca_cb - cs_cb);
-}
-
-typedef void (*walk_band_t)(DCAEncContext *c, int band1, int band2, int f,
- int32_t spectrum1, int32_t spectrum2, int channel,
- int32_t * arg);
-
-static void walk_band_low(DCAEncContext *c, int band, int channel,
- walk_band_t walk, int32_t *arg)
-{
- int f;
-
- if (band == 0) {
- for (f = 0; f < 4; f++)
- walk(c, 0, 0, f, 0, -2047, channel, arg);
- } else {
- for (f = 0; f < 8; f++)
- walk(c, band, band - 1, 8 * band - 4 + f,
- c->band_spectrum[7 - f], c->band_spectrum[f], channel, arg);
- }
-}
-
-static void walk_band_high(DCAEncContext *c, int band, int channel,
- walk_band_t walk, int32_t *arg)
-{
- int f;
-
- if (band == 31) {
- for (f = 0; f < 4; f++)
- walk(c, 31, 31, 256 - 4 + f, 0, -2047, channel, arg);
- } else {
- for (f = 0; f < 8; f++)
- walk(c, band, band + 1, 8 * band + 4 + f,
- c->band_spectrum[f], c->band_spectrum[7 - f], channel, arg);
- }
-}
-
-static void update_band_masking(DCAEncContext *c, int band1, int band2,
- int f, int32_t spectrum1, int32_t spectrum2,
- int channel, int32_t * arg)
-{
- int32_t value = c->eff_masking_curve_cb[f] - spectrum1;
-
- if (value < c->band_masking_cb[band1])
- c->band_masking_cb[band1] = value;
-}
-
-static void calc_masking(DCAEncContext *c, const int32_t *input)
-{
- int i, k, band, ch, ssf;
- int32_t data[512];
-
- for (i = 0; i < 256; i++)
- for (ssf = 0; ssf < SUBSUBFRAMES; ssf++)
- c->masking_curve_cb[ssf][i] = -2047;
-
- for (ssf = 0; ssf < SUBSUBFRAMES; ssf++)
- for (ch = 0; ch < c->fullband_channels; ch++) {
- const int chi = c->channel_order_tab[ch];
-
- for (i = 0, k = 128 + 256 * ssf; k < 512; i++, k++)
- data[i] = c->history[ch][k];
- for (k -= 512; i < 512; i++, k++)
- data[i] = input[k * c->channels + chi];
- adjust_jnd(c, data, c->masking_curve_cb[ssf]);
- }
- for (i = 0; i < 256; i++) {
- int32_t m = 2048;
-
- for (ssf = 0; ssf < SUBSUBFRAMES; ssf++)
- if (c->masking_curve_cb[ssf][i] < m)
- m = c->masking_curve_cb[ssf][i];
- c->eff_masking_curve_cb[i] = m;
- }
-
- for (band = 0; band < 32; band++) {
- c->band_masking_cb[band] = 2048;
- walk_band_low(c, band, 0, update_band_masking, NULL);
- walk_band_high(c, band, 0, update_band_masking, NULL);
- }
-}
-
-static inline int32_t find_peak(DCAEncContext *c, const int32_t *in, int len)
-{
- int sample;
- int32_t m = 0;
- for (sample = 0; sample < len; sample++) {
- int32_t s = abs(in[sample]);
- if (m < s)
- m = s;
- }
- return get_cb(c, m);
-}
-
-static void find_peaks(DCAEncContext *c)
-{
- int band, ch;
-
- for (ch = 0; ch < c->fullband_channels; ch++) {
- for (band = 0; band < 32; band++)
- c->peak_cb[ch][band] = find_peak(c, c->subband[ch][band],
- SUBBAND_SAMPLES);
- }
-
- if (c->lfe_channel)
- c->lfe_peak_cb = find_peak(c, c->downsampled_lfe, DCA_LFE_SAMPLES);
-}
-
-static void adpcm_analysis(DCAEncContext *c)
-{
- int ch, band;
- int pred_vq_id;
- int32_t *samples;
- int32_t estimated_diff[SUBBAND_SAMPLES];
-
- c->consumed_adpcm_bits = 0;
- for (ch = 0; ch < c->fullband_channels; ch++) {
- for (band = 0; band < 32; band++) {
- samples = c->subband[ch][band] - DCA_ADPCM_COEFFS;
- pred_vq_id = ff_dcaadpcm_subband_analysis(&c->adpcm_ctx, samples,
- SUBBAND_SAMPLES, estimated_diff);
- if (pred_vq_id >= 0) {
- c->prediction_mode[ch][band] = pred_vq_id;
- c->consumed_adpcm_bits += 12; //12 bits to transmit prediction vq index
- c->diff_peak_cb[ch][band] = find_peak(c, estimated_diff, 16);
- } else {
- c->prediction_mode[ch][band] = -1;
- }
- }
- }
-}
-
-static const int snr_fudge = 128;
-#define USED_1ABITS 1
-#define USED_26ABITS 4
-
-static inline int32_t get_step_size(DCAEncContext *c, int ch, int band)
-{
- int32_t step_size;
-
- if (c->bitrate_index == 3)
- step_size = ff_dca_lossless_quant[c->abits[ch][band]];
- else
- step_size = ff_dca_lossy_quant[c->abits[ch][band]];
-
- return step_size;
-}
-
-static int calc_one_scale(DCAEncContext *c, int32_t peak_cb, int abits,
- softfloat *quant)
-{
- int32_t peak;
- int our_nscale, try_remove;
- softfloat our_quant;
-
- av_assert0(peak_cb <= 0);
- av_assert0(peak_cb >= -2047);
-
- our_nscale = 127;
- peak = c->cb_to_level[-peak_cb];
-
- for (try_remove = 64; try_remove > 0; try_remove >>= 1) {
- if (scalefactor_inv[our_nscale - try_remove].e + stepsize_inv[abits].e <= 17)
- continue;
- our_quant.m = mul32(scalefactor_inv[our_nscale - try_remove].m, stepsize_inv[abits].m);
- our_quant.e = scalefactor_inv[our_nscale - try_remove].e + stepsize_inv[abits].e - 17;
- if ((ff_dca_quant_levels[abits] - 1) / 2 < quantize_value(peak, our_quant))
- continue;
- our_nscale -= try_remove;
- }
-
- if (our_nscale >= 125)
- our_nscale = 124;
-
- quant->m = mul32(scalefactor_inv[our_nscale].m, stepsize_inv[abits].m);
- quant->e = scalefactor_inv[our_nscale].e + stepsize_inv[abits].e - 17;
- av_assert0((ff_dca_quant_levels[abits] - 1) / 2 >= quantize_value(peak, *quant));
-
- return our_nscale;
-}
-
-static inline void quantize_adpcm_subband(DCAEncContext *c, int ch, int band)
-{
- int32_t step_size;
- int32_t diff_peak_cb = c->diff_peak_cb[ch][band];
- c->scale_factor[ch][band] = calc_one_scale(c, diff_peak_cb,
- c->abits[ch][band],
- &c->quant[ch][band]);
-
- step_size = get_step_size(c, ch, band);
- ff_dcaadpcm_do_real(c->prediction_mode[ch][band],
- c->quant[ch][band],
- ff_dca_scale_factor_quant7[c->scale_factor[ch][band]],
- step_size, c->adpcm_history[ch][band], c->subband[ch][band],
- c->adpcm_history[ch][band] + 4, c->quantized[ch][band],
- SUBBAND_SAMPLES, c->cb_to_level[-diff_peak_cb]);
-}
-
-static void quantize_adpcm(DCAEncContext *c)
-{
- int band, ch;
-
- for (ch = 0; ch < c->fullband_channels; ch++)
- for (band = 0; band < 32; band++)
- if (c->prediction_mode[ch][band] >= 0)
- quantize_adpcm_subband(c, ch, band);
-}
-
-static void quantize_pcm(DCAEncContext *c)
-{
- int sample, band, ch;
-
- for (ch = 0; ch < c->fullband_channels; ch++) {
- for (band = 0; band < 32; band++) {
- if (c->prediction_mode[ch][band] == -1) {
- for (sample = 0; sample < SUBBAND_SAMPLES; sample++) {
- int32_t val = quantize_value(c->subband[ch][band][sample],
- c->quant[ch][band]);
- c->quantized[ch][band][sample] = val;
- }
- }
- }
- }
-}
-
-static void accumulate_huff_bit_consumption(int abits, int32_t *quantized,
- uint32_t *result)
-{
- uint8_t sel, id = abits - 1;
- for (sel = 0; sel < ff_dca_quant_index_group_size[id]; sel++)
- result[sel] += dca_vlc_calc_quant_bits(quantized, SUBBAND_SAMPLES,
- sel, id);
-}
-
-static uint32_t set_best_code(uint32_t vlc_bits[DCA_CODE_BOOKS][7],
- uint32_t clc_bits[DCA_CODE_BOOKS],
- int32_t res[DCA_CODE_BOOKS])
-{
- uint8_t i, sel;
- uint32_t best_sel_bits[DCA_CODE_BOOKS];
- int32_t best_sel_id[DCA_CODE_BOOKS];
- uint32_t t, bits = 0;
-
- for (i = 0; i < DCA_CODE_BOOKS; i++) {
-
- av_assert0(!((!!vlc_bits[i][0]) ^ (!!clc_bits[i])));
- if (vlc_bits[i][0] == 0) {
- /* do not transmit adjustment index for empty codebooks */
- res[i] = ff_dca_quant_index_group_size[i];
- /* and skip it */
- continue;
- }
-
- best_sel_bits[i] = vlc_bits[i][0];
- best_sel_id[i] = 0;
- for (sel = 0; sel < ff_dca_quant_index_group_size[i]; sel++) {
- if (best_sel_bits[i] > vlc_bits[i][sel] && vlc_bits[i][sel]) {
- best_sel_bits[i] = vlc_bits[i][sel];
- best_sel_id[i] = sel;
- }
- }
-
- /* 2 bits to transmit scale factor adjustment index */
- t = best_sel_bits[i] + 2;
- if (t < clc_bits[i]) {
- res[i] = best_sel_id[i];
- bits += t;
- } else {
- res[i] = ff_dca_quant_index_group_size[i];
- bits += clc_bits[i];
- }
- }
- return bits;
-}
-
-static uint32_t set_best_abits_code(int abits[DCAENC_SUBBANDS], int bands,
- int32_t *res)
-{
- uint8_t i;
- uint32_t t;
- int32_t best_sel = 6;
- int32_t best_bits = bands * 5;
-
- /* Check whether any subband cannot be encoded with the Huffman tables */
- for (i = 0; i < bands; i++) {
- if (abits[i] > 12 || abits[i] == 0) {
- *res = best_sel;
- return best_bits;
- }
- }
-
- for (i = 0; i < DCA_BITALLOC_12_COUNT; i++) {
- t = dca_vlc_calc_alloc_bits(abits, bands, i);
- if (t < best_bits) {
- best_bits = t;
- best_sel = i;
- }
- }
-
- *res = best_sel;
- return best_bits;
-}
-
-static int init_quantization_noise(DCAEncContext *c, int noise, int forbid_zero)
-{
- int ch, band, ret = USED_26ABITS | USED_1ABITS;
- uint32_t huff_bit_count_accum[MAX_CHANNELS][DCA_CODE_BOOKS][7];
- uint32_t clc_bit_count_accum[MAX_CHANNELS][DCA_CODE_BOOKS];
- uint32_t bits_counter = 0;
-
- c->consumed_bits = 132 + 333 * c->fullband_channels;
- c->consumed_bits += c->consumed_adpcm_bits;
- if (c->lfe_channel)
- c->consumed_bits += 72;
-
- /* attempt to guess the bit distribution based on the previous frame */
- for (ch = 0; ch < c->fullband_channels; ch++) {
- for (band = 0; band < 32; band++) {
- int snr_cb = c->peak_cb[ch][band] - c->band_masking_cb[band] - noise;
-
- if (snr_cb >= 1312) {
- c->abits[ch][band] = 26;
- ret &= ~USED_1ABITS;
- } else if (snr_cb >= 222) {
- c->abits[ch][band] = 8 + mul32(snr_cb - 222, 69000000);
- ret &= ~(USED_26ABITS | USED_1ABITS);
- } else if (snr_cb >= 0) {
- c->abits[ch][band] = 2 + mul32(snr_cb, 106000000);
- ret &= ~(USED_26ABITS | USED_1ABITS);
- } else if (forbid_zero || snr_cb >= -140) {
- c->abits[ch][band] = 1;
- ret &= ~USED_26ABITS;
- } else {
- c->abits[ch][band] = 0;
- ret &= ~(USED_26ABITS | USED_1ABITS);
- }
- }
- c->consumed_bits += set_best_abits_code(c->abits[ch], 32,
- &c->bit_allocation_sel[ch]);
- }
-
- /* Recalculate scale_factor each time to get the bit consumption in case of Huffman coding.
- This is a suboptimal solution */
- /* TODO: maybe cache the scaled values */
- for (ch = 0; ch < c->fullband_channels; ch++) {
- for (band = 0; band < 32; band++) {
- if (c->prediction_mode[ch][band] == -1) {
- c->scale_factor[ch][band] = calc_one_scale(c, c->peak_cb[ch][band],
- c->abits[ch][band],
- &c->quant[ch][band]);
- }
- }
- }
- quantize_adpcm(c);
- quantize_pcm(c);
-
- memset(huff_bit_count_accum, 0, MAX_CHANNELS * DCA_CODE_BOOKS * 7 * sizeof(uint32_t));
- memset(clc_bit_count_accum, 0, MAX_CHANNELS * DCA_CODE_BOOKS * sizeof(uint32_t));
- for (ch = 0; ch < c->fullband_channels; ch++) {
- for (band = 0; band < 32; band++) {
- if (c->abits[ch][band] && c->abits[ch][band] <= DCA_CODE_BOOKS) {
- accumulate_huff_bit_consumption(c->abits[ch][band],
- c->quantized[ch][band],
- huff_bit_count_accum[ch][c->abits[ch][band] - 1]);
- clc_bit_count_accum[ch][c->abits[ch][band] - 1] += bit_consumption[c->abits[ch][band]];
- } else {
- bits_counter += bit_consumption[c->abits[ch][band]];
- }
- }
- }
-
- for (ch = 0; ch < c->fullband_channels; ch++) {
- bits_counter += set_best_code(huff_bit_count_accum[ch],
- clc_bit_count_accum[ch],
- c->quant_index_sel[ch]);
- }
-
- c->consumed_bits += bits_counter;
-
- return ret;
-}
-
-static void assign_bits(DCAEncContext *c)
-{
- /* Find the bounds where the binary search should work */
- int low, high, down;
- int used_abits = 0;
- int forbid_zero = 1;
-restart:
- init_quantization_noise(c, c->worst_quantization_noise, forbid_zero);
- low = high = c->worst_quantization_noise;
- if (c->consumed_bits > c->frame_bits) {
- while (c->consumed_bits > c->frame_bits) {
- if (used_abits == USED_1ABITS && forbid_zero) {
- forbid_zero = 0;
- goto restart;
- }
- low = high;
- high += snr_fudge;
- used_abits = init_quantization_noise(c, high, forbid_zero);
- }
- } else {
- while (c->consumed_bits <= c->frame_bits) {
- high = low;
- if (used_abits == USED_26ABITS)
- goto out; /* The requested bitrate is too high, pad with zeros */
- low -= snr_fudge;
- used_abits = init_quantization_noise(c, low, forbid_zero);
- }
- }
-
- /* Now do a binary search between low and high to see what fits */
- for (down = snr_fudge >> 1; down; down >>= 1) {
- init_quantization_noise(c, high - down, forbid_zero);
- if (c->consumed_bits <= c->frame_bits)
- high -= down;
- }
- init_quantization_noise(c, high, forbid_zero);
-out:
- c->worst_quantization_noise = high;
- if (high > c->worst_noise_ever)
- c->worst_noise_ever = high;
-}
-
-static void shift_history(DCAEncContext *c, const int32_t *input)
-{
- int k, ch;
-
- for (k = 0; k < 512; k++)
- for (ch = 0; ch < c->channels; ch++) {
- const int chi = c->channel_order_tab[ch];
-
- c->history[ch][k] = input[k * c->channels + chi];
- }
-}
-
-static void fill_in_adpcm_bufer(DCAEncContext *c)
-{
- int ch, band;
- int32_t step_size;
- /* Fill in the ADPCM work buffer for subbands which have not been ADPCM coded
- * in the current frame - we need this data if the same subband of the next
- * frame is ADPCM coded.
- */
- for (ch = 0; ch < c->channels; ch++) {
- for (band = 0; band < 32; band++) {
- int32_t *samples = c->subband[ch][band] - DCA_ADPCM_COEFFS;
- if (c->prediction_mode[ch][band] == -1) {
- step_size = get_step_size(c, ch, band);
-
- ff_dca_core_dequantize(c->adpcm_history[ch][band],
- c->quantized[ch][band]+12, step_size,
- ff_dca_scale_factor_quant7[c->scale_factor[ch][band]], 0, 4);
- } else {
- AV_COPY128U(c->adpcm_history[ch][band], c->adpcm_history[ch][band]+4);
- }
- /* Copy the dequantized values for LPC analysis.
- * This reduces artifacts in case of extreme quantization. Example: in the
- * current frame abits is 1 and the prediction flag is unset, but the end of
- * the frame is a sine-like signal. If LPC analysis used the original values,
- * it would likely report a good prediction gain and set the prediction flag,
- * yet the decoder history would hold no proper values, so the result would
- * likely be poor. The bitstream has a "Predictor history flag switch", but
- * that flag disables history for all subbands at once.
- */
- samples[0] = c->adpcm_history[ch][band][0] * (1 << 7);
- samples[1] = c->adpcm_history[ch][band][1] * (1 << 7);
- samples[2] = c->adpcm_history[ch][band][2] * (1 << 7);
- samples[3] = c->adpcm_history[ch][band][3] * (1 << 7);
- }
- }
-}
-
-static void calc_lfe_scales(DCAEncContext *c)
-{
- if (c->lfe_channel)
- c->lfe_scale_factor = calc_one_scale(c, c->lfe_peak_cb, 11, &c->lfe_quant);
-}
-
-static void put_frame_header(DCAEncContext *c)
-{
- /* SYNC */
- put_bits(&c->pb, 16, 0x7ffe);
- put_bits(&c->pb, 16, 0x8001);
-
- /* Frame type: normal */
- put_bits(&c->pb, 1, 1);
-
- /* Deficit sample count: none */
- put_bits(&c->pb, 5, 31);
-
- /* CRC is not present */
- put_bits(&c->pb, 1, 0);
-
- /* Number of PCM sample blocks */
- put_bits(&c->pb, 7, SUBBAND_SAMPLES - 1);
-
- /* Primary frame byte size */
- put_bits(&c->pb, 14, c->frame_size - 1);
-
- /* Audio channel arrangement */
- put_bits(&c->pb, 6, c->channel_config);
-
- /* Core audio sampling frequency */
- put_bits(&c->pb, 4, bitstream_sfreq[c->samplerate_index]);
-
- /* Transmission bit rate */
- put_bits(&c->pb, 5, c->bitrate_index);
-
- /* Embedded down mix: disabled */
- put_bits(&c->pb, 1, 0);
-
- /* Embedded dynamic range flag: not present */
- put_bits(&c->pb, 1, 0);
-
- /* Embedded time stamp flag: not present */
- put_bits(&c->pb, 1, 0);
-
- /* Auxiliary data flag: not present */
- put_bits(&c->pb, 1, 0);
-
- /* HDCD source: no */
- put_bits(&c->pb, 1, 0);
-
- /* Extension audio ID: N/A */
- put_bits(&c->pb, 3, 0);
-
- /* Extended audio data: not present */
- put_bits(&c->pb, 1, 0);
-
- /* Audio sync word insertion flag: after each sub-frame */
- put_bits(&c->pb, 1, 0);
-
- /* Low frequency effects flag: not present or 64x subsampling */
- put_bits(&c->pb, 2, c->lfe_channel ? 2 : 0);
-
- /* Predictor history switch flag: on */
- put_bits(&c->pb, 1, 1);
-
- /* No CRC */
- /* Multirate interpolator switch: non-perfect reconstruction */
- put_bits(&c->pb, 1, 0);
-
- /* Encoder software revision: 7 */
- put_bits(&c->pb, 4, 7);
-
- /* Copy history: 0 */
- put_bits(&c->pb, 2, 0);
-
- /* Source PCM resolution: 16 bits, not DTS ES */
- put_bits(&c->pb, 3, 0);
-
- /* Front sum/difference coding: no */
- put_bits(&c->pb, 1, 0);
-
- /* Surrounds sum/difference coding: no */
- put_bits(&c->pb, 1, 0);
-
- /* Dialog normalization: 0 dB */
- put_bits(&c->pb, 4, 0);
-}
-
-static void put_primary_audio_header(DCAEncContext *c)
-{
- int ch, i;
- /* Number of subframes */
- put_bits(&c->pb, 4, SUBFRAMES - 1);
-
- /* Number of primary audio channels */
- put_bits(&c->pb, 3, c->fullband_channels - 1);
-
- /* Subband activity count */
- for (ch = 0; ch < c->fullband_channels; ch++)
- put_bits(&c->pb, 5, DCAENC_SUBBANDS - 2);
-
- /* High frequency VQ start subband */
- for (ch = 0; ch < c->fullband_channels; ch++)
- put_bits(&c->pb, 5, DCAENC_SUBBANDS - 1);
-
- /* Joint intensity coding index: 0, 0 */
- for (ch = 0; ch < c->fullband_channels; ch++)
- put_bits(&c->pb, 3, 0);
-
- /* Transient mode codebook: A4, A4 (arbitrary) */
- for (ch = 0; ch < c->fullband_channels; ch++)
- put_bits(&c->pb, 2, 0);
-
- /* Scale factor code book: 7 bit linear, 7-bit sqrt table (for each channel) */
- for (ch = 0; ch < c->fullband_channels; ch++)
- put_bits(&c->pb, 3, 6);
-
- /* Bit allocation quantizer select */
- for (ch = 0; ch < c->fullband_channels; ch++)
- put_bits(&c->pb, 3, c->bit_allocation_sel[ch]);
-
- /* Quantization index codebook select */
- for (i = 0; i < DCA_CODE_BOOKS; i++)
- for (ch = 0; ch < c->fullband_channels; ch++)
- put_bits(&c->pb, ff_dca_quant_index_sel_nbits[i], c->quant_index_sel[ch][i]);
-
- /* Scale factor adjustment index: transmitted in case of Huffman coding */
- for (i = 0; i < DCA_CODE_BOOKS; i++)
- for (ch = 0; ch < c->fullband_channels; ch++)
- if (c->quant_index_sel[ch][i] < ff_dca_quant_index_group_size[i])
- put_bits(&c->pb, 2, 0);
-
- /* Audio header CRC check word: not transmitted */
-}
-
-static void put_subframe_samples(DCAEncContext *c, int ss, int band, int ch)
-{
- int i, j, sum, bits, sel;
- if (c->abits[ch][band] <= DCA_CODE_BOOKS) {
- av_assert0(c->abits[ch][band] > 0);
- sel = c->quant_index_sel[ch][c->abits[ch][band] - 1];
- // Huffman codes
- if (sel < ff_dca_quant_index_group_size[c->abits[ch][band] - 1]) {
- dca_vlc_enc_quant(&c->pb, &c->quantized[ch][band][ss * 8], 8,
- sel, c->abits[ch][band] - 1);
- return;
- }
-
- // Block codes
- if (c->abits[ch][band] <= 7) {
- for (i = 0; i < 8; i += 4) {
- sum = 0;
- for (j = 3; j >= 0; j--) {
- sum *= ff_dca_quant_levels[c->abits[ch][band]];
- sum += c->quantized[ch][band][ss * 8 + i + j];
- sum += (ff_dca_quant_levels[c->abits[ch][band]] - 1) / 2;
- }
- put_bits(&c->pb, bit_consumption[c->abits[ch][band]] / 4, sum);
- }
- return;
- }
- }
-
- for (i = 0; i < 8; i++) {
- bits = bit_consumption[c->abits[ch][band]] / 16;
- put_sbits(&c->pb, bits, c->quantized[ch][band][ss * 8 + i]);
- }
-}
-
-static void put_subframe(DCAEncContext *c, int subframe)
-{
- int i, band, ss, ch;
-
- /* Subsubframes count */
- put_bits(&c->pb, 2, SUBSUBFRAMES - 1);
-
- /* Partial subsubframe sample count: dummy */
- put_bits(&c->pb, 3, 0);
-
- /* Prediction mode: ADPCM flag for each channel and subband */
- for (ch = 0; ch < c->fullband_channels; ch++)
- for (band = 0; band < DCAENC_SUBBANDS; band++)
- put_bits(&c->pb, 1, !(c->prediction_mode[ch][band] == -1));
-
- /* Prediction VQ address */
- for (ch = 0; ch < c->fullband_channels; ch++)
- for (band = 0; band < DCAENC_SUBBANDS; band++)
- if (c->prediction_mode[ch][band] >= 0)
- put_bits(&c->pb, 12, c->prediction_mode[ch][band]);
-
- /* Bit allocation index */
- for (ch = 0; ch < c->fullband_channels; ch++) {
- if (c->bit_allocation_sel[ch] == 6) {
- for (band = 0; band < DCAENC_SUBBANDS; band++) {
- put_bits(&c->pb, 5, c->abits[ch][band]);
- }
- } else {
- dca_vlc_enc_alloc(&c->pb, c->abits[ch], DCAENC_SUBBANDS,
- c->bit_allocation_sel[ch]);
- }
- }
-
- if (SUBSUBFRAMES > 1) {
- /* Transition mode: none for each channel and subband */
- for (ch = 0; ch < c->fullband_channels; ch++)
- for (band = 0; band < DCAENC_SUBBANDS; band++)
- if (c->abits[ch][band])
- put_bits(&c->pb, 1, 0); /* codebook A4 */
- }
-
- /* Scale factors */
- for (ch = 0; ch < c->fullband_channels; ch++)
- for (band = 0; band < DCAENC_SUBBANDS; band++)
- if (c->abits[ch][band])
- put_bits(&c->pb, 7, c->scale_factor[ch][band]);
-
- /* Joint subband scale factor codebook select: not transmitted */
- /* Scale factors for joint subband coding: not transmitted */
- /* Stereo down-mix coefficients: not transmitted */
- /* Dynamic range coefficient: not transmitted */
- /* Stde information CRC check word: not transmitted */
- /* VQ encoded high frequency subbands: not transmitted */
-
- /* LFE data: 8 samples and scalefactor */
- if (c->lfe_channel) {
- for (i = 0; i < DCA_LFE_SAMPLES; i++)
- put_bits(&c->pb, 8, quantize_value(c->downsampled_lfe[i], c->lfe_quant) & 0xff);
- put_bits(&c->pb, 8, c->lfe_scale_factor);
- }
-
- /* Audio data (subsubframes) */
- for (ss = 0; ss < SUBSUBFRAMES; ss++)
- for (ch = 0; ch < c->fullband_channels; ch++)
- for (band = 0; band < DCAENC_SUBBANDS; band++)
- if (c->abits[ch][band])
- put_subframe_samples(c, ss, band, ch);
-
- /* DSYNC */
- put_bits(&c->pb, 16, 0xffff);
-}
-
-static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
- const AVFrame *frame, int *got_packet_ptr)
-{
- DCAEncContext *c = avctx->priv_data;
- const int32_t *samples;
- int ret, i;
-
- if ((ret = ff_get_encode_buffer(avctx, avpkt, c->frame_size, 0)) < 0)
- return ret;
-
- samples = (const int32_t *)frame->data[0];
-
- subband_transform(c, samples);
- if (c->lfe_channel)
- lfe_downsample(c, samples);
-
- calc_masking(c, samples);
- if (c->options.adpcm_mode)
- adpcm_analysis(c);
- find_peaks(c);
- assign_bits(c);
- calc_lfe_scales(c);
- shift_history(c, samples);
-
- init_put_bits(&c->pb, avpkt->data, avpkt->size);
- fill_in_adpcm_bufer(c);
- put_frame_header(c);
- put_primary_audio_header(c);
- for (i = 0; i < SUBFRAMES; i++)
- put_subframe(c, i);
-
- flush_put_bits(&c->pb);
- memset(put_bits_ptr(&c->pb), 0, put_bytes_left(&c->pb, 0));
-
- *got_packet_ptr = 1;
- return 0;
-}
-
-#define DCAENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
-
-static const AVOption options[] = {
- { "dca_adpcm", "Use ADPCM encoding", offsetof(DCAEncContext, options.adpcm_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, DCAENC_FLAGS },
- { NULL },
-};
-
-static const AVClass dcaenc_class = {
- .class_name = "DCA (DTS Coherent Acoustics)",
- .item_name = av_default_item_name,
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static const FFCodecDefault defaults[] = {
- { "b", "1411200" },
- { NULL },
-};
-
-const FFCodec ff_dca_encoder = {
- .p.name = "dca",
- CODEC_LONG_NAME("DCA (DTS Coherent Acoustics)"),
- .p.type = AVMEDIA_TYPE_AUDIO,
- .p.id = AV_CODEC_ID_DTS,
- .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_EXPERIMENTAL |
- AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
- .priv_data_size = sizeof(DCAEncContext),
- .init = encode_init,
- .close = encode_close,
- FF_CODEC_ENCODE_CB(encode_frame),
- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
- .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32,
- AV_SAMPLE_FMT_NONE },
- .p.supported_samplerates = sample_rates,
- CODEC_OLD_CHANNEL_LAYOUTS(AV_CH_LAYOUT_MONO, AV_CH_LAYOUT_STEREO,
- AV_CH_LAYOUT_2_2, AV_CH_LAYOUT_5POINT0,
- AV_CH_LAYOUT_5POINT1)
- .p.ch_layouts = (const AVChannelLayout[]){
- AV_CHANNEL_LAYOUT_MONO,
- AV_CHANNEL_LAYOUT_STEREO,
- AV_CHANNEL_LAYOUT_2_2,
- AV_CHANNEL_LAYOUT_5POINT0,
- AV_CHANNEL_LAYOUT_5POINT1,
- { 0 },
- },
- .defaults = defaults,
- .p.priv_class = &dcaenc_class,
-};
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/indeo2.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/indeo2.c
deleted file mode 100644
index dd88ebf7c587b127d40e19bd4a2dcb1d9a452de9..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/indeo2.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Intel Indeo 2 codec
- * Copyright (c) 2005 Konstantin Shishkov
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * Intel Indeo 2 decoder.
- */
-
-#include "libavutil/attributes.h"
-#include "libavutil/thread.h"
-
-#define BITSTREAM_READER_LE
-#include "avcodec.h"
-#include "codec_internal.h"
-#include "decode.h"
-#include "get_bits.h"
-#include "indeo2data.h"
-
-typedef struct Ir2Context{
- AVCodecContext *avctx;
- AVFrame *picture;
- GetBitContext gb;
- int decode_delta;
-} Ir2Context;
-
-#define CODE_VLC_BITS 14
-static VLC ir2_vlc;
-
-/* Indeo 2 codes are in range 0x01..0x7F and 0x81..0x90 */
-static inline int ir2_get_code(GetBitContext *gb)
-{
- return get_vlc2(gb, ir2_vlc.table, CODE_VLC_BITS, 1);
-}
-
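-/* Decode an intra plane: the first line carries absolute values (pixel pairs or
- * runs of 0x80), the remaining lines carry vertical deltas against the line above. */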
-static int ir2_decode_plane(Ir2Context *ctx, int width, int height, uint8_t *dst,
- int pitch, const uint8_t *table)
-{
- int i;
- int j;
- int out = 0;
-
- if ((width & 1) || width * height / (2*(IR2_CODES - 0x7F)) > get_bits_left(&ctx->gb))
- return AVERROR_INVALIDDATA;
-
- /* the first line contains absolute values, the other lines contain deltas */
- while (out < width) {
- int c = ir2_get_code(&ctx->gb);
- if (c >= 0x80) { /* we have a run */
- c -= 0x7F;
- if (out + c*2 > width)
- return AVERROR_INVALIDDATA;
- for (i = 0; i < c * 2; i++)
- dst[out++] = 0x80;
- } else { /* copy two values from table */
- if (c <= 0)
- return AVERROR_INVALIDDATA;
- dst[out++] = table[c * 2];
- dst[out++] = table[(c * 2) + 1];
- }
- }
- dst += pitch;
-
- for (j = 1; j < height; j++) {
- out = 0;
- while (out < width) {
- int c;
- if (get_bits_left(&ctx->gb) <= 0)
- return AVERROR_INVALIDDATA;
- c = ir2_get_code(&ctx->gb);
- if (c >= 0x80) { /* we have a skip */
- c -= 0x7F;
- if (out + c*2 > width)
- return AVERROR_INVALIDDATA;
- for (i = 0; i < c * 2; i++) {
- dst[out] = dst[out - pitch];
- out++;
- }
- } else { /* add two deltas from table */
- int t;
- if (c <= 0)
- return AVERROR_INVALIDDATA;
- t = dst[out - pitch] + (table[c * 2] - 128);
- t = av_clip_uint8(t);
- dst[out] = t;
- out++;
- t = dst[out - pitch] + (table[(c * 2) + 1] - 128);
- t = av_clip_uint8(t);
- dst[out] = t;
- out++;
- }
- }
- dst += pitch;
- }
- return 0;
-}
-
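-/* Decode an inter plane: codes either skip pixel pairs or add 3/4-scaled
- * deltas to the previous frame's pixels already present in dst. */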
-static int ir2_decode_plane_inter(Ir2Context *ctx, int width, int height, uint8_t *dst,
- int pitch, const uint8_t *table)
-{
- int j;
- int out = 0;
- int c;
- int t;
-
- if (width & 1)
- return AVERROR_INVALIDDATA;
-
- for (j = 0; j < height; j++) {
- out = 0;
- while (out < width) {
- if (get_bits_left(&ctx->gb) <= 0)
- return AVERROR_INVALIDDATA;
- c = ir2_get_code(&ctx->gb);
- if (c >= 0x80) { /* we have a skip */
- c -= 0x7F;
- out += c * 2;
- } else { /* add two deltas from table */
- if (c <= 0)
- return AVERROR_INVALIDDATA;
- t = dst[out] + (((table[c * 2] - 128)*3) >> 2);
- t = av_clip_uint8(t);
- dst[out] = t;
- out++;
- t = dst[out] + (((table[(c * 2) + 1] - 128)*3) >> 2);
- t = av_clip_uint8(t);
- dst[out] = t;
- out++;
- }
- }
- dst += pitch;
- }
- return 0;
-}
-
-static int ir2_decode_frame(AVCodecContext *avctx, AVFrame *picture,
- int *got_frame, AVPacket *avpkt)
-{
- Ir2Context * const s = avctx->priv_data;
- const uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
- AVFrame * const p = s->picture;
- int start, ret;
- int ltab, ctab;
-
- if ((ret = ff_reget_buffer(avctx, p, 0)) < 0)
- return ret;
-
- start = 48; /* hardcoded for now */
-
- if (start >= buf_size) {
- av_log(s->avctx, AV_LOG_ERROR, "input buffer size too small (%d)\n", buf_size);
- return AVERROR_INVALIDDATA;
- }
-
- s->decode_delta = buf[18];
-
- /* decide whether frame uses deltas or not */
-
- if ((ret = init_get_bits8(&s->gb, buf + start, buf_size - start)) < 0)
- return ret;
-
- ltab = buf[0x22] & 3;
- ctab = buf[0x22] >> 2;
-
- if (ctab > 3) {
- av_log(avctx, AV_LOG_ERROR, "ctab %d is invalid\n", ctab);
- return AVERROR_INVALIDDATA;
- }
-
- if (s->decode_delta) { /* intraframe */
- if ((ret = ir2_decode_plane(s, avctx->width, avctx->height,
- p->data[0], p->linesize[0],
- ir2_delta_table[ltab])) < 0)
- return ret;
-
- /* swapped U and V */
- if ((ret = ir2_decode_plane(s, avctx->width >> 2, avctx->height >> 2,
- p->data[2], p->linesize[2],
- ir2_delta_table[ctab])) < 0)
- return ret;
- if ((ret = ir2_decode_plane(s, avctx->width >> 2, avctx->height >> 2,
- p->data[1], p->linesize[1],
- ir2_delta_table[ctab])) < 0)
- return ret;
- } else { /* interframe */
- if ((ret = ir2_decode_plane_inter(s, avctx->width, avctx->height,
- p->data[0], p->linesize[0],
- ir2_delta_table[ltab])) < 0)
- return ret;
- /* swapped U and V */
- if ((ret = ir2_decode_plane_inter(s, avctx->width >> 2, avctx->height >> 2,
- p->data[2], p->linesize[2],
- ir2_delta_table[ctab])) < 0)
- return ret;
- if ((ret = ir2_decode_plane_inter(s, avctx->width >> 2, avctx->height >> 2,
- p->data[1], p->linesize[1],
- ir2_delta_table[ctab])) < 0)
- return ret;
- }
-
- if ((ret = av_frame_ref(picture, p)) < 0)
- return ret;
-
- *got_frame = 1;
-
- return buf_size;
-}
-
-static av_cold void ir2_init_static(void)
-{
- INIT_VLC_STATIC_FROM_LENGTHS(&ir2_vlc, CODE_VLC_BITS, IR2_CODES,
- &ir2_tab[0][1], 2, &ir2_tab[0][0], 2, 1,
- 0, INIT_VLC_OUTPUT_LE, 1 << CODE_VLC_BITS);
-}
-
-static av_cold int ir2_decode_init(AVCodecContext *avctx)
-{
- static AVOnce init_static_once = AV_ONCE_INIT;
- Ir2Context * const ic = avctx->priv_data;
-
- ic->avctx = avctx;
-
- avctx->pix_fmt = AV_PIX_FMT_YUV410P;
-
- ic->picture = av_frame_alloc();
- if (!ic->picture)
- return AVERROR(ENOMEM);
-
- ff_thread_once(&init_static_once, ir2_init_static);
-
- return 0;
-}
-
-static av_cold int ir2_decode_end(AVCodecContext *avctx)
-{
- Ir2Context * const ic = avctx->priv_data;
-
- av_frame_free(&ic->picture);
-
- return 0;
-}
-
-const FFCodec ff_indeo2_decoder = {
- .p.name = "indeo2",
- CODEC_LONG_NAME("Intel Indeo 2"),
- .p.type = AVMEDIA_TYPE_VIDEO,
- .p.id = AV_CODEC_ID_INDEO2,
- .priv_data_size = sizeof(Ir2Context),
- .init = ir2_decode_init,
- .close = ir2_decode_end,
- FF_CODEC_DECODE_CB(ir2_decode_frame),
- .p.capabilities = AV_CODEC_CAP_DR1,
-};
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Discover New Shows and Download Them with kisskh APK.md b/spaces/congsaPfin/Manga-OCR/logs/Discover New Shows and Download Them with kisskh APK.md
deleted file mode 100644
index b39d0ff87bd4a6407adc022b09cc48370af7f48a..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Discover New Shows and Download Them with kisskh APK.md
+++ /dev/null
@@ -1,93 +0,0 @@
-
-How to Download APK Kisskh: The Ultimate Guide for Drama Lovers
- If you are a fan of Korean, Chinese, Thai, Hollywood, or anime shows, you might have heard of APK Kisskh. This is a popular app that allows you to watch thousands of dramas and movies on your Android device for free. But how do you download and use this app? In this article, we will show you everything you need to know about APK Kisskh, from its features and benefits to its installation and usage. Read on and discover how to enjoy your favorite shows with this amazing app.
- What is APK Kisskh?
- APK Kisskh is an app that provides access to various drama, movie, Hollywood, and anime shows from different countries and genres. You can watch them online or download them for offline viewing. The app supports multi subtitles such as English, Khmer, Indonesian, Malay, Thai, and so on. You can also adjust the video quality according to your preference and internet speed. The app is updated regularly with new content and features to keep you entertained.
-Download link: https://urlca.com/2uOebp
- Features of APK Kisskh
- Some of the features that make APK Kisskh stand out from other similar apps are:
-
-- It has a user-friendly interface that is easy to navigate and use.
-- It has a large collection of shows from various categories such as romance, comedy, action, thriller, horror, fantasy, sci-fi, etc.
-- It has a search function that allows you to find your desired show by keyword or title.
-- It has a download function that lets you save your favorite shows on your device for offline viewing.
-- It has a notification function that alerts you when new episodes or shows are added.
-- It has a feedback function that allows you to rate and comment on the shows you watch.
-
- Benefits of APK Kisskh
- Some of the benefits that you can get from using APK Kisskh are:
-
-- You can watch your favorite shows anytime and anywhere without any restrictions or fees.
-- You can choose from a variety of subtitle languages to suit your needs and preferences.
-- You can enjoy high-quality videos that are smooth and clear.
-- You can discover new shows that match your taste and interest.
-- You can interact with other users who share your passion for drama and movies.
-
- How to Download and Install APK Kisskh on Your Device
- If you are interested in trying out APK Kisskh, you need to download and install it on your Android device first. Here are the steps you need to follow:
- Step 1: Enable Unknown Sources
- Since APK Kisskh is not available on the Google Play Store, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the official store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
- Step 2: Download the APK File
- Next, you need to download the APK file of APK Kisskh from a reliable source. You can use this link to get the latest version of the app. Make sure you have enough storage space on your device before downloading the file.
- Step 3: Install the APK File
- Once you have downloaded the file, locate it in your file manager and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on Install and wait for the process to finish.
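- If you would rather sideload from a computer than tap through the file manager, the install step can also be scripted. This is only a hedged sketch: it assumes the Android SDK's adb tool is installed and on your PATH, USB debugging is enabled on the phone, and the APK file name is illustrative.
-```python
-# Hypothetical helper around "adb install"; the file name is illustrative.
-import subprocess
-
-def install_apk(path: str) -> None:
-    # adb pushes the package to the connected device and installs it
-    subprocess.run(["adb", "install", path], check=True)
-
-install_apk("kisskh.apk")
-```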
- Step 4: Launch the App and Enjoy
- Finally, you can launch the app from your app drawer or home screen and start watching your favorite shows. You will see a list of categories on the main screen, such as Korean, Chinese, Thai, Hollywood, and Anime. You can also access the menu by tapping on the three horizontal lines on the top left corner. Here you can find the settings, downloads, notifications, feedback, and more options.
- How to Use APK Kisskh to Watch Your Favorite Shows
- Now that you have installed APK Kisskh on your device, you might be wondering how to use it to watch your favorite shows. Here are some tips and tricks that will help you get the most out of this app:
- If you want to explore the different shows available on APK Kisskh, you can browse by category. Simply tap on the category you are interested in and you will see a list of shows that belong to that category. You can also sort the list by popularity, rating, or update date. To watch a show, just tap on its poster and you will see its details, such as synopsis, cast, genre, episodes, and more. Tap on the play button to start watching.
- Search by Keyword
- If you already have a specific show in mind that you want to watch, you can use the search function to find it quickly. Just tap on the magnifying glass icon on the top right corner and enter the keyword or title of the show. You will see a list of results that match your query. Tap on the one you want and follow the same steps as above to watch it.
- Select Subtitle Language
- One of the best features of APK Kisskh is that it supports multiple subtitle languages for different shows. You can choose from English, Khmer, Indonesian, Malay, Thai, and more. To select your preferred subtitle language, tap on the CC icon on the bottom right corner of the video player and choose from the available options. You can also adjust the size and color of the subtitles if you want.
- Adjust Video Quality
- Another feature that APK Kisskh offers is that it allows you to adjust the video quality according to your internet speed and preference. You can choose from low, medium, high, or auto quality. To change the video quality, tap on the HD icon on the bottom right corner of the video player and select from the available options. The auto quality will automatically adjust the video quality based on your internet connection.
- Conclusion
- APK Kisskh is a great app for drama and movie lovers who want to watch their favorite shows on their Android devices for free. It has a lot of features and benefits that make it stand out from other similar apps. It is easy to download and install, and it is user-friendly and convenient to use. If you are looking for a way to enjoy your favorite shows anytime and anywhere, APK Kisskh is the app for you.
- FAQs
- Here are some frequently asked questions about APK Kisskh:
-
-- Is APK Kisskh safe to use?
-- Yes, APK Kisskh is safe to use as long as you download it from a trusted source like this link. It does not contain any viruses or malware that can harm your device or data.
-- Is APK Kisskh legal to use?
-- APK Kisskh does not host any content on its own servers. It only provides links to third-party sources that host the content. Therefore, it is not responsible for any copyright infringement or legal issues that may arise from using the app. However, it is advisable to check the laws and regulations of your country before using the app.
-- Does APK Kisskh require any registration or subscription?
-- No, APK Kisskh does not require any registration or subscription to use. You can watch all the shows for free without any limitations or ads.
-- Can I request a show that is not available on APK Kisskh?
-- Yes, you can request a show that is not available on APK Kisskh by using the feedback function. You can find it on the menu and tap on the Feedback option. You can then write your request and send it to the developers. They will try their best to add your requested show as soon as possible.
-- Can I share a show that I like with my friends?
-- Yes, you can share a show that you like with your friends by using the share function. You can find it on the details page of the show and tap on the Share option. You can then choose from various social media platforms or apps to share the link of the show with your friends.
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download 3D Noise Texture with High Resolution and Quality.md b/spaces/congsaPfin/Manga-OCR/logs/Download 3D Noise Texture with High Resolution and Quality.md
deleted file mode 100644
index 113a757f5be90ffddb39c3890a8d61c08ed4dbd5..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download 3D Noise Texture with High Resolution and Quality.md
+++ /dev/null
@@ -1,165 +0,0 @@
-
-How to Download 3D Noise Texture
-Are you looking for a way to add some realism and variety to your 3D models, animations, or games? If so, you might want to use 3D noise texture. In this article, we will explain what 3D noise texture is, why you need it, and how you can find and download it online. We will also show you how you can create your own 3D noise texture in Blender, a free and open-source 3D creation software.
-Download link: https://urlca.com/2uO7jX
- What is 3D Noise Texture and Why You Need It
-Definition of 3D noise texture
-A 3D noise texture is a type of procedural texture that generates random values in three dimensions. Unlike a regular 2D image, a 3D noise texture has depth and can represent the internal structure or variation of a material. For example, a 3D noise texture can simulate the grain of wood, the veins of marble, or the clouds in the sky.
- Uses of 3D noise texture
-A 3D noise texture can be used for various purposes in 3D graphics, such as:
-
-- Creating realistic materials and surfaces for 3D models.
-- Adding details and complexity to low-polygon meshes.
-- Generating organic shapes and patterns.
-- Animating objects or effects with dynamic noise.
-- Modifying other textures or colors with noise.
-
- Examples of 3D noise texture
-Here are some examples of how 3D noise texture can be used in different scenarios:
-
-
-(The original comparison table of example images is not included here; the scenarios illustrated were a wood material, a clouds effect, and terrain generation.)
-
-Sources of free 3D noise texture images
-If you don't want to create your own 3D noise texture, you can find and download some online for free. Here are some sources that offer free 3D noise texture images:
- Freepik
-Freepik is a website that provides free graphic resources for personal and commercial use. You can find over 96,000 vectors, stock photos, and PSD files of noise texture on Freepik. You can filter the results by color, orientation, size, license, and more. To download a noise texture image from Freepik, you need to create a free account and credit the author.
- Blender Manual
-Blender Manual is the official documentation for Blender, which includes tutorials, guides, and references. You can find some examples of 3D noise texture images in the Blender Manual, under the section of Texture Nodes. You can see how different types of noise texture can be used to create various effects and materials in Blender. You can also download the images by right-clicking and choosing Save Image As.
- Unity Asset Store
-Unity Asset Store is a marketplace that offers assets, tools, and services for creating games and interactive experiences with Unity, a popular game engine. You can find over 1,000 assets related to noise texture on the Unity Asset Store, including 3D models, shaders, scripts, and more. Some of them are free, while others require payment. To download an asset from the Unity Asset Store, you need to create a free account and open it in Unity.
- Sources of paid 3D noise texture images
-If you are looking for more professional or specialized 3D noise texture images, you might want to consider some paid sources. Here are some sources that offer paid 3D noise texture images:
- Unreal Engine Marketplace
-Unreal Engine Marketplace is a store that offers assets, tools, and services for creating games and interactive experiences with Unreal Engine, another popular game engine. You can find over 800 assets related to noise texture on the Unreal Engine Marketplace, including 3D models, materials, blueprints, and more. The prices range from $0 to $200. To download an asset from the Unreal Engine Marketplace, you need to create a free account and open it in Unreal Engine.
-NVIDIA Technical Blog is a blog that showcases the latest research and innovations from NVIDIA, a leading company in graphics and computing. You can find some articles that discuss and demonstrate how to use 3D noise texture for various applications, such as volumetric rendering, fluid simulation, and ray tracing. You can also download some sample code and data from the blog posts.
- Computer Graphics Group
-Computer Graphics Group is a research group at the University of Bonn, Germany, that focuses on computer graphics and vision. You can find some publications that present novel methods and algorithms for generating and using 3D noise texture, such as Perlin Noise Tensor Field Design, Noise-based Volume Rendering, and Noise-based Terrain Synthesis. You can also download some source code and data from the publication pages.
- How to Create Your Own 3D Noise Texture in Blender
-Steps to create a 3D noise texture in Blender
-If you want to have more control and creativity over your 3D noise texture, you can create your own in Blender. Here are the steps to do so (a scripted version of the same node setup is sketched after the list):
-
-- Open Blender and create a new file.
-- Add a cube object by pressing Shift + A and choosing Mesh > Cube.
-- Select the cube and go to the Shading workspace.
-- In the Shader Editor window, add a Noise Texture node by pressing Shift + A and choosing Texture > Noise Texture.
-- Connect the Color output of the Noise Texture node to the Base Color input of the Principled BSDF node.
-- Adjust the Scale, Detail, Distortion, and Roughness values of the Noise Texture node to change the appearance of the noise.
-- If you want to make the noise 3D, add a Texture Coordinate node by pressing Shift + A and choosing Input > Texture Coordinate.
-- Connect the Object output of the Texture Coordinate node to the Vector input of the Noise Texture node.
-- To animate the noise, add a Mapping node by pressing Shift + A and choosing Vector > Mapping.
-- Connect the Object output of the Texture Coordinate node to the Vector input of the Mapping node.
-- Connect the Vector output of the Mapping node to the Vector input of the Noise Texture node.
-- Increase the Z value of the Location property of the Mapping node over time by inserting keyframes.
-- To export your 3D noise texture as an image file, go to the Image Editor window and choose Image > Save As.
-
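-If you prefer scripting the same node setup instead of clicking through it, the sketch below does roughly the same thing from Blender's Python console. This is only a sketch: bpy exists only inside Blender, and the material name and slider values are illustrative assumptions rather than part of the steps above.
-```python
-# Minimal sketch of the setup above: Texture Coordinate -> Mapping ->
-# Noise Texture -> Principled BSDF base color. Values are illustrative.
-import bpy
-
-bpy.ops.mesh.primitive_cube_add()                   # add a cube
-obj = bpy.context.active_object
-mat = bpy.data.materials.new(name="NoiseMaterial")  # assumed material name
-mat.use_nodes = True
-obj.data.materials.append(mat)
-
-nodes, links = mat.node_tree.nodes, mat.node_tree.links
-principled = nodes["Principled BSDF"]               # default node of a new material
-
-tex_coord = nodes.new("ShaderNodeTexCoord")
-mapping = nodes.new("ShaderNodeMapping")
-noise = nodes.new("ShaderNodeTexNoise")
-noise.inputs["Scale"].default_value = 5.0           # tweak Scale/Detail/Distortion to taste
-noise.inputs["Detail"].default_value = 4.0
-noise.inputs["Distortion"].default_value = 0.5
-
-links.new(tex_coord.outputs["Object"], mapping.inputs["Vector"])  # makes the noise 3D
-links.new(mapping.outputs["Vector"], noise.inputs["Vector"])
-links.new(noise.outputs["Color"], principled.inputs["Base Color"])
-```
-Animating the noise then amounts to keyframing the Z component of the Mapping node's Location input, exactly as the keyframing step above describes.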
- Conclusion and FAQs
-In this article, we have learned what 3D noise texture is, why you need it, and how you can find and download it online. We have also shown you how you can create your own 3D noise texture in Blender, a free and open-source 3D creation software. We hope that this article has helped you to understand and use 3D noise texture for your 3D projects. If you have any questions or comments, feel free to leave them below. Here are some FAQs that might be useful for you:
- What is the difference between 2D and 3D noise texture?
-A 2D noise texture is a type of procedural texture that generates random values in two dimensions. It can be used to create surface patterns or variations on a 2D plane. A 3D noise texture is a type of procedural texture that generates random values in three dimensions. It can be used to create internal structure or variation of a material in 3D space.
- What are some types of 3D noise texture?
-There are many types of 3D noise texture, each with different characteristics and applications. Some common types are:
-
-- Perlin noise: A smooth and continuous noise that can create natural-looking textures.
-- Voronoi noise: A cellular noise that can create irregular shapes and patterns.
-- Simplex noise: A faster and less grid-aligned version of Perlin noise.
-- Worley noise: A variant of Voronoi noise that can create more organic and realistic textures.
-- Fractal noise: A combination of multiple layers of noise with different frequencies and amplitudes (see the sketch after this list).
-
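-The fractal idea in particular is easy to demonstrate outside Blender. The NumPy sketch below sums a few octaves of coarse random lattice noise with halving amplitude; the grid size, octave count, and seed are arbitrary illustration values, and the lattice lookup is deliberately unsmoothed (a real implementation would interpolate between lattice values).
-```python
-# Fractal (octave-summed) 3D noise sketch in NumPy; values are illustrative.
-import numpy as np
-
-def lattice_noise_3d(shape, cell):
-    """Coarse random noise: random lattice values repeated over cell-sized blocks."""
-    rng = np.random.default_rng(0)
-    coarse = rng.random([s // cell + 2 for s in shape])
-    idx = [np.arange(s) // cell for s in shape]       # nearest-neighbour lookup
-    return coarse[np.ix_(*idx)]
-
-def fractal_noise_3d(shape=(64, 64, 64), octaves=4):
-    out = np.zeros(shape)
-    amplitude, cell = 1.0, 16
-    for _ in range(octaves):                          # finer and fainter each octave
-        out += amplitude * lattice_noise_3d(shape, max(cell, 1))
-        amplitude *= 0.5
-        cell //= 2
-    return out / out.max()                            # normalize to 0..1
-
-volume = fractal_noise_3d()
-print(volume.shape, float(volume.min()), float(volume.max()))
-```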
- How can I use 3D noise texture in other software or game engines?
-If you want to use 3D noise texture in other software or game engines, you need to export it as an image file or a volume file. An image file is a 2D representation of the 3D noise texture, which can be used as a texture map for materials or shaders. A volume file is a 3D representation of the 3D noise texture, which can be used as volumetric data for rendering or simulation. You can export your 3D noise texture from Blender as an image file or a volume file by following these steps (a minimal scripted version of the image-file path is sketched after the list):
-
-- Select the object that has the 3D noise texture applied.
-- Go to the Render Properties panel and set the Render Engine to Cycles.
-- Go to the Output Properties panel and set the File Format to PNG for image file or OpenVDB for volume file.
-- Go to the View Layer Properties panel and enable the Data passes for Material Index, UV, Normal, Vector, Z, Mist, Emit, Ambient Occlusion, Environment, Shadow, Diffuse, Glossy, Transmission, Subsurface Scattering, Volume Direct, Volume Indirect, Cryptomatte Object, Cryptomatte Material.
-- Go to the Compositing workspace and add a File Output node by pressing Shift + A and choosing Output > File Output.
-- Connect the outputs of the Render Layers node to the inputs of the File Output node.
-- In the File Output node properties, set the Base Path to where you want to save your file.
-- Go to the Render menu and choose Render Image.
-
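-As a minimal scripted counterpart to the list above (covering only the image-file path; the output file name is an assumption), the render settings can also be set from Python inside Blender:
-```python
-# Sketch: select Cycles, choose PNG output and render the image to a file.
-import bpy
-
-scene = bpy.context.scene
-scene.render.engine = 'CYCLES'                       # step 2 of the list
-scene.render.image_settings.file_format = 'PNG'      # step 3, image-file case
-scene.render.filepath = "//noise_texture.png"        # '//' means relative to the .blend file
-bpy.ops.render.render(write_still=True)              # same effect as Render > Render Image
-```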
- How can I improve the quality of my 3D noise texture?
-If you want to improve the quality of your 3D noise texture, you can try some of these tips:
-
-- Increase the resolution of your image or volume file when exporting from Blender.
-- Increase the Scale value of your Noise Texture node in Blender to add more detail.
-- Increase the Detail value of your Noise Texture node in Blender to add more contrast.
-- Increase the Distortion value of your Noise Texture node in Blender to add more variation.
-- Mix different types of Noise Texture nodes in Blender using Mix RGB nodes or Math nodes.
-- Add some color or gradient to your Noise Texture node using Color Ramp nodes or Gradient Texture nodes.
-
- Where can I learn more about 3D noise texture?
-If you want to learn more about 3D noise texture, you can check out some of these resources:
-
-- The Book of Shaders: Noise: A chapter from an online book that explains the theory and practice of using noise in shaders.
-- Procedural Texturing and Modeling: A book that covers the principles and techniques of procedural texturing and modeling, including noise generation and application.
-- Noise Machine : A website that lets you create and download 3D noise texture images online.
-- Blender Cloud: Procedural Shading Fundamentals and Beyond: A video course that teaches you how to use procedural shading and texturing in Blender.
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Experience the Fun of Barbie Horse Riding Game - Free Download Available.md b/spaces/congsaPfin/Manga-OCR/logs/Experience the Fun of Barbie Horse Riding Game - Free Download Available.md
deleted file mode 100644
index b0b559ba4350223f10fdf0b04a344ae12a3b1576..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Experience the Fun of Barbie Horse Riding Game - Free Download Available.md
+++ /dev/null
@@ -1,89 +0,0 @@
-
-Barbie Horse Riding Game Download
-Do you love horses and adventure? Do you want to join Barbie and her friends in a fun and exciting riding club? If you answered yes, then you should definitely check out Barbie Riding Club, a classic computer game that will make you feel like you are in a real horse ranch. In this article, we will tell you everything you need to know about this game, including what it is, why you should play it, how to download and install it, and some tips and tricks for playing it. Let's get started!
-Download link: https://urlca.com/2uOdPu
- What is Barbie Riding Club?
-Barbie Riding Club is a 1998 computer game developed by American studio Human Code and published by Mattel Media. It is one of the most popular and beloved Barbie games ever made, and it has a loyal fan base even today. The game involves feeding, grooming, and riding horses, as well as exploring a beautiful open world with different locations, such as the stables, the meadows, the forest, the beach, and the town. You can choose from four different horses to adopt and name, each with their own personality and appearance. You can also customize your own rider outfit and accessories. The game has a story mode where you have to help Barbie and her friends solve a mystery involving a legendary wild horse, as well as a free mode where you can roam around and have fun with your horse. The game also features mini-games, such as jumping, racing, and obstacle courses, where you can earn trophies and medals.
- Why should you play Barbie Riding Club?
-There are many reasons why you should play Barbie Riding Club, whether you are a fan of Barbie or not. Here are some of them:
-
-- It is a fun and relaxing game that will make you feel happy and calm.
-- It is a great way to learn about horses and how to take care of them.
-- It is a creative game that allows you to express your style and personality.
-- It is an educational game that teaches you about nature and wildlife.
-- It is an adventurous game that challenges you to explore and discover new things.
-- It is a nostalgic game that will bring back memories of your childhood.
-
- How to download and install Barbie Riding Club?
-If you want to play Barbie Riding Club on your computer, you will need to download and install it first. Here are the steps you need to follow:
-
-- Go to [this link](^1^) where you can find the original CD-ROM image of the game.
-- Click on the "DOWNLOAD OPTIONS" button and choose "ISO IMAGE". This will download a file called "barbieridingclub_201908.iso".
-- You will need a program that can mount ISO files, such as Daemon Tools or Virtual CloneDrive. Install one of these programs on your computer if you don't have one already.
-- Right-click on the ISO file and choose "Mount" from the menu. This will create a virtual drive on your computer that contains the game files.
-- Open the virtual drive and double-click on the "SETUP.EXE" file. This will launch the installation wizard for the game.
-- Follow the instructions on the screen to complete the installation process. You may need to restart your computer after the installation.
-- Once the installation is done, you can launch the game from your desktop or start menu. Enjoy!
-
- Tips and tricks for playing Barbie Riding Club
-To make the most out of your gaming experience, here are some tips and tricks for playing Barbie Riding Club:
-
-- To feed your horse, you need to go to the feed room in the stables and select the type of food you want. Then, you need to drag the food to your horse's mouth and wait until it finishes eating.
-- To groom your horse, you need to go to the tack room in the stables and select the brush, comb, or sponge you want. Then, you need to click and drag the tool over your horse's body and mane until it sparkles.
-- To ride your horse, you need to go to the riding arena or the trail map and select the location you want to go. Then, you need to use the arrow keys on your keyboard to control your horse's movement and speed. You can also use the spacebar to make your horse jump over obstacles.
-- To play mini-games, you need to find and click on the icons that appear on the screen when you are riding. These icons will indicate what type of mini-game is available, such as jumping, racing, or obstacle courses. You can earn trophies and medals by completing these mini-games successfully.
-- To save your progress, you need to go to the clubhouse and click on the computer. Then, you need to select the "Save Game" option and choose a slot for your game. You can also load a previous game from here.
-
- Conclusion
-Barbie Riding Club is a wonderful game that will make you feel like you are part of a real riding club. You can have fun with your horse, explore a beautiful world, and solve a mystery with Barbie and her friends. You can also learn a lot about horses and nature along the way. If you are looking for a game that is fun, relaxing, creative, educational, adventurous, and nostalgic, then you should definitely download and play Barbie Riding Club today. You won't regret it!
- FAQs
-Here are some frequently asked questions and answers about Barbie Riding Club:
-
-- Q: What are the system requirements for Barbie Riding Club?
A: You will need a Windows 95/98/ME/XP/Vista/7/8/10 computer with a Pentium 133 MHz processor or higher, 16 MB of RAM or more, 100 MB of hard disk space or more, a 4x CD-ROM drive or faster, a 16-bit sound card or better, and a 256-color SVGA graphics card or better.
-- Q: How can I change the language of the game?
A: You can change the language of the game by going to the options menu in the main menu and selecting the "Language" option. You can choose from English, French, German, Spanish, Italian, Dutch, Portuguese, Swedish, Norwegian, Danish, Finnish, Polish, Czech, Hungarian, Russian, Greek, Turkish, Hebrew, Arabic, Chinese (Simplified), Chinese (Traditional), Japanese, Korean, Thai, Indonesian, Malay, Vietnamese, Hindi, Bengali, Tamil, Telugu, Urdu, Persian, or Swahili.
-- Q: How can I unlock the legendary wild horse?
A: You can unlock the legendary wild horse by completing the story mode of the game. You will need to find all the clues and solve all the puzzles that will lead you to the secret location of the wild horse. Once you find it, you will be able to adopt it and name it.
-- Q: How can I get more outfits and accessories for my rider?
A: You can get more outfits and accessories for your rider by going to the boutique in the town and buying them with your money. You can earn money by playing mini-games and winning trophies and medals. You can also find some outfits and accessories hidden in some locations.
-- Q: How can I play Barbie Riding Club with my friends?
A: You can play Barbie Riding Club with your friends by using a LAN (local area network) connection or an online service such as GameRanger or Hamachi. You will need to have two or more copies of the game installed on different computers and connect them using one of these methods. Then, you will be able to join or host a multiplayer session where you can chat and ride with your friends.
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Plane Game APK Have Fun with Amazing 3D Graphics and Physics in Airplane Games.md b/spaces/congsaPfin/Manga-OCR/logs/Plane Game APK Have Fun with Amazing 3D Graphics and Physics in Airplane Games.md
deleted file mode 100644
index 0f91946db687ea518ce35b87a3b394bd94d1af8a..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Plane Game APK Have Fun with Amazing 3D Graphics and Physics in Airplane Games.md
+++ /dev/null
@@ -1,97 +0,0 @@
-
-Plane Game APK: How to Download and Play the Best Plane Simulator Games on Your Android Device
- Do you love flying planes and exploring the sky? Do you want to experience the thrill and excitement of piloting different types of aircraft? Do you wish you could travel to various destinations and scenarios without leaving your home? If you answered yes to any of these questions, then you should try playing plane game apk on your Android device.
- What is a plane game apk?
- An apk file is an Android application package that contains all the files and resources needed to install and run an app on your device.
- An apk file is like a zip file that you can unzip and access its contents. You can download apk files from various sources, such as official app stores, third-party websites, or directly from developers. However, you need to be careful when downloading apk files from unknown or untrusted sources, as they may contain malware or viruses that can harm your device or compromise your privacy.
-Download link: https://urlca.com/2uO82f
- A plane game apk is an apk file that contains a plane simulator game, which is a type of video game that simulates the flight and operation of various types of aircraft.
- A plane simulator game is a game that lets you control a plane and perform various tasks, such as taking off, landing, navigating, maneuvering, combatting, etc. You can choose from different planes, such as commercial jets, military fighters, helicopters, gliders, etc., and customize their features, such as color, design, engine, weapons, etc. You can also select from different locations and scenarios, such as airports, cities, mountains, oceans, war zones, etc., and adjust the weather conditions, time of day, difficulty level, etc. A plane simulator game can provide you with realistic and immersive flight simulation experiences that can make you feel like a real pilot.
- Why should you download and play plane game apk?
- Plane game apk can offer you many benefits, such as:
- - Enjoying realistic and immersive flight simulation experiences
- Plane game apk can give you the opportunity to experience what it's like to fly a plane in various situations and environments. You can see the cockpit view, hear the engine sound, feel the turbulence, etc. You can also interact with other planes, air traffic controllers, passengers, etc. You can test your skills and knowledge in handling different planes and challenges. You can also learn new things about aviation and aerodynamics.
- - Learning about different planes and their features
- Plane game apk can help you discover different types of planes and their characteristics. You can compare their performance, speed, maneuverability, fuel consumption, etc. You can also learn about their history, development, purpose, etc. You can also customize your plane according to your preferences and style. You can also unlock new planes and features as you progress in the game.
- - Exploring various locations and scenarios around the world
- Plane game apk can allow you to travel to different places and situations that you may not be able to visit in real life. You can fly over famous landmarks, such as the Eiffel Tower, the Statue of Liberty, the Great Wall of China, etc. You can also experience different weather conditions, such as rain, snow, fog, storm, etc. You can also face different challenges and dangers, such as engine failure, bird strike, enemy attack, etc. You can also enjoy different views and perspectives, such as the bird's eye view, the satellite view, the first-person view, etc.
- - Challenging yourself with different missions and objectives
- Plane game apk can test your abilities and creativity in completing various tasks and goals. You can choose from different modes and levels of difficulty, such as free flight, career mode, multiplayer mode, etc. You can also follow different scenarios and stories, such as rescue missions, cargo delivery, air combat, etc. You can also compete with other players and rank on leaderboards. You can also earn rewards and achievements for your performance.
- - Having fun and relaxing with your favorite hobby
- Plane game apk can provide you with entertainment and relaxation with your favorite hobby. You can play whenever you want and wherever you are. You can also play with your friends and family online or offline. You can also customize your game settings and controls according to your preferences. You can also enjoy the graphics and sound effects of the game. You can also share your screenshots and videos of your flights on social media.
- How to download and play plane game apk?
- To download and play plane game apk, you need to follow these steps:
- - Find a reliable and safe source for downloading plane game apk, such as APKPure.com
- APKPure.com is a website that offers free and secure downloads of apk files for various Android apps and games. You can browse through different categories and genres of apps and games, such as action, adventure, simulation, etc. You can also search for specific apps and games by name or keyword. You can also read the reviews and ratings of other users before downloading.
- - Choose the plane game apk that suits your preferences and device specifications, such as Plane Simulator 3D
- Plane Simulator 3D is one of the best plane simulator games available on APKPure.com. It has a rating of 4.1 out of 5 stars from over 300 thousand users. It has a size of 21 MB and requires Android 2.1 or higher to run. It offers realistic 3D graphics, physics-based controls, over 24 planes to choose from, over 10 global locations to fly over, over 50 missions to complete, etc.
- - Download the plane game apk file to your device and install it by allowing unknown sources in your settings
- To download the plane game apk file from APKPure.com, you need to click on the download button on the app page. The file will be saved in your device's download folder or any other location you choose. To install the plane game apk file, go to your device settings and enable installation from unknown sources, which lets you install apps that are not from official app stores. Then locate the plane game apk file on your device and tap on it to start the installation process.
- - Launch the plane game apk and follow the instructions to start playing
- To launch the plane game apk on your device, you need to find its icon on your home screen or app drawer and tap on it. The game will open and show you its main menu. You can choose from different options, such as start game, settings, achievements, etc. You can also watch a tutorial video or read a guide on how to play the game. Then you can select your plane, location, mode, level, etc., and start playing.
- - Enjoy the best plane simulator games on your Android device
- Now you are ready to enjoy the best plane simulator games on your Android device. You can fly different planes in different locations and scenarios. You can learn new things about aviation and aerodynamics. You can challenge yourself with different missions and objectives. You can have fun and relax with your favorite hobby. You can also play with your friends and family online or offline. You can also share your screenshots and videos of your flights on social media.
- Conclusion
- Plane game apk is a great way to enjoy the best plane simulator games on your Android device. You can download and play plane game apk from reliable and safe sources, such as APKPure.com. You can choose from different planes, locations, modes, levels, etc., and have realistic and immersive flight simulation experiences. You can also learn, challenge, have fun, and relax with plane game apk. So what are you waiting for? Download and play plane game apk today and become the best pilot ever!
- FAQs
- Q: What are some of the best plane game apk available on APKPure.com?
-A: Some of the best plane game apk available on APKPure.com are Plane Simulator 3D, Flight Pilot Simulator 3D Free, Airplane Simulator 2020: Flight Simulator Games, Infinite Flight - Flight Simulator, etc.
- Q: How can I update my plane game apk to the latest version?
-A: You can update your plane game apk to the latest version by visiting the app page on APKPure.com and clicking on the update button. You can also enable the auto-update option in your settings to get the latest updates automatically.
- Q: How can I uninstall my plane game apk from my device?
-A: You can uninstall your plane game apk from your device by going to your settings and selecting the apps or applications option. Then you need to find the plane game apk you want to uninstall and tap on it. Then you need to tap on the uninstall button and confirm your action.
- Q: How can I contact the developers of my plane game apk if I have any issues or feedback?
-A: You can contact the developers of your plane game apk by visiting their official website or social media pages. You can also find their contact information on the app page on APKPure.com. You can also leave a comment or review on the app page to share your issues or feedback.
- Q: How can I improve my performance and battery life while playing plane game apk?
-A: You can improve your performance and battery life while playing plane game apk by adjusting your game settings and device settings. You can lower the graphics quality, sound volume, brightness, etc., to reduce the load on your device. You can also close other apps and turn off unnecessary features, such as Wi-Fi, Bluetooth, GPS, etc., to save battery power.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/RPG APK Mod (Unlimited) - The Ultimate Collection of Role Playing Games with Unlimited Features and Possibilities.md b/spaces/congsaPfin/Manga-OCR/logs/RPG APK Mod (Unlimited) - The Ultimate Collection of Role Playing Games with Unlimited Features and Possibilities.md
deleted file mode 100644
index c78cf038943ad6d5c352e5cd6ba5ef2d38202486..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/RPG APK Mod (Unlimited) - The Ultimate Collection of Role Playing Games with Unlimited Features and Possibilities.md
+++ /dev/null
@@ -1,136 +0,0 @@
-
-RPG APK Mod (Unlimited): What You Need to Know
-If you are a fan of role-playing games (RPGs), you might have heard of APK mods that can enhance your gaming experience. But what are they exactly and how do they work? In this article, we will explain what RPG games are, what APK mods are, and what are the best RPG APK mod games you can play in 2023. Whether you are looking for unlimited resources, unlocked features, or improved graphics, we have got you covered.
- What is an RPG game?
-An RPG game is a genre of video game where you control a character or a group of characters that undertake quests in a fictional world. You can customize your character's appearance, skills, abilities, and equipment, and make choices that affect the story and the gameplay. RPG games usually have a rich and immersive story, a complex and dynamic world, and a variety of characters and enemies to interact with.
- Definition and examples of RPG games
-According to Techopedia, a role-playing game (RPG) is "a genre of video game where the gamer controls a fictional character (or characters) that undertakes a quest in an imaginary world." Some examples of popular RPG games are The Elder Scrolls, Final Fantasy, The Witcher, Dragon Age, and Fallout.
- Types and genres of RPG games
-RPG games can be divided into different types and genres based on their gameplay mechanics, settings, themes, and features. Some of the most common types and genres of RPG games are:
-
-- Action RPG: A type of RPG game that focuses on real-time combat and action-oriented gameplay. Examples: Diablo, Dark Souls, Kingdom Hearts.
-- Turn-based RPG: A type of RPG game that uses a turn-based system for combat and other actions. Examples: Pokemon, Persona, Final Fantasy X.
-- Tactical RPG: A type of RPG game that involves strategic planning and positioning of characters on a grid or a map. Examples: Fire Emblem, XCOM, Disgaea.
-- MMORPG: A type of RPG game that is played online with other players in a massive multiplayer online world. Examples: World of Warcraft, Guild Wars 2, Final Fantasy XIV.
-- Fantasy RPG: A genre of RPG game that is set in a fantasy world with magic, mythical creatures, and medieval elements. Examples: The Elder Scrolls, Dragon Age, The Witcher.
-- Sci-fi RPG: A genre of RPG game that is set in a science fiction world with futuristic technology, aliens, and space exploration. Examples: Mass Effect, Star Wars, Deus Ex.
-- Horror RPG: A genre of RPG game that is designed to create a sense of fear, suspense, and terror in the player. Examples: Resident Evil, Silent Hill, Bloodborne.
-
- What is an APK mod?
-An APK mod is a modified version of an original APK file that has been altered by someone to add or change some features or functions. An APK file is the format used by Android devices to install applications. By using an APK mod, you can enjoy some benefits that are not available in the original version of the app or game.
- Definition and benefits of APK mods
-According to Saikotech, "Mod Apk is an application file that has been modified by someone who has access to the source code or has reverse engineered it." Some of the benefits of using an APK mod are accessing unlimited resources, unlocking premium features, enhancing graphics, sound, or performance, removing ads or in-app purchases, adding new content, and playing offline. However, APK mods also come with risks, such as:
-
-- Exposing your device to malware, viruses, or spyware.
-- Violating the terms and conditions of the original app or game developer and getting banned or suspended from using their services.
-- Encountering bugs, glitches, or compatibility issues that can affect the functionality or stability of the game.
-- Losing your progress, achievements, or rewards if the mod is not updated or compatible with the latest version of the game.
-
- Therefore, you should be careful and cautious when using APK mods. Some of the precautions you should take are:
-
-- Download APK mods only from trusted and reputable sources that have positive reviews and ratings from other users.
-- Scan the APK mod file with a reliable antivirus or anti-malware software before installing it on your device.
-- Backup your data and progress before using an APK mod in case something goes wrong or you want to revert to the original version.
-- Use a VPN or a proxy to hide your IP address and location when playing online games with an APK mod to avoid detection and banning.
-- Use a secondary or dummy account to test the APK mod before using it on your main account to avoid losing your data or reputation.
-
- What are the best RPG APK mod games?
-Now that you know what RPG games and APK mods are, you might be wondering what are the best RPG APK mod games you can play in 2023. There are many RPG APK mod games available on the internet, but not all of them are worth your time and attention. To help you choose the best RPG APK mod games, we have used the following criteria:
- Criteria for choosing the best RPG APK mod games
-The criteria we have used to select the best RPG APK mod games are:
-
-- The quality and popularity of the original RPG game, based on its ratings, reviews, awards, and fan base.
-- The features and benefits of the APK mod, based on what it adds or changes to the original RPG game.
-- The safety and reliability of the APK mod, based on its source, compatibility, updates, and feedback from other users.
-
- Top 5 RPG APK mod games in 2023
-Based on these criteria, we have compiled a list of the top 5 RPG APK mod games you can play in 2023. These are:
- BW&Heroes:Offline Dark Fantasy Hack and Slash
-BW&Heroes is an action RPG game that lets you explore a dark fantasy world full of monsters, dungeons, and loot. You can choose from four different classes: Warrior, Mage, Rogue, or Archer, and customize your skills and equipment. You can also recruit and upgrade heroes to fight by your side.
- The APK mod for BW&Heroes gives you unlimited gold, gems, and energy, as well as unlocked all heroes and items. You can also enjoy improved graphics and sound quality. You can download the APK mod from .
- Eternium
-Eternium is a classic RPG game that pays tribute to the old-school games like Diablo, Torchlight, or Titan Quest. You can create your own character from three classes: Mage, Warrior, or Bounty Hunter, and embark on an epic adventure across various worlds. You can also craft your own weapons and armor, learn new spells and abilities, and collect pets and companions.
- The APK mod for Eternium gives you unlimited gold, gems, and resources, as well as unlocked all skills and talents. You can also play offline without any internet connection. You can download the APK mod from .
- SoulCraft
-SoulCraft is a hack and slash RPG game that puts you in the role of an angel who has to fight against the demons that have invaded the earth. You can choose from six different game modes: Story Mode, Survival Mode,
Adventure Mode, Arena Mode, Hellgate Mode, or Crystal Defense Mode. You can also join forces with other players in co-op mode or compete against them in PvP mode.
- The APK mod for SoulCraft gives you unlimited gold, souls, and items, as well as unlocked all modes and features. You can also play offline without any internet connection. You can download the APK mod from .
- Legendary Heroes MOBA Offline
-Legendary Heroes is a MOBA (multiplayer online battle arena) game that combines the best elements of RPG and strategy games. You can choose from over 40 heroes, each with their own unique skills and abilities, and fight in 4v4 battles across various maps and modes. You can also upgrade your heroes and items, and collect runes and crystals to boost your power.
- The APK mod for Legendary Heroes gives you unlimited gold, crystals, and energy, as well as unlocked all heroes and items. You can also play offline without any internet connection. You can download the APK mod from .
- Rick and Morty: Pocket Mortys
-Rick and Morty: Pocket Mortys is a RPG game based on the popular animated series Rick and Morty. You can play as Rick, a mad scientist who travels across different dimensions with his grandson Morty. You can collect and train over 300 different Mortys, each with their own personality and appearance, and battle other Ricks and Mortys in the multiverse.
- The APK mod for Rick and Morty: Pocket Mortys gives you unlimited money, coupons, and schmeckles, as well as unlocked all Mortys and items. You can also enjoy improved graphics and sound quality. You can download the APK mod from .
- Conclusion
-RPG games are one of the most popular and enjoyable genres of video games that let you immerse yourself in a fictional world and create your own character and story. APK mods are modified versions of original APK files that can enhance your gaming experience by adding or changing some features or functions. However, APK mods are not without risks, so you should be careful and cautious when using them.
- If you are looking for the best RPG APK mod games to play in 2023, we have recommended five of them based on their quality, popularity, features, benefits, safety, and reliability. These are BW&Heroes:Offline Dark Fantasy Hack and Slash, Eternium, SoulCraft, Legendary Heroes MOBA Offline, and Rick and Morty: Pocket Mortys. You can download them from the links provided in this article and enjoy unlimited resources, unlocked features, improved graphics, offline mode, and more.
- We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Happy gaming!
- FAQs
-
-- Q: What is the difference between an RPG game and an APK mod?
-- A: An RPG game is a genre of video game where you control a character or a group of characters that undertake quests in a fictional world. An APK mod is a modified version of an original APK file that has been altered by someone to add or change some features or functions.
-- Q: What are the benefits of using an APK mod?
-- A: Some of the benefits of using an APK mod are accessing unlimited resources, unlocking premium features, enhancing graphics, sound, or performance, removing ads or in-app purchases, adding new content, playing offline, etc.
-- Q: What are the risks of using an APK mod?
-- A: Some of the risks of using an APK mod are exposing your device to malware, viruses, or spyware, violating the terms and conditions of the original app or game developer and getting banned or suspended from using their services, encountering bugs, glitches, or compatibility issues that can affect the functionality or stability of the game, losing your progress, achievements, or rewards if the mod is not updated or compatible with the latest version of the game, etc.
-- Q: How can I avoid the risks of using an APK mod?
-- A: Some of the precautions you can take are downloading APK mods only from trusted and reputable sources that have positive reviews and ratings from other users, scanning the APK mod file with a reliable antivirus or anti-malware software before installing it on your device, backing up your data and progress before using an APK mod in case something goes wrong or you want to revert to the original version, using a VPN or a proxy to hide your IP address and location when playing online games with an APK mod to avoid detection and banning, using a secondary or dummy account to test the APK mod before using it on your main account to avoid losing your data or reputation, etc.
-- Q: What are some of the best RPG APK mod games in 2023?
-- A: Some of the best RPG APK mod games in 2023 are BW&Heroes:Offline Dark Fantasy Hack and Slash, Eternium, SoulCraft, Legendary Heroes MOBA Offline, and Rick and Morty: Pocket Mortys. You can download them from the links provided in this article and enjoy unlimited resources, unlocked features, improved graphics, offline mode, and more.
-- Q: How can I install an APK mod on my device?
-- A: To install an APK mod on your device, you need to follow these steps:
-
-- Download the APK mod file from a trusted and reputable source.
-- Enable the installation of apps from unknown sources on your device settings.
-- Locate the APK mod file on your device storage and tap on it to start the installation process.
-- Follow the instructions on the screen and wait for the installation to complete.
-- Launch the app or game and enjoy the modded features.
-
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Truck Simulator Ultimate APK - Oyun ndir Clubta Dnyann En Byk Lojistik irketi Olun.md b/spaces/congsaPfin/Manga-OCR/logs/Truck Simulator Ultimate APK - Oyun ndir Clubta Dnyann En Byk Lojistik irketi Olun.md
deleted file mode 100644
index 6a81722cd23bf4a3e25d085567549b1a979027a8..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Truck Simulator Ultimate APK - Oyun ndir Clubta Dnyann En Byk Lojistik irketi Olun.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-Truck Simulator : Ultimate - A New Game from Zuuks Games
-If you are a fan of simulation games, especially truck simulation games, you might want to check out Truck Simulator : Ultimate, a new game from Zuuks Games, the makers of Bus Simulator : Ultimate. This game lets you experience the thrill of driving a truck across different countries and continents, as well as managing your own truck company. In this article, we will tell you more about this game and how to download and install it on your Android device.
-zuuks games truck simulator ultimate apk oyun indir club
Download Zip ⭐ https://urlca.com/2uO5mn
- What is Truck Simulator : Ultimate?
-Truck Simulator : Ultimate is a game that combines two genres: simulation and tycoon. You can play it in two ways:
- A realistic and immersive truck simulation game
-In this mode, you can drive official Mercedes-Benz licensed trucks on realistic roads and cities. You can choose from different models of trucks, each with its own features and performance. You can also customize your truck with various accessories, such as lamps, horns, cockpit lights, and more. You can transport different types of cargoes, such as food, furniture, chemicals, cars, etc., to over 100 cities in countries like Turkey, USA, Canada, Russia, Germany, Italy, France, Spain, Netherlands, South Korea, Japan, China, Brazil, Azerbaijan, etc. You can also enjoy realistic weather conditions, such as rain, snow, fog, etc., as well as traffic rules and toll roads.
- A tycoon game where you can manage your own truck company
-In this mode, you can create your own truck company and become the world's largest logistics company. You can hire employees, expand your fleet, bid on cargo contracts, design your offices, and more. You can also compete with other players in multiplayer season mode. You can either cooperate with other players to deliver cargoes together or join races to win prizes. You can also chat with other players and make friends.
- What are the features of Truck Simulator : Ultimate?
-Truck Simulator : Ultimate has many features that make it an enjoyable and addictive game. Some of them are:
- Official Mercedes-Benz licensed trucks
-You can drive authentic trucks from Mercedes-Benz in this game. You can choose from different models of trucks that have different specifications and capabilities. You can also see the detailed interiors of the trucks and enjoy the realistic sounds and physics.
- Multiplayer season with cooperative and competitive modes
-You can play with other players online in this game. You can either join cooperative mode or competitive mode. In cooperative mode, you can team up with other players to deliver cargoes together. You can share the profits and expenses with your teammates. In competitive mode, you can join races with other players to win prizes. You can also chat with other players in real time and make friends.
- Customizable trucks and offices
-You can customize your trucks and offices in this game. You can change the color of your trucks, add accessories like lamps, horns, cockpit lights, etc., upgrade your engines and tires, etc. You can also design your offices according to your taste. You can choose from different furniture styles and colors, add plants and paintings, etc. You can also see the statistics of your company, such as revenue, expenses, profit, reputation, etc.
- Over 100 cities and various cargoes to transport
-You can travel to over 100 cities in different countries and continents in this game. You can see the landmarks and scenery of each city and enjoy the diversity of cultures and languages. You can also transport various types of cargoes, such as food, furniture, chemicals, cars, etc. You can see the weight, volume, and value of each cargo and choose the best route and truck for it. You can also face different challenges and risks, such as road accidents, traffic jams, police checks, etc.
- Realistic weather, traffic, and road conditions
-You can experience realistic weather conditions in this game. You can see the changes of day and night, as well as rain, snow, fog, etc. You can also feel the effects of weather on your driving, such as slippery roads, reduced visibility, etc. You can also encounter realistic traffic and road conditions in this game. You can see other vehicles on the road, such as cars, buses, motorcycles, etc. You can also follow the traffic rules and signs, such as speed limits, traffic lights, toll roads, etc.
- How to download and install Truck Simulator : Ultimate APK?
-If you want to play Truck Simulator : Ultimate on your Android device, you need to download and install the APK file of the game. Here are the steps to do it:
- Download the APK file from a trusted source
-You can download the APK file of Truck Simulator : Ultimate from various sources on the internet. However, you need to be careful and choose a trusted source that does not contain any viruses or malware. One of the sources that we recommend is Oyun Indir Club, a website that provides free and safe APK downloads for various games and apps. You can visit their website and search for Truck Simulator : Ultimate APK. Then you can click on the download button and wait for the file to be downloaded.
- Enable unknown sources on your device settings
-Before you can install the APK file of Truck Simulator : Ultimate on your device, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, you need to go to your device settings and look for security or privacy options. Then you need to find the option that says unknown sources or allow installation from unknown sources. You need to toggle it on or check it to enable it.
- Install the APK file and launch the game
-After you have downloaded the APK file of Truck Simulator : Ultimate and enabled unknown sources on your device settings, you can install the APK file on your device. To do this, you need to locate the APK file on your device storage using a file manager app or your browser's downloads folder. Then you need to tap on the APK file and follow the instructions on the screen to install it. After the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer.
- Conclusion
-Truck Simulator : Ultimate is a new game from Zuuks Games that lets you drive realistic trucks and manage your own truck company. It has many features that make it an enjoyable and addictive game for simulation and tycoon fans. You can download and install it on your Android device by following the steps above. We hope you have fun playing this game and share your feedback with us.
- FAQs
-
-- What are the system requirements for Truck Simulator : Ultimate?
-The minimum system requirements for Truck Simulator : Ultimate are Android 5.0 or higher, 2 GB of RAM, 1 GB of free storage space, and a stable internet connection.
-- How can I contact Zuuks Games for support or feedback?
-You can contact Zuuks Games by sending an email to info@zuuks.com or by visiting their website at https://www.zuuks.com/. You can also follow them on social media platforms like Facebook, Twitter, Instagram, YouTube, etc.
-- How can I update Truck Simulator : Ultimate?
-You can update Truck Simulator : Ultimate by downloading and installing the latest version of the APK file from Oyun Indir Club or other trusted sources. Alternatively, you can wait for the official update from Zuuks Games on Google Play Store.
-- How can I play Truck Simulator : Ultimate offline?
-You can play Truck Simulator : Ultimate offline by turning off your internet connection before launching the game. However, you will not be able to access some features that require an online connection, such as multiplayer season, chat, etc.
-
- How can I get more money and gold in Truck Simulator : Ultimate?
-You can get more money and gold in Truck Simulator : Ultimate by completing missions, delivering cargoes, winning races, etc. You can also watch ads or make in-app purchases to get more money and gold.
-
-
-
\ No newline at end of file
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/annotator_path.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/annotator_path.py
deleted file mode 100644
index ba168e19cf0eb7f7dae6ac3d54c5977945e7386a..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/annotator_path.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import os
-from modules import shared
-
-models_path = shared.opts.data.get('control_net_modules_path', None)
-if not models_path:
- models_path = getattr(shared.cmd_opts, 'controlnet_annotator_models_path', None)
-if not models_path:
- models_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'downloads')
-
-if not os.path.isabs(models_path):
- models_path = os.path.join(shared.data_path, models_path)
-
-clip_vision_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'clip_vision')
-# clip vision is always inside controlnet "extensions\sd-webui-controlnet"
-# and any problem can be solved by removing controlnet and reinstalling it
-
-models_path = os.path.realpath(models_path)
-os.makedirs(models_path, exist_ok=True)
-print(f'ControlNet preprocessor location: {models_path}')
-# Make sure that the default location is inside controlnet "extensions\sd-webui-controlnet"
-# so that any problem can be solved by removing controlnet and reinstalling it
-# if users do not change configs on their own (otherwise users will know what is wrong)
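-
-# Illustrative note (the file name below is hypothetical): annotator modules are
-# expected to join this base path with their own subfolder when locating weights,
-# e.g. model_file = os.path.join(models_path, 'hed', 'some_annotator_model.pth')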
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/cnn/utils/sync_bn.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/cnn/utils/sync_bn.py
deleted file mode 100644
index c0dbcb1b167ea0df690c0f47fe0217a3454b5d59..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/cnn/utils/sync_bn.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import torch
-
-import annotator.mmpkg.mmcv as mmcv
-
-
-class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
- """A general BatchNorm layer without input dimension check.
-
- Reproduced from @kapily's work:
- (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
-    The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc.
-    is `_check_input_dim`, which is designed for tensor sanity checks.
- The check has been bypassed in this class for the convenience of converting
- SyncBatchNorm.
- """
-
- def _check_input_dim(self, input):
- return
-
-
-def revert_sync_batchnorm(module):
- """Helper function to convert all `SyncBatchNorm` (SyncBN) and
- `mmcv.ops.sync_bn.SyncBatchNorm`(MMSyncBN) layers in the model to
- `BatchNormXd` layers.
-
- Adapted from @kapily's work:
- (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
-
- Args:
- module (nn.Module): The module containing `SyncBatchNorm` layers.
-
- Returns:
- module_output: The converted module with `BatchNormXd` layers.
- """
- module_output = module
- module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm]
- if hasattr(mmcv, 'ops'):
- module_checklist.append(mmcv.ops.SyncBatchNorm)
- if isinstance(module, tuple(module_checklist)):
- module_output = _BatchNormXd(module.num_features, module.eps,
- module.momentum, module.affine,
- module.track_running_stats)
- if module.affine:
- # no_grad() may not be needed here but
- # just to be consistent with `convert_sync_batchnorm()`
- with torch.no_grad():
- module_output.weight = module.weight
- module_output.bias = module.bias
- module_output.running_mean = module.running_mean
- module_output.running_var = module.running_var
- module_output.num_batches_tracked = module.num_batches_tracked
- module_output.training = module.training
- # qconfig exists in quantized models
- if hasattr(module, 'qconfig'):
- module_output.qconfig = module.qconfig
- for name, child in module.named_children():
- module_output.add_module(name, revert_sync_batchnorm(child))
- del module
- return module_output
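-
-
-if __name__ == '__main__':
-    # Minimal usage sketch, assuming torchvision is installed and the module's
-    # own imports resolve; this file does not otherwise depend on torchvision.
-    # The model is converted to SyncBatchNorm and then reverted so that a
-    # forward pass works on CPU without an initialized process group.
-    import torchvision
-
-    model = torchvision.models.resnet18()
-    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
-    model = revert_sync_batchnorm(model)
-    out = model(torch.randn(2, 3, 224, 224))
-    print(out.shape)  # torch.Size([2, 1000])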
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/ops/three_nn.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/ops/three_nn.py
deleted file mode 100644
index 2b01047a129989cd5545a0a86f23a487f4a13ce1..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/ops/three_nn.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from typing import Tuple
-
-import torch
-from torch.autograd import Function
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', ['three_nn_forward'])
-
-
-class ThreeNN(Function):
- """Find the top-3 nearest neighbors of the target set from the source set.
-
-    Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
- for more details.
- """
-
- @staticmethod
- def forward(ctx, target: torch.Tensor,
- source: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- """
- Args:
- target (Tensor): shape (B, N, 3), points set that needs to
- find the nearest neighbors.
- source (Tensor): shape (B, M, 3), points set that is used
- to find the nearest neighbors of points in target set.
-
-        Returns:
-            Tuple[torch.Tensor, torch.Tensor]: ``dist`` of shape (B, N, 3),
-                the L2 distance from each point in the target set to its
-                three nearest neighbors in the source set, and ``idx`` of
-                shape (B, N, 3), the indices of those neighbors.
- """
- target = target.contiguous()
- source = source.contiguous()
-
- B, N, _ = target.size()
- m = source.size(1)
- dist2 = torch.cuda.FloatTensor(B, N, 3)
- idx = torch.cuda.IntTensor(B, N, 3)
-
- ext_module.three_nn_forward(target, source, dist2, idx, b=B, n=N, m=m)
- if torch.__version__ != 'parrots':
- ctx.mark_non_differentiable(idx)
-
- return torch.sqrt(dist2), idx
-
- @staticmethod
- def backward(ctx, a=None, b=None):
- return None, None
-
-
-three_nn = ThreeNN.apply
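-
-
-if __name__ == '__main__':
-    # Illustrative sketch only: it assumes a CUDA device, the compiled `_ext`
-    # extension, and that the package-relative imports above resolve, so it is
-    # meant to run inside the full package rather than as a standalone file.
-    target = torch.rand(2, 16, 3).cuda()   # points that need neighbors
-    source = torch.rand(2, 64, 3).cuda()   # points searched for neighbors
-    dist, idx = three_nn(target, source)
-    print(dist.shape, idx.shape)  # torch.Size([2, 16, 3]) torch.Size([2, 16, 3])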
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/cnn/bricks/scale.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/cnn/bricks/scale.py
deleted file mode 100644
index c905fffcc8bf998d18d94f927591963c428025e2..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/cnn/bricks/scale.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-
-
-class Scale(nn.Module):
- """A learnable scale parameter.
-
- This layer scales the input by a learnable factor. It multiplies a
- learnable scale parameter of shape (1,) with input of any shape.
-
- Args:
- scale (float): Initial value of scale factor. Default: 1.0
- """
-
- def __init__(self, scale=1.0):
- super(Scale, self).__init__()
- self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
-
- def forward(self, x):
- return x * self.scale
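-
-
-if __name__ == '__main__':
-    # Minimal sketch: the layer starts close to an identity mapping (scale 1.0)
-    # and the factor is then learned like any other parameter during training.
-    layer = Scale(scale=1.0)
-    x = torch.randn(4, 8)
-    y = layer(x)
-    print(y.shape, layer.scale.requires_grad)  # torch.Size([4, 8]) True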
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/fpn_head.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/fpn_head.py
deleted file mode 100644
index 1241c55b0813d1ecdddf1e66e7c5031fbf78ed50..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/fpn_head.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import numpy as np
-import torch.nn as nn
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from annotator.uniformer.mmseg.ops import resize
-from ..builder import HEADS
-from .decode_head import BaseDecodeHead
-
-
-@HEADS.register_module()
-class FPNHead(BaseDecodeHead):
- """Panoptic Feature Pyramid Networks.
-
-    This head is the implementation of `Semantic FPN
-    <https://arxiv.org/abs/1901.02446>`_.
-
- Args:
-        feature_strides (tuple[int]): The strides of the input feature maps,
-            aligned with ``in_channels``. All strides are supposed to be
-            powers of 2, and the first one corresponds to the feature map
-            with the largest resolution.
- """
-
- def __init__(self, feature_strides, **kwargs):
- super(FPNHead, self).__init__(
- input_transform='multiple_select', **kwargs)
- assert len(feature_strides) == len(self.in_channels)
- assert min(feature_strides) == feature_strides[0]
- self.feature_strides = feature_strides
-
- self.scale_heads = nn.ModuleList()
- for i in range(len(feature_strides)):
- head_length = max(
- 1,
- int(np.log2(feature_strides[i]) - np.log2(feature_strides[0])))
- scale_head = []
- for k in range(head_length):
- scale_head.append(
- ConvModule(
- self.in_channels[i] if k == 0 else self.channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
- if feature_strides[i] != feature_strides[0]:
- scale_head.append(
- nn.Upsample(
- scale_factor=2,
- mode='bilinear',
- align_corners=self.align_corners))
- self.scale_heads.append(nn.Sequential(*scale_head))
-
- def forward(self, inputs):
-
- x = self._transform_inputs(inputs)
-
- output = self.scale_heads[0](x[0])
- for i in range(1, len(self.feature_strides)):
- # non inplace
- output = output + resize(
- self.scale_heads[i](x[i]),
- size=output.shape[2:],
- mode='bilinear',
- align_corners=self.align_corners)
-
- output = self.cls_seg(output)
- return output
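-
-
-# A hedged configuration sketch: the field names follow the usual mmseg
-# decode_head config style and are illustrative rather than guaranteed for
-# every mmseg version. FPNHead is typically paired with an FPN neck that
-# outputs four 256-channel feature maps at strides 4, 8, 16 and 32.
-# decode_head = dict(
-#     type='FPNHead',
-#     in_channels=[256, 256, 256, 256],
-#     in_index=[0, 1, 2, 3],
-#     feature_strides=[4, 8, 16, 32],
-#     channels=128,
-#     num_classes=19,
-#     align_corners=False)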
diff --git a/spaces/crashedice/signify/signify/gan/util/image_pool.py b/spaces/crashedice/signify/signify/gan/util/image_pool.py
deleted file mode 100644
index 6d086f882bc3d1b90c529fce6cddaaa75f2005d7..0000000000000000000000000000000000000000
--- a/spaces/crashedice/signify/signify/gan/util/image_pool.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import random
-import torch
-
-
-class ImagePool():
- """This class implements an image buffer that stores previously generated images.
-
- This buffer enables us to update discriminators using a history of generated images
- rather than the ones produced by the latest generators.
- """
-
- def __init__(self, pool_size):
- """Initialize the ImagePool class
-
- Parameters:
- pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
- """
- self.pool_size = pool_size
- if self.pool_size > 0: # create an empty pool
- self.num_imgs = 0
- self.images = []
-
- def query(self, images):
- """Return an image from the pool.
-
- Parameters:
- images: the latest generated images from the generator
-
- Returns images from the buffer.
-
-        For each image, with probability 0.5 the buffer returns the input
-        image itself; with probability 0.5 it returns an image previously
-        stored in the buffer and inserts the current image in its place.
- """
- if self.pool_size == 0: # if the buffer size is 0, do nothing
- return images
- return_images = []
- for image in images:
- image = torch.unsqueeze(image.data, 0)
- if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
- self.num_imgs = self.num_imgs + 1
- self.images.append(image)
- return_images.append(image)
- else:
- p = random.uniform(0, 1)
- if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
- random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
- tmp = self.images[random_id].clone()
- self.images[random_id] = image
- return_images.append(tmp)
- else: # by another 50% chance, the buffer will return the current image
- return_images.append(image)
- return_images = torch.cat(return_images, 0) # collect all the images and return
- return return_images
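-
-
-if __name__ == '__main__':
-    # Minimal sketch: during GAN training the discriminator can be fed a mix of
-    # the newest fake images and older ones drawn from the pool.
-    pool = ImagePool(pool_size=50)
-    fake_images = torch.randn(4, 3, 64, 64)
-    mixed = pool.query(fake_images)
-    print(mixed.shape)  # torch.Size([4, 3, 64, 64])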
diff --git a/spaces/cybercorejapan/human-detection-docker/models/trackers/reid_parallel_tracker/core/tracklet.py b/spaces/cybercorejapan/human-detection-docker/models/trackers/reid_parallel_tracker/core/tracklet.py
deleted file mode 100644
index d1c892cd44cc21dfe0321d7a150b94e3f2739948..0000000000000000000000000000000000000000
--- a/spaces/cybercorejapan/human-detection-docker/models/trackers/reid_parallel_tracker/core/tracklet.py
+++ /dev/null
@@ -1,427 +0,0 @@
-
-import numpy as np
-from collections import deque
-from .matching import iou_scores
-from .basetrack import BaseTrack, TrackState
-from .kalman_filter import KalmanFilter
-from copy import deepcopy
-import cv2
-
-RAD2DEG = 180.0/np.pi
-
-
-def add_stracks(tlista, tlistb):
- exists = {}
- res = []
- for t in tlista:
- exists[t.track_id] = 1
- res.append(t)
- for t in tlistb:
- tid = t.track_id
- if not exists.get(tid, 0):
- exists[tid] = 1
- res.append(t)
- return res
-
-
-def subtract_stracks(tlista, tlistb):
- stracks = {}
- for t in tlista:
- stracks[t.track_id] = t
- for t in tlistb:
- tid = t.track_id
- if stracks.get(tid, 0):
- del stracks[tid]
- return list(stracks.values())
-
-
-def remove_duplicate_stracks(stracksa, stracksb):
- pscores = iou_scores(stracksa, stracksb)
- pairs = np.where(pscores > 0.85)
- dupa, dupb = list(), list()
- for p, q in zip(*pairs):
- timep = stracksa[p].frame_id - stracksa[p].start_frame
- timeq = stracksb[q].frame_id - stracksb[q].start_frame
- if timep > timeq:
- dupb.append(q)
- else:
- dupa.append(p)
- resa = [t for i, t in enumerate(stracksa) if not i in dupa]
- resb = [t for i, t in enumerate(stracksb) if not i in dupb]
- return resa, resb
-
-
-class Tracklet(BaseTrack):
- shared_kalman = KalmanFilter()
-
- def __init__(self, tlwh, score, cls, angle=None,
- feat=None, feat_history=50,
- enable_buffer=True, obj_img=None):
- self._tlwh = np.asarray(tlwh, dtype=np.float32)
- # wait activate
- self.score = score
- self.cls = -1
-
- self.kalman_filter = None
- self.mean, self.covariance = None, None
- # add trajectory, last observation, last_mean,last_cov
- self.trajectory = None # buffer to keep track of trajectory
- # last observation before lost (frame_id,det_bbox)
- self.last_observation = None
- # last mean,last observation before lost
- self.last_mean, self.last_covariance = None, None
- self.last_frame_id = None # last frame_id before lost
-
- self.is_activated = False
-
- self.cls_hist = [] # (cls id, freq)
- self.update_cls(cls, score)
-
- self.tracklet_len = 0
-
- self.angle = angle
-
- # reid
- norm_feat = self.norm_feat(feat) if feat is not None else None
- self.smooth_feat = norm_feat
- self.curr_feat = norm_feat
- self.feat_momentum = 0.9
- self.obj_img = obj_img
- self.best_det_feat = norm_feat
- self.best_obj_img = obj_img
-
- # buffer of feature
- self.enable_buffer = enable_buffer
- self.feat_buffer = list()
- self.box_buffer = list() # tlwh
- self.obj_img_buffer = list()
-
- self.lost_frame_num = 0
-
- self.history_info = []
-
- self.active_frames = []
-
- def update_active_frames(self, frame):
- if not isinstance(frame, list):
- self.active_frames.append(frame)
- else:
- self.active_frames.extend(frame)
-
- def set_lost_frame_num(self):
- self.lost_frame_num += 1
-
- def reset_lost_frame_num(self):
- self.lost_frame_num = 0
-
- @staticmethod
- def norm_feat(feat):
- feat /= np.linalg.norm(feat)
- return feat
-
- def buffer_areas(self):
- buf_areas = np.array([_bbox[2]*_bbox[3] for _bbox in self.box_buffer])
- return buf_areas
-
- def update_reid_buffer(self, new_track):
- N = 15
-        # check whether the new box area differs from every buffered box area by at least N percent
- # --> update the buffer if the condition is True
- update = False
- if len(self.box_buffer) == 0:
- update = True
- else:
- new_tlwh = new_track._tlwh
- # calculate the area of boxes
- buf_areas = self.buffer_areas()
- new_area = new_tlwh[2] * new_tlwh[3]
-
- # Calculate the percentage value
- percentage = N / 100
-
- # Check if new_area is N% bigger or smaller than every box in buf_areas
- is_bigger = np.all(new_area >= (1 + percentage) * buf_areas)
- is_smaller = np.all(new_area <= (1 - percentage) * buf_areas)
-
- if is_bigger or is_smaller:
- update = True
-
- if update:
- self.feat_buffer.append(new_track.curr_feat)
- self.box_buffer.append(new_track._tlwh)
- self.obj_img_buffer.append(new_track.obj_img)
-
- def best_area_feat(self, new_box):
- # new_box: tlwh
-        # return the best matching feature and obj image based on the minimum area difference
- if len(self.feat_buffer) == 0 or not self.enable_buffer:
- return dict(
- feat=self.curr_feat,
- obj_img=self.obj_img)
-
- new_box_area = new_box[2] * new_box[3]
- buf_areas = self.buffer_areas()
- dif = np.abs(buf_areas - new_box_area)
-
- best_match_idx = np.argmin(dif)
-
- best_match_results = dict(
- feat=self.feat_buffer[best_match_idx],
- obj_img=self.obj_img_buffer[best_match_idx]
- )
- return best_match_results
-
- def update_features(self, new_track):
- feat = new_track.curr_feat
- self.smooth_feat = (self.feat_momentum * self.smooth_feat
- + (1 - self.feat_momentum) * feat)
- self.smooth_feat = self.norm_feat(self.smooth_feat)
-
- if self.enable_buffer:
- # update feature buffer based on boxsize
- self.update_reid_buffer(new_track)
-
- # update current feature
- self.curr_feat = feat
- self.obj_img = new_track.obj_img
-
- # update best reid feature base on detection score
- if new_track.score >= self.score:
- self.best_det_feat = feat
- self.best_obj_img = new_track.obj_img
-
- def update_cls(self, cls, score):
- if len(self.cls_hist) > 0:
- max_freq = 0
- found = False
- for c in self.cls_hist:
- if cls == c[0]:
- c[1] += score
- found = True
-
- if c[1] > max_freq:
- max_freq = c[1]
- self.cls = c[0]
- if not found:
- self.cls_hist.append([cls, score])
- self.cls = cls
- else:
- self.cls_hist.append([cls, score])
- self.cls = cls
-
- def update_angle(self, angle, score):
- self.angle = (1-score)*self.angle + score*angle
-
- @staticmethod
- def multi_predict(stracks):
- if len(stracks) > 0:
- multi_mean = np.asarray([st.mean.copy() for st in stracks])
- multi_covariance = np.asarray([st.covariance for st in stracks])
- for i, st in enumerate(stracks):
- if st.state != TrackState.Tracked:
- multi_mean[i][6] = 0
- multi_mean[i][7] = 0
- multi_mean, multi_covariance = Tracklet.shared_kalman.multi_predict(
- multi_mean, multi_covariance)
- for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
- stracks[i].mean = mean
- stracks[i].covariance = cov
-
- @staticmethod
- def multi_gmc(stracks, Hmat=np.eye(3, 3)):
- # This approach uses Homography matrix
- if len(stracks) > 0:
- multi_mean = [st.mean.reshape(-1, 1, 2) for st in stracks]
- multi_covariance = np.asarray([st.covariance for st in stracks])
-
- R, T = Hmat[:2, :2], Hmat[:2, 2]
- R8x8 = np.kron(np.eye(4, dtype=float), R)
- H = deepcopy(Hmat)
- H[:2, 2] = 0 # remove translation
-
- for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
- # w = mean.reshape(4,2).dot(V)
- # R_ = [R/w_i for w_i in w]
- # R8x8 = block_diag(*R_)
- mean = cv2.perspectiveTransform(mean, H)
- mean = mean.reshape(-1)
- mean[:2] += T
- cov = R8x8.dot(cov).dot(R8x8.transpose())
-
- stracks[i].mean = mean
- stracks[i].covariance = cov
-
- last_obs, last_frame_id = stracks[i].last_observation, stracks[i].last_frame_id
- last_mean, last_cov = stracks[i].last_mean, stracks[i].last_covariance
-
- if last_mean is not None:
- # Update the last observation
- last_obs = cv2.perspectiveTransform(
- last_obs.reshape(-1, 1, 2), H)
- last_obs = last_obs.reshape(-1)
- last_obs[:2] += T
- last_mean = cv2.perspectiveTransform(
- last_mean.reshape(-1, 1, 2), H)
- last_mean = last_mean.reshape(-1)
- last_mean[:2] += T
- last_cov = R8x8.dot(last_cov).dot(R8x8.transpose())
- stracks[i].last_observation = last_obs
- stracks[i].last_mean = last_mean
- stracks[i].last_covariance = last_cov
- # save the trajectory
- traj = last_mean[:4]
- stracks[i].trajectory = np.append(traj, last_frame_id)
-
- def activate(self, kalman_filter, frame_id):
- """Start a new tracklet"""
- self.kalman_filter = kalman_filter
- self.track_id = self.next_id()
-
- xywh = self.tlwh_to_xywh(self._tlwh)
- self.mean, self.covariance = self.kalman_filter.initiate(xywh)
-
- self.tracklet_len = 0
- self.state = TrackState.Tracked
- if frame_id == 1:
- self.is_activated = True
- self.frame_id = frame_id
- self.start_frame = frame_id
-
- # Save trajectory
- self.last_frame_id = frame_id
- self.last_observation = deepcopy(xywh)
- self.last_mean = deepcopy(self.mean)
- self.last_covariance = deepcopy(self.covariance)
-
- self.set_history_info(
- history_info=[dict(frame_id=frame_id, track_id=self.track_id), ],
- append_first=False)
-
- self.update_active_frames(frame_id)
-
- def re_activate(self, new_track, frame_id, new_id=False):
- xywh = self.tlwh_to_xywh(new_track.tlwh)
- self.mean, self.covariance = self.kalman_filter.update(self.mean,
- self.covariance, xywh,
- new_track.score)
- if new_track.curr_feat is not None:
- self.update_features(new_track)
- self.tracklet_len = 0
- self.state = TrackState.Tracked
- self.is_activated = True
- self.frame_id = frame_id
- if new_id:
- self.track_id = self.next_id()
- self.score = new_track.score
-
- self.update_cls(new_track.cls, new_track.score)
- if self.angle is not None:
- self.update_angle(new_track.angle, new_track.score)
-
- # Save trajectory
- self.last_frame_id = frame_id
- self.last_observation = deepcopy(xywh)
- self.last_mean = deepcopy(self.mean)
- self.last_covariance = deepcopy(self.covariance)
- self.set_history_info(
- history_info=[dict(frame_id=frame_id, track_id=self.track_id), ],
- append_first=False)
- self.update_active_frames(frame_id)
-
- def update(self, new_track, frame_id):
- """
- Update a matched track
- :type new_track: Tracklet
- :type frame_id: int
- :return:
- """
- self.frame_id = frame_id
- self.tracklet_len += 1
-
- new_xywh = self.tlwh_to_xywh(new_track.tlwh)
- self.mean, self.covariance = self.kalman_filter.update(
- self.mean, self.covariance, new_xywh, new_track.score)
-
- # update reid feature
- if new_track.curr_feat is not None:
- self.update_features(new_track)
-
- self.state = TrackState.Tracked
- self.is_activated = True
-
- self.score = new_track.score
- self.update_cls(new_track.cls, new_track.score)
-
- if self.angle is not None:
- self.update_angle(new_track.angle, new_track.score)
-
- # Save trajectory
- self.last_frame_id = frame_id
- self.last_observation = deepcopy(new_xywh)
- self.last_mean = deepcopy(self.mean)
- self.last_covariance = deepcopy(self.covariance)
- if self.is_activated:
- self.set_history_info(
- history_info=[
- dict(frame_id=frame_id, track_id=self.track_id), ],
- append_first=False)
- self.update_active_frames(frame_id)
-
- def smooth_update(self, new_track, frame_id):
- '''
- As introduced in OC-SORT
- '''
- xywh = self.tlwh_to_xywh(new_track.tlwh)
- # Interpolate update
- if self.last_frame_id is not None:
- num_missing_steps = frame_id - self.last_frame_id
- if num_missing_steps > 1:
- delta = (xywh - self.last_observation)/num_missing_steps
- interpolate_tracks = [self.last_observation +
- i*delta for i in range(1, num_missing_steps)]
- mean_i, covariance_i = self.last_mean, self.last_covariance
- for new_xywh in interpolate_tracks:
- # Predict
- mean_i, covariance_i = self.kalman_filter.predict(
- mean_i, covariance_i)
- # Update
- mean_i, covariance_i = self.kalman_filter.update(
- mean_i, covariance_i, new_xywh, new_track.score)
- # the prediction step before last update
- self.mean, self.covariance = self.kalman_filter.predict(
- mean_i, covariance_i)
-
- # Normal update
- self.update(new_track, frame_id)
-
- @property
- def velocity(self):
- vx = self.mean[4] # horizontal velocity
- vy = self.mean[5] # vertical velocity
- vh = self.mean[7] # height velocity
- return [vx, vy, vh]
-
- @property
- def vel_dir(self):
- if self.trajectory is None:
- dist = self.mean[4:6] # velocity
- else:
- p2 = self.mean[:2]
- p1 = self.trajectory[:2] # (x,y,w,h,frame_id)
- dist = p2-p1
- norm = np.linalg.norm(dist) + 1e-6
- return dist/norm
-
- def common_active_frames(self, track):
- lst3 = [value for value in self.active_frames if value in track.active_frames]
- return len(lst3) > 0
-
- def set_history_info(self, history_info, append_first=False):
- if append_first:
- self.history_info = history_info + self.history_info
- else:
- self.history_info = self.history_info + history_info
-
- def get_history_info(self):
- return self.history_info
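-
-
-if __name__ == '__main__':
-    # Illustrative sketch only: it relies on the package-relative imports above
-    # (BaseTrack providing next_id()/tlwh_to_xywh() and KalmanFilter providing
-    # initiate/predict/update), so it is meant to run inside the full tracker
-    # package rather than as a standalone file.
-    det_tlwh = [100.0, 50.0, 40.0, 80.0]  # top-left x, top-left y, width, height
-    track = Tracklet(det_tlwh, score=0.9, cls=0)
-    track.activate(KalmanFilter(), frame_id=1)
-    print(track.track_id, track.state == TrackState.Tracked)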
diff --git a/spaces/danterivers/music-generation-samples/audiocraft/data/audio.py b/spaces/danterivers/music-generation-samples/audiocraft/data/audio.py
deleted file mode 100644
index 1829d7db4ef832ad65598b471caa7d256a06d012..0000000000000000000000000000000000000000
--- a/spaces/danterivers/music-generation-samples/audiocraft/data/audio.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Audio IO methods are defined in this module (info, read, write).
-We rely on the av library for faster reads when possible, otherwise on torchaudio.
-"""
-
-from dataclasses import dataclass
-from pathlib import Path
-import logging
-import typing as tp
-
-import numpy as np
-import soundfile
-import torch
-from torch.nn import functional as F
-import torchaudio as ta
-
-import av
-
-from .audio_utils import f32_pcm, i16_pcm, normalize_audio
-
-
-_av_initialized = False
-
-
-def _init_av():
- global _av_initialized
- if _av_initialized:
- return
- logger = logging.getLogger('libav.mp3')
- logger.setLevel(logging.ERROR)
- _av_initialized = True
-
-
-@dataclass(frozen=True)
-class AudioFileInfo:
- sample_rate: int
- duration: float
- channels: int
-
-
-def _av_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
- _init_av()
- with av.open(str(filepath)) as af:
- stream = af.streams.audio[0]
- sample_rate = stream.codec_context.sample_rate
- duration = float(stream.duration * stream.time_base)
- channels = stream.channels
- return AudioFileInfo(sample_rate, duration, channels)
-
-
-def _soundfile_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
- info = soundfile.info(filepath)
- return AudioFileInfo(info.samplerate, info.duration, info.channels)
-
-
-def audio_info(filepath: tp.Union[str, Path]) -> AudioFileInfo:
-    # torchaudio no longer returns useful duration information for some formats like mp3s.
- filepath = Path(filepath)
- if filepath.suffix in ['.flac', '.ogg']: # TODO: Validate .ogg can be safely read with av_info
- # ffmpeg has some weird issue with flac.
- return _soundfile_info(filepath)
- else:
- return _av_info(filepath)
-
-
-def _av_read(filepath: tp.Union[str, Path], seek_time: float = 0, duration: float = -1.) -> tp.Tuple[torch.Tensor, int]:
- """FFMPEG-based audio file reading using PyAV bindings.
- Soundfile cannot read mp3 and av_read is more efficient than torchaudio.
-
- Args:
- filepath (str or Path): Path to audio file to read.
- seek_time (float): Time at which to start reading in the file.
- duration (float): Duration to read from the file. If set to -1, the whole file is read.
- Returns:
- Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate
- """
- _init_av()
- with av.open(str(filepath)) as af:
- stream = af.streams.audio[0]
- sr = stream.codec_context.sample_rate
- num_frames = int(sr * duration) if duration >= 0 else -1
- frame_offset = int(sr * seek_time)
- # we need a small negative offset otherwise we get some edge artifact
- # from the mp3 decoder.
- af.seek(int(max(0, (seek_time - 0.1)) / stream.time_base), stream=stream)
- frames = []
- length = 0
- for frame in af.decode(streams=stream.index):
- current_offset = int(frame.rate * frame.pts * frame.time_base)
- strip = max(0, frame_offset - current_offset)
- buf = torch.from_numpy(frame.to_ndarray())
- if buf.shape[0] != stream.channels:
- buf = buf.view(-1, stream.channels).t()
- buf = buf[:, strip:]
- frames.append(buf)
- length += buf.shape[1]
- if num_frames > 0 and length >= num_frames:
- break
- assert frames
- # If the above assert fails, it is likely because we seeked past the end of file point,
- # in which case ffmpeg returns a single frame with only zeros, and a weird timestamp.
- # This will need proper debugging, in due time.
- wav = torch.cat(frames, dim=1)
- assert wav.shape[0] == stream.channels
- if num_frames > 0:
- wav = wav[:, :num_frames]
- return f32_pcm(wav), sr
-
-
-def audio_read(filepath: tp.Union[str, Path], seek_time: float = 0.,
- duration: float = -1., pad: bool = False) -> tp.Tuple[torch.Tensor, int]:
- """Read audio by picking the most appropriate backend tool based on the audio format.
-
- Args:
- filepath (str or Path): Path to audio file to read.
- seek_time (float): Time at which to start reading in the file.
- duration (float): Duration to read from the file. If set to -1, the whole file is read.
- pad (bool): Pad output audio if not reaching expected duration.
- Returns:
- Tuple[torch.Tensor, int]: Tuple containing audio data and sample rate.
- """
- fp = Path(filepath)
- if fp.suffix in ['.flac', '.ogg']: # TODO: check if we can safely use av_read for .ogg
- # There is some bug with ffmpeg and reading flac
- info = _soundfile_info(filepath)
- frames = -1 if duration <= 0 else int(duration * info.sample_rate)
- frame_offset = int(seek_time * info.sample_rate)
- wav, sr = soundfile.read(filepath, start=frame_offset, frames=frames, dtype=np.float32)
- assert info.sample_rate == sr, f"Mismatch of sample rates {info.sample_rate} {sr}"
- wav = torch.from_numpy(wav).t().contiguous()
- if len(wav.shape) == 1:
- wav = torch.unsqueeze(wav, 0)
- elif (
- fp.suffix in ['.wav', '.mp3'] and fp.suffix[1:] in ta.utils.sox_utils.list_read_formats()
- and duration <= 0 and seek_time == 0
- ):
- # Torchaudio is faster if we load an entire file at once.
- wav, sr = ta.load(fp)
- else:
- wav, sr = _av_read(filepath, seek_time, duration)
- if pad and duration > 0:
- expected_frames = int(duration * sr)
- wav = F.pad(wav, (0, expected_frames - wav.shape[-1]))
- return wav, sr
-
-
-def audio_write(stem_name: tp.Union[str, Path],
- wav: torch.Tensor, sample_rate: int,
- format: str = 'wav', mp3_rate: int = 320, normalize: bool = True,
- strategy: str = 'peak', peak_clip_headroom_db: float = 1,
- rms_headroom_db: float = 18, loudness_headroom_db: float = 14,
- log_clipping: bool = True, make_parent_dir: bool = True,
- add_suffix: bool = True) -> Path:
- """Convenience function for saving audio to disk. Returns the filename the audio was written to.
-
- Args:
- stem_name (str or Path): Filename without extension which will be added automatically.
- format (str): Either "wav" or "mp3".
- mp3_rate (int): kbps when using mp3s.
- normalize (bool): if `True` (default), normalizes according to the prescribed
- strategy (see after). If `False`, the strategy is only used in case clipping
- would happen.
- strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',
- i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square
- with extra headroom to avoid clipping. 'clip' just clips.
- peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.
- rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. This must be much larger
- than the `peak_clip` one to avoid further clipping.
- loudness_headroom_db (float): Target loudness for loudness normalization.
- log_clipping (bool): If True, basic logging on stderr when clipping still
- occurs despite strategy (only for 'rms').
- make_parent_dir (bool): Make parent directory if it doesn't exist.
- Returns:
- Path: Path of the saved audio.
- """
- assert wav.dtype.is_floating_point, "wav is not floating point"
- if wav.dim() == 1:
- wav = wav[None]
- elif wav.dim() > 2:
- raise ValueError("Input wav should be at most 2 dimension.")
- assert wav.isfinite().all()
- wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,
- rms_headroom_db, loudness_headroom_db, log_clipping=log_clipping,
- sample_rate=sample_rate, stem_name=str(stem_name))
- kwargs: dict = {}
- if format == 'mp3':
- suffix = '.mp3'
- kwargs.update({"compression": mp3_rate})
- elif format == 'wav':
- wav = i16_pcm(wav)
- suffix = '.wav'
- kwargs.update({"encoding": "PCM_S", "bits_per_sample": 16})
- else:
- raise RuntimeError(f"Invalid format {format}. Only wav or mp3 are supported.")
- if not add_suffix:
- suffix = ''
- path = Path(str(stem_name) + suffix)
- if make_parent_dir:
- path.parent.mkdir(exist_ok=True, parents=True)
- try:
- ta.save(path, wav, sample_rate, **kwargs)
- except Exception:
- if path.exists():
- # we do not want to leave half written files around.
- path.unlink()
- raise
- return path
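
Putting the module above together: `audio_info` probes a file, `audio_read` picks soundfile, torchaudio, or PyAV depending on format and arguments, and `audio_write` normalizes and saves. A usage sketch, assuming the module is importable as `audiocraft.data.audio` and that `input.mp3` exists (both are assumptions for illustration):

```python
# Usage sketch for the deleted module above. The import path and "input.mp3"
# are assumptions made for illustration.
from audiocraft.data.audio import audio_info, audio_read, audio_write

info = audio_info("input.mp3")
print(info.sample_rate, info.duration, info.channels)

# Read a 5-second excerpt starting at 10 s, padding if the file is shorter.
wav, sr = audio_read("input.mp3", seek_time=10.0, duration=5.0, pad=True)

# Save it as a peak-normalized wav; the ".wav" suffix is added automatically.
out_path = audio_write("excerpt", wav, sr, format="wav", strategy="peak")
print(out_path)  # excerpt.wav
```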
diff --git a/spaces/dataminers/dataminers/sharp_ratio.py b/spaces/dataminers/dataminers/sharp_ratio.py
deleted file mode 100644
index ac25c581256fbba994cd5944132fcc18c411ad2a..0000000000000000000000000000000000000000
--- a/spaces/dataminers/dataminers/sharp_ratio.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import pandas as pd
-import numpy as np
-from datetime import datetime
-import streamlit as st
-import matplotlib.pyplot as plt
-import plotly.express as px
-#import plotly.graph_objects as go
-
-
-def cumulative_return(stocks,choices):
- symbols, weights, investing_style, benchmark, rf, A_coef,ticker = choices.values()
-
- #tkers = sorted(set(stocks['Ticker'].unique()))
- #preprocess
- #data= stocks.copy()
- #print('data cumu',data)
- #data.set_index('Date',append=True)
- #df_by_stock= data.pivot(index='Date',columns='Ticker')
- #stocks = df_by_stock['Adj. Close']
-
- #stocks = data.pivot(index="Date", columns="Ticker", values="Adj. Close")
- data_stocks = stocks.copy()
- df = stocks.copy()
- df.set_index('Date', inplace=True)
-
-
- tkers = symbols.copy()
- logRet = np.log(df/df.shift())
- log_returns = np.log(df/df.shift())
- tickers_list = symbols.copy()
- weights_list = weights.copy()
- ##
- stock_port = {}
- for e in tickers_list: stock_port[e] = 0
- # Convert Weights to Floats and Sum
- weights = [float(x) for x in weights_list]
- s = sum(weights)
- # Calc Weight Proportions
- new_weights = []
- for i in weights: new_weights.append(i/s)
- # Assign Weights to Ticker Dict
- i = 0
- for e in stock_port:
- stock_port[e] = new_weights[i]
- i += 1
-
- port = dict.fromkeys(tkers, 0)
- port.update(stock_port)
-
- portfolio_dict = port
-
- for e in portfolio_dict:
- tmp = 0
- if portfolio_dict[e] > tmp:
- tmp = portfolio_dict[e]
- tick = e
- list_ =[]
- for e in tickers_list:
- if e not in list_:
- list_.append(e)
-
- df = df[list_]
- df = df/df.iloc[0]
- df.reset_index(inplace=True)
- df=pd.DataFrame(df)
- fig = px.line(df, x='Date' ,y=df.columns[1:,])
-
-
- #layout reference = https://linuxtut.com/en/b13e3e721519c2842cc9/
- fig.update_layout(
- xaxis=dict(
- rangeselector=dict(
- buttons=list([
- dict(count=1,
- label="1m",
- step="month",
- stepmode="backward"),
- dict(count=6,
- label="6m",
- step="month",
- stepmode="backward"),
- dict(count=1,
- label="YTD",
- step="year",
- stepmode="todate"),
- dict(count=1,
- label="1y",
- step="year",
- stepmode="backward"),
- dict(step="all")
- ])
- ),
- rangeslider=dict(
- visible=True
- ),
- type="date"
- )
- )
- fig.update_layout(xaxis=dict(rangeselector = dict(font = dict( color = "black"))))
- fig.update_layout(title_text = 'Portfolio Historical Normalized Cumulative Returns',
- title_x=0.458)
- st.plotly_chart(fig, use_container_width=True)
-
-def sharp_ratio_func(df,choices):
- symbols, weights, investing_style, benchmark, rf, A_coef,ticker = choices.values()
-
- #tkers = sorted(set(stocks['Ticker'].unique()))
- #preprocess
- #data= stocks.copy()
- #df_by_stock= data.pivot(index='Date',columns='Ticker')
- #stocks = df_by_stock['Adj. Close']
-
- #stocks = data.pivot(index="Date", columns="Ticker", values="Adj. Close")
- stocks = df.copy()
- stocks.set_index('Date', inplace=True)
- tkers = stocks.columns
- tickers_list = symbols.copy()
- weights_list = weights.copy()
-
- stock_port = {}
- for e in tickers_list: stock_port[e] = 0
- # Convert Weights to Floats and Sum
- weights = [float(x) for x in weights_list]
- s = sum(weights)
- # Calc Weight Proportions
- new_weights = []
- for i in weights: new_weights.append(i/s)
- # Assign Weights to Ticker Dict
- i = 0
- for e in stock_port:
- stock_port[e] = new_weights[i]
- i += 1
-
- port = dict.fromkeys(tkers, 0)
- port.update(stock_port)
-
- portfolio_dict = port
-
- sharp_ratio_list = []
- for ticker in symbols:
- logRet = np.log(stocks/stocks.shift())
- stk = dict.fromkeys(tkers, 0)
- stkTicker = {ticker:1}
- stk.update(stkTicker)
- ttlStk = np.sum(logRet*stk, axis=1)
- stock_sharpe_ratio = ttlStk.mean() / ttlStk.std()
- sharp_ratio_list.append(stock_sharpe_ratio)
-
- sharp_ratio = {'Assets': symbols, 'Sharpe Ratio': sharp_ratio_list}
-
-    # Portfolio Sharpe ratio calculation
- logRet = np.log(stocks/stocks.shift())
- portfolio = dict.fromkeys(tkers, 0)
- portfolio.update(portfolio_dict)
- totalPortfolio = np.sum(logRet*portfolio, axis=1)
- portfolio_sharpe_ratio = totalPortfolio.mean() / totalPortfolio.std()
-
- sharp_ratio['Assets'].append('Portfolio')
- sharp_ratio['Sharpe Ratio'].append(portfolio_sharpe_ratio)
-
- fig = px.bar(sharp_ratio, x='Assets', y="Sharpe Ratio",color='Assets')
- fig.update_layout(title_text = 'Sharpe Ratio of the Assets and Portfolio',
- title_x=0.458)
-
- st.plotly_chart(fig, use_container_width=True)
-
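
The Sharpe ratio used in `sharp_ratio_func` is simply the mean of the daily log returns divided by their standard deviation (no risk-free rate or annualization). A self-contained sketch of the same calculation on a made-up price table:

```python
# Self-contained sketch of the Sharpe computation used above: mean over std of
# weighted daily log returns, with no risk-free rate or annualization.
# The prices and weights below are made up.
import numpy as np
import pandas as pd

prices = pd.DataFrame(
    {"AAA": [100.0, 101.0, 103.0, 102.0, 105.0],
     "BBB": [50.0, 50.5, 51.0, 50.0, 52.0]},
    index=pd.date_range("2023-01-02", periods=5, freq="B"),
)
weights = pd.Series({"AAA": 0.6, "BBB": 0.4})  # already normalized to sum to 1

log_ret = np.log(prices / prices.shift()).dropna()
port_ret = (log_ret * weights).sum(axis=1)  # weighted daily portfolio log return
sharpe = port_ret.mean() / port_ret.std()
print(f"Portfolio Sharpe ratio: {sharpe:.3f}")
```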
diff --git a/spaces/davidefiocco/zeroshotcat/app.py b/spaces/davidefiocco/zeroshotcat/app.py
deleted file mode 100644
index a0dda13ae4dbcb06e154f8f577d81ff1c06f37ee..0000000000000000000000000000000000000000
--- a/spaces/davidefiocco/zeroshotcat/app.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from io import BytesIO
-
-import pandas as pd
-import streamlit as st
-import tokenizers
-import torch
-from transformers import Pipeline, pipeline
-
-st.set_page_config(
- page_title="Zero-shot classification from tabular data",
- page_icon=None,
- layout="wide",
- initial_sidebar_state="auto",
- menu_items=None,
-)
-
-
-@st.cache(
- hash_funcs={
- torch.nn.parameter.Parameter: lambda _: None,
- tokenizers.Tokenizer: lambda _: None,
- tokenizers.AddedToken: lambda _: None,
- },
- allow_output_mutation=True,
- show_spinner=False,
-)
-def load_classifier() -> Pipeline:
- classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
- return classifier
-
-
-with st.spinner(text="Setting stuff up related to the inference engine..."):
- classifier = load_classifier()
-
-st.title("Zero-shot classification from tabular data")
-st.text(
- "Upload an Excel table and perform zero-shot classification on a set of custom labels"
-)
-
-data = st.file_uploader(
- "Upload Excel file (it should contain a column named `text` in its header):"
-)
-labels = st.text_input("Enter comma-separated labels:")
-
-# classify first N snippets only for faster inference
-N = 10000
-
-if st.button("Calculate labels"):
-
- try:
- labels_list = labels.split(",")
- table = pd.read_excel(data)
- table = table.head(N).reset_index(drop=True)
-
- prog_bar = st.progress(0)
- preds = []
-
- for i in range(len(table)):
- preds.append(classifier(table.loc[i, "text"], labels)["labels"][0])
- prog_bar.progress((i + 1) / len(table))
-
- table["label"] = preds
-
- st.table(table[["text", "label"]])
-
- buf = BytesIO()
- table[["text", "label"]].to_excel(buf)
-
- st.download_button(
- label="Download table", data=buf.getvalue(), file_name="output.xlsx"
- )
-
-    except Exception:
- st.error(
- "Something went wrong. Make sure you upload an Excel file containing a column named `text` and a set of comma-separated labels is provided"
- )
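
Outside of Streamlit, the core of the app above is a per-row call to the zero-shot pipeline, keeping the top-scoring label. A minimal sketch of that loop (the texts and labels are made up; the model download is required):

```python
# Minimal non-Streamlit sketch of the per-row zero-shot loop used above.
# Texts and labels are made up; facebook/bart-large-mnli must be downloadable.
import pandas as pd
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

table = pd.DataFrame({"text": ["The invoice is overdue", "Great product, love it"]})
labels = ["billing", "feedback", "support"]

# classifier() returns labels sorted by score, so [0] is the top prediction.
table["label"] = [classifier(text, labels)["labels"][0] for text in table["text"]]
print(table)
```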
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/__init__.py
deleted file mode 100644
index 156cb232a7aa80eee1526c7598f72043de10473f..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Empty __init__.py file to signal Python this directory is a package."""
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-5e8c1776.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-5e8c1776.js
deleted file mode 100644
index a4c648a10a082118298b4df922b235d456c0d8bc..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-5e8c1776.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as j,e as C,s as H,m as B,g as o,Y as g,h as d,n as S,k as v,C as E,F as h,G as b,w as k,u as w,H as M,V as L,ae as T,o as q,Q as z,R as D,E as F}from"./index-9e76ffee.js";import{B as G}from"./Button-30a08c0b.js";function Q(s){let e,a,n;return{c(){e=B("div"),o(e,"id",s[0]),o(e,"class",a="prose "+s[1].join(" ")+" svelte-1yrv54"),o(e,"data-testid","markdown"),o(e,"dir",n=s[5]?"rtl":"ltr"),g(e,"min",s[4]),g(e,"hide",!s[2])},m(t,i){d(t,e,i),e.innerHTML=s[3]},p(t,[i]){i&8&&(e.innerHTML=t[3]),i&1&&o(e,"id",t[0]),i&2&&a!==(a="prose "+t[1].join(" ")+" svelte-1yrv54")&&o(e,"class",a),i&32&&n!==(n=t[5]?"rtl":"ltr")&&o(e,"dir",n),i&18&&g(e,"min",t[4]),i&6&&g(e,"hide",!t[2])},i:S,o:S,d(t){t&&v(e)}}}function R(s,e,a){let{elem_id:n=""}=e,{elem_classes:t=[]}=e,{visible:i=!0}=e,{value:f}=e,{min_height:r=!1}=e,{rtl:l=!1}=e;const _=E();return s.$$set=m=>{"elem_id"in m&&a(0,n=m.elem_id),"elem_classes"in m&&a(1,t=m.elem_classes),"visible"in m&&a(2,i=m.visible),"value"in m&&a(3,f=m.value),"min_height"in m&&a(4,r=m.min_height),"rtl"in m&&a(5,l=m.rtl)},s.$$.update=()=>{s.$$.dirty&8&&_("change")},[n,t,i,f,r,l]}class V extends j{constructor(e){super(),C(this,e,R,Q,H,{elem_id:0,elem_classes:1,visible:2,value:3,min_height:4,rtl:5})}}function Y(s){let e,a,n,t,i;const f=[s[4],{variant:"center"}];let r={};for(let l=0;l{"label"in u&&a(6,n=u.label),"elem_id"in u&&a(0,t=u.elem_id),"elem_classes"in u&&a(1,i=u.elem_classes),"visible"in u&&a(2,f=u.visible),"value"in u&&a(3,r=u.value),"loading_status"in u&&a(4,l=u.loading_status),"rtl"in u&&a(5,_=u.rtl)},s.$$.update=()=>{s.$$.dirty&64&&m("change")},[t,i,f,r,l,_,n,c]}class J extends j{constructor(e){super(),C(this,e,I,A,H,{label:6,elem_id:0,elem_classes:1,visible:2,value:3,loading_status:4,rtl:5})}}const O=J,P=["static"];export{O as Component,P as modes};
-//# sourceMappingURL=index-5e8c1776.js.map
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/h11/_events.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/h11/_events.py
deleted file mode 100644
index 075bf8a469d44d2388b08ec3d009fe55d44cb6eb..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/h11/_events.py
+++ /dev/null
@@ -1,369 +0,0 @@
-# High level events that make up HTTP/1.1 conversations. Loosely inspired by
-# the corresponding events in hyper-h2:
-#
-# http://python-hyper.org/h2/en/stable/api.html#events
-#
-# Don't subclass these. Stuff will break.
-
-import re
-from abc import ABC
-from dataclasses import dataclass, field
-from typing import Any, cast, Dict, List, Tuple, Union
-
-from ._abnf import method, request_target
-from ._headers import Headers, normalize_and_validate
-from ._util import bytesify, LocalProtocolError, validate
-
-# Everything in __all__ gets re-exported as part of the h11 public API.
-__all__ = [
- "Event",
- "Request",
- "InformationalResponse",
- "Response",
- "Data",
- "EndOfMessage",
- "ConnectionClosed",
-]
-
-method_re = re.compile(method.encode("ascii"))
-request_target_re = re.compile(request_target.encode("ascii"))
-
-
-class Event(ABC):
- """
- Base class for h11 events.
- """
-
- __slots__ = ()
-
-
-@dataclass(init=False, frozen=True)
-class Request(Event):
- """The beginning of an HTTP request.
-
- Fields:
-
- .. attribute:: method
-
- An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte
- string. :term:`Bytes-like objects ` and native
- strings containing only ascii characters will be automatically
- converted to byte strings.
-
- .. attribute:: target
-
- The target of an HTTP request, e.g. ``b"/index.html"``, or one of the
-        more exotic formats described in `RFC 7230, section 5.3
-        <https://tools.ietf.org/html/rfc7230#section-5.3>`_. Always a byte
- string. :term:`Bytes-like objects ` and native
- strings containing only ascii characters will be automatically
- converted to byte strings.
-
- .. attribute:: headers
-
- Request headers, represented as a list of (name, value) pairs. See
- :ref:`the header normalization rules ` for details.
-
- .. attribute:: http_version
-
- The HTTP protocol version, represented as a byte string like
- ``b"1.1"``. See :ref:`the HTTP version normalization rules
- ` for details.
-
- """
-
- __slots__ = ("method", "headers", "target", "http_version")
-
- method: bytes
- headers: Headers
- target: bytes
- http_version: bytes
-
- def __init__(
- self,
- *,
- method: Union[bytes, str],
- headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]],
- target: Union[bytes, str],
- http_version: Union[bytes, str] = b"1.1",
- _parsed: bool = False,
- ) -> None:
- super().__init__()
- if isinstance(headers, Headers):
- object.__setattr__(self, "headers", headers)
- else:
- object.__setattr__(
- self, "headers", normalize_and_validate(headers, _parsed=_parsed)
- )
- if not _parsed:
- object.__setattr__(self, "method", bytesify(method))
- object.__setattr__(self, "target", bytesify(target))
- object.__setattr__(self, "http_version", bytesify(http_version))
- else:
- object.__setattr__(self, "method", method)
- object.__setattr__(self, "target", target)
- object.__setattr__(self, "http_version", http_version)
-
- # "A server MUST respond with a 400 (Bad Request) status code to any
- # HTTP/1.1 request message that lacks a Host header field and to any
- # request message that contains more than one Host header field or a
- # Host header field with an invalid field-value."
- # -- https://tools.ietf.org/html/rfc7230#section-5.4
- host_count = 0
- for name, value in self.headers:
- if name == b"host":
- host_count += 1
- if self.http_version == b"1.1" and host_count == 0:
- raise LocalProtocolError("Missing mandatory Host: header")
- if host_count > 1:
- raise LocalProtocolError("Found multiple Host: headers")
-
- validate(method_re, self.method, "Illegal method characters")
- validate(request_target_re, self.target, "Illegal target characters")
-
- # This is an unhashable type.
- __hash__ = None # type: ignore
-
-
-@dataclass(init=False, frozen=True)
-class _ResponseBase(Event):
- __slots__ = ("headers", "http_version", "reason", "status_code")
-
- headers: Headers
- http_version: bytes
- reason: bytes
- status_code: int
-
- def __init__(
- self,
- *,
- headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]],
- status_code: int,
- http_version: Union[bytes, str] = b"1.1",
- reason: Union[bytes, str] = b"",
- _parsed: bool = False,
- ) -> None:
- super().__init__()
- if isinstance(headers, Headers):
- object.__setattr__(self, "headers", headers)
- else:
- object.__setattr__(
- self, "headers", normalize_and_validate(headers, _parsed=_parsed)
- )
- if not _parsed:
- object.__setattr__(self, "reason", bytesify(reason))
- object.__setattr__(self, "http_version", bytesify(http_version))
- if not isinstance(status_code, int):
- raise LocalProtocolError("status code must be integer")
- # Because IntEnum objects are instances of int, but aren't
- # duck-compatible (sigh), see gh-72.
- object.__setattr__(self, "status_code", int(status_code))
- else:
- object.__setattr__(self, "reason", reason)
- object.__setattr__(self, "http_version", http_version)
- object.__setattr__(self, "status_code", status_code)
-
- self.__post_init__()
-
- def __post_init__(self) -> None:
- pass
-
- # This is an unhashable type.
- __hash__ = None # type: ignore
-
-
-@dataclass(init=False, frozen=True)
-class InformationalResponse(_ResponseBase):
- """An HTTP informational response.
-
- Fields:
-
- .. attribute:: status_code
-
- The status code of this response, as an integer. For an
- :class:`InformationalResponse`, this is always in the range [100,
- 200).
-
- .. attribute:: headers
-
- Request headers, represented as a list of (name, value) pairs. See
- :ref:`the header normalization rules ` for
- details.
-
- .. attribute:: http_version
-
- The HTTP protocol version, represented as a byte string like
- ``b"1.1"``. See :ref:`the HTTP version normalization rules
- ` for details.
-
- .. attribute:: reason
-
- The reason phrase of this response, as a byte string. For example:
- ``b"OK"``, or ``b"Not Found"``.
-
- """
-
- def __post_init__(self) -> None:
- if not (100 <= self.status_code < 200):
- raise LocalProtocolError(
- "InformationalResponse status_code should be in range "
- "[100, 200), not {}".format(self.status_code)
- )
-
- # This is an unhashable type.
- __hash__ = None # type: ignore
-
-
-@dataclass(init=False, frozen=True)
-class Response(_ResponseBase):
- """The beginning of an HTTP response.
-
- Fields:
-
- .. attribute:: status_code
-
- The status code of this response, as an integer. For an
- :class:`Response`, this is always in the range [200,
- 1000).
-
- .. attribute:: headers
-
- Request headers, represented as a list of (name, value) pairs. See
- :ref:`the header normalization rules ` for details.
-
- .. attribute:: http_version
-
- The HTTP protocol version, represented as a byte string like
- ``b"1.1"``. See :ref:`the HTTP version normalization rules
- ` for details.
-
- .. attribute:: reason
-
- The reason phrase of this response, as a byte string. For example:
- ``b"OK"``, or ``b"Not Found"``.
-
- """
-
- def __post_init__(self) -> None:
- if not (200 <= self.status_code < 1000):
- raise LocalProtocolError(
- "Response status_code should be in range [200, 1000), not {}".format(
- self.status_code
- )
- )
-
- # This is an unhashable type.
- __hash__ = None # type: ignore
-
-
-@dataclass(init=False, frozen=True)
-class Data(Event):
- """Part of an HTTP message body.
-
- Fields:
-
- .. attribute:: data
-
- A :term:`bytes-like object` containing part of a message body. Or, if
- using the ``combine=False`` argument to :meth:`Connection.send`, then
- any object that your socket writing code knows what to do with, and for
- which calling :func:`len` returns the number of bytes that will be
- written -- see :ref:`sendfile` for details.
-
- .. attribute:: chunk_start
-
- A marker that indicates whether this data object is from the start of a
-        chunked transfer encoding chunk. This field is ignored when a Data
- event is provided to :meth:`Connection.send`: it is only valid on
- events emitted from :meth:`Connection.next_event`. You probably
- shouldn't use this attribute at all; see
- :ref:`chunk-delimiters-are-bad` for details.
-
- .. attribute:: chunk_end
-
- A marker that indicates whether this data object is the last for a
-        given chunked transfer encoding chunk. This field is ignored when
- a Data event is provided to :meth:`Connection.send`: it is only valid
- on events emitted from :meth:`Connection.next_event`. You probably
- shouldn't use this attribute at all; see
- :ref:`chunk-delimiters-are-bad` for details.
-
- """
-
- __slots__ = ("data", "chunk_start", "chunk_end")
-
- data: bytes
- chunk_start: bool
- chunk_end: bool
-
- def __init__(
- self, data: bytes, chunk_start: bool = False, chunk_end: bool = False
- ) -> None:
- object.__setattr__(self, "data", data)
- object.__setattr__(self, "chunk_start", chunk_start)
- object.__setattr__(self, "chunk_end", chunk_end)
-
- # This is an unhashable type.
- __hash__ = None # type: ignore
-
-
-# XX FIXME: "A recipient MUST ignore (or consider as an error) any fields that
-# are forbidden to be sent in a trailer, since processing them as if they were
-# present in the header section might bypass external security filters."
-# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#chunked.trailer.part
-# Unfortunately, the list of forbidden fields is long and vague :-/
-@dataclass(init=False, frozen=True)
-class EndOfMessage(Event):
- """The end of an HTTP message.
-
- Fields:
-
- .. attribute:: headers
-
- Default value: ``[]``
-
- Any trailing headers attached to this message, represented as a list of
- (name, value) pairs. See :ref:`the header normalization rules
- ` for details.
-
- Must be empty unless ``Transfer-Encoding: chunked`` is in use.
-
- """
-
- __slots__ = ("headers",)
-
- headers: Headers
-
- def __init__(
- self,
- *,
- headers: Union[
- Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]], None
- ] = None,
- _parsed: bool = False,
- ) -> None:
- super().__init__()
- if headers is None:
- headers = Headers([])
- elif not isinstance(headers, Headers):
- headers = normalize_and_validate(headers, _parsed=_parsed)
-
- object.__setattr__(self, "headers", headers)
-
- # This is an unhashable type.
- __hash__ = None # type: ignore
-
-
-@dataclass(frozen=True)
-class ConnectionClosed(Event):
- """This event indicates that the sender has closed their outgoing
- connection.
-
- Note that this does not necessarily mean that they can't *receive* further
-    data, because TCP connections are composed of two one-way channels which
- can be closed independently. See :ref:`closing` for details.
-
- No fields.
- """
-
- pass
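
The event classes above are the objects an `h11.Connection` sends and receives; the connection turns them into wire bytes and enforces the protocol rules (such as the mandatory `Host` header checked in `Request.__init__`). A short client-side sketch:

```python
# Sketch of how the events above are used with h11's Connection (client side).
# No socket I/O here: conn.send() just returns the bytes that would be written.
import h11

conn = h11.Connection(our_role=h11.CLIENT)

request = h11.Request(
    method="GET",
    target="/index.html",
    headers=[("Host", "example.com"), ("Connection", "close")],
)
wire_bytes = conn.send(request)              # request line + headers
wire_bytes += conn.send(h11.EndOfMessage())  # finishes the (empty) body
print(wire_bytes.decode("ascii"))
```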
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/importlib_resources/tests/_compat.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/importlib_resources/tests/_compat.py
deleted file mode 100644
index e7bf06dd4eb5c1c65255df6262fec8332bdc9e2a..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/importlib_resources/tests/_compat.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-
-
-try:
- from test.support import import_helper # type: ignore
-except ImportError:
- # Python 3.9 and earlier
- class import_helper: # type: ignore
- from test.support import (
- modules_setup,
- modules_cleanup,
- DirsOnSysPath,
- CleanImport,
- )
-
-
-try:
- from test.support import os_helper # type: ignore
-except ImportError:
- # Python 3.9 compat
- class os_helper: # type:ignore
- from test.support import temp_dir
-
-
-try:
- # Python 3.10
- from test.support.os_helper import unlink
-except ImportError:
- from test.support import unlink as _unlink
-
- def unlink(target):
- return _unlink(os.fspath(target))
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/jsonschema/benchmarks/issue232.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/jsonschema/benchmarks/issue232.py
deleted file mode 100644
index efd07154822e4b0609900482eb26636fc3c100eb..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/jsonschema/benchmarks/issue232.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-A performance benchmark using the example from issue #232.
-
-See https://github.com/python-jsonschema/jsonschema/pull/232.
-"""
-from pathlib import Path
-
-from pyperf import Runner
-from referencing import Registry
-
-from jsonschema.tests._suite import Version
-import jsonschema
-
-issue232 = Version(
- path=Path(__file__).parent / "issue232",
- remotes=Registry(),
- name="issue232",
-)
-
-
-if __name__ == "__main__":
- issue232.benchmark(
- runner=Runner(),
- Validator=jsonschema.Draft4Validator,
- )
diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/commands/diffusers_cli.py b/spaces/declare-lab/tango/diffusers/src/diffusers/commands/diffusers_cli.py
deleted file mode 100644
index 74ad29a786d7f77e982242d7020170cb4d031c41..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/src/diffusers/commands/diffusers_cli.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from argparse import ArgumentParser
-
-from .env import EnvironmentCommand
-
-
-def main():
-    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
- commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
-
- # Register commands
- EnvironmentCommand.register_subcommand(commands_parser)
-
- # Let's go
- args = parser.parse_args()
-
- if not hasattr(args, "func"):
- parser.print_help()
- exit(1)
-
- # Run
- service = args.func(args)
- service.run()
-
-
-if __name__ == "__main__":
- main()
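
The CLI above dispatches by letting each command register a subparser and attach a `func` default, so `args.func(args)` constructs the command object whose `run()` does the work. A stdlib-only sketch of that pattern (`HelloCommand` is hypothetical, not a real diffusers command):

```python
# Sketch of the subcommand pattern the CLI above relies on. `HelloCommand`
# is hypothetical and only illustrates the register_subcommand/run contract.
from argparse import ArgumentParser, Namespace


class HelloCommand:
    @staticmethod
    def register_subcommand(commands_parser):
        parser = commands_parser.add_parser("hello", help="Print a greeting")
        parser.add_argument("--name", default="world")
        parser.set_defaults(func=lambda args: HelloCommand(args))

    def __init__(self, args: Namespace):
        self.name = args.name

    def run(self):
        print(f"Hello, {self.name}!")


parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="demo-cli command helpers")
HelloCommand.register_subcommand(commands_parser)

args = parser.parse_args(["hello", "--name", "diffusers"])
args.func(args).run()  # prints "Hello, diffusers!"
```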
diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py
deleted file mode 100644
index 2b47184d7773295a5701f8e80f89c64e721e8070..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py
+++ /dev/null
@@ -1,427 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-from typing import Callable, List, Optional, Union
-
-import numpy as np
-import PIL
-import torch
-import torch.utils.checkpoint
-from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
-
-from ...models import AutoencoderKL, UNet2DConditionModel
-from ...schedulers import KarrasDiffusionSchedulers
-from ...utils import is_accelerate_available, logging, randn_tensor
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-class VersatileDiffusionImageVariationPipeline(DiffusionPipeline):
- r"""
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Parameters:
-        image_feature_extractor ([`CLIPImageProcessor`]):
-            Feature extractor that preprocesses the conditioning image for the CLIP image encoder.
-        image_encoder ([`CLIPVisionModelWithProjection`]):
-            CLIP vision model used to encode the conditioning image into embeddings.
-        image_unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-        vae ([`AutoencoderKL`]):
-            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-        scheduler ([`SchedulerMixin`]):
-            A scheduler to be used in combination with `image_unet` to denoise the encoded image latents. Can be one
-            of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- """
- image_feature_extractor: CLIPImageProcessor
- image_encoder: CLIPVisionModelWithProjection
- image_unet: UNet2DConditionModel
- vae: AutoencoderKL
- scheduler: KarrasDiffusionSchedulers
-
- def __init__(
- self,
- image_feature_extractor: CLIPImageProcessor,
- image_encoder: CLIPVisionModelWithProjection,
- image_unet: UNet2DConditionModel,
- vae: AutoencoderKL,
- scheduler: KarrasDiffusionSchedulers,
- ):
- super().__init__()
- self.register_modules(
- image_feature_extractor=image_feature_extractor,
- image_encoder=image_encoder,
- image_unet=image_unet,
- vae=vae,
- scheduler=scheduler,
- )
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-
- def enable_sequential_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
- text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
- `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.
- """
- if is_accelerate_available():
- from accelerate import cpu_offload
- else:
- raise ImportError("Please install accelerate via `pip install accelerate`")
-
- device = torch.device(f"cuda:{gpu_id}")
-
- for cpu_offloaded_model in [self.image_unet, self.text_unet, self.text_encoder, self.vae]:
- if cpu_offloaded_model is not None:
- cpu_offload(cpu_offloaded_model, device)
-
- @property
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device with unet->image_unet
- def _execution_device(self):
- r"""
- Returns the device on which the pipeline's models will be executed. After calling
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
- hooks.
- """
- if not hasattr(self.image_unet, "_hf_hook"):
- return self.device
- for module in self.image_unet.modules():
- if (
- hasattr(module, "_hf_hook")
- and hasattr(module._hf_hook, "execution_device")
- and module._hf_hook.execution_device is not None
- ):
- return torch.device(module._hf_hook.execution_device)
- return self.device
-
- def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
- prompt (`str` or `List[str]`):
- prompt to be encoded
- device: (`torch.device`):
- torch device
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- """
-
- def normalize_embeddings(encoder_output):
- embeds = self.image_encoder.vision_model.post_layernorm(encoder_output.last_hidden_state)
- embeds = self.image_encoder.visual_projection(embeds)
- embeds_pooled = embeds[:, 0:1]
- embeds = embeds / torch.norm(embeds_pooled, dim=-1, keepdim=True)
- return embeds
-
- if isinstance(prompt, torch.Tensor) and len(prompt.shape) == 4:
- prompt = list(prompt)
-
- batch_size = len(prompt) if isinstance(prompt, list) else 1
-
- # get prompt text embeddings
- image_input = self.image_feature_extractor(images=prompt, return_tensors="pt")
- pixel_values = image_input.pixel_values.to(device).to(self.image_encoder.dtype)
- image_embeddings = self.image_encoder(pixel_values)
- image_embeddings = normalize_embeddings(image_embeddings)
-
- # duplicate image embeddings for each generation per prompt, using mps friendly method
- bs_embed, seq_len, _ = image_embeddings.shape
- image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
- image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance:
- uncond_images: List[str]
- if negative_prompt is None:
- uncond_images = [np.zeros((512, 512, 3)) + 0.5] * batch_size
- elif type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, PIL.Image.Image):
- uncond_images = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_images = negative_prompt
-
- uncond_images = self.image_feature_extractor(images=uncond_images, return_tensors="pt")
- pixel_values = uncond_images.pixel_values.to(device).to(self.image_encoder.dtype)
- negative_prompt_embeds = self.image_encoder(pixel_values)
- negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds)
-
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = negative_prompt_embeds.shape[1]
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and conditional embeddings into a single batch
- # to avoid doing two forward passes
- image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
-
- return image_embeddings
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
- def decode_latents(self, latents):
- latents = 1 / self.vae.config.scaling_factor * latents
- image = self.vae.decode(latents).sample
- image = (image / 2 + 0.5).clamp(0, 1)
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
- return image
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
- def prepare_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs
- def check_inputs(self, image, height, width, callback_steps):
- if (
- not isinstance(image, torch.Tensor)
- and not isinstance(image, PIL.Image.Image)
- and not isinstance(image, list)
- ):
- raise ValueError(
- "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
- f" {type(image)}"
- )
-
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- if latents is None:
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- else:
- latents = latents.to(device)
-
- # scale the initial noise by the standard deviation required by the scheduler
- latents = latents * self.scheduler.init_noise_sigma
- return latents
-
- @torch.no_grad()
- def __call__(
- self,
- image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.Tensor],
- height: Optional[int] = None,
- width: Optional[int] = None,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: int = 1,
- **kwargs,
- ):
- r"""
- Function invoked when calling the pipeline for generation.
-
- Args:
- image (`PIL.Image.Image`, `List[PIL.Image.Image]` or `torch.Tensor`):
- The image prompt or prompts to guide the image generation.
- height (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor):
- The width in pixels of the generated image.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
- [`schedulers.DDIMScheduler`], will be ignored for others.
- generator (`torch.Generator`, *optional*):
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
- to make generation deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-            tensor will be generated by sampling using the supplied random `generator`.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generate image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
- plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
-
- Examples:
-
- ```py
- >>> from diffusers import VersatileDiffusionImageVariationPipeline
- >>> import torch
- >>> import requests
- >>> from io import BytesIO
- >>> from PIL import Image
-
- >>> # let's download an initial image
- >>> url = "https://huggingface.co/datasets/diffusers/images/resolve/main/benz.jpg"
-
- >>> response = requests.get(url)
- >>> image = Image.open(BytesIO(response.content)).convert("RGB")
-
- >>> pipe = VersatileDiffusionImageVariationPipeline.from_pretrained(
- ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16
- ... )
- >>> pipe = pipe.to("cuda")
-
- >>> generator = torch.Generator(device="cuda").manual_seed(0)
- >>> image = pipe(image, generator=generator).images[0]
- >>> image.save("./car_variation.png")
- ```
-
- Returns:
-            [`~pipelines.pipeline_utils.ImagePipelineOutput`] or `tuple`:
-            [`~pipelines.pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple` whose
-            first element is a list with the generated images.
- """
- # 0. Default height and width to unet
- height = height or self.image_unet.config.sample_size * self.vae_scale_factor
- width = width or self.image_unet.config.sample_size * self.vae_scale_factor
-
- # 1. Check inputs. Raise error if not correct
- self.check_inputs(image, height, width, callback_steps)
-
- # 2. Define call parameters
- batch_size = 1 if isinstance(image, PIL.Image.Image) else len(image)
- device = self._execution_device
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # 3. Encode input prompt
- image_embeddings = self._encode_prompt(
- image, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
- )
-
- # 4. Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps = self.scheduler.timesteps
-
- # 5. Prepare latent variables
- num_channels_latents = self.image_unet.in_channels
- latents = self.prepare_latents(
- batch_size * num_images_per_prompt,
- num_channels_latents,
- height,
- width,
- image_embeddings.dtype,
- device,
- generator,
- latents,
- )
-
- # 6. Prepare extra step kwargs.
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 7. Denoising loop
- for i, t in enumerate(self.progress_bar(timesteps)):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- # predict the noise residual
- noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # call the callback, if provided
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- # 8. Post-processing
- image = self.decode_latents(latents)
-
- # 9. Convert to PIL
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image,)
-
- return ImagePipelineOutput(images=image)
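
The denoising loop above batches the unconditional and conditional image embeddings, runs the U-Net once, and recombines the two halves with the guidance scale. The recombination itself is just a couple of tensor ops; a minimal sketch with illustrative shapes:

```python
# Minimal sketch of the classifier-free guidance recombination used in the
# denoising loop above. The tensor shapes and values are illustrative only.
import torch

guidance_scale = 7.5
# Batched prediction: [unconditional, conditional] stacked along the batch dim.
noise_pred = torch.randn(2, 4, 64, 64)

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 64, 64])
```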
diff --git a/spaces/declare-lab/tango/diffusers/tests/pipelines/test_pipeline_utils.py b/spaces/declare-lab/tango/diffusers/tests/pipelines/test_pipeline_utils.py
deleted file mode 100644
index 51d987d8bb1151862f910822eb2c173ce4ff313c..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/tests/pipelines/test_pipeline_utils.py
+++ /dev/null
@@ -1,134 +0,0 @@
-import unittest
-
-from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
-
-
-class IsSafetensorsCompatibleTests(unittest.TestCase):
- def test_all_is_compatible(self):
- filenames = [
- "safety_checker/pytorch_model.bin",
- "safety_checker/model.safetensors",
- "vae/diffusion_pytorch_model.bin",
- "vae/diffusion_pytorch_model.safetensors",
- "text_encoder/pytorch_model.bin",
- "text_encoder/model.safetensors",
- "unet/diffusion_pytorch_model.bin",
- "unet/diffusion_pytorch_model.safetensors",
- ]
- self.assertTrue(is_safetensors_compatible(filenames))
-
- def test_diffusers_model_is_compatible(self):
- filenames = [
- "unet/diffusion_pytorch_model.bin",
- "unet/diffusion_pytorch_model.safetensors",
- ]
- self.assertTrue(is_safetensors_compatible(filenames))
-
- def test_diffusers_model_is_not_compatible(self):
- filenames = [
- "safety_checker/pytorch_model.bin",
- "safety_checker/model.safetensors",
- "vae/diffusion_pytorch_model.bin",
- "vae/diffusion_pytorch_model.safetensors",
- "text_encoder/pytorch_model.bin",
- "text_encoder/model.safetensors",
- "unet/diffusion_pytorch_model.bin",
- # Removed: 'unet/diffusion_pytorch_model.safetensors',
- ]
- self.assertFalse(is_safetensors_compatible(filenames))
-
- def test_transformer_model_is_compatible(self):
- filenames = [
- "text_encoder/pytorch_model.bin",
- "text_encoder/model.safetensors",
- ]
- self.assertTrue(is_safetensors_compatible(filenames))
-
- def test_transformer_model_is_not_compatible(self):
- filenames = [
- "safety_checker/pytorch_model.bin",
- "safety_checker/model.safetensors",
- "vae/diffusion_pytorch_model.bin",
- "vae/diffusion_pytorch_model.safetensors",
- "text_encoder/pytorch_model.bin",
- # Removed: 'text_encoder/model.safetensors',
- "unet/diffusion_pytorch_model.bin",
- "unet/diffusion_pytorch_model.safetensors",
- ]
- self.assertFalse(is_safetensors_compatible(filenames))
-
- def test_all_is_compatible_variant(self):
- filenames = [
- "safety_checker/pytorch_model.fp16.bin",
- "safety_checker/model.fp16.safetensors",
- "vae/diffusion_pytorch_model.fp16.bin",
- "vae/diffusion_pytorch_model.fp16.safetensors",
- "text_encoder/pytorch_model.fp16.bin",
- "text_encoder/model.fp16.safetensors",
- "unet/diffusion_pytorch_model.fp16.bin",
- "unet/diffusion_pytorch_model.fp16.safetensors",
- ]
- variant = "fp16"
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
-
- def test_diffusers_model_is_compatible_variant(self):
- filenames = [
- "unet/diffusion_pytorch_model.fp16.bin",
- "unet/diffusion_pytorch_model.fp16.safetensors",
- ]
- variant = "fp16"
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
-
- def test_diffusers_model_is_compatible_variant_partial(self):
- # pass variant but use the non-variant filenames
- filenames = [
- "unet/diffusion_pytorch_model.bin",
- "unet/diffusion_pytorch_model.safetensors",
- ]
- variant = "fp16"
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
-
- def test_diffusers_model_is_not_compatible_variant(self):
- filenames = [
- "safety_checker/pytorch_model.fp16.bin",
- "safety_checker/model.fp16.safetensors",
- "vae/diffusion_pytorch_model.fp16.bin",
- "vae/diffusion_pytorch_model.fp16.safetensors",
- "text_encoder/pytorch_model.fp16.bin",
- "text_encoder/model.fp16.safetensors",
- "unet/diffusion_pytorch_model.fp16.bin",
- # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
- ]
- variant = "fp16"
- self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
-
- def test_transformer_model_is_compatible_variant(self):
- filenames = [
- "text_encoder/pytorch_model.fp16.bin",
- "text_encoder/model.fp16.safetensors",
- ]
- variant = "fp16"
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
-
- def test_transformer_model_is_compatible_variant_partial(self):
- # pass variant but use the non-variant filenames
- filenames = [
- "text_encoder/pytorch_model.bin",
- "text_encoder/model.safetensors",
- ]
- variant = "fp16"
- self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
-
- def test_transformer_model_is_not_compatible_variant(self):
- filenames = [
- "safety_checker/pytorch_model.fp16.bin",
- "safety_checker/model.fp16.safetensors",
- "vae/diffusion_pytorch_model.fp16.bin",
- "vae/diffusion_pytorch_model.fp16.safetensors",
- "text_encoder/pytorch_model.fp16.bin",
- # 'text_encoder/model.fp16.safetensors',
- "unet/diffusion_pytorch_model.fp16.bin",
- "unet/diffusion_pytorch_model.fp16.safetensors",
- ]
- variant = "fp16"
- self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
diff --git a/spaces/deedax/TLDR-the-TnC/config.py b/spaces/deedax/TLDR-the-TnC/config.py
deleted file mode 100644
index e92a66532f610b7918e81594374b7d4b4aa829e9..0000000000000000000000000000000000000000
--- a/spaces/deedax/TLDR-the-TnC/config.py
+++ /dev/null
@@ -1 +0,0 @@
-OPENAI_API_KEY = "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"
\ No newline at end of file
diff --git a/spaces/deepghs/anime-ai-detect-fucker/attacker/base.py b/spaces/deepghs/anime-ai-detect-fucker/attacker/base.py
deleted file mode 100644
index 3d630cbadadd732efae1c4a10ee8c40a74a5884b..0000000000000000000000000000000000000000
--- a/spaces/deepghs/anime-ai-detect-fucker/attacker/base.py
+++ /dev/null
@@ -1,32 +0,0 @@
-class Attacker:
- def __init__(self, model, img_transform=(lambda x: x, lambda x: x)):
- self.model = model # must be a PyTorch model
- '''self.model.eval()
- for k, v in self.model.named_parameters():
- v.requires_grad = False'''
- self.img_transform = img_transform
- self.forward = lambda attacker, images, labels: attacker.step(images, labels, attacker.loss)
-
- def set_para(self, **kwargs):
- for k, v in kwargs.items():
- setattr(self, k, v)
-
- def set_forward(self, forward):
- self.forward = forward
-
- def step(self, images, labels, loss):
- pass
-
- def set_loss(self, loss):
- self.loss = loss
-
- def attack(self, images, labels):
- pass
-
-
-class Empty:
- def __enter__(self):
- pass
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- pass
diff --git a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/hifigan/__init__.py b/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/hifigan/__init__.py
deleted file mode 100644
index e0ae476fe58c48e998c56234a55b871beba4042d..0000000000000000000000000000000000000000
--- a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/hifigan/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .models import Generator
-
-
-class AttrDict(dict):
- def __init__(self, *args, **kwargs):
- super(AttrDict, self).__init__(*args, **kwargs)
- self.__dict__ = self
diff --git a/spaces/deepwisdom/MetaGPT/metagpt/tools/prompt_writer.py b/spaces/deepwisdom/MetaGPT/metagpt/tools/prompt_writer.py
deleted file mode 100644
index 83a29413bc2def4d521507827d9d16d402fad562..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/metagpt/tools/prompt_writer.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/2 16:03
-@Author : alexanderwu
-@File : prompt_writer.py
-"""
-from typing import Union
-
-
-class GPTPromptGenerator:
- """通过LLM,给定输出,要求LLM给出输入(支持指令、对话、搜索三种风格)"""
- def __init__(self):
- self._generators = {i: getattr(self, f"gen_{i}_style") for i in ['instruction', 'chatbot', 'query']}
-
- def gen_instruction_style(self, example):
- """指令风格:给定输出,要求LLM给出输入"""
- return f"""指令:X
-输出:{example}
-这个输出可能来源于什么样的指令?
-X:"""
-
- def gen_chatbot_style(self, example):
- """对话风格:给定输出,要求LLM给出输入"""
- return f"""你是一个对话机器人。一个用户给你发送了一条非正式的信息,你的回复如下。
-信息:X
-回复:{example}
-非正式信息X是什么?
-X:"""
-
- def gen_query_style(self, example):
- """搜索风格:给定输出,要求LLM给出输入"""
- return f"""你是一个搜索引擎。一个人详细地查询了某个问题,关于这个查询最相关的文档如下。
-查询:X
-文档:{example} 详细的查询X是什么?
-X:"""
-
- def gen(self, example: str, style: str = 'all') -> Union[list[str], str]:
- """
- Generate one or more prompts from `example`, used to ask the LLM for the corresponding input.
-
- :param example: expected output sample of the LLM
- :param style: (all|instruction|chatbot|query)
- :return: expected input sample(s) of the LLM (one or more)
- """
- if style != 'all':
- return self._generators[style](example)
- return [f(example) for f in self._generators.values()]
-
-
-class WikiHowTemplate:
- def __init__(self):
- self._prompts = """Give me {step} steps to {question}.
-How to {question}?
-Do you know how can I {question}?
-List {step} instructions to {question}.
-What are some tips to {question}?
-What are some steps to {question}?
-Can you provide {step} clear and concise instructions on how to {question}?
-I'm interested in learning how to {question}. Could you break it down into {step} easy-to-follow steps?
-For someone who is new to {question}, what would be {step} key steps to get started?
-What is the most efficient way to {question}? Could you provide a list of {step} steps?
-Do you have any advice on how to {question} successfully? Maybe a step-by-step guide with {step} steps?
-I'm trying to accomplish {question}. Could you walk me through the process with {step} detailed instructions?
-What are the essential {step} steps to {question}?
-I need to {question}, but I'm not sure where to start. Can you give me {step} actionable steps?
-As a beginner in {question}, what are the {step} basic steps I should take?
-I'm looking for a comprehensive guide on how to {question}. Can you provide {step} detailed steps?
-Could you outline {step} practical steps to achieve {question}?
-What are the {step} fundamental steps to consider when attempting to {question}?"""
-
- def gen(self, question: str, step: str) -> list[str]:
- return self._prompts.format(question=question, step=step).splitlines()
-
-
-class EnronTemplate:
- def __init__(self):
- self._prompts = """Write an email with the subject "{subj}".
-Can you craft an email with the subject {subj}?
-Would you be able to compose an email and use {subj} as the subject?
-Create an email about {subj}.
-Draft an email and include the subject "{subj}".
-Generate an email about {subj}.
-Hey, can you shoot me an email about {subj}?
-Do you mind crafting an email for me with {subj} as the subject?
-Can you whip up an email with the subject of "{subj}"?
-Hey, can you write an email and use "{subj}" as the subject?
-Can you send me an email about {subj}?"""
-
- def gen(self, subj):
- return self._prompts.format(subj=subj).splitlines()
-
-
-class BEAGECTemplate:
- def __init__(self):
- self._prompts = """Edit and revise this document to improve its grammar, vocabulary, spelling, and style.
-Revise this document to correct all the errors related to grammar, spelling, and style.
-Refine this document by eliminating all grammatical, lexical, and orthographic errors and improving its writing style.
-Polish this document by rectifying all errors related to grammar, vocabulary, and writing style.
-Enhance this document by correcting all the grammar errors and style issues, and improving its overall quality.
-Rewrite this document by fixing all grammatical, lexical and orthographic errors.
-Fix all grammar errors and style issues and rewrite this document.
-Take a stab at fixing all the mistakes in this document and make it sound better.
-Give this document a once-over and clean up any grammar or spelling errors.
-Tweak this document to make it read smoother and fix any mistakes you see.
-Make this document sound better by fixing all the grammar, spelling, and style issues.
-Proofread this document and fix any errors that make it sound weird or confusing."""
-
- def gen(self):
- return self._prompts.splitlines()
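-
-
-# A minimal usage sketch of the classes above (the example strings are just
-# placeholders):
-#
-#   gen = GPTPromptGenerator()
-#   prompts = gen.gen("The Eiffel Tower is in Paris.", style="query")
-#   howtos = WikiHowTemplate().gen(question="bake bread", step="5")
-#   emails = EnronTemplate().gen(subj="Quarterly report")
-#   edits = BEAGECTemplate().gen()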
diff --git a/spaces/deepwisdom/MetaGPT/tests/metagpt/actions/test_write_test.py b/spaces/deepwisdom/MetaGPT/tests/metagpt/actions/test_write_test.py
deleted file mode 100644
index 87a22b13917978374c163213e315d01dcf3ad8f7..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/tests/metagpt/actions/test_write_test.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/11 17:45
-@Author : alexanderwu
-@File : test_write_test.py
-"""
-import pytest
-
-from metagpt.actions.write_test import WriteTest
-from metagpt.logs import logger
-
-
-@pytest.mark.asyncio
-async def test_write_test():
- code = """
- import random
- from typing import Tuple
-
- class Food:
- def __init__(self, position: Tuple[int, int]):
- self.position = position
-
- def generate(self, max_y: int, max_x: int):
- self.position = (random.randint(1, max_y - 1), random.randint(1, max_x - 1))
- """
-
- write_test = WriteTest()
-
- test_code = await write_test.run(
- code_to_test=code,
- test_file_name="test_food.py",
- source_file_path="/some/dummy/path/cli_snake_game/cli_snake_game/food.py",
- workspace="/some/dummy/path/cli_snake_game"
- )
- logger.info(test_code)
-
- # We cannot exactly predict the generated test cases, but we can check if it is a string and if it is not empty
- assert isinstance(test_code, str)
- assert "from cli_snake_game.food import Food" in test_code
- assert "class TestFood(unittest.TestCase)" in test_code
- assert "def test_generate" in test_code
diff --git a/spaces/diacanFperku/AutoGPT/Corel Painter Essentials 4 Serial Number Keygen !!TOP!!.md b/spaces/diacanFperku/AutoGPT/Corel Painter Essentials 4 Serial Number Keygen !!TOP!!.md
deleted file mode 100644
index 30a4daa7575f3c7cba0b96b85eb463879648ae4f..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Corel Painter Essentials 4 Serial Number Keygen !!TOP!!.md
+++ /dev/null
@@ -1,34 +0,0 @@
-
-How to Activate Corel Painter Essentials 4 with Serial Number Keygen
-If you are looking for a way to activate Corel Painter Essentials 4, a powerful and easy-to-use software for creating digital art, you might be interested in using a serial number keygen. A serial number keygen is a program that generates valid serial numbers for various software products. By using a serial number keygen, you can bypass the activation process and use the software without paying for it.
-Corel Painter Essentials 4 Serial Number Keygen
DOWNLOAD >>> https://gohhs.com/2uFTJs
-However, using a serial number keygen is not legal and can expose you to various risks, such as malware infection, legal action, or loss of data. Therefore, we do not recommend using a serial number keygen for Corel Painter Essentials 4 or any other software. Instead, we suggest that you purchase a legitimate license from the official website of Corel or an authorized reseller.
-If you have already purchased a license for Corel Painter Essentials 4, you can activate it easily by following these steps:
-
-- Launch Corel Painter Essentials 4.
-- The next screen gives you three options: Continue to Evaluate, Purchase, and Activate Now. Select Activate Now, and enter the serial number/purchase key that you received in your order confirmation email.
-- The next screen displays your Installation Code and Serial Code. Click Next.
-- If an internet connection is detected, the next screen will say "You have successfully activated your product." You will be asked to retain your activation code for your records. This is advisable, as you will need the code should you need to reinstall your program in the future. Click Finish.
-
-If you receive errors when activating, or are not able to access the screens outlined above, please contact Customer Support Services, and an agent can assist you with the activation process.
-By activating Corel Painter Essentials 4 with a valid license, you can enjoy all the features and benefits of this amazing software, such as:
-
-
-- Create stunning paintings from photos with the Photo Painting System.
-- Experiment with realistic brushes and media, such as oil, acrylic, watercolor, chalk, and more.
-- Customize your workspace and tools to suit your preferences and workflow.
-- Learn from tutorials and tips from experts and other users.
-- Share your artwork online or print it on canvas.
-
-We hope this article has helped you understand how to activate Corel Painter Essentials 4 with a serial number keygen. However, we strongly advise you to avoid using a serial number keygen and instead purchase a legitimate license for this software. This way, you can support the developers of this software and avoid any legal or technical issues.
-
-If you want to learn more about Corel Painter Essentials 4 and how to use it effectively, you can check out the following resources:
-
-- The official website of Corel Painter Essentials 4, where you can find product information, features, tutorials, gallery, and support.
-- The Corel Discovery Center, where you can access free learning materials, such as video tutorials, tips and tricks, webinars, and blogs.
-- The Corel Painter Essentials 4 User Guide, where you can find detailed instructions on how to use the software and its tools.
-- The Corel Painter Essentials 4 Community Forum, where you can interact with other users, ask questions, share feedback, and showcase your artwork.
-
-Corel Painter Essentials 4 is a great software for anyone who wants to unleash their creativity and create stunning digital art. Whether you are a beginner or a professional, you can find the tools and features that suit your needs and style. By activating Corel Painter Essentials 4 with a valid license, you can ensure that you have the best experience with this software and support its development.
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Licence-Key-Booklet-Creator.md b/spaces/diacanFperku/AutoGPT/Licence-Key-Booklet-Creator.md
deleted file mode 100644
index 5d7ed10cfdd4289d4c60c0737ad88ebe8db40eb0..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Licence-Key-Booklet-Creator.md
+++ /dev/null
@@ -1,85 +0,0 @@
-Licence Key Booklet Creator
-
-Click Here > [https://maudaracte.blogspot.com/?file=2tvJbW](https://maudaracte.blogspot.com/?file=2tvJbW)
-
-How to Create a Booklet from a PDF Document with Licence Key Booklet Creator
-
-If you want to create a booklet from a PDF document, you might be interested in Licence Key Booklet Creator, a simple and lightweight software tool that can help you do that in the easiest way possible. In this article, we will show you how to use Licence Key Booklet Creator to create a booklet from a PDF document in just a few steps.
-
-What is Licence Key Booklet Creator?
-
-Licence Key Booklet Creator is a software tool that allows you to create a booklet from a PDF document. It reorders pages so that after printing and folding the pages, a small book is created. You can choose between two different layout types: book or calendar. Licence Key Booklet Creator can only work with PDF files, and it doesn't have any configuration settings, making it very easy to use.
-
-How to Download and Install Licence Key Booklet Creator?
-
-To download and install Licence Key Booklet Creator, you need to follow these steps:
-
-
-Go to this link and download the software.
-Open the downloaded file and follow the installation wizard.
-Enter your licence key when prompted. You can get your licence key by purchasing the software from this website.
-Finish the installation and launch the software.
-
-
-How to Create a Booklet from a PDF Document with Licence Key Booklet Creator?
-
-To create a booklet from a PDF document with Licence Key Booklet Creator, you need to follow these steps:
-
-
-Open the software and click on the "Browse" button to select the PDF document you want to create a booklet from.
-Select an output destination for the booklet file.
-Select a layout type: book or calendar.
-Click on the "Create booklet" button and wait for the process to finish.
-Open the newly created booklet file and check if everything is correct.
-Print and fold the booklet according to your preferences.
-
-
-Conclusion
-
-Licence Key Booklet Creator is a simple and lightweight software tool that can help you create a booklet from a PDF document in the easiest way possible. It has no configuration settings, and it can only work with PDF files. You can choose between two different layout types: book or calendar. To use Licence Key Booklet Creator, you need to purchase a licence key from this website. We hope this article was helpful and informative for you. If you have any questions or feedback, please leave them in the comments section below.
-
-What are the Benefits of Creating a Booklet from a PDF Document?
-
-Creating a booklet from a PDF document can have many benefits, such as:
-
-
-It can save paper and ink by printing multiple pages on a single sheet.
-It can make your document more portable and convenient to read.
-It can enhance the presentation and appearance of your document.
-It can be used for various purposes, such as marketing, education, entertainment, etc.
-
-
-What are the Requirements for Creating a Booklet from a PDF Document?
-
-To create a booklet from a PDF document, you need to have the following requirements:
-
-
-A PDF document that you want to create a booklet from.
-A printer that supports duplex printing (printing on both sides of the paper) or manual duplex printing (printing one side at a time and flipping the paper).
-A licence key for Licence Key Booklet Creator software.
-A computer with Windows or Mac operating system.
-
-
-What are the Alternatives to Licence Key Booklet Creator?
-
-If you don't want to use Licence Key Booklet Creator software, you can also try some alternatives, such as:
-
-
-Adobe Acrobat or Reader: These are popular PDF software that can also print booklets and PDF portfolios. You can find more information on how to print booklets using Acrobat or Reader here.
-Microsoft Word: This is a word processing software that can also create a booklet or book in Word. You can find more information on how to create a booklet or book in Word here.
-FlipHTML5: This is a digital booklet creator that can help you create interactive and engaging booklets from PDF documents. You can find more information on how to use FlipHTML5 here.
-
-
-
diff --git a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/text/chinese.py b/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/text/chinese.py
deleted file mode 100644
index 276753880b73de2e8889dcb2101cd98c09e0710b..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/text/chinese.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import os
-import re
-
-import cn2an
-from pypinyin import lazy_pinyin, Style
-
-from text import symbols
-from text.symbols import punctuation
-from text.tone_sandhi import ToneSandhi
-
-current_file_path = os.path.dirname(__file__)
-pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in
- open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()}
-
-import jieba.posseg as psg
-
-
-rep_map = {
- ':': ',',
- ';': ',',
- ',': ',',
- '。': '.',
- '!': '!',
- '?': '?',
- '\n': '.',
- "·": ",",
- '、': ",",
- '...': '…',
- '$': '.',
- '“': "'",
- '”': "'",
- '‘': "'",
- '’': "'",
- '(': "'",
- ')': "'",
- '(': "'",
- ')': "'",
- '《': "'",
- '》': "'",
- '【': "'",
- '】': "'",
- '[': "'",
- ']': "'",
- '—': "-",
- '~': "-",
- '~': "-",
- '「': "'",
- '」': "'",
-
-}
-
-tone_modifier = ToneSandhi()
-
-def replace_punctuation(text):
- text = text.replace("嗯", "恩").replace("呣","母")
- pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys()))
-
- replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
-
- replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text)
-
- return replaced_text
-
-def g2p(text):
- pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation))
- sentences = [i for i in re.split(pattern, text) if i.strip()!='']
- phones, tones, word2ph = _g2p(sentences)
- assert sum(word2ph) == len(phones)
- assert len(word2ph) == len(text)  # Sometimes this assertion fails; you can wrap it in a try/except.
- phones = ['_'] + phones + ["_"]
- tones = [0] + tones + [0]
- word2ph = [1] + word2ph + [1]
- return phones, tones, word2ph
-
-
-def _get_initials_finals(word):
- initials = []
- finals = []
- orig_initials = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.INITIALS)
- orig_finals = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
- for c, v in zip(orig_initials, orig_finals):
- initials.append(c)
- finals.append(v)
- return initials, finals
-
-
-def _g2p(segments):
- phones_list = []
- tones_list = []
- word2ph = []
- for seg in segments:
- pinyins = []
- # Strip all English letters from the sentence
- seg = re.sub('[a-zA-Z]+', '', seg)
- seg_cut = psg.lcut(seg)
- initials = []
- finals = []
- seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
- for word, pos in seg_cut:
- if pos == 'eng':
- continue
- sub_initials, sub_finals = _get_initials_finals(word)
- sub_finals = tone_modifier.modified_tone(word, pos,
- sub_finals)
- initials.append(sub_initials)
- finals.append(sub_finals)
-
- # assert len(sub_initials) == len(sub_finals) == len(word)
- initials = sum(initials, [])
- finals = sum(finals, [])
- #
- for c, v in zip(initials, finals):
- raw_pinyin = c+v
- # NOTE: post process for pypinyin outputs
- # we discriminate i, ii and iii
- if c == v:
- assert c in punctuation
- phone = [c]
- tone = '0'
- word2ph.append(1)
- else:
- v_without_tone = v[:-1]
- tone = v[-1]
-
- pinyin = c+v_without_tone
- assert tone in '12345'
-
- if c:
- # multi-syllable
- v_rep_map = {
- "uei": 'ui',
- 'iou': 'iu',
- 'uen': 'un',
- }
- if v_without_tone in v_rep_map.keys():
- pinyin = c+v_rep_map[v_without_tone]
- else:
- # single syllable
- pinyin_rep_map = {
- 'ing': 'ying',
- 'i': 'yi',
- 'in': 'yin',
- 'u': 'wu',
- }
- if pinyin in pinyin_rep_map.keys():
- pinyin = pinyin_rep_map[pinyin]
- else:
- single_rep_map = {
- 'v': 'yu',
- 'e': 'e',
- 'i': 'y',
- 'u': 'w',
- }
- if pinyin[0] in single_rep_map.keys():
- pinyin = single_rep_map[pinyin[0]]+pinyin[1:]
-
- assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
- phone = pinyin_to_symbol_map[pinyin].split(' ')
- word2ph.append(len(phone))
-
- phones_list += phone
- tones_list += [int(tone)] * len(phone)
- return phones_list, tones_list, word2ph
-
-
-
-def text_normalize(text):
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
- for number in numbers:
- text = text.replace(number, cn2an.an2cn(number), 1)
- text = replace_punctuation(text)
- return text
-
-def get_bert_feature(text, word2ph):
- from text import chinese_bert
- return chinese_bert.get_bert_feature(text, word2ph)
-
-if __name__ == '__main__':
- from text.chinese_bert import get_bert_feature
- text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
- text = text_normalize(text)
- print(text)
- phones, tones, word2ph = g2p(text)
- bert = get_bert_feature(text, word2ph)
-
- print(phones, tones, word2ph, bert.shape)
-
-
-# # Example usage
-# text = "这是一个示例文本:,你好!这是一个测试...."
-# print(g2p_paddle(text)) # Output: 这是一个示例文本你好这是一个测试
diff --git a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/fsaf_head.py b/spaces/dineshreddy/WALT/mmdet/models/dense_heads/fsaf_head.py
deleted file mode 100644
index 7183efce28596ba106411250f508aec5995fbf60..0000000000000000000000000000000000000000
--- a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/fsaf_head.py
+++ /dev/null
@@ -1,422 +0,0 @@
-import numpy as np
-import torch
-from mmcv.cnn import normal_init
-from mmcv.runner import force_fp32
-
-from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply,
- unmap)
-from ..builder import HEADS
-from ..losses.accuracy import accuracy
-from ..losses.utils import weight_reduce_loss
-from .retina_head import RetinaHead
-
-
-@HEADS.register_module()
-class FSAFHead(RetinaHead):
- """Anchor-free head used in `FSAF `_.
-
- The head contains two subnetworks. The first classifies anchor boxes and
- the second regresses deltas for the anchors (num_anchors is 1 for anchor-
- free methods)
-
- Args:
- *args: Same as its base class in :class:`RetinaHead`
- score_threshold (float, optional): The score threshold used to calculate
- positive recall. If given, prediction scores lower than this value
- are counted as incorrect predictions. Defaults to None.
- **kwargs: Same as its base class in :class:`RetinaHead`
-
- Example:
- >>> import torch
- >>> self = FSAFHead(11, 7)
- >>> x = torch.rand(1, 7, 32, 32)
- >>> cls_score, bbox_pred = self.forward_single(x)
- >>> # Each anchor predicts a score for each class except background
- >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
- >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
- >>> assert cls_per_anchor == self.num_classes
- >>> assert box_per_anchor == 4
- """
-
- def __init__(self, *args, score_threshold=None, **kwargs):
- super().__init__(*args, **kwargs)
- self.score_threshold = score_threshold
-
- def forward_single(self, x):
- """Forward feature map of a single scale level.
-
- Args:
- x (Tensor): Feature map of a single scale level.
-
- Returns:
- tuple (Tensor):
- cls_score (Tensor): Box scores for each scale level
- Has shape (N, num_points * num_classes, H, W).
- bbox_pred (Tensor): Box energies / deltas for each scale
- level with shape (N, num_points * 4, H, W).
- """
- cls_score, bbox_pred = super().forward_single(x)
- # relu: TBLR encoder only accepts positive bbox_pred
- return cls_score, self.relu(bbox_pred)
-
- def init_weights(self):
- """Initialize weights of the head."""
- super(FSAFHead, self).init_weights()
- # The positive bias in the self.retina_reg conv prevents the predicted
- # bboxes from having zero area
- normal_init(self.retina_reg, std=0.01, bias=0.25)
-
- def _get_targets_single(self,
- flat_anchors,
- valid_flags,
- gt_bboxes,
- gt_bboxes_ignore,
- gt_labels,
- img_meta,
- label_channels=1,
- unmap_outputs=True):
- """Compute regression and classification targets for anchors in a
- single image.
-
- Most of the code is the same as in the base class
- :obj: `AnchorHead`, except that it also collects and returns
- the matched gt index in the image (from 0 to num_gt-1). If the
- anchor bbox is not matched to any gt, the corresponding value in
- pos_gt_inds is -1.
- """
- inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
- img_meta['img_shape'][:2],
- self.train_cfg.allowed_border)
- if not inside_flags.any():
- return (None, ) * 7
- # Assign gt and sample anchors
- anchors = flat_anchors[inside_flags.type(torch.bool), :]
- assign_result = self.assigner.assign(
- anchors, gt_bboxes, gt_bboxes_ignore,
- None if self.sampling else gt_labels)
-
- sampling_result = self.sampler.sample(assign_result, anchors,
- gt_bboxes)
-
- num_valid_anchors = anchors.shape[0]
- bbox_targets = torch.zeros_like(anchors)
- bbox_weights = torch.zeros_like(anchors)
- labels = anchors.new_full((num_valid_anchors, ),
- self.num_classes,
- dtype=torch.long)
- label_weights = anchors.new_zeros((num_valid_anchors, label_channels),
- dtype=torch.float)
- pos_gt_inds = anchors.new_full((num_valid_anchors, ),
- -1,
- dtype=torch.long)
-
- pos_inds = sampling_result.pos_inds
- neg_inds = sampling_result.neg_inds
-
- if len(pos_inds) > 0:
- if not self.reg_decoded_bbox:
- pos_bbox_targets = self.bbox_coder.encode(
- sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
- else:
- # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
- # is applied directly on the decoded bounding boxes, both
- # the predicted boxes and regression targets should be with
- # absolute coordinate format.
- pos_bbox_targets = sampling_result.pos_gt_bboxes
- bbox_targets[pos_inds, :] = pos_bbox_targets
- bbox_weights[pos_inds, :] = 1.0
- # The assigned gt_index for each anchor. (0-based)
- pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds
- if gt_labels is None:
- # Only rpn gives gt_labels as None
- # Foreground is the first class
- labels[pos_inds] = 0
- else:
- labels[pos_inds] = gt_labels[
- sampling_result.pos_assigned_gt_inds]
- if self.train_cfg.pos_weight <= 0:
- label_weights[pos_inds] = 1.0
- else:
- label_weights[pos_inds] = self.train_cfg.pos_weight
-
- if len(neg_inds) > 0:
- label_weights[neg_inds] = 1.0
-
- # shadowed_labels is a tensor composed of tuples
- # (anchor_inds, class_label) that indicate those anchors lying in the
- # outer region of a gt or overlapped by another gt with a smaller
- # area.
- #
- # Therefore, only the shadowed labels are ignored for loss calculation.
- # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner`
- shadowed_labels = assign_result.get_extra_property('shadowed_labels')
- if shadowed_labels is not None and shadowed_labels.numel():
- if len(shadowed_labels.shape) == 2:
- idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1]
- assert (labels[idx_] != label_).all(), \
- 'One label cannot be both positive and ignored'
- label_weights[idx_, label_] = 0
- else:
- label_weights[shadowed_labels] = 0
-
- # map up to original set of anchors
- if unmap_outputs:
- num_total_anchors = flat_anchors.size(0)
- labels = unmap(labels, num_total_anchors, inside_flags)
- label_weights = unmap(label_weights, num_total_anchors,
- inside_flags)
- bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
- bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
- pos_gt_inds = unmap(
- pos_gt_inds, num_total_anchors, inside_flags, fill=-1)
-
- return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
- neg_inds, sampling_result, pos_gt_inds)
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
- def loss(self,
- cls_scores,
- bbox_preds,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """Compute loss of the head.
-
- Args:
- cls_scores (list[Tensor]): Box scores for each scale level
- Has shape (N, num_points * num_classes, H, W).
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
- level with shape (N, num_points * 4, H, W).
- gt_bboxes (list[Tensor]): each item are the truth boxes for each
- image in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): class indices corresponding to each box
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
- boxes can be ignored when computing the loss.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- for i in range(len(bbox_preds)): # loop over fpn level
- # avoid 0 area of the predicted bbox
- bbox_preds[i] = bbox_preds[i].clamp(min=1e-4)
- # TODO: It may directly use the base-class loss function.
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- assert len(featmap_sizes) == self.anchor_generator.num_levels
- batch_size = len(gt_bboxes)
- device = cls_scores[0].device
- anchor_list, valid_flag_list = self.get_anchors(
- featmap_sizes, img_metas, device=device)
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
- cls_reg_targets = self.get_targets(
- anchor_list,
- valid_flag_list,
- gt_bboxes,
- img_metas,
- gt_bboxes_ignore_list=gt_bboxes_ignore,
- gt_labels_list=gt_labels,
- label_channels=label_channels)
- if cls_reg_targets is None:
- return None
- (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
- num_total_pos, num_total_neg,
- pos_assigned_gt_inds_list) = cls_reg_targets
-
- num_gts = np.array(list(map(len, gt_labels)))
- num_total_samples = (
- num_total_pos + num_total_neg if self.sampling else num_total_pos)
- # number of anchors at each level
- num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
- # concat all level anchors and flags to a single tensor
- concat_anchor_list = []
- for i in range(len(anchor_list)):
- concat_anchor_list.append(torch.cat(anchor_list[i]))
- all_anchor_list = images_to_levels(concat_anchor_list,
- num_level_anchors)
- losses_cls, losses_bbox = multi_apply(
- self.loss_single,
- cls_scores,
- bbox_preds,
- all_anchor_list,
- labels_list,
- label_weights_list,
- bbox_targets_list,
- bbox_weights_list,
- num_total_samples=num_total_samples)
-
- # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned
- # gt index of each anchor bbox in each fpn level.
- cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size
- for i, assign in enumerate(pos_assigned_gt_inds_list):
- # loop over fpn levels
- for j in range(1, batch_size):
- # loop over batch size
- # Convert gt indices in each img to those in the batch
- assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1])
- pos_assigned_gt_inds_list[i] = assign.flatten()
- labels_list[i] = labels_list[i].flatten()
- num_gts = sum(map(len, gt_labels)) # total number of gt in the batch
- # The unique label index of each gt in the batch
- label_sequence = torch.arange(num_gts, device=device)
- # Collect the average loss of each gt in each level
- with torch.no_grad():
- loss_levels, = multi_apply(
- self.collect_loss_level_single,
- losses_cls,
- losses_bbox,
- pos_assigned_gt_inds_list,
- labels_seq=label_sequence)
- # Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level
- loss_levels = torch.stack(loss_levels, dim=0)
- # Locate the best fpn level for loss back-propagation
- if loss_levels.numel() == 0: # zero gt
- argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long)
- else:
- _, argmin = loss_levels.min(dim=0)
-
- # Reweight the loss of each (anchor, label) pair, so that only those
- # at the best gt level are back-propagated.
- losses_cls, losses_bbox, pos_inds = multi_apply(
- self.reweight_loss_single,
- losses_cls,
- losses_bbox,
- pos_assigned_gt_inds_list,
- labels_list,
- list(range(len(losses_cls))),
- min_levels=argmin)
- num_pos = torch.cat(pos_inds, 0).sum().float()
- pos_recall = self.calculate_pos_recall(cls_scores, labels_list,
- pos_inds)
-
- if num_pos == 0: # No gt
- avg_factor = num_pos + float(num_total_neg)
- else:
- avg_factor = num_pos
- for i in range(len(losses_cls)):
- losses_cls[i] /= avg_factor
- losses_bbox[i] /= avg_factor
- return dict(
- loss_cls=losses_cls,
- loss_bbox=losses_bbox,
- num_pos=num_pos / batch_size,
- pos_recall=pos_recall)
-
- def calculate_pos_recall(self, cls_scores, labels_list, pos_inds):
- """Calculate positive recall with score threshold.
-
- Args:
- cls_scores (list[Tensor]): Classification scores at all fpn levels.
- Each tensor is in shape (N, num_classes * num_anchors, H, W)
- labels_list (list[Tensor]): The label that each anchor is assigned
- to. Shape (N * H * W * num_anchors, )
- pos_inds (list[Tensor]): List of bool tensors indicating whether
- the anchor is assigned to a positive label.
- Shape (N * H * W * num_anchors, )
-
- Returns:
- Tensor: A single float number indicating the positive recall.
- """
- with torch.no_grad():
- num_class = self.num_classes
- scores = [
- cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos]
- for cls, pos in zip(cls_scores, pos_inds)
- ]
- labels = [
- label.reshape(-1)[pos]
- for label, pos in zip(labels_list, pos_inds)
- ]
- scores = torch.cat(scores, dim=0)
- labels = torch.cat(labels, dim=0)
- if self.use_sigmoid_cls:
- scores = scores.sigmoid()
- else:
- scores = scores.softmax(dim=1)
-
- return accuracy(scores, labels, thresh=self.score_threshold)
-
- def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds,
- labels_seq):
- """Get the average loss in each FPN level w.r.t. each gt label.
-
- Args:
- cls_loss (Tensor): Classification loss of each feature map pixel,
- shape (num_anchor, num_class)
- reg_loss (Tensor): Regression loss of each feature map pixel,
- shape (num_anchor, 4)
- assigned_gt_inds (Tensor): It indicates which gt the prior is
- assigned to (0-based, -1: no assignment). shape (num_anchor),
- labels_seq: The rank of labels. shape (num_gt)
-
- Returns:
- shape: (num_gt), average loss of each gt in this level
- """
- if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4)
- reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims
- if len(cls_loss.shape) == 2:
- cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims
- loss = cls_loss + reg_loss
- assert loss.size(0) == assigned_gt_inds.size(0)
- # Default loss value is 1e6 for a layer where no anchor is positive
- # to ensure it will not be chosen to back-propagate gradient
- losses_ = loss.new_full(labels_seq.shape, 1e6)
- for i, l in enumerate(labels_seq):
- match = assigned_gt_inds == l
- if match.any():
- losses_[i] = loss[match].mean()
- return losses_,
-
- def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds,
- labels, level, min_levels):
- """Reweight loss values at each level.
-
- Reassign loss values at each level by masking those where the
- pre-calculated loss is too large. Then return the reduced losses.
-
- Args:
- cls_loss (Tensor): Element-wise classification loss.
- Shape: (num_anchors, num_classes)
- reg_loss (Tensor): Element-wise regression loss.
- Shape: (num_anchors, 4)
- assigned_gt_inds (Tensor): The gt indices that each anchor bbox
- is assigned to. -1 denotes a negative anchor, otherwise it is the
- gt index (0-based). Shape: (num_anchors, ),
- labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ).
- level (int): The current level index in the pyramid
- (0-4 for RetinaNet)
- min_levels (Tensor): The best-matching level for each gt.
- Shape: (num_gts, ),
-
- Returns:
- tuple:
- - cls_loss: Reduced corrected classification loss. Scalar.
- - reg_loss: Reduced corrected regression loss. Scalar.
- - pos_flags (Tensor): Corrected bool tensor indicating the
- final positive anchors. Shape: (num_anchors, ).
- """
- loc_weight = torch.ones_like(reg_loss)
- cls_weight = torch.ones_like(cls_loss)
- pos_flags = assigned_gt_inds >= 0 # positive pixel flag
- pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten()
-
- if pos_flags.any(): # pos pixels exist
- pos_assigned_gt_inds = assigned_gt_inds[pos_flags]
- zeroing_indices = (min_levels[pos_assigned_gt_inds] != level)
- neg_indices = pos_indices[zeroing_indices]
-
- if neg_indices.numel():
- pos_flags[neg_indices] = 0
- loc_weight[neg_indices] = 0
- # Only the weight corresponding to the label is
- # zeroed out if not selected
- zeroing_labels = labels[neg_indices]
- assert (zeroing_labels >= 0).all()
- cls_weight[neg_indices, zeroing_labels] = 0
-
- # Weighted loss for both cls and reg loss
- cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum')
- reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum')
-
- return cls_loss, reg_loss, pos_flags
diff --git a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/roi_extractors/__init__.py b/spaces/dineshreddy/WALT/mmdet/models/roi_heads/roi_extractors/__init__.py
deleted file mode 100644
index a6ec0ecc3063cd23c2463f2f53f1c2a83b04d43b..0000000000000000000000000000000000000000
--- a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/roi_extractors/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .generic_roi_extractor import GenericRoIExtractor
-from .single_level_roi_extractor import SingleRoIExtractor
-
-__all__ = [
- 'SingleRoIExtractor',
- 'GenericRoIExtractor',
-]
diff --git a/spaces/evanpierce/3D_Photo_Inpainting2/MiDaS/monodepth_net.py b/spaces/evanpierce/3D_Photo_Inpainting2/MiDaS/monodepth_net.py
deleted file mode 100644
index 461db0807deaa98b98e4b5447d0a24b830ab7dbf..0000000000000000000000000000000000000000
--- a/spaces/evanpierce/3D_Photo_Inpainting2/MiDaS/monodepth_net.py
+++ /dev/null
@@ -1,186 +0,0 @@
-"""MonoDepthNet: Network for monocular depth estimation trained by mixing several datasets.
-This file contains code that is adapted from
-https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
-"""
-import torch
-import torch.nn as nn
-from torchvision import models
-
-
-class MonoDepthNet(nn.Module):
- """Network for monocular depth estimation.
- """
-
- def __init__(self, path=None, features=256):
- """Init.
-
- Args:
- path (str, optional): Path to saved model. Defaults to None.
- features (int, optional): Number of features. Defaults to 256.
- """
- super().__init__()
-
- resnet = models.resnet50(pretrained=False)
-
- self.pretrained = nn.Module()
- self.scratch = nn.Module()
- self.pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
- resnet.maxpool, resnet.layer1)
-
- self.pretrained.layer2 = resnet.layer2
- self.pretrained.layer3 = resnet.layer3
- self.pretrained.layer4 = resnet.layer4
-
- # adjust channel number of feature maps
- self.scratch.layer1_rn = nn.Conv2d(256, features, kernel_size=3, stride=1, padding=1, bias=False)
- self.scratch.layer2_rn = nn.Conv2d(512, features, kernel_size=3, stride=1, padding=1, bias=False)
- self.scratch.layer3_rn = nn.Conv2d(1024, features, kernel_size=3, stride=1, padding=1, bias=False)
- self.scratch.layer4_rn = nn.Conv2d(2048, features, kernel_size=3, stride=1, padding=1, bias=False)
-
- self.scratch.refinenet4 = FeatureFusionBlock(features)
- self.scratch.refinenet3 = FeatureFusionBlock(features)
- self.scratch.refinenet2 = FeatureFusionBlock(features)
- self.scratch.refinenet1 = FeatureFusionBlock(features)
-
- # adaptive output module: 2 convolutions and upsampling
- self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
- nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1),
- Interpolate(scale_factor=2, mode='bilinear'))
-
- # load model
- if path:
- self.load(path)
-
- def forward(self, x):
- """Forward pass.
-
- Args:
- x (tensor): input data (image)
-
- Returns:
- tensor: depth
- """
- layer_1 = self.pretrained.layer1(x)
- layer_2 = self.pretrained.layer2(layer_1)
- layer_3 = self.pretrained.layer3(layer_2)
- layer_4 = self.pretrained.layer4(layer_3)
-
- layer_1_rn = self.scratch.layer1_rn(layer_1)
- layer_2_rn = self.scratch.layer2_rn(layer_2)
- layer_3_rn = self.scratch.layer3_rn(layer_3)
- layer_4_rn = self.scratch.layer4_rn(layer_4)
-
- path_4 = self.scratch.refinenet4(layer_4_rn)
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
- out = self.scratch.output_conv(path_1)
-
- return out
-
- def load(self, path):
- """Load model from file.
-
- Args:
- path (str): file path
- """
- parameters = torch.load(path)
-
- self.load_state_dict(parameters)
-
-
-class Interpolate(nn.Module):
- """Interpolation module.
- """
-
- def __init__(self, scale_factor, mode):
- """Init.
-
- Args:
- scale_factor (float): scaling
- mode (str): interpolation mode
- """
- super(Interpolate, self).__init__()
-
- self.interp = nn.functional.interpolate
- self.scale_factor = scale_factor
- self.mode = mode
-
- def forward(self, x):
- """Forward pass.
-
- Args:
- x (tensor): input
-
- Returns:
- tensor: interpolated data
- """
- x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=False)
-
- return x
-
-
-class ResidualConvUnit(nn.Module):
- """Residual convolution module.
- """
-
- def __init__(self, features):
- """Init.
-
- Args:
- features (int): number of features
- """
- super().__init__()
-
- self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True)
- self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=False)
- self.relu = nn.ReLU(inplace=True)
-
- def forward(self, x):
- """Forward pass.
-
- Args:
- x (tensor): input
-
- Returns:
- tensor: output
- """
- out = self.relu(x)
- out = self.conv1(out)
- out = self.relu(out)
- out = self.conv2(out)
-
- return out + x
-
-
-class FeatureFusionBlock(nn.Module):
- """Feature fusion block.
- """
-
- def __init__(self, features):
- """Init.
-
- Args:
- features (int): number of features
- """
- super().__init__()
-
- self.resConfUnit = ResidualConvUnit(features)
-
- def forward(self, *xs):
- """Forward pass.
-
- Returns:
- tensor: output
- """
- output = xs[0]
-
- if len(xs) == 2:
- output += self.resConfUnit(xs[1])
-
- output = self.resConfUnit(output)
- output = nn.functional.interpolate(output, scale_factor=2,
- mode='bilinear', align_corners=True)
-
- return output
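-
-
-# A minimal usage sketch (the 384x384 input below is just an illustrative choice;
-# any resolution divisible by 32 works with this architecture):
-#
-#   net = MonoDepthNet(path=None, features=256)
-#   depth = net(torch.rand(1, 3, 384, 384)) # -> tensor of shape [1, 1, 384, 384]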
diff --git a/spaces/facebook/MusicGen/audiocraft/modules/conditioners.py b/spaces/facebook/MusicGen/audiocraft/modules/conditioners.py
deleted file mode 100644
index 178957d1771dc4c6f2df028fd9bb60f204567955..0000000000000000000000000000000000000000
--- a/spaces/facebook/MusicGen/audiocraft/modules/conditioners.py
+++ /dev/null
@@ -1,1416 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from collections import defaultdict
-from copy import deepcopy
-from dataclasses import dataclass, field
-from itertools import chain
-import logging
-import math
-from pathlib import Path
-import random
-import re
-import typing as tp
-import warnings
-
-import einops
-from num2words import num2words
-import spacy
-from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer # type: ignore
-import torch
-from torch import nn
-import torch.nn.functional as F
-from torch.nn.utils.rnn import pad_sequence
-
-from .chroma import ChromaExtractor
-from .streaming import StreamingModule
-from .transformer import create_sin_embedding
-from ..data.audio import audio_read
-from ..data.audio_dataset import SegmentInfo
-from ..data.audio_utils import convert_audio
-from ..environment import AudioCraftEnvironment
-from ..quantization import ResidualVectorQuantizer
-from ..utils.autocast import TorchAutocast
-from ..utils.cache import EmbeddingCache
-from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once
-
-
-logger = logging.getLogger(__name__)
-TextCondition = tp.Optional[str] # a text condition can be a string or None (if it doesn't exist)
-ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask
-
-
-class WavCondition(tp.NamedTuple):
- wav: torch.Tensor
- length: torch.Tensor
- sample_rate: tp.List[int]
- path: tp.List[tp.Optional[str]] = []
- seek_time: tp.List[tp.Optional[float]] = []
-
-
-class JointEmbedCondition(tp.NamedTuple):
- wav: torch.Tensor
- text: tp.List[tp.Optional[str]]
- length: torch.Tensor
- sample_rate: tp.List[int]
- path: tp.List[tp.Optional[str]] = []
- seek_time: tp.List[tp.Optional[float]] = []
-
-
-@dataclass
-class ConditioningAttributes:
- text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict)
- wav: tp.Dict[str, WavCondition] = field(default_factory=dict)
- joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict)
-
- def __getitem__(self, item):
- return getattr(self, item)
-
- @property
- def text_attributes(self):
- return self.text.keys()
-
- @property
- def wav_attributes(self):
- return self.wav.keys()
-
- @property
- def joint_embed_attributes(self):
- return self.joint_embed.keys()
-
- @property
- def attributes(self):
- return {
- "text": self.text_attributes,
- "wav": self.wav_attributes,
- "joint_embed": self.joint_embed_attributes,
- }
-
- def to_flat_dict(self):
- return {
- **{f"text.{k}": v for k, v in self.text.items()},
- **{f"wav.{k}": v for k, v in self.wav.items()},
- **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()}
- }
-
- @classmethod
- def from_flat_dict(cls, x):
- out = cls()
- for k, v in x.items():
- kind, att = k.split(".")
- out[kind][att] = v
- return out
-
-
-class SegmentWithAttributes(SegmentInfo):
- """Base class for all dataclasses that are used for conditioning.
- All child classes should implement `to_condition_attributes` that converts
- the existing attributes to a dataclass of type ConditioningAttributes.
- """
- def to_condition_attributes(self) -> ConditioningAttributes:
- raise NotImplementedError()
-
-
-def nullify_condition(condition: ConditionType, dim: int = 1):
- """Transform an input condition to a null condition.
- This is done by converting it to a single zero vector, similarly
- to how it is done inside WhiteSpaceTokenizer and NoopTokenizer.
-
- Args:
- condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor])
- dim (int): The dimension that will be truncated (should be the time dimension)
- WARNING!: dim should not be the batch dimension!
- Returns:
- ConditionType: A tuple of null condition and mask
- """
- assert dim != 0, "dim cannot be the batch dimension!"
- assert isinstance(condition, tuple) and \
- isinstance(condition[0], torch.Tensor) and \
- isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!"
- cond, mask = condition
- B = cond.shape[0]
- last_dim = cond.dim() - 1
- out = cond.transpose(dim, last_dim)
- out = 0. * out[..., :1]
- out = out.transpose(dim, last_dim)
- mask = torch.zeros((B, 1), device=out.device).int()
- assert cond.dim() == out.dim()
- return out, mask
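-
-# Illustrative shape check for the nullification above (the tensors are
-# arbitrary placeholders):
-#
-#   cond = (torch.randn(2, 8, 4), torch.ones(2, 8)) # [B, T, D], [B, T]
-#   null_cond, null_mask = nullify_condition(cond, dim=1)
-#   assert null_cond.shape == (2, 1, 4) and null_mask.shape == (2, 1)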
-
-
-def nullify_wav(cond: WavCondition) -> WavCondition:
- """Transform a WavCondition to a nullified WavCondition.
- It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes.
-
- Args:
- cond (WavCondition): Wav condition with wav, tensor of shape [B, T].
- Returns:
- WavCondition: Nullified wav condition.
- """
- null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1)
- return WavCondition(
- wav=null_wav,
- length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device),
- sample_rate=cond.sample_rate,
- path=[None] * cond.wav.shape[0],
- seek_time=[None] * cond.wav.shape[0],
- )
-
-
-def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition:
- """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0,
- and replacing metadata by dummy attributes.
-
- Args:
- embed (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T].
- """
- null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1)
- return JointEmbedCondition(
- wav=null_wav, text=[None] * len(embed.text),
- length=torch.LongTensor([0]).to(embed.wav.device),
- sample_rate=embed.sample_rate,
- path=[None] * embed.wav.shape[0],
- seek_time=[0] * embed.wav.shape[0],
- )
-
-
-class Tokenizer:
- """Base tokenizer implementation
- (in case we want to introduce more advanced tokenizers in the future).
- """
- def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
- raise NotImplementedError()
-
-
-class WhiteSpaceTokenizer(Tokenizer):
- """This tokenizer should be used for natural language descriptions.
- For example:
- ["he didn't, know he's going home.", 'shorter sentence'] =>
- [[78, 62, 31, 4, 78, 25, 19, 34],
- [59, 77, 0, 0, 0, 0, 0, 0]]
- """
- PUNCTUATION = "?:!.,;"
-
- def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm",
- lemma: bool = True, stopwords: bool = True) -> None:
- self.n_bins = n_bins
- self.pad_idx = pad_idx
- self.lemma = lemma
- self.stopwords = stopwords
- try:
- self.nlp = spacy.load(language)
- except IOError:
- spacy.cli.download(language) # type: ignore
- self.nlp = spacy.load(language)
-
- @tp.no_type_check
- def __call__(self, texts: tp.List[tp.Optional[str]],
- return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]:
- """Take a list of strings and convert them to a tensor of indices.
-
- Args:
- texts (list[str]): List of strings.
- return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False.
- Returns:
- tuple[torch.Tensor, torch.Tensor]:
- - Indices of words in the LUT.
- - And a mask indicating where the padding tokens are
- """
- output, lengths = [], []
- texts = deepcopy(texts)
- for i, text in enumerate(texts):
- # if current sample doesn't have a certain attribute, replace with pad token
- if text is None:
- output.append(torch.Tensor([self.pad_idx]))
- lengths.append(0)
- continue
-
- # convert numbers to words
- text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore
- # normalize text
- text = self.nlp(text) # type: ignore
- # remove stopwords
- if self.stopwords:
- text = [w for w in text if not w.is_stop] # type: ignore
- # remove punctuation
- text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore
- # lemmatize if needed
- text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore
-
- texts[i] = " ".join(text)
- lengths.append(len(text))
- # convert to tensor
- tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text])
- output.append(tokens)
-
- mask = length_to_mask(torch.IntTensor(lengths)).int()
- padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t()
- if return_text:
- return padded_output, mask, texts # type: ignore
- return padded_output, mask
-
-
-class NoopTokenizer(Tokenizer):
- """This tokenizer should be used for global conditioners such as: artist, genre, key, etc.
- The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split
- strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will
- split it to ["Jeff", "Buckley"] and return an index per word.
-
- For example:
- ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101]
- ["Metal", "Rock", "Classical"] => [0, 223, 51]
- """
- def __init__(self, n_bins: int, pad_idx: int = 0):
- self.n_bins = n_bins
- self.pad_idx = pad_idx
-
- def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
- output, lengths = [], []
- for text in texts:
- # if current sample doesn't have a certain attribute, replace with pad token
- if text is None:
- output.append(self.pad_idx)
- lengths.append(0)
- else:
- output.append(hash_trick(text, self.n_bins))
- lengths.append(1)
-
- tokens = torch.LongTensor(output).unsqueeze(1)
- mask = length_to_mask(torch.IntTensor(lengths)).int()
- return tokens, mask
-
-
-class BaseConditioner(nn.Module):
- """Base model for all conditioner modules.
- We allow the output dim to be different than the hidden dim for two reasons:
- 1) keep our LUTs small when the vocab is large;
- 2) make all condition dims consistent.
-
- Args:
- dim (int): Hidden dim of the model.
- output_dim (int): Output dim of the conditioner.
- """
- def __init__(self, dim: int, output_dim: int):
- super().__init__()
- self.dim = dim
- self.output_dim = output_dim
- self.output_proj = nn.Linear(dim, output_dim)
-
- def tokenize(self, *args, **kwargs) -> tp.Any:
- """Should be any part of the processing that will lead to a synchronization
- point, e.g. BPE tokenization with transfer to the GPU.
-
- The returned value will be saved and returned later when calling forward().
- """
- raise NotImplementedError()
-
- def forward(self, inputs: tp.Any) -> ConditionType:
- """Gets input that should be used as conditioning (e.g, genre, description or a waveform).
- Outputs a ConditionType, after the input data was embedded as a dense vector.
-
- Returns:
- ConditionType:
- - A tensor of size [B, T, D] where B is the batch size, T is the length of the
- output embedding and D is the dimension of the embedding.
- - And a mask indicating where the padding tokens are.
- """
- raise NotImplementedError()
-
-
-class TextConditioner(BaseConditioner):
- ...
-
-
-class LUTConditioner(TextConditioner):
- """Lookup table TextConditioner.
-
- Args:
- n_bins (int): Number of bins.
- dim (int): Hidden dim of the model (text-encoder/LUT).
- output_dim (int): Output dim of the conditioner.
- tokenizer (str): Name of the tokenizer.
- pad_idx (int, optional): Index for padding token. Defaults to 0.
- """
- def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0):
- super().__init__(dim, output_dim)
- self.embed = nn.Embedding(n_bins, dim)
- self.tokenizer: Tokenizer
- if tokenizer == 'whitespace':
- self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx)
- elif tokenizer == 'noop':
- self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx)
- else:
- raise ValueError(f"unrecognized tokenizer `{tokenizer}`.")
-
- def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
- device = self.embed.weight.device
- tokens, mask = self.tokenizer(x)
- tokens, mask = tokens.to(device), mask.to(device)
- return tokens, mask
-
- def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType:
- tokens, mask = inputs
- embeds = self.embed(tokens)
- embeds = self.output_proj(embeds)
- embeds = (embeds * mask.unsqueeze(-1))
- return embeds, mask
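-
-# Illustrative sketch of the LUT conditioning flow (the hyper-parameters are
-# arbitrary placeholders):
-#
-#   cond = LUTConditioner(n_bins=1024, dim=64, output_dim=128, tokenizer="noop")
-#   embeds, mask = cond(cond.tokenize(["rock", None])) # embeds: [2, 1, 128]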
-
-
-class T5Conditioner(TextConditioner):
- """T5-based TextConditioner.
-
- Args:
- name (str): Name of the T5 model.
- output_dim (int): Output dim of the conditioner.
- finetune (bool): Whether to fine-tune T5 at train time.
- device (str): Device for T5 Conditioner.
- autocast_dtype (tp.Optional[str], optional): Autocast dtype.
- word_dropout (float, optional): Word dropout probability.
- normalize_text (bool, optional): Whether to apply text normalization.
- """
- MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b",
- "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large",
- "google/flan-t5-xl", "google/flan-t5-xxl"]
- MODELS_DIMS = {
- "t5-small": 512,
- "t5-base": 768,
- "t5-large": 1024,
- "t5-3b": 1024,
- "t5-11b": 1024,
- "google/flan-t5-small": 512,
- "google/flan-t5-base": 768,
- "google/flan-t5-large": 1024,
- "google/flan-t5-3b": 1024,
- "google/flan-t5-11b": 1024,
- }
-
- def __init__(self, name: str, output_dim: int, finetune: bool, device: str,
- autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0.,
- normalize_text: bool = False):
- assert name in self.MODELS, f"Unrecognized t5 model name (should be in {self.MODELS})"
- super().__init__(self.MODELS_DIMS[name], output_dim)
- self.device = device
- self.name = name
- self.finetune = finetune
- self.word_dropout = word_dropout
- if autocast_dtype is None or self.device == 'cpu':
- self.autocast = TorchAutocast(enabled=False)
- if self.device != 'cpu':
- logger.warning("T5 has no autocast, this might lead to NaN")
- else:
- dtype = getattr(torch, autocast_dtype)
- assert isinstance(dtype, torch.dtype)
- logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}")
- self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype)
- # Let's disable logging temporarily because T5 will vomit some errors otherwise.
- # thanks https://gist.github.com/simon-weber/7853144
- previous_level = logging.root.manager.disable
- logging.disable(logging.ERROR)
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- try:
- self.t5_tokenizer = T5Tokenizer.from_pretrained(name)
- t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune)
- finally:
- logging.disable(previous_level)
- if finetune:
- self.t5 = t5
- else:
- # this makes sure that the T5 model is not part
- # of the saved checkpoint
- self.__dict__['t5'] = t5.to(device)
-
- self.normalize_text = normalize_text
- if normalize_text:
- self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True)
-
- def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]:
- # if current sample doesn't have a certain attribute, replace with empty string
- entries: tp.List[str] = [xi if xi is not None else "" for xi in x]
- if self.normalize_text:
- _, _, entries = self.text_normalizer(entries, return_text=True)
- if self.word_dropout > 0. and self.training:
- new_entries = []
- for entry in entries:
- words = [word for word in entry.split(" ") if random.random() >= self.word_dropout]
- new_entries.append(" ".join(words))
- entries = new_entries
-
- empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""])
-
- inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device)
- mask = inputs['attention_mask']
- mask[empty_idx, :] = 0 # zero-out index where the input is non-existent
- return inputs
-
- def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType:
- mask = inputs['attention_mask']
- with torch.set_grad_enabled(self.finetune), self.autocast:
- embeds = self.t5(**inputs).last_hidden_state
- embeds = self.output_proj(embeds.to(self.output_proj.weight))
- embeds = (embeds * mask.unsqueeze(-1))
- return embeds, mask
-
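- # A minimal sketch of driving the T5 conditioner; the model name, device and texts are
- # assumptions, and `transformers` must be installed for T5Tokenizer/T5EncoderModel to load.
- def _example_t5_conditioner() -> None:
-     cond = T5Conditioner(name='t5-base', output_dim=512, finetune=False, device='cpu')
-     tokenized = cond.tokenize(["90s rock song with electric guitar", None])  # None becomes ""
-     embeds, mask = cond(tokenized)  # embeds: [2, T, 512]; the empty entry is fully masked
-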
-
-class WaveformConditioner(BaseConditioner):
- """Base class for all conditioners that take a waveform as input.
- Classes that inherit must implement `_get_wav_embedding` that outputs
- a continuous tensor, and `_downsampling_factor` that returns the down-sampling
- factor of the embedding model.
-
- Args:
- dim (int): The internal representation dimension.
- output_dim (int): Output dimension.
- device (tp.Union[torch.device, str]): Device.
- """
- def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]):
- super().__init__(dim, output_dim)
- self.device = device
- # if False, no masking is done; used in ChromaStemConditioner when completing a sample by periodicity.
- self._use_masking = True
-
- def tokenize(self, x: WavCondition) -> WavCondition:
- wav, length, sample_rate, path, seek_time = x
- assert length is not None
- return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time)
-
- def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor:
- """Gets as input a WavCondition and returns a dense embedding."""
- raise NotImplementedError()
-
- def _downsampling_factor(self):
- """Returns the downsampling factor of the embedding model."""
- raise NotImplementedError()
-
- def forward(self, x: WavCondition) -> ConditionType:
- """Extract condition embedding and mask from a waveform and its metadata.
- Args:
- x (WavCondition): Waveform condition containing raw waveform and metadata.
- Returns:
- ConditionType: a dense vector representing the conditioning along with its mask
- """
- wav, lengths, *_ = x
- with torch.no_grad():
- embeds = self._get_wav_embedding(x)
- embeds = embeds.to(self.output_proj.weight)
- embeds = self.output_proj(embeds)
-
- if lengths is not None and self._use_masking:
- lengths = lengths / self._downsampling_factor()
- mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore
- else:
- mask = torch.ones_like(embeds[..., 0])
- embeds = (embeds * mask.unsqueeze(-1))
- return embeds, mask
-
-
-class ChromaStemConditioner(WaveformConditioner):
- """Chroma conditioner based on stems.
- The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as these
- stems often dominate the chroma, leaving the chroma features with little
- information about the melody.
-
- Args:
- output_dim (int): Output dimension for the conditioner.
- sample_rate (int): Sample rate for the chroma extractor.
- n_chroma (int): Number of chroma bins for the chroma extractor.
- radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12).
- duration (int): duration used during training. This is later used for correct padding
- in case we are using chroma as prefix.
- match_len_on_eval (bool, optional): if True then all chromas are padded to the training
- duration. Defaults to False.
- eval_wavs (str, optional): path to a dataset manifest with waveforms; these waveforms are used as
- conditions during eval (for cases where we don't want to leak test conditions like MusicCaps).
- Defaults to None.
- n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0.
- cache_path (str or Path, optional): path used to cache pre-computed chroma embeddings. Defaults to None.
- device (tp.Union[torch.device, str], optional): Device for the conditioner.
- **kwargs: Additional parameters for the chroma extractor.
- """
- def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int,
- duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None,
- n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None,
- device: tp.Union[torch.device, str] = 'cpu', **kwargs):
- from demucs import pretrained
- super().__init__(dim=n_chroma, output_dim=output_dim, device=device)
- self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32)
- self.sample_rate = sample_rate
- self.match_len_on_eval = match_len_on_eval
- if match_len_on_eval:
- self._use_masking = False
- self.duration = duration
- self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device)
- stem_sources: list = self.demucs.sources # type: ignore
- self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device)
- self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma,
- radix2_exp=radix2_exp, **kwargs).to(device)
- self.chroma_len = self._get_chroma_len()
- self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs)
- self.cache = None
- if cache_path is not None:
- self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device,
- compute_embed_fn=self._get_full_chroma_for_cache,
- extract_embed_fn=self._extract_chroma_chunk)
-
- def _downsampling_factor(self) -> int:
- return self.chroma.winhop
-
- def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]:
- """Load pre-defined waveforms from a json.
- These waveforms will be used for chroma extraction during evaluation.
- This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps).
- """
- if path is None:
- return None
-
- logger.info(f"Loading evaluation wavs from {path}")
- from audiocraft.data.audio_dataset import AudioDataset
- dataset: AudioDataset = AudioDataset.from_meta(
- path, segment_duration=self.duration, min_audio_duration=self.duration,
- sample_rate=self.sample_rate, channels=1)
-
- if len(dataset) > 0:
- eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device)
- logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner")
- return eval_wavs
- else:
- raise ValueError("Could not find evaluation wavs, check lengths of wavs")
-
- def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None:
- self.eval_wavs = eval_wavs
-
- def has_eval_wavs(self) -> bool:
- return self.eval_wavs is not None
-
- def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor:
- """Sample wavs from a predefined list."""
- assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided."
- total_eval_wavs = len(self.eval_wavs)
- out = self.eval_wavs
- if num_samples > total_eval_wavs:
- out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1)
- return out[torch.randperm(len(out))][:num_samples]
-
- def _get_chroma_len(self) -> int:
- """Get length of chroma during training."""
- dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device)
- dummy_chr = self.chroma(dummy_wav)
- return dummy_chr.shape[1]
-
- @torch.no_grad()
- def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:
- """Get parts of the wav that holds the melody, extracting the main stems from the wav."""
- from demucs.apply import apply_model
- from demucs.audio import convert_audio
- with self.autocast:
- wav = convert_audio(
- wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore
- stems = apply_model(self.demucs, wav, device=self.device)
- stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning
- mix_wav = stems.sum(1) # merge extracted stems to single waveform
- mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore
- return mix_wav
-
- @torch.no_grad()
- def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor:
- """Extract chroma features from the waveform."""
- with self.autocast:
- return self.chroma(wav)
-
- @torch.no_grad()
- def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:
- """Compute wav embedding, applying stem and chroma extraction."""
- # avoid 0-size tensors when we are working with null conds
- if wav.shape[-1] == 1:
- return self._extract_chroma(wav)
- stems = self._get_stemmed_wav(wav, sample_rate)
- chroma = self._extract_chroma(stems)
- return chroma
-
- @torch.no_grad()
- def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor:
- """Extract chroma from the whole audio waveform at the given path."""
- wav, sr = audio_read(path)
- wav = wav[None].to(self.device)
- wav = convert_audio(wav, sr, self.sample_rate, to_channels=1)
- chroma = self._compute_wav_embedding(wav, self.sample_rate)[0]
- return chroma
-
- def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor:
- """Extract a chunk of chroma from the full chroma derived from the full waveform."""
- wav_length = x.wav.shape[-1]
- seek_time = x.seek_time[idx]
- assert seek_time is not None, (
- "WavCondition seek_time is required "
- "when extracting chroma chunks from pre-computed chroma.")
- full_chroma = full_chroma.float()
- frame_rate = self.sample_rate / self._downsampling_factor()
- target_length = int(frame_rate * wav_length / self.sample_rate)
- index = int(frame_rate * seek_time)
- out = full_chroma[index: index + target_length]
- out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0]
- return out.to(self.device)
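- # Worked example of the indexing above (the sample rate and hop size are assumptions):
- # with sample_rate=32000 and winhop=4096, frame_rate = 32000 / 4096 = 7.8125 frames/s,
- # so a 30 s chunk with seek_time=12 s gives index = int(7.8125 * 12) = 93 and
- # target_length = int(7.8125 * 30) = 234 chroma frames (padded if the file ends early).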
-
- @torch.no_grad()
- def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor:
- """Get the wav embedding from the WavCondition.
- The conditioner will either extract the embedding on the fly, computing it from the condition wav directly,
- or rely on the embedding cache to load the pre-computed embedding if relevant.
- """
- sampled_wav: tp.Optional[torch.Tensor] = None
- if not self.training and self.eval_wavs is not None:
- warn_once(logger, "Using precomputed evaluation wavs!")
- sampled_wav = self._sample_eval_wavs(len(x.wav))
-
- no_undefined_paths = all(p is not None for p in x.path)
- no_nullified_cond = x.wav.shape[-1] > 1
- if sampled_wav is not None:
- chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate)
- elif self.cache is not None and no_undefined_paths and no_nullified_cond:
- paths = [Path(p) for p in x.path if p is not None]
- chroma = self.cache.get_embed_from_cache(paths, x)
- else:
- assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal."
- chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0])
-
- if self.match_len_on_eval:
- B, T, C = chroma.shape
- if T > self.chroma_len:
- chroma = chroma[:, :self.chroma_len]
- logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})")
- elif T < self.chroma_len:
- n_repeat = int(math.ceil(self.chroma_len / T))
- chroma = chroma.repeat(1, n_repeat, 1)
- chroma = chroma[:, :self.chroma_len]
- logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})")
-
- return chroma
-
- def tokenize(self, x: WavCondition) -> WavCondition:
- """Apply WavConditioner tokenization and populate cache if needed."""
- x = super().tokenize(x)
- no_undefined_paths = all(p is not None for p in x.path)
- if self.cache is not None and no_undefined_paths:
- paths = [Path(p) for p in x.path if p is not None]
- self.cache.populate_embed_cache(paths, x)
- return x
-
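- # Rough construction sketch; the sample rate, chroma resolution, duration and device are
- # illustrative assumptions, and Demucs ('htdemucs') must be installed and downloadable.
- def _example_chroma_stem_conditioner(wav_condition: WavCondition) -> ConditionType:
-     cond = ChromaStemConditioner(output_dim=512, sample_rate=32000, n_chroma=12,
-                                  radix2_exp=12, duration=30.0, device='cuda')
-     return cond(cond.tokenize(wav_condition))  # (embedding [B, T, 512], mask)
-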
-
-class JointEmbeddingConditioner(BaseConditioner):
- """Joint embedding conditioning supporting both audio or text conditioning.
-
- Args:
- dim (int): Dimension.
- output_dim (int): Output dimension.
- device (str): Device.
- attribute (str): Attribute used by the conditioner.
- autocast_dtype (str): Autocast for the conditioner.
- quantize (bool): Whether to quantize the CLAP embedding.
- n_q (int): Number of residual quantizers (used if quantize is true).
- bins (int): Quantizers' codebooks size (used if quantize is true).
- kwargs: Additional parameters for residual vector quantizer.
- """
- def __init__(self, dim: int, output_dim: int, device: str, attribute: str,
- autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True,
- n_q: int = 12, bins: int = 1024, **kwargs):
- super().__init__(dim=dim, output_dim=output_dim)
- self.device = device
- self.attribute = attribute
- if autocast_dtype is None or device == 'cpu':
- self.autocast = TorchAutocast(enabled=False)
- logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.")
- else:
- dtype = getattr(torch, autocast_dtype)
- assert isinstance(dtype, torch.dtype)
- logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.")
- self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype)
- # residual vector quantizer to discretize the conditioned embedding
- self.quantizer: tp.Optional[ResidualVectorQuantizer] = None
- if quantize:
- self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs)
-
- def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]:
- """Get joint embedding in latent space from the inputs.
-
- Returns:
- tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding
- and corresponding empty indexes.
- """
- raise NotImplementedError()
-
- def forward(self, x: JointEmbedCondition) -> ConditionType:
- with self.autocast:
- embed, empty_idx = self._get_embed(x)
- if self.quantizer is not None:
- embed = embed.view(-1, self.dim, 1)
- q_res = self.quantizer(embed, frame_rate=1)
- out_embed = q_res.x.view(-1, self.dim)
- else:
- out_embed = embed
- out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim)
- mask = torch.ones(*out_embed.shape[:2], device=out_embed.device)
- mask[empty_idx, :] = 0 # zero-out index where the input is non-existent
- out_embed = (out_embed * mask.unsqueeze(-1))
- return out_embed, mask
-
- def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition:
- return x
-
-
-class CLAPEmbeddingConditioner(JointEmbeddingConditioner):
- """Joint Embedding conditioner based on pre-trained CLAP model.
-
- This CLAP-based conditioner supports a caching mechanism
- over the computed embeddings for faster training.
-
- Args:
- dim (int): Dimension.
- output_dim (int): Output dimension.
- device (str): Device.
- attribute (str): Attribute used by the conditioner.
- quantize (bool): Whether to quantize the CLAP embedding.
- n_q (int): Number of residual quantizers (used if quantize is true).
- bins (int): Quantizers' codebooks size (used if quantize is true).
- checkpoint (str): Path to CLAP checkpoint.
- model_arch (str): CLAP model architecture.
- enable_fusion (bool): Enable fusion for CLAP model.
- sample_rate (int): Sample rate used by CLAP model.
- max_audio_length (float): Maximum audio length for CLAP model.
- audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence.
- normalize (bool): Whether to normalize the CLAP embedding.
- text_p (float): Probability of using text representation instead of audio at train time.
- batch_size (Optional[int]): Batch size for CLAP embedding computation.
- autocast_dtype (str): Autocast for the conditioner.
- cache_path (Optional[str]): Path for pre-computed embeddings caching.
- kwargs: Additional parameters for residual vector quantizer.
- """
- def __init__(self, dim: int, output_dim: int, device: str, attribute: str,
- quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str,
- enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int,
- normalize: bool, text_p: float, batch_size: tp.Optional[int] = None,
- autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs):
- try:
- import laion_clap # type: ignore
- except ImportError:
- raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'")
- warnings.warn("Sample rate for CLAP conditioner was fixed in version v1.1.0, (from 44.1 to 48 kHz). "
- "Please retrain all models.")
- checkpoint = AudioCraftEnvironment.resolve_reference_path(checkpoint)
- clap_tokenize = RobertaTokenizer.from_pretrained('roberta-base')
- clap_model = laion_clap.CLAP_Module(enable_fusion=enable_fusion, amodel=model_arch)
- load_clap_state_dict(clap_model, checkpoint)
- clap_model.eval()
- clap_model.to(device)
- super().__init__(dim=dim, output_dim=output_dim, device=device, attribute=attribute,
- autocast_dtype=autocast_dtype, quantize=quantize, n_q=n_q, bins=bins,
- **kwargs)
- self.checkpoint = checkpoint
- self.enable_fusion = enable_fusion
- self.model_arch = model_arch
- self.clap: laion_clap.CLAP_Module
- self.clap_tokenize: RobertaTokenizer
- self.clap_sample_rate = sample_rate
- self.clap_max_frames = int(self.clap_sample_rate * max_audio_length)
- self.clap_stride = int(self.clap_sample_rate * audio_stride)
- self.batch_size = batch_size or 1
- self.normalize = normalize
- self.text_p = text_p
- self.__dict__['clap_tokenize'] = clap_tokenize
- self.__dict__['clap'] = clap_model
- self.wav_cache, self.text_cache = None, None
- if cache_path is not None:
- self.wav_cache = EmbeddingCache(Path(cache_path) / 'wav', self.device,
- compute_embed_fn=self._get_wav_embedding_for_cache,
- extract_embed_fn=self._extract_wav_embedding_chunk)
- self.text_cache = EmbeddingCache(Path(cache_path) / 'text', self.device,
- compute_embed_fn=self._get_text_embedding_for_cache)
-
- def _tokenizer(self, texts: tp.Union[str, tp.List[str]]) -> dict:
- # we use the default params from CLAP module here as well
- return self.clap_tokenize(texts, padding="max_length", truncation=True, max_length=77, return_tensors="pt")
-
- def _compute_text_embedding(self, text: tp.List[str]) -> torch.Tensor:
- """Compute text embedding from CLAP model on a given a batch of text.
-
- Args:
- text (list[str]): List of text for the batch, with B items.
- Returns:
- torch.Tensor: CLAP embedding derived from text, of shape [B, 1, D], with D the CLAP embedding dimension.
- """
- with torch.no_grad():
- embed = self.clap.get_text_embedding(text, tokenizer=self._tokenizer, use_tensor=True)
- return embed.view(embed.size(0), 1, embed.size(-1))
-
- def _get_text_embedding_for_cache(self, path: tp.Union[Path, str],
- x: JointEmbedCondition, idx: int) -> torch.Tensor:
- """Get text embedding function for the cache."""
- text = x.text[idx]
- text = text if text is not None else ""
- return self._compute_text_embedding([text])[0]
-
- def _preprocess_wav(self, wav: torch.Tensor, length: torch.Tensor, sample_rates: tp.List[int]) -> torch.Tensor:
- """Preprocess wav to expected format by CLAP model.
-
- Args:
- wav (torch.Tensor): Audio wav, of shape [B, C, T].
- length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B].
- sample_rates (list[int]): Sample rates for each sample in the batch
- Returns:
- torch.Tensor: Audio wav of shape [B, T].
- """
- assert wav.dim() == 3, "Expecting wav to be [B, C, T]"
- if sample_rates is not None:
- _wav = []
- for i, audio in enumerate(wav):
- sr = sample_rates[i]
- audio = convert_audio(audio, from_rate=sr, to_rate=self.clap_sample_rate, to_channels=1)
- _wav.append(audio)
- wav = torch.stack(_wav, dim=0)
- wav = wav.mean(dim=1)
- return wav
-
- def _compute_wav_embedding(self, wav: torch.Tensor, length: torch.Tensor,
- sample_rates: tp.List[int], reduce_mean: bool = False) -> torch.Tensor:
- """Compute audio wave embedding from CLAP model.
-
- Since CLAP operates on fixed-length audio inputs and we need to process longer audio sequences,
- we calculate the wav embeddings on `clap_max_frames` windows with a stride of `clap_stride` samples and
- average the resulting embeddings.
-
- Args:
- wav (torch.Tensor): Audio wav, of shape [B, C, T].
- length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B].
- sample_rates (list[int]): Sample rates for each sample in the batch.
- reduce_mean (bool): Whether to get the average tensor.
- Returns:
- torch.Tensor: Audio embedding of shape [B, F, D], F being the number of chunks, D the dimension.
- """
- with torch.no_grad():
- wav = self._preprocess_wav(wav, length, sample_rates)
- B, T = wav.shape
- if T >= self.clap_max_frames:
- wav = wav.unfold(-1, self.clap_max_frames, self.clap_stride) # [B, F, T]
- else:
- wav = wav.view(-1, 1, T) # [B, F, T] with F=1
- wav = einops.rearrange(wav, 'b f t -> (b f) t')
- embed_list = []
- for i in range(0, wav.size(0), self.batch_size):
- _wav = wav[i:i+self.batch_size, ...]
- _embed = self.clap.get_audio_embedding_from_data(_wav, use_tensor=True)
- embed_list.append(_embed)
- embed = torch.cat(embed_list, dim=0)
- embed = einops.rearrange(embed, '(b f) d -> b f d', b=B)
- if reduce_mean:
- embed = embed.mean(dim=1, keepdim=True)
- return embed # [B, F, D] with F=1 if reduce_mean is True
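- # Worked example of the unfolding above (duration/stride values are assumptions):
- # with clap_sample_rate=48000, max_audio_length=10 and audio_stride=5 we get
- # clap_max_frames=480000 and clap_stride=240000, so a 30 s input (T=1_440_000 samples)
- # unfolds into (1_440_000 - 480_000) // 240_000 + 1 = 5 windows, i.e. F=5 above
- # (or F=1 when reduce_mean=True averages the window embeddings).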
-
- def _get_wav_embedding_for_cache(self, path: tp.Union[str, Path],
- x: JointEmbedCondition, idx: int) -> torch.Tensor:
- """Compute audio wave embedding for the cache.
- The embedding is computed on a given audio read from file.
-
- Args:
- path (str or Path): Path to the full audio file.
- Returns:
- torch.Tensor: Single-item tensor of shape [F, D], F being the number of chunks, D the dimension.
- """
- wav, sr = audio_read(path) # [C, T]
- wav = wav.unsqueeze(0).to(self.device) # [1, C, T]
- wav_len = torch.LongTensor([wav.shape[-1]]).to(self.device)
- embed = self._compute_wav_embedding(wav, wav_len, [sr], reduce_mean=False) # [B, F, D]
- return embed.squeeze(0) # [F, D]
-
- def _extract_wav_embedding_chunk(self, full_embed: torch.Tensor, x: JointEmbedCondition, idx: int) -> torch.Tensor:
- """Extract the chunk of embedding matching the seek_time and length from the full CLAP audio embedding.
-
- Args:
- full_embed (torch.Tensor): CLAP embedding computed on the full wave, of shape [F, D].
- x (JointEmbedCondition): Joint embedding condition for the full batch.
- idx (int): Index considered for the given embedding to extract.
- Returns:
- torch.Tensor: Wav embedding averaged on sliding window, of shape [1, D].
- """
- sample_rate = x.sample_rate[idx]
- seek_time = x.seek_time[idx]
- seek_time = 0. if seek_time is None else seek_time
- clap_stride = int(self.clap_stride / self.clap_sample_rate) * sample_rate
- end_seek_time = seek_time + self.clap_max_frames / self.clap_sample_rate
- start_offset = int(seek_time * sample_rate // clap_stride)
- end_offset = int(end_seek_time * sample_rate // clap_stride)
- wav_embed = full_embed[start_offset:end_offset, ...]
- wav_embed = wav_embed.mean(dim=0, keepdim=True)
- return wav_embed.to(self.device) # [1, D]
-
- def _get_text_embedding(self, x: JointEmbedCondition) -> torch.Tensor:
- """Get CLAP embedding from a batch of text descriptions."""
- no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from the cache when the condition was dropped out
- if self.text_cache is not None and no_nullified_cond:
- assert all(p is not None for p in x.path), "Cache requires all JointEmbedCondition paths to be provided"
- paths = [Path(p) for p in x.path if p is not None]
- embed = self.text_cache.get_embed_from_cache(paths, x)
- else:
- text = [xi if xi is not None else "" for xi in x.text]
- embed = self._compute_text_embedding(text)
- if self.normalize:
- embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1)
- return embed
-
- def _get_wav_embedding(self, x: JointEmbedCondition) -> torch.Tensor:
- """Get CLAP embedding from a batch of audio tensors (and corresponding sample rates)."""
- no_undefined_paths = all(p is not None for p in x.path)
- no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from the cache when the condition was dropped out
- if self.wav_cache is not None and no_undefined_paths and no_nullified_cond:
- paths = [Path(p) for p in x.path if p is not None]
- embed = self.wav_cache.get_embed_from_cache(paths, x)
- else:
- embed = self._compute_wav_embedding(x.wav, x.length, x.sample_rate, reduce_mean=True)
- if self.normalize:
- embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1)
- return embed
-
- def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition:
- # Try to limit sync points as much as possible when the cache is warm.
- no_undefined_paths = all(p is not None for p in x.path)
- if self.wav_cache is not None and no_undefined_paths:
- assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided"
- paths = [Path(p) for p in x.path if p is not None]
- self.wav_cache.populate_embed_cache(paths, x)
- if self.text_cache is not None and no_undefined_paths:
- assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided"
- paths = [Path(p) for p in x.path if p is not None]
- self.text_cache.populate_embed_cache(paths, x)
- return x
-
- def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]:
- """Extract shared latent representation from either the wav or the text using CLAP."""
- # decide whether to use text embedding at train time or not
- use_text_embed = random.random() < self.text_p
- if self.training and not use_text_embed:
- embed = self._get_wav_embedding(x)
- empty_idx = torch.LongTensor([]) # we assume we always have the audio wav
- else:
- embed = self._get_text_embedding(x)
- empty_idx = torch.LongTensor([i for i, xi in enumerate(x.text) if xi is None or xi == ""])
- return embed, empty_idx
-
-
-def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str) -> ConditioningAttributes:
- """Utility function for nullifying an attribute inside an ConditioningAttributes object.
- If the condition is of type "wav", then nullify it using `nullify_condition` function.
- If the condition is of any other type, set its value to None.
- Works in-place.
- """
- if condition_type not in ['text', 'wav', 'joint_embed']:
- raise ValueError(
- "dropout_condition got an unexpected condition type!"
- f" expected 'text', 'wav' or 'joint_embed' but got '{condition_type}'"
- )
-
- if condition not in getattr(sample, condition_type):
- raise ValueError(
- "dropout_condition received an unexpected condition!"
- f" expected wav={sample.wav.keys()} and text={sample.text.keys()}"
- f" but got '{condition}' of type '{condition_type}'!"
- )
-
- if condition_type == 'wav':
- wav_cond = sample.wav[condition]
- sample.wav[condition] = nullify_wav(wav_cond)
- elif condition_type == 'joint_embed':
- embed = sample.joint_embed[condition]
- sample.joint_embed[condition] = nullify_joint_embed(embed)
- else:
- sample.text[condition] = None
-
- return sample
-
-
-class DropoutModule(nn.Module):
- """Base module for all dropout modules."""
- def __init__(self, seed: int = 1234):
- super().__init__()
- self.rng = torch.Generator()
- self.rng.manual_seed(seed)
-
-
-class AttributeDropout(DropoutModule):
- """Dropout with a given probability per attribute.
- This differs from ClassifierFreeGuidanceDropout in that attributes can be dropped out
- separately: for example, "artist" can be dropped while "genre" remains, whereas with
- ClassifierFreeGuidanceDropout dropping "artist" means "genre" is dropped as well.
-
- Args:
- p (tp.Dict[str, tp.Dict[str, float]]): A dict mapping each condition type to a dict of
- attribute -> dropout probability. For example:
- {
- "text": {"genre": 0.1, "artist": 0.5},
- "wav": {"wav": 0.25},
- }
- active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False.
- seed (int, optional): Random seed.
- """
- def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234):
- super().__init__(seed=seed)
- self.active_on_eval = active_on_eval
- # construct dicts that return the value from p, or 0 for attributes not listed
- self.p = {}
- for condition_type, probs in p.items():
- self.p[condition_type] = defaultdict(lambda: 0, probs)
-
- def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]:
- """
- Args:
- samples (list[ConditioningAttributes]): List of conditions.
- Returns:
- list[ConditioningAttributes]: List of conditions after certain attributes were set to None.
- """
- if not self.training and not self.active_on_eval:
- return samples
-
- samples = deepcopy(samples)
- for condition_type, ps in self.p.items(): # for condition types [text, wav]
- for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre])
- if torch.rand(1, generator=self.rng).item() < p:
- for sample in samples:
- dropout_condition(sample, condition_type, condition)
- return samples
-
- def __repr__(self):
- return f"AttributeDropout({dict(self.p)})"
-
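- # A minimal usage sketch; the attribute names and probabilities are assumptions, and the
- # samples are expected to actually carry those attributes (see dropout_condition above).
- def _example_attribute_dropout(samples: tp.List[ConditioningAttributes]):
-     dropout = AttributeDropout(p={"text": {"genre": 0.1, "artist": 0.5}, "wav": {"self_wav": 0.25}})
-     return dropout(samples)  # each attribute is nullified independently with its own probability
-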
-
-class ClassifierFreeGuidanceDropout(DropoutModule):
- """Classifier Free Guidance dropout.
- All attributes are dropped with the same probability.
-
- Args:
- p (float): Probability to apply condition dropout during training.
- seed (int): Random seed.
- """
- def __init__(self, p: float, seed: int = 1234):
- super().__init__(seed=seed)
- self.p = p
-
- def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]:
- """
- Args:
- samples (list[ConditioningAttributes]): List of conditions.
- Returns:
- list[ConditioningAttributes]: List of conditions after all attributes were set to None.
- """
- if not self.training:
- return samples
-
- # decide on which attributes to drop in a batched fashion
- drop = torch.rand(1, generator=self.rng).item() < self.p
- if not drop:
- return samples
-
- # nullify conditions of all attributes
- samples = deepcopy(samples)
- for condition_type in ["wav", "text"]:
- for sample in samples:
- for condition in sample.attributes[condition_type]:
- dropout_condition(sample, condition_type, condition)
- return samples
-
- def __repr__(self):
- return f"ClassifierFreeGuidanceDropout(p={self.p})"
-
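- # A minimal usage sketch; p=0.2 is an arbitrary assumption. With probability p the whole
- # batch gets all of its wav and text conditions nullified, as needed for CFG training.
- def _example_cfg_dropout(samples: tp.List[ConditioningAttributes]):
-     cfg_dropout = ClassifierFreeGuidanceDropout(p=0.2)
-     return cfg_dropout(samples)  # returned unchanged when the module is in eval mode
-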
-
-class ConditioningProvider(nn.Module):
- """Prepare and provide conditions given all the supported conditioners.
-
- Args:
- conditioners (dict): Dictionary of conditioners.
- device (torch.device or str, optional): Device for conditioners and output condition types.
- """
- def __init__(self, conditioners: tp.Dict[str, BaseConditioner], device: tp.Union[torch.device, str] = "cpu"):
- super().__init__()
- self.device = device
- self.conditioners = nn.ModuleDict(conditioners)
-
- @property
- def joint_embed_conditions(self):
- return [m.attribute for m in self.conditioners.values() if isinstance(m, JointEmbeddingConditioner)]
-
- @property
- def has_joint_embed_conditions(self):
- return len(self.joint_embed_conditions) > 0
-
- @property
- def text_conditions(self):
- return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)]
-
- @property
- def wav_conditions(self):
- return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)]
-
- @property
- def has_wav_condition(self):
- return len(self.wav_conditions) > 0
-
- def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]:
- """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly.
- This should be called before starting any real GPU work to avoid synchronization points.
- This will return a dict matching conditioner names to their arbitrary tokenized representations.
-
- Args:
- inputs (list[ConditioningAttributes]): List of ConditioningAttributes objects containing
- text and wav conditions.
- """
- assert all([isinstance(x, ConditioningAttributes) for x in inputs]), (
- "Got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]",
- f" but types were {set([type(x) for x in inputs])}"
- )
-
- output = {}
- text = self._collate_text(inputs)
- wavs = self._collate_wavs(inputs)
- joint_embeds = self._collate_joint_embeds(inputs)
-
- assert set(text.keys() | wavs.keys() | joint_embeds.keys()).issubset(set(self.conditioners.keys())), (
- f"Got an unexpected attribute! Expected {self.conditioners.keys()}, ",
- f"got {text.keys(), wavs.keys(), joint_embeds.keys()}"
- )
-
- for attribute, batch in chain(text.items(), wavs.items(), joint_embeds.items()):
- output[attribute] = self.conditioners[attribute].tokenize(batch)
- return output
-
- def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]:
- """Compute pairs of `(embedding, mask)` using the configured conditioners and the tokenized representations.
- The output is for example:
- {
- "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])),
- "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])),
- ...
- }
-
- Args:
- tokenized (dict): Dict of tokenized representations as returned by `tokenize()`.
- """
- output = {}
- for attribute, inputs in tokenized.items():
- condition, mask = self.conditioners[attribute](inputs)
- output[attribute] = (condition, mask)
- return output
-
- def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]:
- """Given a list of ConditioningAttributes objects, compile a dictionary where the keys
- are the attributes and the values are the aggregated input per attribute.
- For example:
- Input:
- [
- ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...),
- ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...),
- ]
- Output:
- {
- "genre": ["Rock", "Hip-hop"],
- "description": ["A rock song with a guitar solo", "A hip-hop verse"]
- }
-
- Args:
- samples (list of ConditioningAttributes): List of ConditioningAttributes samples.
- Returns:
- dict[str, list[tp.Optional[str]]]: A dictionary mapping an attribute name to a batch of texts.
- """
- out: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list)
- texts = [x.text for x in samples]
- for text in texts:
- for condition in self.text_conditions:
- out[condition].append(text[condition])
- return out
-
- def _collate_wavs(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, WavCondition]:
- """Generate a dict where the keys are attributes by which we fetch similar wavs,
- and the values are Tensors of wavs according to said attributes.
-
- *Note*: by the time the samples reach this function, each sample should have some waveform
- inside the "wav" attribute. It should be either:
- 1. A real waveform
- 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset)
- 3. A null waveform due to it being dropped in a dropout module (nullified by dropout)
-
- Args:
- samples (list of ConditioningAttributes): List of ConditioningAttributes samples.
- Returns:
- dict[str, WavCondition]: A dictionary mapping an attribute name to wavs.
- """
- wavs = defaultdict(list)
- lengths = defaultdict(list)
- sample_rates = defaultdict(list)
- paths = defaultdict(list)
- seek_times = defaultdict(list)
- out: tp.Dict[str, WavCondition] = {}
-
- for sample in samples:
- for attribute in self.wav_conditions:
- wav, length, sample_rate, path, seek_time = sample.wav[attribute]
- assert wav.dim() == 3, f"Got wav with dim={wav.dim()}, but expected 3 [1, C, T]"
- assert wav.size(0) == 1, f"Got wav [B, C, T] with shape={wav.shape}, but expected B == 1"
- # mono-channel conditioning
- wav = wav.mean(1, keepdim=True) # [1, 1, T]
- wavs[attribute].append(wav.flatten()) # [T]
- lengths[attribute].append(length)
- sample_rates[attribute].extend(sample_rate)
- paths[attribute].extend(path)
- seek_times[attribute].extend(seek_time)
-
- # stack all wavs to a single tensor
- for attribute in self.wav_conditions:
- stacked_wav, _ = collate(wavs[attribute], dim=0)
- out[attribute] = WavCondition(
- stacked_wav.unsqueeze(1), torch.cat(lengths[attribute]), sample_rates[attribute],
- paths[attribute], seek_times[attribute])
-
- return out
-
- def _collate_joint_embeds(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, JointEmbedCondition]:
- """Generate a dict where the keys are attributes by which we compute joint embeddings,
- and the values are Tensors of pre-computed embeddings and the corresponding text attributes.
-
- Args:
- samples (list[ConditioningAttributes]): List of ConditioningAttributes samples.
- Returns:
- A dictionary mapping an attribute name to joint embeddings.
- """
- texts = defaultdict(list)
- wavs = defaultdict(list)
- lengths = defaultdict(list)
- sample_rates = defaultdict(list)
- paths = defaultdict(list)
- seek_times = defaultdict(list)
- channels: int = 0
-
- out = {}
- for sample in samples:
- for attribute in self.joint_embed_conditions:
- wav, text, length, sample_rate, path, seek_time = sample.joint_embed[attribute]
- assert wav.dim() == 3
- if channels == 0:
- channels = wav.size(1)
- else:
- assert channels == wav.size(1), "not all audio has same number of channels in batch"
- assert wav.size(0) == 1, "Expecting single-wav batch in the collate method"
- wav = einops.rearrange(wav, "b c t -> (b c t)") # [1, C, T] => [C * T]
- wavs[attribute].append(wav)
- texts[attribute].extend(text)
- lengths[attribute].append(length)
- sample_rates[attribute].extend(sample_rate)
- paths[attribute].extend(path)
- seek_times[attribute].extend(seek_time)
-
- for attribute in self.joint_embed_conditions:
- stacked_texts = texts[attribute]
- stacked_paths = paths[attribute]
- stacked_seek_times = seek_times[attribute]
- stacked_wavs = pad_sequence(wavs[attribute]).to(self.device)
- stacked_wavs = einops.rearrange(stacked_wavs, "(c t) b -> b c t", c=channels)
- stacked_sample_rates = sample_rates[attribute]
- stacked_lengths = torch.cat(lengths[attribute]).to(self.device)
- assert stacked_lengths.size(0) == stacked_wavs.size(0)
- assert len(stacked_sample_rates) == stacked_wavs.size(0)
- assert len(stacked_texts) == stacked_wavs.size(0)
- out[attribute] = JointEmbedCondition(
- text=stacked_texts, wav=stacked_wavs,
- length=stacked_lengths, sample_rate=stacked_sample_rates,
- path=stacked_paths, seek_time=stacked_seek_times)
-
- return out
-
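- # Sketch of the intended two-step flow; the conditioner name/type and the assumption that
- # every sample carries a "description" text attribute are illustrative only.
- def _example_conditioning_provider(attributes: tp.List[ConditioningAttributes]):
-     provider = ConditioningProvider({"description": T5Conditioner('t5-base', 512, False, 'cpu')})
-     tokenized = provider.tokenize(attributes)  # no heavy GPU work here, avoids sync points
-     return provider(tokenized)  # dict mapping "description" to its (embedding, mask)
-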
-
-class ConditionFuser(StreamingModule):
- """Condition fuser handles the logic to combine the different conditions
- to the actual model input.
-
- Args:
- fuse2cond (tp.Dict[str, tp.List[str]]): A dictionary that says how to fuse
- each condition. For example:
- {
- "prepend": ["description"],
- "sum": ["genre", "bpm"],
- "cross": ["description"],
- }
- cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention.
- cross_attention_pos_emb_scale (float): Scale for positional embeddings in cross attention if used.
- """
- FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"]
-
- def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False,
- cross_attention_pos_emb_scale: float = 1.0):
- super().__init__()
- assert all(
- [k in self.FUSING_METHODS for k in fuse2cond.keys()]
- ), f"Got invalid fuse method, allowed methods: {self.FUSING_METHODS}"
- self.cross_attention_pos_emb = cross_attention_pos_emb
- self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale
- self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond
- self.cond2fuse: tp.Dict[str, str] = {}
- for fuse_method, conditions in fuse2cond.items():
- for condition in conditions:
- self.cond2fuse[condition] = fuse_method
-
- def forward(
- self,
- input: torch.Tensor,
- conditions: tp.Dict[str, ConditionType]
- ) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]:
- """Fuse the conditions to the provided model input.
-
- Args:
- input (torch.Tensor): Transformer input.
- conditions (dict[str, ConditionType]): Dict of conditions.
- Returns:
- tuple[torch.Tensor, torch.Tensor]: The first tensor is the transformer input
- after the conditions have been fused. The second output tensor is the tensor
- used for cross-attention or None if no cross attention inputs exist.
- """
- B, T, _ = input.shape
-
- if 'offsets' in self._streaming_state:
- first_step = False
- offsets = self._streaming_state['offsets']
- else:
- first_step = True
- offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device)
-
- assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \
- f"given conditions contain unknown attributes for fuser, " \
- f"expected {self.cond2fuse.keys()}, got {conditions.keys()}"
- cross_attention_output = None
- for cond_type, (cond, cond_mask) in conditions.items():
- op = self.cond2fuse[cond_type]
- if op == 'sum':
- input += cond
- elif op == 'input_interpolate':
- cond = einops.rearrange(cond, "b t d -> b d t")
- cond = F.interpolate(cond, size=input.shape[1])
- input += einops.rearrange(cond, "b d t -> b t d")
- elif op == 'prepend':
- if first_step:
- input = torch.cat([cond, input], dim=1)
- elif op == 'cross':
- if cross_attention_output is not None:
- cross_attention_output = torch.cat([cross_attention_output, cond], dim=1)
- else:
- cross_attention_output = cond
- else:
- raise ValueError(f"unknown op ({op})")
-
- if self.cross_attention_pos_emb and cross_attention_output is not None:
- positions = torch.arange(
- cross_attention_output.shape[1],
- device=cross_attention_output.device
- ).view(1, -1, 1)
- pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1])
- cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb
-
- if self._is_streaming:
- self._streaming_state['offsets'] = offsets + T
-
- return input, cross_attention_output
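-
- # Illustrative fuser setup; the attribute names and tensor shapes are assumptions. Here
- # "description" feeds cross-attention while "genre" is summed onto the transformer input.
- def _example_condition_fuser(input: torch.Tensor, conditions: tp.Dict[str, ConditionType]):
-     fuser = ConditionFuser(fuse2cond={"cross": ["description"], "sum": ["genre"]})
-     return fuser(input, conditions)  # -> (fused transformer input, cross-attention source or None)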
diff --git a/spaces/facebook/MusicGen/tests/losses/__init__.py b/spaces/facebook/MusicGen/tests/losses/__init__.py
deleted file mode 100644
index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000
--- a/spaces/facebook/MusicGen/tests/losses/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/spaces/facebook/ov-seg/open_vocab_seg/utils/predictor.py b/spaces/facebook/ov-seg/open_vocab_seg/utils/predictor.py
deleted file mode 100644
index 01ad62abfcb1cdb392fa105ae324e26561fa59d0..0000000000000000000000000000000000000000
--- a/spaces/facebook/ov-seg/open_vocab_seg/utils/predictor.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Copyright (c) Meta Platforms, Inc. All Rights Reserved
-
-import numpy as np
-import torch
-from torch.nn import functional as F
-import cv2
-
-from detectron2.data import MetadataCatalog
-from detectron2.structures import BitMasks
-from detectron2.engine.defaults import DefaultPredictor
-from detectron2.utils.visualizer import ColorMode, Visualizer
-from detectron2.modeling.postprocessing import sem_seg_postprocess
-
-import open_clip
-from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
-from open_vocab_seg.modeling.clip_adapter.adapter import PIXEL_MEAN, PIXEL_STD
-from open_vocab_seg.modeling.clip_adapter.utils import crop_with_mask
-
-class OVSegPredictor(DefaultPredictor):
- def __init__(self, cfg):
- super().__init__(cfg)
-
- def __call__(self, original_image, class_names):
- """
- Args:
- original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
-
- Returns:
- predictions (dict):
- the output of the model for one image only.
- See :doc:`/tutorials/models` for details about the format.
- """
- with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
- # Apply pre-processing to image.
- if self.input_format == "RGB":
- # whether the model expects BGR inputs or RGB
- original_image = original_image[:, :, ::-1]
- height, width = original_image.shape[:2]
- image = self.aug.get_transform(original_image).apply_image(original_image)
- image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
-
- inputs = {"image": image, "height": height, "width": width, "class_names": class_names}
- predictions = self.model([inputs])[0]
- return predictions
-
-class OVSegVisualizer(Visualizer):
- def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE, class_names=None):
- super().__init__(img_rgb, metadata, scale, instance_mode)
- self.class_names = class_names
-
- def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
- """
- Draw semantic segmentation predictions/labels.
-
- Args:
- sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
- Each value is the integer label of the pixel.
- area_threshold (int): segments with area less than `area_threshold` are not drawn.
- alpha (float): the larger it is, the more opaque the segmentations are.
-
- Returns:
- output (VisImage): image object with visualizations.
- """
- if isinstance(sem_seg, torch.Tensor):
- sem_seg = sem_seg.numpy()
- labels, areas = np.unique(sem_seg, return_counts=True)
- sorted_idxs = np.argsort(-areas).tolist()
- labels = labels[sorted_idxs]
- class_names = self.class_names if self.class_names is not None else self.metadata.stuff_classes
-
- for label in filter(lambda l: l < len(class_names), labels):
- try:
- mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
- except (AttributeError, IndexError):
- mask_color = None
-
- binary_mask = (sem_seg == label).astype(np.uint8)
- text = class_names[label]
- self.draw_binary_mask(
- binary_mask,
- color=mask_color,
- edge_color=(1.0, 1.0, 240.0 / 255),
- text=text,
- alpha=alpha,
- area_threshold=area_threshold,
- )
- return self.output
-
-
-
-class VisualizationDemo(object):
- def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
- """
- Args:
- cfg (CfgNode):
- instance_mode (ColorMode):
- parallel (bool): whether to run the model in different processes from visualization.
- Useful since the visualization logic can be slow.
- """
- self.metadata = MetadataCatalog.get(
- cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
- )
-
- self.cpu_device = torch.device("cpu")
- self.instance_mode = instance_mode
-
- self.parallel = parallel
- if parallel:
- raise NotImplementedError
- else:
- self.predictor = OVSegPredictor(cfg)
-
- def run_on_image(self, image, class_names):
- """
- Args:
- image (np.ndarray): an image of shape (H, W, C) (in BGR order).
- This is the format used by OpenCV.
- Returns:
- predictions (dict): the output of the model.
- vis_output (VisImage): the visualized image output.
- """
- predictions = self.predictor(image, class_names)
- # Convert image from OpenCV BGR format to Matplotlib RGB format.
- image = image[:, :, ::-1]
- visualizer = OVSegVisualizer(image, self.metadata, instance_mode=self.instance_mode, class_names=class_names)
- if "sem_seg" in predictions:
- r = predictions["sem_seg"]
- blank_area = (r[0] == 0)
- pred_mask = r.argmax(dim=0).to('cpu')
- pred_mask[blank_area] = 255
- pred_mask = np.array(pred_mask, dtype=int) # np.int was removed from recent NumPy; plain int keeps the same behavior
-
- vis_output = visualizer.draw_sem_seg(
- pred_mask
- )
- else:
- raise NotImplementedError
-
- return predictions, vis_output
-
-class SAMVisualizationDemo(object):
- def __init__(self, cfg, granularity, sam_path, ovsegclip_path, instance_mode=ColorMode.IMAGE, parallel=False):
- self.metadata = MetadataCatalog.get(
- cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
- )
-
- self.cpu_device = torch.device("cpu")
- self.instance_mode = instance_mode
-
- self.parallel = parallel
- self.granularity = granularity
- sam = sam_model_registry["vit_l"](checkpoint=sam_path).cuda()
- self.predictor = SamAutomaticMaskGenerator(sam, points_per_batch=16)
- self.clip_model, _, _ = open_clip.create_model_and_transforms('ViT-L-14', pretrained=ovsegclip_path)
-
- def run_on_image(self, ori_image, class_names):
- height, width, _ = ori_image.shape
- if width > height:
- new_width = 1280
- new_height = int((new_width / width) * height)
- else:
- new_height = 1280
- new_width = int((new_height / height) * width)
- image = cv2.resize(ori_image, (new_width, new_height))
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
- ori_image = cv2.cvtColor(ori_image, cv2.COLOR_BGR2RGB)
- visualizer = OVSegVisualizer(ori_image, self.metadata, instance_mode=self.instance_mode, class_names=class_names)
- with torch.no_grad(), torch.cuda.amp.autocast():
- masks = self.predictor.generate(image)
- pred_masks = [masks[i]['segmentation'][None,:,:] for i in range(len(masks))]
- pred_masks = np.row_stack(pred_masks)
- pred_masks = BitMasks(pred_masks)
- bboxes = pred_masks.get_bounding_boxes()
-
- mask_fill = [255.0 * c for c in PIXEL_MEAN]
-
- image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
-
- regions = []
- for bbox, mask in zip(bboxes, pred_masks):
- region, _ = crop_with_mask(
- image,
- mask,
- bbox,
- fill=mask_fill,
- )
- regions.append(region.unsqueeze(0))
- regions = [F.interpolate(r.to(torch.float), size=(224, 224), mode="bicubic") for r in regions]
-
- pixel_mean = torch.tensor(PIXEL_MEAN).reshape(1, -1, 1, 1)
- pixel_std = torch.tensor(PIXEL_STD).reshape(1, -1, 1, 1)
- imgs = [(r/255.0 - pixel_mean) / pixel_std for r in regions]
- imgs = torch.cat(imgs)
- if len(class_names) == 1:
- class_names.append('others')
- txts = [f'a photo of {cls_name}' for cls_name in class_names]
- text = open_clip.tokenize(txts)
-
- img_batches = torch.split(imgs, 32, dim=0)
-
- with torch.no_grad(), torch.cuda.amp.autocast():
- self.clip_model.cuda()
- text_features = self.clip_model.encode_text(text.cuda())
- text_features /= text_features.norm(dim=-1, keepdim=True)
- image_features = []
- for img_batch in img_batches:
- image_feat = self.clip_model.encode_image(img_batch.cuda().half())
- image_feat /= image_feat.norm(dim=-1, keepdim=True)
- image_features.append(image_feat.detach())
- image_features = torch.cat(image_features, dim=0)
- class_preds = (100.0 * image_features @ text_features.T).softmax(dim=-1)
- select_cls = torch.zeros_like(class_preds)
-
- max_scores, select_mask = torch.max(class_preds, dim=0)
- if len(class_names) == 2 and class_names[-1] == 'others':
- select_mask = select_mask[:-1]
- if self.granularity < 1:
- thr_scores = max_scores * self.granularity
- select_mask = []
- if len(class_names) == 2 and class_names[-1] == 'others':
- thr_scores = thr_scores[:-1]
- for i, thr in enumerate(thr_scores):
- cls_pred = class_preds[:,i]
- locs = torch.where(cls_pred > thr)
- select_mask.extend(locs[0].tolist())
- for idx in select_mask:
- select_cls[idx] = class_preds[idx]
- semseg = torch.einsum("qc,qhw->chw", select_cls.float(), pred_masks.tensor.float().cuda())
-
- r = semseg
- blank_area = (r[0] == 0)
- pred_mask = r.argmax(dim=0).to('cpu')
- pred_mask[blank_area] = 255
- pred_mask = np.array(pred_mask, dtype=int) # np.int was removed from recent NumPy; plain int keeps the same behavior
- pred_mask = cv2.resize(pred_mask, (width, height), interpolation=cv2.INTER_NEAREST)
-
- vis_output = visualizer.draw_sem_seg(
- pred_mask
- )
-
- return None, vis_output
\ No newline at end of file
diff --git a/spaces/failfast/2D-GameCreator/src/pages/_app.tsx b/spaces/failfast/2D-GameCreator/src/pages/_app.tsx
deleted file mode 100644
index 1974b420cd44d291c6ed020213db183b76e2afd5..0000000000000000000000000000000000000000
--- a/spaces/failfast/2D-GameCreator/src/pages/_app.tsx
+++ /dev/null
@@ -1,26 +0,0 @@
-import Head from "next/head";
-import { AppProps } from "next/app";
-import { CacheProvider, EmotionCache } from "@emotion/react";
-import createEmotionCache from "@/lib/createEmotionCache";
-import { Experimental_CssVarsProvider as CssVarsProvider } from "@mui/material/styles";
-import theme from "@/lib/theme";
-
-const clientSideEmotionCache = createEmotionCache();
-
-export interface MyAppProps extends AppProps {
- emotionCache?: EmotionCache;
-}
-
-export default function MyApp(props: MyAppProps) {
- const { Component, emotionCache = clientSideEmotionCache, pageProps } = props;
- return (
- <CacheProvider value={emotionCache}>
- <Head>
- <meta name="viewport" content="initial-scale=1, width=device-width" />
- </Head>
- <CssVarsProvider theme={theme}>
- <Component {...pageProps} />
- </CssVarsProvider>
- </CacheProvider>
- );
-}
diff --git a/spaces/failfast/nextjs-hf-spaces/src/components/base/code.tsx b/spaces/failfast/nextjs-hf-spaces/src/components/base/code.tsx
deleted file mode 100644
index 801588c4408f12cdc676c15632e327162d671bf7..0000000000000000000000000000000000000000
--- a/spaces/failfast/nextjs-hf-spaces/src/components/base/code.tsx
+++ /dev/null
@@ -1,19 +0,0 @@
-import { styled } from "@mui/material/styles";
-import { Paper, PaperProps } from "@mui/material";
-
-type CodeProps = {
- children: string;
-};
-
-const CodeBox = styled(Paper)(({ theme }) => ({
- fontFamily: "monospace",
- padding: 8,
- borderTop: `2px solid ${theme.palette.secondary.dark}`,
- borderBottom: `2px solid ${theme.palette.secondary.dark}`,
-}));
-
-export default function Code(props: CodeProps) {
- const { children } = props;
-
- return <CodeBox>{children}</CodeBox>;
-}
diff --git a/spaces/falterWliame/Face_Mask_Detection/Checkers-7 Registration Code.md b/spaces/falterWliame/Face_Mask_Detection/Checkers-7 Registration Code.md
deleted file mode 100644
index e3eb58f46d2d5a1e5df384aadf20a4524352c8de..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Checkers-7 Registration Code.md
+++ /dev/null
@@ -1,52 +0,0 @@
-checkers-7 registration code
Download ✶✶✶ https://urlca.com/2uDcWA
-
-, you will have the choice of using Group Chat or Private Chat with your new komodo friends. There is a small fee for each feature you use.
-
-Show off your new komodo flippers with our shiny new server, "Groovy Komodo". Log in and head over to the "Groovy Komodo" server list to get more information on this server.
-
-Happy Holidays from everyone on the server staff and we hope you have a great time.
-
-Komodo Tournament
-
-Tournament begins @ 8:00 PM EST. Prizes to be announced.
-
----------------------------------------------------------------------
-
-Komodo Social Media
-
-Follow us on Twitter, check out our Facebook page, and join our forum!
-
-Komodo Server Features
-
-==============================================================
-
-If you are not the owner of this server, please read below before attempting to register for a fee.
-
-*********************************************************************************************************
-
-Q&A
-
-Are my plans safe?
-
-We have taken extreme measures to protect your data. Komodo is based on Drupal 7 (LTS version) and MovableType 4.7. Komodo is not currently supported on MovableType 5.5 or earlier. Komodo is also NOT a product that will be supported by Automattic, Inc. (the company that runs WordPress). It is an independent project. We do not support Komodo in any way. We cannot guarantee that the server will be available at all times. It is possible that the server will be taken down for maintenance.
-
-Can I get refunds?
-
-If you have questions about refunds or an issue with your registration, please contact us at support@komodo.xyz
-
-I have a problem, how can I contact you?
-
-You can contact us at support@komodo.xyz
-
-Do you provide security for Komodo?
-
-Komodo is an independent company that does not provide security for your Komodo account or server data. We recommend that you use our free antivirus plugin to prevent the possibility of malware being installed on your server. The Komodo team has not reviewed the code of the server, therefore it is the server owner’s responsibility to secure their server.
-
-Can I add subdomains to the server?
-
-No, you cannot add subdomains to the server.
-
-The hosting server owner reserves the right to accept or refuse registration for the 4fefd39f24
-
-
-
diff --git a/spaces/falterWliame/Face_Mask_Detection/Not Angka Lagu The Power Of Love VERIFIED.md b/spaces/falterWliame/Face_Mask_Detection/Not Angka Lagu The Power Of Love VERIFIED.md
deleted file mode 100644
index 15fed0277c2049ea74ec2bc0bca045f8cc3b5ea0..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Not Angka Lagu The Power Of Love VERIFIED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Not angka lagu the power of love
DOWNLOAD ✺✺✺ https://urlca.com/2uDcv1
-
-Lagu Karaoke LAWAS TERBAIK - Tembang Kenangan Indonesia. Rp135. ... Cd Lagu Musik Barat Lawas The Power Of Love Songs ... Not Angka Lagu Bahtera. 4d29de3e1b
-
-
-
diff --git a/spaces/fatiXbelha/sd/Download Call of Duty 3 and Join the Battle of the Century.md b/spaces/fatiXbelha/sd/Download Call of Duty 3 and Join the Battle of the Century.md
deleted file mode 100644
index a0d6b2f0ecc9c91f8e8188feeaec6ea2a3348a2b..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download Call of Duty 3 and Join the Battle of the Century.md
+++ /dev/null
@@ -1,117 +0,0 @@
-
-Download Call of Duty 3: A Guide for Gamers
-If you are a fan of first-person shooter games, you might have heard of Call of Duty 3. It is one of the most popular and acclaimed games in the Call of Duty series, which is known for its realistic and immersive war scenarios. In this article, we will tell you everything you need to know about Call of Duty 3, and how to download it on your preferred platform or device.
- What is Call of Duty 3?
-Call of Duty 3 is a 2006 first-person shooter video game developed by Treyarch and published by Activision. It is the third major installment in the Call of Duty series, which is based on the events and battles of World War II. It was released for PlayStation 3, Wii, Xbox 360, PlayStation 2 and Xbox. It was a launch title for the PlayStation 3 and Wii in North America, Europe and Australia.
-download call of duty 3
Download File ⚡ https://urllie.com/2uNFq0
- The story and gameplay of Call of Duty 3
-The game follows the Allied invasion of Normandy in 1944, and features four campaigns that focus on different nations and factions: the American, British, Canadian, and Polish forces. The player can control different characters from each campaign, and experience different perspectives and objectives. The game has a total of 14 missions, each with multiple paths and options to complete.
-The game is open-ended, giving the player freedom to choose how to approach each situation. The player can use various weapons and equipment, such as rifles, pistols, grenades, tanks, and air strikes. The player can also interact with the environment, such as hiding behind cover, tossing back enemy grenades, or destroying obstacles. The game also features a realistic physics engine that affects the movement and behavior of objects and characters.
- The features and modes of Call of Duty 3
-The game has several features and modes that enhance its replay value and appeal. Some of them are:
-
-- A multiplayer mode that supports up to 24 players online or offline, with different modes such as deathmatch, capture the flag, headquarters, war, single flag capture, battle mode (Wii only), etc.
-- A split-screen co-op mode that allows two players to play through the single-player campaign together.
-- A bonus mode called Arcade Mode that lets the player replay any mission with a scoring system based on kills, headshots, accuracy, etc.
-- A variety of unlockable content such as concept art, interviews, videos, cheats, etc.
-
- Why should you download Call of Duty 3?
-There are many reasons why you should download Call of Duty 3 if you are a fan of first-person shooter games. Here are some of them:
- The benefits of playing Call of Duty 3
-Playing Call of Duty 3 can offer you many benefits, such as:
-
-- Improving your reflexes, coordination, and decision-making skills.
-- Learning about history and culture through the authentic and accurate depiction of World War II events and locations.
-- Experiencing adrenaline-pumping action and excitement through the intense and dramatic gameplay.
-- Having fun and enjoyment with your friends or other players through the multiplayer and co-op modes.
-
- The reviews and ratings of Call of Duty 3
-Call of Duty 3 has received positive reviews and ratings from critics and players alike. It has an average score of 82/100 on Metacritic, based on 63 reviews for the Xbox 360 version. It has also sold over 7.2 million copies worldwide as of November 2013, making it one of the best-selling games in the Call of Duty series.
-Some of the praises and criticisms of Call of Duty 3 are:
-download call of duty 3 pc full version
-call of duty 3 steam download
-call of duty modern warfare 3 download
-call of duty 3 free download for windows 10
-call of duty 3 download size
-how to download call of duty 3 on ps4
-call of duty 3 xbox one download
-call of duty black ops 3 download
-call of duty 3 download ocean of games
-call of duty 3 highly compressed download
-call of duty world at war 3 download
-call of duty 3 download for android
-call of duty 3 download mac
-call of duty 3 download apk
-call of duty 3 download softonic
-call of duty modern warfare 3 download for pc highly compressed
-call of duty black ops 3 download for pc highly compressed
-call of duty modern warfare 3 download size
-call of duty black ops 3 download size
-call of duty modern warfare 3 download ocean of games
-call of duty black ops 3 download ocean of games
-call of duty modern warfare 3 download for android
-call of duty black ops 3 download for android
-call of duty modern warfare 3 download apk
-call of duty black ops 3 download apk
-call of duty modern warfare 3 download mac
-call of duty black ops 3 download mac
-call of duty modern warfare 3 download softonic
-call of duty black ops 3 download softonic
-how to download call of duty modern warfare 3 for free on pc
-how to download call of duty black ops 3 for free on pc
-how to download call of duty modern warfare 3 multiplayer only
-how to download call of duty black ops 3 multiplayer only
-how to download call of duty modern warfare 3 zombies mode
-how to download call of duty black ops 3 zombies mode
-how to download and install call of duty modern warfare 3 on pc with proof (cod mw3)
-how to download and install call of duty black ops 3 on pc with proof (cod bo3)
-how to fix error during initialization on install call of duty modern warfare 3 (cod mw3)
-how to fix error during initialization on install call of duty black ops 3 (cod bo3)
-how to play online multiplayer in cracked version of call of duty modern warfare 3 (cod mw3)
-how to play online multiplayer in cracked version of call of duty black ops 3 (cod bo3)
-best settings for low end pc to run call of duty modern warfare 3 (cod mw3)
-best settings for low end pc to run call of duty black ops 3 (cod bo3)
-best graphics mod for call of duty modern warfare 3 (cod mw3)
-best graphics mod for call of duty black ops 3 (cod bo3)
-
-- The game has impressive graphics, sound, and presentation that create a realistic and immersive atmosphere.
-- The game has varied and challenging missions, with multiple paths and options to complete them.
-- The game has a robust and enjoyable multiplayer mode, with different modes and maps to choose from.
-- The game has some technical issues, such as glitches, bugs, and frame rate drops.
-- The game has a short and linear single-player campaign, with little replay value.
-- The game has a lack of innovation and originality, as it follows the same formula as the previous games in the series.
-
- How to download Call of Duty 3?
-If you are convinced that Call of Duty 3 is a game worth playing, you might be wondering how to download it on your platform or device. Here are some tips and steps to help you out:
- The platforms and devices that support Call of Duty 3
-Call of Duty 3 is available for various platforms and devices, such as:
- PlayStation 2, PlayStation 3, and Wii
-If you have a PlayStation 2, PlayStation 3, or Wii console, you can download Call of Duty 3 from their respective online stores or marketplaces. You will need to have an internet connection, a valid account, and enough storage space on your console. You will also need to pay a certain amount of money to purchase the game.
- Xbox, Xbox 360, and PC
-If you have an Xbox, Xbox 360, or PC device, you can download Call of Duty 3 from various websites or sources online. You will need to have an internet connection, a compatible device, and enough storage space on your device. You will also need to be careful about the legality and safety of the websites or sources you choose to download from.
- The steps and tips to download Call of Duty 3
-Here are some general steps and tips to download Call of Duty 3 on your platform or device:
- Find a reliable source or website
-The first step is to find a reliable source or website that offers Call of Duty 3 for download. You can use search engines, forums, blogs, or recommendations from friends or other players to find one. You should check the reviews, ratings, feedback, and reputation of the source or website before downloading anything from it. You should also avoid any source or website that asks for personal information, payment details, or suspicious permissions.
- Check the system requirements and compatibility
-The second step is to check the system requirements and compatibility of Call of Duty 3 for your platform or device. You should make sure that your platform or device meets the minimum or recommended specifications for running the game smoothly and without any issues. You should also make sure that your platform or device supports the format and version of the game you are downloading.
- Follow the instructions and complete the installation
-The third step is to follow the instructions and complete the installation of Call of Duty 3 on your platform or device. You should follow the steps provided by the source or website you are downloading from, or by the game itself. You should also agree to the terms and conditions, accept any updates or patches, and verify the integrity of the game files. Once the installation is done, you can launch the game and enjoy playing it.
- Conclusion
-Call of Duty 3 is a great game for fans of first-person shooter games. It has a realistic and immersive story and gameplay that takes you back to World War II. It has a robust and enjoyable multiplayer mode that lets you play with your friends or other players online or offline. It has positive reviews and ratings from critics and players alike. It is available for various platforms and devices, and you can download it easily and safely by following the tips and steps we have provided in this article. We hope you have found this article helpful and informative. If you have any questions or comments, feel free to leave them below. Happy gaming!
- FAQs
-Here are some frequently asked questions and answers about Call of Duty 3:
-
-- Is Call of Duty 3 free to download?
-No, Call of Duty 3 is not free to download. You will need to pay a certain amount of money to purchase the game from the official online stores or marketplaces of your platform or device. However, you might be able to find some discounts or offers from time to time.
-- Is Call of Duty 3 safe to download?
-Yes, Call of Duty 3 is safe to download, as long as you download it from a reliable source or website. You should avoid any source or website that looks suspicious, illegal, or malicious. You should also scan the game files with an antivirus software before installing them.
-- How long does it take to download Call of Duty 3?
-The time it takes to download Call of Duty 3 depends on several factors, such as the size of the game file, the speed of your internet connection, the performance of your platform or device, and the traffic of the source or website you are downloading from. Generally, it can take anywhere from a few minutes to a few hours to download Call of Duty 3.
-- Can I play Call of Duty 3 offline?
-Yes, you can play Call of Duty 3 offline, as long as you have downloaded and installed the game on your platform or device. You can play the single-player campaign, the split-screen co-op mode, or the offline multiplayer mode without an internet connection. However, you will need an internet connection to play the online multiplayer mode, or to access any online features or updates.
-- Can I play Call of Duty 3 with a controller?
-Yes, you can play Call of Duty 3 with a controller, if you have a compatible controller for your platform or device. You can use a PlayStation controller for PlayStation 2, PlayStation 3, and PC devices; an Xbox controller for Xbox, Xbox 360, and PC devices; or a Wii remote for Wii devices. You can also customize the controller settings and sensitivity according to your preference.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download GO Multiple Pro APK for Android - Run Multiple Accounts in One Device.md b/spaces/fatiXbelha/sd/Download GO Multiple Pro APK for Android - Run Multiple Accounts in One Device.md
deleted file mode 100644
index 1976d8a75dec2dac86f4a56c07e8b4be4bfb9153..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download GO Multiple Pro APK for Android - Run Multiple Accounts in One Device.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-How to Download GO Multiple Pro APK for Android Devices
-Do you want to run multiple accounts of the same app on your Android device? Do you want to switch between different apps without logging out and logging in again? Do you want to protect your privacy and data security while using multiple apps? If you answered yes to any of these questions, then you might be interested in downloading GO Multiple Pro APK.
-download go multiple pro apk
Download Zip ✑ ✑ ✑ https://urllie.com/2uNIIt
-GO Multiple Pro APK is a powerful and easy-to-use app that allows you to clone and run multiple instances of the same app on your Android device. With this app, you can enjoy multiple social media accounts, gaming accounts, work accounts, and more without any hassle. You can also customize and manage your cloned apps with various settings and features.
-In this article, we will show you what GO Multiple Pro APK is, what features and benefits it offers, how to download and install it on your Android device, and how to use it effectively. We will also answer some frequently asked questions about this app. By the end of this article, you will be able to enjoy multiple apps on your device with ease and convenience.
- What is GO Multiple Pro APK?
-GO Multiple Pro APK is an app that lets you create and run multiple virtual spaces on your Android device. A virtual space is a separate environment where you can clone and run another app. For example, if you have two Facebook accounts, you can clone the Facebook app and run it in a virtual space. This way, you can switch between your two accounts without logging out and logging in again.
-GO Multiple Pro APK is not available on Google Play Store, but you can download it from other sources. However, before downloading it, you should make sure that the source is reliable and trustworthy. You should also check the compatibility of your device and the app before installing it.
- Features of GO Multiple Pro APK
-GO Multiple Pro APK has many features that make it a useful and versatile app for cloning and running multiple apps. Some of these features are:
-
-- Supports most Android apps: You can clone and run almost any Android app with GO Multiple Pro APK, including social media apps, gaming apps, messaging apps, productivity apps, and more.
-- Supports x86 devices: You can use GO Multiple Pro APK on devices that have x86 processors, which are not supported by many other cloning apps.
-- No root required: You don't need to root your device to use GO Multiple Pro APK. You can use it without any risk of damaging your device or voiding your warranty.
-- No error 105: Unlike the version from Google Play Store, the optimized version of GO Multiple Pro APK does not have the error 105 issue, which prevents some apps from running properly.
-- Customizable settings: You can customize various settings for your cloned apps, such as notifications, storage, permissions, network, etc.
-- Data security: You can protect your data and privacy by locking your cloned apps with a password or a pattern. You can also hide your cloned apps from the app drawer or create shortcuts for them.
-
- Benefits of GO Multiple Pro APK
-GO Multiple Pro APK has many benefits that make it a worthwhile app to download and use. Some of these benefits are:
-How to download go multiple pro apk for free
-Go multiple pro apk latest version download
-Go multiple pro apk modded download
-Go multiple pro apk no root download
-Go multiple pro apk for android download
-Go multiple pro apk for pc download
-Go multiple pro apk for ios download
-Go multiple pro apk for windows download
-Go multiple pro apk for mac download
-Go multiple pro apk for linux download
-Download go multiple pro apk from google play
-Download go multiple pro apk from apkpure
-Download go multiple pro apk from apkcombo
-Download go multiple pro apk from gameguardian
-Download go multiple pro apk from uptodown
-Download go multiple pro apk cracked version
-Download go multiple pro apk premium version
-Download go multiple pro apk full version
-Download go multiple pro apk unlocked version
-Download go multiple pro apk patched version
-Download go multiple - parallel account app
-Download go multiple - virtual spaces app
-Download go multiple - dual space app
-Download go multiple - clone app app
-Download go multiple - multi accounts app
-Go multiple pro apk download link
-Go multiple pro apk download file
-Go multiple pro apk download mirror
-Go multiple pro apk download site
-Go multiple pro apk download online
-Go multiple pro apk download offline
-Go multiple pro apk download 2023
-Go multiple pro apk download 2022
-Go multiple pro apk download 2021
-Go multiple pro apk download 2020
-Benefits of downloading go multiple pro apk
-Features of downloading go multiple pro apk
-Reviews of downloading go multiple pro apk
-Ratings of downloading go multiple pro apk
-Alternatives of downloading go multiple pro apk
-Tips for downloading go multiple pro apk
-Tricks for downloading go multiple pro apk
-Hacks for downloading go multiple pro apk
-Cheats for downloading go multiple pro apk
-Guides for downloading go multiple pro apk
-Tutorials for downloading go multiple pro apk
-Videos for downloading go multiple pro apk
-Images for downloading go multiple pro apk
-Screenshots for downloading go multiple pro apk
-
-- Multitasking: You can run multiple apps simultaneously on your device without any interference or conflict. You can also switch between them easily and quickly.
-- Multiple accounts: You can use multiple accounts of the same app on your device without any hassle. You can enjoy different social media profiles, gaming levels, work projects, and more with different accounts.
-- Privacy protection: You can hide your online status, location, contacts, and other personal information from your cloned apps. You can also prevent them from accessing your device's data, such as photos, videos, files, etc.
-- Storage optimization: You can save storage space on your device by deleting the cache and data of your cloned apps. You can also move them to an external SD card if you have one.
-- Battery saving: You can reduce battery consumption by turning off the background running of your cloned apps. You can also set a timer to automatically close them after a certain period of time.
-
- How to Download and Install GO Multiple Pro APK
-If you want to download and install GO Multiple Pro APK on your Android device, you need to follow these simple steps:
- Step 1: Enable Unknown Sources on Your Device
-Since GO Multiple Pro APK is not available on Google Play Store, you need to enable the option of installing apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than Google Play Store.
- Step 2: Download the APK File from a Trusted Source
-The next step is to download the APK file of GO Multiple Pro APK from a trusted and reliable source. You can use the link below to download the latest version of the app:
-
-Make sure that you have enough storage space on your device before downloading the file. Also, scan the file with an antivirus app to ensure that it is safe and free from malware.
- Step 3: Install the APK File on Your Device
-Once you have downloaded the APK file, you need to install it on your device. To do this, locate the file in your device's file manager and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on Install and wait for the process to complete.
- Step 4: Launch the App and Enjoy
-After the installation is done, you can launch the app by tapping on its icon in your app drawer or home screen. You will see a welcome screen with some instructions on how to use the app. Follow them and start cloning and running multiple apps on your device.
- How to Use GO Multiple Pro APK
-Using GO Multiple Pro APK is very easy and intuitive. Here are some tips on how to use it effectively:
- How to Clone Apps with GO Multiple Pro APK
-To clone an app with GO Multiple Pro APK, you need to do the following:
-
-- Open GO Multiple Pro APK and tap on the + icon at the bottom right corner of the screen.
-- Select the app that you want to clone from the list of installed apps on your device. You can also search for an app by typing its name in the search bar.
-- Tap on Clone and wait for a few seconds until the cloning process is done.
-- You will see a new icon of the cloned app in your app drawer or home screen. Tap on it to launch it and log in with a different account.
-
- How to Manage Cloned Apps with GO Multiple Pro APK
-To manage your cloned apps with GO Multiple Pro APK, you need to do the following:
-
-- Open GO Multiple Pro APK and tap on the ≡ icon at the top left corner of the screen.
-- Select Manage Space from the menu that appears.
-- You will see a list of your cloned apps and their storage usage. You can tap on each app to see more details and options.
-- You can delete the cache and data of your cloned apps by tapping on Clear Cache or Clear Data. This will free up some storage space on your device.
-- You can move your cloned apps to an external SD card by tapping on Move to SD Card. This will also save some storage space on your device. However, you need to have an SD card inserted in your device and make sure that it has enough space.
-- You can uninstall your cloned apps by tapping on Uninstall. This will remove the app and its data from your device completely.
-
- FAQs about GO Multiple Pro APK
-Here are some frequently asked questions and answers about GO Multiple Pro APK:
- Q: Is GO Multiple Pro APK safe to use?
-A: Yes, GO Multiple Pro APK is safe to use as long as you download it from a trusted and reliable source. You should also scan the APK file with an antivirus app before installing it. However, you should be careful about the permissions and data that you grant to your cloned apps, as they may pose some security risks.
- Q: Does GO Multiple Pro APK work on all Android devices?
-A: No, GO Multiple Pro APK may not work on some Android devices, especially those that have low RAM, CPU, or storage capacity. You should check the compatibility of your device and the app before installing it. You should also close some background apps and processes to improve the performance of your cloned apps.
- Q: Does GO Multiple Pro APK affect the original apps?
-A: No, GO Multiple Pro APK does not affect the original apps on your device. Your cloned apps run in separate virtual spaces that do not interfere with the original apps. You can use both the original and the cloned apps without any conflict or problem.
- Q: How many apps can I clone with GO Multiple Pro APK?
-A: There is no limit to how many apps you can clone with GO Multiple Pro APK. However, you should consider the storage space and performance of your device before cloning too many apps. You should also delete or move some cloned apps that you don't use frequently to save some space and battery.
- Q: How can I update my cloned apps with GO Multiple Pro APK?
-A: You can update your cloned apps with GO Multiple Pro APK by following these steps:
-
-- Open GO Multiple Pro APK and tap on the ≡ icon at the top left corner of the screen.
-- Select Update Apps from the menu that appears.
-- You will see a list of your cloned apps that have new updates available. You can tap on Update All to update all of them at once, or tap on each app to update it individually.
-- Wait for the update process to complete and enjoy the latest version of your cloned apps.
-
- Conclusion
-GO Multiple Pro APK is a great app that allows you to clone and run multiple instances of the same app on your Android device. You can use it to enjoy multiple social media accounts, gaming accounts, work accounts, and more without any hassle. You can also customize and manage your cloned apps with various settings and features. You can download and install GO Multiple Pro APK from the link below and start using it right away.
-
-We hope that this article has helped you understand what GO Multiple Pro APK is, what features and benefits it offers, how to download and install it on your Android device, and how to use it effectively. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Enjoy GreenNet VPN on PC with Free Download The Best VPN for Privacy and Unblock Websites.md b/spaces/fatiXbelha/sd/Enjoy GreenNet VPN on PC with Free Download The Best VPN for Privacy and Unblock Websites.md
deleted file mode 100644
index 332416c424da4a1dbd8715f1c037c31fb111cc0b..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Enjoy GreenNet VPN on PC with Free Download The Best VPN for Privacy and Unblock Websites.md
+++ /dev/null
@@ -1,87 +0,0 @@
-
-How to Download Green Net VPN for PC and Why You Should Use It
| | If you are looking for a way to protect your online privacy and security, access geo-restricted content, and enjoy fast and reliable internet connection, you might want to consider using a VPN (Virtual Private Network). A VPN is a service that encrypts your data and masks your IP address, making it harder for anyone to track or spy on your online activities. One of the best VPNs you can use is Green Net VPN, a free and easy-to-use app that works on Android devices. But what if you want to use Green Net VPN on your PC? In this article, we will show you how to download and install Green Net VPN for PC using an Android emulator, and why you should use it.
-free download green net vpn for pc
Download File — https://urllie.com/2uNv1v
| | What is Green Net VPN and What are Its Benefits?
| | Green Net VPN is a free VPN app that offers secure and encrypted internet connection, no-logging policy, access to geo-restricted content, and fast and reliable connection. It has over 10 million downloads on Google Play Store and a 4.4-star rating from more than 400,000 users. Here are some of the features and benefits of using Green Net VPN:
| |
-- Secure and encrypted internet connection: Green Net VPN uses AES 256-bit encryption technology, which is the same standard used by the military and governments. This means that your internet traffic is protected from hackers, ISPs, advertisers, and anyone else who might want to snoop on your data.
-- No-logging policy: Green Net VPN does not keep any records of your online activities, ensuring that your personal information remains private and secure. This policy ensures that your data is never stored, monitored, or shared with any third-party entities.
-- Access to geo-restricted content: Green Net VPN allows you to bypass censorship and geo-restrictions by changing your IP address to one of its servers in different countries. This way, you can access websites and apps that are blocked or unavailable in your region, such as Netflix, YouTube, Facebook, Twitter, etc.
-- Fast and reliable connection: Green Net VPN has over 3,000 servers in more than 60 countries, ensuring that you always have a stable and speedy connection. You can also enjoy unlimited bandwidth with no data caps or throttling.
-
| | How to Download and Install Green Net VPN for PC
| | To use Green Net VPN on your PC, you need to use an Android emulator. An Android emulator is a software that allows you to run Android apps on your PC. There are many Android emulators available on the market, but we recommend using one of these three:
| |
-
-| Name | Pros | Cons |
-| --- | --- | --- |
-| BlueStacks | The most popular and widely used Android emulator; easy to install and use; supports high-performance gaming; compatible with Windows and Mac | Requires a lot of system resources; may have some compatibility issues with some apps; may show ads |
-| NoxPlayer | A fast and lightweight Android emulator; supports multiple instances and keyboard mapping; compatible with Windows and Mac | May have some stability and performance issues; may have some security risks; may not support the latest Android versions |
-| LDPlayer | A powerful and smooth Android emulator; supports high-quality graphics and FPS games; compatible with Windows only | May have some compatibility issues with some apps; may have some bugs and glitches; may show ads |
-
-Once you have chosen and downloaded your preferred Android emulator, follow these steps to install Green Net VPN for PC:
-
-- Launch the Android emulator and sign in with your Google account.
-- Go to the Google Play Store and search for Green Net VPN.
-- Click on the install button and wait for the app to download.
-- Once the app is installed, you can find it on the home screen or the app drawer of the emulator.
-- Click on the app icon to launch Green Net VPN and enjoy using it on your PC.
-
-How to Use Green Net VPN for PC
-Using Green Net VPN for PC is very easy and straightforward. Here are some of the main functions and tips you can use to optimize your VPN experience:
-
-- Connect to a server: To connect to a server, simply click on the big green button on the app interface. The app will automatically select the best server for you based on your location and network speed. You can also choose a server manually by clicking on the globe icon and selecting a country from the list.
-- Change settings: To change settings, click on the menu icon on the top left corner of the app interface. You can adjust various options, such as auto-connect, notifications, language, etc. You can also access the help center, feedback, and rate us sections from here.
-- Check your IP address: To check your IP address, click on the shield icon on the top right corner of the app interface. You will see your original IP address and your new IP address after connecting to a server. You can also see your connection status, time, and data usage from here.
-- Optimize your connection: To optimize your connection, make sure you choose a server that is close to your location and has a high speed. You can also try switching servers if you encounter any issues or slowdowns. You can also clear your cache and cookies from your browser to improve your browsing speed.
-- Enjoy unlimited access: To enjoy unlimited access, you can use Green Net VPN to unblock any website or app that is restricted or censored in your region. You can also stream videos, play games, download files, and do anything else you want online without worrying about bandwidth limits or throttling.
-
-Conclusion
-In conclusion, Green Net VPN is one of the best VPNs you can use on your PC. It offers a secure and encrypted internet connection, a no-logging policy, access to geo-restricted content, and a fast and reliable connection. It is also free and easy to use, thanks to its simple and user-friendly interface. To download and install Green Net VPN for PC, you just need to use an Android emulator and follow a few simple steps. Once you have it on your PC, you can enjoy using it anytime and anywhere you want. So what are you waiting for? Download Green Net VPN for PC today and experience the difference!
-FAQs
-Here are some of the frequently asked questions and their answers about Green Net VPN for PC:
-Q: Is Green Net VPN safe to use?
-A: Yes, Green Net VPN is safe to use. It uses AES 256-bit encryption technology, which is very secure and reliable. It also has a no-logging policy, which means it does not store or share any of your online activities or personal information.
-How to install GreenNet VPN on PC with MEmu emulator
-GreenNet VPN: secure and encrypted internet connection for PC
-GreenNet: Hotspot VPN Proxy app free on PC with LDPlayer
-Green Net VPN: free VPN and unlimited proxy for Windows PC
-Download GreenNet VPN for PC and Mac from Google Play Store
-GreenNet VPN: no-log policy and unlimited bandwidth for PC users
-GreenNet: Hotspot VPN Proxy app for PC: bypass geo-restrictions and access blocked websites
-Green Net VPN: fast and reliable VPN service for Windows PC
-How to use GreenNet VPN on PC with Android emulator
-GreenNet VPN: military-grade AES 256-bit encryption for PC
-GreenNet: Hotspot VPN Proxy app for PC: incognito browsing and wifi safety
-Green Net VPN: best VPN and fast free VPN proxy for Windows PC
-How to download GreenNet VPN for PC and Mac from App Store
-GreenNet VPN: access to geo-restricted content and social networks for PC
-GreenNet: Hotspot VPN Proxy app for PC: anonymous connection and privacy protection
-Green Net VPN: no ads and no registration required for Windows PC
-How to update GreenNet VPN on PC with MEmu emulator
-GreenNet VPN: get ultimate online security with GreenNet VPN for PC
-GreenNet: Hotspot VPN Proxy app for PC: secure your device and internet traffic
-Green Net VPN: easy to use and user-friendly interface for Windows PC
-How to uninstall GreenNet VPN on PC with LDPlayer
-GreenNet VPN: enjoy seamless streaming and download without limits for PC
-GreenNet: Hotspot VPN Proxy app for PC: location spoofer and IP masker
-Green Net VPN: support multiple languages and regions for Windows PC
-How to contact GreenNet VPN customer service on PC
-GreenNet VPN: compatible with all internet based services and browsers for PC
-GreenNet: Hotspot VPN Proxy app for PC: no data cap or bandwidth limit
-Green Net VPN: support Android, iPhone, iPad and Mac devices for Windows PC
-How to rate and review GreenNet VPN on PC with Google Play Store
-GreenNet VPN: one tap to connect and disconnect for PC
-GreenNet: Hotspot VPN Proxy app for PC: low battery and memory usage
-Green Net VPN: 30-day money back guarantee for Windows PC users
-How to share GreenNet VPN with your friends on PC with social media apps
-GreenNet VPN: choose from hundreds of servers in different countries for PC
-GreenNet: Hotspot VPN Proxy app for PC: works with public WiFi hotspots and cellular data networks
-Green Net VPN: affordable and flexible subscription plans for Windows PC users
-How to troubleshoot common issues with GreenNet VPN on PC with FAQ section
-GreenNet VPN: high-speed and stable connection for PC users
-GreenNet: Hotspot VPN Proxy app for PC: supports P2P and torrenting activities
-Green Net VPN: offers free trial and referral bonus for Windows PC users
-Q: Is Green Net VPN free to use?
-A: Yes, Green Net VPN is free to use. It does not charge any fees or require any subscriptions or registrations. However, it may show some ads from time to time, which you can remove by upgrading to the premium version.
-Q: Does Green Net VPN work on Windows and Mac?
-A: Yes, Green Net VPN works on Windows and Mac. You just need to use an Android emulator to run it on your PC. You can choose from different Android emulators, such as BlueStacks, NoxPlayer, or LDPlayer, depending on your preference and system requirements.
-Q: How can I contact Green Net VPN support?
-A: If you have any questions, issues, or feedback about Green Net VPN, you can contact their support team by sending an email to greennetvpn@gmail.com. You can also visit their website or their Facebook page for more information and updates.
-Q: What are some alternatives to Green Net VPN?
-A: If you are looking for some alternatives to Green Net VPN, you can try some of these VPNs that also work on PC:
-
-- NordVPN: A premium VPN that offers advanced security features, fast speed, and access to over 5,000 servers in 60 countries.
-- ExpressVPN: A high-quality VPN that offers excellent performance, easy-to-use interface, and access to over 3,000 servers in 94 countries.
-- Turbo VPN: A free VPN that offers unlimited bandwidth, simple interface, and access to over 10,000 servers in 50 countries.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/criteria/lpips/utils.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/criteria/lpips/utils.py
deleted file mode 100644
index 3d15a0983775810ef6239c561c67939b2b9ee3b5..0000000000000000000000000000000000000000
--- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/criteria/lpips/utils.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from collections import OrderedDict
-
-import torch
-
-
-def normalize_activation(x, eps=1e-10):
- norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
- return x / (norm_factor + eps)
-
-
-def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
- # build url
- url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
- + f'master/lpips/weights/v{version}/{net_type}.pth'
-
- # download
- old_state_dict = torch.hub.load_state_dict_from_url(
- url, progress=True,
- map_location=None if torch.cuda.is_available() else torch.device('cpu')
- )
-
- # rename keys
- new_state_dict = OrderedDict()
- for key, val in old_state_dict.items():
- new_key = key
- new_key = new_key.replace('lin', '')
- new_key = new_key.replace('model.', '')
- new_state_dict[new_key] = val
-
- return new_state_dict
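
The helpers above cover the two data-handling steps of LPIPS: `normalize_activation` unit-normalises feature maps per channel, and `get_state_dict` fetches the published linear weights and strips the `lin`/`model.` prefixes from their keys. A minimal sketch of how they combine into a distance score, assuming a 64-channel feature map and the standard LPIPS weighting (the 1x1 convolution below is a randomly initialised stand-in for the downloaded weights, not the real ones):

```python
import torch

def normalize_activation(x, eps=1e-10):
    # same helper as in the deleted module, repeated so the snippet is standalone
    norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
    return x / (norm_factor + eps)

feat_a = torch.randn(1, 64, 32, 32)  # activations of image A at one backbone layer
feat_b = torch.randn(1, 64, 32, 32)  # activations of image B at the same layer

# LPIPS: squared difference of unit-normalised features, weighted by a 1x1 conv,
# then averaged over the spatial dimensions
diff = (normalize_activation(feat_a) - normalize_activation(feat_b)) ** 2
lin = torch.nn.Conv2d(64, 1, kernel_size=1, bias=False)  # stand-in for the downloaded weights
score = lin(diff).mean(dim=(2, 3))
print(score.shape)  # torch.Size([1, 1])
```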
diff --git a/spaces/fffffu/bing/src/components/chat-attachments.tsx b/spaces/fffffu/bing/src/components/chat-attachments.tsx
deleted file mode 100644
index ef43d4e262935d263b6099138c56f7daade5299d..0000000000000000000000000000000000000000
--- a/spaces/fffffu/bing/src/components/chat-attachments.tsx
+++ /dev/null
@@ -1,37 +0,0 @@
-import Image from 'next/image'
-import ClearIcon from '@/assets/images/clear.svg'
-import RefreshIcon from '@/assets/images/refresh.svg'
-import { FileItem } from '@/lib/bots/bing/types'
-import { cn } from '@/lib/utils'
-import { useBing } from '@/lib/hooks/use-bing'
-
-type ChatAttachmentsProps = Pick<ReturnType<typeof useBing>, 'attachmentList' | 'setAttachmentList' | 'uploadImage'>
-
-export function ChatAttachments({ attachmentList = [], setAttachmentList, uploadImage }: ChatAttachmentsProps) {
- return attachmentList.length ? (
-
- {attachmentList.map(file => (
-
- {file.status === 'loading' && (
-
-
- )
- }
- {file.status !== 'error' && (
-
-
- )
- }
- {file.status === 'error' && (
-
- uploadImage(file.url)} />
-
- )}
-
-
- ))}
-
- ) : null
-}
diff --git a/spaces/fffiloni/Image-to-MusicGen/audiocraft/models/lm.py b/spaces/fffiloni/Image-to-MusicGen/audiocraft/models/lm.py
deleted file mode 100644
index 43f82b42340dd9e721a3a76fa58e27f70fe2b4e5..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/Image-to-MusicGen/audiocraft/models/lm.py
+++ /dev/null
@@ -1,526 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass
-from functools import partial
-import logging
-import math
-import typing as tp
-
-import torch
-from torch import nn
-
-from ..utils import utils
-from ..modules.streaming import StreamingModule, State
-from ..modules.transformer import StreamingTransformer, create_norm_fn
-from ..modules.conditioners import (
- ConditionFuser,
- ClassifierFreeGuidanceDropout,
- AttributeDropout,
- ConditioningProvider,
- ConditioningAttributes,
- ConditionType,
-)
-from ..modules.codebooks_patterns import CodebooksPatternProvider
-from ..modules.activations import get_activation_fn
-
-
-logger = logging.getLogger(__name__)
-ConditionTensors = tp.Dict[str, ConditionType]
-CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]]
-
-
-def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None):
- """LM layer initialization.
- Inspired from xlformers: https://github.com/fairinternal/xlformers
-
- Args:
- method (str): Method name for init function. Valid options are:
- 'gaussian', 'uniform'.
- input_dim (int): Input dimension of the initialized module.
- init_depth (Optional[int]): Optional init depth value used to rescale
- the standard deviation if defined.
- """
- # Compute std
- std = 1 / math.sqrt(input_dim)
- # Rescale with depth
- if init_depth is not None:
- std = std / math.sqrt(2 * init_depth)
-
- if method == 'gaussian':
- return partial(
- torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std
- )
- elif method == 'uniform':
- bound = math.sqrt(3) * std # ensure the standard deviation is `std`
- return partial(torch.nn.init.uniform_, a=-bound, b=bound)
- else:
- raise ValueError("Unsupported layer initialization method")
-
-
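
As a quick, self-contained check of the depth rescaling described in the docstring above (the function body is repeated inline so the snippet runs without the rest of the module; the 512 -> 1024 shape and depth of 24 are illustrative choices, not values from a real config):

```python
import math
from functools import partial

import torch

def get_init_fn(method: str, input_dim: int, init_depth=None):
    # copy of the helper above: std = 1/sqrt(input_dim), shrunk by sqrt(2 * depth)
    std = 1 / math.sqrt(input_dim)
    if init_depth is not None:
        std = std / math.sqrt(2 * init_depth)
    if method == 'gaussian':
        return partial(torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std)
    elif method == 'uniform':
        bound = math.sqrt(3) * std  # uniform(-bound, bound) has standard deviation `std`
        return partial(torch.nn.init.uniform_, a=-bound, b=bound)
    raise ValueError("Unsupported layer initialization method")

weight = torch.empty(1024, 512)  # e.g. the weight of a Linear(512 -> 1024)
get_init_fn('gaussian', input_dim=512, init_depth=24)(weight)
print(float(weight.std()))  # roughly 1 / (sqrt(512) * sqrt(48)) ≈ 0.0064
```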
-def init_layer(m: nn.Module,
- method: str,
- init_depth: tp.Optional[int] = None,
- zero_bias_init: bool = False):
- """Wrapper around ``get_init_fn`` for proper initialization of LM modules.
-
- Args:
- m (nn.Module): Module to initialize.
- method (str): Method name for the init function.
- init_depth (Optional[int]): Optional init depth value used to rescale
- the standard deviation if defined.
- zero_bias_init (bool): Whether to initialize the bias to 0 or not.
- """
- if isinstance(m, nn.Linear):
- init_fn = get_init_fn(method, m.in_features, init_depth=init_depth)
- if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
- weight = m.weight.float()
- init_fn(weight)
- m.weight.data[:] = weight.half()
- else:
- init_fn(m.weight)
- if zero_bias_init and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.Embedding):
- init_fn = get_init_fn(method, m.embedding_dim, init_depth=None)
- if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
- weight = m.weight.float()
- init_fn(weight)
- m.weight.data[:] = weight.half()
- else:
- init_fn(m.weight)
-
-
-class ScaledEmbedding(nn.Embedding):
- """Boost learning rate for embeddings (with `scale`).
- """
- def __init__(self, *args, lr=None, **kwargs):
- super().__init__(*args, **kwargs)
- self.lr = lr
-
- def make_optim_group(self):
- group = {"params": list(self.parameters())}
- if self.lr is not None:
- group["lr"] = self.lr
- return group
-
-
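
The `lr` stored on `ScaledEmbedding` only takes effect if the optimizer is built from the groups returned by `make_optim_group`; a minimal sketch under that assumption (class body copied from above, sizes and learning rates are illustrative):

```python
import torch
from torch import nn

class ScaledEmbedding(nn.Embedding):
    """Embedding whose parameters get their own learning rate via an optimizer param group."""
    def __init__(self, *args, lr=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.lr = lr

    def make_optim_group(self):
        group = {"params": list(self.parameters())}
        if self.lr is not None:
            group["lr"] = self.lr
        return group

emb = ScaledEmbedding(1025, 128, lr=1e-3)   # card + 1 tokens, model dim 128
head = nn.Linear(128, 1024)
optimizer = torch.optim.AdamW(
    [emb.make_optim_group(), {"params": head.parameters()}], lr=1e-4)
print([g["lr"] for g in optimizer.param_groups])  # [0.001, 0.0001]
```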
-@dataclass
-class LMOutput:
- # The logits are already re-aligned with the input codes
- # hence no extra shift is required, e.g. when computing CE
- logits: torch.Tensor # [B, K, T, card]
- mask: torch.Tensor # [B, K, T]
-
-
-class LMModel(StreamingModule):
- """Transformer-based language model on multiple streams of codes.
-
- Args:
- pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving.
- condition_provider (MusicConditioningProvider): Conditioning provider from metadata.
- fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input.
- n_q (int): Number of parallel streams to model.
- card (int): Cardinality, vocabulary size.
- dim (int): Dimension of the transformer encoder.
- num_heads (int): Number of heads for the transformer encoder.
- hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder.
- norm (str): Normalization method.
- norm_first (bool): Use pre-norm instead of post-norm.
- emb_lr (Optional[float]): Embedding-specific learning rate.
- bias_proj (bool): Use bias for output projections.
- weight_init (Optional[str]): Method for weight initialization.
- depthwise_init (Optional[str]): Method for depthwise weight initialization.
- zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros.
- cfg_dropout (float): Classifier-free guidance dropout.
- cfg_coef (float): Classifier-free guidance coefficient.
- attribute_dropout (dict): Attribute dropout probabilities.
- two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps.
- **kwargs: Additional parameters for the transformer encoder.
- """
- def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider,
- fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8,
- hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False,
- emb_lr: tp.Optional[float] = None, bias_proj: bool = True,
- weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None,
- zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0,
- attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False,
- **kwargs):
- super().__init__()
- self.cfg_coef = cfg_coef
- self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout)
- self.att_dropout = AttributeDropout(p=attribute_dropout)
- self.condition_provider = condition_provider
- self.fuser = fuser
- self.card = card
- embed_dim = self.card + 1
- self.n_q = n_q
- self.dim = dim
- self.pattern_provider = pattern_provider
- self.two_step_cfg = two_step_cfg
- self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)])
- if 'activation' in kwargs:
- kwargs['activation'] = get_activation_fn(kwargs['activation'])
- self.transformer = StreamingTransformer(
- d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim),
- norm=norm, norm_first=norm_first, **kwargs)
- self.out_norm: tp.Optional[nn.Module] = None
- if norm_first:
- self.out_norm = create_norm_fn(norm, dim)
- self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)])
- self._init_weights(weight_init, depthwise_init, zero_bias_init)
- self._fsdp: tp.Optional[nn.Module]
- self.__dict__['_fsdp'] = None
-
- def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool):
- """Initialization of the transformer module weights.
-
- Args:
- weight_init (Optional[str]): Weight initialization strategy. See ``get_init_fn`` for valid options.
- depthwise_init (Optional[str]): Depthwise initialization strategy. The following options are valid:
- 'current' where the depth corresponds to the current layer index or 'global' where the total number
- of layers is used as depth. If not set, no depthwise initialization strategy is used.
- zero_bias_init (bool): Whether to initialize bias to zero or not.
- """
- assert depthwise_init is None or depthwise_init in ['current', 'global']
- assert depthwise_init is None or weight_init is not None, \
- "If 'depthwise_init' is defined, a 'weight_init' method should be provided."
- assert not zero_bias_init or weight_init is not None, \
- "If 'zero_bias_init', a 'weight_init' method should be provided"
-
- if weight_init is None:
- return
-
- for emb_layer in self.emb:
- init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
-
- for layer_idx, tr_layer in enumerate(self.transformer.layers):
- depth = None
- if depthwise_init == 'current':
- depth = layer_idx + 1
- elif depthwise_init == 'global':
- depth = len(self.transformer.layers)
- init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init)
- tr_layer.apply(init_fn)
-
- for linear in self.linears:
- init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
-
- @property
- def special_token_id(self) -> int:
- return self.card
-
- @property
- def num_codebooks(self) -> int:
- return self.n_q
-
- def forward(self, sequence: torch.Tensor,
- conditions: tp.List[ConditioningAttributes],
- condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor:
- """Apply language model on sequence and conditions.
- Given a sequence tensor of shape [B, K, S], with K the number of codebooks and
- S the number of sequence steps, return the logits with shape [B, K, S, card].
-
- Args:
- sequence (torch.Tensor): Indices of the codes to model.
- conditions (list[ConditioningAttributes]): conditionings to use when modeling
- the given codes. Note that when evaluating multiple times with the same conditioning
- you should pre-compute those and pass them as `condition_tensors`.
- condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning
- tensors, see `conditions`.
- Returns:
- torch.Tensor: Logits.
- """
- B, K, S = sequence.shape
- assert K == self.num_codebooks, 'Sequence shape must match the specified number of codebooks'
- input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)])
- if condition_tensors is None:
- assert not self._is_streaming, "Conditions tensors should be precomputed when streaming."
- # apply dropout modules
- conditions = self.cfg_dropout(conditions)
- conditions = self.att_dropout(conditions)
- tokenized = self.condition_provider.tokenize(conditions)
- # encode conditions and fuse, both have a streaming cache to not recompute when generating.
- condition_tensors = self.condition_provider(tokenized)
- else:
- assert not conditions, "Shouldn't pass both conditions and condition_tensors."
-
- input_, cross_attention_input = self.fuser(input_, condition_tensors)
-
- out = self.transformer(input_, cross_attention_src=cross_attention_input)
- if self.out_norm:
- out = self.out_norm(out)
- logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card]
-
- # remove the prefix from the model outputs
- if len(self.fuser.fuse2cond['prepend']) > 0:
- logits = logits[:, :, -S:]
-
- return logits # [B, K, S, card]
-
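
To make the shape contract of `forward` concrete, here is a hedged sketch of just the multi-codebook bookkeeping, with the transformer and the condition fusing replaced by an identity (all sizes are invented for illustration):

```python
import torch
from torch import nn

B, K, S, card, dim = 2, 4, 10, 1024, 128
emb = nn.ModuleList([nn.Embedding(card + 1, dim) for _ in range(K)])  # one table per codebook
heads = nn.ModuleList([nn.Linear(dim, card) for _ in range(K)])       # one output head per codebook

sequence = torch.randint(0, card, (B, K, S))
# the K codebook embeddings are summed into a single [B, S, dim] input stream
x = sum(emb[k](sequence[:, k]) for k in range(K))
out = x  # the transformer (and conditioning) is omitted in this sketch
logits = torch.stack([heads[k](out) for k in range(K)], dim=1)
print(logits.shape)  # torch.Size([2, 4, 10, 1024]) == [B, K, S, card]
```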
- def compute_predictions(
- self, codes: torch.Tensor,
- conditions: tp.List[ConditioningAttributes],
- condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput:
- """Given an input tensor of codes [B, K, T] and list of conditions, runs the model
- forward using the specified codes interleaving pattern.
-
- Args:
- codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size,
- K the number of codebooks and T the number of timesteps.
- conditions (list[ConditioningAttributes]): conditionings to use when modeling
- the given codes. Note that when evaluating multiple time with the same conditioning
- you should pre-compute those and pass them as `condition_tensors`.
- condition_tensors (dict[str, ConditionType] or None): pre-computed conditioning
- tensors, see `conditions`.
- Returns:
- LMOutput: Language model outputs
- logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes,
- i.e. the first item corresponds to logits to predict the first code, meaning that
- no additional shifting of codes and logits is required.
- mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions.
- Given the specified interleaving strategies, parts of the logits and codes should
- not be considered as valid predictions because of invalid context.
- """
- B, K, T = codes.shape
- codes = codes.contiguous()
- # map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens
- pattern = self.pattern_provider.get_pattern(T)
- sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence(
- codes, self.special_token_id, keep_only_valid_steps=True
- )
- # apply model on pattern sequence
- model = self if self._fsdp is None else self._fsdp
- logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card]
- # map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card]
- # and provide the corresponding mask over invalid positions of tokens
- logits = logits.permute(0, 3, 1, 2) # [B, card, K, S]
- # note: we use nans as special token to make it obvious if we feed unexpected logits
- logits, logits_indexes, logits_mask = pattern.revert_pattern_logits(
- logits, float('nan'), keep_only_valid_steps=True
- )
- logits = logits.permute(0, 2, 3, 1) # [B, K, T, card]
- logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T]
- return LMOutput(logits, logits_mask)
-
- def _sample_next_token(self,
- sequence: torch.Tensor,
- cfg_conditions: CFGConditions,
- unconditional_state: State,
- use_sampling: bool = False,
- temp: float = 1.0,
- top_k: int = 0,
- top_p: float = 0.0,
- cfg_coef: tp.Optional[float] = None) -> torch.Tensor:
- """Sample next token from the model given a sequence and a set of conditions. The model supports
- multiple sampling strategies (greedy sampling, softmax, top-k, top-p...).
-
- Args:
- sequence (torch.Tensor): Current sequence of shape [B, K, S]
- with K corresponding to the number of codebooks and S the number of sequence steps.
- S = 1 in streaming mode, except for the first step that contains a bigger prompt.
- condition_tensors (Dict[str, ConditionType]): Set of conditions. If CFG is used,
- should be twice the batch size, being the concatenation of the conditions + null conditions.
- use_sampling (bool): Whether to use a sampling strategy or not.
- temp (float): Sampling temperature.
- top_k (int): K for "top-k" sampling.
- top_p (float): P for "top-p" sampling.
- cfg_coef (float): classifier free guidance coefficient
- Returns:
- next_token (torch.Tensor): Next token tensor of shape [B, K, 1].
- """
- B = sequence.shape[0]
- cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef
- model = self if self._fsdp is None else self._fsdp
- if self.two_step_cfg and cfg_conditions != {}:
- assert isinstance(cfg_conditions, tuple)
- condition_tensors, null_condition_tensors = cfg_conditions
- cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors)
- state = self.get_streaming_state()
- self.set_streaming_state(unconditional_state)
- uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors)
- unconditional_state.update(self.get_streaming_state())
- self.set_streaming_state(state)
- logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
- else:
- assert isinstance(cfg_conditions, dict)
- condition_tensors = cfg_conditions
- if condition_tensors:
- # Preparing for CFG, predicting both conditional and unconditional logits.
- sequence = torch.cat([sequence, sequence], dim=0)
- all_logits = model(
- sequence,
- conditions=[], condition_tensors=condition_tensors)
- if condition_tensors:
- cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card]
- logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
- else:
- logits = all_logits
-
- logits = logits.permute(0, 1, 3, 2) # [B, K, card, T]
- logits = logits[..., -1] # [B x K x card]
-
- if use_sampling:
- probs = torch.softmax(logits / temp, dim=-1)
- if top_p > 0.0:
- next_token = utils.sample_top_p(probs, p=top_p)
- elif top_k > 0:
- next_token = utils.sample_top_k(probs, k=top_k)
- else:
- next_token = utils.multinomial(probs, num_samples=1)
- else:
- next_token = torch.argmax(logits, dim=-1, keepdim=True)
-
- return next_token
-
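
The two ingredients of `_sample_next_token` can be exercised on random logits; a minimal sketch of the classifier-free-guidance mix followed by temperature and top-k sampling (the `sample_top_k` helper below is an inline stand-in for `utils.sample_top_k`, not the project's implementation):

```python
import torch

def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor:
    # keep the k most likely tokens, renormalise, then sample one index per row
    top_probs, top_idx = torch.topk(probs, k, dim=-1)
    top_probs = top_probs / top_probs.sum(dim=-1, keepdim=True)
    choice = torch.multinomial(top_probs, num_samples=1)
    return top_idx.gather(-1, choice)

B, K, card = 2, 4, 1024
cond_logits = torch.randn(B, K, card)    # logits with conditioning
uncond_logits = torch.randn(B, K, card)  # logits with the null condition
cfg_coef, temp, top_k = 3.0, 1.0, 250

# classifier-free guidance: push the distribution away from the unconditional one
logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
probs = torch.softmax(logits / temp, dim=-1)
next_token = sample_top_k(probs.view(B * K, card), k=top_k).view(B, K, 1)
print(next_token.shape)  # torch.Size([2, 4, 1])
```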
- @torch.no_grad()
- def generate(self,
- prompt: tp.Optional[torch.Tensor] = None,
- conditions: tp.List[ConditioningAttributes] = [],
- num_samples: tp.Optional[int] = None,
- max_gen_len: int = 256,
- use_sampling: bool = True,
- temp: float = 1.0,
- top_k: int = 250,
- top_p: float = 0.0,
- cfg_coef: tp.Optional[float] = None,
- two_step_cfg: tp.Optional[bool] = None,
- remove_prompts: bool = False,
- check: bool = False,
- callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor:
- """Generate tokens sampling from the model given a prompt or unconditionally. Generation can
- be perform in a greedy fashion or using sampling with top K and top P strategies.
-
- Args:
- prompt (Optional[torch.Tensor]): Prompt tokens of shape [B, K, T].
- conditions (list[ConditioningAttributes]): Set of conditions or None.
- num_samples (int or None): Number of samples to generate when no prompt and no conditions are given.
- max_gen_len (int): Maximum generation length.
- use_sampling (bool): Whether to use a sampling strategy or not.
- temp (float): Sampling temperature.
- top_k (int): K for "top-k" sampling.
- top_p (float): P for "top-p" sampling.
- cfg_coef (float, optional): Classifier-free guidance coefficient; defaults to the model's value.
- two_step_cfg (bool, optional): Whether to run two separate forward passes for CFG instead of one batched pass.
- remove_prompts (bool): Whether to remove prompts from generation or not.
- check (bool): Whether to run consistency checks on the generated sequence at every step.
- callback (Callable[[int, int], None], optional): Callback reporting (current step, total steps) during generation.
- Returns:
- torch.Tensor: Generated tokens.
- """
- assert not self.training, "generation shouldn't be used in training mode."
- first_param = next(iter(self.parameters()))
- device = first_param.device
-
- # Check that all input shapes are consistent.
- possible_num_samples = []
- if num_samples is not None:
- possible_num_samples.append(num_samples)
- elif prompt is not None:
- possible_num_samples.append(prompt.shape[0])
- elif conditions:
- possible_num_samples.append(len(conditions))
- else:
- possible_num_samples.append(1)
- assert all(x == possible_num_samples[0] for x in possible_num_samples), "Inconsistent input shapes"
- num_samples = possible_num_samples[0]
-
- # Below we create two sets of conditions: one conditional and one unconditional,
- # by merging the regular conditions with the null conditions.
- # We then do a single forward pass instead of two.
- # The reason for this is two-fold:
- # 1. it is about 2x faster than doing two forward passes;
- # 2. it avoids the streaming API treating the two passes as different time steps.
- # We also support doing two separate passes, in particular to ensure that
- # the padding structure is exactly the same between train and test.
- # With a batch size of 1, this can be slower though.
- cfg_conditions: CFGConditions
- two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg
- if conditions:
- null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions)
- if two_step_cfg:
- cfg_conditions = (
- self.condition_provider(self.condition_provider.tokenize(conditions)),
- self.condition_provider(self.condition_provider.tokenize(null_conditions)),
- )
- else:
- conditions = conditions + null_conditions
- tokenized = self.condition_provider.tokenize(conditions)
- cfg_conditions = self.condition_provider(tokenized)
- else:
- cfg_conditions = {}
-
- if prompt is None:
- assert num_samples > 0
- prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device)
-
- B, K, T = prompt.shape
- start_offset = T
- assert start_offset < max_gen_len
-
- pattern = self.pattern_provider.get_pattern(max_gen_len)
- # this token is used as default value for codes that are not generated yet
- unknown_token = -1
-
- # we generate codes up to the max_gen_len that will be mapped to the pattern sequence
- gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device)
- # filling the gen_codes with the prompt if needed
- gen_codes[..., :start_offset] = prompt
- # create the gen_sequence with proper interleaving from the pattern: [B, K, S]
- gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id)
- # retrieve the start_offset in the sequence:
- # it is the first sequence step that contains the `start_offset` timestep
- start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset)
- assert start_offset_sequence is not None
-
- with self.streaming():
- unconditional_state = self.get_streaming_state()
- prev_offset = 0
- gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S]
- for offset in range(start_offset_sequence, gen_sequence_len):
- # get current sequence (note that the streaming API is providing the caching over previous offsets)
- curr_sequence = gen_sequence[..., prev_offset:offset]
- curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1)
- if check:
- # check coherence between mask and sequence
- assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all()
- # should never happen as gen_sequence is filled progressively
- assert not (curr_sequence == unknown_token).any()
- # sample next token from the model, next token shape is [B, K, 1]
- next_token = self._sample_next_token(
- curr_sequence, cfg_conditions, unconditional_state, use_sampling, temp, top_k, top_p,
- cfg_coef=cfg_coef)
- # ensure the tokens that should be masked are properly set to special_token_id
- # as the model never outputs special_token_id
- valid_mask = mask[..., offset:offset+1].expand(B, -1, -1)
- next_token[~valid_mask] = self.special_token_id
- # ensure we don't overwrite prompt tokens: we only write over unknown tokens
- # (masked tokens are likewise left untouched, which is the intended behavior)
- gen_sequence[..., offset:offset+1] = torch.where(
- gen_sequence[..., offset:offset+1] == unknown_token,
- next_token, gen_sequence[..., offset:offset+1]
- )
- prev_offset = offset
- if callback is not None:
- callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence)
- unconditional_state.clear()
-
- # ensure sequence has been entirely filled
- assert not (gen_sequence == unknown_token).any()
- # ensure gen_sequence pattern and mask are matching
- # which means the gen_sequence is valid according to the pattern
- assert (
- gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id)
- ).all()
- # get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps
- out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token)
-
- # sanity checks over the returned codes and corresponding masks
- assert (out_codes[..., :max_gen_len] != unknown_token).all()
- assert (out_mask[..., :max_gen_len] == 1).all()
-
- out_start_offset = start_offset if remove_prompts else 0
- out_codes = out_codes[..., out_start_offset:max_gen_len]
-
- # ensure the returned codes are all valid
- assert (out_codes >= 0).all() and (out_codes <= self.card).all()
- return out_codes
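As described in the comments inside `generate`, classifier-free guidance is normally applied with a single batched forward pass: the token sequence is duplicated, run once against the merged conditional + null conditions, and the two halves of the logits are mixed. A minimal sketch of that pattern, where `model` and `condition_tensors` are placeholders rather than the actual API:

```python
import torch

def cfg_forward(model, sequence: torch.Tensor, condition_tensors, cfg_coef: float = 3.0) -> torch.Tensor:
    """Single batched forward pass for classifier-free guidance.

    `condition_tensors` is assumed to already hold the regular conditions concatenated
    with the null conditions, so the effective batch is twice the size of `sequence`.
    """
    B = sequence.shape[0]
    doubled = torch.cat([sequence, sequence], dim=0)         # [2B, K, S]
    all_logits = model(doubled, conditions=[], condition_tensors=condition_tensors)
    cond_logits, uncond_logits = all_logits.split(B, dim=0)  # [B, K, S, card] each
    # Push the prediction away from the unconditional distribution.
    return uncond_logits + (cond_logits - uncond_logits) * cfg_coef
```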
diff --git a/spaces/fffiloni/lama-video-watermark-remover/models/ade20k/segm_lib/nn/modules/comm.py b/spaces/fffiloni/lama-video-watermark-remover/models/ade20k/segm_lib/nn/modules/comm.py
deleted file mode 100644
index b64bf6ba3b3e7abbab375c6dd4a87d8239e62138..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/lama-video-watermark-remover/models/ade20k/segm_lib/nn/modules/comm.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-# File : comm.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
-# Distributed under MIT License.
-
-import queue
-import collections
-import threading
-
-__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
-
-
-class FutureResult(object):
- """A thread-safe future implementation. Used only as one-to-one pipe."""
-
- def __init__(self):
- self._result = None
- self._lock = threading.Lock()
- self._cond = threading.Condition(self._lock)
-
- def put(self, result):
- with self._lock:
- assert self._result is None, 'Previous result hasn\'t been fetched.'
- self._result = result
- self._cond.notify()
-
- def get(self):
- with self._lock:
- while self._result is None:  # loop to guard against spurious wakeups
- self._cond.wait()
-
- res = self._result
- self._result = None
- return res
-
-
-_MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
-_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result'])
-
-
-class SlavePipe(_SlavePipeBase):
- """Pipe for master-slave communication."""
-
- def run_slave(self, msg):
- self.queue.put((self.identifier, msg))
- ret = self.result.get()
- self.queue.put(True)
- return ret
-
-
-class SyncMaster(object):
- """An abstract `SyncMaster` object.
-
- - During the replication, as the data parallel wrapper triggers a callback on each module, all slave devices should
- call `register(id)` and obtain a `SlavePipe` to communicate with the master.
- - During the forward pass, the master device invokes `run_master`; all messages from the slave devices are collected
- and passed to a registered callback.
- - After receiving the messages, the master device should gather the information and determine the message to be passed
- back to each slave device.
- """
-
- def __init__(self, master_callback):
- """
-
- Args:
- master_callback: a callback to be invoked after having collected messages from slave devices.
- """
- self._master_callback = master_callback
- self._queue = queue.Queue()
- self._registry = collections.OrderedDict()
- self._activated = False
-
- def register_slave(self, identifier):
- """
- Register a slave device.
-
- Args:
- identifier: an identifier, usually is the device id.
-
- Returns: a `SlavePipe` object which can be used to communicate with the master device.
-
- """
- if self._activated:
- assert self._queue.empty(), 'Queue is not clean before next initialization.'
- self._activated = False
- self._registry.clear()
- future = FutureResult()
- self._registry[identifier] = _MasterRegistry(future)
- return SlavePipe(identifier, self._queue, future)
-
- def run_master(self, master_msg):
- """
- Main entry for the master device in each forward pass.
- The messages are first collected from each device (including the master device), and then
- a callback is invoked to compute the message to be sent back to each device
- (including the master device).
-
- Args:
- master_msg: the message that the master wants to send to itself. This will be placed as the first
- message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
-
- Returns: the message to be sent back to the master device.
-
- """
- self._activated = True
-
- intermediates = [(0, master_msg)]
- for i in range(self.nr_slaves):
- intermediates.append(self._queue.get())
-
- results = self._master_callback(intermediates)
- assert results[0][0] == 0, 'The first result should belong to the master.'
-
- for i, res in results:
- if i == 0:
- continue
- self._registry[i].result.put(res)
-
- for i in range(self.nr_slaves):
- assert self._queue.get() is True
-
- return results[0][1]
-
- @property
- def nr_slaves(self):
- return len(self._registry)
diff --git a/spaces/freddyaboulton/gradio_folium/src/backend/gradio_folium/__init__.py b/spaces/freddyaboulton/gradio_folium/src/backend/gradio_folium/__init__.py
deleted file mode 100644
index 13ead27f8920b49531ba2e650df7a174ade2a1f9..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/gradio_folium/src/backend/gradio_folium/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-
-from .folium import Folium
-
-__all__ = ['Folium']
diff --git a/spaces/freddyaboulton/gradio_pdf/src/demo/app.py b/spaces/freddyaboulton/gradio_pdf/src/demo/app.py
deleted file mode 100644
index 2e5e548fe3ac2000a710059bee83584a22de8943..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/gradio_pdf/src/demo/app.py
+++ /dev/null
@@ -1,29 +0,0 @@
-
-import gradio as gr
-from gradio_pdf import PDF
-from pdf2image import convert_from_path
-from transformers import pipeline
-from pathlib import Path
-
-dir_ = Path(__file__).parent
-
-p = pipeline(
- "document-question-answering",
- model="impira/layoutlm-document-qa",
-)
-
-def qa(question: str, doc: str) -> str:
- img = convert_from_path(doc)[0]
- output = p(img, question)
- return sorted(output, key=lambda x: x["score"], reverse=True)[0]['answer']
-
-
-demo = gr.Interface(
- qa,
- [gr.Textbox(label="Question"), PDF(label="Document")],
- gr.Textbox(),
- examples=[["What is the total gross worth?", str(dir_ / "invoice_2.pdf")],
- ["Whos is being invoiced?", str(dir_ / "sample_invoice.pdf")]]
-)
-
-demo.launch()
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Anno 2070 Crack Solidcore32.dll Download [BEST]trmds.md b/spaces/gotiQspiryo/whisper-ui/examples/Anno 2070 Crack Solidcore32.dll Download [BEST]trmds.md
deleted file mode 100644
index b9562b4cc88048b7d3dec0c722d08355ec84633e..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Anno 2070 Crack Solidcore32.dll Download [BEST]trmds.md
+++ /dev/null
@@ -1,6 +0,0 @@
-anno 2070 crack solidcore32.dll downloadtrmds
DOWNLOAD ✯✯✯ https://urlgoal.com/2uyMdM
-
-
-
-
diff --git a/spaces/gptjx/02/run_Windows.bat b/spaces/gptjx/02/run_Windows.bat
deleted file mode 100644
index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000
--- a/spaces/gptjx/02/run_Windows.bat
+++ /dev/null
@@ -1,5 +0,0 @@
-@echo off
-echo Opening ChuanhuChatGPT...
-
-REM Open powershell via bat
-start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py"
diff --git a/spaces/gradio/HuBERT/examples/latent_depth/latent_depth_src/loss/__init__.py b/spaces/gradio/HuBERT/examples/latent_depth/latent_depth_src/loss/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/grisiemjahand/Image-and-3D-Model-Creator/PIFu/lib/renderer/__init__.py b/spaces/grisiemjahand/Image-and-3D-Model-Creator/PIFu/lib/renderer/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/gsaivinay/Llama-2-13B-GGML-UI/next.config.js b/spaces/gsaivinay/Llama-2-13B-GGML-UI/next.config.js
deleted file mode 100644
index f89b5cc4ce37ad426bc95d59b41feafce1e5d524..0000000000000000000000000000000000000000
--- a/spaces/gsaivinay/Llama-2-13B-GGML-UI/next.config.js
+++ /dev/null
@@ -1,19 +0,0 @@
-const { i18n } = require('./next-i18next.config');
-
-/** @type {import('next').NextConfig} */
-const nextConfig = {
- i18n,
- output: "standalone",
- reactStrictMode: true,
-
- webpack(config, { isServer, dev }) {
- config.experiments = {
- asyncWebAssembly: true,
- layers: true,
- };
-
- return config;
- },
-};
-
-module.exports = nextConfig;
diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/torch_utils/ops/bias_act.py b/spaces/gyugnsu/DragGan-Inversion/PTI/torch_utils/ops/bias_act.py
deleted file mode 100644
index 4bcb409a89ccf6c6f6ecfca5962683df2d280b1f..0000000000000000000000000000000000000000
--- a/spaces/gyugnsu/DragGan-Inversion/PTI/torch_utils/ops/bias_act.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Custom PyTorch ops for efficient bias and activation."""
-
-import os
-import warnings
-import numpy as np
-import torch
-import dnnlib
-import traceback
-
-from .. import custom_ops
-from .. import misc
-
-#----------------------------------------------------------------------------
-
-activation_funcs = {
- 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False),
- 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False),
- 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False),
- 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True),
- 'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True),
- 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True),
- 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True),
- 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True),
- 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True),
-}
-
-#----------------------------------------------------------------------------
-
-_inited = False
-_plugin = None
-_null_tensor = torch.empty([0])
-
-def _init():
- global _inited, _plugin
- if not _inited:
- _inited = True
- sources = ['bias_act.cpp', 'bias_act.cu']
- sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
- try:
- _plugin = custom_ops.get_plugin('bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
- except Exception:
- warnings.warn('Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
- return _plugin is not None
-
-#----------------------------------------------------------------------------
-
-def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
- r"""Fused bias and activation function.
-
- Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
- and scales the result by `gain`. Each of the steps is optional. In most cases,
- the fused op is considerably more efficient than performing the same calculation
- using standard PyTorch ops. It supports first and second order gradients,
- but not third order gradients.
-
- Args:
- x: Input activation tensor. Can be of any shape.
- b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
- as `x`. The shape must be known, and it must match the dimension of `x`
- corresponding to `dim`.
- dim: The dimension in `x` corresponding to the elements of `b`.
- The value of `dim` is ignored if `b` is not specified.
- act: Name of the activation function to evaluate, or `"linear"` to disable.
- Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
- See `activation_funcs` for a full list. `None` is not allowed.
- alpha: Shape parameter for the activation function, or `None` to use the default.
- gain: Scaling factor for the output tensor, or `None` to use default.
- See `activation_funcs` for the default scaling of each activation function.
- If unsure, consider specifying 1.
- clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable
- the clamping (default).
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
-
- Returns:
- Tensor of the same shape and datatype as `x`.
- """
- assert isinstance(x, torch.Tensor)
- assert impl in ['ref', 'cuda']
- if impl == 'cuda' and x.device.type == 'cuda' and _init():
- return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
- return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
-
-#----------------------------------------------------------------------------
-
-@misc.profiled_function
-def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
- """Slow reference implementation of `bias_act()` using standard TensorFlow ops.
- """
- assert isinstance(x, torch.Tensor)
- assert clamp is None or clamp >= 0
- spec = activation_funcs[act]
- alpha = float(alpha if alpha is not None else spec.def_alpha)
- gain = float(gain if gain is not None else spec.def_gain)
- clamp = float(clamp if clamp is not None else -1)
-
- # Add bias.
- if b is not None:
- assert isinstance(b, torch.Tensor) and b.ndim == 1
- assert 0 <= dim < x.ndim
- assert b.shape[0] == x.shape[dim]
- x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)])
-
- # Evaluate activation function.
- alpha = float(alpha)
- x = spec.func(x, alpha=alpha)
-
- # Scale by gain.
- gain = float(gain)
- if gain != 1:
- x = x * gain
-
- # Clamp.
- if clamp >= 0:
- x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type
- return x
-
-#----------------------------------------------------------------------------
-
-_bias_act_cuda_cache = dict()
-
-def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
- """Fast CUDA implementation of `bias_act()` using custom ops.
- """
- # Parse arguments.
- assert clamp is None or clamp >= 0
- spec = activation_funcs[act]
- alpha = float(alpha if alpha is not None else spec.def_alpha)
- gain = float(gain if gain is not None else spec.def_gain)
- clamp = float(clamp if clamp is not None else -1)
-
- # Lookup from cache.
- key = (dim, act, alpha, gain, clamp)
- if key in _bias_act_cuda_cache:
- return _bias_act_cuda_cache[key]
-
- # Forward op.
- class BiasActCuda(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x, b): # pylint: disable=arguments-differ
- ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format
- x = x.contiguous(memory_format=ctx.memory_format)
- b = b.contiguous() if b is not None else _null_tensor
- y = x
- if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
- y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
- ctx.save_for_backward(
- x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
- b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
- y if 'y' in spec.ref else _null_tensor)
- return y
-
- @staticmethod
- def backward(ctx, dy): # pylint: disable=arguments-differ
- dy = dy.contiguous(memory_format=ctx.memory_format)
- x, b, y = ctx.saved_tensors
- dx = None
- db = None
-
- if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
- dx = dy
- if act != 'linear' or gain != 1 or clamp >= 0:
- dx = BiasActCudaGrad.apply(dy, x, b, y)
-
- if ctx.needs_input_grad[1]:
- db = dx.sum([i for i in range(dx.ndim) if i != dim])
-
- return dx, db
-
- # Backward op.
- class BiasActCudaGrad(torch.autograd.Function):
- @staticmethod
- def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
- ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format
- dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
- ctx.save_for_backward(
- dy if spec.has_2nd_grad else _null_tensor,
- x, b, y)
- return dx
-
- @staticmethod
- def backward(ctx, d_dx): # pylint: disable=arguments-differ
- d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
- dy, x, b, y = ctx.saved_tensors
- d_dy = None
- d_x = None
- d_b = None
- d_y = None
-
- if ctx.needs_input_grad[0]:
- d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
-
- if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
- d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
-
- if spec.has_2nd_grad and ctx.needs_input_grad[2]:
- d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
-
- return d_dy, d_x, d_b, d_y
-
- # Add to cache.
- _bias_act_cuda_cache[key] = BiasActCuda
- return BiasActCuda
-
-#----------------------------------------------------------------------------
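For reference, the computation performed by `bias_act` on the slow path can be reproduced with standard PyTorch ops. The sketch below mirrors `_bias_act_ref` for the `'lrelu'` activation with its default alpha and gain from the `activation_funcs` table; the tensor shapes are illustrative.

```python
import numpy as np
import torch
import torch.nn.functional as F

x = torch.randn(4, 64, 32, 32)   # [batch, channels, height, width]
b = torch.zeros(64)              # one bias value per channel (dim=1)

# Equivalent of bias_act(x, b, dim=1, act='lrelu') on the reference path:
y = x + b.reshape(1, -1, 1, 1)   # add bias along the channel dimension
y = F.leaky_relu(y, 0.2)         # def_alpha for 'lrelu'
y = y * np.sqrt(2)               # def_gain for 'lrelu'
```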
diff --git a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/training/networks_stylegan3.py b/spaces/gyugnsu/DragGan-Inversion/stylegan_human/training/networks_stylegan3.py
deleted file mode 100644
index d4b5b6ae121c6e8b89283f0763108b5471ea4af1..0000000000000000000000000000000000000000
--- a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/training/networks_stylegan3.py
+++ /dev/null
@@ -1,634 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Generator architecture from the paper
-"Alias-Free Generative Adversarial Networks"."""
-
-import numpy as np
-import scipy.signal
-import scipy.special  # used by design_lowpass_filter (scipy.special.j1)
-import scipy.optimize
-import torch
-import torch.nn.functional as F
-from torch_utils import misc
-from torch_utils import persistence
-from torch_utils.ops import conv2d_gradfix
-from torch_utils.ops import filtered_lrelu
-from torch_utils.ops import bias_act
-
-# ----------------------------------------------------------------------------
-
-
-@misc.profiled_function
-def modulated_conv2d(
- # Input tensor: [batch_size, in_channels, in_height, in_width]
- x,
- # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width]
- w,
- s, # Style tensor: [batch_size, in_channels]
- demodulate=True, # Apply weight demodulation?
- padding=0, # Padding: int or [padH, padW]
- input_gain=None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels]
-):
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
- batch_size = int(x.shape[0])
- out_channels, in_channels, kh, kw = w.shape
- misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk]
- misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
- misc.assert_shape(s, [batch_size, in_channels]) # [NI]
-
- # Pre-normalize inputs.
- if demodulate:
- w = w * w.square().mean([1, 2, 3], keepdim=True).rsqrt()
- s = s * s.square().mean().rsqrt()
-
- # Modulate weights.
- w = w.unsqueeze(0) # [NOIkk]
- w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
-
- # Demodulate weights.
- if demodulate:
- dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO]
- w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk]
-
- # Apply input scaling.
- if input_gain is not None:
- input_gain = input_gain.expand(batch_size, in_channels) # [NI]
- w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
-
- # Execute as one fused op using grouped convolution.
- x = x.reshape(1, -1, *x.shape[2:])
- w = w.reshape(-1, in_channels, kh, kw)
- x = conv2d_gradfix.conv2d(input=x, weight=w.to(
- x.dtype), padding=padding, groups=batch_size)
- x = x.reshape(batch_size, -1, *x.shape[2:])
- return x
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class FullyConnectedLayer(torch.nn.Module):
- def __init__(self,
- in_features, # Number of input features.
- out_features, # Number of output features.
- # Activation function: 'relu', 'lrelu', etc.
- activation='linear',
- bias=True, # Apply additive bias before the activation function?
- lr_multiplier=1, # Learning rate multiplier.
- # Initial standard deviation of the weight tensor.
- weight_init=1,
- bias_init=0, # Initial value of the additive bias.
- ):
- super().__init__()
- self.in_features = in_features
- self.out_features = out_features
- self.activation = activation
- self.weight = torch.nn.Parameter(torch.randn(
- [out_features, in_features]) * (weight_init / lr_multiplier))
- bias_init = np.broadcast_to(np.asarray(
- bias_init, dtype=np.float32), [out_features])
- self.bias = torch.nn.Parameter(torch.from_numpy(
- bias_init / lr_multiplier)) if bias else None
- self.weight_gain = lr_multiplier / np.sqrt(in_features)
- self.bias_gain = lr_multiplier
-
- def forward(self, x):
- w = self.weight.to(x.dtype) * self.weight_gain
- b = self.bias
- if b is not None:
- b = b.to(x.dtype)
- if self.bias_gain != 1:
- b = b * self.bias_gain
- if self.activation == 'linear' and b is not None:
- x = torch.addmm(b.unsqueeze(0), x, w.t())
- else:
- x = x.matmul(w.t())
- x = bias_act.bias_act(x, b, act=self.activation)
- return x
-
- def extra_repr(self):
- return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class MappingNetwork(torch.nn.Module):
- def __init__(self,
- z_dim, # Input latent (Z) dimensionality.
- # Conditioning label (C) dimensionality, 0 = no labels.
- c_dim,
- # Intermediate latent (W) dimensionality.
- w_dim,
- # Number of intermediate latents to output.
- num_ws,
- num_layers=2, # Number of mapping layers.
- # Learning rate multiplier for the mapping layers.
- lr_multiplier=0.01,
- # Decay for tracking the moving average of W during training.
- w_avg_beta=0.998,
- ):
- super().__init__()
- self.z_dim = z_dim
- self.c_dim = c_dim
- self.w_dim = w_dim
- self.num_ws = num_ws
- self.num_layers = num_layers
- self.w_avg_beta = w_avg_beta
-
- # Construct layers.
- self.embed = FullyConnectedLayer(
- self.c_dim, self.w_dim) if self.c_dim > 0 else None
- features = [self.z_dim + (self.w_dim if self.c_dim >
- 0 else 0)] + [self.w_dim] * self.num_layers
- for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]):
- layer = FullyConnectedLayer(
- in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier)
- setattr(self, f'fc{idx}', layer)
- self.register_buffer('w_avg', torch.zeros([w_dim]))
-
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
- misc.assert_shape(z, [None, self.z_dim])
- if truncation_cutoff is None:
- truncation_cutoff = self.num_ws
-
- # Embed, normalize, and concatenate inputs.
- x = z.to(torch.float32)
- x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt()
- if self.c_dim > 0:
- misc.assert_shape(c, [None, self.c_dim])
- y = self.embed(c.to(torch.float32))
- y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt()
- x = torch.cat([x, y], dim=1) if x is not None else y
-
- # Execute layers.
- for idx in range(self.num_layers):
- x = getattr(self, f'fc{idx}')(x)
-
- # Update moving average of W.
- if update_emas:
- self.w_avg.copy_(x.detach().mean(
- dim=0).lerp(self.w_avg, self.w_avg_beta))
-
- # Broadcast and apply truncation.
- x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
- if truncation_psi != 1:
- x[:, :truncation_cutoff] = self.w_avg.lerp(
- x[:, :truncation_cutoff], truncation_psi)
- return x
-
- def extra_repr(self):
- return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
-
-# ----------------------------------------------------------------------------
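The truncation trick applied at the end of `MappingNetwork.forward` is easy to see in isolation: the first `truncation_cutoff` latents are pulled toward the running average `w_avg` by a factor `truncation_psi`. A small sketch with illustrative sizes:

```python
import torch

num_ws, w_dim = 16, 512
truncation_psi, truncation_cutoff = 0.7, 8
w_avg = torch.zeros(w_dim)              # tracked buffer from the mapping network
ws = torch.randn(1, num_ws, w_dim)      # broadcast latents for one sample

# Interpolate toward the average; psi=1 leaves the latents unchanged.
ws[:, :truncation_cutoff] = w_avg.lerp(ws[:, :truncation_cutoff], truncation_psi)
```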
-
-
-@persistence.persistent_class
-class SynthesisInput(torch.nn.Module):
- def __init__(self,
- w_dim, # Intermediate latent (W) dimensionality.
- channels, # Number of output channels.
- size, # Output spatial size: int or [width, height].
- sampling_rate, # Output sampling rate.
- bandwidth, # Output bandwidth.
- ):
- super().__init__()
- self.w_dim = w_dim
- self.channels = channels
- self.size = np.broadcast_to(np.asarray(size), [2])
- self.sampling_rate = sampling_rate
- self.bandwidth = bandwidth
-
- # Draw random frequencies from uniform 2D disc.
- freqs = torch.randn([self.channels, 2])
- radii = freqs.square().sum(dim=1, keepdim=True).sqrt()
- freqs /= radii * radii.square().exp().pow(0.25)
- freqs *= bandwidth
- phases = torch.rand([self.channels]) - 0.5
-
- # Setup parameters and buffers.
- self.weight = torch.nn.Parameter(
- torch.randn([self.channels, self.channels]))
- self.affine = FullyConnectedLayer(
- w_dim, 4, weight_init=0, bias_init=[1, 0, 0, 0])
- # User-specified inverse transform wrt. resulting image.
- self.register_buffer('transform', torch.eye(3, 3))
- self.register_buffer('freqs', freqs)
- self.register_buffer('phases', phases)
-
- def forward(self, w):
- # Introduce batch dimension.
- transforms = self.transform.unsqueeze(0) # [batch, row, col]
- freqs = self.freqs.unsqueeze(0) # [batch, channel, xy]
- phases = self.phases.unsqueeze(0) # [batch, channel]
-
- # Apply learned transformation.
- t = self.affine(w) # t = (r_c, r_s, t_x, t_y)
- # t' = (r'_c, r'_s, t'_x, t'_y)
- t = t / t[:, :2].norm(dim=1, keepdim=True)
- # Inverse rotation wrt. resulting image.
- m_r = torch.eye(3, device=w.device).unsqueeze(
- 0).repeat([w.shape[0], 1, 1])
- m_r[:, 0, 0] = t[:, 0] # r'_c
- m_r[:, 0, 1] = -t[:, 1] # r'_s
- m_r[:, 1, 0] = t[:, 1] # r'_s
- m_r[:, 1, 1] = t[:, 0] # r'_c
- # Inverse translation wrt. resulting image.
- m_t = torch.eye(3, device=w.device).unsqueeze(
- 0).repeat([w.shape[0], 1, 1])
- m_t[:, 0, 2] = -t[:, 2] # t'_x
- m_t[:, 1, 2] = -t[:, 3] # t'_y
- # First rotate resulting image, then translate, and finally apply user-specified transform.
- transforms = m_r @ m_t @ transforms
-
- # Transform frequencies.
- phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2)
- freqs = freqs @ transforms[:, :2, :2]
-
- # Dampen out-of-band frequencies that may occur due to the user-specified transform.
- amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) /
- (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1)
-
- # Construct sampling grid.
- theta = torch.eye(2, 3, device=w.device)
- theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate
- theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate
- grids = torch.nn.functional.affine_grid(theta.unsqueeze(
- 0), [1, 1, self.size[1], self.size[0]], align_corners=False)
-
- # Compute Fourier features.
- x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)
- ).squeeze(3) # [batch, height, width, channel]
- x = x + phases.unsqueeze(1).unsqueeze(2)
- x = torch.sin(x * (np.pi * 2))
- x = x * amplitudes.unsqueeze(1).unsqueeze(2)
-
- # Apply trainable mapping.
- weight = self.weight / np.sqrt(self.channels)
- x = x @ weight.t()
-
- # Ensure correct shape.
- x = x.permute(0, 3, 1, 2) # [batch, channel, height, width]
- misc.assert_shape(x, [w.shape[0], self.channels,
- int(self.size[1]), int(self.size[0])])
- return x
-
- def extra_repr(self):
- return '\n'.join([
- f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},',
- f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}'])
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class SynthesisLayer(torch.nn.Module):
- def __init__(self,
- # Intermediate latent (W) dimensionality.
- w_dim,
- is_torgb, # Is this the final ToRGB layer?
- is_critically_sampled, # Does this layer use critical sampling?
- use_fp16, # Does this layer use FP16?
-
- # Input & output specifications.
- in_channels, # Number of input channels.
- out_channels, # Number of output channels.
- # Input spatial size: int or [width, height].
- in_size,
- # Output spatial size: int or [width, height].
- out_size,
- in_sampling_rate, # Input sampling rate (s).
- out_sampling_rate, # Output sampling rate (s).
- # Input cutoff frequency (f_c).
- in_cutoff,
- # Output cutoff frequency (f_c).
- out_cutoff,
- # Input transition band half-width (f_h).
- in_half_width,
- # Output transition band half-width (f_h).
- out_half_width,
-
- # Hyperparameters.
- # Convolution kernel size. Ignored for the final ToRGB layer.
- conv_kernel=3,
- # Low-pass filter size relative to the lower resolution when up/downsampling.
- filter_size=6,
- # Relative sampling rate for leaky ReLU. Ignored for the final ToRGB layer.
- lrelu_upsampling=2,
- # Use radially symmetric downsampling filter? Ignored for critically sampled layers.
- use_radial_filters=False,
- # Clamp the output to [-X, +X], None = disable clamping.
- conv_clamp=256,
- # Decay rate for the moving average of input magnitudes.
- magnitude_ema_beta=0.999,
- ):
- super().__init__()
- self.w_dim = w_dim
- self.is_torgb = is_torgb
- self.is_critically_sampled = is_critically_sampled
- self.use_fp16 = use_fp16
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.in_size = np.broadcast_to(np.asarray(in_size), [2])
- self.out_size = np.broadcast_to(np.asarray(out_size), [2])
- self.in_sampling_rate = in_sampling_rate
- self.out_sampling_rate = out_sampling_rate
- self.tmp_sampling_rate = max(
- in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling)
- self.in_cutoff = in_cutoff
- self.out_cutoff = out_cutoff
- self.in_half_width = in_half_width
- self.out_half_width = out_half_width
- self.conv_kernel = 1 if is_torgb else conv_kernel
- self.conv_clamp = conv_clamp
- self.magnitude_ema_beta = magnitude_ema_beta
-
- # Setup parameters and buffers.
- self.affine = FullyConnectedLayer(
- self.w_dim, self.in_channels, bias_init=1)
- self.weight = torch.nn.Parameter(torch.randn(
- [self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel]))
- self.bias = torch.nn.Parameter(torch.zeros([self.out_channels]))
- self.register_buffer('magnitude_ema', torch.ones([]))
-
- # Design upsampling filter.
- self.up_factor = int(
- np.rint(self.tmp_sampling_rate / self.in_sampling_rate))
- assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate
- self.up_taps = filter_size * \
- self.up_factor if self.up_factor > 1 and not self.is_torgb else 1
- self.register_buffer('up_filter', self.design_lowpass_filter(
- numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate))
-
- # Design downsampling filter.
- self.down_factor = int(
- np.rint(self.tmp_sampling_rate / self.out_sampling_rate))
- assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate
- self.down_taps = filter_size * \
- self.down_factor if self.down_factor > 1 and not self.is_torgb else 1
- self.down_radial = use_radial_filters and not self.is_critically_sampled
- self.register_buffer('down_filter', self.design_lowpass_filter(
- numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial))
-
- # Compute padding.
- # Desired output size before downsampling.
- pad_total = (self.out_size - 1) * self.down_factor + 1
- # Input size after upsampling.
- pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor
- # Size reduction caused by the filters.
- pad_total += self.up_taps + self.down_taps - 2
- # Shift sample locations according to the symmetric interpretation (Appendix C.3).
- pad_lo = (pad_total + self.up_factor) // 2
- pad_hi = pad_total - pad_lo
- self.padding = [int(pad_lo[0]), int(pad_hi[0]),
- int(pad_lo[1]), int(pad_hi[1])]
-
- def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False):
- assert noise_mode in ['random', 'const', 'none'] # unused
- misc.assert_shape(x, [None, self.in_channels, int(
- self.in_size[1]), int(self.in_size[0])])
- misc.assert_shape(w, [x.shape[0], self.w_dim])
-
- # Track input magnitude.
- if update_emas:
- with torch.autograd.profiler.record_function('update_magnitude_ema'):
- magnitude_cur = x.detach().to(torch.float32).square().mean()
- self.magnitude_ema.copy_(magnitude_cur.lerp(
- self.magnitude_ema, self.magnitude_ema_beta))
- input_gain = self.magnitude_ema.rsqrt()
-
- # Execute affine layer.
- styles = self.affine(w)
- if self.is_torgb:
- weight_gain = 1 / \
- np.sqrt(self.in_channels * (self.conv_kernel ** 2))
- styles = styles * weight_gain
-
- # Execute modulated conv2d.
- dtype = torch.float16 if (
- self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32
- x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles,
- padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain)
-
- # Execute bias, filtered leaky ReLU, and clamping.
- gain = 1 if self.is_torgb else np.sqrt(2)
- slope = 1 if self.is_torgb else 0.2
- x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype),
- up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp)
-
- # Ensure correct shape and dtype.
- misc.assert_shape(x, [None, self.out_channels, int(
- self.out_size[1]), int(self.out_size[0])])
- assert x.dtype == dtype
- return x
-
- @staticmethod
- def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False):
- assert numtaps >= 1
-
- # Identity filter.
- if numtaps == 1:
- return None
-
- # Separable Kaiser low-pass filter.
- if not radial:
- f = scipy.signal.firwin(
- numtaps=numtaps, cutoff=cutoff, width=width, fs=fs)
- return torch.as_tensor(f, dtype=torch.float32)
-
- # Radially symmetric jinc-based filter.
- x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs
- r = np.hypot(*np.meshgrid(x, x))
- f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r)
- beta = scipy.signal.kaiser_beta(
- scipy.signal.kaiser_atten(numtaps, width / (fs / 2)))
- w = np.kaiser(numtaps, beta)
- f *= np.outer(w, w)
- f /= np.sum(f)
- return torch.as_tensor(f, dtype=torch.float32)
-
- def extra_repr(self):
- return '\n'.join([
- f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},',
- f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},',
- f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},',
- f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},',
- f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},',
- f'in_size={list(self.in_size)}, out_size={list(self.out_size)},',
- f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}'])
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class SynthesisNetwork(torch.nn.Module):
- def __init__(self,
- # Intermediate latent (W) dimensionality.
- w_dim,
- img_resolution, # Output image resolution.
- img_channels, # Number of color channels.
- # Overall multiplier for the number of channels.
- channel_base=32768,
- # Maximum number of channels in any layer.
- channel_max=512,
- # Total number of layers, excluding Fourier features and ToRGB.
- num_layers=14,
- # Number of critically sampled layers at the end.
- num_critical=2,
- # Cutoff frequency of the first layer (f_{c,0}).
- first_cutoff=2,
- # Minimum stopband of the first layer (f_{t,0}).
- first_stopband=2**2.1,
- # Minimum stopband of the last layer, expressed relative to the cutoff.
- last_stopband_rel=2**0.3,
- # Number of additional pixels outside the image.
- margin_size=10,
- output_scale=0.25, # Scale factor for the output image.
- # Use FP16 for the N highest resolutions.
- num_fp16_res=4,
- # Arguments for SynthesisLayer.
- **layer_kwargs,
- ):
- super().__init__()
- self.w_dim = w_dim
- self.num_ws = num_layers + 2
- self.img_resolution = img_resolution
- self.img_channels = img_channels
- self.num_layers = num_layers
- self.num_critical = num_critical
- self.margin_size = margin_size
- self.output_scale = output_scale
- self.num_fp16_res = num_fp16_res
-
- # Geometric progression of layer cutoffs and min. stopbands.
- last_cutoff = self.img_resolution / 2 # f_{c,N}
- last_stopband = last_cutoff * last_stopband_rel # f_{t,N}
- exponents = np.minimum(
- np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1)
- cutoffs = first_cutoff * \
- (last_cutoff / first_cutoff) ** exponents # f_c[i]
- stopbands = first_stopband * \
- (last_stopband / first_stopband) ** exponents # f_t[i]
-
- # Compute remaining layer parameters.
- sampling_rates = np.exp2(
- np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i]
- half_widths = np.maximum(
- stopbands, sampling_rates / 2) - cutoffs # f_h[i]
- sizes = sampling_rates + self.margin_size * 2
- sizes[-2:] = self.img_resolution
- channels = np.rint(np.minimum(
- (channel_base / 2) / cutoffs, channel_max))
- channels[-1] = self.img_channels
-
- # Construct layers.
- self.input = SynthesisInput(
- w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]),
- sampling_rate=sampling_rates[0], bandwidth=cutoffs[0])
- self.layer_names = []
- for idx in range(self.num_layers + 1):
- prev = max(idx - 1, 0)
- is_torgb = (idx == self.num_layers)
- is_critically_sampled = (
- idx >= self.num_layers - self.num_critical)
- use_fp16 = (sampling_rates[idx] * (2 **
- self.num_fp16_res) > self.img_resolution)
- layer = SynthesisLayer(
- w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16,
- in_channels=int(channels[prev]), out_channels=int(channels[idx]),
- in_size=int(sizes[prev]), out_size=int(sizes[idx]),
- in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]),
- in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx],
- in_half_width=half_widths[prev], out_half_width=half_widths[idx],
- **layer_kwargs)
- name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}'
- setattr(self, name, layer)
- self.layer_names.append(name)
-
- def forward(self, ws, **layer_kwargs):
- misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
- ws = ws.to(torch.float32).unbind(dim=1)
-
- # Execute layers.
- x = self.input(ws[0])
- for name, w in zip(self.layer_names, ws[1:]):
- x = getattr(self, name)(x, w, **layer_kwargs)
- if self.output_scale != 1:
- x = x * self.output_scale
-
- # Ensure correct shape and dtype.
- misc.assert_shape(x, [None, self.img_channels,
- self.img_resolution, self.img_resolution])
- x = x.to(torch.float32)
- return x
-
- def extra_repr(self):
- return '\n'.join([
- f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
- f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
- f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},',
- f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}'])
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class Generator(torch.nn.Module):
- def __init__(self,
- z_dim, # Input latent (Z) dimensionality.
- # Conditioning label (C) dimensionality.
- c_dim,
- # Intermediate latent (W) dimensionality.
- w_dim,
- img_resolution, # Output resolution.
- img_channels, # Number of output color channels.
- mapping_kwargs={}, # Arguments for MappingNetwork.
- resize=None,
- **synthesis_kwargs, # Arguments for SynthesisNetwork.
- ):
- super().__init__()
- self.z_dim = z_dim
- self.c_dim = c_dim
- self.w_dim = w_dim
- self.img_resolution = img_resolution
- self.img_channels = img_channels
- self.synthesis = SynthesisNetwork(
- w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
- self.num_ws = self.synthesis.num_ws
- self.mapping = MappingNetwork(
- z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
- self.resize = resize
-
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, **synthesis_kwargs):
- if input_is_w:
- ws = z
- if ws.dim() == 2:
- ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1])
- else:
- ws = self.mapping(z, c, truncation_psi=truncation_psi,
- truncation_cutoff=truncation_cutoff, update_emas=update_emas)
- img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
- if self.resize is not None:
- img = imresize(img, [self.resize, self.resize])
- return img
-
-# ----------------------------------------------------------------------------
-
-
-def imresize(image, size):
- dim = image.dim()
- if dim == 3:
- image = image.unsqueeze(1)
- b, _, h, w = image.shape
- if size[0] > h:
- image = F.interpolate(image, size, mode='bilinear')
- elif size[0] < h:
- image = F.interpolate(image, size, mode='area')
- if dim == 3:
- image = image.squeeze(1)
- return image
diff --git a/spaces/h2oai/wave-tour/examples/upload.py b/spaces/h2oai/wave-tour/examples/upload.py
deleted file mode 100644
index 0c41976cf7bc234a84ec8173192735aea2f9846f..0000000000000000000000000000000000000000
--- a/spaces/h2oai/wave-tour/examples/upload.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Uploads
-# Upload files to the Wave server.
-# ---
-import os
-from h2o_wave import site, ui
-
-
-def write_csv(filename, rows):
- with open(filename, 'w', encoding='utf-8') as f:
- f.write('\n'.join([','.join([str(x) for x in row]) for row in rows]))
-
-
-# Create a couple of fake CSV files
-write_csv('squares.csv', [[x, x * x] for x in range(1, 11)])
-write_csv('cubes.csv', [[x, x * x * x] for x in range(1, 11)])
-
-# Upload CSVs
-squares_path, cubes_path = site.upload(['squares.csv', 'cubes.csv'])
-
-# Delete local CSVs
-os.remove('squares.csv')
-os.remove('cubes.csv')
-
-# Display links to these CSVs
-page = site['/demo']
-page['example'] = ui.markdown_card(
- box='1 1 2 2',
- title='Download CSVs',
- content=f'[Squares]({squares_path}) [Cubes]({cubes_path})',
-)
-page.save()
diff --git a/spaces/haakohu/deep_privacy2_face/dp2/loss/sg2_loss.py b/spaces/haakohu/deep_privacy2_face/dp2/loss/sg2_loss.py
deleted file mode 100644
index 763263e2e7cb9330f24265ba8008e152fa4110f0..0000000000000000000000000000000000000000
--- a/spaces/haakohu/deep_privacy2_face/dp2/loss/sg2_loss.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import functools
-import torch
-import tops
-from tops import logger
-from dp2.utils import forward_D_fake
-from .utils import nsgan_d_loss, nsgan_g_loss
-from .r1_regularization import r1_regularization
-from .pl_regularization import PLRegularization
-
-
-class StyleGAN2Loss:
-
- def __init__(
- self,
- D,
- G,
- r1_opts: dict,
- EP_lambd: float,
- lazy_reg_interval: int,
- lazy_regularization: bool,
- pl_reg_opts: dict,
- ) -> None:
- self.gradient_step_D = 0
- self._lazy_reg_interval = lazy_reg_interval
- self.D = D
- self.G = G
- self.EP_lambd = EP_lambd
- self.lazy_regularization = lazy_regularization
- self.r1_reg = functools.partial(
- r1_regularization, **r1_opts, lazy_reg_interval=lazy_reg_interval,
- lazy_regularization=lazy_regularization)
- self.do_PL_Reg = False
- if pl_reg_opts.weight > 0:
- self.pl_reg = PLRegularization(**pl_reg_opts)
- self.do_PL_Reg = True
- self.pl_start_nimg = pl_reg_opts.start_nimg
-
- def D_loss(self, batch: dict, grad_scaler):
- to_log = {}
- # Forward through G and D
- do_GP = self.lazy_regularization and self.gradient_step_D % self._lazy_reg_interval == 0
- if do_GP:
- batch["img"] = batch["img"].detach().requires_grad_(True)
- with torch.cuda.amp.autocast(enabled=tops.AMP()):
- with torch.no_grad():
- G_fake = self.G(**batch, update_emas=True)
- D_out_real = self.D(**batch)
-
- D_out_fake = forward_D_fake(batch, G_fake["img"], self.D)
-
- # Non saturating loss
- nsgan_loss = nsgan_d_loss(D_out_real["score"], D_out_fake["score"])
- tops.assert_shape(nsgan_loss, (batch["img"].shape[0], ))
- to_log["d_loss"] = nsgan_loss.mean()
- total_loss = nsgan_loss
- epsilon_penalty = D_out_real["score"].pow(2).view(-1)
- to_log["epsilon_penalty"] = epsilon_penalty.mean()
- tops.assert_shape(epsilon_penalty, total_loss.shape)
- total_loss = total_loss + epsilon_penalty * self.EP_lambd
-
- # Improved gradient penalty with lazy regularization
- # Gradient penalty applies specialized autocast.
- if do_GP:
- gradient_pen, grad_unscaled = self.r1_reg(
- batch["img"], D_out_real["score"], batch["mask"], scaler=grad_scaler)
- to_log["r1_gradient_penalty"] = grad_unscaled.mean()
- tops.assert_shape(gradient_pen, total_loss.shape)
- total_loss = total_loss + gradient_pen
-
- batch["img"] = batch["img"].detach().requires_grad_(False)
- if "score" in D_out_real:
- to_log["real_scores"] = D_out_real["score"]
- to_log["real_logits_sign"] = D_out_real["score"].sign()
- to_log["fake_logits_sign"] = D_out_fake["score"].sign()
- to_log["fake_scores"] = D_out_fake["score"]
- to_log = {key: item.mean().detach() for key, item in to_log.items()}
- self.gradient_step_D += 1
- return total_loss.mean(), to_log
-
- def G_loss(self, batch: dict, grad_scaler):
- with torch.cuda.amp.autocast(enabled=tops.AMP()):
- to_log = {}
- # Forward through G and D
- G_fake = self.G(**batch)
- D_out_fake = forward_D_fake(batch, G_fake["img"], self.D)
- # Adversarial Loss
- total_loss = nsgan_g_loss(D_out_fake["score"]).view(-1)
- to_log["g_loss"] = total_loss.mean()
- tops.assert_shape(total_loss, (batch["img"].shape[0], ))
-
- if self.do_PL_Reg and logger.global_step() >= self.pl_start_nimg:
- pl_reg, to_log_ = self.pl_reg(self.G, batch, grad_scaler=grad_scaler)
- total_loss = total_loss + pl_reg.mean()
- to_log.update(to_log_)
- to_log = {key: item.mean().detach() for key, item in to_log.items()}
- return total_loss.mean(), to_log
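The `nsgan_d_loss` / `nsgan_g_loss` helpers imported from `.utils` are not shown in this excerpt. The standard non-saturating (softplus) formulation used by StyleGAN2-style trainers looks roughly like this; it is a sketch under that assumption, not necessarily the project's exact implementation:

```python
import torch
import torch.nn.functional as F

def nsgan_d_loss(real_scores: torch.Tensor, fake_scores: torch.Tensor) -> torch.Tensor:
    # -log sigmoid(real) - log(1 - sigmoid(fake)), written with softplus for numerical stability.
    return F.softplus(-real_scores) + F.softplus(fake_scores)

def nsgan_g_loss(fake_scores: torch.Tensor) -> torch.Tensor:
    # Non-saturating generator loss: -log sigmoid(fake).
    return F.softplus(-fake_scores)
```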
diff --git a/spaces/hahahafofo/image2text_prompt_generator/utils/__init__.py b/spaces/hahahafofo/image2text_prompt_generator/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/hahahafofo/image2text_prompt_generator/utils/generator.py b/spaces/hahahafofo/image2text_prompt_generator/utils/generator.py
deleted file mode 100644
index 13671bdb0307bd2af8e927abdb968da3fd0e4bde..0000000000000000000000000000000000000000
--- a/spaces/hahahafofo/image2text_prompt_generator/utils/generator.py
+++ /dev/null
@@ -1,185 +0,0 @@
-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
-from transformers import pipeline, set_seed
-import random
-import re
-from .singleton import Singleton
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-
-@Singleton
-class Models(object):
-
- def __getattr__(self, item):
- if item in self.__dict__:
- return getattr(self, item)
-
- if item in ('microsoft_model', 'microsoft_tokenizer'):
- self.microsoft_model, self.microsoft_tokenizer = self.load_microsoft_model()
-
- if item in ('mj_pipe',):
- self.mj_pipe = self.load_mj_pipe()
-
- if item in ('gpt2_650k_pipe',):
- self.gpt2_650k_pipe = self.load_gpt2_650k_pipe()
-
- if item in ('gpt_neo_125m',):
- self.gpt_neo_125m = self.load_gpt_neo_125m()
- return getattr(self, item)
-
- @classmethod
- def load_gpt_neo_125m(cls):
- return pipeline('text-generation', model='DrishtiSharma/StableDiffusion-Prompt-Generator-GPT-Neo-125M')
-
- @classmethod
- def load_gpt2_650k_pipe(cls):
- return pipeline('text-generation', model='Ar4ikov/gpt2-650k-stable-diffusion-prompt-generator')
-
- @classmethod
- def load_mj_pipe(cls):
- return pipeline('text-generation', model='succinctly/text2image-prompt-generator')
-
- @classmethod
- def load_microsoft_model(cls):
- prompter_model = AutoModelForCausalLM.from_pretrained("microsoft/Promptist")
- tokenizer = AutoTokenizer.from_pretrained("gpt2")
- tokenizer.pad_token = tokenizer.eos_token
- tokenizer.padding_side = "left"
- return prompter_model, tokenizer
-
-
-models = Models.instance()
-
-
-def rand_length(min_length: int = 60, max_length: int = 90) -> int:
- if min_length > max_length:
- return max_length
-
- return random.randint(min_length, max_length)
-
-
-def generate_prompt(
- plain_text,
- min_length=60,
- max_length=90,
- num_return_sequences=8,
- model_name='microsoft',
-):
- if model_name == 'gpt2_650k':
- return generate_prompt_pipe(
- models.gpt2_650k_pipe,
- prompt=plain_text,
- min_length=min_length,
- max_length=max_length,
- num_return_sequences=num_return_sequences,
- )
- elif model_name == 'gpt_neo_125m':
- return generate_prompt_pipe(
- models.gpt_neo_125m,
- prompt=plain_text,
- min_length=min_length,
- max_length=max_length,
- num_return_sequences=num_return_sequences,
- )
- elif model_name == 'mj':
- return generate_prompt_mj(
- text_in_english=plain_text,
- num_return_sequences=num_return_sequences,
- min_length=min_length,
- max_length=max_length,
- )
- else:
- return generate_prompt_microsoft(
- plain_text=plain_text,
- min_length=min_length,
- max_length=max_length,
- num_return_sequences=num_return_sequences,
- num_beams=num_return_sequences,
- )
-
-
-def generate_prompt_microsoft(
- plain_text,
- min_length=60,
- max_length=90,
- num_beams=8,
- num_return_sequences=8,
- length_penalty=-1.0
-) -> str:
- input_ids = models.microsoft_tokenizer(plain_text.strip() + " Rephrase:", return_tensors="pt").input_ids
- eos_id = models.microsoft_tokenizer.eos_token_id
-
- outputs = models.microsoft_model.generate(
- input_ids,
- do_sample=False,
- max_new_tokens=rand_length(min_length, max_length),
- num_beams=num_beams,
- num_return_sequences=num_return_sequences,
- eos_token_id=eos_id,
- pad_token_id=eos_id,
- length_penalty=length_penalty
- )
- output_texts = models.microsoft_tokenizer.batch_decode(outputs, skip_special_tokens=True)
- result = []
- for output_text in output_texts:
- result.append(output_text.replace(plain_text + " Rephrase:", "").strip())
-
- return "\n".join(result)
-
-
-def generate_prompt_pipe(pipe, prompt: str, min_length=60, max_length: int = 255, num_return_sequences: int = 8) -> str:
-    def get_valid_prompt(text: str) -> str:
-        # keep only the text before the first '.' or newline, whichever gives the shorter prefix
-        dot_split = text.split('.')[0]
-        n_split = text.split('\n')[0]
-        return min(dot_split, n_split, key=len)
-
- output = []
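-    # sample num_return_sequences candidates per pass, deduplicate, and retry up to 6 passes until enough unique prompts are collected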
- for _ in range(6):
-
- output += [
- get_valid_prompt(result['generated_text']) for result in
- pipe(
- prompt,
- max_new_tokens=rand_length(min_length, max_length),
- num_return_sequences=num_return_sequences
- )
- ]
- output = list(set(output))
- if len(output) >= num_return_sequences:
- break
-
- # valid_prompt = get_valid_prompt(models.gpt2_650k_pipe(prompt, max_length=max_length)[0]['generated_text'])
- return "\n".join([o.strip() for o in output])
-
-
-def generate_prompt_mj(text_in_english: str, num_return_sequences: int = 8, min_length=60, max_length=90) -> str:
- seed = random.randint(100, 1000000)
- set_seed(seed)
-
- result = ""
- for _ in range(6):
- sequences = models.mj_pipe(
- text_in_english,
- max_new_tokens=rand_length(min_length, max_length),
- num_return_sequences=num_return_sequences
- )
-        lines = []
-        for sequence in sequences:
-            line = sequence['generated_text'].strip()
-            # keep only generations that extend the input and do not end with a dangling separator
-            if line != text_in_english and len(line) > (len(text_in_english) + 4) and not line.endswith((':', '-', '—')):
-                lines.append(line)
-
-        result = "\n".join(lines)
-        # drop word.word tokens (URLs, file names) and stray angle brackets
-        result = re.sub(r'[^ ]+\.[^ ]+', '', result)
-        result = result.replace('<', '').replace('>', '')
- if result != '':
- break
- return result
- # return result, "\n".join(translate_en2zh(line) for line in result.split("\n") if len(line) > 0)
diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tests/modeling/test_rpn.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tests/modeling/test_rpn.py
deleted file mode 100644
index 967d2102b85f2d66e3f0b32b31805c4ac01afa0c..0000000000000000000000000000000000000000
--- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/tests/modeling/test_rpn.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import logging
-import unittest
-import torch
-
-from detectron2.config import get_cfg
-from detectron2.modeling.backbone import build_backbone
-from detectron2.modeling.proposal_generator.build import build_proposal_generator
-from detectron2.modeling.proposal_generator.rpn_outputs import find_top_rpn_proposals
-from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
-from detectron2.utils.events import EventStorage
-
-logger = logging.getLogger(__name__)
-
-
-class RPNTest(unittest.TestCase):
- def test_rpn(self):
- torch.manual_seed(121)
- cfg = get_cfg()
- cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
- cfg.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
- cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1)
- backbone = build_backbone(cfg)
- proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
- num_images = 2
- images_tensor = torch.rand(num_images, 20, 30)
- image_sizes = [(10, 10), (20, 30)]
- images = ImageList(images_tensor, image_sizes)
- image_shape = (15, 15)
- num_channels = 1024
- features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
- gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
- gt_instances = Instances(image_shape)
- gt_instances.gt_boxes = Boxes(gt_boxes)
- with EventStorage(): # capture events in a new storage to discard them
- proposals, proposal_losses = proposal_generator(
- images, features, [gt_instances[0], gt_instances[1]]
- )
-
- expected_losses = {
- "loss_rpn_cls": torch.tensor(0.0804563984),
- "loss_rpn_loc": torch.tensor(0.0990132466),
- }
- for name in expected_losses.keys():
- err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
- name, proposal_losses[name], expected_losses[name]
- )
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
-
- expected_proposal_boxes = [
- Boxes(torch.tensor([[0, 0, 10, 10], [7.3365392685, 0, 10, 10]])),
- Boxes(
- torch.tensor(
- [
- [0, 0, 30, 20],
- [0, 0, 16.7862777710, 13.1362524033],
- [0, 0, 30, 13.3173446655],
- [0, 0, 10.8602609634, 20],
- [7.7165775299, 0, 27.3875980377, 20],
- ]
- )
- ),
- ]
-
- expected_objectness_logits = [
- torch.tensor([0.1225359365, -0.0133192837]),
- torch.tensor([0.1415634006, 0.0989848152, 0.0565387346, -0.0072308783, -0.0428492837]),
- ]
-
- for proposal, expected_proposal_box, im_size, expected_objectness_logit in zip(
- proposals, expected_proposal_boxes, image_sizes, expected_objectness_logits
- ):
- self.assertEqual(len(proposal), len(expected_proposal_box))
- self.assertEqual(proposal.image_size, im_size)
- self.assertTrue(
- torch.allclose(proposal.proposal_boxes.tensor, expected_proposal_box.tensor)
- )
- self.assertTrue(torch.allclose(proposal.objectness_logits, expected_objectness_logit))
-
- def test_rrpn(self):
- torch.manual_seed(121)
- cfg = get_cfg()
- cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
- cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
- cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
- cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]]
- cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]]
- cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
- cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
- backbone = build_backbone(cfg)
- proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
- num_images = 2
- images_tensor = torch.rand(num_images, 20, 30)
- image_sizes = [(10, 10), (20, 30)]
- images = ImageList(images_tensor, image_sizes)
- image_shape = (15, 15)
- num_channels = 1024
- features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
- gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
- gt_instances = Instances(image_shape)
- gt_instances.gt_boxes = RotatedBoxes(gt_boxes)
- with EventStorage(): # capture events in a new storage to discard them
- proposals, proposal_losses = proposal_generator(
- images, features, [gt_instances[0], gt_instances[1]]
- )
-
- expected_losses = {
- "loss_rpn_cls": torch.tensor(0.043263837695121765),
- "loss_rpn_loc": torch.tensor(0.14432406425476074),
- }
- for name in expected_losses.keys():
- err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
- name, proposal_losses[name], expected_losses[name]
- )
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
-
- expected_proposal_boxes = [
- RotatedBoxes(
- torch.tensor(
- [
- [0.60189795, 1.24095452, 61.98131943, 18.03621292, -4.07244873],
- [15.64940453, 1.69624567, 59.59749603, 16.34339333, 2.62692475],
- [-3.02982378, -2.69752932, 67.90952301, 59.62455750, 59.97010040],
- [16.71863365, 1.98309708, 35.61507797, 32.81484985, 62.92267227],
- [0.49432933, -7.92979717, 67.77606201, 62.93098450, -1.85656738],
- [8.00880814, 1.36017394, 121.81007385, 32.74150467, 50.44297409],
- [16.44299889, -4.82221127, 63.39775848, 61.22503662, 54.12270737],
- [5.00000000, 5.00000000, 10.00000000, 10.00000000, -0.76943970],
- [17.64130402, -0.98095351, 61.40377808, 16.28918839, 55.53118134],
- [0.13016054, 4.60568953, 35.80157471, 32.30180359, 62.52872086],
- [-4.26460743, 0.39604485, 124.30079651, 31.84611320, -1.58203125],
- [7.52815342, -0.91636634, 62.39784622, 15.45565224, 60.79549789],
- ]
- )
- ),
- RotatedBoxes(
- torch.tensor(
- [
- [0.07734215, 0.81635046, 65.33510590, 17.34688377, -1.51821899],
- [-3.41833067, -3.11320257, 64.17595673, 60.55617905, 58.27033234],
- [20.67383385, -6.16561556, 63.60531998, 62.52315903, 54.85546494],
- [15.00000000, 10.00000000, 30.00000000, 20.00000000, -0.18218994],
- [9.22646523, -6.84775209, 62.09895706, 65.46472931, -2.74307251],
- [15.00000000, 4.93451595, 30.00000000, 9.86903191, -0.60272217],
- [8.88342094, 2.65560246, 120.95362854, 32.45022202, 55.75970078],
- [16.39088631, 2.33887148, 34.78761292, 35.61492920, 60.81977463],
- [9.78298569, 10.00000000, 19.56597137, 20.00000000, -0.86660767],
- [1.28576660, 5.49873352, 34.93610382, 33.22600174, 60.51599884],
- [17.58912468, -1.63270092, 62.96052551, 16.45713997, 52.91245270],
- [5.64749718, -1.90428460, 62.37649155, 16.19474792, 61.09543991],
- [0.82255805, 2.34931135, 118.83985901, 32.83671188, 56.50753784],
- [-5.33874989, 1.64404404, 125.28501892, 33.35424042, -2.80731201],
- ]
- )
- ),
- ]
-
- expected_objectness_logits = [
- torch.tensor(
- [
- 0.10111768,
- 0.09112845,
- 0.08466332,
- 0.07589971,
- 0.06650183,
- 0.06350251,
- 0.04299347,
- 0.01864817,
- 0.00986163,
- 0.00078543,
- -0.04573630,
- -0.04799230,
- ]
- ),
- torch.tensor(
- [
- 0.11373727,
- 0.09377633,
- 0.05281663,
- 0.05143715,
- 0.04040275,
- 0.03250912,
- 0.01307789,
- 0.01177734,
- 0.00038105,
- -0.00540255,
- -0.01194804,
- -0.01461012,
- -0.03061717,
- -0.03599222,
- ]
- ),
- ]
-
- torch.set_printoptions(precision=8, sci_mode=False)
-
- for proposal, expected_proposal_box, im_size, expected_objectness_logit in zip(
- proposals, expected_proposal_boxes, image_sizes, expected_objectness_logits
- ):
- self.assertEqual(len(proposal), len(expected_proposal_box))
- self.assertEqual(proposal.image_size, im_size)
-            # There seems to be some randomness in the results across machines:
-            # running this test 100 times on one machine gives exactly the same result,
-            # but a different machine may produce slightly different numbers,
-            # hence the atol used below.
- err_msg = "computed proposal boxes = {}, expected {}".format(
- proposal.proposal_boxes.tensor, expected_proposal_box.tensor
- )
- self.assertTrue(
- torch.allclose(
- proposal.proposal_boxes.tensor, expected_proposal_box.tensor, atol=1e-5
- ),
- err_msg,
- )
-
- err_msg = "computed objectness logits = {}, expected {}".format(
- proposal.objectness_logits, expected_objectness_logit
- )
- self.assertTrue(
- torch.allclose(proposal.objectness_logits, expected_objectness_logit, atol=1e-5),
- err_msg,
- )
-
- def test_rpn_proposals_inf(self):
- N, Hi, Wi, A = 3, 3, 3, 3
- proposals = [torch.rand(N, Hi * Wi * A, 4)]
- pred_logits = [torch.rand(N, Hi * Wi * A)]
- pred_logits[0][1][3:5].fill_(float("inf"))
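-        # proposal selection should tolerate non-finite objectness logits without raising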
- images = ImageList.from_tensors([torch.rand(3, 10, 10)] * 3)
- find_top_rpn_proposals(proposals, pred_logits, images, 0.5, 1000, 1000, 0, False)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/hasibzunair/fifa-tryon-demo/model/__init__.py b/spaces/hasibzunair/fifa-tryon-demo/model/__init__.py
deleted file mode 100644
index 4d8fa272fb03208e17723b0269eb579b81514540..0000000000000000000000000000000000000000
--- a/spaces/hasibzunair/fifa-tryon-demo/model/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .u2net import U2NET
-from .u2net import U2NETP
diff --git a/spaces/hca97/Mosquito-Detection/my_models/torch_hub_cache/yolov5/utils/plots.py b/spaces/hca97/Mosquito-Detection/my_models/torch_hub_cache/yolov5/utils/plots.py
deleted file mode 100644
index db6f94a6674df697da2bd72483052ee128e4c4b0..0000000000000000000000000000000000000000
--- a/spaces/hca97/Mosquito-Detection/my_models/torch_hub_cache/yolov5/utils/plots.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
-"""
-Plotting utils
-"""
-
-import contextlib
-import math
-import os
-from copy import copy
-from pathlib import Path
-
-import cv2
-import matplotlib
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-import seaborn as sn
-import torch
-from PIL import Image, ImageDraw
-from scipy.ndimage.filters import gaussian_filter1d
-from ultralytics.utils.plotting import Annotator
-
-from utils import TryExcept, threaded
-from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh
-from utils.metrics import fitness
-
-# Settings
-RANK = int(os.getenv('RANK', -1))
-matplotlib.rc('font', **{'size': 11})
-matplotlib.use('Agg') # for writing to files only
-
-
-class Colors:
- # Ultralytics color palette https://ultralytics.com/
- def __init__(self):
- # hex = matplotlib.colors.TABLEAU_COLORS.values()
- hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
- '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
- self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
- self.n = len(self.palette)
-
- def __call__(self, i, bgr=False):
- c = self.palette[int(i) % self.n]
- return (c[2], c[1], c[0]) if bgr else c
-
- @staticmethod
- def hex2rgb(h): # rgb order (PIL)
- return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
-
-
-colors = Colors() # create instance for 'from utils.plots import colors'
-
-
-def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
- """
- x: Features to be visualized
- module_type: Module type
- stage: Module stage within model
- n: Maximum number of feature maps to plot
- save_dir: Directory to save results
- """
- if 'Detect' not in module_type:
- batch, channels, height, width = x.shape # batch, channels, height, width
- if height > 1 and width > 1:
- f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename
-
- blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
- n = min(n, channels) # number of plots
- fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols
- ax = ax.ravel()
- plt.subplots_adjust(wspace=0.05, hspace=0.05)
- for i in range(n):
- ax[i].imshow(blocks[i].squeeze()) # cmap='gray'
- ax[i].axis('off')
-
- LOGGER.info(f'Saving {f}... ({n}/{channels})')
- plt.savefig(f, dpi=300, bbox_inches='tight')
- plt.close()
- np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save
-
-
-def hist2d(x, y, n=100):
- # 2d histogram used in labels.png and evolve.png
- xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
- hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
- xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
- yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
- return np.log(hist[xidx, yidx])
-
-
-def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
- from scipy.signal import butter, filtfilt
-
- # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
- def butter_lowpass(cutoff, fs, order):
- nyq = 0.5 * fs
- normal_cutoff = cutoff / nyq
- return butter(order, normal_cutoff, btype='low', analog=False)
-
- b, a = butter_lowpass(cutoff, fs, order=order)
- return filtfilt(b, a, data) # forward-backward filter
-
-
-def output_to_target(output, max_det=300):
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting
- targets = []
- for i, o in enumerate(output):
- box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)
- j = torch.full((conf.shape[0], 1), i)
- targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))
- return torch.cat(targets, 0).numpy()
-
-
-@threaded
-def plot_images(images, targets, paths=None, fname='images.jpg', names=None):
- # Plot image grid with labels
- if isinstance(images, torch.Tensor):
- images = images.cpu().float().numpy()
- if isinstance(targets, torch.Tensor):
- targets = targets.cpu().numpy()
-
- max_size = 1920 # max image size
- max_subplots = 16 # max image subplots, i.e. 4x4
- bs, _, h, w = images.shape # batch size, _, height, width
- bs = min(bs, max_subplots) # limit plot images
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
- if np.max(images[0]) <= 1:
- images *= 255 # de-normalise (optional)
-
- # Build Image
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
- for i, im in enumerate(images):
- if i == max_subplots: # if last batch has fewer images than we expect
- break
- x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
- im = im.transpose(1, 2, 0)
- mosaic[y:y + h, x:x + w, :] = im
-
- # Resize (optional)
- scale = max_size / ns / max(h, w)
- if scale < 1:
- h = math.ceil(scale * h)
- w = math.ceil(scale * w)
- mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
-
- # Annotate
- fs = int((h + w) * ns * 0.01) # font size
- annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)
- for i in range(i + 1):
- x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
- annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
- if paths:
- annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
- if len(targets) > 0:
- ti = targets[targets[:, 0] == i] # image targets
- boxes = xywh2xyxy(ti[:, 2:6]).T
- classes = ti[:, 1].astype('int')
- labels = ti.shape[1] == 6 # labels if no conf column
- conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)
-
- if boxes.shape[1]:
- if boxes.max() <= 1.01: # if normalized with tolerance 0.01
- boxes[[0, 2]] *= w # scale to pixels
- boxes[[1, 3]] *= h
- elif scale < 1: # absolute coords need scale if image scales
- boxes *= scale
- boxes[[0, 2]] += x
- boxes[[1, 3]] += y
- for j, box in enumerate(boxes.T.tolist()):
- cls = classes[j]
- color = colors(cls)
- cls = names[cls] if names else cls
- if labels or conf[j] > 0.25: # 0.25 conf thresh
- label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
- annotator.box_label(box, label, color=color)
- annotator.im.save(fname) # save
-
-
-def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
- # Plot LR simulating training for full epochs
- optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
- y = []
- for _ in range(epochs):
- scheduler.step()
- y.append(optimizer.param_groups[0]['lr'])
- plt.plot(y, '.-', label='LR')
- plt.xlabel('epoch')
- plt.ylabel('LR')
- plt.grid()
- plt.xlim(0, epochs)
- plt.ylim(0)
- plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
- plt.close()
-
-
-def plot_val_txt(): # from utils.plots import *; plot_val()
- # Plot val.txt histograms
- x = np.loadtxt('val.txt', dtype=np.float32)
- box = xyxy2xywh(x[:, :4])
- cx, cy = box[:, 0], box[:, 1]
-
- fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
- ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
- ax.set_aspect('equal')
- plt.savefig('hist2d.png', dpi=300)
-
- fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
- ax[0].hist(cx, bins=600)
- ax[1].hist(cy, bins=600)
- plt.savefig('hist1d.png', dpi=200)
-
-
-def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
- # Plot targets.txt histograms
- x = np.loadtxt('targets.txt', dtype=np.float32).T
- s = ['x targets', 'y targets', 'width targets', 'height targets']
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
- ax = ax.ravel()
- for i in range(4):
- ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}')
- ax[i].legend()
- ax[i].set_title(s[i])
- plt.savefig('targets.jpg', dpi=200)
-
-
-def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()
- # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)
- save_dir = Path(file).parent if file else Path(dir)
- plot2 = False # plot additional results
- if plot2:
- ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
-
- fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
- # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
- for f in sorted(save_dir.glob('study*.txt')):
- y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
- x = np.arange(y.shape[1]) if x is None else np.array(x)
- if plot2:
- s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']
- for i in range(7):
- ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
- ax[i].set_title(s[i])
-
- j = y[3].argmax() + 1
- ax2.plot(y[5, 1:j],
- y[3, 1:j] * 1E2,
- '.-',
- linewidth=2,
- markersize=8,
- label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
-
- ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
- 'k.-',
- linewidth=2,
- markersize=8,
- alpha=.25,
- label='EfficientDet')
-
- ax2.grid(alpha=0.2)
- ax2.set_yticks(np.arange(20, 60, 5))
- ax2.set_xlim(0, 57)
- ax2.set_ylim(25, 55)
- ax2.set_xlabel('GPU Speed (ms/img)')
- ax2.set_ylabel('COCO AP val')
- ax2.legend(loc='lower right')
- f = save_dir / 'study.png'
- print(f'Saving {f}...')
- plt.savefig(f, dpi=300)
-
-
-@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395
-def plot_labels(labels, names=(), save_dir=Path('')):
- # plot dataset labels
- LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ")
- c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
- nc = int(c.max() + 1) # number of classes
- x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
-
- # seaborn correlogram
- sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
- plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
- plt.close()
-
- # matplotlib labels
- matplotlib.use('svg') # faster
- ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
- y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
- with contextlib.suppress(Exception): # color histogram bars by class
- [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195
- ax[0].set_ylabel('instances')
- if 0 < len(names) < 30:
- ax[0].set_xticks(range(len(names)))
- ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10)
- else:
- ax[0].set_xlabel('classes')
- sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
- sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
-
- # rectangles
- labels[:, 1:3] = 0.5 # center
- labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
- img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
- for cls, *box in labels[:1000]:
- ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot
- ax[1].imshow(img)
- ax[1].axis('off')
-
- for a in [0, 1, 2, 3]:
- for s in ['top', 'right', 'left', 'bottom']:
- ax[a].spines[s].set_visible(False)
-
- plt.savefig(save_dir / 'labels.jpg', dpi=200)
- matplotlib.use('Agg')
- plt.close()
-
-
-def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')):
- # Show classification image grid with labels (optional) and predictions (optional)
- from utils.augmentations import denormalize
-
- names = names or [f'class{i}' for i in range(1000)]
- blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im),
-                         dim=0)  # split the batch into per-image blocks
- n = min(len(blocks), nmax) # number of plots
- m = min(8, round(n ** 0.5)) # 8 x 8 default
- fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols
- ax = ax.ravel() if m > 1 else [ax]
- # plt.subplots_adjust(wspace=0.05, hspace=0.05)
- for i in range(n):
- ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0))
- ax[i].axis('off')
- if labels is not None:
- s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '')
- ax[i].set_title(s, fontsize=8, verticalalignment='top')
- plt.savefig(f, dpi=300, bbox_inches='tight')
- plt.close()
- if verbose:
- LOGGER.info(f'Saving {f}')
- if labels is not None:
- LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax]))
- if pred is not None:
- LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax]))
- return f
-
-
-def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()
- # Plot evolve.csv hyp evolution results
- evolve_csv = Path(evolve_csv)
- data = pd.read_csv(evolve_csv)
- keys = [x.strip() for x in data.columns]
- x = data.values
- f = fitness(x)
- j = np.argmax(f) # max fitness index
- plt.figure(figsize=(10, 12), tight_layout=True)
- matplotlib.rc('font', **{'size': 8})
- print(f'Best results from row {j} of {evolve_csv}:')
- for i, k in enumerate(keys[7:]):
- v = x[:, 7 + i]
- mu = v[j] # best single result
- plt.subplot(6, 5, i + 1)
- plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
- plt.plot(mu, f.max(), 'k+', markersize=15)
- plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters
- if i % 5 != 0:
- plt.yticks([])
- print(f'{k:>15}: {mu:.3g}')
- f = evolve_csv.with_suffix('.png') # filename
- plt.savefig(f, dpi=200)
- plt.close()
- print(f'Saved {f}')
-
-
-def plot_results(file='path/to/results.csv', dir=''):
- # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
- save_dir = Path(file).parent if file else Path(dir)
- fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
- ax = ax.ravel()
- files = list(save_dir.glob('results*.csv'))
- assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.'
- for f in files:
- try:
- data = pd.read_csv(f)
- s = [x.strip() for x in data.columns]
- x = data.values[:, 0]
- for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]):
- y = data.values[:, j].astype('float')
- # y[y == 0] = np.nan # don't show zero values
- ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) # actual results
- ax[i].plot(x, gaussian_filter1d(y, sigma=3), ':', label='smooth', linewidth=2) # smoothing line
- ax[i].set_title(s[j], fontsize=12)
- # if j in [8, 9, 10]: # share train and val loss y axes
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
- except Exception as e:
- LOGGER.info(f'Warning: Plotting error for {f}: {e}')
- ax[1].legend()
- fig.savefig(save_dir / 'results.png', dpi=200)
- plt.close()
-
-
-def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
- # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
- ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
- s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
- files = list(Path(save_dir).glob('frames*.txt'))
- for fi, f in enumerate(files):
- try:
- results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows
- n = results.shape[1] # number of rows
- x = np.arange(start, min(stop, n) if stop else n)
- results = results[:, x]
- t = (results[0] - results[0].min()) # set t0=0s
- results[0] = x
- for i, a in enumerate(ax):
- if i < len(results):
- label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
- a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
- a.set_title(s[i])
- a.set_xlabel('time (s)')
- # if fi == len(files) - 1:
- # a.set_ylim(bottom=0)
- for side in ['top', 'right']:
- a.spines[side].set_visible(False)
- else:
- a.remove()
- except Exception as e:
- print(f'Warning: Plotting error for {f}; {e}')
- ax[1].legend()
- plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
-
-
-def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):
- # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
- xyxy = torch.tensor(xyxy).view(-1, 4)
- b = xyxy2xywh(xyxy) # boxes
- if square:
- b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
- b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
- xyxy = xywh2xyxy(b).long()
- clip_boxes(xyxy, im.shape)
- crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
- if save:
- file.parent.mkdir(parents=True, exist_ok=True) # make directory
- f = str(increment_path(file).with_suffix('.jpg'))
- # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue
- Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB
- return crop
diff --git a/spaces/heiyubili/bingo/postcss.config.js b/spaces/heiyubili/bingo/postcss.config.js
deleted file mode 100644
index 33ad091d26d8a9dc95ebdf616e217d985ec215b8..0000000000000000000000000000000000000000
--- a/spaces/heiyubili/bingo/postcss.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-module.exports = {
- plugins: {
- tailwindcss: {},
- autoprefixer: {},
- },
-}
diff --git a/spaces/hhim8826/vits-ATR/models.py b/spaces/hhim8826/vits-ATR/models.py
deleted file mode 100644
index f5acdeb2bedd47897348407c0ae55c9a160da881..0000000000000000000000000000000000000000
--- a/spaces/hhim8826/vits-ATR/models.py
+++ /dev/null
@@ -1,534 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
- super().__init__()
- filter_channels = in_channels # it needs to be removed from future version.
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
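-            # training path: compute a variational bound on the negative log-likelihood of the durations w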
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
- logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
- return nll + logq # [b]
- else:
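-            # inference path: sample noise and run the flows in reverse to produce log-durations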
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class DurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.proj = nn.Conv1d(filter_channels, 1, 1)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- def forward(self, x, x_mask, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
- x = self.proj(x * x_mask)
- return x * x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
-
- self.emb = nn.Embedding(n_vocab, hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths):
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
- k, u, padding=(k-u)//2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel//(2**(i+1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i*self.num_kernels+j](x)
- else:
- xs += self.resblocks[i*self.num_kernels+j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
-        norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=0,
- gin_channels=0,
- use_sdp=True,
- **kwargs):
-
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
-
- self.use_sdp = use_sdp
-
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- if use_sdp:
- self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
- else:
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
- if n_speakers > 1:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- def forward(self, x, x_lengths, y, y_lengths, sid=None):
-
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- with torch.no_grad():
- # negative cross-entropy
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
-
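-            # monotonic alignment search: choose the hard alignment that maximizes the prior likelihood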
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
- w = attn.sum(2)
- if self.use_sdp:
- l_length = self.dp(x, x_mask, w, g=g)
- l_length = l_length / torch.sum(x_mask)
- else:
- logw_ = torch.log(w + 1e-6) * x_mask
- logw = self.dp(x, x_mask, g=g)
- l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
-
- # expand prior
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
- o = self.dec(z_slice, g=g)
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- if self.use_sdp:
- logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
- else:
- logw = self.dp(x, x_mask, g=g)
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:,:,:max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
- def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
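-        # encode audio with the source speaker, map it to the prior space, then invert the flow and decode with the target speaker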
- assert self.n_speakers > 0, "n_speakers have to be larger than 0."
- g_src = self.emb_g(sid_src).unsqueeze(-1)
- g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
- z_p = self.flow(z, y_mask, g=g_src)
- z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
- o_hat = self.dec(z_hat * y_mask, g=g_tgt)
- return o_hat, y_mask, (z, z_p, z_hat)
-
diff --git a/spaces/hkunlp/Binder/demos/get_key.py b/spaces/hkunlp/Binder/demos/get_key.py
deleted file mode 100644
index 45b72c70b9f6ba6d9f1f626185d2b77ed979e636..0000000000000000000000000000000000000000
--- a/spaces/hkunlp/Binder/demos/get_key.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import requests
-
-
-def get_key():
- URL = "http://54.242.37.195:20217/api/predict"
-    # The springboard machine we built to protect the key; 20217 is the birthday of Tianbao's girlfriend.
-    # Only the demo machine is allowed access to the keys.
-
- one_key = requests.post(url=URL, json={"data": "Hi, binder server. Give me a key!"}).json()['data'][0]
- return one_key
diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/alternative_experiment_planning/readme.md b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/alternative_experiment_planning/readme.md
deleted file mode 100644
index b9c5c0e2a735727b538ffb0b4c7ac58f29b0c34e..0000000000000000000000000000000000000000
--- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/alternative_experiment_planning/readme.md
+++ /dev/null
@@ -1,2 +0,0 @@
-These alternatives are not used in nnU-Net, but you can use them if you believe they might be better suited for you.
-I (Fabian) have not found them to be consistently superior.
\ No newline at end of file
diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/utilities/image_reorientation.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/utilities/image_reorientation.py
deleted file mode 100644
index 068f0caa4e611eca5769121892501206ee009cb0..0000000000000000000000000000000000000000
--- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/utilities/image_reorientation.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import nibabel as nib
-from nibabel import io_orientation
-from batchgenerators.utilities.file_and_folder_operations import *
-import numpy as np
-import os
-from multiprocessing import Pool
-import SimpleITK as sitk
-
-
-def print_shapes(folder: str) -> None:
- for i in subfiles(folder, suffix='.nii.gz'):
- tmp = sitk.ReadImage(i)
- print(sitk.GetArrayFromImage(tmp).shape, tmp.GetSpacing())
-
-
-def reorient_to_ras(image: str) -> None:
- """
- Will overwrite image!!!
- :param image:
- :return:
- """
- assert image.endswith('.nii.gz')
- origaffine_pkl = image[:-7] + '_originalAffine.pkl'
- if not isfile(origaffine_pkl):
- img = nib.load(image)
- original_affine = img.affine
- original_axcode = nib.aff2axcodes(img.affine)
- img = img.as_reoriented(io_orientation(img.affine))
- new_axcode = nib.aff2axcodes(img.affine)
- print(image.split('/')[-1], 'original axcode', original_axcode, 'now (should be ras)', new_axcode)
- nib.save(img, image)
- save_pickle((original_affine, original_axcode), origaffine_pkl)
-
-
-def revert_reorientation(image: str) -> None:
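-    # undo reorient_to_ras using the affine stored in the sidecar pickle, then delete the pickle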
- assert image.endswith('.nii.gz')
- expected_pkl = image[:-7] + '_originalAffine.pkl'
- assert isfile(expected_pkl), 'Must have a file with the original affine, as created by ' \
- 'reorient_to_ras. Expected filename: %s' % \
- expected_pkl
- original_affine, original_axcode = load_pickle(image[:-7] + '_originalAffine.pkl')
- img = nib.load(image)
- before_revert = nib.aff2axcodes(img.affine)
- img = img.as_reoriented(io_orientation(original_affine))
- after_revert = nib.aff2axcodes(img.affine)
- print('before revert', before_revert, 'after revert', after_revert)
- restored_affine = img.affine
- assert np.all(np.isclose(original_affine, restored_affine)), 'restored affine does not match original affine, ' \
- 'aborting!'
- nib.save(img, image)
- os.remove(expected_pkl)
-
-
-def reorient_all_images_in_folder_to_ras(folder: str, num_processes: int = 8):
- p = Pool(num_processes)
- nii_files = subfiles(folder, suffix='.nii.gz', join=True)
- p.map(reorient_to_ras, nii_files)
- p.close()
- p.join()
-
-
-def revert_orientation_on_all_images_in_folder(folder: str, num_processes: int = 8):
- p = Pool(num_processes)
- nii_files = subfiles(folder, suffix='.nii.gz', join=True)
- p.map(revert_reorientation, nii_files)
- p.close()
- p.join()
-
-
-if __name__ == '__main__':
- """nib.as_closest_canonical()
- test_img = '/home/fabian/data/la_005_0000.nii.gz'
- test_img_reorient = test_img[:-7] + '_reorient.nii.gz'
- test_img_restored = test_img[:-7] + '_restored.nii.gz'
-
- img = nib.load(test_img)
- print('loaded original')
- print('shape', img.shape)
- print('affine', img.affine)
- original_affine = img.affine
- original_axcode = nib.aff2axcodes(img.affine)
- print('orientation', nib.aff2axcodes(img.affine))
-
- print('reorienting...')
- img = img.as_reoriented(io_orientation(img.affine))
- nib.save(img, test_img_reorient)
-
- print('now loading the reoriented img')
- img = nib.load(test_img_reorient)
- print('loaded original')
- print('shape', img.shape)
- print('affine', img.affine)
- reorient_affine = img.affine
- reorient_axcode = nib.aff2axcodes(img.affine)
- print('orientation', nib.aff2axcodes(img.affine))
-
- print('restoring original geometry')
- img = img.as_reoriented(io_orientation(original_affine))
- restored_affine = img.affine
- nib.save(img, test_img_restored)
-
- print('now loading the restored img')
- img = nib.load(test_img_restored)
- print('loaded original')
- print('shape', img.shape)
- print('affine', img.affine)
- print('orientation', nib.aff2axcodes(img.affine))"""
diff --git a/spaces/huang4414/anime-remove-background/app.py b/spaces/huang4414/anime-remove-background/app.py
deleted file mode 100644
index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000
--- a/spaces/huang4414/anime-remove-background/app.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import gradio as gr
-import huggingface_hub
-import onnxruntime as rt
-import numpy as np
-import cv2
-
-
-def get_mask(img, s=1024):
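-    # pad-resize the image to s x s, run the ONNX segmentation model, then crop and resize the mask back to the original size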
- img = (img / 255).astype(np.float32)
- h, w = h0, w0 = img.shape[:-1]
- h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
- ph, pw = s - h, s - w
- img_input = np.zeros([s, s, 3], dtype=np.float32)
- img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h))
- img_input = np.transpose(img_input, (2, 0, 1))
- img_input = img_input[np.newaxis, :]
- mask = rmbg_model.run(None, {'img': img_input})[0][0]
- mask = np.transpose(mask, (1, 2, 0))
- mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
- mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
- return mask
-
-
-def rmbg_fn(img):
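-    # composite the subject onto a white background and return both the mask and an RGBA result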
- mask = get_mask(img)
- img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
- mask = (mask * 255).astype(np.uint8)
- img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
- mask = mask.repeat(3, axis=2)
- return mask, img
-
-
-if __name__ == "__main__":
- providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
- model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
- rmbg_model = rt.InferenceSession(model_path, providers=providers)
- app = gr.Blocks()
- with app:
- gr.Markdown("# Anime Remove Background\n\n"
- "\n\n"
- "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)")
- with gr.Row():
- with gr.Column():
- input_img = gr.Image(label="input image")
- examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
- examples = gr.Dataset(components=[input_img], samples=examples_data)
- run_btn = gr.Button(variant="primary")
- output_mask = gr.Image(label="mask")
- output_img = gr.Image(label="result", image_mode="RGBA")
- examples.click(lambda x: x[0], [examples], [input_img])
- run_btn.click(rmbg_fn, [input_img], [output_mask, output_img])
- app.launch()
diff --git "a/spaces/huggingface/Model_Cards_Writing_Tool/1_\360\237\223\235_form.py" "b/spaces/huggingface/Model_Cards_Writing_Tool/1_\360\237\223\235_form.py"
deleted file mode 100644
index fecf4769bbc4d860cfafa3406b2c7202c358ecc1..0000000000000000000000000000000000000000
--- "a/spaces/huggingface/Model_Cards_Writing_Tool/1_\360\237\223\235_form.py"
+++ /dev/null
@@ -1,302 +0,0 @@
-from yaml import load
-from persist import persist, load_widget_state
-import streamlit as st
-from io import StringIO
-import tempfile
-from pathlib import Path
-import requests
-from huggingface_hub import hf_hub_download, upload_file
-import pandas as pd
-from huggingface_hub import create_repo
-import os
-from middleMan import parse_into_jinja_markdown as pj
-
-
-
-@st.cache
-def get_cached_data():
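-    # scrape language and license tables plus model tag lists from the Hub once; st.cache memoizes the result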
- languages_df = pd.read_html("https://hf.co/languages")[0]
- languages_map = pd.Series(languages_df["Language"].values, index=languages_df["ISO code"]).to_dict()
-
- license_df = pd.read_html("https://huggingface.co/docs/hub/repositories-licenses")[0]
- license_map = pd.Series(
- license_df["License identifier (to use in model card)"].values, index=license_df.Fullname
- ).to_dict()
-
- available_metrics = [x['id'] for x in requests.get('https://huggingface.co/api/metrics').json()]
-
- r = requests.get('https://huggingface.co/api/models-tags-by-type')
- tags_data = r.json()
- libraries = [x['id'] for x in tags_data['library']]
- tasks = [x['id'] for x in tags_data['pipeline_tag']]
- return languages_map, license_map, available_metrics, libraries, tasks
-
-
-def card_upload(card_info, repo_id, token):
-    # push the rendered card to the Hub as the Space's README.md
-    #commit_message=None,
-    repo_type = "space"
-    commit_description = None
-    revision = None
-    create_pr = None
- with tempfile.TemporaryDirectory() as tmpdir:
- tmp_path = Path(tmpdir) / "README.md"
- tmp_path.write_text(str(card_info))
- url = upload_file(
- path_or_fileobj=str(tmp_path),
- path_in_repo="README.md",
- repo_id=repo_id,
- token=token,
- repo_type=repo_type,
- identical_ok=True,
- revision=revision,
- )
- return url
-
-def validate(self, repo_type="model"):
- """Validates card against Hugging Face Hub's model card validation logic.
- Using this function requires access to the internet, so it is only called
- internally by `modelcards.ModelCard.push_to_hub`.
- Args:
- repo_type (`str`, *optional*):
- The type of Hugging Face repo to push to. Defaults to None, which will
- use "model". Other options are "dataset" and "space".
- """
- if repo_type is None:
- repo_type = "model"
-
- # TODO - compare against repo types constant in huggingface_hub if we move this object there.
- if repo_type not in ["model", "space", "dataset"]:
- raise RuntimeError(
- "Provided repo_type '{repo_type}' should be one of ['model', 'space',"
- " 'dataset']."
- )
-
- body = {
- "repoType": repo_type,
- "content": str(self),
- }
- headers = {"Accept": "text/plain"}
-
- try:
- r = requests.post(
- "https://huggingface.co/api/validate-yaml", body, headers=headers
- )
- r.raise_for_status()
- except requests.exceptions.HTTPError as exc:
- if r.status_code == 400:
- raise RuntimeError(r.text)
- else:
- raise exc
-
-
-## Save uploaded [markdown] file to directory to be used by jinja parser function
-def save_uploadedfile(uploadedfile):
- with open(os.path.join("temp_uploaded_filed_Dir",uploadedfile.name),"wb") as f:
- f.write(uploadedfile.getbuffer())
- st.success("Saved File:{} to temp_uploaded_filed_Dir".format(uploadedfile.name))
- return uploadedfile.name
-
-
-def main_page():
-
-
- if "model_name" not in st.session_state:
- # Initialize session state.
- st.session_state.update({
- "input_model_name": "",
- "languages": [],
- "license": "",
- "library_name": "",
- "datasets": "",
- "metrics": [],
- "task": "",
- "tags": "",
- "model_description": "Some cool model...",
- "the_authors":"",
- "Shared_by":"",
- "Model_details_text": "",
- "Model_developers": "",
- "blog_url":"",
- "Parent_Model_url":"",
- "Parent_Model_name":"",
-
- "Model_how_to": "",
-
- "Model_uses": "",
- "Direct_Use": "",
- "Downstream_Use":"",
- "Out-of-Scope_Use":"",
-
- "Model_Limits_n_Risks": "",
- "Recommendations":"",
-
- "training_Data": "",
- "model_preprocessing":"",
- "Speeds_Sizes_Times":"",
-
-
-
- "Model_Eval": "",
- "Testing_Data":"",
- "Factors":"",
- "Metrics":"",
- "Model_Results":"",
-
- "Model_c02_emitted": "",
- "Model_hardware":"",
- "hours_used":"",
- "Model_cloud_provider":"",
- "Model_cloud_region":"",
-
- "Model_cite": "",
- "paper_url": "",
- "github_url": "",
- "bibtex_citation": "",
- "APA_citation":"",
-
- "Model_examin":"",
- "Model_card_contact":"",
- "Model_card_authors":"",
- "Glossary":"",
- "More_info":"",
-
- "Model_specs":"",
- "compute_infrastructure":"",
- "technical_specs_software":"",
-
- "check_box": bool,
- "markdown_upload":" ",
- "legal_view":bool,
- "researcher_view":bool,
- "beginner_technical_view":bool,
- "markdown_state":"",
- })
- ## get cached data used to populate the form and its warnings
- languages_map, license_map, available_metrics, libraries, tasks = get_cached_data()
-
- ## form UI setting
- st.header("Model Card Form")
-
- warning_placeholder = st.empty()
-
- st.text_input("Model Name", key=persist("model_name"))
- st.text_area("Model Description", help="The model description provides basic details about the model. This includes the architecture, version, if it was introduced in a paper, if an original implementation is available, the author, and general information about the model. Any copyright should be attributed here. General information about training procedures, parameters, and important disclaimers can also be mentioned in this section.", key=persist('model_description'))
- st.multiselect("Language(s)", list(languages_map), format_func=lambda x: languages_map[x], help="The language(s) associated with this model. If this is not a text-based model, you should specify whatever language that is used in the dataset. For instance, if the dataset's labels are in english, you should select English here.", key=persist("languages"))
- st.selectbox("License", [""] + list(license_map.values()), help="The license associated with this model.", key=persist("license"))
- st.selectbox("Library Name", [""] + libraries, help="The name of the library this model came from (Ex. pytorch, timm, spacy, keras, etc.). This is usually automatically detected in model repos, so it is not required.", key=persist('library_name'))
- st.text_input("Parent Model (URL)", help="If this model has another model as its base, please provide the URL link to the parent model", key=persist("Parent_Model_name"))
- st.text_input("Datasets (comma separated)", help="The dataset(s) used to train this model. Use dataset id from https://hf.co/datasets.", key=persist("datasets"))
- st.multiselect("Metrics", available_metrics, help="Metrics used in the training/evaluation of this model. Use metric id from https://hf.co/metrics.", key=persist("metrics"))
- st.selectbox("Task", [""] + tasks, help="What task does this model aim to solve?", key=persist('task'))
- st.text_input("Tags (comma separated)", help="Additional tags to add which will be filterable on https://hf.co/models. (Ex. image-classification, vision, resnet)", key=persist("tags"))
- st.text_input("Author(s) (comma separated)", help="The authors who developed this model. If you trained this model, the author is you.", key=persist("the_authors"))
- st.text_input("Related Research Paper", help="Research paper related to this model.", key=persist("paper_url"))
- st.text_input("Related GitHub Repository", help="Link to a GitHub repository used in the development of this model", key=persist("github_url"))
- st.text_area("Bibtex Citation", help="Bibtex citations for related work", key=persist("bibtex_citations"))
- st.text_input("Carbon Emitted:", help="You can estimate carbon emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700)", key=persist("Model_c02_emitted"))
-
-
-
- # warnings setting
- languages=st.session_state.languages or None
- license=st.session_state.license or None
- task = st.session_state.task or None
- markdown_upload = st.session_state.markdown_upload
- #uploaded_model_card = st.session_state.uploaded_model
- # Handle any warnings...
- do_warn = False
- warning_msg = "Warning: The following fields are required but have not been filled in: "
- if not languages:
- warning_msg += "\n- Languages"
- do_warn = True
- if not license:
- warning_msg += "\n- License"
- do_warn = True
- if not task or not markdown_upload:
- warning_msg += "\n- Please choose a task or upload a model card"
- do_warn = True
- if do_warn:
- warning_placeholder.error(warning_msg)
-
- with st.sidebar:
-
- ######################################################
- ### Uploading a model card from local drive
- ######################################################
- st.markdown("## Upload Model Card")
-
- st.markdown("#### Model Card must be in markdown (.md) format.")
-
- # Read a single file
- uploaded_file = st.file_uploader("Choose a file", type = ['md'], help = 'Please choose a markdown (.md) file type to upload')
- if uploaded_file is not None:
-
- file_details = {"FileName":uploaded_file.name,"FileType":uploaded_file.type}
- name_of_uploaded_file = save_uploadedfile(uploaded_file)
-
- st.session_state.markdown_upload = name_of_uploaded_file ## uploaded model card
-
- elif st.session_state.task in ('fill-mask', 'translation', 'token-classification', 'sentence-similarity', 'summarization', 'question-answering', 'text2text-generation', 'text-classification', 'text-generation', 'conversational'):
- #st.session_state.markdown_upload = open(
- # "language_model_template1.md", "r+"
- #).read()
- st.session_state.markdown_upload = "language_model_template1.md" ## language model template
-
- elif st.session_state.task:
-
- st.session_state.markdown_upload = "current_card.md" ## default non language model template
-
- #########################################
- ### Uploading model card to HUB
- #########################################
- out_markdown = open(st.session_state.markdown_upload, "r+").read()
- print_out_final = f"{out_markdown}"
- st.markdown("## Export Loaded Model Card to Hub")
- with st.form("Upload to 🤗 Hub"):
- st.markdown("Use a token with write access from [here](https://hf.co/settings/tokens)")
- token = st.text_input("Token", type='password')
- repo_id = st.text_input("Repo ID")
- submit = st.form_submit_button('Upload to 🤗 Hub', help='The current model card will be uploaded to a branch in the supplied repo ')
-
- if submit:
- if len(repo_id.split('/')) == 2:
- repo_url = create_repo(repo_id, exist_ok=True, token=token)
- new_url = card_upload(pj(),repo_id, token=token)
- st.success(f"Pushed the card to the repo [here]({new_url})!") # note: was repo_url
- else:
- st.error("Repo ID invalid. It should be username/repo-name. For example: nateraw/food")
-
-
- #########################################
- ### Download model card
- #########################################
-
-
- st.markdown("## Download current Model Card")
-
- if not st.session_state.model_name or not st.session_state.model_name.strip():
- downloaded_file_name = 'current_model_card.md'
- else:
- downloaded_file_name = st.session_state.model_name+'_'+'model_card.md'
- download_status = st.download_button(label = 'Download Model Card', data = pj(), file_name = downloaded_file_name, help = "The current model card will be downloaded as a markdown (.md) file")
- if download_status:
- st.success("Your current model card was successfully downloaded 🤗")
-
-
-def page_switcher(page):
- st.session_state.runpage = page
-
-def main():
-
- st.header("About Model Cards")
- st.markdown(Path('about.md').read_text(), unsafe_allow_html=True)
- btn = st.button('Create a Model Card 📝',on_click=page_switcher,args=(main_page,))
- if btn:
- st.experimental_rerun() # rerun is needed to clear the page
-
-if __name__ == '__main__':
- load_widget_state()
- if 'runpage' not in st.session_state :
- st.session_state.runpage = main
- st.session_state.runpage()
diff --git a/spaces/hunger11243/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py b/spaces/hunger11243/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py
deleted file mode 100644
index 3d7009c40fea3a98168e3e3bc9ae061e91327422..0000000000000000000000000000000000000000
--- a/spaces/hunger11243/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import numpy as np
-import torch
-from .monotonic_align.core import maximum_path_c
-
-
-def maximum_path(neg_cent, mask):
- """ Cython optimized version.
- neg_cent: [b, t_t, t_s]
- mask: [b, t_t, t_s]
- """
- device = neg_cent.device
- dtype = neg_cent.dtype
- neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
- path = np.zeros(neg_cent.shape, dtype=np.int32)
-
- t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
- t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
- maximum_path_c(path, neg_cent, t_t_max, t_s_max)
- return torch.from_numpy(path).to(device=device, dtype=dtype)
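A minimal usage sketch for `maximum_path`, assuming the compiled `monotonic_align.core` extension is built and the package is importable under this name (shapes and values below are purely illustrative):

```python
import torch
from monotonic_align import maximum_path  # this module, with core built

b, t_t, t_s = 2, 100, 40
neg_cent = torch.randn(b, t_t, t_s)   # alignment scores
mask = torch.zeros(b, t_t, t_s)
mask[0, :100, :40] = 1                # valid region of the first item
mask[1, :80, :30] = 1                 # shorter second item
path = maximum_path(neg_cent, mask)   # binary monotonic path, same shape as neg_cent
print(path.shape, path.unique())      # torch.Size([2, 100, 40]) tensor([0., 1.])
```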
diff --git a/spaces/imseldrith/Imagine/templates/output.html b/spaces/imseldrith/Imagine/templates/output.html
deleted file mode 100644
index 17970a40eddb18f3654d67b80a618c9ea1fc6888..0000000000000000000000000000000000000000
--- a/spaces/imseldrith/Imagine/templates/output.html
+++ /dev/null
@@ -1,172 +0,0 @@
- Generated Image
- Image Size: 1200x800 pixels
- File Format: JPEG
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Always Korean Movie Subtitle Download REPACK.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Always Korean Movie Subtitle Download REPACK.md
deleted file mode 100644
index f94c1c5cd2291ff36cf62b4249111ddbf8a8a0e9..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Always Korean Movie Subtitle Download REPACK.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-How to Watch Always (2011) with Subtitles in Any Language
-
-Always is a 2011 South Korean romantic drama film directed by Song Il Gon and starring So Ji Sub and Han Hyo Joo. It tells the story of a former boxer who falls in love with a blind woman and decides to fight again to help her regain her sight.
-
-Always is a heartwarming and emotional movie that will make you cry and smile. It has received positive reviews from critics and audiences alike, and won several awards at various film festivals. It is also one of the highest-grossing Korean films of 2011.
-Always Korean Movie Subtitle Download
Download ↔ https://urlin.us/2uExov
-
-If you want to watch Always with subtitles in your preferred language, you have several options to choose from. Here are some of the best sites to download subtitles for Always (2011):
-
-OpenSubtitles
-
-OpenSubtitles is one of the most popular and reliable sites for downloading subtitles for movies and TV shows. It has a large database of subtitles in various languages, formats, and quality. You can easily find subtitles for Always (2011) by searching for the movie title or the release name. You can also request translation or correction of subtitles if needed.
-
-To download subtitles from OpenSubtitles, you need to create a free account and log in. Then, you can browse or search for the subtitles you want and click on the download button. You can also use the OpenSubtitles app or extension to integrate subtitles with your media player.
-
-elSubtitle
-
-elSubtitle is another site that offers subtitles for movies and TV shows in different languages. It has a simple and user-friendly interface that allows you to search for subtitles by movie title, release name, or language. You can also filter subtitles by genre, year, rating, or popularity.
-
-To download subtitles from elSubtitle, you don't need to register or log in. Just click on the subtitle file you want and save it to your device. You can also use the elSubtitle player to watch movies online with subtitles.
-
-Viki
-
-Viki is a streaming platform that specializes in Asian content, including Korean movies and dramas. It has a huge collection of movies and shows with subtitles in multiple languages, including English, Spanish, French, Arabic, and more. You can watch Always (2011) on Viki with high-quality video and audio.
-
-To watch movies on Viki, you need to sign up for a free account and choose your preferred language. Then, you can browse or search for the movie you want and click on the play button. You can also adjust the subtitle settings according to your preferences.
-
-
-iQIYI
-
-iQIYI is another streaming platform that offers Asian content, including Korean movies and dramas. It has a wide range of movies and shows with subtitles in various languages, such as English, Thai, Vietnamese, Indonesian, and more. You can watch Always (2011) on iQIYI with HD video and sound.
-
-To watch movies on iQIYI, you need to create a free account and select your preferred language. Then, you can browse or search for the movie you want and click on the watch button. You can also change the subtitle settings as you like.
-
-MakeTechEasier
-
-MakeTechEasier is a site that provides tips and guides on how to use technology more efficiently and effectively. It has a section dedicated to downloading subtitles for movies and TV shows, where you can find useful information on how to find, download, and use subtitles for your favorite media.
-
-To learn more about downloading subtitles from MakeTechEasier, you can visit their website and read their articles on the topic. You can also follow their social media accounts or subscribe to their newsletter for more tech-related updates.
-
-Conclusion
-
-Always (2011) is a beautiful Korean movie that deserves to be watched with subtitles in your preferred language. Whether you want to download subtitles or stream online, you have plenty of options to choose from. We hope this article has helped you find the best site for downloading subtitles for Always (2011).
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Bdk 1.1 Zip !EXCLUSIVE! Free Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Bdk 1.1 Zip !EXCLUSIVE! Free Download.md
deleted file mode 100644
index 272bc13843a251c5792608c03bd064211de9f95e..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Bdk 1.1 Zip !EXCLUSIVE! Free Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Bdk 1.1 zip free download
Download Zip >>> https://urlin.us/2uEyTm
-
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Dragon Age Origins Trainer 11014141.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Dragon Age Origins Trainer 11014141.md
deleted file mode 100644
index a57209e7036632994cd5effa7b8b5e62265d36ee..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Dragon Age Origins Trainer 11014141.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-How to Use Dragon Age Origins Trainer 11014141 to Enhance Your Gameplay
-
-Dragon Age Origins is a classic role-playing game that lets you create your own hero and shape the fate of a world. But sometimes, you might want to tweak some aspects of the game to suit your preferences or overcome some challenges. That's where a trainer comes in handy.
-
-A trainer is a program that modifies the game's memory and allows you to activate various cheats and features. For example, you can get unlimited health, mana, gold, items, skill points, and more. You can also remove cooldowns, equip requirements, and rogue stealth limitations.
-Dragon Age Origins Trainer 11014141
Download ↔ https://urlin.us/2uExt9
-
-One of the most popular trainers for Dragon Age Origins is the one created by STiNGERR. It supports the Ultimate Edition of the game on Steam and has 14 options to choose from. You can download it for free from WeMod[^1^], a platform that hosts thousands of trainers and mods for various games.
-
-To use the trainer, you need to follow these steps:
-
-
-- Download and install WeMod from its official website.
-- Launch WeMod and create an account or log in with an existing one.
-- Search for Dragon Age Origins - Ultimate Edition in the games list and select it.
-- Click on the Play button and wait for the trainer to load.
-- Launch the game from Steam or from WeMod.
-- Once the game is running, press F1 to open the trainer menu.
-- Select the cheats you want to activate by pressing the corresponding keys.
-- Enjoy the game with your customized settings!
-
-
-Note that some antivirus programs may flag the trainer as a potential threat, but this is a false positive. You can safely ignore it or add an exception for WeMod in your antivirus settings. Also, make sure to disable any online features or multiplayer modes in the game before using the trainer, as it may cause conflicts or bans.
-
-If you want to try another trainer for Dragon Age Origins, you can also check out the one made by Brewers[^2^]. It has fewer options but it works with any version of the game. You can download it from Nexus Mods, a website that hosts many mods and trainers for various games.
-
-To use this trainer, you need to follow these steps:
-
-
-
-- Download and unzip the trainer file from Nexus Mods.
-- Launch the game from your preferred launcher.
-- Press Alt+Tab to switch to the desktop and run the trainer as administrator.
-- Select the cheats you want to activate by pressing the corresponding keys.
-- Switch back to the game and enjoy!
-
-
-Note that this trainer may not work with some mods or DLCs installed. You may also encounter some audio issues when switching between the game and the trainer. To avoid this, you can mute the trainer or lower its volume in your sound settings.
-
-Whether you use WeMod or Nexus Mods, you can find many trainers and mods for Dragon Age Origins that can enhance your gameplay experience. You can also listen to some soundtracks inspired by the game on SoundCloud[^3^] [^4^]. Have fun playing Dragon Age Origins with your trainer!
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/ESET PureFix V2.02.exe.md b/spaces/inplisQlawa/anything-midjourney-v4-1/ESET PureFix V2.02.exe.md
deleted file mode 100644
index 2e8b6f4aac7cf827080cd8ce4d6fde3d95ccaee3..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/ESET PureFix V2.02.exe.md
+++ /dev/null
@@ -1,6 +0,0 @@
-ESET PureFix v2.02.exe
Download Zip ⚙ https://urlin.us/2uEyS1
-
-Index of /utilidades/ESET NO32 Antivirus v5.094.0 X32/parche/. Name Last ... 15:06 - [BIN] ESET PureFix v2.02.exe 08-Nov-2012 15:06 336k ... 4d29de3e1b
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Karizma Designer 5.0 Software Free Download Torrent.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Karizma Designer 5.0 Software Free Download Torrent.md
deleted file mode 100644
index 6585da610b826734da922627060f0615b906cafb..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Karizma Designer 5.0 Software Free Download Torrent.md
+++ /dev/null
@@ -1,11 +0,0 @@
-karizma designer 5.0 software free download torrent
Download ··· https://urlin.us/2uEwur
-
-August 12, 2020 - You can also download Julie Pixel Blaze photo album design software. AlbumPoint 5.0 is very easy to use software. Photographers can create ...
-August 13, 2020 - This is the latest version of the Julie Paper photo printing software. New version of Julie Paper 2.0 ...
-July 27, 2020 - The following photo printer versions are available: Julie Pageprinters, Julie Paper and Julie Paper Pro.
-
-
-
diff --git a/spaces/itsjacksimon/runwayml-stable-diffusion-v1-5/app.py b/spaces/itsjacksimon/runwayml-stable-diffusion-v1-5/app.py
deleted file mode 100644
index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000
--- a/spaces/itsjacksimon/runwayml-stable-diffusion-v1-5/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch()
\ No newline at end of file
diff --git a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/rife/rife_new_gen/refine.py b/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/rife/rife_new_gen/refine.py
deleted file mode 100644
index ff3807c636d461862f13200fe0017b62db5c20c5..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/rife/rife_new_gen/refine.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import torch
-import torch.nn as nn
-import numpy as np
-from torch.optim import AdamW
-import torch.optim as optim
-import itertools
-from model.warplayer import warp
-from torch.nn.parallel import DistributedDataParallel as DDP
-import torch.nn.functional as F
-
-device = torch.device("cuda")
-
-def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
- return nn.Sequential(
- nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
- padding=padding, dilation=dilation, bias=True),
- nn.PReLU(out_planes)
- )
-
-def conv_woact(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
- return nn.Sequential(
- nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
- padding=padding, dilation=dilation, bias=True),
- )
-
-def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
- return nn.Sequential(
- torch.nn.ConvTranspose2d(in_channels=in_planes, out_channels=out_planes, kernel_size=4, stride=2, padding=1, bias=True),
- nn.PReLU(out_planes)
- )
-
-class Conv2(nn.Module):
- def __init__(self, in_planes, out_planes, stride=2):
- super(Conv2, self).__init__()
- self.conv1 = conv(in_planes, out_planes, 3, stride, 1)
- self.conv2 = conv(out_planes, out_planes, 3, 1, 1)
-
- def forward(self, x):
- x = self.conv1(x)
- x = self.conv2(x)
- return x
-
-c = 16
-class Contextnet(nn.Module):
- def __init__(self):
- super(Contextnet, self).__init__()
- self.conv1 = Conv2(3, c)
- self.conv2 = Conv2(c, 2*c)
- self.conv3 = Conv2(2*c, 4*c)
- self.conv4 = Conv2(4*c, 8*c)
-
- def forward(self, x, flow):
- x = self.conv1(x)
- flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5
- f1 = warp(x, flow)
- x = self.conv2(x)
- flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5
- f2 = warp(x, flow)
- x = self.conv3(x)
- flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5
- f3 = warp(x, flow)
- x = self.conv4(x)
- flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False) * 0.5
- f4 = warp(x, flow)
- return [f1, f2, f3, f4]
-
-class Unet(nn.Module):
- def __init__(self):
- super(Unet, self).__init__()
- self.down0 = Conv2(17, 2*c)
- self.down1 = Conv2(4*c, 4*c)
- self.down2 = Conv2(8*c, 8*c)
- self.down3 = Conv2(16*c, 16*c)
- self.up0 = deconv(32*c, 8*c)
- self.up1 = deconv(16*c, 4*c)
- self.up2 = deconv(8*c, 2*c)
- self.up3 = deconv(4*c, c)
- self.conv = nn.Conv2d(c, 3, 3, 1, 1)
-
- def forward(self, img0, img1, warped_img0, warped_img1, mask, flow, c0, c1):
- s0 = self.down0(torch.cat((img0, img1, warped_img0, warped_img1, mask, flow), 1))
- s1 = self.down1(torch.cat((s0, c0[0], c1[0]), 1))
- s2 = self.down2(torch.cat((s1, c0[1], c1[1]), 1))
- s3 = self.down3(torch.cat((s2, c0[2], c1[2]), 1))
- x = self.up0(torch.cat((s3, c0[3], c1[3]), 1))
- x = self.up1(torch.cat((x, s2), 1))
- x = self.up2(torch.cat((x, s1), 1))
- x = self.up3(torch.cat((x, s0), 1))
- x = self.conv(x)
- return torch.sigmoid(x)
diff --git a/spaces/jackli888/stable-diffusion-webui/modules/shared_items.py b/spaces/jackli888/stable-diffusion-webui/modules/shared_items.py
deleted file mode 100644
index 8dd832ed9b1e610b2ab1b4d5f911c58d63c00f80..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/modules/shared_items.py
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-def realesrgan_models_names():
- import modules.realesrgan_model
- return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
-
-
-def postprocessing_scripts():
- import modules.scripts
-
- return modules.scripts.scripts_postproc.scripts
-
-
-def sd_vae_items():
- import modules.sd_vae
-
- return ["Automatic", "None"] + list(modules.sd_vae.vae_dict)
-
-
-def refresh_vae_list():
- import modules.sd_vae
-
- modules.sd_vae.refresh_vae_list()
diff --git a/spaces/jarvisbot/ChatImprovement/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp b/spaces/jarvisbot/ChatImprovement/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp
deleted file mode 100644
index c94575903bdf2eef71ecbe66382375552446e510..0000000000000000000000000000000000000000
--- a/spaces/jarvisbot/ChatImprovement/crazy_functions/test_project/cpp/cppipc/pool_alloc.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-#include "libipc/pool_alloc.h"
-
-#include "libipc/memory/resource.h"
-
-namespace ipc {
-namespace mem {
-
-void* pool_alloc::alloc(std::size_t size) {
- return async_pool_alloc::alloc(size);
-}
-
-void pool_alloc::free(void* p, std::size_t size) {
- async_pool_alloc::free(p, size);
-}
-
-} // namespace mem
-} // namespace ipc
diff --git a/spaces/jbilcke-hf/LifeSim/src/app/types.ts b/spaces/jbilcke-hf/LifeSim/src/app/types.ts
deleted file mode 100644
index b17e19774be04031d17ee9c0d4fc635847210d81..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/LifeSim/src/app/types.ts
+++ /dev/null
@@ -1,40 +0,0 @@
-
-export interface RenderRequest {
- prompt: string
-
- // whether to use video segmentation
- // disabled (default)
- // firstframe: we only analyze the first frame
- // allframes: we analyze all the frames
- segmentation: 'disabled' | 'firstframe' | 'allframes'
-
- // segmentation will only be executed if we have a non-empty list of actionnables
- // actionnables are names of things like "chest", "key", "tree", "chair" etc
- actionnables: string[]
-
- // note: this is the number of frames for Zeroscope,
- // which is currently configured to only output 3 seconds, so:
- // nbFrames=8 -> 1 sec
- // nbFrames=16 -> 2 sec
- // nbFrames=24 -> 3 sec
- nbFrames: number // min: 8, max: 24
-
- nbSteps: number // min: 1, max: 50
-
- seed: number
-}
-
-export interface ImageSegment {
- id: number
- box: number[]
- color: number[]
- label: string
- score: number
-}
-
-export interface RenderedScene {
- assetUrl: string
- error: string
- maskBase64: string
- segments: ImageSegment[]
-}
\ No newline at end of file
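The comments on `RenderRequest` spell out the allowed values, so a request payload can be assembled directly. A hypothetical example follows; the field names come from the interface above, while the prompt, actionnables, and concrete numbers are made up:

```python
# Hypothetical payload matching the RenderRequest interface above.
render_request = {
    "prompt": "a cozy cabin in a snowy forest, cinematic lighting",
    "segmentation": "firstframe",        # 'disabled' | 'firstframe' | 'allframes'
    "actionnables": ["door", "window"],  # only meaningful when segmentation is enabled
    "nbFrames": 24,                      # 8 -> ~1 s, 16 -> ~2 s, 24 -> ~3 s (per the comment above)
    "nbSteps": 30,                       # min 1, max 50
    "seed": 42,
}
```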
diff --git a/spaces/jbilcke-hf/MusicGen/Makefile b/spaces/jbilcke-hf/MusicGen/Makefile
deleted file mode 100644
index 5bfd89dd833d7448b21073eb6ee7cfac1d5157dd..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/MusicGen/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-default: linter tests
-
-install:
- pip install -U pip
- pip install -U -e '.[dev]'
-
-linter:
- flake8 audiocraft && mypy audiocraft
- flake8 tests && mypy tests
-
-tests:
- coverage run -m pytest tests
- coverage report --include 'audiocraft/*'
-
-docs:
- pdoc3 --html -o docs -f audiocraft
-
-dist:
- python setup.py sdist
-
-.PHONY: linter tests docs dist
diff --git a/spaces/jbilcke-hf/webapp-factory-llama-node/public/js/README.md b/spaces/jbilcke-hf/webapp-factory-llama-node/public/js/README.md
deleted file mode 100644
index 63ef400c316e115c8eb0d2651960e021417d7f30..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/webapp-factory-llama-node/public/js/README.md
+++ /dev/null
@@ -1 +0,0 @@
-URL used for tailwind: https://cdn.tailwindcss.com/3.3.2?plugins=forms@0.5.3,typography@0.5.9,aspect-ratio@0.4.2,line-clamp@0.4.4
\ No newline at end of file
diff --git a/spaces/jdhuka/HTML5Interactivity/index.html b/spaces/jdhuka/HTML5Interactivity/index.html
deleted file mode 100644
index 589ffd92702c54fac9133631da2a15be27858788..0000000000000000000000000000000000000000
--- a/spaces/jdhuka/HTML5Interactivity/index.html
+++ /dev/null
@@ -1,110 +0,0 @@
- Babylon.js L-system Fractal Example
diff --git a/spaces/jgurzoni/image_background_swapper/saicinpainting/training/data/datasets.py b/spaces/jgurzoni/image_background_swapper/saicinpainting/training/data/datasets.py
deleted file mode 100644
index c4f503dafffb970d8dbaca33934da417036d1e55..0000000000000000000000000000000000000000
--- a/spaces/jgurzoni/image_background_swapper/saicinpainting/training/data/datasets.py
+++ /dev/null
@@ -1,304 +0,0 @@
-import glob
-import logging
-import os
-import random
-
-import albumentations as A
-import cv2
-import numpy as np
-import torch
-import torch.nn.functional as F
-import webdataset
-from omegaconf import open_dict, OmegaConf
-from skimage.feature import canny
-from skimage.transform import rescale, resize
-from torch.utils.data import Dataset, IterableDataset, DataLoader, DistributedSampler, ConcatDataset
-
-from saicinpainting.evaluation.data import InpaintingDataset as InpaintingEvaluationDataset, \
- OurInpaintingDataset as OurInpaintingEvaluationDataset, ceil_modulo, InpaintingEvalOnlineDataset
-from saicinpainting.training.data.aug import IAAAffine2, IAAPerspective2
-from saicinpainting.training.data.masks import get_mask_generator
-
-LOGGER = logging.getLogger(__name__)
-
-
-class InpaintingTrainDataset(Dataset):
- def __init__(self, indir, mask_generator, transform):
- self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True))
- self.mask_generator = mask_generator
- self.transform = transform
- self.iter_i = 0
-
- def __len__(self):
- return len(self.in_files)
-
- def __getitem__(self, item):
- path = self.in_files[item]
- img = cv2.imread(path)
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- img = self.transform(image=img)['image']
- img = np.transpose(img, (2, 0, 1))
- # TODO: maybe generate mask before augmentations? slower, but better for segmentation-based masks
- mask = self.mask_generator(img, iter_i=self.iter_i)
- self.iter_i += 1
- return dict(image=img,
- mask=mask)
-
-
-class InpaintingTrainWebDataset(IterableDataset):
- def __init__(self, indir, mask_generator, transform, shuffle_buffer=200):
- self.impl = webdataset.Dataset(indir).shuffle(shuffle_buffer).decode('rgb').to_tuple('jpg')
- self.mask_generator = mask_generator
- self.transform = transform
-
- def __iter__(self):
- for iter_i, (img,) in enumerate(self.impl):
- img = np.clip(img * 255, 0, 255).astype('uint8')
- img = self.transform(image=img)['image']
- img = np.transpose(img, (2, 0, 1))
- mask = self.mask_generator(img, iter_i=iter_i)
- yield dict(image=img,
- mask=mask)
-
-
-class ImgSegmentationDataset(Dataset):
- def __init__(self, indir, mask_generator, transform, out_size, segm_indir, semantic_seg_n_classes):
- self.indir = indir
- self.segm_indir = segm_indir
- self.mask_generator = mask_generator
- self.transform = transform
- self.out_size = out_size
- self.semantic_seg_n_classes = semantic_seg_n_classes
- self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True))
-
- def __len__(self):
- return len(self.in_files)
-
- def __getitem__(self, item):
- path = self.in_files[item]
- img = cv2.imread(path)
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- img = cv2.resize(img, (self.out_size, self.out_size))
- img = self.transform(image=img)['image']
- img = np.transpose(img, (2, 0, 1))
- mask = self.mask_generator(img)
- segm, segm_classes= self.load_semantic_segm(path)
- result = dict(image=img,
- mask=mask,
- segm=segm,
- segm_classes=segm_classes)
- return result
-
- def load_semantic_segm(self, img_path):
- segm_path = img_path.replace(self.indir, self.segm_indir).replace(".jpg", ".png")
- mask = cv2.imread(segm_path, cv2.IMREAD_GRAYSCALE)
- mask = cv2.resize(mask, (self.out_size, self.out_size))
- tensor = torch.from_numpy(np.clip(mask.astype(int)-1, 0, None))
- ohe = F.one_hot(tensor.long(), num_classes=self.semantic_seg_n_classes) # w x h x n_classes
- return ohe.permute(2, 0, 1).float(), tensor.unsqueeze(0)
-
-
-def get_transforms(transform_variant, out_size):
- if transform_variant == 'default':
- transform = A.Compose([
- A.RandomScale(scale_limit=0.2), # +/- 20%
- A.PadIfNeeded(min_height=out_size, min_width=out_size),
- A.RandomCrop(height=out_size, width=out_size),
- A.HorizontalFlip(),
- A.CLAHE(),
- A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
- A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
- A.ToFloat()
- ])
- elif transform_variant == 'distortions':
- transform = A.Compose([
- IAAPerspective2(scale=(0.0, 0.06)),
- IAAAffine2(scale=(0.7, 1.3),
- rotate=(-40, 40),
- shear=(-0.1, 0.1)),
- A.PadIfNeeded(min_height=out_size, min_width=out_size),
- A.OpticalDistortion(),
- A.RandomCrop(height=out_size, width=out_size),
- A.HorizontalFlip(),
- A.CLAHE(),
- A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
- A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
- A.ToFloat()
- ])
- elif transform_variant == 'distortions_scale05_1':
- transform = A.Compose([
- IAAPerspective2(scale=(0.0, 0.06)),
- IAAAffine2(scale=(0.5, 1.0),
- rotate=(-40, 40),
- shear=(-0.1, 0.1),
- p=1),
- A.PadIfNeeded(min_height=out_size, min_width=out_size),
- A.OpticalDistortion(),
- A.RandomCrop(height=out_size, width=out_size),
- A.HorizontalFlip(),
- A.CLAHE(),
- A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
- A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
- A.ToFloat()
- ])
- elif transform_variant == 'distortions_scale03_12':
- transform = A.Compose([
- IAAPerspective2(scale=(0.0, 0.06)),
- IAAAffine2(scale=(0.3, 1.2),
- rotate=(-40, 40),
- shear=(-0.1, 0.1),
- p=1),
- A.PadIfNeeded(min_height=out_size, min_width=out_size),
- A.OpticalDistortion(),
- A.RandomCrop(height=out_size, width=out_size),
- A.HorizontalFlip(),
- A.CLAHE(),
- A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
- A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
- A.ToFloat()
- ])
- elif transform_variant == 'distortions_scale03_07':
- transform = A.Compose([
- IAAPerspective2(scale=(0.0, 0.06)),
- IAAAffine2(scale=(0.3, 0.7), # scale 512 to 256 in average
- rotate=(-40, 40),
- shear=(-0.1, 0.1),
- p=1),
- A.PadIfNeeded(min_height=out_size, min_width=out_size),
- A.OpticalDistortion(),
- A.RandomCrop(height=out_size, width=out_size),
- A.HorizontalFlip(),
- A.CLAHE(),
- A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
- A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
- A.ToFloat()
- ])
- elif transform_variant == 'distortions_light':
- transform = A.Compose([
- IAAPerspective2(scale=(0.0, 0.02)),
- IAAAffine2(scale=(0.8, 1.8),
- rotate=(-20, 20),
- shear=(-0.03, 0.03)),
- A.PadIfNeeded(min_height=out_size, min_width=out_size),
- A.RandomCrop(height=out_size, width=out_size),
- A.HorizontalFlip(),
- A.CLAHE(),
- A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
- A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
- A.ToFloat()
- ])
- elif transform_variant == 'non_space_transform':
- transform = A.Compose([
- A.CLAHE(),
- A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
- A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
- A.ToFloat()
- ])
- elif transform_variant == 'no_augs':
- transform = A.Compose([
- A.ToFloat()
- ])
- else:
- raise ValueError(f'Unexpected transform_variant {transform_variant}')
- return transform
-
-
-def make_default_train_dataloader(indir, kind='default', out_size=512, mask_gen_kwargs=None, transform_variant='default',
- mask_generator_kind="mixed", dataloader_kwargs=None, ddp_kwargs=None, **kwargs):
- LOGGER.info(f'Make train dataloader {kind} from {indir}. Using mask generator={mask_generator_kind}')
-
- mask_generator = get_mask_generator(kind=mask_generator_kind, kwargs=mask_gen_kwargs)
- transform = get_transforms(transform_variant, out_size)
-
- if kind == 'default':
- dataset = InpaintingTrainDataset(indir=indir,
- mask_generator=mask_generator,
- transform=transform,
- **kwargs)
- elif kind == 'default_web':
- dataset = InpaintingTrainWebDataset(indir=indir,
- mask_generator=mask_generator,
- transform=transform,
- **kwargs)
- elif kind == 'img_with_segm':
- dataset = ImgSegmentationDataset(indir=indir,
- mask_generator=mask_generator,
- transform=transform,
- out_size=out_size,
- **kwargs)
- else:
- raise ValueError(f'Unknown train dataset kind {kind}')
-
- if dataloader_kwargs is None:
- dataloader_kwargs = {}
-
- is_dataset_only_iterable = kind in ('default_web',)
-
- if ddp_kwargs is not None and not is_dataset_only_iterable:
- dataloader_kwargs['shuffle'] = False
- dataloader_kwargs['sampler'] = DistributedSampler(dataset, **ddp_kwargs)
-
- if is_dataset_only_iterable and 'shuffle' in dataloader_kwargs:
- with open_dict(dataloader_kwargs):
- del dataloader_kwargs['shuffle']
-
- dataloader = DataLoader(dataset, **dataloader_kwargs)
- return dataloader
-
-
-def make_default_val_dataset(indir, kind='default', out_size=512, transform_variant='default', **kwargs):
- if OmegaConf.is_list(indir) or isinstance(indir, (tuple, list)):
- return ConcatDataset([
- make_default_val_dataset(idir, kind=kind, out_size=out_size, transform_variant=transform_variant, **kwargs) for idir in indir
- ])
-
- LOGGER.info(f'Make val dataloader {kind} from {indir}')
- mask_generator = get_mask_generator(kind=kwargs.get("mask_generator_kind"), kwargs=kwargs.get("mask_gen_kwargs"))
-
- if transform_variant is not None:
- transform = get_transforms(transform_variant, out_size)
-
- if kind == 'default':
- dataset = InpaintingEvaluationDataset(indir, **kwargs)
- elif kind == 'our_eval':
- dataset = OurInpaintingEvaluationDataset(indir, **kwargs)
- elif kind == 'img_with_segm':
- dataset = ImgSegmentationDataset(indir=indir,
- mask_generator=mask_generator,
- transform=transform,
- out_size=out_size,
- **kwargs)
- elif kind == 'online':
- dataset = InpaintingEvalOnlineDataset(indir=indir,
- mask_generator=mask_generator,
- transform=transform,
- out_size=out_size,
- **kwargs)
- else:
- raise ValueError(f'Unknown val dataset kind {kind}')
-
- return dataset
-
-
-def make_default_val_dataloader(*args, dataloader_kwargs=None, **kwargs):
- dataset = make_default_val_dataset(*args, **kwargs)
-
- if dataloader_kwargs is None:
- dataloader_kwargs = {}
- dataloader = DataLoader(dataset, **dataloader_kwargs)
- return dataloader
-
-
-def make_constant_area_crop_params(img_height, img_width, min_size=128, max_size=512, area=256*256, round_to_mod=16):
- min_size = min(img_height, img_width, min_size)
- max_size = min(img_height, img_width, max_size)
- if random.random() < 0.5:
- out_height = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod))
- out_width = min(max_size, ceil_modulo(area // out_height, round_to_mod))
- else:
- out_width = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod))
- out_height = min(max_size, ceil_modulo(area // out_width, round_to_mod))
-
- start_y = random.randint(0, img_height - out_height)
- start_x = random.randint(0, img_width - out_width)
- return (start_y, start_x, out_height, out_width)
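`make_constant_area_crop_params` keeps the crop area roughly constant while randomizing the aspect ratio, rounding both sides to a multiple of `round_to_mod`. A small sketch that samples a few crop windows (the image size is chosen arbitrarily):

```python
# Sample a few constant-area crop windows for a 1024x768 image.
for _ in range(3):
    y, x, h, w = make_constant_area_crop_params(
        img_height=768, img_width=1024,
        min_size=128, max_size=512,
        area=256 * 256, round_to_mod=16,
    )
    print(f"crop at ({y}, {x}) of size {h}x{w}, area ~{h * w}")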
diff --git a/spaces/jimschat/VITS-Umamusume-voice-synthesizer/text/thai.py b/spaces/jimschat/VITS-Umamusume-voice-synthesizer/text/thai.py
deleted file mode 100644
index 998207c01a85c710a46db1ec8b62c39c2d94bc84..0000000000000000000000000000000000000000
--- a/spaces/jimschat/VITS-Umamusume-voice-synthesizer/text/thai.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import re
-from num_thai.thainumbers import NumThai
-
-
-num = NumThai()
-
-# List of (Latin alphabet, Thai) pairs:
-_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'เอ'),
- ('b','บี'),
- ('c','ซี'),
- ('d','ดี'),
- ('e','อี'),
- ('f','เอฟ'),
- ('g','จี'),
- ('h','เอช'),
- ('i','ไอ'),
- ('j','เจ'),
- ('k','เค'),
- ('l','แอล'),
- ('m','เอ็ม'),
- ('n','เอ็น'),
- ('o','โอ'),
- ('p','พี'),
- ('q','คิว'),
- ('r','แอร์'),
- ('s','เอส'),
- ('t','ที'),
- ('u','ยู'),
- ('v','วี'),
- ('w','ดับเบิลยู'),
- ('x','เอ็กซ์'),
- ('y','วาย'),
- ('z','ซี')
-]]
-
-
-def num_to_thai(text):
- return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text)
-
-def latin_to_thai(text):
- for regex, replacement in _latin_to_thai:
- text = re.sub(regex, replacement, text)
- return text
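A short usage sketch for the helpers above; the exact Thai output depends on `num_thai`, so the comments only describe the transformation rather than asserting a result:

```python
# Convert digits to Thai number words, then spell out Latin letters in Thai.
text = "gpt 4"
text = num_to_thai(text)     # "4" is rewritten via num_thai
text = latin_to_thai(text)   # each Latin letter is replaced by its Thai spelling
print(text)
```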
diff --git a/spaces/jonathang/YoutubeSmartSpeed/README.md b/spaces/jonathang/YoutubeSmartSpeed/README.md
deleted file mode 100644
index 5fdc20172cf2ae3d0089858fa248c9d685c3576f..0000000000000000000000000000000000000000
--- a/spaces/jonathang/YoutubeSmartSpeed/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: YoutubeSmartSpeed
-emoji: 😻
-colorFrom: indigo
-colorTo: green
-sdk: gradio
-sdk_version: 3.36.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/jordonpeter01/MusicGen2/tests/data/__init__.py b/spaces/jordonpeter01/MusicGen2/tests/data/__init__.py
deleted file mode 100644
index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000
--- a/spaces/jordonpeter01/MusicGen2/tests/data/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/spaces/jsdt/lol-predictor/app.py b/spaces/jsdt/lol-predictor/app.py
deleted file mode 100644
index d7a6308b7197c0e7742a55c0b727b9861e9aea3a..0000000000000000000000000000000000000000
--- a/spaces/jsdt/lol-predictor/app.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import gradio as gr
-
-def greet(name):
- return "Hello " + name + "!!"
-
-gr.Interface(fn=greet, inputs="text", outputs="text").launch()
diff --git a/spaces/juancopi81/whisper-youtube-2-hf_dataset/storing/sqlitecontextmanager.py b/spaces/juancopi81/whisper-youtube-2-hf_dataset/storing/sqlitecontextmanager.py
deleted file mode 100644
index c63ad892b18fc64288db31730bfc8742454fe357..0000000000000000000000000000000000000000
--- a/spaces/juancopi81/whisper-youtube-2-hf_dataset/storing/sqlitecontextmanager.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import sqlite3
-
-class SQLiteContextManager:
- """Context manager for SQLite db, that handles
- db open / closing connection.
- """
-
- def __init__(self, db_path: str) -> None:
- self.db_path = db_path
- self.connection = None
-
- def __enter__(self):
- """Establish connection with db and return cursor to be used
- to execute queries.
- """
- self.connection = sqlite3.connect(self.db_path)
- return self.connection.cursor()
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- """Commit queries and close db connection.
- """
- self.connection.commit()
- self.connection.close()
\ No newline at end of file
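A hypothetical usage sketch for `SQLiteContextManager`; the table and column names are made up for illustration:

```python
# Open the db, run a couple of statements, then commit and close on exit.
with SQLiteContextManager("transcriptions.db") as cursor:
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS videos (id TEXT PRIMARY KEY, title TEXT)"
    )
    cursor.execute(
        "INSERT OR IGNORE INTO videos VALUES (?, ?)", ("abc123", "Demo video")
    )
# __exit__ has committed the transaction and closed the connection here.
```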
diff --git a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/scalable_t5/__init__.py b/spaces/juancopi81/youtube-music-transcribe/t5x/examples/scalable_t5/__init__.py
deleted file mode 100644
index da022c16301721a096a208e8bdb2a71bb87f9788..0000000000000000000000000000000000000000
--- a/spaces/juancopi81/youtube-music-transcribe/t5x/examples/scalable_t5/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2022 The T5X Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This empty file is needed for loading the gin files in this directory.
diff --git a/spaces/juuxn/SimpleRVC/utils/__init__.py b/spaces/juuxn/SimpleRVC/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/kadirnar/yolor/yolor/utils/__init__.py b/spaces/kadirnar/yolor/yolor/utils/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/spaces/kadirnar/yolor/yolor/utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/spaces/kbora/minerva-generate-docker/diffmodels/diffusion_utils.py b/spaces/kbora/minerva-generate-docker/diffmodels/diffusion_utils.py
deleted file mode 100644
index 512a477f5a7b3caa61191da014618a2670f83ef7..0000000000000000000000000000000000000000
--- a/spaces/kbora/minerva-generate-docker/diffmodels/diffusion_utils.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# Utility class for loading and using diffusers model
-import diffusers
-import transformers
-
-import torch
-from typing import Union
-import os
-import warnings
-import numpy as np
-from PIL import Image
-import tqdm
-from copy import deepcopy
-import matplotlib.pyplot as plt
-
-def build_generator(
- device : torch.device,
- seed : int,
-):
- """
- Build a torch.Generator with a given seed.
- """
- generator = torch.Generator(device).manual_seed(seed)
- return generator
-
-def load_stablediffusion_model(
- model_id : Union[str, os.PathLike],
- device : torch.device,
- ):
- """
- Load a complete diffusion model from a model id.
- Returns a tuple of the model and a torch.Generator if seed is not None.
-
- """
- pipe = diffusers.DiffusionPipeline.from_pretrained(
- model_id,
- revision="fp16",
- torch_dtype=torch.float16,
- use_auth_token=True,
- )
- pipe.scheduler = diffusers.DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
- try:
- pipe = pipe.to(device)
- except:
- warnings.warn(
- f'Could not load model to device:{device}. Using CPU instead.'
- )
- pipe = pipe.to('cpu')
- device = 'cpu'
-
- return pipe
-
-
-def visualize_image_grid(
- imgs : np.array,
- rows : int,
- cols : int):
-
- assert len(imgs) == rows*cols
-
- # create grid
- w, h = imgs[0].size # assuming each image is the same size
-
- grid = Image.new('RGB', size=(cols*w, rows*h))
-
- for i,img in enumerate(imgs):
- grid.paste(img, box=(i%cols*w, i//cols*h))
- return grid
-
-
-def build_pipeline(
- autoencoder : Union[str, os.PathLike] = "CompVis/stable-diffusion-v1-4",
- tokenizer : Union[str, os.PathLike] = "openai/clip-vit-large-patch14",
- text_encoder : Union[str, os.PathLike] = "openai/clip-vit-large-patch14",
- unet : Union[str, os.PathLike] = "CompVis/stable-diffusion-v1-4",
- device : torch.device = torch.device('cuda'),
- ):
- """
- Create a pipeline for StableDiffusion by loading the model and component seperetely.
- Arguments:
- autoencoder: path to model that autoencoder will be loaded from
- tokenizer: path to tokenizer
- text_encoder: path to text_encoder
- unet: path to unet
- """
- # Load the VAE for encoding images into the latent space
- vae = diffusers.AutoencoderKL.from_pretrained(autoencoder, subfolder = 'vae')
-
- # Load tokenizer & text encoder for encoding text into the latent space
- tokenizer = transformers.CLIPTokenizer.from_pretrained(tokenizer)
- text_encoder = transformers.CLIPTextModel.from_pretrained(text_encoder)
-
- # Use the UNet model for conditioning the diffusion process
- unet = diffusers.UNet2DConditionModel.from_pretrained(unet, subfolder = 'unet')
-
- # Move all the components to device
- vae = vae.to(device)
- text_encoder = text_encoder.to(device)
- unet = unet.to(device)
-
- return vae, tokenizer, text_encoder, unet
-
-#TODO : Add negative prompting
-def custom_stablediffusion_inference(
- vae,
- tokenizer,
- text_encoder,
- unet,
- noise_scheduler,
- prompt : list,
- device : torch.device,
- num_inference_steps = 100,
- image_size = (512,512),
- guidance_scale = 8,
- seed = 42,
- return_image_step = 5,
- ):
- # Get the text embeddings that will condition the diffusion process
- if isinstance(prompt,str):
- prompt = [prompt]
-
- batch_size = len(prompt)
- text_input = tokenizer(
- prompt,
- padding = 'max_length',
- truncation = True,
- max_length = tokenizer.model_max_length,
- return_tensors = 'pt').to(device)
-
- text_embeddings = text_encoder(
- text_input.input_ids.to(device)
- )[0]
-
- # Get the text embeddings for classifier-free guidance
- max_length = text_input.input_ids.shape[-1]
- empty = [""] * batch_size
- uncond_input = tokenizer(
- empty,
- padding = 'max_length',
- truncation = True,
- max_length = max_length,
- return_tensors = 'pt').to(device)
-
- uncond_embeddings = text_encoder(
- uncond_input.input_ids.to(device)
- )[0]
-
- # Concatenate the text embeddings to get the conditioning vector
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- # Generate initial noise
- latents = torch.randn(
- (1, unet.in_channels, image_size[0] // 8, image_size[1] // 8),
- generator=torch.manual_seed(seed) if seed is not None else None
- )
- print(latents.shape)
-
- latents = latents.to(device)
-
- # Initialize scheduler for noise generation
- noise_scheduler.set_timesteps(num_inference_steps)
-
- latents = latents * noise_scheduler.init_noise_sigma
-
- noise_scheduler.set_timesteps(num_inference_steps)
- for i,t in tqdm.tqdm(enumerate(noise_scheduler.timesteps)):
- # If no text embedding is provided (classifier-free guidance), extend the conditioning vector
- latent_model_input = torch.cat([latents] * 2)
-
- latent_model_input = noise_scheduler.scale_model_input(latent_model_input, timestep=t)
-
- with torch.no_grad():
- # Get the noise prediction from the UNet
- noise_pred = unet(latent_model_input, t, encoder_hidden_states = text_embeddings).sample
-
- # Perform guidance from the text embeddings
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # Compute the previously noisy sample x_t -> x_t-1
- latents = noise_scheduler.step(noise_pred, t, latents).prev_sample
-
- # Now that latent is generated from a noise, use unet decoder to generate images
- if i % return_image_step == 0:
- with torch.no_grad():
- latents_copy = deepcopy(latents)
- image = vae.decode(1/0.18215 * latents_copy).sample
-
- image = (image / 2 + 0.5).clamp(0,1)
- image = image.detach().cpu().permute(0,2,3,1).numpy() # bxhxwxc
- images = (image * 255).round().astype("uint8")
-
- pil_images = [Image.fromarray(img) for img in images]
-
- yield pil_images[0]
-
- yield pil_images[0]
-
-if __name__ == "__main__":
- device = torch.device("cpu")
- model_id = "stabilityai/stable-diffusion-2-1"
- tokenizer_id = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
- #noise_scheduler = diffusers.LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
- noise_scheduler = diffusers.DPMSolverMultistepScheduler.from_pretrained(model_id,subfolder="scheduler")
- prompt = "A Hyperrealistic photograph of Italian architectural modern home in Italy, lens flares,\
- cinematic, hdri, matte painting, concept art, celestial, soft render, highly detailed, octane\
- render, architectural HD, HQ, 4k, 8k"
-
- vae, tokenizer, text_encoder, unet = build_pipeline(
- autoencoder = model_id,
- tokenizer=tokenizer_id,
- text_encoder=tokenizer_id,
- unet=model_id,
- device=device,
- )
- image_iter = custom_stablediffusion_inference(vae, tokenizer, text_encoder, unet, noise_scheduler, prompt = prompt, device=device, seed = None)
- for i, image in enumerate(image_iter):
- image.save(f"step_{i}.png")
-
diff --git a/spaces/kevinwang676/VALLE/utils/g2p/mandarin.py b/spaces/kevinwang676/VALLE/utils/g2p/mandarin.py
deleted file mode 100644
index da7680b7a4e65de8cac1c9afd9a271b0bc666a7c..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VALLE/utils/g2p/mandarin.py
+++ /dev/null
@@ -1,326 +0,0 @@
-import os
-import sys
-import re
-import jieba
-import cn2an
-import logging
-
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'ㄟˉ'),
- ('b', 'ㄅㄧˋ'),
- ('c', 'ㄙㄧˉ'),
- ('d', 'ㄉㄧˋ'),
- ('e', 'ㄧˋ'),
- ('f', 'ㄝˊㄈㄨˋ'),
- ('g', 'ㄐㄧˋ'),
- ('h', 'ㄝˇㄑㄩˋ'),
- ('i', 'ㄞˋ'),
- ('j', 'ㄐㄟˋ'),
- ('k', 'ㄎㄟˋ'),
- ('l', 'ㄝˊㄛˋ'),
- ('m', 'ㄝˊㄇㄨˋ'),
- ('n', 'ㄣˉ'),
- ('o', 'ㄡˉ'),
- ('p', 'ㄆㄧˉ'),
- ('q', 'ㄎㄧㄡˉ'),
- ('r', 'ㄚˋ'),
- ('s', 'ㄝˊㄙˋ'),
- ('t', 'ㄊㄧˋ'),
- ('u', 'ㄧㄡˉ'),
- ('v', 'ㄨㄧˉ'),
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
- ('y', 'ㄨㄞˋ'),
- ('z', 'ㄗㄟˋ')
-]]
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'ʧ⁼'),
- ('ㄑ', 'ʧʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ʦ`⁼'),
- ('ㄔ', 'ʦ`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ʦ⁼'),
- ('ㄘ', 'ʦʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'e'),
- ('ㄞ', 'ai'),
- ('ㄟ', 'ei'),
- ('ㄠ', 'au'),
- ('ㄡ', 'ou'),
- ('ㄧㄢ', 'yeNN'),
- ('ㄢ', 'aNN'),
- ('ㄧㄣ', 'iNN'),
- ('ㄣ', 'əNN'),
- ('ㄤ', 'aNg'),
- ('ㄧㄥ', 'iNg'),
- ('ㄨㄥ', 'uNg'),
- ('ㄩㄥ', 'yuNg'),
- ('ㄥ', 'əNg'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-# List of (romaji, ipa) pairs:
-_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('ʃy', 'ʃ'),
- ('ʧʰy', 'ʧʰ'),
- ('ʧ⁼y', 'ʧ⁼'),
- ('NN', 'n'),
- ('Ng', 'ŋ'),
- ('y', 'j'),
- ('h', 'x')
-]]
-
-# List of (bopomofo, ipa) pairs:
-_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'x'),
- ('ㄐ', 'tʃ⁼'),
- ('ㄑ', 'tʃʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ts`⁼'),
- ('ㄔ', 'ts`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ts⁼'),
- ('ㄘ', 'tsʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'ɛ'),
- ('ㄞ', 'aɪ'),
- ('ㄟ', 'eɪ'),
- ('ㄠ', 'ɑʊ'),
- ('ㄡ', 'oʊ'),
- ('ㄧㄢ', 'jɛn'),
- ('ㄩㄢ', 'ɥæn'),
- ('ㄢ', 'an'),
- ('ㄧㄣ', 'in'),
- ('ㄩㄣ', 'ɥn'),
- ('ㄣ', 'ən'),
- ('ㄤ', 'ɑŋ'),
- ('ㄧㄥ', 'iŋ'),
- ('ㄨㄥ', 'ʊŋ'),
- ('ㄩㄥ', 'jʊŋ'),
- ('ㄥ', 'əŋ'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-# List of (bopomofo, ipa2) pairs:
-_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'pwo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'tɕ'),
- ('ㄑ', 'tɕʰ'),
- ('ㄒ', 'ɕ'),
- ('ㄓ', 'tʂ'),
- ('ㄔ', 'tʂʰ'),
- ('ㄕ', 'ʂ'),
- ('ㄖ', 'ɻ'),
- ('ㄗ', 'ts'),
- ('ㄘ', 'tsʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ɤ'),
- ('ㄝ', 'ɛ'),
- ('ㄞ', 'aɪ'),
- ('ㄟ', 'eɪ'),
- ('ㄠ', 'ɑʊ'),
- ('ㄡ', 'oʊ'),
- ('ㄧㄢ', 'jɛn'),
- ('ㄩㄢ', 'yæn'),
- ('ㄢ', 'an'),
- ('ㄧㄣ', 'in'),
- ('ㄩㄣ', 'yn'),
- ('ㄣ', 'ən'),
- ('ㄤ', 'ɑŋ'),
- ('ㄧㄥ', 'iŋ'),
- ('ㄨㄥ', 'ʊŋ'),
- ('ㄩㄥ', 'jʊŋ'),
- ('ㄥ', 'ɤŋ'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'y'),
- ('ˉ', '˥'),
- ('ˊ', '˧˥'),
- ('ˇ', '˨˩˦'),
- ('ˋ', '˥˩'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-
-def number_to_chinese(text):
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
- for number in numbers:
- text = text.replace(number, cn2an.an2cn(number), 1)
- return text
-
-
-def chinese_to_bopomofo(text):
- from pypinyin import lazy_pinyin, BOPOMOFO
- text = text.replace('、', ',').replace(';', ',').replace(':', ',')
- words = jieba.lcut(text, cut_all=False)
- text = ''
- for word in words:
- bopomofos = lazy_pinyin(word, BOPOMOFO)
- if not re.search('[\u4e00-\u9fff]', word):
- text += word
- continue
- for i in range(len(bopomofos)):
- bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i])
- if text != '':
- text += ' '
- text += ''.join(bopomofos)
- return text
-
-
-def latin_to_bopomofo(text):
- for regex, replacement in _latin_to_bopomofo:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_romaji(text):
- for regex, replacement in _bopomofo_to_romaji:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_ipa(text):
- for regex, replacement in _bopomofo_to_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_ipa2(text):
- for regex, replacement in _bopomofo_to_ipa2:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def chinese_to_romaji(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_romaji(text)
- text = re.sub('i([aoe])', r'y\1', text)
- text = re.sub('u([aoəe])', r'w\1', text)
- text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
- r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
- text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
- return text
-
-
-def chinese_to_lazy_ipa(text):
- text = chinese_to_romaji(text)
- for regex, replacement in _romaji_to_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def chinese_to_ipa(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_ipa(text)
- text = re.sub('i([aoe])', r'j\1', text)
- text = re.sub('u([aoəe])', r'w\1', text)
- text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
- r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
- text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
- return text
-
-
-def chinese_to_ipa2(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_ipa2(text)
- text = re.sub(r'i([aoe])', r'j\1', text)
- text = re.sub(r'u([aoəe])', r'w\1', text)
- text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text)
- text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text)
- return text
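A minimal usage sketch for the conversion chain above, assuming `jieba`, `cn2an` and `pypinyin` are installed and the file is importable as `utils.g2p.mandarin` (the path in the diff header); the sample sentence is illustrative only:

```python
# Sketch only: exercises the public helpers defined above.
from utils.g2p import mandarin

text = "我有2个苹果"

hanzi = mandarin.number_to_chinese(text)        # digits -> Chinese numerals
bopomofo = mandarin.chinese_to_bopomofo(hanzi)  # hanzi -> bopomofo with tone marks
print(bopomofo)

print(mandarin.chinese_to_romaji(text))         # full chain to romaji with tone arrows
print(mandarin.chinese_to_ipa(text))            # full chain to IPA
```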
diff --git a/spaces/kevinwang676/VoiceChanger/src/utils/model2safetensor.py b/spaces/kevinwang676/VoiceChanger/src/utils/model2safetensor.py
deleted file mode 100644
index 50c485000d43ba9c230a0bc64ce8aeaaec6e2b29..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VoiceChanger/src/utils/model2safetensor.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import torch
-import yaml
-import os
-
-import safetensors
-from safetensors.torch import save_file
-from yacs.config import CfgNode as CN
-import sys
-
-sys.path.append('/apdcephfs/private_shadowcun/SadTalker')
-
-from src.face3d.models import networks
-
-from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector
-from src.facerender.modules.mapping import MappingNet
-from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
-
-from src.audio2pose_models.audio2pose import Audio2Pose
-from src.audio2exp_models.networks import SimpleWrapperV2
-from src.test_audio2coeff import load_cpk
-
-size = 256
-############ face vid2vid
-config_path = os.path.join('src', 'config', 'facerender.yaml')
-current_root_path = '.'
-
-path_of_net_recon_model = os.path.join(current_root_path, 'checkpoints', 'epoch_20.pth')
-net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='')
-checkpoint = torch.load(path_of_net_recon_model, map_location='cpu')
-net_recon.load_state_dict(checkpoint['net_recon'])
-
-with open(config_path) as f:
- config = yaml.safe_load(f)
-
-generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],
- **config['model_params']['common_params'])
-kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],
- **config['model_params']['common_params'])
-he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],
- **config['model_params']['common_params'])
-mapping = MappingNet(**config['model_params']['mapping_params'])
-
-def load_cpk_facevid2vid(checkpoint_path, generator=None, discriminator=None,
- kp_detector=None, he_estimator=None, optimizer_generator=None,
- optimizer_discriminator=None, optimizer_kp_detector=None,
- optimizer_he_estimator=None, device="cpu"):
-
- checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
- if generator is not None:
- generator.load_state_dict(checkpoint['generator'])
- if kp_detector is not None:
- kp_detector.load_state_dict(checkpoint['kp_detector'])
- if he_estimator is not None:
- he_estimator.load_state_dict(checkpoint['he_estimator'])
- if discriminator is not None:
- try:
- discriminator.load_state_dict(checkpoint['discriminator'])
- except:
- print ('No discriminator in the state-dict. Discriminator will be randomly initialized')
- if optimizer_generator is not None:
- optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
- if optimizer_discriminator is not None:
- try:
- optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
- except RuntimeError as e:
- print ('No discriminator optimizer in the state-dict. Optimizer will not be initialized')
- if optimizer_kp_detector is not None:
- optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
- if optimizer_he_estimator is not None:
- optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
-
- return checkpoint['epoch']
-
-
-def load_cpk_facevid2vid_safetensor(checkpoint_path, generator=None,
- kp_detector=None, he_estimator=None,
- device="cpu"):
-
- checkpoint = safetensors.torch.load_file(checkpoint_path)
-
- if generator is not None:
- x_generator = {}
- for k,v in checkpoint.items():
- if 'generator' in k:
- x_generator[k.replace('generator.', '')] = v
- generator.load_state_dict(x_generator)
- if kp_detector is not None:
- x_generator = {}
- for k,v in checkpoint.items():
- if 'kp_extractor' in k:
- x_generator[k.replace('kp_extractor.', '')] = v
- kp_detector.load_state_dict(x_generator)
- if he_estimator is not None:
- x_generator = {}
- for k,v in checkpoint.items():
- if 'he_estimator' in k:
- x_generator[k.replace('he_estimator.', '')] = v
- he_estimator.load_state_dict(x_generator)
-
- return None
-
-free_view_checkpoint = '/apdcephfs/private_shadowcun/SadTalker/checkpoints/facevid2vid_'+str(size)+'-model.pth.tar'
-load_cpk_facevid2vid(free_view_checkpoint, kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)
-
-wav2lip_checkpoint = os.path.join(current_root_path, 'checkpoints', 'wav2lip.pth')
-
-audio2pose_checkpoint = os.path.join(current_root_path, 'checkpoints', 'auido2pose_00140-model.pth')
-audio2pose_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2pose.yaml')
-
-audio2exp_checkpoint = os.path.join(current_root_path, 'checkpoints', 'auido2exp_00300-model.pth')
-audio2exp_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2exp.yaml')
-
-fcfg_pose = open(audio2pose_yaml_path)
-cfg_pose = CN.load_cfg(fcfg_pose)
-cfg_pose.freeze()
-audio2pose_model = Audio2Pose(cfg_pose, wav2lip_checkpoint)
-audio2pose_model.eval()
-load_cpk(audio2pose_checkpoint, model=audio2pose_model, device='cpu')
-
-# load audio2exp_model
-netG = SimpleWrapperV2()
-netG.eval()
-load_cpk(audio2exp_checkpoint, model=netG, device='cpu')
-
-class SadTalker(torch.nn.Module):
- def __init__(self, kp_extractor, generator, netG, audio2pose, face_3drecon):
- super(SadTalker, self).__init__()
- self.kp_extractor = kp_extractor
- self.generator = generator
- self.audio2exp = netG
- self.audio2pose = audio2pose
- self.face_3drecon = face_3drecon
-
-
-model = SadTalker(kp_extractor, generator, netG, audio2pose_model, net_recon)
-
-# here, we want to convert it to safetensor
-save_file(model.state_dict(), "checkpoints/SadTalker_V0.0.2_"+str(size)+".safetensors")
-
-### test
-load_cpk_facevid2vid_safetensor('checkpoints/SadTalker_V0.0.2_'+str(size)+'.safetensors', kp_detector=kp_extractor, generator=generator, he_estimator=None)
\ No newline at end of file
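The script above is hard-wired to the SadTalker checkpoints; the conversion step itself is generic. A hedged sketch of the same .pth-to-.safetensors round trip, with placeholder paths rather than the checkpoints used above:

```python
# Sketch only: paths are placeholders, not the SadTalker checkpoints above.
import torch
from safetensors.torch import save_file, load_file

ckpt = torch.load("checkpoints/model.pth", map_location="cpu")
state_dict = ckpt.get("state_dict", ckpt) if isinstance(ckpt, dict) else ckpt

# safetensors stores a flat {name: tensor} mapping, so drop non-tensor entries
# (epoch counters, optimizer states, ...) and make the tensors contiguous.
tensors = {k: v.contiguous() for k, v in state_dict.items() if isinstance(v, torch.Tensor)}
save_file(tensors, "checkpoints/model.safetensors")

# round-trip check
reloaded = load_file("checkpoints/model.safetensors")
assert set(reloaded) == set(tensors)
```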
diff --git a/spaces/kevinwang676/VoiceChangers/src/face3d/util/visualizer.py b/spaces/kevinwang676/VoiceChangers/src/face3d/util/visualizer.py
deleted file mode 100644
index 4023a6d4086acba9bc88e079f625194d324d7c9e..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VoiceChangers/src/face3d/util/visualizer.py
+++ /dev/null
@@ -1,227 +0,0 @@
-"""This script defines the visualizer for Deep3DFaceRecon_pytorch
-"""
-
-import numpy as np
-import os
-import sys
-import ntpath
-import time
-from . import util, html
-from subprocess import Popen, PIPE
-from torch.utils.tensorboard import SummaryWriter
-
-def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
- """Save images to the disk.
-
- Parameters:
- webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
- visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
- image_path (str) -- the string is used to create image paths
- aspect_ratio (float) -- the aspect ratio of saved images
- width (int) -- the images will be resized to width x width
-
- This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
- """
- image_dir = webpage.get_image_dir()
- short_path = ntpath.basename(image_path[0])
- name = os.path.splitext(short_path)[0]
-
- webpage.add_header(name)
- ims, txts, links = [], [], []
-
- for label, im_data in visuals.items():
- im = util.tensor2im(im_data)
- image_name = '%s/%s.png' % (label, name)
- os.makedirs(os.path.join(image_dir, label), exist_ok=True)
- save_path = os.path.join(image_dir, image_name)
- util.save_image(im, save_path, aspect_ratio=aspect_ratio)
- ims.append(image_name)
- txts.append(label)
- links.append(image_name)
- webpage.add_images(ims, txts, links, width=width)
-
-
-class Visualizer():
- """This class includes several functions that can display/save images and print/save logging information.
-
- It uses TensorBoard (via torch.utils.tensorboard) for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
- """
-
- def __init__(self, opt):
- """Initialize the Visualizer class
-
- Parameters:
- opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
- Step 1: Cache the training/test options
- Step 2: create a tensorboard writer
- Step 3: create an HTML object for saving HTML files
- Step 4: create a logging file to store training losses
- """
- self.opt = opt # cache the option
- self.use_html = opt.isTrain and not opt.no_html
- self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, 'logs', opt.name))
- self.win_size = opt.display_winsize
- self.name = opt.name
- self.saved = False
- if self.use_html: # create an HTML object at /web/; images will be saved under /web/images/
- self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
- self.img_dir = os.path.join(self.web_dir, 'images')
- print('create web directory %s...' % self.web_dir)
- util.mkdirs([self.web_dir, self.img_dir])
- # create a logging file to store training losses
- self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
- with open(self.log_name, "a") as log_file:
- now = time.strftime("%c")
- log_file.write('================ Training Loss (%s) ================\n' % now)
-
- def reset(self):
- """Reset the self.saved status"""
- self.saved = False
-
-
- def display_current_results(self, visuals, total_iters, epoch, save_result):
- """Display current results on TensorBoard; save current results to an HTML file.
-
- Parameters:
- visuals (OrderedDict) - - dictionary of images to display or save
- total_iters (int) -- total iterations
- epoch (int) - - the current epoch
- save_result (bool) - - if save the current results to an HTML file
- """
- for label, image in visuals.items():
- self.writer.add_image(label, util.tensor2im(image), total_iters, dataformats='HWC')
-
- if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
- self.saved = True
- # save images to the disk
- for label, image in visuals.items():
- image_numpy = util.tensor2im(image)
- img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
- util.save_image(image_numpy, img_path)
-
- # update website
- webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0)
- for n in range(epoch, 0, -1):
- webpage.add_header('epoch [%d]' % n)
- ims, txts, links = [], [], []
-
- for label, image_numpy in visuals.items():
- image_numpy = util.tensor2im(image_numpy)
- img_path = 'epoch%.3d_%s.png' % (n, label)
- ims.append(img_path)
- txts.append(label)
- links.append(img_path)
- webpage.add_images(ims, txts, links, width=self.win_size)
- webpage.save()
-
- def plot_current_losses(self, total_iters, losses):
- # G_loss_collection = {}
- # D_loss_collection = {}
- # for name, value in losses.items():
- # if 'G' in name or 'NCE' in name or 'idt' in name:
- # G_loss_collection[name] = value
- # else:
- # D_loss_collection[name] = value
- # self.writer.add_scalars('G_collec', G_loss_collection, total_iters)
- # self.writer.add_scalars('D_collec', D_loss_collection, total_iters)
- for name, value in losses.items():
- self.writer.add_scalar(name, value, total_iters)
-
- # losses: same format as |losses| of plot_current_losses
- def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
- """print current losses on console; also save the losses to the disk
-
- Parameters:
- epoch (int) -- current epoch
- iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
- losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
- t_comp (float) -- computational time per data point (normalized by batch_size)
- t_data (float) -- data loading time per data point (normalized by batch_size)
- """
- message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
- for k, v in losses.items():
- message += '%s: %.3f ' % (k, v)
-
- print(message) # print the message
- with open(self.log_name, "a") as log_file:
- log_file.write('%s\n' % message) # save the message
-
-
-class MyVisualizer:
- def __init__(self, opt):
- """Initialize the Visualizer class
-
- Parameters:
- opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
- Step 1: Cache the training/test options
- Step 2: create a tensorboard writer
- Step 3: create an HTML object for saving HTML files
- Step 4: create a logging file to store training losses
- """
- self.opt = opt # cache the option
- self.name = opt.name
- self.img_dir = os.path.join(opt.checkpoints_dir, opt.name, 'results')
-
- if opt.phase != 'test':
- self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, opt.name, 'logs'))
- # create a logging file to store training losses
- self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
- with open(self.log_name, "a") as log_file:
- now = time.strftime("%c")
- log_file.write('================ Training Loss (%s) ================\n' % now)
-
-
- def display_current_results(self, visuals, total_iters, epoch, dataset='train', save_results=False, count=0, name=None,
- add_image=True):
- """Display current results on TensorBoard; save current results to an HTML file.
-
- Parameters:
- visuals (OrderedDict) - - dictionary of images to display or save
- total_iters (int) -- total iterations
- epoch (int) - - the current epoch
- dataset (str) - - 'train' or 'val' or 'test'
- """
- # if (not add_image) and (not save_results): return
-
- for label, image in visuals.items():
- for i in range(image.shape[0]):
- image_numpy = util.tensor2im(image[i])
- if add_image:
- self.writer.add_image(label + '%s_%02d'%(dataset, i + count),
- image_numpy, total_iters, dataformats='HWC')
-
- if save_results:
- save_path = os.path.join(self.img_dir, dataset, 'epoch_%s_%06d'%(epoch, total_iters))
- if not os.path.isdir(save_path):
- os.makedirs(save_path)
-
- if name is not None:
- img_path = os.path.join(save_path, '%s.png' % name)
- else:
- img_path = os.path.join(save_path, '%s_%03d.png' % (label, i + count))
- util.save_image(image_numpy, img_path)
-
-
- def plot_current_losses(self, total_iters, losses, dataset='train'):
- for name, value in losses.items():
- self.writer.add_scalar(name + '/%s'%dataset, value, total_iters)
-
- # losses: same format as |losses| of plot_current_losses
- def print_current_losses(self, epoch, iters, losses, t_comp, t_data, dataset='train'):
- """print current losses on console; also save the losses to the disk
-
- Parameters:
- epoch (int) -- current epoch
- iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
- losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
- t_comp (float) -- computational time per data point (normalized by batch_size)
- t_data (float) -- data loading time per data point (normalized by batch_size)
- """
- message = '(dataset: %s, epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (
- dataset, epoch, iters, t_comp, t_data)
- for k, v in losses.items():
- message += '%s: %.3f ' % (k, v)
-
- print(message) # print the message
- with open(self.log_name, "a") as log_file:
- log_file.write('%s\n' % message) # save the message
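Outside of this training framework, the TensorBoard calls that both classes wrap boil down to `add_image` and `add_scalar`; a small hedged sketch with made-up tags, paths and values:

```python
# Sketch only: tag names, log directory and loss values are illustrative.
import numpy as np
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("checkpoints/demo/logs")

fake_image = np.random.rand(256, 256, 3).astype(np.float32)  # stands in for util.tensor2im output (HWC)
total_iters = 100
writer.add_image("fake_B/train_00", fake_image, total_iters, dataformats="HWC")

for name, value in {"G_GAN": 0.42, "D_real": 0.17}.items():
    writer.add_scalar(name + "/train", value, total_iters)

writer.close()
```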
diff --git a/spaces/koajoel/PolyFormer/CONTRIBUTING.md b/spaces/koajoel/PolyFormer/CONTRIBUTING.md
deleted file mode 100644
index c4b6a1c5081adcf78822222488e7c5b0f1dc6499..0000000000000000000000000000000000000000
--- a/spaces/koajoel/PolyFormer/CONTRIBUTING.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Contributing Guidelines
-
-Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
-documentation, we greatly value feedback and contributions from our community.
-
-Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
-information to effectively respond to your bug report or contribution.
-
-
-## Reporting Bugs/Feature Requests
-
-We welcome you to use the GitHub issue tracker to report bugs or suggest features.
-
-When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
-reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
-
-* A reproducible test case or series of steps
-* The version of our code being used
-* Any modifications you've made relevant to the bug
-* Anything unusual about your environment or deployment
-
-
-## Contributing via Pull Requests
-Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
-
-1. You are working against the latest source on the *main* branch.
-2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
-3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
-
-To send us a pull request, please:
-
-1. Fork the repository.
-2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
-3. Ensure local tests pass.
-4. Commit to your fork using clear commit messages.
-5. Send us a pull request, answering any default questions in the pull request interface.
-6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
-
-GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
-[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
-
-
-## Finding contributions to work on
-Looking at the existing issues is a great way to find something to contribute to. As our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
-
-
-## Code of Conduct
-This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
-For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
-opensource-codeofconduct@amazon.com with any additional questions or comments.
-
-
-## Security issue notifications
-If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
-
-
-## Licensing
-
-See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py b/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py
deleted file mode 100644
index 07b338dcfd2d7f10317608274631d0edd93ba889..0000000000000000000000000000000000000000
--- a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-import os
-import glob
-import argparse
-from utils.dedup import deup
-import sys
-
-WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
-
-if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
- print('please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
- sys.exit(-1)
-
-def get_directions(folder):
- raw_files = glob.glob(f'{folder}/train*')
- directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files]
- return directions
-
-def diff_list(lhs, rhs):
- return set(lhs).difference(set(rhs))
-
-def check_diff(
- from_src_file, from_tgt_file,
- to_src_file, to_tgt_file,
-):
- seen_in_from = set()
- seen_src_in_from = set()
- seen_tgt_in_from = set()
- from_count = 0
- with open(from_src_file, encoding='utf-8') as fsrc, \
- open(from_tgt_file, encoding='utf-8') as ftgt:
- for s, t in zip(fsrc, ftgt):
- seen_in_from.add((s, t))
- seen_src_in_from.add(s)
- seen_tgt_in_from.add(t)
- from_count += 1
- common = 0
- common_src = 0
- common_tgt = 0
- to_count = 0
- seen = set()
-
- with open(to_src_file, encoding='utf-8') as fsrc, \
- open(to_tgt_file, encoding='utf-8') as ftgt:
- for s, t in zip(fsrc, ftgt):
- to_count += 1
- if (s, t) not in seen:
- if (s, t) in seen_in_from:
- common += 1
- if s in seen_src_in_from:
- common_src += 1
- seen_src_in_from.remove(s)
- if t in seen_tgt_in_from:
- common_tgt += 1
- seen_tgt_in_from.remove(t)
- seen.add((s, t))
- return common, common_src, common_tgt, from_count, to_count
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument("--folder", type=str, required=True,
- help="the data folder ")
- parser.add_argument("--split", type=str, default='test',
- help="split (valid, test) to check against training data")
- parser.add_argument('--directions', type=str, default=None, required=False)
-
- args = parser.parse_args()
-
- if args.directions is None:
- directions = set(get_directions(args.folder))
- directions = sorted(directions)
- else:
- directions = args.directions.split(',')
- directions = sorted(set(directions))
-
- results = []
- print(f'checking where {args.split} split data are in training')
- print(f'direction\tcommon_count\tsrc common\ttgt common\tfrom_size\tto_size')
-
- for direction in directions:
- src, tgt = direction.split('-')
- from_src_file = f'{args.folder}/{args.split}.{src}-{tgt}.{src}'
- from_tgt_file = f'{args.folder}/{args.split}.{src}-{tgt}.{tgt}'
- if not os.path.exists(from_src_file):
- # some test/valid data might be in reverse directions:
- from_src_file = f'{args.folder}/{args.split}.{tgt}-{src}.{src}'
- from_tgt_file = f'{args.folder}/{args.split}.{tgt}-{src}.{tgt}'
- to_src_file = f'{args.folder}/train.{src}-{tgt}.{src}'
- to_tgt_file = f'{args.folder}/train.{src}-{tgt}.{tgt}'
- if not os.path.exists(to_src_file) or not os.path.exists(from_src_file):
- continue
- r = check_diff(from_src_file, from_tgt_file, to_src_file, to_tgt_file)
- results.append(r)
- print(f'{direction}\t', '\t'.join(map(str, r)))
-
-
-if __name__ == "__main__":
- main()
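The heart of `check_diff` is deduplicated set membership over (source, target) pairs; a toy illustration with made-up sentence pairs:

```python
# Toy data only; mirrors the dedup-then-count logic of check_diff().
test_pairs = [("hello", "bonjour"), ("cat", "chat")]
train_pairs = [("hello", "bonjour"), ("dog", "chien"), ("hello", "bonjour")]

seen_in_test = set(test_pairs)
seen, common = set(), 0
for pair in train_pairs:
    if pair not in seen:              # count each distinct training pair once
        if pair in seen_in_test:
            common += 1
        seen.add(pair)

print(f"{common} of {len(seen_in_test)} test pairs also occur in training")  # 1 of 2
```

On real data the script is driven from the command line, e.g. `python check_self_overlaps.py --folder $WORKDIR_ROOT/some_corpus --split test` with `WORKDIR_ROOT` exported beforehand; the folder name here is hypothetical.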
diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/speech_recognition/criterions/cross_entropy_acc.py b/spaces/koajoel/PolyFormer/fairseq/examples/speech_recognition/criterions/cross_entropy_acc.py
deleted file mode 100644
index 7c4d8ba3802a2da9467c42b0aa18653c7bbb2ec9..0000000000000000000000000000000000000000
--- a/spaces/koajoel/PolyFormer/fairseq/examples/speech_recognition/criterions/cross_entropy_acc.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-import logging
-import math
-
-import torch
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.criterions import FairseqCriterion, register_criterion
-
-
-@register_criterion("cross_entropy_acc")
-class CrossEntropyWithAccCriterion(FairseqCriterion):
- def __init__(self, task, sentence_avg):
- super().__init__(task)
- self.sentence_avg = sentence_avg
-
- def compute_loss(self, model, net_output, target, reduction, log_probs):
- # N, T -> N * T
- target = target.view(-1)
- lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
- if not hasattr(lprobs, "batch_first"):
- logging.warning(
- "ERROR: we need to know whether "
- "batch first for the net output; "
- "you need to set batch_first attribute for the return value of "
- "model.get_normalized_probs. Now, we assume this is true, but "
- "in the future, we will raise exception instead. "
- )
- batch_first = getattr(lprobs, "batch_first", True)
- if not batch_first:
- lprobs = lprobs.transpose(0, 1)
-
- # N, T, D -> N * T, D
- lprobs = lprobs.view(-1, lprobs.size(-1))
- loss = F.nll_loss(
- lprobs, target, ignore_index=self.padding_idx, reduction=reduction
- )
- return lprobs, loss
-
- def get_logging_output(self, sample, target, lprobs, loss):
- target = target.view(-1)
- mask = target != self.padding_idx
- correct = torch.sum(
- lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
- )
- total = torch.sum(mask)
- sample_size = (
- sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
- )
-
- logging_output = {
- "loss": utils.item(loss.data), # * sample['ntokens'],
- "ntokens": sample["ntokens"],
- "nsentences": sample["target"].size(0),
- "sample_size": sample_size,
- "correct": utils.item(correct.data),
- "total": utils.item(total.data),
- "nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
- }
-
- return sample_size, logging_output
-
- def forward(self, model, sample, reduction="sum", log_probs=True):
- """Computes the cross entropy with accuracy metric for the given sample.
-
- This is similar to CrossEntropyCriterion in fairseq, but also
- computes accuracy metrics as part of logging
-
- Args:
- logprobs (Torch.tensor) of shape N, T, D i.e.
- batchsize, timesteps, dimensions
- targets (Torch.tensor) of shape N, T i.e batchsize, timesteps
-
- Returns:
- tuple: With three elements:
- 1) the loss
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
-
- TODO:
- * Currently this Criterion will only work with LSTMEncoderModels or
- FairseqModels which have decoder, or Models which return TorchTensor
- as net_output.
- We need to make a change to support all FairseqEncoder models.
- """
- net_output = model(**sample["net_input"])
- target = model.get_targets(sample, net_output)
- lprobs, loss = self.compute_loss(
- model, net_output, target, reduction, log_probs
- )
- sample_size, logging_output = self.get_logging_output(
- sample, target, lprobs, loss
- )
- return loss, sample_size, logging_output
-
- @staticmethod
- def aggregate_logging_outputs(logging_outputs):
- """Aggregate logging outputs from data parallel training."""
- correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
- total_sum = sum(log.get("total", 0) for log in logging_outputs)
- loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
- ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
- nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
- sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
- nframes = sum(log.get("nframes", 0) for log in logging_outputs)
- agg_output = {
- "loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
- # if args.sentence_avg, then sample_size is nsentences, then loss
- # is per-sentence loss; else sample_size is ntokens, the loss
- # becomes per-output token loss
- "ntokens": ntokens,
- "nsentences": nsentences,
- "nframes": nframes,
- "sample_size": sample_size,
- "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
- "correct": correct_sum,
- "total": total_sum,
- # total is the number of validate tokens
- # total is the number of valid (non-padding) tokens
- if sample_size != ntokens:
- agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
- # loss: per-sentence loss (sample_size is nsentences in this branch)
- # nll_loss: per output token loss
- return agg_output
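The accuracy bookkeeping in `get_logging_output` is a masked argmax comparison; a standalone sketch with toy tensors (the padding index of 1 is only an illustrative choice):

```python
# Sketch only: toy shapes, padding_idx=1 chosen for illustration.
import torch

padding_idx = 1
lprobs = torch.log_softmax(torch.randn(6, 5), dim=-1)   # flattened (N*T, D) log-probs
target = torch.tensor([2, 3, 4, 1, 1, 2])               # flattened (N*T,) targets, 1 = pad

mask = target != padding_idx
correct = torch.sum(lprobs.argmax(1).masked_select(mask) == target.masked_select(mask))
total = torch.sum(mask)
print(f"acc = {100.0 * correct.item() / total.item():.1f}% over {total.item()} tokens")
```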
diff --git a/spaces/kouenYoung/anime-tts/text/cleaners.py b/spaces/kouenYoung/anime-tts/text/cleaners.py
deleted file mode 100644
index 15c5cc1fdff01bbaf399d69e06c59bcffde807be..0000000000000000000000000000000000000000
--- a/spaces/kouenYoung/anime-tts/text/cleaners.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import re
-
-
-def japanese_cleaners(text):
- from text.japanese import japanese_to_romaji_with_accent
- text = japanese_to_romaji_with_accent(text)
- if re.match('[A-Za-z]', text[-1]):
- text += '.'
- return text
-
-
-def japanese_cleaners2(text):
- return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
-
-
-def korean_cleaners(text):
- '''Pipeline for Korean text'''
- from text.korean import latin_to_hangul, number_to_hangul, divide_hangul
- text = latin_to_hangul(text)
- text = number_to_hangul(text)
- text = divide_hangul(text)
- if re.match('[\u3131-\u3163]', text[-1]):
- text += '.'
- return text
-
-
-def chinese_cleaners(text):
- '''Pipeline for Chinese text'''
- from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- if re.match('[ˉˊˇˋ˙]', text[-1]):
- text += '。'
- return text
-
-
-def zh_ja_mixture_cleaners(text):
- from text.mandarin import chinese_to_romaji
- from text.japanese import japanese_to_romaji_with_accent
- chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
- japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
- for chinese_text in chinese_texts:
- cleaned_text = chinese_to_romaji(chinese_text[4:-4])
- text = text.replace(chinese_text, cleaned_text+' ', 1)
- for japanese_text in japanese_texts:
- cleaned_text = japanese_to_romaji_with_accent(
- japanese_text[4:-4]).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')
- text = text.replace(japanese_text, cleaned_text+' ', 1)
- text = text[:-1]
- if re.match('[A-Za-zɯɹəɥ→↓↑]', text[-1]):
- text += '.'
- return text
-
-
-def sanskrit_cleaners(text):
- text = text.replace('॥', '।').replace('ॐ', 'ओम्')
- if text[-1] != '।':
- text += ' ।'
- return text
-
-
-def cjks_cleaners(text):
- from text.mandarin import chinese_to_lazy_ipa
- from text.japanese import japanese_to_ipa
- from text.korean import korean_to_lazy_ipa
- from text.sanskrit import devanagari_to_ipa
- chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
- japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
- korean_texts = re.findall(r'\[KO\].*?\[KO\]', text)
- sanskrit_texts = re.findall(r'\[SA\].*?\[SA\]', text)
- for chinese_text in chinese_texts:
- cleaned_text = chinese_to_lazy_ipa(chinese_text[4:-4])
- text = text.replace(chinese_text, cleaned_text+' ', 1)
- for japanese_text in japanese_texts:
- cleaned_text = japanese_to_ipa(japanese_text[4:-4])
- text = text.replace(japanese_text, cleaned_text+' ', 1)
- for korean_text in korean_texts:
- cleaned_text = korean_to_lazy_ipa(korean_text[4:-4])
- text = text.replace(korean_text, cleaned_text+' ', 1)
- for sanskrit_text in sanskrit_texts:
- cleaned_text = devanagari_to_ipa(sanskrit_text[4:-4])
- text = text.replace(sanskrit_text, cleaned_text+' ', 1)
- text = text[:-1]
- if re.match(r'[^\.,!\?\-…~]', text[-1]):
- text += '.'
- return text
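A hedged usage sketch; it assumes the `text` package and the g2p dependencies it pulls in (`jieba`, `cn2an`, `pypinyin`, the Japanese helpers) are installed, and that multilingual input is wrapped in the language tags the mixture cleaner expects:

```python
# Sketch only: the sample sentences are illustrative.
from text.cleaners import chinese_cleaners, zh_ja_mixture_cleaners

print(chinese_cleaners("我有2个苹果"))   # digits -> Chinese numerals, then bopomofo

# each segment must be wrapped in its language tag: [ZH]...[ZH] or [JA]...[JA]
print(zh_ja_mixture_cleaners("[ZH]你好[ZH][JA]こんにちは[JA]"))
```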
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/pens/cairoPen.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/pens/cairoPen.py
deleted file mode 100644
index 9cd5da9128fc0054cf748de703540afa7685b7b2..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/pens/cairoPen.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Pen to draw to a Cairo graphics library context."""
-
-from fontTools.pens.basePen import BasePen
-
-
-__all__ = ["CairoPen"]
-
-
-class CairoPen(BasePen):
- """Pen to draw to a Cairo graphics library context."""
-
- def __init__(self, glyphSet, context):
- BasePen.__init__(self, glyphSet)
- self.context = context
-
- def _moveTo(self, p):
- self.context.move_to(*p)
-
- def _lineTo(self, p):
- self.context.line_to(*p)
-
- def _curveToOne(self, p1, p2, p3):
- self.context.curve_to(*p1, *p2, *p3)
-
- def _closePath(self):
- self.context.close_path()
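A usage sketch with pycairo and fontTools; the font path and glyph name are placeholders:

```python
# Sketch only: "MyFont.ttf" and glyph "A" are placeholders.
import cairo
from fontTools.ttLib import TTFont
from fontTools.pens.cairoPen import CairoPen

font = TTFont("MyFont.ttf")
glyph_set = font.getGlyphSet()

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1000, 1000)
context = cairo.Context(surface)
context.translate(0, 800)   # rough baseline placement
context.scale(1, -1)        # font outlines are y-up, cairo is y-down

pen = CairoPen(glyph_set, context)
glyph_set["A"].draw(pen)    # the pen feeds moveTo/lineTo/curveTo into the cairo path
context.fill()
surface.write_to_png("glyph_A.png")
```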
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/qt_editor/figureoptions.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/qt_editor/figureoptions.py
deleted file mode 100644
index 2a95109801061fa591f5c2a10b3529530139aaaa..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/backends/qt_editor/figureoptions.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright © 2009 Pierre Raybaut
-# Licensed under the terms of the MIT License
-# see the Matplotlib licenses directory for a copy of the license
-
-
-"""Module that provides a GUI-based editor for Matplotlib's figure options."""
-
-from itertools import chain
-from matplotlib import cbook, cm, colors as mcolors, markers, image as mimage
-from matplotlib.backends.qt_compat import QtGui
-from matplotlib.backends.qt_editor import _formlayout
-from matplotlib.dates import DateConverter, num2date
-
-LINESTYLES = {'-': 'Solid',
- '--': 'Dashed',
- '-.': 'DashDot',
- ':': 'Dotted',
- 'None': 'None',
- }
-
-DRAWSTYLES = {
- 'default': 'Default',
- 'steps-pre': 'Steps (Pre)', 'steps': 'Steps (Pre)',
- 'steps-mid': 'Steps (Mid)',
- 'steps-post': 'Steps (Post)'}
-
-MARKERS = markers.MarkerStyle.markers
-
-
-def figure_edit(axes, parent=None):
- """Edit matplotlib figure options"""
- sep = (None, None) # separator
-
- # Get / General
- def convert_limits(lim, converter):
- """Convert axis limits for correct input editors."""
- if isinstance(converter, DateConverter):
- return map(num2date, lim)
- # Cast to builtin floats as they have nicer reprs.
- return map(float, lim)
-
- axis_map = axes._axis_map
- axis_limits = {
- name: tuple(convert_limits(
- getattr(axes, f'get_{name}lim')(), axis.converter
- ))
- for name, axis in axis_map.items()
- }
- general = [
- ('Title', axes.get_title()),
- sep,
- *chain.from_iterable([
- (
- (None, f"{name.title()}-Axis"),
- ('Min', axis_limits[name][0]),
- ('Max', axis_limits[name][1]),
- ('Label', axis.get_label().get_text()),
- ('Scale', [axis.get_scale(),
- 'linear', 'log', 'symlog', 'logit']),
- sep,
- )
- for name, axis in axis_map.items()
- ]),
- ('(Re-)Generate automatic legend', False),
- ]
-
- # Save the converter and unit data
- axis_converter = {
- name: axis.converter
- for name, axis in axis_map.items()
- }
- axis_units = {
- name: axis.get_units()
- for name, axis in axis_map.items()
- }
-
- # Get / Curves
- labeled_lines = []
- for line in axes.get_lines():
- label = line.get_label()
- if label == '_nolegend_':
- continue
- labeled_lines.append((label, line))
- curves = []
-
- def prepare_data(d, init):
- """
- Prepare entry for FormLayout.
-
- *d* is a mapping of shorthands to style names (a single style may
- have multiple shorthands, in particular the shorthands `None`,
- `"None"`, `"none"` and `""` are synonyms); *init* is one shorthand
- of the initial style.
-
- This function returns a list suitable for initializing a
- FormLayout combobox, namely `[initial_name, (shorthand,
- style_name), (shorthand, style_name), ...]`.
- """
- if init not in d:
- d = {**d, init: str(init)}
- # Drop duplicate shorthands from dict (by overwriting them during
- # the dict comprehension).
- name2short = {name: short for short, name in d.items()}
- # Convert back to {shorthand: name}.
- short2name = {short: name for name, short in name2short.items()}
- # Find the kept shorthand for the style specified by init.
- canonical_init = name2short[d[init]]
- # Sort by representation and prepend the initial value.
- return ([canonical_init] +
- sorted(short2name.items(),
- key=lambda short_and_name: short_and_name[1]))
-
- for label, line in labeled_lines:
- color = mcolors.to_hex(
- mcolors.to_rgba(line.get_color(), line.get_alpha()),
- keep_alpha=True)
- ec = mcolors.to_hex(
- mcolors.to_rgba(line.get_markeredgecolor(), line.get_alpha()),
- keep_alpha=True)
- fc = mcolors.to_hex(
- mcolors.to_rgba(line.get_markerfacecolor(), line.get_alpha()),
- keep_alpha=True)
- curvedata = [
- ('Label', label),
- sep,
- (None, 'Line'),
- ('Line style', prepare_data(LINESTYLES, line.get_linestyle())),
- ('Draw style', prepare_data(DRAWSTYLES, line.get_drawstyle())),
- ('Width', line.get_linewidth()),
- ('Color (RGBA)', color),
- sep,
- (None, 'Marker'),
- ('Style', prepare_data(MARKERS, line.get_marker())),
- ('Size', line.get_markersize()),
- ('Face color (RGBA)', fc),
- ('Edge color (RGBA)', ec)]
- curves.append([curvedata, label, ""])
- # Is there a curve displayed?
- has_curve = bool(curves)
-
- # Get ScalarMappables.
- labeled_mappables = []
- for mappable in [*axes.images, *axes.collections]:
- label = mappable.get_label()
- if label == '_nolegend_' or mappable.get_array() is None:
- continue
- labeled_mappables.append((label, mappable))
- mappables = []
- cmaps = [(cmap, name) for name, cmap in sorted(cm._colormaps.items())]
- for label, mappable in labeled_mappables:
- cmap = mappable.get_cmap()
- if cmap not in cm._colormaps.values():
- cmaps = [(cmap, cmap.name), *cmaps]
- low, high = mappable.get_clim()
- mappabledata = [
- ('Label', label),
- ('Colormap', [cmap.name] + cmaps),
- ('Min. value', low),
- ('Max. value', high),
- ]
- if hasattr(mappable, "get_interpolation"): # Images.
- interpolations = [
- (name, name) for name in sorted(mimage.interpolations_names)]
- mappabledata.append((
- 'Interpolation',
- [mappable.get_interpolation(), *interpolations]))
- mappables.append([mappabledata, label, ""])
- # Is there a scalarmappable displayed?
- has_sm = bool(mappables)
-
- datalist = [(general, "Axes", "")]
- if curves:
- datalist.append((curves, "Curves", ""))
- if mappables:
- datalist.append((mappables, "Images, etc.", ""))
-
- def apply_callback(data):
- """A callback to apply changes."""
- orig_limits = {
- name: getattr(axes, f"get_{name}lim")()
- for name in axis_map
- }
-
- general = data.pop(0)
- curves = data.pop(0) if has_curve else []
- mappables = data.pop(0) if has_sm else []
- if data:
- raise ValueError("Unexpected field")
-
- title = general.pop(0)
- axes.set_title(title)
- generate_legend = general.pop()
-
- for i, (name, axis) in enumerate(axis_map.items()):
- axis_min = general[4*i]
- axis_max = general[4*i + 1]
- axis_label = general[4*i + 2]
- axis_scale = general[4*i + 3]
- if axis.get_scale() != axis_scale:
- getattr(axes, f"set_{name}scale")(axis_scale)
-
- axis._set_lim(axis_min, axis_max, auto=False)
- axis.set_label_text(axis_label)
-
- # Restore the unit data
- axis.converter = axis_converter[name]
- axis.set_units(axis_units[name])
-
- # Set / Curves
- for index, curve in enumerate(curves):
- line = labeled_lines[index][1]
- (label, linestyle, drawstyle, linewidth, color, marker, markersize,
- markerfacecolor, markeredgecolor) = curve
- line.set_label(label)
- line.set_linestyle(linestyle)
- line.set_drawstyle(drawstyle)
- line.set_linewidth(linewidth)
- rgba = mcolors.to_rgba(color)
- line.set_alpha(None)
- line.set_color(rgba)
- if marker != 'none':
- line.set_marker(marker)
- line.set_markersize(markersize)
- line.set_markerfacecolor(markerfacecolor)
- line.set_markeredgecolor(markeredgecolor)
-
- # Set ScalarMappables.
- for index, mappable_settings in enumerate(mappables):
- mappable = labeled_mappables[index][1]
- if len(mappable_settings) == 5:
- label, cmap, low, high, interpolation = mappable_settings
- mappable.set_interpolation(interpolation)
- elif len(mappable_settings) == 4:
- label, cmap, low, high = mappable_settings
- mappable.set_label(label)
- mappable.set_cmap(cm.get_cmap(cmap))
- mappable.set_clim(*sorted([low, high]))
-
- # re-generate legend, if checkbox is checked
- if generate_legend:
- draggable = None
- ncols = 1
- if axes.legend_ is not None:
- old_legend = axes.get_legend()
- draggable = old_legend._draggable is not None
- ncols = old_legend._ncols
- new_legend = axes.legend(ncols=ncols)
- if new_legend:
- new_legend.set_draggable(draggable)
-
- # Redraw
- figure = axes.get_figure()
- figure.canvas.draw()
- for name in axis_map:
- if getattr(axes, f"get_{name}lim")() != orig_limits[name]:
- figure.canvas.toolbar.push_current()
- break
-
- _formlayout.fedit(
- datalist, title="Figure options", parent=parent,
- icon=QtGui.QIcon(
- str(cbook._get_data_path('images', 'qt4_editor_options.svg'))),
- apply=apply_callback)
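The nested `prepare_data` helper above is the one piece of non-GUI logic; a standalone sketch that reproduces its shorthand normalization on the `LINESTYLES` table (re-implemented here for illustration, not importable from the module):

```python
# Sketch only: re-implements the nested prepare_data() helper for illustration.
LINESTYLES = {'-': 'Solid', '--': 'Dashed', '-.': 'DashDot', ':': 'Dotted', 'None': 'None'}

def prepare_data(d, init):
    if init not in d:
        d = {**d, init: str(init)}
    name2short = {name: short for short, name in d.items()}      # drop duplicate shorthands
    short2name = {short: name for name, short in name2short.items()}
    canonical_init = name2short[d[init]]
    return [canonical_init] + sorted(short2name.items(), key=lambda kv: kv[1])

print(prepare_data(LINESTYLES, '--'))
# ['--', ('-.', 'DashDot'), ('--', 'Dashed'), (':', 'Dotted'), ('None', 'None'), ('-', 'Solid')]
```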
diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/network_rrdb.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/models/network_rrdb.py
deleted file mode 100644
index 91ae94cc5ed857ffead176fc317d553edc97a507..0000000000000000000000000000000000000000
--- a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/network_rrdb.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import math
-import torch.nn as nn
-import models.basicblock as B
-
-
-"""
-# --------------------------------------------
-# SR network with Residual in Residual Dense Block (RRDB)
-# "ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks"
-# --------------------------------------------
-"""
-
-
-class RRDB(nn.Module):
- """
- gc: number of growth channels
- nb: number of RRDB
- """
- def __init__(self, in_nc=3, out_nc=3, nc=64, nb=23, gc=32, upscale=4, act_mode='L', upsample_mode='upconv'):
- super(RRDB, self).__init__()
- assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
-
- n_upscale = int(math.log(upscale, 2))
- if upscale == 3:
- n_upscale = 1
-
- m_head = B.conv(in_nc, nc, mode='C')
-
- m_body = [B.RRDB(nc, gc=32, mode='C'+act_mode) for _ in range(nb)]
- m_body.append(B.conv(nc, nc, mode='C'))
-
- if upsample_mode == 'upconv':
- upsample_block = B.upsample_upconv
- elif upsample_mode == 'pixelshuffle':
- upsample_block = B.upsample_pixelshuffle
- elif upsample_mode == 'convtranspose':
- upsample_block = B.upsample_convtranspose
- else:
- raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
-
- if upscale == 3:
- m_uper = upsample_block(nc, nc, mode='3'+act_mode)
- else:
- m_uper = [upsample_block(nc, nc, mode='2'+act_mode) for _ in range(n_upscale)]
-
- H_conv0 = B.conv(nc, nc, mode='C'+act_mode)
- H_conv1 = B.conv(nc, out_nc, mode='C')
- m_tail = B.sequential(H_conv0, H_conv1)
-
- self.model = B.sequential(m_head, B.ShortcutBlock(B.sequential(*m_body)), *m_uper, m_tail)
-
- def forward(self, x):
- x = self.model(x)
- return x
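A hedged instantiation sketch; it assumes the KAIR repository (which provides `models.basicblock`) is on the Python path and uses random weights, so the output is only a shape check:

```python
# Sketch only: random weights, shape check for upscale=4.
import torch
from models.network_rrdb import RRDB

net = RRDB(in_nc=3, out_nc=3, nc=64, nb=23, gc=32, upscale=4,
           act_mode='L', upsample_mode='upconv').eval()

lr = torch.randn(1, 3, 64, 64)    # low-resolution input
with torch.no_grad():
    sr = net(lr)
print(sr.shape)                   # expected: torch.Size([1, 3, 256, 256])
```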
diff --git a/spaces/latent-consistency/lcm-LoraTheExplorer/lora.py b/spaces/latent-consistency/lcm-LoraTheExplorer/lora.py
deleted file mode 100644
index 3ac02a748131ab2c841fec0248c5fe18e2659dd3..0000000000000000000000000000000000000000
--- a/spaces/latent-consistency/lcm-LoraTheExplorer/lora.py
+++ /dev/null
@@ -1,1222 +0,0 @@
-# LoRA network module taken from https://github.com/bmaltais/kohya_ss/blob/master/networks/lora.py
-# reference:
-# https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
-# https://github.com/cloneofsimo/lora/blob/master/lora_diffusion/lora.py
-
-import math
-import os
-from typing import Dict, List, Optional, Tuple, Type, Union
-from diffusers import AutoencoderKL
-from transformers import CLIPTextModel
-import numpy as np
-import torch
-import re
-
-
-RE_UPDOWN = re.compile(r"(up|down)_blocks_(\d+)_(resnets|upsamplers|downsamplers|attentions)_(\d+)_")
-
-
-class LoRAModule(torch.nn.Module):
- """
- Replaces the forward method of the original Linear (or Conv2d) module, instead of replacing the module itself.
- """
-
- def __init__(
- self,
- lora_name,
- org_module: torch.nn.Module,
- multiplier=1.0,
- lora_dim=4,
- alpha=1,
- dropout=None,
- rank_dropout=None,
- module_dropout=None,
- ):
- """If alpha is 0 or None, alpha defaults to the rank (no scaling)."""
- super().__init__()
- self.lora_name = lora_name
-
- if org_module.__class__.__name__ == "Conv2d":
- in_dim = org_module.in_channels
- out_dim = org_module.out_channels
- else:
- in_dim = org_module.in_features
- out_dim = org_module.out_features
-
- # if limit_rank:
- # self.lora_dim = min(lora_dim, in_dim, out_dim)
- # if self.lora_dim != lora_dim:
- # print(f"{lora_name} dim (rank) is changed to: {self.lora_dim}")
- # else:
- self.lora_dim = lora_dim
-
- if org_module.__class__.__name__ == "Conv2d":
- kernel_size = org_module.kernel_size
- stride = org_module.stride
- padding = org_module.padding
- self.lora_down = torch.nn.Conv2d(in_dim, self.lora_dim, kernel_size, stride, padding, bias=False)
- self.lora_up = torch.nn.Conv2d(self.lora_dim, out_dim, (1, 1), (1, 1), bias=False)
- else:
- self.lora_down = torch.nn.Linear(in_dim, self.lora_dim, bias=False)
- self.lora_up = torch.nn.Linear(self.lora_dim, out_dim, bias=False)
-
- if type(alpha) == torch.Tensor:
- alpha = alpha.detach().float().numpy() # without casting, bf16 causes error
- alpha = self.lora_dim if alpha is None or alpha == 0 else alpha
- self.scale = alpha / self.lora_dim
- self.register_buffer("alpha", torch.tensor(alpha)) # stored as a buffer so it can be treated as a constant
-
- # same as microsoft's
- torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
- torch.nn.init.zeros_(self.lora_up.weight)
-
- self.multiplier = multiplier
- self.org_module = org_module # remove in applying
- self.dropout = dropout
- self.rank_dropout = rank_dropout
- self.module_dropout = module_dropout
-
- def apply_to(self):
- self.org_forward = self.org_module.forward
- self.org_module.forward = self.forward
- del self.org_module
-
- def forward(self, x):
- org_forwarded = self.org_forward(x)
-
- # module dropout
- if self.module_dropout is not None and self.training:
- if torch.rand(1) < self.module_dropout:
- return org_forwarded
-
- lx = self.lora_down(x)
-
- # normal dropout
- if self.dropout is not None and self.training:
- lx = torch.nn.functional.dropout(lx, p=self.dropout)
-
- # rank dropout
- if self.rank_dropout is not None and self.training:
- mask = torch.rand((lx.size(0), self.lora_dim), device=lx.device) > self.rank_dropout
- if len(lx.size()) == 3:
- mask = mask.unsqueeze(1) # for Text Encoder
- elif len(lx.size()) == 4:
- mask = mask.unsqueeze(-1).unsqueeze(-1) # for Conv2d
- lx = lx * mask
-
- # scaling for rank dropout: treat as if the rank is changed
- # this could also be derived from the mask, but rank_dropout is used here for its augmentation-like effect
- scale = self.scale * (1.0 / (1.0 - self.rank_dropout)) # redundant for readability
- else:
- scale = self.scale
-
- lx = self.lora_up(lx)
-
- return org_forwarded + lx * self.multiplier * scale
-
-
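A standalone sketch of the low-rank update that `LoRAModule.forward` applies around a frozen layer, y = org(x) + up(down(x)) * multiplier * alpha/rank, using a plain `torch.nn.Linear`; the dimensions below are illustrative, not taken from any particular model:

```python
# Sketch only: mirrors LoRAModule's Linear branch with illustrative sizes.
import math
import torch

in_dim, out_dim, rank, alpha, multiplier = 768, 768, 4, 1.0, 1.0
org = torch.nn.Linear(in_dim, out_dim)

lora_down = torch.nn.Linear(in_dim, rank, bias=False)
lora_up = torch.nn.Linear(rank, out_dim, bias=False)
torch.nn.init.kaiming_uniform_(lora_down.weight, a=math.sqrt(5))
torch.nn.init.zeros_(lora_up.weight)        # zero init: LoRA starts as a no-op
scale = alpha / rank

x = torch.randn(2, in_dim)
y = org(x) + lora_up(lora_down(x)) * multiplier * scale
print(torch.allclose(y, org(x)))            # True until lora_up is trained away from zero
```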
-class LoRAInfModule(LoRAModule):
- def __init__(
- self,
- lora_name,
- org_module: torch.nn.Module,
- multiplier=1.0,
- lora_dim=4,
- alpha=1,
- **kwargs,
- ):
- # no dropout for inference
- super().__init__(lora_name, org_module, multiplier, lora_dim, alpha)
-
- self.org_module_ref = [org_module] # keep a reference so it can be accessed later
- self.enabled = True
-
- # check regional or not by lora_name
- self.text_encoder = False
- if lora_name.startswith("lora_te_"):
- self.regional = False
- self.use_sub_prompt = True
- self.text_encoder = True
- elif "attn2_to_k" in lora_name or "attn2_to_v" in lora_name:
- self.regional = False
- self.use_sub_prompt = True
- elif "time_emb" in lora_name:
- self.regional = False
- self.use_sub_prompt = False
- else:
- self.regional = True
- self.use_sub_prompt = False
-
- self.network: LoRANetwork = None
-
- def set_network(self, network):
- self.network = network
-
- # merge the LoRA weights into the frozen original module
- def merge_to(self, sd, dtype, device):
- # get up/down weight
- up_weight = sd["lora_up.weight"].to(torch.float).to(device)
- down_weight = sd["lora_down.weight"].to(torch.float).to(device)
-
- # extract weight from org_module
- org_sd = self.org_module.state_dict()
- weight = org_sd["weight"].to(torch.float)
-
- # merge weight
- if len(weight.size()) == 2:
- # linear
- weight = weight + self.multiplier * (up_weight @ down_weight) * self.scale
- elif down_weight.size()[2:4] == (1, 1):
- # conv2d 1x1
- weight = (
- weight
- + self.multiplier
- * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3)
- * self.scale
- )
- else:
- # conv2d 3x3
- conved = torch.nn.functional.conv2d(down_weight.permute(1, 0, 2, 3), up_weight).permute(1, 0, 2, 3)
- # print(conved.size(), weight.size(), module.stride, module.padding)
- weight = weight + self.multiplier * conved * self.scale
-
- # set weight to org_module
- org_sd["weight"] = weight.to(dtype)
- self.org_module.load_state_dict(org_sd)
-
- # return this module's weight so the merge can be undone later
- def get_weight(self, multiplier=None):
- if multiplier is None:
- multiplier = self.multiplier
-
- # get up/down weight from module
- up_weight = self.lora_up.weight.to(torch.float)
- down_weight = self.lora_down.weight.to(torch.float)
-
- # pre-calculated weight
- if len(down_weight.size()) == 2:
- # linear
- weight = self.multiplier * (up_weight @ down_weight) * self.scale
- elif down_weight.size()[2:4] == (1, 1):
- # conv2d 1x1
- weight = (
- self.multiplier
- * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3)
- * self.scale
- )
- else:
- # conv2d 3x3
- conved = torch.nn.functional.conv2d(down_weight.permute(1, 0, 2, 3), up_weight).permute(1, 0, 2, 3)
- weight = self.multiplier * conved * self.scale
-
- return weight
-
- def set_region(self, region):
- self.region = region
- self.region_mask = None
-
- def default_forward(self, x):
- # print("default_forward", self.lora_name, x.size())
- return self.org_forward(x) + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale
-
- def forward(self, x):
- if not self.enabled:
- return self.org_forward(x)
-
- if self.network is None or self.network.sub_prompt_index is None:
- return self.default_forward(x)
- if not self.regional and not self.use_sub_prompt:
- return self.default_forward(x)
-
- if self.regional:
- return self.regional_forward(x)
- else:
- return self.sub_prompt_forward(x)
-
- def get_mask_for_x(self, x):
- # calculate size from shape of x
- if len(x.size()) == 4:
- h, w = x.size()[2:4]
- area = h * w
- else:
- area = x.size()[1]
-
- mask = self.network.mask_dic[area]
- if mask is None:
- raise ValueError(f"mask is None for resolution {area}")
- if len(x.size()) != 4:
- mask = torch.reshape(mask, (1, -1, 1))
- return mask
-
- def regional_forward(self, x):
- if "attn2_to_out" in self.lora_name:
- return self.to_out_forward(x)
-
- if self.network.mask_dic is None: # sub_prompt_index >= 3
- return self.default_forward(x)
-
- # apply mask for LoRA result
- lx = self.lora_up(self.lora_down(x)) * self.multiplier * self.scale
- mask = self.get_mask_for_x(lx)
- # print("regional", self.lora_name, self.network.sub_prompt_index, lx.size(), mask.size())
- lx = lx * mask
-
- x = self.org_forward(x)
- x = x + lx
-
- if "attn2_to_q" in self.lora_name and self.network.is_last_network:
- x = self.postp_to_q(x)
-
- return x
-
- def postp_to_q(self, x):
- # repeat x to num_sub_prompts
- has_real_uncond = x.size()[0] // self.network.batch_size == 3
- qc = self.network.batch_size # uncond
- qc += self.network.batch_size * self.network.num_sub_prompts # cond
- if has_real_uncond:
- qc += self.network.batch_size # real_uncond
-
- query = torch.zeros((qc, x.size()[1], x.size()[2]), device=x.device, dtype=x.dtype)
- query[: self.network.batch_size] = x[: self.network.batch_size]
-
- for i in range(self.network.batch_size):
- qi = self.network.batch_size + i * self.network.num_sub_prompts
- query[qi : qi + self.network.num_sub_prompts] = x[self.network.batch_size + i]
-
- if has_real_uncond:
- query[-self.network.batch_size :] = x[-self.network.batch_size :]
-
- # print("postp_to_q", self.lora_name, x.size(), query.size(), self.network.num_sub_prompts)
- return query
-
- def sub_prompt_forward(self, x):
- if x.size()[0] == self.network.batch_size: # if uncond in text_encoder, do not apply LoRA
- return self.org_forward(x)
-
- emb_idx = self.network.sub_prompt_index
- if not self.text_encoder:
- emb_idx += self.network.batch_size
-
- # apply sub prompt of X
- lx = x[emb_idx :: self.network.num_sub_prompts]
- lx = self.lora_up(self.lora_down(lx)) * self.multiplier * self.scale
-
- # print("sub_prompt_forward", self.lora_name, x.size(), lx.size(), emb_idx)
-
- x = self.org_forward(x)
- x[emb_idx :: self.network.num_sub_prompts] += lx
-
- return x
-
- def to_out_forward(self, x):
- # print("to_out_forward", self.lora_name, x.size(), self.network.is_last_network)
-
- if self.network.is_last_network:
- masks = [None] * self.network.num_sub_prompts
- self.network.shared[self.lora_name] = (None, masks)
- else:
- lx, masks = self.network.shared[self.lora_name]
-
- # call own LoRA
- x1 = x[self.network.batch_size + self.network.sub_prompt_index :: self.network.num_sub_prompts]
- lx1 = self.lora_up(self.lora_down(x1)) * self.multiplier * self.scale
-
- if self.network.is_last_network:
- lx = torch.zeros(
- (self.network.num_sub_prompts * self.network.batch_size, *lx1.size()[1:]), device=lx1.device, dtype=lx1.dtype
- )
- self.network.shared[self.lora_name] = (lx, masks)
-
- # print("to_out_forward", lx.size(), lx1.size(), self.network.sub_prompt_index, self.network.num_sub_prompts)
- lx[self.network.sub_prompt_index :: self.network.num_sub_prompts] += lx1
- masks[self.network.sub_prompt_index] = self.get_mask_for_x(lx1)
-
- # if not last network, return x and masks
- x = self.org_forward(x)
- if not self.network.is_last_network:
- return x
-
- lx, masks = self.network.shared.pop(self.lora_name)
-
- # if last network, combine separated x with mask weighted sum
- has_real_uncond = x.size()[0] // self.network.batch_size == self.network.num_sub_prompts + 2
-
- out = torch.zeros((self.network.batch_size * (3 if has_real_uncond else 2), *x.size()[1:]), device=x.device, dtype=x.dtype)
- out[: self.network.batch_size] = x[: self.network.batch_size] # uncond
- if has_real_uncond:
- out[-self.network.batch_size :] = x[-self.network.batch_size :] # real_uncond
-
- # print("to_out_forward", self.lora_name, self.network.sub_prompt_index, self.network.num_sub_prompts)
- # for i in range(len(masks)):
- # if masks[i] is None:
- # masks[i] = torch.zeros_like(masks[-1])
-
- mask = torch.cat(masks)
- mask_sum = torch.sum(mask, dim=0) + 1e-4
- for i in range(self.network.batch_size):
-            # process each image in the batch separately
- lx1 = lx[i * self.network.num_sub_prompts : (i + 1) * self.network.num_sub_prompts]
- lx1 = lx1 * mask
- lx1 = torch.sum(lx1, dim=0)
-
- xi = self.network.batch_size + i * self.network.num_sub_prompts
- x1 = x[xi : xi + self.network.num_sub_prompts]
- x1 = x1 * mask
- x1 = torch.sum(x1, dim=0)
- x1 = x1 / mask_sum
-
- x1 = x1 + lx1
- out[self.network.batch_size + i] = x1
-
- # print("to_out_forward", x.size(), out.size(), has_real_uncond)
- return out
-
-
-def parse_block_lr_kwargs(nw_kwargs):
- down_lr_weight = nw_kwargs.get("down_lr_weight", None)
- mid_lr_weight = nw_kwargs.get("mid_lr_weight", None)
- up_lr_weight = nw_kwargs.get("up_lr_weight", None)
-
-    # if none of them is specified, block-wise LR is disabled; return None for all
- if down_lr_weight is None and mid_lr_weight is None and up_lr_weight is None:
- return None, None, None
-
- # extract learning rate weight for each block
- if down_lr_weight is not None:
- # if some parameters are not set, use zero
- if "," in down_lr_weight:
- down_lr_weight = [(float(s) if s else 0.0) for s in down_lr_weight.split(",")]
-
- if mid_lr_weight is not None:
- mid_lr_weight = float(mid_lr_weight)
-
- if up_lr_weight is not None:
- if "," in up_lr_weight:
- up_lr_weight = [(float(s) if s else 0.0) for s in up_lr_weight.split(",")]
-
- down_lr_weight, mid_lr_weight, up_lr_weight = get_block_lr_weight(
- down_lr_weight, mid_lr_weight, up_lr_weight, float(nw_kwargs.get("block_lr_zero_threshold", 0.0))
- )
-
- return down_lr_weight, mid_lr_weight, up_lr_weight
-
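-# Hypothetical example of the kwargs this function expects (values are illustrative only):
-#   {"down_lr_weight": "1,1,1,1,1,1,1,1,1,0,0,0", "mid_lr_weight": "1", "up_lr_weight": "cosine+.25"}
-# Comma-separated strings give one weight per block; preset names such as "cosine+.25" are expanded
-# by get_block_lr_weight below.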
-
-def create_network(
- multiplier: float,
- network_dim: Optional[int],
- network_alpha: Optional[float],
- vae: AutoencoderKL,
- text_encoder: Union[CLIPTextModel, List[CLIPTextModel]],
- unet,
- neuron_dropout: Optional[float] = None,
- **kwargs,
-):
- if network_dim is None:
- network_dim = 4 # default
- if network_alpha is None:
- network_alpha = 1.0
-
- # extract dim/alpha for conv2d, and block dim
- conv_dim = kwargs.get("conv_dim", None)
- conv_alpha = kwargs.get("conv_alpha", None)
- if conv_dim is not None:
- conv_dim = int(conv_dim)
- if conv_alpha is None:
- conv_alpha = 1.0
- else:
- conv_alpha = float(conv_alpha)
-
- # block dim/alpha/lr
- block_dims = kwargs.get("block_dims", None)
- down_lr_weight, mid_lr_weight, up_lr_weight = parse_block_lr_kwargs(kwargs)
-
-    # if any of the above is specified, enable per-block dim (rank)
- if block_dims is not None or down_lr_weight is not None or mid_lr_weight is not None or up_lr_weight is not None:
- block_alphas = kwargs.get("block_alphas", None)
- conv_block_dims = kwargs.get("conv_block_dims", None)
- conv_block_alphas = kwargs.get("conv_block_alphas", None)
-
- block_dims, block_alphas, conv_block_dims, conv_block_alphas = get_block_dims_and_alphas(
- block_dims, block_alphas, network_dim, network_alpha, conv_block_dims, conv_block_alphas, conv_dim, conv_alpha
- )
-
- # remove block dim/alpha without learning rate
- block_dims, block_alphas, conv_block_dims, conv_block_alphas = remove_block_dims_and_alphas(
- block_dims, block_alphas, conv_block_dims, conv_block_alphas, down_lr_weight, mid_lr_weight, up_lr_weight
- )
-
- else:
- block_alphas = None
- conv_block_dims = None
- conv_block_alphas = None
-
- # rank/module dropout
- rank_dropout = kwargs.get("rank_dropout", None)
- if rank_dropout is not None:
- rank_dropout = float(rank_dropout)
- module_dropout = kwargs.get("module_dropout", None)
- if module_dropout is not None:
- module_dropout = float(module_dropout)
-
-    # quite a lot of arguments here ( ^ω^)...
- network = LoRANetwork(
- text_encoder,
- unet,
- multiplier=multiplier,
- lora_dim=network_dim,
- alpha=network_alpha,
- dropout=neuron_dropout,
- rank_dropout=rank_dropout,
- module_dropout=module_dropout,
- conv_lora_dim=conv_dim,
- conv_alpha=conv_alpha,
- block_dims=block_dims,
- block_alphas=block_alphas,
- conv_block_dims=conv_block_dims,
- conv_block_alphas=conv_block_alphas,
- varbose=True,
- )
-
- if up_lr_weight is not None or mid_lr_weight is not None or down_lr_weight is not None:
- network.set_block_lr_weight(up_lr_weight, mid_lr_weight, down_lr_weight)
-
- return network
-
-
-# keep in mind that this function may be called from outside this module
-# network_dim and network_alpha already hold their default values here
-# block_dims and block_alphas are either both None or both set
-# conv_dim and conv_alpha are either both None or both set
-def get_block_dims_and_alphas(
- block_dims, block_alphas, network_dim, network_alpha, conv_block_dims, conv_block_alphas, conv_dim, conv_alpha
-):
- num_total_blocks = LoRANetwork.NUM_OF_BLOCKS * 2 + 1
-
- def parse_ints(s):
- return [int(i) for i in s.split(",")]
-
- def parse_floats(s):
- return [float(i) for i in s.split(",")]
-
-    # parse block_dims and block_alphas; they always end up populated
- if block_dims is not None:
- block_dims = parse_ints(block_dims)
- assert (
- len(block_dims) == num_total_blocks
- ), f"block_dims must have {num_total_blocks} elements / block_dimsは{num_total_blocks}個指定してください"
- else:
- print(f"block_dims is not specified. all dims are set to {network_dim} / block_dimsが指定されていません。すべてのdimは{network_dim}になります")
- block_dims = [network_dim] * num_total_blocks
-
- if block_alphas is not None:
- block_alphas = parse_floats(block_alphas)
- assert (
- len(block_alphas) == num_total_blocks
- ), f"block_alphas must have {num_total_blocks} elements / block_alphasは{num_total_blocks}個指定してください"
- else:
- print(
- f"block_alphas is not specified. all alphas are set to {network_alpha} / block_alphasが指定されていません。すべてのalphaは{network_alpha}になります"
- )
- block_alphas = [network_alpha] * num_total_blocks
-
-    # parse conv_block_dims and conv_block_alphas only if specified; otherwise fall back to conv_dim and conv_alpha
- if conv_block_dims is not None:
- conv_block_dims = parse_ints(conv_block_dims)
- assert (
- len(conv_block_dims) == num_total_blocks
- ), f"conv_block_dims must have {num_total_blocks} elements / conv_block_dimsは{num_total_blocks}個指定してください"
-
- if conv_block_alphas is not None:
- conv_block_alphas = parse_floats(conv_block_alphas)
- assert (
- len(conv_block_alphas) == num_total_blocks
- ), f"conv_block_alphas must have {num_total_blocks} elements / conv_block_alphasは{num_total_blocks}個指定してください"
- else:
- if conv_alpha is None:
- conv_alpha = 1.0
- print(
- f"conv_block_alphas is not specified. all alphas are set to {conv_alpha} / conv_block_alphasが指定されていません。すべてのalphaは{conv_alpha}になります"
- )
- conv_block_alphas = [conv_alpha] * num_total_blocks
- else:
- if conv_dim is not None:
- print(
- f"conv_dim/alpha for all blocks are set to {conv_dim} and {conv_alpha} / すべてのブロックのconv_dimとalphaは{conv_dim}および{conv_alpha}になります"
- )
- conv_block_dims = [conv_dim] * num_total_blocks
- conv_block_alphas = [conv_alpha] * num_total_blocks
- else:
- conv_block_dims = None
- conv_block_alphas = None
-
- return block_dims, block_alphas, conv_block_dims, conv_block_alphas
-
-
-# define per-block multipliers for block-wise learning rates; may be called from outside this module
-def get_block_lr_weight(
- down_lr_weight, mid_lr_weight, up_lr_weight, zero_threshold
-) -> Tuple[List[float], List[float], List[float]]:
-    # if no parameters are specified, do nothing and keep the previous behavior
- if up_lr_weight is None and mid_lr_weight is None and down_lr_weight is None:
- return None, None, None
-
-    max_len = LoRANetwork.NUM_OF_BLOCKS  # number of up/down blocks in the full model
-
- def get_list(name_with_suffix) -> List[float]:
- import math
-
- tokens = name_with_suffix.split("+")
- name = tokens[0]
- base_lr = float(tokens[1]) if len(tokens) > 1 else 0.0
-
- if name == "cosine":
- return [math.sin(math.pi * (i / (max_len - 1)) / 2) + base_lr for i in reversed(range(max_len))]
- elif name == "sine":
- return [math.sin(math.pi * (i / (max_len - 1)) / 2) + base_lr for i in range(max_len)]
- elif name == "linear":
- return [i / (max_len - 1) + base_lr for i in range(max_len)]
- elif name == "reverse_linear":
- return [i / (max_len - 1) + base_lr for i in reversed(range(max_len))]
- elif name == "zeros":
- return [0.0 + base_lr] * max_len
- else:
- print(
- "Unknown lr_weight argument %s is used. Valid arguments: / 不明なlr_weightの引数 %s が使われました。有効な引数:\n\tcosine, sine, linear, reverse_linear, zeros"
-                % (name, name)
- )
- return None
-
- if type(down_lr_weight) == str:
- down_lr_weight = get_list(down_lr_weight)
- if type(up_lr_weight) == str:
- up_lr_weight = get_list(up_lr_weight)
-
- if (up_lr_weight != None and len(up_lr_weight) > max_len) or (down_lr_weight != None and len(down_lr_weight) > max_len):
- print("down_weight or up_weight is too long. Parameters after %d-th are ignored." % max_len)
- print("down_weightもしくはup_weightが長すぎます。%d個目以降のパラメータは無視されます。" % max_len)
- up_lr_weight = up_lr_weight[:max_len]
- down_lr_weight = down_lr_weight[:max_len]
-
- if (up_lr_weight != None and len(up_lr_weight) < max_len) or (down_lr_weight != None and len(down_lr_weight) < max_len):
- print("down_weight or up_weight is too short. Parameters after %d-th are filled with 1." % max_len)
- print("down_weightもしくはup_weightが短すぎます。%d個目までの不足したパラメータは1で補われます。" % max_len)
-
- if down_lr_weight != None and len(down_lr_weight) < max_len:
- down_lr_weight = down_lr_weight + [1.0] * (max_len - len(down_lr_weight))
- if up_lr_weight != None and len(up_lr_weight) < max_len:
- up_lr_weight = up_lr_weight + [1.0] * (max_len - len(up_lr_weight))
-
- if (up_lr_weight != None) or (mid_lr_weight != None) or (down_lr_weight != None):
- print("apply block learning rate / 階層別学習率を適用します。")
- if down_lr_weight != None:
- down_lr_weight = [w if w > zero_threshold else 0 for w in down_lr_weight]
- print("down_lr_weight (shallower -> deeper, 浅い層->深い層):", down_lr_weight)
- else:
- print("down_lr_weight: all 1.0, すべて1.0")
-
- if mid_lr_weight != None:
- mid_lr_weight = mid_lr_weight if mid_lr_weight > zero_threshold else 0
- print("mid_lr_weight:", mid_lr_weight)
- else:
- print("mid_lr_weight: 1.0")
-
- if up_lr_weight != None:
- up_lr_weight = [w if w > zero_threshold else 0 for w in up_lr_weight]
- print("up_lr_weight (deeper -> shallower, 深い層->浅い層):", up_lr_weight)
- else:
- print("up_lr_weight: all 1.0, すべて1.0")
-
- return down_lr_weight, mid_lr_weight, up_lr_weight
-
-
-# exclude blocks whose lr_weight is 0 from block_dims; may be called from outside this module
-def remove_block_dims_and_alphas(
- block_dims, block_alphas, conv_block_dims, conv_block_alphas, down_lr_weight, mid_lr_weight, up_lr_weight
-):
- # set 0 to block dim without learning rate to remove the block
- if down_lr_weight != None:
- for i, lr in enumerate(down_lr_weight):
- if lr == 0:
- block_dims[i] = 0
- if conv_block_dims is not None:
- conv_block_dims[i] = 0
- if mid_lr_weight != None:
- if mid_lr_weight == 0:
- block_dims[LoRANetwork.NUM_OF_BLOCKS] = 0
- if conv_block_dims is not None:
- conv_block_dims[LoRANetwork.NUM_OF_BLOCKS] = 0
- if up_lr_weight != None:
- for i, lr in enumerate(up_lr_weight):
- if lr == 0:
- block_dims[LoRANetwork.NUM_OF_BLOCKS + 1 + i] = 0
- if conv_block_dims is not None:
- conv_block_dims[LoRANetwork.NUM_OF_BLOCKS + 1 + i] = 0
-
- return block_dims, block_alphas, conv_block_dims, conv_block_alphas
-
-
-# may be called from outside this module
-def get_block_index(lora_name: str) -> int:
- block_idx = -1 # invalid lora name
-
- m = RE_UPDOWN.search(lora_name)
- if m:
- g = m.groups()
- i = int(g[1])
- j = int(g[3])
- if g[2] == "resnets":
- idx = 3 * i + j
- elif g[2] == "attentions":
- idx = 3 * i + j
- elif g[2] == "upsamplers" or g[2] == "downsamplers":
- idx = 3 * i + 2
-
- if g[0] == "down":
-            block_idx = 1 + idx  # no LoRA corresponds to index 0
- elif g[0] == "up":
- block_idx = LoRANetwork.NUM_OF_BLOCKS + 1 + idx
-
- elif "mid_block_" in lora_name:
- block_idx = LoRANetwork.NUM_OF_BLOCKS # idx=12
-
- return block_idx
-
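-# Illustrative mapping (assuming the usual diffusers module names matched by RE_UPDOWN):
-#   "lora_unet_down_blocks_1_attentions_0_..." -> 1 + (3*1 + 0) = 4   (down block)
-#   "lora_unet_mid_block_..."                  -> NUM_OF_BLOCKS = 12  (mid block)
-#   "lora_unet_up_blocks_2_resnets_1_..."      -> 12 + 1 + (3*2 + 1) = 20  (up block)
-# Names that match nothing return -1 (not a U-Net block).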
-
-# Create network from weights for inference, weights are not loaded here (because can be merged)
-def create_network_from_weights(multiplier, file, vae, text_encoder, unet, weights_sd=None, for_inference=False, **kwargs):
- if weights_sd is None:
- if os.path.splitext(file)[1] == ".safetensors":
- from safetensors.torch import load_file, safe_open
-
- weights_sd = load_file(file)
- else:
- weights_sd = torch.load(file, map_location="cpu")
-
- # get dim/alpha mapping
- modules_dim = {}
- modules_alpha = {}
- for key, value in weights_sd.items():
- if "." not in key:
- continue
-
- lora_name = key.split(".")[0]
- if "alpha" in key:
- modules_alpha[lora_name] = value
- elif "lora_down" in key:
- dim = value.size()[0]
- modules_dim[lora_name] = dim
- # print(lora_name, value.size(), dim)
-
- # support old LoRA without alpha
- for key in modules_dim.keys():
- if key not in modules_alpha:
- modules_alpha[key] = modules_dim[key]
-
- module_class = LoRAInfModule if for_inference else LoRAModule
-
- network = LoRANetwork(
- text_encoder, unet, multiplier=multiplier, modules_dim=modules_dim, modules_alpha=modules_alpha, module_class=module_class
- )
-
- # block lr
- down_lr_weight, mid_lr_weight, up_lr_weight = parse_block_lr_kwargs(kwargs)
- if up_lr_weight is not None or mid_lr_weight is not None or down_lr_weight is not None:
- network.set_block_lr_weight(up_lr_weight, mid_lr_weight, down_lr_weight)
-
- return network, weights_sd
-
-
-class LoRANetwork(torch.nn.Module):
-    NUM_OF_BLOCKS = 12  # number of up/down blocks in the full model
-
- UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel"]
- UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
- TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
- LORA_PREFIX_UNET = "lora_unet"
- LORA_PREFIX_TEXT_ENCODER = "lora_te"
-
-    # SDXL: must start with LORA_PREFIX_TEXT_ENCODER
- LORA_PREFIX_TEXT_ENCODER1 = "lora_te1"
- LORA_PREFIX_TEXT_ENCODER2 = "lora_te2"
-
- def __init__(
- self,
- text_encoder: Union[List[CLIPTextModel], CLIPTextModel],
- unet,
- multiplier: float = 1.0,
- lora_dim: int = 4,
- alpha: float = 1,
- dropout: Optional[float] = None,
- rank_dropout: Optional[float] = None,
- module_dropout: Optional[float] = None,
- conv_lora_dim: Optional[int] = None,
- conv_alpha: Optional[float] = None,
- block_dims: Optional[List[int]] = None,
- block_alphas: Optional[List[float]] = None,
- conv_block_dims: Optional[List[int]] = None,
- conv_block_alphas: Optional[List[float]] = None,
- modules_dim: Optional[Dict[str, int]] = None,
- modules_alpha: Optional[Dict[str, int]] = None,
- module_class: Type[object] = LoRAModule,
- varbose: Optional[bool] = False,
- ) -> None:
- """
- LoRA network: すごく引数が多いが、パターンは以下の通り
- 1. lora_dimとalphaを指定
- 2. lora_dim、alpha、conv_lora_dim、conv_alphaを指定
- 3. block_dimsとblock_alphasを指定 : Conv2d3x3には適用しない
- 4. block_dims、block_alphas、conv_block_dims、conv_block_alphasを指定 : Conv2d3x3にも適用する
- 5. modules_dimとmodules_alphaを指定 (推論用)
- """
- super().__init__()
- self.multiplier = multiplier
-
- self.lora_dim = lora_dim
- self.alpha = alpha
- self.conv_lora_dim = conv_lora_dim
- self.conv_alpha = conv_alpha
- self.dropout = dropout
- self.rank_dropout = rank_dropout
- self.module_dropout = module_dropout
-
- if modules_dim is not None:
- print(f"create LoRA network from weights")
- elif block_dims is not None:
- print(f"create LoRA network from block_dims")
- print(f"neuron dropout: p={self.dropout}, rank dropout: p={self.rank_dropout}, module dropout: p={self.module_dropout}")
- print(f"block_dims: {block_dims}")
- print(f"block_alphas: {block_alphas}")
- if conv_block_dims is not None:
- print(f"conv_block_dims: {conv_block_dims}")
- print(f"conv_block_alphas: {conv_block_alphas}")
- else:
- print(f"create LoRA network. base dim (rank): {lora_dim}, alpha: {alpha}")
- print(f"neuron dropout: p={self.dropout}, rank dropout: p={self.rank_dropout}, module dropout: p={self.module_dropout}")
- if self.conv_lora_dim is not None:
- print(f"apply LoRA to Conv2d with kernel size (3,3). dim (rank): {self.conv_lora_dim}, alpha: {self.conv_alpha}")
-
- # create module instances
- def create_modules(
- is_unet: bool,
- text_encoder_idx: Optional[int], # None, 1, 2
- root_module: torch.nn.Module,
- target_replace_modules: List[torch.nn.Module],
- ) -> List[LoRAModule]:
- prefix = (
- self.LORA_PREFIX_UNET
- if is_unet
- else (
- self.LORA_PREFIX_TEXT_ENCODER
- if text_encoder_idx is None
- else (self.LORA_PREFIX_TEXT_ENCODER1 if text_encoder_idx == 1 else self.LORA_PREFIX_TEXT_ENCODER2)
- )
- )
- loras = []
- skipped = []
- for name, module in root_module.named_modules():
- if module.__class__.__name__ in target_replace_modules:
- for child_name, child_module in module.named_modules():
- is_linear = child_module.__class__.__name__ == "Linear"
- is_conv2d = child_module.__class__.__name__ == "Conv2d"
- is_conv2d_1x1 = is_conv2d and child_module.kernel_size == (1, 1)
-
- if is_linear or is_conv2d:
- lora_name = prefix + "." + name + "." + child_name
- lora_name = lora_name.replace(".", "_")
-
- dim = None
- alpha = None
-
- if modules_dim is not None:
-                                # dim/alpha are specified per module (e.g. when loading from weights)
- if lora_name in modules_dim:
- dim = modules_dim[lora_name]
- alpha = modules_alpha[lora_name]
- elif is_unet and block_dims is not None:
-                                # U-Net with block_dims specified
- block_idx = get_block_index(lora_name)
- if is_linear or is_conv2d_1x1:
- dim = block_dims[block_idx]
- alpha = block_alphas[block_idx]
- elif conv_block_dims is not None:
- dim = conv_block_dims[block_idx]
- alpha = conv_block_alphas[block_idx]
- else:
-                                # normal case: apply to all target modules
- if is_linear or is_conv2d_1x1:
- dim = self.lora_dim
- alpha = self.alpha
- elif self.conv_lora_dim is not None:
- dim = self.conv_lora_dim
- alpha = self.conv_alpha
-
- if dim is None or dim == 0:
-                                # record the skipped module for reporting
- if is_linear or is_conv2d_1x1 or (self.conv_lora_dim is not None or conv_block_dims is not None):
- skipped.append(lora_name)
- continue
-
- lora = module_class(
- lora_name,
- child_module,
- self.multiplier,
- dim,
- alpha,
- dropout=dropout,
- rank_dropout=rank_dropout,
- module_dropout=module_dropout,
- )
- loras.append(lora)
- return loras, skipped
-
- text_encoders = text_encoder if type(text_encoder) == list else [text_encoder]
- print(text_encoders)
- # create LoRA for text encoder
-        # TODO: creating LoRA modules for every submodule each time is wasteful; worth revisiting
- self.text_encoder_loras = []
- skipped_te = []
- for i, text_encoder in enumerate(text_encoders):
- if len(text_encoders) > 1:
- index = i + 1
- print(f"create LoRA for Text Encoder {index}:")
- else:
- index = None
- print(f"create LoRA for Text Encoder:")
-
- print(text_encoder)
- text_encoder_loras, skipped = create_modules(False, index, text_encoder, LoRANetwork.TEXT_ENCODER_TARGET_REPLACE_MODULE)
- self.text_encoder_loras.extend(text_encoder_loras)
- skipped_te += skipped
- print(f"create LoRA for Text Encoder: {len(self.text_encoder_loras)} modules.")
-
- # extend U-Net target modules if conv2d 3x3 is enabled, or load from weights
- target_modules = LoRANetwork.UNET_TARGET_REPLACE_MODULE
- if modules_dim is not None or self.conv_lora_dim is not None or conv_block_dims is not None:
- target_modules += LoRANetwork.UNET_TARGET_REPLACE_MODULE_CONV2D_3X3
-
- self.unet_loras, skipped_un = create_modules(True, None, unet, target_modules)
- print(f"create LoRA for U-Net: {len(self.unet_loras)} modules.")
-
- skipped = skipped_te + skipped_un
- if varbose and len(skipped) > 0:
- print(
- f"because block_lr_weight is 0 or dim (rank) is 0, {len(skipped)} LoRA modules are skipped / block_lr_weightまたはdim (rank)が0の為、次の{len(skipped)}個のLoRAモジュールはスキップされます:"
- )
- for name in skipped:
- print(f"\t{name}")
-
- self.up_lr_weight: List[float] = None
- self.down_lr_weight: List[float] = None
- self.mid_lr_weight: float = None
- self.block_lr = False
-
- # assertion
- names = set()
- for lora in self.text_encoder_loras + self.unet_loras:
- assert lora.lora_name not in names, f"duplicated lora name: {lora.lora_name}"
- names.add(lora.lora_name)
-
- def set_multiplier(self, multiplier):
- self.multiplier = multiplier
- for lora in self.text_encoder_loras + self.unet_loras:
- lora.multiplier = self.multiplier
-
- def load_weights(self, file):
- if os.path.splitext(file)[1] == ".safetensors":
- from safetensors.torch import load_file
-
- weights_sd = load_file(file)
- else:
- weights_sd = torch.load(file, map_location="cpu")
- info = self.load_state_dict(weights_sd, False)
- return info
-
- def apply_to(self, text_encoder, unet, apply_text_encoder=True, apply_unet=True):
- if apply_text_encoder:
- print("enable LoRA for text encoder")
- else:
- self.text_encoder_loras = []
-
- if apply_unet:
- print("enable LoRA for U-Net")
- else:
- self.unet_loras = []
-
- for lora in self.text_encoder_loras + self.unet_loras:
- lora.apply_to()
- self.add_module(lora.lora_name, lora)
-
-    # returns whether the LoRA weights can be merged into the base model
- def is_mergeable(self):
- return True
-
- # TODO refactor to common function with apply_to
- def merge_to(self, text_encoder, unet, weights_sd, dtype, device):
- apply_text_encoder = apply_unet = False
- for key in weights_sd.keys():
- if key.startswith(LoRANetwork.LORA_PREFIX_TEXT_ENCODER):
- apply_text_encoder = True
- elif key.startswith(LoRANetwork.LORA_PREFIX_UNET):
- apply_unet = True
-
- if apply_text_encoder:
- print("enable LoRA for text encoder")
- else:
- self.text_encoder_loras = []
-
- if apply_unet:
- print("enable LoRA for U-Net")
- else:
- self.unet_loras = []
-
- for lora in self.text_encoder_loras + self.unet_loras:
- sd_for_lora = {}
- for key in weights_sd.keys():
- if key.startswith(lora.lora_name):
- sd_for_lora[key[len(lora.lora_name) + 1 :]] = weights_sd[key]
- lora.merge_to(sd_for_lora, dtype, device)
-
- print(f"weights are merged")
-
-    # define per-block multipliers for block-wise learning rates; the argument order (up, mid, down) is reversed, but leave it as is for now
- def set_block_lr_weight(
- self,
- up_lr_weight: List[float] = None,
- mid_lr_weight: float = None,
- down_lr_weight: List[float] = None,
- ):
- self.block_lr = True
- self.down_lr_weight = down_lr_weight
- self.mid_lr_weight = mid_lr_weight
- self.up_lr_weight = up_lr_weight
-
- def get_lr_weight(self, lora: LoRAModule) -> float:
- lr_weight = 1.0
- block_idx = get_block_index(lora.lora_name)
- if block_idx < 0:
- return lr_weight
-
- if block_idx < LoRANetwork.NUM_OF_BLOCKS:
- if self.down_lr_weight != None:
- lr_weight = self.down_lr_weight[block_idx]
- elif block_idx == LoRANetwork.NUM_OF_BLOCKS:
- if self.mid_lr_weight != None:
- lr_weight = self.mid_lr_weight
- elif block_idx > LoRANetwork.NUM_OF_BLOCKS:
- if self.up_lr_weight != None:
- lr_weight = self.up_lr_weight[block_idx - LoRANetwork.NUM_OF_BLOCKS - 1]
-
- return lr_weight
-
-    # it might be nice to allow separate learning rates for the two text encoders
- def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):
- self.requires_grad_(True)
- all_params = []
-
- def enumerate_params(loras):
- params = []
- for lora in loras:
- params.extend(lora.parameters())
- return params
-
- if self.text_encoder_loras:
- param_data = {"params": enumerate_params(self.text_encoder_loras)}
- if text_encoder_lr is not None:
- param_data["lr"] = text_encoder_lr
- all_params.append(param_data)
-
- if self.unet_loras:
- if self.block_lr:
-                # group LoRA modules by block so learning-rate graphs can be drawn per block
- block_idx_to_lora = {}
- for lora in self.unet_loras:
- idx = get_block_index(lora.lora_name)
- if idx not in block_idx_to_lora:
- block_idx_to_lora[idx] = []
- block_idx_to_lora[idx].append(lora)
-
-                # set optimizer parameters per block
- for idx, block_loras in block_idx_to_lora.items():
- param_data = {"params": enumerate_params(block_loras)}
-
- if unet_lr is not None:
- param_data["lr"] = unet_lr * self.get_lr_weight(block_loras[0])
- elif default_lr is not None:
- param_data["lr"] = default_lr * self.get_lr_weight(block_loras[0])
- if ("lr" in param_data) and (param_data["lr"] == 0):
- continue
- all_params.append(param_data)
-
- else:
- param_data = {"params": enumerate_params(self.unet_loras)}
- if unet_lr is not None:
- param_data["lr"] = unet_lr
- all_params.append(param_data)
-
- return all_params
-
- def enable_gradient_checkpointing(self):
- # not supported
- pass
-
- def prepare_grad_etc(self, text_encoder, unet):
- self.requires_grad_(True)
-
- def on_epoch_start(self, text_encoder, unet):
- self.train()
-
- def get_trainable_params(self):
- return self.parameters()
-
- def save_weights(self, file, dtype, metadata):
- if metadata is not None and len(metadata) == 0:
- metadata = None
-
- state_dict = self.state_dict()
-
- if dtype is not None:
- for key in list(state_dict.keys()):
- v = state_dict[key]
- v = v.detach().clone().to("cpu").to(dtype)
- state_dict[key] = v
-
- if os.path.splitext(file)[1] == ".safetensors":
- from safetensors.torch import save_file
- from library import train_util
-
- # Precalculate model hashes to save time on indexing
- if metadata is None:
- metadata = {}
- model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, metadata)
- metadata["sshs_model_hash"] = model_hash
- metadata["sshs_legacy_hash"] = legacy_hash
-
- save_file(state_dict, file, metadata)
- else:
- torch.save(state_dict, file)
-
- # mask is a tensor with values from 0 to 1
- def set_region(self, sub_prompt_index, is_last_network, mask):
- if mask.max() == 0:
- mask = torch.ones_like(mask)
-
- self.mask = mask
- self.sub_prompt_index = sub_prompt_index
- self.is_last_network = is_last_network
-
- for lora in self.text_encoder_loras + self.unet_loras:
- lora.set_network(self)
-
- def set_current_generation(self, batch_size, num_sub_prompts, width, height, shared):
- self.batch_size = batch_size
- self.num_sub_prompts = num_sub_prompts
- self.current_size = (height, width)
- self.shared = shared
-
- # create masks
- mask = self.mask
- mask_dic = {}
- mask = mask.unsqueeze(0).unsqueeze(1) # b(1),c(1),h,w
- ref_weight = self.text_encoder_loras[0].lora_down.weight if self.text_encoder_loras else self.unet_loras[0].lora_down.weight
- dtype = ref_weight.dtype
- device = ref_weight.device
-
- def resize_add(mh, mw):
- # print(mh, mw, mh * mw)
- m = torch.nn.functional.interpolate(mask, (mh, mw), mode="bilinear") # doesn't work in bf16
- m = m.to(device, dtype=dtype)
- mask_dic[mh * mw] = m
-
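-        # build masks keyed by area for the latent resolution and three further downsamplings
-        # (roughly 1/8, 1/16, 1/32 and 1/64 of the image size), so get_mask_for_x can look them up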
- h = height // 8
- w = width // 8
- for _ in range(4):
- resize_add(h, w)
- if h % 2 == 1 or w % 2 == 1: # add extra shape if h/w is not divisible by 2
- resize_add(h + h % 2, w + w % 2)
- h = (h + 1) // 2
- w = (w + 1) // 2
-
- self.mask_dic = mask_dic
-
- def backup_weights(self):
-        # back up the original module weights
- loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras
- for lora in loras:
- org_module = lora.org_module_ref[0]
- if not hasattr(org_module, "_lora_org_weight"):
- sd = org_module.state_dict()
- org_module._lora_org_weight = sd["weight"].detach().clone()
- org_module._lora_restored = True
-
- def restore_weights(self):
-        # restore the original module weights
- loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras
- for lora in loras:
- org_module = lora.org_module_ref[0]
- if not org_module._lora_restored:
- sd = org_module.state_dict()
- sd["weight"] = org_module._lora_org_weight
- org_module.load_state_dict(sd)
- org_module._lora_restored = True
-
- def pre_calculation(self):
-        # pre-merge the LoRA weights into the original modules
- loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras
- for lora in loras:
- org_module = lora.org_module_ref[0]
- sd = org_module.state_dict()
-
- org_weight = sd["weight"]
- lora_weight = lora.get_weight().to(org_weight.device, dtype=org_weight.dtype)
- sd["weight"] = org_weight + lora_weight
- assert sd["weight"].shape == org_weight.shape
- org_module.load_state_dict(sd)
-
- org_module._lora_restored = False
- lora.enabled = False
-
- def apply_max_norm_regularization(self, max_norm_value, device):
- downkeys = []
- upkeys = []
- alphakeys = []
- norms = []
- keys_scaled = 0
-
- state_dict = self.state_dict()
- for key in state_dict.keys():
- if "lora_down" in key and "weight" in key:
- downkeys.append(key)
- upkeys.append(key.replace("lora_down", "lora_up"))
- alphakeys.append(key.replace("lora_down.weight", "alpha"))
-
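-        # for each LoRA module, reconstruct the effective weight delta (up @ down, scaled by alpha/dim)
-        # and rescale both factors when its norm exceeds max_norm_value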
- for i in range(len(downkeys)):
- down = state_dict[downkeys[i]].to(device)
- up = state_dict[upkeys[i]].to(device)
- alpha = state_dict[alphakeys[i]].to(device)
- dim = down.shape[0]
- scale = alpha / dim
-
- if up.shape[2:] == (1, 1) and down.shape[2:] == (1, 1):
- updown = (up.squeeze(2).squeeze(2) @ down.squeeze(2).squeeze(2)).unsqueeze(2).unsqueeze(3)
- elif up.shape[2:] == (3, 3) or down.shape[2:] == (3, 3):
- updown = torch.nn.functional.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)
- else:
- updown = up @ down
-
- updown *= scale
-
- norm = updown.norm().clamp(min=max_norm_value / 2)
- desired = torch.clamp(norm, max=max_norm_value)
- ratio = desired.cpu() / norm.cpu()
- sqrt_ratio = ratio**0.5
- if ratio != 1:
- keys_scaled += 1
- state_dict[upkeys[i]] *= sqrt_ratio
- state_dict[downkeys[i]] *= sqrt_ratio
- scalednorm = updown.norm() * ratio
- norms.append(scalednorm.item())
-
- return keys_scaled, sum(norms) / len(norms), max(norms)
\ No newline at end of file
diff --git a/spaces/lewtun/hslu-demo/README.md b/spaces/lewtun/hslu-demo/README.md
deleted file mode 100644
index bcf7fafe91f659b999a6ef00786f439eb2863d7d..0000000000000000000000000000000000000000
--- a/spaces/lewtun/hslu-demo/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Hslu Demo
-emoji: 🌖
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version`: _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/limingcv/AlignDet/pretrain/selfsup_cbv2_swin-L_1x_coco/cbv2.py b/spaces/limingcv/AlignDet/pretrain/selfsup_cbv2_swin-L_1x_coco/cbv2.py
deleted file mode 100644
index cc465c43a0809a52ba8fac1541e193dbc6bfc963..0000000000000000000000000000000000000000
--- a/spaces/limingcv/AlignDet/pretrain/selfsup_cbv2_swin-L_1x_coco/cbv2.py
+++ /dev/null
@@ -1,518 +0,0 @@
-train_dataset_type = 'MultiViewCocoDataset'
-test_dataset_type = 'CocoDataset'
-data_root = 'data/coco/'
-classes = ['selective_search']
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-load_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_mask=False)
-]
-train_pipeline1 = [
- dict(
- type='Resize',
- img_scale=[(1600, 400), (1600, 1400)],
- multiscale_mode='range',
- keep_ratio=True),
- dict(type='FilterAnnotations', min_gt_bbox_wh=(0.01, 0.01)),
- dict(type='Pad', size_divisor=32),
- dict(type='RandFlip', flip_ratio=0.5),
- dict(
- type='OneOf',
- transforms=[
- dict(type='Identity'),
- dict(type='AutoContrast'),
- dict(type='RandEqualize'),
- dict(type='RandSolarize'),
- dict(type='RandColor'),
- dict(type='RandContrast'),
- dict(type='RandBrightness'),
- dict(type='RandSharpness'),
- dict(type='RandPosterize')
- ]),
- dict(
- type='Normalize',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- to_rgb=True),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-train_pipeline2 = [
- dict(
- type='Resize',
- img_scale=[(1600, 400), (1600, 1400)],
- multiscale_mode='range',
- keep_ratio=True),
- dict(type='FilterAnnotations', min_gt_bbox_wh=(0.01, 0.01)),
- dict(type='Pad', size_divisor=32),
- dict(type='RandFlip', flip_ratio=0.5),
- dict(
- type='OneOf',
- transforms=[
- dict(type='Identity'),
- dict(type='AutoContrast'),
- dict(type='RandEqualize'),
- dict(type='RandSolarize'),
- dict(type='RandColor'),
- dict(type='RandContrast'),
- dict(type='RandBrightness'),
- dict(type='RandSharpness'),
- dict(type='RandPosterize')
- ]),
- dict(
- type='Normalize',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- to_rgb=True),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(
- type='Normalize',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- to_rgb=True),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-data = dict(
- samples_per_gpu=2,
- workers_per_gpu=2,
- train=dict(
- type='MultiViewCocoDataset',
- dataset=dict(
- type='CocoDataset',
- classes=['selective_search'],
- ann_file=
- 'data/coco/filtered_proposals/train2017_ratio3size0008@0.5.json',
- img_prefix='data/coco/train2017/',
- pipeline=[
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_mask=False)
- ]),
- num_views=2,
- pipelines=[[{
- 'type': 'Resize',
- 'img_scale': [(1600, 400), (1600, 1400)],
- 'multiscale_mode': 'range',
- 'keep_ratio': True
- }, {
- 'type': 'FilterAnnotations',
- 'min_gt_bbox_wh': (0.01, 0.01)
- }, {
- 'type': 'Pad',
- 'size_divisor': 32
- }, {
- 'type': 'RandFlip',
- 'flip_ratio': 0.5
- }, {
- 'type':
- 'OneOf',
- 'transforms': [{
- 'type': 'Identity'
- }, {
- 'type': 'AutoContrast'
- }, {
- 'type': 'RandEqualize'
- }, {
- 'type': 'RandSolarize'
- }, {
- 'type': 'RandColor'
- }, {
- 'type': 'RandContrast'
- }, {
- 'type': 'RandBrightness'
- }, {
- 'type': 'RandSharpness'
- }, {
- 'type': 'RandPosterize'
- }]
- }, {
- 'type': 'Normalize',
- 'mean': [123.675, 116.28, 103.53],
- 'std': [58.395, 57.12, 57.375],
- 'to_rgb': True
- }, {
- 'type': 'DefaultFormatBundle'
- }, {
- 'type': 'Collect',
- 'keys': ['img', 'gt_bboxes', 'gt_labels']
- }],
- [{
- 'type': 'Resize',
- 'img_scale': [(1600, 400), (1600, 1400)],
- 'multiscale_mode': 'range',
- 'keep_ratio': True
- }, {
- 'type': 'FilterAnnotations',
- 'min_gt_bbox_wh': (0.01, 0.01)
- }, {
- 'type': 'Pad',
- 'size_divisor': 32
- }, {
- 'type': 'RandFlip',
- 'flip_ratio': 0.5
- }, {
- 'type':
- 'OneOf',
- 'transforms': [{
- 'type': 'Identity'
- }, {
- 'type': 'AutoContrast'
- }, {
- 'type': 'RandEqualize'
- }, {
- 'type': 'RandSolarize'
- }, {
- 'type': 'RandColor'
- }, {
- 'type': 'RandContrast'
- }, {
- 'type': 'RandBrightness'
- }, {
- 'type': 'RandSharpness'
- }, {
- 'type': 'RandPosterize'
- }]
- }, {
- 'type': 'Normalize',
- 'mean': [123.675, 116.28, 103.53],
- 'std': [58.395, 57.12, 57.375],
- 'to_rgb': True
- }, {
- 'type': 'DefaultFormatBundle'
- }, {
- 'type': 'Collect',
- 'keys': ['img', 'gt_bboxes', 'gt_labels']
- }]]),
- val=dict(
- type='CocoDataset',
- classes=['selective_search'],
- ann_file='data/coco/annotations/instances_val2017.json',
- img_prefix='data/coco/val2017/',
- pipeline=[
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(
- type='Normalize',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- to_rgb=True),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
- ]),
- test=dict(
- type='CocoDataset',
- classes=['selective_search'],
- ann_file='data/coco/annotations/instances_val2017.json',
- img_prefix='data/coco/val2017/',
- pipeline=[
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(
- type='Normalize',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- to_rgb=True),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
- ]))
-evaluation = dict(interval=65535, gpu_collect=True)
-optimizer = dict(
- type='AdamW',
- lr=0.0001,
- betas=(0.9, 0.999),
- weight_decay=0.05,
- paramwise_cfg=dict(
- custom_keys=dict(
- absolute_pos_embed=dict(decay_mult=0.0),
- relative_position_bias_table=dict(decay_mult=0.0),
- norm=dict(decay_mult=0.0))))
-optimizer_config = dict(grad_clip=None)
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=500,
- warmup_ratio=0.001,
- step=[8, 11])
-runner = dict(type='EpochBasedRunner', max_epochs=12)
-checkpoint_config = dict(interval=1)
-log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
-custom_hooks = [
- dict(type='MomentumUpdateHook'),
- dict(
- type='MMDetWandbHook',
- init_kwargs=dict(project='I2B', group='pretrain'),
- interval=50,
- num_eval_images=0,
- log_checkpoint=False)
-]
-dist_params = dict(backend='nccl')
-log_level = 'INFO'
-load_from = None
-resume_from = None
-workflow = [('train', 1)]
-opencv_num_threads = 0
-mp_start_method = 'fork'
-auto_scale_lr = dict(enable=True, base_batch_size=16)
-custom_imports = dict(
- imports=[
- 'mmselfsup.datasets.pipelines',
- 'selfsup.core.hook.momentum_update_hook',
- 'selfsup.datasets.pipelines.selfsup_pipelines',
- 'selfsup.datasets.pipelines.rand_aug',
- 'selfsup.datasets.single_view_coco',
- 'selfsup.datasets.multi_view_coco',
- 'selfsup.models.losses.contrastive_loss',
- 'selfsup.models.dense_heads.fcos_head',
- 'selfsup.models.dense_heads.retina_head',
- 'selfsup.models.dense_heads.detr_head',
- 'selfsup.models.dense_heads.deformable_detr_head',
- 'selfsup.models.roi_heads.bbox_heads.convfc_bbox_head',
- 'selfsup.models.roi_heads.standard_roi_head',
- 'selfsup.models.roi_heads.htc_roi_head',
- 'selfsup.models.roi_heads.cbv2_roi_head',
- 'selfsup.models.necks.cb_fpn', 'selfsup.models.backbones.cbv2',
- 'selfsup.models.backbones.swinv1',
- 'selfsup.models.detectors.selfsup_detector',
- 'selfsup.models.detectors.selfsup_fcos',
- 'selfsup.models.detectors.selfsup_detr',
- 'selfsup.models.detectors.selfsup_deformable_detr',
- 'selfsup.models.detectors.selfsup_retinanet',
- 'selfsup.models.detectors.selfsup_mask_rcnn',
- 'selfsup.models.detectors.selfsup_htc',
- 'selfsup.models.detectors.selfsup_cbv2',
- 'selfsup.models.detectors.cbv2',
- 'selfsup.core.bbox.assigners.hungarian_assigner',
- 'selfsup.core.bbox.assigners.pseudo_hungarian_assigner',
- 'selfsup.core.bbox.match_costs.match_cost'
- ],
- allow_failed_imports=False)
-model = dict(
- type='SelfSupDetector',
- backbone=dict(
- type='SelfSupCBv2',
- backbone=dict(
- type='CBSwinTransformer',
- embed_dim=192,
- depths=[2, 2, 18, 2],
- num_heads=[6, 12, 24, 48],
- window_size=7,
- mlp_ratio=4.0,
- qkv_bias=True,
- qk_scale=None,
- drop_rate=0.0,
- attn_drop_rate=0.0,
- drop_path_rate=0.2,
- ape=False,
- patch_norm=True,
- out_indices=(0, 1, 2, 3),
- pretrained=
- 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
- use_checkpoint=False),
- neck=dict(
- type='CBFPN',
- in_channels=[192, 384, 768, 1536],
- out_channels=256,
- num_outs=5),
- rpn_head=dict(
- type='RPNHead',
- in_channels=256,
- feat_channels=256,
- anchor_generator=dict(
- type='AnchorGenerator',
- scales=[8],
- ratios=[0.5, 1.0, 2.0],
- strides=[4, 8, 16, 32, 64]),
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0.0, 0.0, 0.0, 0.0],
- target_stds=[1.0, 1.0, 1.0, 1.0]),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
- loss_bbox=dict(
- type='SmoothL1Loss', beta=0.1111111111111111,
- loss_weight=1.0)),
- roi_head=dict(
- type='SelfSupCBv2Head',
- interleaved=True,
- mask_info_flow=True,
- num_stages=3,
- stage_loss_weights=[1, 0.5, 0.25],
- bbox_roi_extractor=dict(
- type='SingleRoIExtractor',
- roi_layer=dict(
- type='RoIAlign', output_size=7, sampling_ratio=0),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32]),
- bbox_head=[
- dict(
- type='SelfSupShared4Conv1FCBBoxHead',
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=256,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0.0, 0.0, 0.0, 0.0],
- target_stds=[0.1, 0.1, 0.2, 0.2]),
- reg_class_agnostic=True,
- loss_cls=dict(
- type='ContrastiveLoss',
- loss_weight=1.0,
- temperature=0.5),
- loss_bbox=dict(
- type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
- dict(
- type='SelfSupShared4Conv1FCBBoxHead',
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=256,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0.0, 0.0, 0.0, 0.0],
- target_stds=[0.05, 0.05, 0.1, 0.1]),
- reg_class_agnostic=True,
- loss_cls=dict(
- type='ContrastiveLoss',
- loss_weight=1.0,
- temperature=0.5),
- loss_bbox=dict(
- type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
- dict(
- type='SelfSupShared4Conv1FCBBoxHead',
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=256,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0.0, 0.0, 0.0, 0.0],
- target_stds=[0.033, 0.033, 0.067, 0.067]),
- reg_class_agnostic=True,
- loss_cls=dict(
- type='ContrastiveLoss',
- loss_weight=1.0,
- temperature=0.5),
- loss_bbox=dict(
- type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
- ],
- mask_roi_extractor=None,
- mask_head=None),
- train_cfg=dict(
- rpn=dict(
- assigner=dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.7,
- neg_iou_thr=0.3,
- min_pos_iou=0.3,
- ignore_iof_thr=-1),
- sampler=dict(
- type='RandomSampler',
- num=256,
- pos_fraction=0.5,
- neg_pos_ub=-1,
- add_gt_as_proposals=False),
- allowed_border=0,
- pos_weight=-1,
- debug=False),
- rpn_proposal=dict(
- nms_pre=2000,
- max_per_img=2000,
- nms=dict(type='nms', iou_threshold=0.7),
- min_bbox_size=0),
- rcnn=[
- dict(
- assigner=dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.5,
- neg_iou_thr=0.5,
- min_pos_iou=0.5,
- ignore_iof_thr=-1),
- sampler=dict(
- type='RandomSampler',
- num=512,
- pos_fraction=0.25,
- neg_pos_ub=-1,
- add_gt_as_proposals=True),
- mask_size=28,
- pos_weight=-1,
- debug=False),
- dict(
- assigner=dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.6,
- neg_iou_thr=0.6,
- min_pos_iou=0.6,
- ignore_iof_thr=-1),
- sampler=dict(
- type='RandomSampler',
- num=512,
- pos_fraction=0.25,
- neg_pos_ub=-1,
- add_gt_as_proposals=True),
- mask_size=28,
- pos_weight=-1,
- debug=False),
- dict(
- assigner=dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.7,
- neg_iou_thr=0.7,
- min_pos_iou=0.7,
- ignore_iof_thr=-1),
- sampler=dict(
- type='RandomSampler',
- num=512,
- pos_fraction=0.25,
- neg_pos_ub=-1,
- add_gt_as_proposals=True),
- mask_size=28,
- pos_weight=-1,
- debug=False)
- ]),
- test_cfg=dict(
- rpn=dict(
- nms_pre=1000,
- max_per_img=1000,
- nms=dict(type='nms', iou_threshold=0.7),
- min_bbox_size=0),
- rcnn=dict(
- score_thr=0.001,
- nms=dict(type='nms', iou_threshold=0.5),
- max_per_img=100,
- mask_thr_binary=0.5))))
-find_unused_parameters = True
-fp16 = dict(loss_scale='dynamic')
-work_dir = 'work_dirs/selfsup_cbv2_swin-L_1x_coco'
-auto_resume = False
-gpu_ids = range(0, 64)
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen.md
deleted file mode 100644
index a8c0b3d9d9c6ae7f9ad96e9499c6a831760eea8e..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen.md
+++ /dev/null
@@ -1,40 +0,0 @@
-
-How to Use Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen
-If you are looking for a reliable and secure VPN service that can protect your online privacy and bypass geo-restrictions, you might want to try Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen. This is a cracked version of the popular Avira Phantom VPN Pro software that allows you to use it for free without any limitations.
-In this article, we will show you how to download, install and activate Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen on your Windows PC. We will also explain some of the features and benefits of using this VPN service.
-Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen
Download ►►► https://bytlly.com/2uGwjE
-What is Avira Phantom VPN Pro?
-Avira Phantom VPN Pro is a premium VPN service that offers you unlimited bandwidth, fast speed, and strong encryption. It also has a kill switch feature that automatically disconnects you from the internet if the VPN connection drops, preventing any data leaks.
-With Avira Phantom VPN Pro, you can access any website or service that is blocked in your region, such as Netflix, Hulu, BBC iPlayer, etc. You can also hide your IP address and location from hackers, trackers, and advertisers who might want to spy on your online activities or steal your personal information.
-Avira Phantom VPN Pro supports up to 5 devices simultaneously and has servers in 38 countries around the world. It is compatible with Windows, Mac, Android, and iOS devices.
-How to Download Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen?
-To download Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen, you need to follow these steps:
-
-- Click on this link to go to the download page: https://example.com/download
-- Choose the option that says "Download Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen"
-- Wait for the download to finish and save the file on your PC
-
-How to Install Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen?
-To install Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen, you need to follow these steps:
-
-- Open the downloaded file and run the setup wizard
-- Follow the instructions on the screen and agree to the terms and conditions
-- Choose the destination folder where you want to install the software
-- Click on "Install" and wait for the installation to complete
-
-How to Activate Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen?
-To activate Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen, you need to follow these steps:
-
-- Launch the software and click on "Activate"
-- Enter the serial key that you received in your email or copy it from this link: https://example.com/serial
-- Click on "OK" and enjoy using Avira Phantom VPN Pro for free
-
-What are the Features and Benefits of Using Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen?
-Some of the features and benefits of using Avira Phantom VPN Pro V3.7.1.26756 Final Crack (2018) Serial Key Keygen are:
-
-
-- You can use it for free without any limitations or restrictions
-- You can access any website or service that is blocked in your region
-- You can hide your IP address and location from hackers, d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Getway Raid Recovery Crack !!LINK!!.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Getway Raid Recovery Crack !!LINK!!.md
deleted file mode 100644
index 2fe0175ca7907347efb30b9ef0b7ead06c28aee9..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Getway Raid Recovery Crack !!LINK!!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Getway Raid Recovery Crack
Download ►►►►► https://bytlly.com/2uGwQe
-
-Fast Automatic Data Recovery from RAID 0, 1, 0+1, 1+0, 1E, RAID 4, RAID 5, 50, 5EE, 5R, RAID 6, 60 and JBOD. RAID Recovery Software for NVidia, . NET/Raptor, ATA and SCSI, recovery of lost partitions or disks, as well as to perform a complete analysis of the RAID profile structure. RAIDRECADER is a program for fast data recovery from RAID, SCSI, ATA and storage devices. Supports RAID 0, 1, 0+1, 1E, RAID 4, RAID 5, 50, 52 and JBOD. Performs full recovery of files, folders and partitions in a few seconds. It also releases a version for Windows NT. The program supports recovery of lost disks, partitions and folders. 8a78ff9644
-
-
-
diff --git a/spaces/linhdo/document-layout-analysis/app.py b/spaces/linhdo/document-layout-analysis/app.py
deleted file mode 100644
index 467c2aae29a5a9553a5405b5ebcbb0b7b7dc691f..0000000000000000000000000000000000000000
--- a/spaces/linhdo/document-layout-analysis/app.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Import libraries
-import cv2  # for reading images and drawing bounding boxes
-from ultralytics import YOLO
-import gradio as gr
-
-# Define constants
-ENTITIES_COLORS = {
- "Caption": (191, 100, 21),
- "Footnote": (2, 62, 115),
- "Formula": (140, 80, 58),
- "List-item": (168, 181, 69),
- "Page-footer": (2, 69, 84),
- "Page-header": (83, 115, 106),
- "Picture": (255, 72, 88),
- "Section-header": (0, 204, 192),
- "Table": (116, 127, 127),
- "Text": (0, 153, 221),
- "Title": (196, 51, 2)
-}
-BOX_PADDING = 2
-
-# Load models
-DETECTION_MODEL = YOLO("models/dla-model.pt")
-
-def detect(image_path):
- """
- Output inference image with bounding box
-
- Args:
- - image: to check for checkboxes
-
- Return: image with bounding boxes drawn
- """
- image = cv2.imread(image_path)
- if image is None:
- return image
-
- # Predict on image
- results = DETECTION_MODEL.predict(source=image, conf=0.2, iou=0.8) # Predict on image
- boxes = results[0].boxes # Get bounding boxes
-
- if len(boxes) == 0:
- return image
-
- # Get bounding boxes
- for box in boxes:
- detection_class_conf = round(box.conf.item(), 2)
- cls = list(ENTITIES_COLORS)[int(box.cls)]
- # Get start and end points of the current box
- start_box = (int(box.xyxy[0][0]), int(box.xyxy[0][1]))
- end_box = (int(box.xyxy[0][2]), int(box.xyxy[0][3]))
-
-
- # 01. DRAW BOUNDING BOX OF OBJECT
- line_thickness = round(0.002 * (image.shape[0] + image.shape[1]) / 2) + 1
- image = cv2.rectangle(img=image,
- pt1=start_box,
- pt2=end_box,
- color=ENTITIES_COLORS[cls],
- thickness = line_thickness) # Draw the box with predefined colors
-
- # 02. DRAW LABEL
- text = cls + " " + str(detection_class_conf)
- # Get text dimensions to draw wrapping box
- font_thickness = max(line_thickness - 1, 1)
- (text_w, text_h), _ = cv2.getTextSize(text=text, fontFace=2, fontScale=line_thickness/3, thickness=font_thickness)
- # Draw wrapping box for text
- image = cv2.rectangle(img=image,
- pt1=(start_box[0], start_box[1] - text_h - BOX_PADDING*2),
- pt2=(start_box[0] + text_w + BOX_PADDING * 2, start_box[1]),
- color=ENTITIES_COLORS[cls],
- thickness=-1)
- # Put class name on image
- start_text = (start_box[0] + BOX_PADDING, start_box[1] - BOX_PADDING)
- image = cv2.putText(img=image, text=text, org=start_text, fontFace=0, color=(255,255,255), fontScale=line_thickness/3, thickness=font_thickness)
-
- return image
-
-iface = gr.Interface(fn=detect,
- inputs=gr.Image(label="Upload scanned document", type="filepath"),
- outputs="image")
-iface.launch()
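-# Launching starts a local Gradio UI (by default at http://127.0.0.1:7860); upload a scanned page
-# to see the predicted layout regions drawn with the colors defined in ENTITIES_COLORS.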
-
diff --git a/spaces/lithiumice/SadTalker/README.md b/spaces/lithiumice/SadTalker/README.md
deleted file mode 100644
index d6468baa29b635161882902a3efbdb98f3c71317..0000000000000000000000000000000000000000
--- a/spaces/lithiumice/SadTalker/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: SadTalker
-emoji: 😭
-colorFrom: purple
-colorTo: green
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: vinthony/SadTalker
----
-
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/livinNector/TaNER/README.md b/spaces/livinNector/TaNER/README.md
deleted file mode 100644
index b63a0a4dd48aea5f576b3024a9bcb5fffd0af8fa..0000000000000000000000000000000000000000
--- a/spaces/livinNector/TaNER/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: TaNER
-emoji: 🤖
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: ai4bharat/IndicNER
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/logasja/LowKey/backbone/model_irse.py b/spaces/logasja/LowKey/backbone/model_irse.py
deleted file mode 100644
index 4efccaf6e6e3f25f8a1f32253a114b9eff5bb63f..0000000000000000000000000000000000000000
--- a/spaces/logasja/LowKey/backbone/model_irse.py
+++ /dev/null
@@ -1,237 +0,0 @@
-import torch
-import torch.nn as nn
-from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, ReLU, Sigmoid, Dropout, MaxPool2d, \
- AdaptiveAvgPool2d, Sequential, Module
-from collections import namedtuple
-
-
-# Support: ['IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']
-
-
-class Flatten(Module):
- def forward(self, input):
- return input.view(input.size(0), -1)
-
-
-def l2_norm(input, axis=1):
- norm = torch.norm(input, 2, axis, True)
- output = torch.div(input, norm)
-
- return output
-
-
-class SEModule(Module):
- def __init__(self, channels, reduction):
- super(SEModule, self).__init__()
- self.avg_pool = AdaptiveAvgPool2d(1)
- self.fc1 = Conv2d(
- channels, channels // reduction, kernel_size=1, padding=0, bias=False)
-
- nn.init.xavier_uniform_(self.fc1.weight.data)
-
- self.relu = ReLU(inplace=True)
- self.fc2 = Conv2d(
- channels // reduction, channels, kernel_size=1, padding=0, bias=False)
-
- self.sigmoid = Sigmoid()
-
- def forward(self, x):
- module_input = x
- x = self.avg_pool(x)
- x = self.fc1(x)
- x = self.relu(x)
- x = self.fc2(x)
- x = self.sigmoid(x)
-
- return module_input * x
-
-
-class bottleneck_IR(Module):
- def __init__(self, in_channel, depth, stride):
- super(bottleneck_IR, self).__init__()
- if in_channel == depth:
- self.shortcut_layer = MaxPool2d(1, stride)
- else:
- self.shortcut_layer = Sequential(
- Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))
- self.res_layer = Sequential(
- BatchNorm2d(in_channel),
- Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
- Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))
-
- def forward(self, x):
- shortcut = self.shortcut_layer(x)
- res = self.res_layer(x)
-
- return res + shortcut
-
-
-class bottleneck_IR_SE(Module):
- def __init__(self, in_channel, depth, stride):
- super(bottleneck_IR_SE, self).__init__()
- if in_channel == depth:
- self.shortcut_layer = MaxPool2d(1, stride)
- else:
- self.shortcut_layer = Sequential(
- Conv2d(in_channel, depth, (1, 1), stride, bias=False),
- BatchNorm2d(depth))
- self.res_layer = Sequential(
- BatchNorm2d(in_channel),
- Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
- PReLU(depth),
- Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
- BatchNorm2d(depth),
- SEModule(depth, 16)
- )
-
- def forward(self, x):
- shortcut = self.shortcut_layer(x)
- res = self.res_layer(x)
-
- return res + shortcut
-
-
-class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
- '''A named tuple describing a ResNet block.'''
-
-
-def get_block(in_channel, depth, num_units, stride=2):
-
- return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
-
-
-def get_blocks(num_layers):
- if num_layers == 50:
- blocks = [
- get_block(in_channel=64, depth=64, num_units=3),
- get_block(in_channel=64, depth=128, num_units=4),
- get_block(in_channel=128, depth=256, num_units=14),
- get_block(in_channel=256, depth=512, num_units=3)
- ]
- elif num_layers == 100:
- blocks = [
- get_block(in_channel=64, depth=64, num_units=3),
- get_block(in_channel=64, depth=128, num_units=13),
- get_block(in_channel=128, depth=256, num_units=30),
- get_block(in_channel=256, depth=512, num_units=3)
- ]
- elif num_layers == 152:
- blocks = [
- get_block(in_channel=64, depth=64, num_units=3),
- get_block(in_channel=64, depth=128, num_units=8),
- get_block(in_channel=128, depth=256, num_units=36),
- get_block(in_channel=256, depth=512, num_units=3)
- ]
-
- return blocks
-
-
-class Backbone(Module):
- def __init__(self, input_size, num_layers, mode='ir'):
- super(Backbone, self).__init__()
- assert input_size[0] in [112, 224], "input_size should be [112, 112] or [224, 224]"
- assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
- assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
- blocks = get_blocks(num_layers)
- if mode == 'ir':
- unit_module = bottleneck_IR
- elif mode == 'ir_se':
- unit_module = bottleneck_IR_SE
- self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
- BatchNorm2d(64),
- PReLU(64))
- if input_size[0] == 112:
- self.output_layer = Sequential(BatchNorm2d(512),
- Dropout(),
- Flatten(),
- Linear(512 * 7 * 7, 512),
- BatchNorm1d(512))
- else:
- self.output_layer = Sequential(BatchNorm2d(512),
- Dropout(),
- Flatten(),
- Linear(512 * 14 * 14, 512),
- BatchNorm1d(512))
-
- modules = []
- for block in blocks:
- for bottleneck in block:
- modules.append(
- unit_module(bottleneck.in_channel,
- bottleneck.depth,
- bottleneck.stride))
- self.body = Sequential(*modules)
-
- self._initialize_weights()
-
- def forward(self, x):
- x = self.input_layer(x)
- x = self.body(x)
- x = self.output_layer(x)
-
- return x
-
- def _initialize_weights(self):
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- nn.init.xavier_uniform_(m.weight.data)
- if m.bias is not None:
- m.bias.data.zero_()
- elif isinstance(m, nn.BatchNorm2d):
- m.weight.data.fill_(1)
- m.bias.data.zero_()
- elif isinstance(m, nn.BatchNorm1d):
- m.weight.data.fill_(1)
- m.bias.data.zero_()
- elif isinstance(m, nn.Linear):
- nn.init.xavier_uniform_(m.weight.data)
- if m.bias is not None:
- m.bias.data.zero_()
-
-
-def IR_50(input_size):
-    """Constructs an IR-50 model.
- """
- model = Backbone(input_size, 50, 'ir')
-
- return model
-
-
-def IR_101(input_size):
-    """Constructs an IR-101 model.
- """
- model = Backbone(input_size, 100, 'ir')
-
- return model
-
-
-def IR_152(input_size):
-    """Constructs an IR-152 model.
- """
- model = Backbone(input_size, 152, 'ir')
-
- return model
-
-
-def IR_SE_50(input_size):
-    """Constructs an IR-SE-50 model.
- """
- model = Backbone(input_size, 50, 'ir_se')
-
- return model
-
-
-def IR_SE_101(input_size):
-    """Constructs an IR-SE-101 model.
- """
- model = Backbone(input_size, 100, 'ir_se')
-
- return model
-
-
-def IR_SE_152(input_size):
-    """Constructs an IR-SE-152 model.
- """
- model = Backbone(input_size, 152, 'ir_se')
-
- return model
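Assuming the file above is importable (the module name `model_irse` below is a guess, not something stated in the diff), using any of the constructors is a one-liner; they differ only in depth and in whether SE blocks are attached, and all of them map 112x112 crops to 512-D embeddings:

```python
import torch
from model_irse import IR_SE_50  # hypothetical import path for the file above

model = IR_SE_50([112, 112])  # 112x112 face crops -> 512-D embeddings
model.eval()
with torch.no_grad():
    emb = model(torch.randn(4, 3, 112, 112))
print(emb.shape)  # torch.Size([4, 512])
```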
diff --git a/spaces/lusea/rvc-Qinggan/lib/infer_pack/attentions.py b/spaces/lusea/rvc-Qinggan/lib/infer_pack/attentions.py
deleted file mode 100644
index 05501be1871643f78dddbeaa529c96667031a8db..0000000000000000000000000000000000000000
--- a/spaces/lusea/rvc-Qinggan/lib/infer_pack/attentions.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from lib.infer_pack import commons
-from lib.infer_pack import modules
-from lib.infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- window_size=10,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- window_size=window_size,
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
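The only subtle part of `Encoder.forward` is the mask algebra: a per-frame mask of shape `[B, 1, T]` is outer-multiplied with itself to form a `[B, 1, T, T]` attention mask, so padded frames can neither attend nor be attended to. A tiny sketch of that expansion (shapes chosen for illustration):

```python
import torch

# x_mask: 1 for real frames, 0 for padding; shape [B, 1, T]
x_mask = torch.tensor([[[1.0, 1.0, 1.0, 0.0, 0.0]]])  # length 5, last 2 frames padded

attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)  # [B, 1, T, T]
print(attn_mask.shape)  # torch.Size([1, 1, 5, 5])
print(attn_mask[0, 0].int())  # 1 only where both query and key positions are real
```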
-class Decoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- proximal_bias=False,
- proximal_init=True,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- proximal_bias=proximal_bias,
- proximal_init=proximal_init,
- )
- )
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(
- MultiHeadAttention(
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- causal=True,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
- device=x.device, dtype=x.dtype
- )
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(
- self,
- channels,
- out_channels,
- n_heads,
- p_dropout=0.0,
- window_size=None,
- heads_share=True,
- block_length=None,
- proximal_bias=False,
- proximal_init=False,
- ):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
- self.emb_rel_v = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert (
- t_s == t_t
- ), "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(
- query / math.sqrt(self.k_channels), key_relative_embeddings
- )
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(
- device=scores.device, dtype=scores.dtype
- )
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert (
- t_s == t_t
- ), "Local attention is only available for self-attention."
- block_mask = (
- torch.ones_like(scores)
- .triu(-self.block_length)
- .tril(self.block_length)
- )
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(
- self.emb_rel_v, t_s
- )
- output = output + self._matmul_with_relative_values(
- relative_weights, value_relative_embeddings
- )
- output = (
- output.transpose(2, 3).contiguous().view(b, d, t_t)
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
- )
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[
- :, slice_start_position:slice_end_position
- ]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
-        # Concat extra elements so that the tensor adds up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
- )
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
- :, :, :length, length - 1 :
- ]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
-        # pad along the column dimension
- x = F.pad(
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
- )
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
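`_relative_position_to_absolute_position` and its inverse use the standard pad-and-reshape trick for relative attention: padding one column and flattening skews the `[L, 2L-1]` relative logits so that a plain reshape lines them up as `[L, L]` absolute-position scores, with no gather ops. A shape-only sketch of the forward conversion (standalone function with the same logic, written without the `commons.convert_pad_shape` helper):

```python
import torch
import torch.nn.functional as F


def rel_to_abs(x):
    """x: [B, H, L, 2L-1] relative logits -> [B, H, L, L] absolute logits."""
    b, h, l, _ = x.size()
    x = F.pad(x, (0, 1))                # append one column -> [B, H, L, 2L]
    x = x.view(b, h, l * 2 * l)         # flatten the last two dims
    x = F.pad(x, (0, l - 1))            # pad so a clean reshape exists
    x = x.view(b, h, l + 1, 2 * l - 1)  # the skewed view
    return x[:, :, :l, l - 1:]          # slice out the aligned [L, L] block


out = rel_to_abs(torch.randn(2, 4, 6, 11))  # 2L-1 = 11 for L = 6
print(out.shape)  # torch.Size([2, 4, 6, 6])
```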
-class FFN(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- filter_channels,
- kernel_size,
- p_dropout=0.0,
- activation=None,
- causal=False,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
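The two padding helpers in `FFN` differ only in where the `kernel_size - 1` zeros go: `_same_padding` splits them around the sequence, while `_causal_padding` puts them all on the left so no position can see future frames. A quick numeric check of that difference (plain `torch`, standalone):

```python
import torch
import torch.nn.functional as F

x = torch.arange(1.0, 6.0).view(1, 1, 5)  # [B, C, T] = [1, 1, 5]
kernel_size = 3

same = F.pad(x, ((kernel_size - 1) // 2, kernel_size // 2))  # pad (1, 1)
causal = F.pad(x, (kernel_size - 1, 0))                      # pad (2, 0), left only
print(same)    # tensor([[[0., 1., 2., 3., 4., 5., 0.]]])
print(causal)  # tensor([[[0., 0., 1., 2., 3., 4., 5.]]])
```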
diff --git a/spaces/lusea/rvc-Qinggan/lib/infer_pack/modules/F0Predictor/__init__.py b/spaces/lusea/rvc-Qinggan/lib/infer_pack/modules/F0Predictor/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ma-xu/LIVE/diffvg.cpp b/spaces/ma-xu/LIVE/diffvg.cpp
deleted file mode 100644
index 7346d24b758b135bdd402fdb67ea412f48419eb3..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/diffvg.cpp
+++ /dev/null
@@ -1,1792 +0,0 @@
-#include "diffvg.h"
-#include "aabb.h"
-#include "shape.h"
-#include "sample_boundary.h"
-#include "atomic.h"
-#include "cdf.h"
-#include "compute_distance.h"
-#include "cuda_utils.h"
-#include "edge_query.h"
-#include "filter.h"
-#include "matrix.h"
-#include "parallel.h"
-#include "pcg.h"
-#include "ptr.h"
-#include "scene.h"
-#include "vector.h"
-#include "winding_number.h"
-#include "within_distance.h"
-#include <cassert>
-#include <pybind11/pybind11.h>
-#include <pybind11/stl.h>
-#include <thrust/execution_policy.h>
-#include <thrust/sort.h>
-
-namespace py = pybind11;
-
-struct Command {
- int shape_group_id;
- int shape_id;
- int point_id; // Only used by path
-};
-
-DEVICE
-bool is_inside(const SceneData &scene_data,
- int shape_group_id,
- const Vector2f &pt,
- EdgeQuery *edge_query) {
- const ShapeGroup &shape_group = scene_data.shape_groups[shape_group_id];
- // pt is in canvas space, transform it to shape's local space
- auto local_pt = xform_pt(shape_group.canvas_to_shape, pt);
- const auto &bvh_nodes = scene_data.shape_groups_bvh_nodes[shape_group_id];
- const AABB &bbox = bvh_nodes[2 * shape_group.num_shapes - 2].box;
- if (!inside(bbox, local_pt)) {
- return false;
- }
- auto winding_number = 0;
- // Traverse the shape group BVH
- constexpr auto max_bvh_stack_size = 64;
- int bvh_stack[max_bvh_stack_size];
- auto stack_size = 0;
- bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2;
- while (stack_size > 0) {
- const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]];
- if (node.child1 < 0) {
- // leaf
- auto shape_id = node.child0;
- auto w = compute_winding_number(
- scene_data.shapes[shape_id], scene_data.path_bvhs[shape_id], local_pt);
- winding_number += w;
- if (edge_query != nullptr) {
- if (edge_query->shape_group_id == shape_group_id &&
- edge_query->shape_id == shape_id) {
- if ((shape_group.use_even_odd_rule && abs(w) % 2 == 1) ||
- (!shape_group.use_even_odd_rule && w != 0)) {
- edge_query->hit = true;
- }
- }
- }
- } else {
- assert(node.child0 >= 0 && node.child1 >= 0);
- const AABB &b0 = bvh_nodes[node.child0].box;
- if (inside(b0, local_pt)) {
- bvh_stack[stack_size++] = node.child0;
- }
- const AABB &b1 = bvh_nodes[node.child1].box;
- if (inside(b1, local_pt)) {
- bvh_stack[stack_size++] = node.child1;
- }
- assert(stack_size <= max_bvh_stack_size);
- }
- }
- if (shape_group.use_even_odd_rule) {
- return abs(winding_number) % 2 == 1;
- } else {
- return winding_number != 0;
- }
-}
-
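`is_inside` accumulates a winding number over the shape-group BVH and then applies one of the two standard fill rules. The rules only disagree for self-overlapping contours, where the winding number can exceed 1 in magnitude; a tiny Python sketch of the final decision (winding numbers hand-picked for illustration):

```python
def covered(winding_number, use_even_odd_rule):
    # Mirrors the decision at the end of is_inside().
    if use_even_odd_rule:
        return abs(winding_number) % 2 == 1
    return winding_number != 0


for w in (0, 1, 2, -1):
    print(w, covered(w, True), covered(w, False))
# w = 2: even-odd treats the doubly-wound region as a hole, non-zero fills it
```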
-DEVICE void accumulate_boundary_gradient(const Shape &shape,
- float contrib,
- float t,
- const Vector2f &normal,
- const BoundaryData &boundary_data,
- Shape &d_shape,
- const Matrix3x3f &shape_to_canvas,
- const Vector2f &local_boundary_pt,
- Matrix3x3f &d_shape_to_canvas) {
- assert(isfinite(contrib));
- assert(isfinite(normal));
-    // According to the Reynolds transport theorem,
- // the Jacobian of the boundary integral is dot(velocity, normal),
- // where the velocity depends on the variable being differentiated with.
- if (boundary_data.is_stroke) {
- auto has_path_thickness = false;
- if (shape.type == ShapeType::Path) {
- const Path &path = *(const Path *)shape.ptr;
- has_path_thickness = path.thickness != nullptr;
- }
- // differentiate stroke width: velocity is the same as normal
- if (has_path_thickness) {
- Path *d_p = (Path*)d_shape.ptr;
- auto base_point_id = boundary_data.path.base_point_id;
- auto point_id = boundary_data.path.point_id;
- auto t = boundary_data.path.t;
- const Path &path = *(const Path *)shape.ptr;
- if (path.num_control_points[base_point_id] == 0) {
- // Straight line
- auto i0 = point_id;
- auto i1 = (point_id + 1) % path.num_points;
- // r = r0 + t * (r1 - r0)
- atomic_add(&d_p->thickness[i0], (1 - t) * contrib);
- atomic_add(&d_p->thickness[i1], ( t) * contrib);
- } else if (path.num_control_points[base_point_id] == 1) {
- // Quadratic Bezier curve
- auto i0 = point_id;
- auto i1 = point_id + 1;
- auto i2 = (point_id + 2) % path.num_points;
- // r = (1-t)^2r0 + 2(1-t)t r1 + t^2 r2
- atomic_add(&d_p->thickness[i0], square(1 - t) * contrib);
- atomic_add(&d_p->thickness[i1], (2*(1-t)*t) * contrib);
- atomic_add(&d_p->thickness[i2], (t*t) * contrib);
- } else if (path.num_control_points[base_point_id] == 2) {
- auto i0 = point_id;
- auto i1 = point_id + 1;
- auto i2 = point_id + 2;
- auto i3 = (point_id + 3) % path.num_points;
- // r = (1-t)^3r0 + 3*(1-t)^2tr1 + 3*(1-t)t^2r2 + t^3r3
- atomic_add(&d_p->thickness[i0], cubic(1 - t) * contrib);
- atomic_add(&d_p->thickness[i1], 3 * square(1 - t) * t * contrib);
- atomic_add(&d_p->thickness[i2], 3 * (1 - t) * t * t * contrib);
- atomic_add(&d_p->thickness[i3], t * t * t * contrib);
- } else {
- assert(false);
- }
- } else {
- atomic_add(&d_shape.stroke_width, contrib);
- }
- }
- switch (shape.type) {
- case ShapeType::Circle: {
- Circle *d_p = (Circle*)d_shape.ptr;
- // velocity for the center is (1, 0) for x and (0, 1) for y
- atomic_add(&d_p->center[0], normal * contrib);
- // velocity for the radius is the same as the normal
- atomic_add(&d_p->radius, contrib);
- break;
- } case ShapeType::Ellipse: {
- Ellipse *d_p = (Ellipse*)d_shape.ptr;
- // velocity for the center is (1, 0) for x and (0, 1) for y
- atomic_add(&d_p->center[0], normal * contrib);
- // velocity for the radius:
- // x = center.x + r.x * cos(2pi * t)
- // y = center.y + r.y * sin(2pi * t)
- // for r.x: (cos(2pi * t), 0)
- // for r.y: (0, sin(2pi * t))
- atomic_add(&d_p->radius.x, cos(2 * float(M_PI) * t) * normal.x * contrib);
- atomic_add(&d_p->radius.y, sin(2 * float(M_PI) * t) * normal.y * contrib);
- break;
- } case ShapeType::Path: {
- Path *d_p = (Path*)d_shape.ptr;
- auto base_point_id = boundary_data.path.base_point_id;
- auto point_id = boundary_data.path.point_id;
- auto t = boundary_data.path.t;
- const Path &path = *(const Path *)shape.ptr;
- if (path.num_control_points[base_point_id] == 0) {
- // Straight line
- auto i0 = point_id;
- auto i1 = (point_id + 1) % path.num_points;
- // pt = p0 + t * (p1 - p0)
- // velocity for p0.x: (1 - t, 0)
- // p0.y: ( 0, 1 - t)
- // p1.x: ( t, 0)
- // p1.y: ( 0, t)
- atomic_add(&d_p->points[2 * i0 + 0], (1 - t) * normal.x * contrib);
- atomic_add(&d_p->points[2 * i0 + 1], (1 - t) * normal.y * contrib);
- atomic_add(&d_p->points[2 * i1 + 0], ( t) * normal.x * contrib);
- atomic_add(&d_p->points[2 * i1 + 1], ( t) * normal.y * contrib);
- } else if (path.num_control_points[base_point_id] == 1) {
- // Quadratic Bezier curve
- auto i0 = point_id;
- auto i1 = point_id + 1;
- auto i2 = (point_id + 2) % path.num_points;
- // pt = (1-t)^2p0 + 2(1-t)t p1 + t^2 p2
- // velocity for p0.x: ((1-t)^2, 0)
- // p0.y: ( 0, (1-t)^2)
- // p1.x: (2(1-t)t, 0)
- // p1.y: ( 0, 2(1-t)t)
-            //              p2.x: (    t^2,        0)
-            //              p2.y: (      0,      t^2)
- atomic_add(&d_p->points[2 * i0 + 0], square(1 - t) * normal.x * contrib);
- atomic_add(&d_p->points[2 * i0 + 1], square(1 - t) * normal.y * contrib);
- atomic_add(&d_p->points[2 * i1 + 0], (2*(1-t)*t) * normal.x * contrib);
- atomic_add(&d_p->points[2 * i1 + 1], (2*(1-t)*t) * normal.y * contrib);
- atomic_add(&d_p->points[2 * i2 + 0], (t*t) * normal.x * contrib);
- atomic_add(&d_p->points[2 * i2 + 1], (t*t) * normal.y * contrib);
- } else if (path.num_control_points[base_point_id] == 2) {
- auto i0 = point_id;
- auto i1 = point_id + 1;
- auto i2 = point_id + 2;
- auto i3 = (point_id + 3) % path.num_points;
- // pt = (1-t)^3p0 + 3*(1-t)^2tp1 + 3*(1-t)t^2p2 + t^3p3
- // velocity for p0.x: ( (1-t)^3, 0)
- // p0.y: ( 0, (1-t)^3)
- // p1.x: (3*(1-t)^2t, 0)
- // p1.y: ( 0, 3*(1-t)^2t)
- // p2.x: (3*(1-t)t^2, 0)
- // p2.y: ( 0, 3*(1-t)t^2)
-            //              p3.x: (       t^3,           0)
-            //              p3.y: (         0,         t^3)
- atomic_add(&d_p->points[2 * i0 + 0], cubic(1 - t) * normal.x * contrib);
- atomic_add(&d_p->points[2 * i0 + 1], cubic(1 - t) * normal.y * contrib);
- atomic_add(&d_p->points[2 * i1 + 0], 3 * square(1 - t) * t * normal.x * contrib);
- atomic_add(&d_p->points[2 * i1 + 1], 3 * square(1 - t) * t * normal.y * contrib);
- atomic_add(&d_p->points[2 * i2 + 0], 3 * (1 - t) * t * t * normal.x * contrib);
- atomic_add(&d_p->points[2 * i2 + 1], 3 * (1 - t) * t * t * normal.y * contrib);
- atomic_add(&d_p->points[2 * i3 + 0], t * t * t * normal.x * contrib);
- atomic_add(&d_p->points[2 * i3 + 1], t * t * t * normal.y * contrib);
- } else {
- assert(false);
- }
- break;
- } case ShapeType::Rect: {
- Rect *d_p = (Rect*)d_shape.ptr;
- // The velocity depends on the position of the boundary
- if (normal == Vector2f{-1, 0}) {
- // left
- // velocity for p_min is (1, 0) for x and (0, 0) for y
- atomic_add(&d_p->p_min.x, -contrib);
- } else if (normal == Vector2f{1, 0}) {
- // right
- // velocity for p_max is (1, 0) for x and (0, 0) for y
- atomic_add(&d_p->p_max.x, contrib);
- } else if (normal == Vector2f{0, -1}) {
- // top
- // velocity for p_min is (0, 0) for x and (0, 1) for y
- atomic_add(&d_p->p_min.y, -contrib);
- } else if (normal == Vector2f{0, 1}) {
- // bottom
- // velocity for p_max is (0, 0) for x and (0, 1) for y
- atomic_add(&d_p->p_max.y, contrib);
- } else {
- // incorrect normal assignment?
- assert(false);
- }
- break;
- } default: {
- assert(false);
- break;
- }
- }
- // for shape_to_canvas we have the following relationship:
- // boundary_pt = xform_pt(shape_to_canvas, local_pt)
- // the velocity is the derivative of boundary_pt with respect to shape_to_canvas
- // we can use reverse-mode AD to compute the dot product of the velocity and the Jacobian
- // by passing the normal in d_xform_pt
- auto d_shape_to_canvas_ = Matrix3x3f();
- auto d_local_boundary_pt = Vector2f{0, 0};
- d_xform_pt(shape_to_canvas,
- local_boundary_pt,
- normal * contrib,
- d_shape_to_canvas_,
- d_local_boundary_pt);
- atomic_add(&d_shape_to_canvas(0, 0), d_shape_to_canvas_);
-}
-
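`accumulate_boundary_gradient` splats a scalar boundary contribution onto Bézier control points using the Bernstein basis weights at the sampled parameter `t`; since a Bézier curve is linear in its control points, those weights are exactly the chain-rule factors. A NumPy sketch of the weights for the quadratic and cubic cases (mirroring the `square`/`cubic` expressions above; the function name is illustrative):

```python
import numpy as np


def bernstein_weights(t, degree):
    """Per-control-point factors multiplying `contrib` for a degree-2 or 3 Bezier."""
    if degree == 2:
        return np.array([(1 - t) ** 2, 2 * (1 - t) * t, t ** 2])
    if degree == 3:
        return np.array([(1 - t) ** 3, 3 * (1 - t) ** 2 * t,
                         3 * (1 - t) * t ** 2, t ** 3])
    raise ValueError("only quadratic and cubic segments are handled here")


t = 0.25
for deg in (2, 3):
    w = bernstein_weights(t, deg)
    print(deg, w, w.sum())  # the weights form a partition of unity, so sum() == 1.0
```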
-DEVICE
-Vector4f sample_color(const ColorType &color_type,
- void *color,
- const Vector2f &pt) {
- switch (color_type) {
- case ColorType::Constant: {
- auto c = (const Constant*)color;
- assert(isfinite(c->color));
- return c->color;
- } case ColorType::LinearGradient: {
- auto c = (const LinearGradient*)color;
- // Project pt to (c->begin, c->end)
- auto beg = c->begin;
- auto end = c->end;
- auto t = dot(pt - beg, end - beg) / max(dot(end - beg, end - beg), 1e-3f);
-            // Find the corresponding stop:
- if (t < c->stop_offsets[0]) {
- return Vector4f{c->stop_colors[0],
- c->stop_colors[1],
- c->stop_colors[2],
- c->stop_colors[3]};
- }
- for (int i = 0; i < c->num_stops - 1; i++) {
- auto offset_curr = c->stop_offsets[i];
- auto offset_next = c->stop_offsets[i + 1];
- assert(offset_next > offset_curr);
- if (t >= offset_curr && t < offset_next) {
- auto color_curr = Vector4f{
- c->stop_colors[4 * i + 0],
- c->stop_colors[4 * i + 1],
- c->stop_colors[4 * i + 2],
- c->stop_colors[4 * i + 3]};
- auto color_next = Vector4f{
- c->stop_colors[4 * (i + 1) + 0],
- c->stop_colors[4 * (i + 1) + 1],
- c->stop_colors[4 * (i + 1) + 2],
- c->stop_colors[4 * (i + 1) + 3]};
- auto tt = (t - offset_curr) / (offset_next - offset_curr);
- assert(isfinite(tt));
- assert(isfinite(color_curr));
- assert(isfinite(color_next));
- return color_curr * (1 - tt) + color_next * tt;
- }
- }
- return Vector4f{c->stop_colors[4 * (c->num_stops - 1) + 0],
- c->stop_colors[4 * (c->num_stops - 1) + 1],
- c->stop_colors[4 * (c->num_stops - 1) + 2],
- c->stop_colors[4 * (c->num_stops - 1) + 3]};
- } case ColorType::RadialGradient: {
- auto c = (const RadialGradient*)color;
- // Distance from pt to center
- auto offset = pt - c->center;
- auto normalized_offset = offset / c->radius;
- auto t = length(normalized_offset);
-            // Find the corresponding stop:
- if (t < c->stop_offsets[0]) {
- return Vector4f{c->stop_colors[0],
- c->stop_colors[1],
- c->stop_colors[2],
- c->stop_colors[3]};
- }
- for (int i = 0; i < c->num_stops - 1; i++) {
- auto offset_curr = c->stop_offsets[i];
- auto offset_next = c->stop_offsets[i + 1];
- assert(offset_next > offset_curr);
- if (t >= offset_curr && t < offset_next) {
- auto color_curr = Vector4f{
- c->stop_colors[4 * i + 0],
- c->stop_colors[4 * i + 1],
- c->stop_colors[4 * i + 2],
- c->stop_colors[4 * i + 3]};
- auto color_next = Vector4f{
- c->stop_colors[4 * (i + 1) + 0],
- c->stop_colors[4 * (i + 1) + 1],
- c->stop_colors[4 * (i + 1) + 2],
- c->stop_colors[4 * (i + 1) + 3]};
- auto tt = (t - offset_curr) / (offset_next - offset_curr);
- assert(isfinite(tt));
- assert(isfinite(color_curr));
- assert(isfinite(color_next));
- return color_curr * (1 - tt) + color_next * tt;
- }
- }
- return Vector4f{c->stop_colors[4 * (c->num_stops - 1) + 0],
- c->stop_colors[4 * (c->num_stops - 1) + 1],
- c->stop_colors[4 * (c->num_stops - 1) + 2],
- c->stop_colors[4 * (c->num_stops - 1) + 3]};
- } default: {
- assert(false);
- }
- }
- return Vector4f{};
-}
-
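Both gradient branches of `sample_color` perform the same lookup once `t` is known: clamp to the first or last stop outside the offset range, otherwise linearly interpolate the two surrounding stop colors. A standalone Python sketch of that lookup, with RGBA stops flattened the same way as `stop_colors` above:

```python
def sample_gradient(t, stop_offsets, stop_colors):
    """stop_offsets: sorted floats in [0, 1]; stop_colors: flat RGBA list, 4 per stop."""
    n = len(stop_offsets)
    if t < stop_offsets[0]:
        return stop_colors[0:4]
    for i in range(n - 1):
        lo, hi = stop_offsets[i], stop_offsets[i + 1]
        if lo <= t < hi:
            tt = (t - lo) / (hi - lo)
            a = stop_colors[4 * i:4 * i + 4]
            b = stop_colors[4 * (i + 1):4 * (i + 1) + 4]
            return [ca * (1 - tt) + cb * tt for ca, cb in zip(a, b)]
    return stop_colors[4 * (n - 1):4 * n]  # past the last stop


offsets = [0.0, 0.5, 1.0]
colors = [1, 0, 0, 1,  0, 1, 0, 1,  0, 0, 1, 1]  # red -> green -> blue
print(sample_gradient(0.25, offsets, colors))    # [0.5, 0.5, 0.0, 1.0]
```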
-DEVICE
-void d_sample_color(const ColorType &color_type,
- void *color_ptr,
- const Vector2f &pt,
- const Vector4f &d_color,
- void *d_color_ptr,
- float *d_translation) {
- switch (color_type) {
- case ColorType::Constant: {
- auto d_c = (Constant*)d_color_ptr;
- atomic_add(&d_c->color[0], d_color);
- return;
- } case ColorType::LinearGradient: {
- auto c = (const LinearGradient*)color_ptr;
- auto d_c = (LinearGradient*)d_color_ptr;
- // Project pt to (c->begin, c->end)
- auto beg = c->begin;
- auto end = c->end;
- auto t = dot(pt - beg, end - beg) / max(dot(end - beg, end - beg), 1e-3f);
-            // Find the corresponding stop:
- if (t < c->stop_offsets[0]) {
- atomic_add(&d_c->stop_colors[0], d_color);
- return;
- }
- for (int i = 0; i < c->num_stops - 1; i++) {
- auto offset_curr = c->stop_offsets[i];
- auto offset_next = c->stop_offsets[i + 1];
- assert(offset_next > offset_curr);
- if (t >= offset_curr && t < offset_next) {
- auto color_curr = Vector4f{
- c->stop_colors[4 * i + 0],
- c->stop_colors[4 * i + 1],
- c->stop_colors[4 * i + 2],
- c->stop_colors[4 * i + 3]};
- auto color_next = Vector4f{
- c->stop_colors[4 * (i + 1) + 0],
- c->stop_colors[4 * (i + 1) + 1],
- c->stop_colors[4 * (i + 1) + 2],
- c->stop_colors[4 * (i + 1) + 3]};
- auto tt = (t - offset_curr) / (offset_next - offset_curr);
- // return color_curr * (1 - tt) + color_next * tt;
- auto d_color_curr = d_color * (1 - tt);
- auto d_color_next = d_color * tt;
- auto d_tt = sum(d_color * (color_next - color_curr));
- auto d_offset_next = -d_tt * tt / (offset_next - offset_curr);
- auto d_offset_curr = d_tt * ((tt - 1.f) / (offset_next - offset_curr));
- auto d_t = d_tt / (offset_next - offset_curr);
- assert(isfinite(d_tt));
- atomic_add(&d_c->stop_colors[4 * i], d_color_curr);
- atomic_add(&d_c->stop_colors[4 * (i + 1)], d_color_next);
- atomic_add(&d_c->stop_offsets[i], d_offset_curr);
- atomic_add(&d_c->stop_offsets[i + 1], d_offset_next);
-                    // auto t = dot(pt - beg, end - beg) / max(dot(end - beg, end - beg), 1e-3f);
- // l = max(dot(end - beg, end - beg), 1e-3f)
- // t = dot(pt - beg, end - beg) / l;
- auto l = max(dot(end - beg, end - beg), 1e-3f);
- auto d_beg = d_t * (-(pt - beg)-(end - beg)) / l;
- auto d_end = d_t * (pt - beg) / l;
- auto d_l = -d_t * t / l;
- if (dot(end - beg, end - beg) > 1e-3f) {
- d_beg += 2 * d_l * (beg - end);
- d_end += 2 * d_l * (end - beg);
- }
- atomic_add(&d_c->begin[0], d_beg);
- atomic_add(&d_c->end[0], d_end);
- if (d_translation != nullptr) {
- atomic_add(d_translation, (d_beg + d_end));
- }
- return;
- }
- }
- atomic_add(&d_c->stop_colors[4 * (c->num_stops - 1)], d_color);
- return;
- } case ColorType::RadialGradient: {
- auto c = (const RadialGradient*)color_ptr;
- auto d_c = (RadialGradient*)d_color_ptr;
- // Distance from pt to center
- auto offset = pt - c->center;
- auto normalized_offset = offset / c->radius;
- auto t = length(normalized_offset);
-            // Find the corresponding stop:
- if (t < c->stop_offsets[0]) {
- atomic_add(&d_c->stop_colors[0], d_color);
- return;
- }
- for (int i = 0; i < c->num_stops - 1; i++) {
- auto offset_curr = c->stop_offsets[i];
- auto offset_next = c->stop_offsets[i + 1];
- assert(offset_next > offset_curr);
- if (t >= offset_curr && t < offset_next) {
- auto color_curr = Vector4f{
- c->stop_colors[4 * i + 0],
- c->stop_colors[4 * i + 1],
- c->stop_colors[4 * i + 2],
- c->stop_colors[4 * i + 3]};
- auto color_next = Vector4f{
- c->stop_colors[4 * (i + 1) + 0],
- c->stop_colors[4 * (i + 1) + 1],
- c->stop_colors[4 * (i + 1) + 2],
- c->stop_colors[4 * (i + 1) + 3]};
- auto tt = (t - offset_curr) / (offset_next - offset_curr);
- assert(isfinite(tt));
- // return color_curr * (1 - tt) + color_next * tt;
- auto d_color_curr = d_color * (1 - tt);
- auto d_color_next = d_color * tt;
- auto d_tt = sum(d_color * (color_next - color_curr));
- auto d_offset_next = -d_tt * tt / (offset_next - offset_curr);
- auto d_offset_curr = d_tt * ((tt - 1.f) / (offset_next - offset_curr));
- auto d_t = d_tt / (offset_next - offset_curr);
- assert(isfinite(d_t));
- atomic_add(&d_c->stop_colors[4 * i], d_color_curr);
- atomic_add(&d_c->stop_colors[4 * (i + 1)], d_color_next);
- atomic_add(&d_c->stop_offsets[i], d_offset_curr);
- atomic_add(&d_c->stop_offsets[i + 1], d_offset_next);
- // offset = pt - c->center
- // normalized_offset = offset / c->radius
- // t = length(normalized_offset)
- auto d_normalized_offset = d_length(normalized_offset, d_t);
- auto d_offset = d_normalized_offset / c->radius;
- auto d_radius = -d_normalized_offset * offset / (c->radius * c->radius);
- auto d_center = -d_offset;
- atomic_add(&d_c->center[0], d_center);
- atomic_add(&d_c->radius[0], d_radius);
- if (d_translation != nullptr) {
- atomic_add(d_translation, d_center);
- }
- }
- }
- atomic_add(&d_c->stop_colors[4 * (c->num_stops - 1)], d_color);
- return;
- } default: {
- assert(false);
- }
- }
-}
-
-struct Fragment {
- Vector3f color;
- float alpha;
- int group_id;
- bool is_stroke;
-};
-
-struct PrefilterFragment {
- Vector3f color;
- float alpha;
- int group_id;
- bool is_stroke;
- int shape_id;
- float distance;
- Vector2f closest_pt;
- ClosestPointPathInfo path_info;
- bool within_distance;
-};
-
-DEVICE
-Vector4f sample_color(const SceneData &scene,
- const Vector4f *background_color,
- const Vector2f &screen_pt,
- const Vector4f *d_color = nullptr,
- EdgeQuery *edge_query = nullptr,
- Vector4f *d_background_color = nullptr,
- float *d_translation = nullptr) {
- if (edge_query != nullptr) {
- edge_query->hit = false;
- }
-
- // screen_pt is in screen space ([0, 1), [0, 1)),
- // need to transform to canvas space
- auto pt = screen_pt;
- pt.x *= scene.canvas_width;
- pt.y *= scene.canvas_height;
- constexpr auto max_hit_shapes = 256;
- constexpr auto max_bvh_stack_size = 64;
- Fragment fragments[max_hit_shapes];
- int bvh_stack[max_bvh_stack_size];
- auto stack_size = 0;
- auto num_fragments = 0;
- bvh_stack[stack_size++] = 2 * scene.num_shape_groups - 2;
- while (stack_size > 0) {
- const BVHNode &node = scene.bvh_nodes[bvh_stack[--stack_size]];
- if (node.child1 < 0) {
- // leaf
- auto group_id = node.child0;
- const ShapeGroup &shape_group = scene.shape_groups[group_id];
- if (shape_group.stroke_color != nullptr) {
- if (within_distance(scene, group_id, pt, edge_query)) {
- auto color_alpha = sample_color(shape_group.stroke_color_type,
- shape_group.stroke_color,
- pt);
- Fragment f;
- f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]};
- f.alpha = color_alpha[3];
- f.group_id = group_id;
- f.is_stroke = true;
- assert(num_fragments < max_hit_shapes);
- fragments[num_fragments++] = f;
- }
- }
- if (shape_group.fill_color != nullptr) {
- if (is_inside(scene, group_id, pt, edge_query)) {
- auto color_alpha = sample_color(shape_group.fill_color_type,
- shape_group.fill_color,
- pt);
- Fragment f;
- f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]};
- f.alpha = color_alpha[3];
- f.group_id = group_id;
- f.is_stroke = false;
- assert(num_fragments < max_hit_shapes);
- fragments[num_fragments++] = f;
- }
- }
- } else {
- assert(node.child0 >= 0 && node.child1 >= 0);
- const AABB &b0 = scene.bvh_nodes[node.child0].box;
- if (inside(b0, pt, scene.bvh_nodes[node.child0].max_radius)) {
- bvh_stack[stack_size++] = node.child0;
- }
- const AABB &b1 = scene.bvh_nodes[node.child1].box;
- if (inside(b1, pt, scene.bvh_nodes[node.child1].max_radius)) {
- bvh_stack[stack_size++] = node.child1;
- }
- assert(stack_size <= max_bvh_stack_size);
- }
- }
- if (num_fragments <= 0) {
- if (background_color != nullptr) {
- if (d_background_color != nullptr) {
- *d_background_color = *d_color;
- }
- return *background_color;
- }
- return Vector4f{0, 0, 0, 0};
- }
- // Sort the fragments from back to front (i.e. increasing order of group id)
- // https://github.com/frigaut/yorick-imutil/blob/master/insort.c#L37
- for (int i = 1; i < num_fragments; i++) {
- auto j = i;
- auto temp = fragments[j];
- while (j > 0 && fragments[j - 1].group_id > temp.group_id) {
- fragments[j] = fragments[j - 1];
- j--;
- }
- fragments[j] = temp;
- }
- // Blend the color
- Vector3f accum_color[max_hit_shapes];
- float accum_alpha[max_hit_shapes];
- // auto hit_opaque = false;
- auto first_alpha = 0.f;
- auto first_color = Vector3f{0, 0, 0};
- if (background_color != nullptr) {
- first_alpha = background_color->w;
- first_color = Vector3f{background_color->x,
- background_color->y,
- background_color->z};
- }
- for (int i = 0; i < num_fragments; i++) {
- const Fragment &fragment = fragments[i];
- auto new_color = fragment.color;
- auto new_alpha = fragment.alpha;
- auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha;
- auto prev_color = i > 0 ? accum_color[i - 1] : first_color;
- if (edge_query != nullptr) {
- // Do we hit the target shape?
- if (new_alpha >= 1.f && edge_query->hit) {
- // A fully opaque shape in front of the target occludes it
- edge_query->hit = false;
- }
- if (edge_query->shape_group_id == fragment.group_id) {
- edge_query->hit = true;
- }
- }
- // prev_color is alpha premultiplied, don't need to multiply with
- // prev_alpha
- accum_color[i] = prev_color * (1 - new_alpha) + new_alpha * new_color;
- accum_alpha[i] = prev_alpha * (1 - new_alpha) + new_alpha;
- }
- auto final_color = accum_color[num_fragments - 1];
- auto final_alpha = accum_alpha[num_fragments - 1];
- if (final_alpha > 1e-6f) {
- final_color /= final_alpha;
- }
- assert(isfinite(final_color));
- assert(isfinite(final_alpha));
- if (d_color != nullptr) {
- // Backward pass
- auto d_final_color = Vector3f{(*d_color)[0], (*d_color)[1], (*d_color)[2]};
- auto d_final_alpha = (*d_color)[3];
- auto d_curr_color = d_final_color;
- auto d_curr_alpha = d_final_alpha;
- if (final_alpha > 1e-6f) {
- // final_color = curr_color / final_alpha
- d_curr_color = d_final_color / final_alpha;
- d_curr_alpha -= sum(d_final_color * final_color) / final_alpha;
- }
- assert(isfinite(*d_color));
- assert(isfinite(d_curr_color));
- assert(isfinite(d_curr_alpha));
- for (int i = num_fragments - 1; i >= 0; i--) {
- // color[n] = prev_color * (1 - new_alpha) + new_alpha * new_color;
- // alpha[n] = prev_alpha * (1 - new_alpha) + new_alpha;
- auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha;
- auto prev_color = i > 0 ? accum_color[i - 1] : first_color;
- auto d_prev_alpha = d_curr_alpha * (1.f - fragments[i].alpha);
- auto d_alpha_i = d_curr_alpha * (1.f - prev_alpha);
- d_alpha_i += sum(d_curr_color * (fragments[i].color - prev_color));
- auto d_prev_color = d_curr_color * (1 - fragments[i].alpha);
- auto d_color_i = d_curr_color * fragments[i].alpha;
- auto group_id = fragments[i].group_id;
- if (fragments[i].is_stroke) {
- d_sample_color(scene.shape_groups[group_id].stroke_color_type,
- scene.shape_groups[group_id].stroke_color,
- pt,
- Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i},
- scene.d_shape_groups[group_id].stroke_color,
- d_translation);
- } else {
- d_sample_color(scene.shape_groups[group_id].fill_color_type,
- scene.shape_groups[group_id].fill_color,
- pt,
- Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i},
- scene.d_shape_groups[group_id].fill_color,
- d_translation);
- }
- d_curr_color = d_prev_color;
- d_curr_alpha = d_prev_alpha;
- }
- if (d_background_color != nullptr) {
- d_background_color->x += d_curr_color.x;
- d_background_color->y += d_curr_color.y;
- d_background_color->z += d_curr_color.z;
- d_background_color->w += d_curr_alpha;
- }
- }
- return Vector4f{final_color[0], final_color[1], final_color[2], final_alpha};
-}
-
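The fragment loop in `sample_color` is a back-to-front "over" composite with premultiplied color: each fragment blends `new_alpha * new_color` over the accumulated premultiplied color, alpha is accumulated the same way, and only the final color is un-premultiplied by dividing by the final alpha. A compact NumPy sketch of the forward pass (fragments assumed already sorted by group id, as in the insertion sort above):

```python
import numpy as np


def composite(fragments, background_rgb=(0.0, 0.0, 0.0), background_alpha=0.0):
    """fragments: list of (rgb, alpha), back to front. Returns straight-alpha RGBA."""
    color = np.asarray(background_rgb, dtype=float)  # premultiplied accumulator
    alpha = float(background_alpha)
    for rgb, a in fragments:
        color = color * (1 - a) + a * np.asarray(rgb, dtype=float)
        alpha = alpha * (1 - a) + a
    if alpha > 1e-6:
        color = color / alpha  # un-premultiply, matching `final_color /= final_alpha`
    return np.append(color, alpha)


frags = [((1.0, 0.0, 0.0), 0.5),   # semi-transparent red behind
         ((0.0, 0.0, 1.0), 0.5)]   # semi-transparent blue in front
print(composite(frags))            # approx [0.333, 0.0, 0.667, 0.75]
```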
-DEVICE
-float sample_distance(const SceneData &scene,
- const Vector2f &screen_pt,
- float weight,
- const float *d_dist = nullptr,
- float *d_translation = nullptr) {
- // screen_pt is in screen space ([0, 1), [0, 1)),
- // need to transform to canvas space
- auto pt = screen_pt;
- pt.x *= scene.canvas_width;
- pt.y *= scene.canvas_height;
- // for each shape
- auto min_group_id = -1;
- auto min_distance = 0.f;
- auto min_shape_id = -1;
- auto closest_pt = Vector2f{0, 0};
- auto min_path_info = ClosestPointPathInfo{-1, -1, 0};
- for (int group_id = scene.num_shape_groups - 1; group_id >= 0; group_id--) {
- auto s = -1;
- auto p = Vector2f{0, 0};
- ClosestPointPathInfo local_path_info;
- auto d = infinity();
- if (compute_distance(scene, group_id, pt, infinity(), &s, &p, &local_path_info, &d)) {
- if (min_group_id == -1 || d < min_distance) {
- min_distance = d;
- min_group_id = group_id;
- min_shape_id = s;
- closest_pt = p;
- min_path_info = local_path_info;
- }
- }
- }
- if (min_group_id == -1) {
- return min_distance;
- }
- min_distance *= weight;
- auto inside = false;
- const ShapeGroup &shape_group = scene.shape_groups[min_group_id];
- if (shape_group.fill_color != nullptr) {
- inside = is_inside(scene,
- min_group_id,
- pt,
- nullptr);
- if (inside) {
- min_distance = -min_distance;
- }
- }
- assert((min_group_id >= 0 && min_shape_id >= 0) || scene.num_shape_groups == 0);
- if (d_dist != nullptr) {
- auto d_abs_dist = inside ? -(*d_dist) : (*d_dist);
- const ShapeGroup &shape_group = scene.shape_groups[min_group_id];
- const Shape &shape = scene.shapes[min_shape_id];
- ShapeGroup &d_shape_group = scene.d_shape_groups[min_group_id];
- Shape &d_shape = scene.d_shapes[min_shape_id];
- d_compute_distance(shape_group.canvas_to_shape,
- shape_group.shape_to_canvas,
- shape,
- pt,
- closest_pt,
- min_path_info,
- d_abs_dist,
- d_shape_group.shape_to_canvas,
- d_shape,
- d_translation);
- }
- return min_distance;
-}
-
-// Gather d_color from d_image inside the filter kernel, normalize by
-// weight_image.
-DEVICE
-Vector4f gather_d_color(const Filter &filter,
- const float *d_color_image,
- const float *weight_image,
- int width,
- int height,
- const Vector2f &pt) {
- auto x = int(pt.x);
- auto y = int(pt.y);
- auto radius = filter.radius;
- assert(radius > 0);
- auto ri = (int)ceil(radius);
- auto d_color = Vector4f{0, 0, 0, 0};
- for (int dy = -ri; dy <= ri; dy++) {
- for (int dx = -ri; dx <= ri; dx++) {
- auto xx = x + dx;
- auto yy = y + dy;
- if (xx >= 0 && xx < width && yy >= 0 && yy < height) {
- auto xc = xx + 0.5f;
- auto yc = yy + 0.5f;
- auto filter_weight =
- compute_filter_weight(filter, xc - pt.x, yc - pt.y);
- // pixel = \sum weight * color / \sum weight
- auto weight_sum = weight_image[yy * width + xx];
- if (weight_sum > 0) {
- d_color += (filter_weight / weight_sum) * Vector4f{
- d_color_image[4 * (yy * width + xx) + 0],
- d_color_image[4 * (yy * width + xx) + 1],
- d_color_image[4 * (yy * width + xx) + 2],
- d_color_image[4 * (yy * width + xx) + 3],
- };
- }
- }
- }
- }
- return d_color;
-}
-
-DEVICE
-float smoothstep(float d) {
- auto t = clamp((d + 1.f) / 2.f, 0.f, 1.f);
- return t * t * (3 - 2 * t);
-}
-
-DEVICE
-float d_smoothstep(float d, float d_ret) {
- if (d < -1.f || d > 1.f) {
- return 0.f;
- }
- auto t = (d + 1.f) / 2.f;
- // ret = t * t * (3 - 2 * t)
- // = 3 * t * t - 2 * t * t * t
- auto d_t = d_ret * (6 * t - 6 * t * t);
- return d_t / 2.f;
-}
-
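`smoothstep` maps a signed distance in [-1, 1] to a soft coverage value, and `d_smoothstep` is its analytic derivative, including the factor 1/2 from the inner substitution `t = (d + 1) / 2`. A quick finite-difference sanity check of that derivative:

```python
def smoothstep(d):
    t = min(max((d + 1.0) / 2.0, 0.0), 1.0)
    return t * t * (3 - 2 * t)


def d_smoothstep(d, d_ret=1.0):
    if d < -1.0 or d > 1.0:
        return 0.0
    t = (d + 1.0) / 2.0
    return d_ret * (6 * t - 6 * t * t) / 2.0


d, eps = 0.3, 1e-4
numeric = (smoothstep(d + eps) - smoothstep(d - eps)) / (2 * eps)
print(numeric, d_smoothstep(d))  # both are ~0.6825; the finite difference matches the analytic value
```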
-DEVICE
-Vector4f sample_color_prefiltered(const SceneData &scene,
- const Vector4f *background_color,
- const Vector2f &screen_pt,
- const Vector4f *d_color = nullptr,
- Vector4f *d_background_color = nullptr,
- float *d_translation = nullptr) {
- // screen_pt is in screen space ([0, 1), [0, 1)),
- // need to transform to canvas space
- auto pt = screen_pt;
- pt.x *= scene.canvas_width;
- pt.y *= scene.canvas_height;
- constexpr auto max_hit_shapes = 64;
- constexpr auto max_bvh_stack_size = 64;
- PrefilterFragment fragments[max_hit_shapes];
- int bvh_stack[max_bvh_stack_size];
- auto stack_size = 0;
- auto num_fragments = 0;
- bvh_stack[stack_size++] = 2 * scene.num_shape_groups - 2;
- while (stack_size > 0) {
- const BVHNode &node = scene.bvh_nodes[bvh_stack[--stack_size]];
- if (node.child1 < 0) {
- // leaf
- auto group_id = node.child0;
- const ShapeGroup &shape_group = scene.shape_groups[group_id];
- if (shape_group.stroke_color != nullptr) {
- auto min_shape_id = -1;
- auto closest_pt = Vector2f{0, 0};
- auto local_path_info = ClosestPointPathInfo{-1, -1, 0};
- auto d = infinity();
- compute_distance(scene, group_id, pt, infinity(),
- &min_shape_id, &closest_pt, &local_path_info, &d);
- assert(min_shape_id != -1);
- const auto &shape = scene.shapes[min_shape_id];
- auto w = smoothstep(fabs(d) + shape.stroke_width) -
- smoothstep(fabs(d) - shape.stroke_width);
- if (w > 0) {
- auto color_alpha = sample_color(shape_group.stroke_color_type,
- shape_group.stroke_color,
- pt);
- color_alpha[3] *= w;
-
- PrefilterFragment f;
- f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]};
- f.alpha = color_alpha[3];
- f.group_id = group_id;
- f.shape_id = min_shape_id;
- f.distance = d;
- f.closest_pt = closest_pt;
- f.is_stroke = true;
- f.path_info = local_path_info;
- f.within_distance = true;
- assert(num_fragments < max_hit_shapes);
- fragments[num_fragments++] = f;
- }
- }
- if (shape_group.fill_color != nullptr) {
- auto min_shape_id = -1;
- auto closest_pt = Vector2f{0, 0};
- auto local_path_info = ClosestPointPathInfo{-1, -1, 0};
- auto d = infinity();
- auto found = compute_distance(scene,
- group_id,
- pt,
- 1.f,
- &min_shape_id,
- &closest_pt,
- &local_path_info,
- &d);
- auto inside = is_inside(scene, group_id, pt, nullptr);
- if (found || inside) {
- if (!inside) {
- d = -d;
- }
- auto w = smoothstep(d);
- if (w > 0) {
- auto color_alpha = sample_color(shape_group.fill_color_type,
- shape_group.fill_color,
- pt);
- color_alpha[3] *= w;
-
- PrefilterFragment f;
- f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]};
- f.alpha = color_alpha[3];
- f.group_id = group_id;
- f.shape_id = min_shape_id;
- f.distance = d;
- f.closest_pt = closest_pt;
- f.is_stroke = false;
- f.path_info = local_path_info;
- f.within_distance = found;
- assert(num_fragments < max_hit_shapes);
- fragments[num_fragments++] = f;
- }
- }
- }
- } else {
- assert(node.child0 >= 0 && node.child1 >= 0);
- const AABB &b0 = scene.bvh_nodes[node.child0].box;
- if (inside(b0, pt, scene.bvh_nodes[node.child0].max_radius)) {
- bvh_stack[stack_size++] = node.child0;
- }
- const AABB &b1 = scene.bvh_nodes[node.child1].box;
- if (inside(b1, pt, scene.bvh_nodes[node.child1].max_radius)) {
- bvh_stack[stack_size++] = node.child1;
- }
- assert(stack_size <= max_bvh_stack_size);
- }
- }
- if (num_fragments <= 0) {
- if (background_color != nullptr) {
- if (d_background_color != nullptr) {
- *d_background_color = *d_color;
- }
- return *background_color;
- }
- return Vector4f{0, 0, 0, 0};
- }
- // Sort the fragments from back to front (i.e. increasing order of group id)
- // https://github.com/frigaut/yorick-imutil/blob/master/insort.c#L37
- for (int i = 1; i < num_fragments; i++) {
- auto j = i;
- auto temp = fragments[j];
- while (j > 0 && fragments[j - 1].group_id > temp.group_id) {
- fragments[j] = fragments[j - 1];
- j--;
- }
- fragments[j] = temp;
- }
- // Blend the color
- Vector3f accum_color[max_hit_shapes];
- float accum_alpha[max_hit_shapes];
- auto first_alpha = 0.f;
- auto first_color = Vector3f{0, 0, 0};
- if (background_color != nullptr) {
- first_alpha = background_color->w;
- first_color = Vector3f{background_color->x,
- background_color->y,
- background_color->z};
- }
- for (int i = 0; i < num_fragments; i++) {
- const PrefilterFragment &fragment = fragments[i];
- auto new_color = fragment.color;
- auto new_alpha = fragment.alpha;
- auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha;
- auto prev_color = i > 0 ? accum_color[i - 1] : first_color;
- // prev_color is alpha premultiplied, don't need to multiply with
- // prev_alpha
- accum_color[i] = prev_color * (1 - new_alpha) + new_alpha * new_color;
- accum_alpha[i] = prev_alpha * (1 - new_alpha) + new_alpha;
- }
- auto final_color = accum_color[num_fragments - 1];
- auto final_alpha = accum_alpha[num_fragments - 1];
- if (final_alpha > 1e-6f) {
- final_color /= final_alpha;
- }
- assert(isfinite(final_color));
- assert(isfinite(final_alpha));
- if (d_color != nullptr) {
- // Backward pass
- auto d_final_color = Vector3f{(*d_color)[0], (*d_color)[1], (*d_color)[2]};
- auto d_final_alpha = (*d_color)[3];
- auto d_curr_color = d_final_color;
- auto d_curr_alpha = d_final_alpha;
- if (final_alpha > 1e-6f) {
- // final_color = curr_color / final_alpha
- d_curr_color = d_final_color / final_alpha;
- d_curr_alpha -= sum(d_final_color * final_color) / final_alpha;
- }
- assert(isfinite(*d_color));
- assert(isfinite(d_curr_color));
- assert(isfinite(d_curr_alpha));
- for (int i = num_fragments - 1; i >= 0; i--) {
- // color[n] = prev_color * (1 - new_alpha) + new_alpha * new_color;
- // alpha[n] = prev_alpha * (1 - new_alpha) + new_alpha;
- auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha;
- auto prev_color = i > 0 ? accum_color[i - 1] : first_color;
- auto d_prev_alpha = d_curr_alpha * (1.f - fragments[i].alpha);
- auto d_alpha_i = d_curr_alpha * (1.f - prev_alpha);
- d_alpha_i += sum(d_curr_color * (fragments[i].color - prev_color));
- auto d_prev_color = d_curr_color * (1 - fragments[i].alpha);
- auto d_color_i = d_curr_color * fragments[i].alpha;
- auto group_id = fragments[i].group_id;
- if (fragments[i].is_stroke) {
- const auto &shape = scene.shapes[fragments[i].shape_id];
- auto d = fragments[i].distance;
- auto abs_d_plus_width = fabs(d) + shape.stroke_width;
- auto abs_d_minus_width = fabs(d) - shape.stroke_width;
- auto w = smoothstep(abs_d_plus_width) -
- smoothstep(abs_d_minus_width);
- if (w != 0) {
- auto d_w = w > 0 ? (fragments[i].alpha / w) * d_alpha_i : 0.f;
- d_alpha_i *= w;
-
- // Backprop to color
- d_sample_color(scene.shape_groups[group_id].stroke_color_type,
- scene.shape_groups[group_id].stroke_color,
- pt,
- Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i},
- scene.d_shape_groups[group_id].stroke_color,
- d_translation);
-
- auto d_abs_d_plus_width = d_smoothstep(abs_d_plus_width, d_w);
- auto d_abs_d_minus_width = -d_smoothstep(abs_d_minus_width, d_w);
-
- auto d_d = d_abs_d_plus_width + d_abs_d_minus_width;
- if (d < 0) {
- d_d = -d_d;
- }
- auto d_stroke_width = d_abs_d_plus_width - d_abs_d_minus_width;
-
- const auto &shape_group = scene.shape_groups[group_id];
- ShapeGroup &d_shape_group = scene.d_shape_groups[group_id];
- Shape &d_shape = scene.d_shapes[fragments[i].shape_id];
- if (fabs(d_d) > 1e-10f) {
- d_compute_distance(shape_group.canvas_to_shape,
- shape_group.shape_to_canvas,
- shape,
- pt,
- fragments[i].closest_pt,
- fragments[i].path_info,
- d_d,
- d_shape_group.shape_to_canvas,
- d_shape,
- d_translation);
- }
- atomic_add(&d_shape.stroke_width, d_stroke_width);
- }
- } else {
- const auto &shape = scene.shapes[fragments[i].shape_id];
- auto d = fragments[i].distance;
- auto w = smoothstep(d);
- if (w != 0) {
- // color_alpha[3] = color_alpha[3] * w;
- auto d_w = w > 0 ? (fragments[i].alpha / w) * d_alpha_i : 0.f;
- d_alpha_i *= w;
-
- d_sample_color(scene.shape_groups[group_id].fill_color_type,
- scene.shape_groups[group_id].fill_color,
- pt,
- Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i},
- scene.d_shape_groups[group_id].fill_color,
- d_translation);
-
- // w = smoothstep(d)
- auto d_d = d_smoothstep(d, d_w);
- if (d < 0) {
- d_d = -d_d;
- }
-
- const auto &shape_group = scene.shape_groups[group_id];
- ShapeGroup &d_shape_group = scene.d_shape_groups[group_id];
- Shape &d_shape = scene.d_shapes[fragments[i].shape_id];
- if (fabs(d_d) > 1e-10f && fragments[i].within_distance) {
- d_compute_distance(shape_group.canvas_to_shape,
- shape_group.shape_to_canvas,
- shape,
- pt,
- fragments[i].closest_pt,
- fragments[i].path_info,
- d_d,
- d_shape_group.shape_to_canvas,
- d_shape,
- d_translation);
- }
- }
- }
- d_curr_color = d_prev_color;
- d_curr_alpha = d_prev_alpha;
- }
- if (d_background_color != nullptr) {
- d_background_color->x += d_curr_color.x;
- d_background_color->y += d_curr_color.y;
- d_background_color->z += d_curr_color.z;
- d_background_color->w += d_curr_alpha;
- }
- }
- return Vector4f{final_color[0], final_color[1], final_color[2], final_alpha};
-}
-
-struct weight_kernel {
- DEVICE void operator()(int idx) {
- auto rng_state = init_pcg32(idx, seed);
- // height * width * num_samples_y * num_samples_x
- auto sx = idx % num_samples_x;
- auto sy = (idx / num_samples_x) % num_samples_y;
- auto x = (idx / (num_samples_x * num_samples_y)) % width;
- auto y = (idx / (num_samples_x * num_samples_y * width));
- assert(y < height);
- auto rx = next_pcg32_float(&rng_state);
- auto ry = next_pcg32_float(&rng_state);
- if (use_prefiltering) {
- rx = ry = 0.5f;
- }
- auto pt = Vector2f{x + ((float)sx + rx) / num_samples_x,
- y + ((float)sy + ry) / num_samples_y};
- auto radius = scene.filter->radius;
- assert(radius >= 0);
- auto ri = (int)ceil(radius);
- for (int dy = -ri; dy <= ri; dy++) {
- for (int dx = -ri; dx <= ri; dx++) {
- auto xx = x + dx;
- auto yy = y + dy;
- if (xx >= 0 && xx < width && yy >= 0 && yy < height) {
- auto xc = xx + 0.5f;
- auto yc = yy + 0.5f;
- auto filter_weight = compute_filter_weight(*scene.filter,
- xc - pt.x,
- yc - pt.y);
- atomic_add(weight_image[yy * width + xx], filter_weight);
- }
- }
- }
- }
-
- SceneData scene;
- float *weight_image;
- int width;
- int height;
- int num_samples_x;
- int num_samples_y;
- uint64_t seed;
- bool use_prefiltering;
-};
-
-// We use a "mega kernel" for rendering
-struct render_kernel {
- DEVICE void operator()(int idx) {
- // height * width * num_samples_y * num_samples_x
- auto pt = Vector2f{0, 0};
- auto x = 0;
- auto y = 0;
- if (eval_positions == nullptr) {
- auto rng_state = init_pcg32(idx, seed);
- auto sx = idx % num_samples_x;
- auto sy = (idx / num_samples_x) % num_samples_y;
- x = (idx / (num_samples_x * num_samples_y)) % width;
- y = (idx / (num_samples_x * num_samples_y * width));
- assert(x < width && y < height);
- auto rx = next_pcg32_float(&rng_state);
- auto ry = next_pcg32_float(&rng_state);
- if (use_prefiltering) {
- rx = ry = 0.5f;
- }
- pt = Vector2f{x + ((float)sx + rx) / num_samples_x,
- y + ((float)sy + ry) / num_samples_y};
- } else {
- pt = Vector2f{eval_positions[2 * idx],
- eval_positions[2 * idx + 1]};
- x = int(pt.x);
- y = int(pt.y);
- }
-
- // normalize pt to [0, 1]
- auto npt = pt;
- npt.x /= width;
- npt.y /= height;
- auto num_samples = num_samples_x * num_samples_y;
- if (render_image != nullptr || d_render_image != nullptr) {
- Vector4f d_color = Vector4f{0, 0, 0, 0};
- if (d_render_image != nullptr) {
- // Gather d_color from d_render_image inside the filter kernel
- // normalize using weight_image
- d_color = gather_d_color(*scene.filter,
- d_render_image,
- weight_image,
- width,
- height,
- pt);
- }
- auto color = Vector4f{0, 0, 0, 0};
- if (use_prefiltering) {
- color = sample_color_prefiltered(scene,
- background_image != nullptr ? (const Vector4f*)&background_image[4 * ((y * width) + x)] : nullptr,
- npt,
- d_render_image != nullptr ? &d_color : nullptr,
- d_background_image != nullptr ? (Vector4f*)&d_background_image[4 * ((y * width) + x)] : nullptr,
- d_translation != nullptr ? &d_translation[2 * (y * width + x)] : nullptr);
- } else {
- color = sample_color(scene,
- background_image != nullptr ? (const Vector4f*)&background_image[4 * ((y * width) + x)] : nullptr,
- npt,
- d_render_image != nullptr ? &d_color : nullptr,
- nullptr,
- d_background_image != nullptr ? (Vector4f*)&d_background_image[4 * ((y * width) + x)] : nullptr,
- d_translation != nullptr ? &d_translation[2 * (y * width + x)] : nullptr);
- }
- assert(isfinite(color));
- // Splat color onto render_image
- auto radius = scene.filter->radius;
- assert(radius >= 0);
- auto ri = (int)ceil(radius);
- for (int dy = -ri; dy <= ri; dy++) {
- for (int dx = -ri; dx <= ri; dx++) {
- auto xx = x + dx;
- auto yy = y + dy;
- if (xx >= 0 && xx < width && yy >= 0 && yy < height &&
- weight_image[yy * width + xx] > 0) {
- auto weight_sum = weight_image[yy * width + xx];
- auto xc = xx + 0.5f;
- auto yc = yy + 0.5f;
- auto filter_weight = compute_filter_weight(*scene.filter,
- xc - pt.x,
- yc - pt.y);
- auto weighted_color = filter_weight * color / weight_sum;
- if (render_image != nullptr) {
- atomic_add(render_image[4 * (yy * width + xx) + 0],
- weighted_color[0]);
- atomic_add(render_image[4 * (yy * width + xx) + 1],
- weighted_color[1]);
- atomic_add(render_image[4 * (yy * width + xx) + 2],
- weighted_color[2]);
- atomic_add(render_image[4 * (yy * width + xx) + 3],
- weighted_color[3]);
- }
- if (d_render_image != nullptr) {
- // Backprop to filter_weight
- // pixel = \sum weight * color / \sum weight
- auto d_pixel = Vector4f{
- d_render_image[4 * (yy * width + xx) + 0],
- d_render_image[4 * (yy * width + xx) + 1],
- d_render_image[4 * (yy * width + xx) + 2],
- d_render_image[4 * (yy * width + xx) + 3],
- };
- auto d_weight =
- (dot(d_pixel, color) * weight_sum -
- filter_weight * dot(d_pixel, color) * (weight_sum - filter_weight)) /
- square(weight_sum);
- d_compute_filter_weight(*scene.filter,
- xc - pt.x,
- yc - pt.y,
- d_weight,
- scene.d_filter);
- }
- }
- }
- }
- }
- if (sdf_image != nullptr || d_sdf_image != nullptr) {
- float d_dist = 0.f;
- if (d_sdf_image != nullptr) {
- if (eval_positions == nullptr) {
- d_dist = d_sdf_image[y * width + x];
- } else {
- d_dist = d_sdf_image[idx];
- }
- }
- auto weight = eval_positions == nullptr ? 1.f / num_samples : 1.f;
- auto dist = sample_distance(scene, npt, weight,
- d_sdf_image != nullptr ? &d_dist : nullptr,
- d_translation != nullptr ? &d_translation[2 * (y * width + x)] : nullptr);
- if (sdf_image != nullptr) {
- if (eval_positions == nullptr) {
- atomic_add(sdf_image[y * width + x], dist);
- } else {
- atomic_add(sdf_image[idx], dist);
- }
- }
- }
- }
-
- SceneData scene;
- float *background_image;
- float *render_image;
- float *weight_image;
- float *sdf_image;
- float *d_background_image;
- float *d_render_image;
- float *d_sdf_image;
- float *d_translation;
- int width;
- int height;
- int num_samples_x;
- int num_samples_y;
- uint64_t seed;
- bool use_prefiltering;
- float *eval_positions;
-};
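The splatting loop in `render_kernel` above accumulates `filter_weight * color / weight_sum` into every pixel covered by the filter footprint, where `weight_image` holds the per-pixel sum of filter weights produced by a separate weight pass. A minimal CPU-only sketch of the same two-pass normalized splat follows; the tent filter, the `Sample` struct, and the `splat`/`for_footprint` names are illustrative assumptions, not the kernel's actual machinery.

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Hypothetical stand-in for scene.filter: a tent filter of radius 1.
static float tent_weight(float dx, float dy) {
    float wx = std::max(0.f, 1.f - std::fabs(dx));
    float wy = std::max(0.f, 1.f - std::fabs(dy));
    return wx * wy;
}

struct Sample { float x, y; float rgba[4]; };

// Pass 1 accumulates per-pixel filter weights (the "weight image");
// pass 2 splats filter_weight * color / weight_sum, so each pixel ends up
// with a weighted average of the samples whose filter footprint covers it.
void splat(const std::vector<Sample> &samples, int width, int height,
           std::vector<float> &image) { // image: width * height * 4, zero-initialized
    const int radius = 1;
    std::vector<float> weight(width * height, 0.f);
    auto for_footprint = [&](const Sample &s, auto &&body) {
        for (int dy = -radius; dy <= radius; dy++) {
            for (int dx = -radius; dx <= radius; dx++) {
                int xx = int(s.x) + dx, yy = int(s.y) + dy;
                if (xx < 0 || xx >= width || yy < 0 || yy >= height) continue;
                body(xx, yy, tent_weight(xx + 0.5f - s.x, yy + 0.5f - s.y));
            }
        }
    };
    for (const Sample &s : samples) { // pass 1: weight image
        for_footprint(s, [&](int xx, int yy, float w) { weight[yy * width + xx] += w; });
    }
    for (const Sample &s : samples) { // pass 2: normalized splat
        for_footprint(s, [&](int xx, int yy, float w) {
            float wsum = weight[yy * width + xx];
            if (wsum <= 0.f) return;
            for (int c = 0; c < 4; c++) {
                image[4 * (yy * width + xx) + c] += w * s.rgba[c] / wsum;
            }
        });
    }
}
```

The division by `weight_sum` is what lets the kernel splat samples independently and atomically while still producing a properly normalized filter average per pixel.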
-
-struct BoundarySample {
- Vector2f pt;
- Vector2f local_pt;
- Vector2f normal;
- int shape_group_id;
- int shape_id;
- float t;
- BoundaryData data;
- float pdf;
-};
-
-struct sample_boundary_kernel {
- DEVICE void operator()(int idx) {
- boundary_samples[idx].pt = Vector2f{0, 0};
- boundary_samples[idx].shape_id = -1;
- boundary_ids[idx] = idx;
- morton_codes[idx] = 0;
-
- auto rng_state = init_pcg32(idx, seed);
- auto u = next_pcg32_float(&rng_state);
- // Sample a shape
- auto sample_id = sample(scene.sample_shapes_cdf,
- scene.num_total_shapes,
- u);
- assert(sample_id >= 0 && sample_id < scene.num_total_shapes);
- auto shape_id = scene.sample_shape_id[sample_id];
- assert(shape_id >= 0 && shape_id < scene.num_shapes);
- auto shape_group_id = scene.sample_group_id[sample_id];
- assert(shape_group_id >= 0 && shape_group_id < scene.num_shape_groups);
- auto shape_pmf = scene.sample_shapes_pmf[shape_id];
- if (shape_pmf <= 0) {
- return;
- }
- // Sample a point on the boundary of the shape
- auto boundary_pdf = 0.f;
- auto normal = Vector2f{0, 0};
- auto t = next_pcg32_float(&rng_state);
- BoundaryData boundary_data;
- const ShapeGroup &shape_group = scene.shape_groups[shape_group_id];
- auto local_boundary_pt = sample_boundary(
- scene, shape_group_id, shape_id,
- t, normal, boundary_pdf, boundary_data);
- if (boundary_pdf <= 0) {
- return;
- }
-
- // local_boundary_pt & normal are in the shape's local space;
- // transform them to canvas space
- auto boundary_pt = xform_pt(shape_group.shape_to_canvas, local_boundary_pt);
- normal = xform_normal(shape_group.canvas_to_shape, normal);
- // Normalize boundary_pt to [0, 1)
- boundary_pt.x /= scene.canvas_width;
- boundary_pt.y /= scene.canvas_height;
-
- boundary_samples[idx].pt = boundary_pt;
- boundary_samples[idx].local_pt = local_boundary_pt;
- boundary_samples[idx].normal = normal;
- boundary_samples[idx].shape_group_id = shape_group_id;
- boundary_samples[idx].shape_id = shape_id;
- boundary_samples[idx].t = t;
- boundary_samples[idx].data = boundary_data;
- boundary_samples[idx].pdf = shape_pmf * boundary_pdf;
- TVector2<uint32_t> p_i{boundary_pt.x * 1023, boundary_pt.y * 1023};
- morton_codes[idx] = (expand_bits(p_i.x) << 1u) |
- (expand_bits(p_i.y) << 0u);
- }
-
- SceneData scene;
- uint64_t seed;
- BoundarySample *boundary_samples;
- int *boundary_ids;
- uint32_t *morton_codes;
-};
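`sample_boundary_kernel` keys each boundary sample with a 2D Morton code: the quantized x and y coordinates are bit-interleaved so that sorting by the code groups spatially nearby samples. A common way to implement the bit expansion is sketched below; this 16-bit-per-axis variant is an assumption and may differ in detail from the `expand_bits` used by the file.

```cpp
#include <cstdint>

// Spread the low 16 bits of v so that a zero bit sits between every
// original bit: ...b3 b2 b1 b0  ->  ...0 b3 0 b2 0 b1 0 b0
static uint32_t expand_bits_2d(uint32_t v) {
    v &= 0x0000ffffu;
    v = (v | (v << 8)) & 0x00ff00ffu;
    v = (v | (v << 4)) & 0x0f0f0f0fu;
    v = (v | (v << 2)) & 0x33333333u;
    v = (v | (v << 1)) & 0x55555555u;
    return v;
}

// Interleave x into the odd bits and y into the even bits, matching the
// (expand_bits(x) << 1) | expand_bits(y) pattern in the kernel above.
static uint32_t morton2d(uint32_t x, uint32_t y) {
    return (expand_bits_2d(x) << 1u) | expand_bits_2d(y);
}
```

Sorting by this key is purely a performance measure: nearby boundary samples tend to scatter gradients into nearby pixels, which improves memory coherence on the GPU.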
-
-struct render_edge_kernel {
- DEVICE void operator()(int idx) {
- auto bid = boundary_ids[idx];
- if (boundary_samples[bid].shape_id == -1) {
- return;
- }
- auto boundary_pt = boundary_samples[bid].pt;
- auto local_boundary_pt = boundary_samples[bid].local_pt;
- auto normal = boundary_samples[bid].normal;
- auto shape_group_id = boundary_samples[bid].shape_group_id;
- auto shape_id = boundary_samples[bid].shape_id;
- auto t = boundary_samples[bid].t;
- auto boundary_data = boundary_samples[bid].data;
- auto pdf = boundary_samples[bid].pdf;
-
- const ShapeGroup &shape_group = scene.shape_groups[shape_group_id];
-
- auto bx = int(boundary_pt.x * width);
- auto by = int(boundary_pt.y * height);
- if (bx < 0 || bx >= width || by < 0 || by >= height) {
- return;
- }
-
- // Sample the two sides of the boundary
- auto inside_query = EdgeQuery{shape_group_id, shape_id, false};
- auto outside_query = EdgeQuery{shape_group_id, shape_id, false};
- auto color_inside = sample_color(scene,
- background_image != nullptr ? (const Vector4f *)&background_image[4 * ((by * width) + bx)] : nullptr,
- boundary_pt - 1e-4f * normal,
- nullptr, &inside_query);
- auto color_outside = sample_color(scene,
- background_image != nullptr ? (const Vector4f *)&background_image[4 * ((by * width) + bx)] : nullptr,
- boundary_pt + 1e-4f * normal,
- nullptr, &outside_query);
- if (!inside_query.hit && !outside_query.hit) {
- // occluded
- return;
- }
- if (!inside_query.hit) {
- normal = -normal;
- swap_(inside_query, outside_query);
- swap_(color_inside, color_outside);
- }
- // Boundary point in screen space
- auto sboundary_pt = boundary_pt;
- sboundary_pt.x *= width;
- sboundary_pt.y *= height;
- auto d_color = gather_d_color(*scene.filter,
- d_render_image,
- weight_image,
- width,
- height,
- sboundary_pt);
- // Normalization factor
- d_color /= float(scene.canvas_width * scene.canvas_height);
-
- assert(isfinite(d_color));
- assert(isfinite(pdf) && pdf > 0);
- auto contrib = dot(color_inside - color_outside, d_color) / pdf;
- ShapeGroup &d_shape_group = scene.d_shape_groups[shape_group_id];
- accumulate_boundary_gradient(scene.shapes[shape_id],
- contrib, t, normal, boundary_data, scene.d_shapes[shape_id],
- shape_group.shape_to_canvas, local_boundary_pt, d_shape_group.shape_to_canvas);
- // Don't need to backprop to filter weights:
- // \int f'(x) g(x) dx doesn't contain discontinuities
- // if f is continuous, even if g is discontinuous
- if (d_translation != nullptr) {
- // According to the Reynolds transport theorem,
- // the Jacobian of the boundary integral is dot(velocity, normal)
- // The velocity for a translation in x is (1, 0);
- // the velocity for a translation in y is (0, 1).
- atomic_add(&d_translation[2 * (by * width + bx) + 0], normal.x * contrib);
- atomic_add(&d_translation[2 * (by * width + bx) + 1], normal.y * contrib);
- }
- }
-
- SceneData scene;
- const float *background_image;
- const BoundarySample *boundary_samples;
- const int *boundary_ids;
- float *weight_image;
- float *d_render_image;
- float *d_translation;
- int width;
- int height;
- int num_samples_x;
- int num_samples_y;
-};
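`render_edge_kernel` converts each boundary sample into per-pixel translation gradients through `dot(velocity, normal)`: translating the shape along x moves every boundary point with velocity (1, 0), and along y with (0, 1), so the two gradient components reduce to `normal.x * contrib` and `normal.y * contrib`. A stripped-down sketch of that accumulation, with hypothetical `BoundaryContribution` and `accumulate_translation_grad` names invented for illustration:

```cpp
#include <vector>

struct Vec2 { float x, y; };

// One evaluated boundary sample: its pixel, outward normal, and the scalar
// dot(color_inside - color_outside, d_color) / pdf term from the kernel above.
struct BoundaryContribution {
    int px, py;
    Vec2 normal;
    float contrib;
};

// Accumulate d(loss)/d(translation) per pixel. For translation along x the
// boundary velocity is (1, 0), so the Jacobian is normal.x; along y it is
// normal.y. d_translation stores 2 floats (d/dx, d/dy) per pixel.
void accumulate_translation_grad(const std::vector<BoundaryContribution> &samples,
                                 int width, std::vector<float> &d_translation) {
    for (const BoundaryContribution &s : samples) {
        d_translation[2 * (s.py * width + s.px) + 0] += s.normal.x * s.contrib;
        d_translation[2 * (s.py * width + s.px) + 1] += s.normal.y * s.contrib;
    }
}
```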
-
-void render(std::shared_ptr<Scene> scene,
- ptr<float> background_image,
- ptr<float> render_image,
- ptr<float> render_sdf,
- int width,
- int height,
- int num_samples_x,
- int num_samples_y,
- uint64_t seed,
- ptr<float> d_background_image,
- ptr<float> d_render_image,
- ptr<float> d_render_sdf,
- ptr<float> d_translation,
- bool use_prefiltering,
- ptr<float> eval_positions,
- int num_eval_positions) {
-#ifdef __NVCC__
- int old_device_id = -1;
- if (scene->use_gpu) {
- checkCuda(cudaGetDevice(&old_device_id));
- if (scene->gpu_index != -1) {
- checkCuda(cudaSetDevice(scene->gpu_index));
- }
- }
-#endif
- parallel_init();
-
- float *weight_image = nullptr;
- // Allocate and zero the weight image
- if (scene->use_gpu) {
-#ifdef __CUDACC__
- if (eval_positions.get() == nullptr) {
- checkCuda(cudaMallocManaged(&weight_image, width * height * sizeof(float)));
- cudaMemset(weight_image, 0, width * height * sizeof(float));
- }
-#else
- assert(false);
-#endif
- } else {
- if (eval_positions.get() == nullptr) {
- weight_image = (float*)malloc(width * height * sizeof(float));
- memset(weight_image, 0, width * height * sizeof(float));
- }
- }
-
- if (render_image.get() != nullptr || d_render_image.get() != nullptr ||
- render_sdf.get() != nullptr || d_render_sdf.get() != nullptr) {
- if (weight_image != nullptr) {
- parallel_for(weight_kernel{
- get_scene_data(*scene.get()),
- weight_image,
- width,
- height,
- num_samples_x,
- num_samples_y,
- seed
- }, width * height * num_samples_x * num_samples_y, scene->use_gpu);
- }
-
- auto num_samples = eval_positions.get() == nullptr ?
- width * height * num_samples_x * num_samples_y : num_eval_positions;
- parallel_for(render_kernel{
- get_scene_data(*scene.get()),
- background_image.get(),
- render_image.get(),
- weight_image,
- render_sdf.get(),
- d_background_image.get(),
- d_render_image.get(),
- d_render_sdf.get(),
- d_translation.get(),
- width,
- height,
- num_samples_x,
- num_samples_y,
- seed,
- use_prefiltering,
- eval_positions.get()
- }, num_samples, scene->use_gpu);
- }
-
- // Boundary sampling
- if (!use_prefiltering && d_render_image.get() != nullptr) {
- auto num_samples = width * height * num_samples_x * num_samples_y;
- BoundarySample *boundary_samples = nullptr;
- int *boundary_ids = nullptr; // for sorting
- uint32_t *morton_codes = nullptr; // for sorting
- // Allocate boundary samples
- if (scene->use_gpu) {
-#ifdef __CUDACC__
- checkCuda(cudaMallocManaged(&boundary_samples,
- num_samples * sizeof(BoundarySample)));
- checkCuda(cudaMallocManaged(&boundary_ids,
- num_samples * sizeof(int)));
- checkCuda(cudaMallocManaged(&morton_codes,
- num_samples * sizeof(uint32_t)));
-#else
- assert(false);
- #endif
- } else {
- boundary_samples = (BoundarySample*)malloc(
- num_samples * sizeof(BoundarySample));
- boundary_ids = (int*)malloc(
- num_samples * sizeof(int));
- morton_codes = (uint32_t*)malloc(
- num_samples * sizeof(uint32_t));
- }
-
- // Edge sampling
- // We sort the boundary samples for better thread coherency
- parallel_for(sample_boundary_kernel{
- get_scene_data(*scene.get()),
- seed,
- boundary_samples,
- boundary_ids,
- morton_codes
- }, num_samples, scene->use_gpu);
- if (scene->use_gpu) {
- thrust::sort_by_key(thrust::device, morton_codes, morton_codes + num_samples, boundary_ids);
- } else {
- // No need to sort on the CPU; we are not using SIMD hardware there anyway.
- // thrust::sort_by_key(thrust::host, morton_codes, morton_codes + num_samples, boundary_ids);
- }
- parallel_for(render_edge_kernel{
- get_scene_data(*scene.get()),
- background_image.get(),
- boundary_samples,
- boundary_ids,
- weight_image,
- d_render_image.get(),
- d_translation.get(),
- width,
- height,
- num_samples_x,
- num_samples_y
- }, num_samples, scene->use_gpu);
- if (scene->use_gpu) {
-#ifdef __CUDACC__
- checkCuda(cudaFree(boundary_samples));
- checkCuda(cudaFree(boundary_ids));
- checkCuda(cudaFree(morton_codes));
-#else
- assert(false);
-#endif
- } else {
- free(boundary_samples);
- free(boundary_ids);
- free(morton_codes);
- }
- }
-
- // Clean up weight image
- if (scene->use_gpu) {
-#ifdef __CUDACC__
- checkCuda(cudaFree(weight_image));
-#else
- assert(false);
-#endif
- } else {
- free(weight_image);
- }
-
- if (scene->use_gpu) {
- cuda_synchronize();
- }
-
- parallel_cleanup();
-#ifdef __NVCC__
- if (old_device_id != -1) {
- checkCuda(cudaSetDevice(old_device_id));
- }
-#endif
-}
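`render` repeats the same allocate/zero/free pattern for the weight image and the boundary-sample buffers, switching between `cudaMallocManaged` and `malloc` based on `scene->use_gpu`. One way that pattern could be factored out is sketched below; `alloc_buffer` and `free_buffer` are hypothetical helpers (they assume the file's `checkCuda` macro) and are not part of the original source.

```cpp
#include <cstddef>
#include <cstdlib>
#include <cstring>

// Hypothetical helper: allocate `count` zero-initialized elements either in
// CUDA managed memory (when built with nvcc and use_gpu is true) or on the host.
template <typename T>
T *alloc_buffer(std::size_t count, bool use_gpu) {
    T *buf = nullptr;
#ifdef __CUDACC__
    if (use_gpu) {
        checkCuda(cudaMallocManaged(&buf, count * sizeof(T)));
        checkCuda(cudaMemset(buf, 0, count * sizeof(T)));
        return buf;
    }
#else
    (void)use_gpu; // CPU-only build: use_gpu must be false here.
#endif
    buf = (T *)std::malloc(count * sizeof(T));
    std::memset(buf, 0, count * sizeof(T));
    return buf;
}

// Matching release helper for buffers obtained from alloc_buffer.
template <typename T>
void free_buffer(T *buf, bool use_gpu) {
#ifdef __CUDACC__
    if (use_gpu) {
        checkCuda(cudaFree(buf));
        return;
    }
#endif
    std::free(buf);
}
```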
-
-PYBIND11_MODULE(diffvg, m) {
- m.doc() = "Differential Vector Graphics";
-
- py::class_>(m, "void_ptr")
- .def(py::init())
- .def("as_size_t", &ptr::as_size_t);
- py::class_>(m, "float_ptr")
- .def(py::init());
- py::class_>(m, "int_ptr")
- .def(py::init());
-
- py::class_(m, "Vector2f")
- .def(py::init())
- .def_readwrite("x", &Vector2f::x)
- .def_readwrite("y", &Vector2f::y);
-
- py::class_(m, "Vector3f")
- .def(py::init())
- .def_readwrite("x", &Vector3f::x)
- .def_readwrite("y", &Vector3f::y)
- .def_readwrite("z", &Vector3f::z);
-
- py::class_(m, "Vector4f")
- .def(py::init())
- .def_readwrite("x", &Vector4f::x)
- .def_readwrite("y", &Vector4f::y)
- .def_readwrite("z", &Vector4f::z)
- .def_readwrite("w", &Vector4f::w);
-
- py::enum_(m, "ShapeType")
- .value("circle", ShapeType::Circle)
- .value("ellipse", ShapeType::Ellipse)
- .value("path", ShapeType::Path)
- .value("rect", ShapeType::Rect);
-
- py::class_(m, "Circle")
- .def(py::init())
- .def("get_ptr", &Circle::get_ptr)
- .def_readonly("radius", &Circle::radius)
- .def_readonly("center", &Circle::center);
-
- py::class_(m, "Ellipse")
- .def(py::init())
- .def("get_ptr", &Ellipse::get_ptr)
- .def_readonly("radius", &Ellipse::radius)
- .def_readonly("center", &Ellipse::center);
-
- py::class_(m, "Path")
- .def(py::init, ptr, ptr, int, int, bool, bool>())
- .def("get_ptr", &Path::get_ptr)
- .def("has_thickness", &Path::has_thickness)
- .def("copy_to", &Path::copy_to)
- .def_readonly("num_points", &Path::num_points);
-
- py::class_(m, "Rect")
- .def(py::init())
- .def("get_ptr", &Rect::get_ptr)
- .def_readonly("p_min", &Rect::p_min)
- .def_readonly("p_max", &Rect::p_max);
-
- py::enum_(m, "ColorType")
- .value("constant", ColorType::Constant)
- .value("linear_gradient", ColorType::LinearGradient)
- .value("radial_gradient", ColorType::RadialGradient);
-
- py::class_(m, "Constant")
- .def(py::init())
- .def("get_ptr", &Constant::get_ptr)
- .def_readonly("color", &Constant::color);
-
- py::class_(m, "LinearGradient")
- .def(py::init, ptr>())
- .def("get_ptr", &LinearGradient::get_ptr)
- .def("copy_to", &LinearGradient::copy_to)
- .def_readonly("begin", &LinearGradient::begin)
- .def_readonly("end", &LinearGradient::end)
- .def_readonly("num_stops", &LinearGradient::num_stops);
-
- py::class_(m, "RadialGradient")
- .def(py::init, ptr>())
- .def("get_ptr", &RadialGradient::get_ptr)
- .def("copy_to", &RadialGradient::copy_to)
- .def_readonly("center", &RadialGradient::center)
- .def_readonly("radius", &RadialGradient::radius)
- .def_readonly("num_stops", &RadialGradient::num_stops);
-
- py::class_(m, "Shape")
- .def(py::init, float>())
- .def("as_circle", &Shape::as_circle)
- .def("as_ellipse", &Shape::as_ellipse)
- .def("as_path", &Shape::as_path)
- .def("as_rect", &Shape::as_rect)
- .def_readonly("type", &Shape::type)
- .def_readonly("stroke_width", &Shape::stroke_width);
-
- py::class_(m, "ShapeGroup")
- .def(py::init,
- int,
- ColorType,
- ptr,
- ColorType,
- ptr,
- bool,
- ptr>())
- .def("fill_color_as_constant", &ShapeGroup::fill_color_as_constant)
- .def("fill_color_as_linear_gradient", &ShapeGroup::fill_color_as_linear_gradient)
- .def("fill_color_as_radial_gradient", &ShapeGroup::fill_color_as_radial_gradient)
- .def("stroke_color_as_constant", &ShapeGroup::stroke_color_as_constant)
- .def("stroke_color_as_linear_gradient", &ShapeGroup::stroke_color_as_linear_gradient)
- .def("stroke_color_as_radial_gradient", &ShapeGroup::fill_color_as_radial_gradient)
- .def("has_fill_color", &ShapeGroup::has_fill_color)
- .def("has_stroke_color", &ShapeGroup::has_stroke_color)
- .def("copy_to", &ShapeGroup::copy_to)
- .def_readonly("fill_color_type", &ShapeGroup::fill_color_type)
- .def_readonly("stroke_color_type", &ShapeGroup::stroke_color_type);
-
- py::enum_(m, "FilterType")
- .value("box", FilterType::Box)
- .value("tent", FilterType::Tent)
- .value("parabolic", FilterType::RadialParabolic)
- .value("hann", FilterType::Hann);
-
- py::class_(m, "Filter")
- .def(py::init());
-
- py::class_>(m, "Scene")
- .def(py::init &,
- const std::vector &,
- const Filter &,
- bool,
- int>())
- .def("get_d_shape", &Scene::get_d_shape)
- .def("get_d_shape_group", &Scene::get_d_shape_group)
- .def("get_d_filter_radius", &Scene::get_d_filter_radius)
- .def_readonly("num_shapes", &Scene::num_shapes)
- .def_readonly("num_shape_groups", &Scene::num_shape_groups);
-
- m.def("render", &render, "");
-}
diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/allocator/allocator_traits.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/allocator/allocator_traits.h
deleted file mode 100644
index c2557b57efa55a1538f58bf5abc790cff5a360a3..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/thrust/thrust/detail/allocator/allocator_traits.h
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * Copyright 2008-2018 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// allocator_traits::rebind_alloc and allocator::rebind_traits are from libc++,
-// dual licensed under the MIT and the University of Illinois Open Source
-// Licenses.
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/type_traits.h>
-#include <thrust/detail/type_traits/pointer_traits.h>
-#include <thrust/detail/type_traits/has_nested_type.h>
-#include <thrust/detail/type_traits/has_member_function.h>
-
-namespace thrust
-{
-namespace detail
-{
-
-
-// forward declaration for has_member_system
-template<typename Alloc> struct allocator_system;
-
-
-namespace allocator_traits_detail
-{
-
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_value_type, value_type)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_pointer, pointer)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_const_pointer, const_pointer)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_reference, reference)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_const_reference, const_reference)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_void_pointer, void_pointer)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_const_void_pointer, const_void_pointer)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_difference_type, difference_type)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_size_type, size_type)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_propagate_on_container_copy_assignment, propagate_on_container_copy_assignment)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_propagate_on_container_move_assignment, propagate_on_container_move_assignment)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_propagate_on_container_swap, propagate_on_container_swap)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_system_type, system_type)
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_is_always_equal, is_always_equal)
-__THRUST_DEFINE_HAS_MEMBER_FUNCTION(has_member_system_impl, system)
-
-template<typename T, typename U>
- struct has_rebind
-{
- typedef char yes_type;
- typedef int no_type;
-
- template<typename S>
- static yes_type test(typename S::template rebind<U>::other*);
- template<typename S>
- static no_type test(...);
-
- static bool const value = sizeof(test<T>(0)) == sizeof(yes_type);
-
- typedef thrust::detail::integral_constant<bool, value> type;
-};
-
-template<typename T>
- struct nested_pointer
-{
- typedef typename T::pointer type;
-};
-
-template<typename T>
- struct nested_const_pointer
-{
- typedef typename T::const_pointer type;
-};
-
-template<typename T>
- struct nested_reference
-{
- typedef typename T::reference type;
-};
-
-template<typename T>
- struct nested_const_reference
-{
- typedef typename T::const_reference type;
-};
-
-template<typename T>
- struct nested_void_pointer
-{
- typedef typename T::void_pointer type;
-};
-
-template<typename T>
- struct nested_const_void_pointer
-{
- typedef typename T::const_void_pointer type;
-};
-
-template<typename T>
- struct nested_difference_type
-{
- typedef typename T::difference_type type;
-};
-
-template<typename T>
- struct nested_size_type
-{
- typedef typename T::size_type type;
-};
-
-template<typename T>
- struct nested_propagate_on_container_copy_assignment
-{
- typedef typename T::propagate_on_container_copy_assignment type;
-};
-
-template<typename T>
- struct nested_propagate_on_container_move_assignment
-{
- typedef typename T::propagate_on_container_move_assignment type;
-};
-
-template<typename T>
- struct nested_propagate_on_container_swap
-{
- typedef typename T::propagate_on_container_swap type;
-};
-
-template<typename T>
- struct nested_is_always_equal
-{
- typedef typename T::is_always_equal type;
-};
-
-template<typename T>
- struct nested_system_type
-{
- typedef typename T::system_type type;
-};
-
-template<typename Alloc>
- struct has_member_system
-{
- typedef typename allocator_system<Alloc>::type system_type;
-
- typedef typename has_member_system_impl<Alloc, system_type&(void)>::type type;
- static const bool value = type::value;
-};
-
-template<typename Alloc, typename U, bool = has_rebind<Alloc, U>::value>
- struct rebind_alloc
-{
- typedef typename Alloc::template rebind<U>::other type;
-};
-
-#if THRUST_CPP_DIALECT >= 2011
-template<template<typename, typename...> class Alloc,
- typename T, typename... Args, typename U>
- struct rebind_alloc<Alloc<T, Args...>, U, true>
-{
- typedef typename Alloc<T, Args...>::template rebind<U>::other type;
-};
-
-template<template<typename, typename...> class Alloc,
- typename T, typename... Args, typename U>
- struct rebind_alloc<Alloc<T, Args...>, U, false>
-{
- typedef Alloc<U, Args...> type;
-};
-#else // C++03
-template<template<typename> class Alloc, typename T, typename U>
- struct rebind_alloc<Alloc<T>, U, true>
-{
- typedef typename Alloc