diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/Avatar Friday Patcher V1.1 __HOT__.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/Avatar Friday Patcher V1.1 __HOT__.md
deleted file mode 100644
index deab4b50df68137193797b766f2db1964d235cb3..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/Avatar Friday Patcher V1.1 __HOT__.md
+++ /dev/null
@@ -1,84 +0,0 @@
-## Avatar Friday Patcher V1.1
-
-**Download ✅ [https://jinyurl.com/2tA014](https://jinyurl.com/2tA014)**
-
-
-# Avatar Friday Patcher v1.1: How to Fix Common Issues and Enjoy the Game
-
-
-
-If you are a fan of the Avatar franchise, you might have been eagerly waiting for the release of Avatar Friday, the new open-world RPG game based on the popular movie and TV series. However, some players have reported experiencing various issues with the game, such as crashes, glitches, low FPS, and missing features. Fortunately, there is a solution: Avatar Friday Patcher v1.1.
-
-
-
-Avatar Friday Patcher v1.1 is a fan-made mod that aims to improve the performance and stability of the game, as well as add some missing features and enhancements. The patcher is easy to use and compatible with most versions of the game. Here are some of the benefits of using Avatar Friday Patcher v1.1:
-
-
-
-- Fixes crashes and freezes that occur randomly or at certain points in the game.
-
-- Optimizes the graphics settings and reduces the CPU and GPU load, resulting in higher FPS and smoother gameplay.
-
-- Enables full-screen mode and custom resolutions, allowing you to play the game in your preferred display settings.
-
-- Adds missing features such as subtitles, controller support, achievements, and cloud saves.
-
-- Enhances the game's visuals and audio quality, making the world of Pandora more immersive and realistic.
-
-- Fixes bugs and glitches that affect the gameplay, such as broken quests, missing items, clipping issues, and more.
-
-
-
-To use Avatar Friday Patcher v1.1, you need to download it from the official website or a trusted source. Then, you need to extract the files to your game folder and run the patcher.exe file. The patcher will automatically detect your game version and apply the necessary changes. You can also customize some of the options according to your preferences. Once the patching process is done, you can launch the game and enjoy it without any problems.
-
-
-
-Avatar Friday Patcher v1.1 is a must-have mod for anyone who wants to play Avatar Friday without any hassle. It will make your gaming experience more enjoyable and satisfying. Download it today and see for yourself!
-
-
-
-Avatar Friday Patcher v1.1 is not only a mod that fixes and improves the game, but also a mod that adds new content and features. Here are some of the additional things you can do with Avatar Friday Patcher v1.1:
-
-
-
-- Explore new areas and locations that were not included in the original game, such as the Floating Mountains, the Tree of Souls, and the Hallelujah Mountains.
-
-- Interact with new characters and factions that have their own stories and quests, such as the Na'vi clans, the RDA soldiers, and the wildlife researchers.
-
-- Customize your avatar's appearance and skills, choosing from different races, genders, hairstyles, outfits, weapons, and abilities.
-
-- Collect and craft new items and resources, such as plants, minerals, artifacts, and equipment.
-
-- Ride and tame various creatures that inhabit Pandora, such as banshees, direhorses, thanators, and more.
-
-
-
-Avatar Friday Patcher v1.1 is a mod that transforms Avatar Friday into a more complete and satisfying game. It is compatible with most of the other mods available for the game, so you can mix and match them to create your own unique experience. If you are looking for a way to enhance your Avatar Friday adventure, you should definitely give Avatar Friday Patcher v1.1 a try!
-
- 145887f19f
-
-
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Crystal-Cs4280-Cm-Ep-Sound-Card-Driver-FOR-WINDOWS-7181.md b/spaces/1gistliPinn/ChatGPT4/Crystal-Cs4280-Cm-Ep-Sound-Card-Driver-FOR-WINDOWS-7181.md
deleted file mode 100644
index ce0d6c2924d56f4cd750adde6aba2a263d3eed81..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Crystal-Cs4280-Cm-Ep-Sound-Card-Driver-FOR-WINDOWS-7181.md
+++ /dev/null
@@ -1,65 +0,0 @@
-Crystal Cs4280 Cm Ep Sound Card Driver FOR WINDOWS 7.181
-
-
-
-DOWNLOAD === [https://gohhs.com/2tvp6s](https://gohhs.com/2tvp6s)
-
-
-How to Install Crystal Cs4280 Cm Ep Sound Card Driver for Windows 7.181
-
-If you have a Crystal Cs4280 Cm Ep sound card and you want to use it with Windows 7.181, you may need to install a driver to make it work properly. A driver is software that allows your computer to communicate with your hardware devices. Without a driver, your sound card may not function correctly or at all.
-
-In this article, we will show you how to download and install the Crystal Cs4280 Cm Ep sound card driver for Windows 7.181 in a few easy steps. We will also provide some tips on how to troubleshoot common issues that may arise during or after the installation process.
-
-Step 1: Download the Crystal Cs4280 Cm Ep Sound Card Driver
-
-The first step is to download the Crystal Cs4280 Cm Ep sound card driver from a reliable source. You can use one of the following links to download the driver file:
-
-
-Crystal Digital cs4280-cm Drivers Download - Solvusoft [^1^]
-Crystal CS4280/CS4614/CS4624 Sound Driver | Crystal Semiconductors [^2^]
-Crystal Audio Drivers Cs4280-Cm | Audio-Digital.net [^3^]
-Crystal Cs4280 Cm Driver Download Win7 [^4^]
-
-
-Make sure you choose the correct version of the driver that matches your operating system and your sound card model. The file name should be something like d1265070.rar or Crystal_CS4281.zip.
-
-Save the file to a location where you can easily find it later, such as your desktop or downloads folder.
-
-Step 2: Extract the Crystal Cs4280 Cm Ep Sound Card Driver File
-
-The next step is to extract the contents of the driver file that you downloaded. The file is compressed in a .rar or .zip format, so you will need a program that can open and extract these types of files. You can use one of the following programs:
-
-
-WinRAR
-7-Zip
-PeaZip
-
-
-Right-click on the driver file and select "Extract Here" or "Extract to" from the menu. Choose a destination folder where you want to extract the files, such as your desktop or downloads folder.
-
-You should see a folder with the name of the driver file, such as d1265070 or Crystal_CS4281. Open this folder and look for a file named setup.exe or install.exe. This is the executable file that will install the driver on your computer.
-
-Step 3: Install the Crystal Cs4280 Cm Ep Sound Card Driver
-
-The final step is to run the setup.exe or install.exe file that you extracted in the previous step. Double-click on this file and follow the instructions on the screen to complete the installation process.
-
-You may need to agree to some terms and conditions, choose a language and a destination folder, and restart your computer after the installation is finished.
-
-Once the installation is done, you should be able to use your Crystal Cs4280 Cm Ep sound card with Windows 7.181 without any problems.
-
-Troubleshooting Tips
-
-If you encounter any issues during or after installing dfd1c89656
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Fifa 11 World Cup Patch Update V1.rar How to Get the Most Out of Your Fifa 11 Game.md b/spaces/1gistliPinn/ChatGPT4/Examples/Fifa 11 World Cup Patch Update V1.rar How to Get the Most Out of Your Fifa 11 Game.md
deleted file mode 100644
index d8b9c50fa78e23a587d5630b72c13045cbda4b0c..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Fifa 11 World Cup Patch Update V1.rar How to Get the Most Out of Your Fifa 11 Game.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-Then, from 21st November to 18th December, a new "live" World Cup mode will be updated during the group and knockout stages, letting you play a single-player tournament alongside real-world fixtures and squads for each game. You can also replay any past game to rewrite history and better England's inevitably disappointing real-world result.
-
-EA has released FIFA 23 patch 1.04 details for PC, PS4, and Xbox One. According to the official FIFA 23 patch notes, the latest update added the FIFA World Cup 2022 to the game. Apart from this, FIFA 23 update 1.04 also includes stability fixes.
-This patch is based on FIFA 17 and will "update" FIFA 11 to the 2016-17 season. The squads (player stats, team tactics, ...) are exactly the same as the FIFA 17 EA squad updates. The graphics (kits, shoes, ...) are mostly from FIFA 17, combined with files from FIFA Online 3 and FIFA 16 mods (FIFA Online 3 has updated 2014-15, i.e. FIFA 15, graphics).
-aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Blue 3 ft. Radio Weasel - Where You Are - Free MP3 and Lyrics Download.md b/spaces/1phancelerku/anime-remove-background/Blue 3 ft. Radio Weasel - Where You Are - Free MP3 and Lyrics Download.md
deleted file mode 100644
index be17fc817ce44d5280180d5ca1ad7e6ac3cd4d77..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Blue 3 ft. Radio Weasel - Where You Are - Free MP3 and Lyrics Download.md
+++ /dev/null
@@ -1,105 +0,0 @@
-
-
How to Download Blue 3's "Where You Are" as an MP3 File
-
If you love Ugandan music, you probably know Blue 3, the girl group that rose to fame in 2005 after winning a talent show. And you probably know their hit song "Where You Are", which features Radio and Weasel, another popular duo in the Ugandan music scene.
-
"Where You Are" is a catchy and romantic song that blends Afrobeat, R&B, and dancehall genres. It has over 20,000 views on YouTube and has been praised by critics and fans alike.
But what if you want to listen to this song offline, without any interruptions or ads? What if you want to save some storage space on your device and still enjoy the high-quality sound of this song?
-
The answer is simple: download "Where You Are" as an MP3 file.
-
In this article, we'll show you how to do that in a few easy steps. We'll also give you some alternative sources where you can download this song as an MP3 file.
-
So let's get started!
-
What is Blue 3 and "Where You Are"?
-
Blue 3 was a Ugandan girl group that consisted of Jackie Chandiru, Lillian Mbabazi, and Cindy Sanyu. They formed in 2005 after winning a talent show called Coca-Cola Popstars.
-
The group released their debut album "Hitaji" in 2006, which featured songs like "Burrn", "Ndayila", and "Hitaji". They also collaborated with other Ugandan artists like Bebe Cool, Jose Chameleone, and Bobi Wine.
-
"Where You Are" was one of their most successful songs, released in 2008. It featured Radio and Weasel, who were part of the Goodlyfe Crew at the time. The song was a love ballad that expressed the desire to be with someone no matter where they are.
-
The song was well-received by both fans and critics, who praised its catchy melody, smooth vocals, and sweet lyrics. It also won several awards, including Song of the Year at the Pearl of Africa Music Awards in 2008.
-
How to download blue 3 where you are mp3 for free
-Download blue 3 where you are mp3 audio
-Blue 3 where you are mp3 download Uganda
-Where you are by blue 3 ft radio and weasel mp3 download
-Download blue 3 where you are mp3 song
-Blue 3 where you are mp3 lyrics download
-Download blue 3 where you are mp3 video
-Blue 3 where you are mp3 online download
-Download blue 3 where you are mp3 remix
-Blue 3 where you are mp3 instrumental download
-Download blue 3 where you are mp3 album
-Blue 3 where you are mp3 ringtone download
-Download blue 3 where you are mp3 music
-Blue 3 where you are mp3 karaoke download
-Download blue 3 where you are mp3 version
-Blue 3 where you are mp3 live performance download
-Download blue 3 where you are mp3 original
-Blue 3 where you are mp3 cover download
-Download blue 3 where you are mp3 official
-Blue 3 where you are mp3 dance download
-Download blue 3 where you are mp3 quality
-Blue 3 where you are mp3 review download
-Download blue 3 where you are mp3 format
-Blue 3 where you are mp3 genre download
-Download blue 3 where you are mp3 release date
-Blue 3 where you are mp3 history download
-Download blue 3 where you are mp3 meaning
-Blue 3 where you are mp3 reaction download
-Download blue 3 where you are mp3 playlist
-Blue 3 where you are mp3 streaming download
-Download blue 3 where you are mp4 to mp3 converter
-
Why Download "Where You Are" as an MP3 File?
-
Downloading "Where You Are" as an MP3 file has many advantages over streaming it online or playing it from a CD. Here are some of them:
-
-
You can listen to it offline, without any internet connection or data charges.
-
You can avoid any ads or interruptions that may ruin your listening experience.
-
You can save some storage space on your device, as MP3 files are smaller than video files or uncompressed audio formats.
-
You can transfer it to any device that supports MP3 playback, such as your smartphone, tablet, laptop, or MP3 player.
-
You can enjoy the song in good quality, as a well-encoded MP3 preserves most of the original audio fidelity.
-
-
As you can see, downloading "Where You Are" as an MP3 file is a smart and convenient way to enjoy this song anytime, anywhere.
-
How to Download "Where You Are" as an MP3 File from YouTube
-
One of the easiest ways to download "Where You Are" as an MP3 file is to use YouTube, where you can find the official video of the song. Here are the steps you need to follow:
-
-
Go to YouTube and search for "Blue 3 Where You Are".
-
Select the video that has the title "Blue 3 ft Radio & Weasel - Where You Are (Official Video)" and has over 20,000 views. This is the official video of the song.
-
Copy the URL of the video from the address bar of your browser.
-
Go to a website that can convert YouTube videos into MP3 files, such as ytmp3.cc, y2mate.com, or onlinevideoconverter.com.
-
Paste the URL of the video into the input box of the website and click on "Convert" or "Download".
-
Wait for a few seconds until the conversion is done and then click on "Download" or "Save" to save the MP3 file to your device.
-
-
Congratulations! You have successfully downloaded "Where You Are" as an MP3 file from YouTube. You can now play it on your device or transfer it to another device.
-
How to Download "Where You Are" as an MP3 File from Other Sources
-
If you don't want to use YouTube or you want to explore other sources where you can download "Where You Are" as an MP3 file, here are some options you can try:
-
-
-
Source
-
How to Download
-
-
-
SoundCloud
-
Go to soundcloud.com and search for "Blue 3 Where You Are". Select the track that has the title "Blue 3 ft Radio & Weasel - Where You Are (Official Audio)" and has over 1,000 plays. This is the official audio of the song. Click on the "More" button below the track and then click on "Download file". Save the MP3 file to your device.
-
-
-
Spotify
-
Go to spotify.com and sign up for an account or log in if you already have one. Search for "Blue 3 Where You Are". Select the track that has the title "Where You Are (feat. Radio & Weasel)" and has over 10,000 streams. This is the official track of the song. Click on the "..." button next to the track and then click on "Save to Your Library". Go to your library and find the track under "Liked Songs". With a Spotify Premium subscription, toggle "Download" to save it for offline listening. Note that Spotify downloads remain inside the Spotify app and are not standalone MP3 files you can copy to other devices.
-
-
-
iTunes
-
Go to itunes.apple.com and search for "Blue 3 Where You Are". Select the track that has the title "Where You Are (feat. Radio & Weasel)" and has a price of $0.99. This is the official track of the song. Click on the "Buy" button and enter your payment details. After purchasing, go to your library and find the track under "Purchased". Click on the "Download" button and save the MP3 file to your device.
-
-
-
Conclusion
-
In this article, we have shown you how to download Blue 3's song "Where You Are" as an MP3 file from various sources. We have also explained why downloading this song as an MP3 file is a good idea.
-
"Where You Are" is a beautiful song that deserves to be listened to over and over again. By downloading it as an MP3 file, you can enjoy it offline, without ads, and with high-quality sound.
-
So what are you waiting for? Download "Where You Are" as an MP3 file today and enjoy this Ugandan masterpiece!
-
FAQs
-
Here are some frequently asked questions and answers about downloading "Where You Are" as an MP3 file:
-
Q: Is it legal to download "Where You Are" as an MP3 file?
-
A: It depends on where you download it from and how you use it. If you download it from a source that has permission from the artists or the record label, or if you use it for personal and non-commercial purposes, then it is legal. However, if you download it from a source that does not have permission or if you use it for commercial or public purposes, then it is illegal. You should always respect the intellectual property rights of the creators and follow the terms and conditions of the source you download from.
-
Q: How can I play "Where You Are" as an MP3 file on my device?
-
A: Once you have downloaded "Where You Are" as an MP3 file, you can play it on any device that supports MP3 playback. For example, you can play it on your smartphone using the default music player app or any other app that can play MP3 files. You can also play it on your laptop or desktop computer using a program like Windows Media Player, VLC Media Player, or iTunes. You can also transfer it to an MP3 player or a USB drive and play it on any compatible device.
-
Q: How can I share "Where You Are" as an MP3 file with my friends?
-
A: If you want to share "Where You Are" as an MP3 file with your friends, you can do so in several ways. For example, you can send it to them via email, WhatsApp, Telegram, or any other messaging app. You can also upload it to a cloud service like Google Drive, Dropbox, or OneDrive and share the link with them. You can also burn it to a CD or copy it to a USB drive and give it to them physically. However, you should always make sure that you have permission from the artists or the record label before sharing their music with others.
-
Q: How can I support Blue 3 and their music?
-
A: If you love Blue 3 and their music, you can support them in various ways. For example, you can buy their albums or songs from official sources like iTunes, Spotify, or Amazon. You can also stream their music from legal platforms like YouTube, SoundCloud, or Deezer. You can also follow them on social media like Facebook, Twitter, or Instagram and show them some love and appreciation. You can also attend their concerts or events if they are available in your area. By supporting Blue 3 and their music, you are helping them to continue making amazing songs for their fans.
-
Q: Where can I find more information about Blue 3 and their music?
-
A: If you want to find more information about Blue 3 and their music, you can visit their official website at www.blue3music.com. There you can find their biography, discography, news, photos, videos, and contact details. You can also check out their Wikipedia page at https://en.wikipedia.org/wiki/Blue_3_(group) for more facts and history about them. You can also search for them on Google or any other search engine for more articles and reviews about them.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Geometry Dash Lite APK for Android 2.3 and Enjoy Rhythm-based Action Platforming!.md b/spaces/1phancelerku/anime-remove-background/Download Geometry Dash Lite APK for Android 2.3 and Enjoy Rhythm-based Action Platforming!.md
deleted file mode 100644
index 71739db3c12fdb14051fb2d5cbd7033ce54f5c2a..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Geometry Dash Lite APK for Android 2.3 and Enjoy Rhythm-based Action Platforming!.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-
Geometry Dash Lite: A Rhythm-Based Action Platformer for Android 2.3
-
If you are looking for a fun and challenging game that will test your reflexes and timing, you might want to try Geometry Dash Lite. Geometry Dash Lite is a free version of the popular game Geometry Dash, which is a rhythm-based action platformer that has millions of fans around the world. In this article, we will tell you what Geometry Dash Lite is, what features it offers, and how to download and install it on your Android device running version 2.3 or higher.
Geometry Dash Lite is a game developed by RobTop Games AB, a Swedish game studio that specializes in creating addictive and colorful games. Geometry Dash Lite is a simplified version of Geometry Dash, which has more levels, soundtracks, achievements, and an online level editor. However, Geometry Dash Lite still offers plenty of fun and challenge for casual and hardcore gamers alike.
-
Features of Geometry Dash Lite
-
Geometry Dash Lite has many features that make it an enjoyable and engaging game. Here are some of them:
-
Rhythm-based action platforming
-
The core gameplay of Geometry Dash Lite is based on jumping, flying, and flipping your way through dangerous passages and spiky obstacles. You have to tap the screen at the right moment to avoid crashing and losing. The game is synchronized with the music, so you have to follow the rhythm and the beat to succeed. The game is fast-paced and requires quick reflexes and concentration.
-
Customization options
-
You can customize your character in Geometry Dash Lite by unlocking new icons and colors. You can also choose from different vehicles, such as rockets, gravity balls, UFOs, and more. You can mix and match different combinations to create your own unique style.
-
Various game modes and levels
-
Geometry Dash Lite has several game modes to keep you entertained for hours. You can play the normal mode, where you have to complete the levels in order. You can also play the practice mode, where you can set checkpoints and practice your skills. You can also play the challenge mode, where you have to complete random levels with increasing difficulty. The game has 13 levels in total, each with its own soundtrack and theme.
-
How to download and install Geometry Dash Lite apk for Android 2.3?
-
If you want to play Geometry Dash Lite on your Android device running version 2.3 or higher, you will need to download and install the apk file of the game. An apk file is a package file that contains all the necessary files and data for an app to run on your device. Here are the requirements and steps to download and install Geometry Dash Lite apk:
-
Requirements for Geometry Dash Lite apk
-
Before you download and install Geometry Dash Lite apk, you need to make sure that your device meets the following requirements:
-
Android version
-
Your device must have Android version 2.3 or higher to run Geometry Dash Lite apk. You can check your device's Android version by going to Settings > About phone > Software information.
-
geometry dash lite apk download for android 2.3
-geometry dash lite 2.2 apk android 2.3
-geometry dash lite mod apk android 2.3
-geometry dash lite full version apk android 2.3
-geometry dash lite hack apk android 2.3
-geometry dash lite free apk android 2.3
-geometry dash lite latest apk android 2.3
-geometry dash lite old version apk android 2.3
-geometry dash lite unlimited apk android 2.3
-geometry dash lite offline apk android 2.3
-geometry dash lite app for android 2.3
-geometry dash lite game for android 2.3
-geometry dash lite update for android 2.3
-geometry dash lite cheats for android 2.3
-geometry dash lite tips for android 2.3
-geometry dash lite guide for android 2.3
-geometry dash lite levels for android 2.3
-geometry dash lite songs for android 2.3
-geometry dash lite icons for android 2.3
-geometry dash lite skins for android 2.3
-geometry dash lite online for android 2.3
-geometry dash lite play store for android 2.3
-geometry dash lite filehippo for android 2.3
-geometry dash lite robtop games for android 2.3
-geometry dash lite rhythm-based action platformer for android 2.3
-how to install geometry dash lite on android 2.3
-how to play geometry dash lite on android 2.3
-how to update geometry dash lite on android 2.3
-how to hack geometry dash lite on android 2.3
-how to unlock all levels in geometry dash lite on android 2.3
-how to get more icons in geometry dash lite on android 2.3
-how to change the music in geometry dash lite on android 2.3
-how to create your own level in geometry dash lite on android 2.3
-how to beat theory of everything in geometry dash lite on android 2.3
-how to remove ads in geometry dash lite on android 2.3
-is geometry dash lite compatible with android 2.3
-is geometry dash lite safe for android 2.3
-is geometry dash lite fun for android 2.3
-is geometry dash lite hard for android 2.3
-is geometry dash lite worth it for android 2.3
-what is the difference between geometry dash and geometry dash lite on android 2.3
-what is the best strategy for geometry dash lite on android 2.3
-what is the highest score in geometry dash lite on android 2.3
-what is the easiest level in geometry dash lite on android 2.3
-what is the hardest level in geometry dash lite on android 2.3
-why is geometry dash lite so popular on android 2.3
-why is geometry dash lite so addictive on android 2.3
-why is geometry dash lite so challenging on android 2.3
-why does geometry dash lite crash on android 2.3
-
Storage space
-
You need to have enough free storage space on your device to download and install Geometry Dash Lite apk. The size of the apk file is about 50 MB, so you need at least 100 MB of free space to avoid any errors or issues.
-
Permissions
You also need to grant some permissions to Geometry Dash Lite apk to run properly on your device. The permissions are:
-
-
Full network access: This allows the game to access the internet and download additional data.
-
View network connections: This allows the game to check the status of your network connection and optimize the performance.
-
Modify or delete the contents of your USB storage: This allows the game to save your progress and settings on your device.
-
-
You can review and manage these permissions by going to Settings > Apps > Geometry Dash Lite > Permissions.
-
Steps to download and install Geometry Dash Lite apk
-
After you have checked the requirements, you can follow these steps to download and install Geometry Dash Lite apk on your device:
-
Download the apk file from a trusted source
-
The first step is to download the apk file of Geometry Dash Lite from a reliable and secure source. You can use your browser or a third-party app store to find and download the apk file. However, you need to be careful and avoid any malicious or fake links that might harm your device or steal your data. You can use this link to download the latest version of Geometry Dash Lite apk from APKPure, a trusted and verified app store.
-
Enable unknown sources in your device settings
-
The next step is to enable unknown sources in your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on. You might see a warning message that installing apps from unknown sources might be risky, but you can ignore it if you trust the source of the apk file.
-
Locate and install the apk file
-
The third step is to locate and install the apk file on your device. You can use a file manager app or your browser's downloads folder to find the apk file. Once you find it, tap on it and follow the instructions on the screen to install it. You might see a confirmation message that asks you if you want to install this app, just tap on Install and wait for the process to finish.
-
Launch and enjoy the game
-
The final step is to launch and enjoy the game. You can find the Geometry Dash Lite icon on your home screen or app drawer. Tap on it and start playing the game. You can adjust the settings, choose a level, customize your character, and have fun with the rhythm-based action platforming.
-
Conclusion
-
Geometry Dash Lite is a great game for anyone who loves music, action, and challenge. It is a free version of Geometry Dash, which has more features and content. However, Geometry Dash Lite still offers plenty of fun and excitement for casual and hardcore gamers alike. You can download and install Geometry Dash Lite apk on your Android device running version 2.3 or higher by following the steps we have explained in this article. We hope you enjoy playing Geometry Dash Lite and have a blast with the rhythm-based action platforming.
-
FAQs
-
-
What is the difference between Geometry Dash Lite and Geometry Dash?
-Geometry Dash Lite is a free version of Geometry Dash, which is a paid game that costs $1.99. Geometry Dash Lite has fewer levels, soundtracks, achievements, and features than Geometry Dash. However, Geometry Dash Lite still offers plenty of fun and challenge for casual and hardcore gamers alike.
-
Is Geometry Dash Lite safe to download and install?
-Yes, Geometry Dash Lite is safe to download and install if you use a trusted and verified source like APKPure. However, you need to be careful and avoid any malicious or fake links that might harm your device or steal your data.
-
Can I play Geometry Dash Lite offline?
-Yes, you can play Geometry Dash Lite offline without an internet connection. However, you will need an internet connection to download additional data or access some online features like leaderboards or achievements.
-
How can I unlock more icons and colors in Geometry Dash Lite?
-You can unlock more icons and colors in Geometry Dash Lite by completing levels, collecting stars, completing achievements, or using secret coins. You can also use some codes in the vault to unlock some special icons.
-
How can I create my own levels in Geometry Dash Lite?
-You cannot create your own levels in Geometry Dash Lite as this feature is only available in Geometry Dash. However, you can play some user-created levels in challenge mode by tapping on the dice icon in the main menu.
-
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Euphoria Season 1 Download Where to Find the Full Episodes Online.md b/spaces/1phancelerku/anime-remove-background/Euphoria Season 1 Download Where to Find the Full Episodes Online.md
deleted file mode 100644
index e52ada2bef4bf65124b6f06fd4edeafc69e7eefd..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Euphoria Season 1 Download Where to Find the Full Episodes Online.md
+++ /dev/null
@@ -1,131 +0,0 @@
-
-
Download Euphoria Season 1 Reddit: How to Watch the Hit HBO Series Online
-
If you are looking for a way to download Euphoria season 1 reddit, you are not alone. Euphoria is one of the most popular and acclaimed shows of recent years, and many people want to watch it online. But how can you download Euphoria season 1 reddit safely and legally? And what are the pros and cons of doing so? In this article, we will answer these questions and more.
Euphoria is a drama series that follows a group of high-school students as they navigate a minefield of drugs, sex, identity, trauma, social media, love and friendship in today's increasingly unstable world. The show stars Zendaya as Rue, a 17-year-old drug addict who falls in love with Jules, a transgender girl played by Hunter Schafer. The show also features other talented actors such as Sydney Sweeney, Maude Apatow, Jacob Elordi, Alexa Demie, Barbie Ferreira, Algee Smith, Storm Reid, Angus Cloud, Eric Dane, Nika King and Colman Domingo.
-
A brief summary of Euphoria season 1
-
Euphoria season 1 consists of eight episodes that aired on HBO from June to August 2019. The season also has two special episodes that were released in December 2020 and January 2021. Here is a brief summary of what happens in each episode:
-
-
Episode 1: Pilot. Rue returns home from rehab with no plans to stay clean. She meets Jules at a party where she is harassed by Nate, a local jock and bully.
-
Episode 2: Stuntin' Like My Daddy. Rue tries to get clean for Jules but struggles to put the past behind her. Jules hooks up with an older man who turns out to be Nate's father.
-
Episode 3: Made You Look. Kat starts camming; Jules falls for a boy online; Rue is confronted at NA; Cassie visits McKay at college.
-
Episode 4: Shook Ones Pt. II. Rue and Jules attend a carnival where they encounter Nate and his father. Nate blackmails Jules with her nude pictures.
-
Episode 5: '03 Bonnie and Clyde. Maddy and Nate must deal with a police investigation; Rue and Jules each reflect on their relationship.
-
Episode 6: The Next Episode. On Halloween, Rue worries about her reliance on Jules when she starts acting strange.
-
Episode 7: The Trials and Tribulations of Trying to Pee While Depressed. Rue gets depressed and watches 22 straight episodes of a British reality show. Jules visits an old friend.
-
Episode 8: And Salt the Earth Behind You. In the season finale, Rue suggests to Jules that they run away together. Jules leaves but Rue stays behind and relapses.
-
Episode 9: Trouble Don't Last Always. This is the first special episode that focuses on Rue as she celebrates Christmas with her family and talks to her sponsor Ali.
-
Episode 10: F*ck Anyone Who's Not A Sea Blob. This is the second special episode that focuses on Jules as she reflects on her year and attends a therapy session.
-
The main characters and their stories
-
Euphoria features a diverse and complex cast of characters, each with their own struggles and secrets. Here are some of the main characters and their stories:
-
-
Rue Bennett (Zendaya): The protagonist and narrator of the show, Rue is a troubled teenager who suffers from bipolar disorder, anxiety, depression and drug addiction. She is in love with Jules but also fears losing her.
-
Jules Vaughn (Hunter Schafer): The deuteragonist and Rue's love interest, Jules is a transgender girl who moved to town with her father after her mother abandoned her. She is adventurous, optimistic and fearless, but also vulnerable and lonely.
-
Nate Jacobs (Jacob Elordi): The main antagonist of the show, Nate is a violent and manipulative jock who has a toxic relationship with Maddy. He is also closeted and struggles with his sexuality and identity.
-
Maddy Perez (Alexa Demie): Nate's girlfriend and Cassie's best friend, Maddy is a confident and outspoken cheerleader who likes to be the center of attention. She is often abused by Nate but still loves him.
-
Kat Hernandez (Barbie Ferreira): Rue's childhood friend and Ethan's girlfriend, Kat is a smart and sarcastic girl who undergoes a transformation after losing her virginity. She becomes a cam girl and embraces her sexuality and body positivity.
-
Cassie Howard (Sydney Sweeney): Maddy's best friend and McKay's girlfriend, Cassie is a sweet and naive girl who has a reputation for being promiscuous. She gets pregnant by McKay but decides to have an abortion.
-
Christopher McKay (Algee Smith): Cassie's boyfriend and Nate's friend, McKay is a former football star who attends college on a scholarship. He is insecure about his future and his relationship with Cassie.
-
Lexi Howard (Maude Apatow): Cassie's younger sister and Rue's friend, Lexi is a loyal and supportive girl who often feels overlooked by others. She has a crush on Rue but never acts on it.
-
Fezco (Angus Cloud): Rue's drug dealer and friend, Fezco is a kind-hearted and protective guy who cares about Rue's well-being. He has a tense relationship with Nate and his father.
-
Cal Jacobs (Eric Dane): Nate's father and Jules' lover, Cal is a successful businessman who leads a double life. He has sex with young trans women in motels while hiding his true self from his family.
-
-
The critical acclaim and awards
-
Euphoria has received widespread praise from critics and audiences alike for its realistic and unflinching portrayal of teenage life, its stunning cinematography and soundtrack, its diverse and talented cast, and its powerful performances by Zendaya and Hunter Schafer. The show has also won several awards, including:
-
download euphoria season 1 episodes free
-watch euphoria season 1 online without hbo max
-euphoria season 1 google drive link
-euphoria season 1 index of series
-euphoria season 1 torrent download
-euphoria season 1 streaming sites reddit
-euphoria season 1 cuevana3
-euphoria season 1 soap2day
-euphoria season 1 flixtor
-euphoria season 1 bflix.to
-euphoria season 1 amazon prime
-euphoria season 1 nowtv
-euphoria season 1 crave on demand
-euphoria season 1 filmlicious
-euphoria season 1 yourmovies.xyz
-euphoria season 1 justwatch.one
-euphoria season 1 jistoooiekttes.com
-euphoria season 1 movies2watch.tv
-euphoria season 1 series.movie
-download euphoria specials rue and jules
-download euphoria season 2 reddit
-download euphoria soundtrack reddit
-download euphoria subtitles reddit
-download euphoria scripts reddit
-download euphoria behind the scenes reddit
-download euphoria cast interviews reddit
-download euphoria fan edits reddit
-download euphoria fan art reddit
-download euphoria fan fiction reddit
-download euphoria memes reddit
-download zendaya's performance in euphoria reddit
-download hunter schafer's performance in euphoria reddit
-download jacob elordi's performance in euphoria reddit
-download barbie ferreira's performance in euphoria reddit
-download sydney sweeney's performance in euphoria reddit
-download maude apatow's performance in euphoria reddit
-download angus cloud's performance in euphoria reddit
-download alexa demie's performance in euphoria reddit
-download algee smith's performance in euphoria reddit
-download storm reid's performance in euphoria reddit
-
-
The Primetime Emmy Award for Outstanding Lead Actress in a Drama Series for Zendaya in 2020
-
The Satellite Award for Best Actress in a Drama / Genre Series for Zendaya in 2020
-
The People's Choice Award for The Drama TV Star of 2020 for Zendaya in 2020
-
The GLAAD Media Award for Outstanding Drama Series in 2020
-
The Critics' Choice Television Award for Best Supporting Actress in a Drama Series for Hunter Schafer in 2021
-
-
How to download Euphoria season 1 reddit
-
If you want to watch Euphoria season 1 online, you have two options: the official ways or the unofficial ways. The official ways are the legal and authorized methods to stream the show from HBO or other platforms that have the rights to distribute it. The unofficial ways are the illegal or unauthorized methods to download the show from torrent sites, streaming sites or reddit links. Let's take a look at each option in more detail.
-
The official ways to stream Euphoria season 1 online
-
The official ways to stream Euphoria season 1 online are the safest and most reliable methods to watch the show without any hassle or risk. However, they may also require you to pay a subscription fee or have access to certain devices or regions. Here are some of the official ways to stream Euphoria season 1 online:
HBO.com
-
The most obvious and direct way to stream Euphoria season 1 online is to visit the official website of HBO, the network that produces and airs the show. You can watch all the episodes of Euphoria season 1 on HBO.com, as well as the two special episodes and other bonus content. However, you will need to have an HBO subscription or a free trial to access the content. You can also use your HBO account to watch the show on other devices, such as your TV, smartphone, tablet or laptop, through the HBO app or HBO Max app.
-
Hulu
-
Another official way to stream Euphoria season 1 online is to use Hulu, a popular streaming service that offers a variety of TV shows and movies. You can watch all the episodes of Euphoria season 1 on Hulu, as well as the two special episodes and other bonus content. However, you will need to have a Hulu subscription or a free trial to access the content. You can also use your Hulu account to watch the show on other devices, such as your TV, smartphone, tablet or laptop, through the Hulu app.
-
JustWatch
-
A third official way to stream Euphoria season 1 online is to use JustWatch, a website that helps you find where to watch your favorite shows and movies online. You can search for Euphoria season 1 on JustWatch and see which platforms offer the show in your region. You can also compare the prices and features of each platform and choose the best option for you. JustWatch also provides links to the platforms where you can watch the show online.
-
The unofficial ways to download Euphoria season 1 reddit
-
The unofficial ways to download Euphoria season 1 reddit are the risky and illegal methods to watch the show without paying or following the rules. However, they may also provide you with free or cheap access to the show and allow you to watch it offline or share it with others. Here are some of the unofficial ways to download Euphoria season 1 reddit:
Torrent sites
-
One of the most common unofficial ways to download Euphoria season 1 reddit is to use torrent sites, such as The Pirate Bay, RARBG, 1337x, YTS, EZTV and others. Torrent sites are websites that allow users to share files through peer-to-peer networks. You can download Euphoria season 1 reddit by finding a torrent file that contains the episodes and using a torrent client, such as BitTorrent, uTorrent, qBittorrent or others, to download the file to your device. However, you should be aware that torrenting is illegal in many countries and can expose you to legal actions, fines or even jail time. You should also be careful of malware, viruses or fake files that can harm your device or steal your data.
-
Streaming sites
-
Another unofficial way to download Euphoria season 1 reddit is to use streaming sites, such as Putlocker, Fmovies, 123movies, Solarmovie, Gomovies and others. Streaming sites are websites that allow users to watch videos online without downloading them. You can watch Euphoria season 1 reddit by finding a streaming site that hosts the episodes and clicking on the play button. However, you should be aware that streaming is also illegal in many countries and can expose you to legal actions, fines or even jail time. You should also be careful of pop-up ads, redirects or phishing attempts that can annoy you or compromise your security.
-
Reddit links
-
A third unofficial way to download Euphoria season 1 reddit is to use reddit links, such as r/EuphoriaHBO, r/EuphoriaSeason1 or r/EuphoriaDownload. Reddit links are posts or comments on reddit that provide links to download or stream Euphoria season 1 reddit from other sources. You can download Euphoria season 1 reddit by finding a reddit link that has a working and reliable link and following the instructions. However, you should be aware that reddit links are also illegal in many countries and can expose you to legal actions, fines or even jail time. You should also be careful of broken links, low-quality videos or spam messages that can waste your time or mislead you.
-
The pros and cons of downloading Euphoria season 1 reddit
-
Downloading Euphoria season 1 reddit has its advantages and disadvantages. Here are some of the pros and cons of downloading Euphoria season 1 reddit:
-
The pros of downloading Euphoria season 1 reddit
-
Some of the benefits of downloading Euphoria season 1 reddit are:
-
-
Free or cheap access to the show: You can watch Euphoria season 1 reddit without paying a subscription fee or buying a DVD. You can also find discounts or deals on some platforms.
-
Offline viewing and sharing options: You can watch Euphoria season 1 reddit anytime and anywhere without an internet connection. You can also share the show with your friends or family.
-
No ads or interruptions: You can watch Euphoria season 1 reddit without any annoying ads or interruptions that can ruin your experience.
-
-
The cons of downloading Euphoria season 1 reddit
-
Some of the drawbacks of downloading Euphoria season 1 reddit are:
-
-
Legal and ethical issues: You can face legal actions, fines or even jail time for downloading Euphoria season 1 reddit illegally. You can also harm the creators and producers of the show by depriving them of their rightful income.
-
Quality and security risks: You can encounter low-quality videos, malware, viruses or fake files that can damage your device or steal your data.
-
Missing out on bonus content and updates: You can miss out on the bonus content and updates that are available on the official platforms, such as behind-the-scenes footage, interviews, trailers, teasers, news and more.
-
-
Conclusion: Download Euphoria season 1 reddit at your own risk
-
Euphoria is a captivating and compelling show that explores the dark and complex realities of teenage life in today's world. The show has received rave reviews from critics and audiences alike for its stunning visuals, powerful performances and gripping stories. If you want to watch Euphoria season 1 online, you have two options: the official ways or the unofficial ways. The official ways are the legal and authorized methods to stream the show from HBO or other platforms that have the rights to distribute it. The unofficial ways are the illegal or unauthorized methods to download the show from torrent sites, streaming sites or reddit links. Each option has its pros and cons that you should weigh carefully before making your decision. Downloading Euphoria season season 1 reddit can be tempting, but it also comes with many risks and challenges. You should download Euphoria season 1 reddit at your own risk and responsibility.
Here are some FAQs that you may have about downloading Euphoria season 1 reddit:
-
FAQs
-
-
Is Euphoria season 1 available on Netflix?
-
No, Euphoria season 1 is not available on Netflix. The show is exclusive to HBO and its affiliated platforms.
-
Is Euphoria season 1 worth watching?
-
Yes, Euphoria season 1 is worth watching. The show is a captivating and compelling drama that explores the dark and complex realities of teenage life in today's world. The show has received rave reviews from critics and audiences alike for its stunning visuals, powerful performances and gripping stories.
-
Is Euphoria season 1 suitable for all ages?
-
No, Euphoria season 1 is not suitable for all ages. The show contains graphic scenes of violence, sex, nudity, drug use, language and other mature themes that may be disturbing or inappropriate for younger or sensitive viewers. The show is rated TV-MA for mature audiences only.
-
When will Euphoria season 2 come out?
-
Euphoria season 2 does not have a confirmed release date yet. The show was renewed for a second season in July 2019, but the production was delayed due to the COVID-19 pandemic. The show is expected to resume filming in early 2021 and release later in the year or in early 2022.
-
How can I support the creators and producers of Euphoria?
-
You can support the creators and producers of Euphoria by watching the show legally and ethically on the official platforms, such as HBO.com, Hulu or JustWatch. You can also buy the DVD or Blu-ray of the show, subscribe to the HBO newsletter or social media accounts, or donate to the charities or causes that the show supports.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1toTree/lora_test/ppdiffusers/pipelines/ddpm/pipeline_ddpm.py b/spaces/1toTree/lora_test/ppdiffusers/pipelines/ddpm/pipeline_ddpm.py
deleted file mode 100644
index e2c09ed55c5bfa9868a231420ff8d180fd555b00..0000000000000000000000000000000000000000
--- a/spaces/1toTree/lora_test/ppdiffusers/pipelines/ddpm/pipeline_ddpm.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import List, Optional, Tuple, Union
-
-import paddle
-
-from ...configuration_utils import FrozenDict
-from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-from ...utils import deprecate
-
-
-class DDPMPipeline(DiffusionPipeline):
- r"""
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular xxxx, etc.)
-
- Parameters:
- unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
- [`DDPMScheduler`], or [`DDIMScheduler`].
- """
-
- def __init__(self, unet, scheduler):
- super().__init__()
- self.register_modules(unet=unet, scheduler=scheduler)
-
- @paddle.no_grad()
- def __call__(
- self,
- batch_size: int = 1,
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
- num_inference_steps: int = 1000,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- **kwargs,
- ) -> Union[ImagePipelineOutput, Tuple]:
- r"""
- Args:
- batch_size (`int`, *optional*, defaults to 1):
- The number of images to generate.
- generator (`paddle.Generator`, *optional*):
- One or a list of paddle generator(s) to make generation deterministic.
- num_inference_steps (`int`, *optional*, defaults to 1000):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generate image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
-
- Returns:
- [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
- `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the
- generated images.
- """
- message = (
- "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
- " DDPMScheduler.from_pretrained(, prediction_type='epsilon')`."
- )
- predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
-
- if predict_epsilon is not None:
- new_config = dict(self.scheduler.config)
- new_config["prediction_type"] = "epsilon" if predict_epsilon else "sample"
- self.scheduler._internal_dict = FrozenDict(new_config)
-
- # Sample gaussian noise to begin loop
- if isinstance(self.unet.sample_size, int):
- image_shape = (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size)
- else:
- image_shape = (batch_size, self.unet.in_channels, *self.unet.sample_size)
-
- image = paddle.randn(image_shape, generator=generator)
-
- # set step values
- self.scheduler.set_timesteps(num_inference_steps)
-
- for t in self.progress_bar(self.scheduler.timesteps):
- # 1. predict noise model_output
- model_output = self.unet(image, t).sample
-
- # 2. compute previous image: x_t -> x_t-1
- image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample
-
- image = (image / 2 + 0.5).clip(0, 1)
- image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image,)
-
- return ImagePipelineOutput(images=image)
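For reference, a minimal usage sketch of the pipeline removed above. It assumes a working `ppdiffusers` install, and the `google/ddpm-cifar10-32` checkpoint name is purely illustrative (the stock example from the upstream diffusers docs), not something taken from this repository.

```python
# Minimal sketch: draw one sample with the DDPMPipeline defined above.
# The import path and checkpoint name are assumptions for illustration only.
from ppdiffusers import DDPMPipeline

pipe = DDPMPipeline.from_pretrained("google/ddpm-cifar10-32")

# batch_size, num_inference_steps and output_type mirror the __call__ signature above.
result = pipe(batch_size=1, num_inference_steps=1000, output_type="pil")
result.images[0].save("ddpm_sample.png")
```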
diff --git a/spaces/44brabal/runwayml-stable-diffusion-v1-5/README.md b/spaces/44brabal/runwayml-stable-diffusion-v1-5/README.md
deleted file mode 100644
index 7bfa8bb7339d3c063b18f94c517f864a204d627b..0000000000000000000000000000000000000000
--- a/spaces/44brabal/runwayml-stable-diffusion-v1-5/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Runwayml Stable Diffusion V1 5
-emoji: 🏃
-colorFrom: purple
-colorTo: green
-sdk: gradio
-sdk_version: 3.45.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/A00001/bingothoo/src/components/ui/badge.tsx b/spaces/A00001/bingothoo/src/components/ui/badge.tsx
deleted file mode 100644
index d9a84b394090e5b4b3bd34f6135b9a2f2ead0aa2..0000000000000000000000000000000000000000
--- a/spaces/A00001/bingothoo/src/components/ui/badge.tsx
+++ /dev/null
@@ -1,36 +0,0 @@
-import * as React from 'react'
-import { cva, type VariantProps } from 'class-variance-authority'
-
-import { cn } from '@/lib/utils'
-
-const badgeVariants = cva(
- 'inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2',
- {
- variants: {
- variant: {
- default:
- 'border-transparent bg-primary text-primary-foreground hover:bg-primary/80',
- secondary:
- 'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80',
- destructive:
- 'border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80',
- outline: 'text-foreground'
- }
- },
- defaultVariants: {
- variant: 'default'
- }
- }
-)
-
-export interface BadgeProps
-  extends React.HTMLAttributes<HTMLDivElement>,
-    VariantProps<typeof badgeVariants> {}
-
-function Badge({ className, variant, ...props }: BadgeProps) {
- return (
-    <div className={cn(badgeVariants({ variant }), className)} {...props} />
- )
-}
-
-export { Badge, badgeVariants }
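A short, hypothetical consumer of the Badge component removed above, assuming the space keeps the `@/` path alias used in the component's own imports:

```tsx
import { Badge } from '@/components/ui/badge'

// variant must be one of the keys defined in badgeVariants:
// 'default' | 'secondary' | 'destructive' | 'outline'.
export function StatusBadge() {
  return <Badge variant="secondary">beta</Badge>
}
```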
diff --git a/spaces/AIZ2H/02-Gradio-Art-From-Text-And-Images/app.py b/spaces/AIZ2H/02-Gradio-Art-From-Text-And-Images/app.py
deleted file mode 100644
index 10939427025b17176765402185cd11e23caa1523..0000000000000000000000000000000000000000
--- a/spaces/AIZ2H/02-Gradio-Art-From-Text-And-Images/app.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import os
-
-os.system("git clone --recursive https://github.com/JD-P/cloob-latent-diffusion")
-os.system("cd cloob-latent-diffusion;pip install omegaconf pillow pytorch-lightning einops wandb ftfy regex ./CLIP")
-
-import argparse
-from functools import partial
-from pathlib import Path
-import sys
-sys.path.append('./cloob-latent-diffusion')
-sys.path.append('./cloob-latent-diffusion/cloob-training')
-sys.path.append('./cloob-latent-diffusion/latent-diffusion')
-sys.path.append('./cloob-latent-diffusion/taming-transformers')
-sys.path.append('./cloob-latent-diffusion/v-diffusion-pytorch')
-from omegaconf import OmegaConf
-from PIL import Image
-import torch
-from torch import nn
-from torch.nn import functional as F
-from torchvision import transforms
-from torchvision.transforms import functional as TF
-from tqdm import trange
-from CLIP import clip
-from cloob_training import model_pt, pretrained
-import ldm.models.autoencoder
-from diffusion import sampling, utils
-import train_latent_diffusion as train
-from huggingface_hub import hf_hub_url, cached_download
-import random
-
-# Download the model files
-checkpoint = cached_download(hf_hub_url("huggan/distill-ccld-wa", filename="model_student.ckpt"))
-ae_model_path = cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.ckpt"))
-ae_config_path = cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.yaml"))
-
-# Define a few utility functions
-
-
-def parse_prompt(prompt, default_weight=3.):
- if prompt.startswith('http://') or prompt.startswith('https://'):
- vals = prompt.rsplit(':', 2)
- vals = [vals[0] + ':' + vals[1], *vals[2:]]
- else:
- vals = prompt.rsplit(':', 1)
- vals = vals + ['', default_weight][len(vals):]
- return vals[0], float(vals[1])
-
-
-def resize_and_center_crop(image, size):
- fac = max(size[0] / image.size[0], size[1] / image.size[1])
- image = image.resize((int(fac * image.size[0]), int(fac * image.size[1])), Image.LANCZOS)
- return TF.center_crop(image, size[::-1])
-
-
-# Load the models
-device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-print('Using device:', device)
-print('loading models')
-
-# autoencoder
-ae_config = OmegaConf.load(ae_config_path)
-ae_model = ldm.models.autoencoder.AutoencoderKL(**ae_config.model.params)
-ae_model.eval().requires_grad_(False).to(device)
-ae_model.load_state_dict(torch.load(ae_model_path))
-n_ch, side_y, side_x = 4, 32, 32
-
-# diffusion model
-model = train.DiffusionModel(192, [1,1,2,2], autoencoder_scale=torch.tensor(4.3084))
-model.load_state_dict(torch.load(checkpoint, map_location='cpu'))
-model = model.to(device).eval().requires_grad_(False)
-
-# CLOOB
-cloob_config = pretrained.get_config('cloob_laion_400m_vit_b_16_16_epochs')
-cloob = model_pt.get_pt_model(cloob_config)
-checkpoint = pretrained.download_checkpoint(cloob_config)
-cloob.load_state_dict(model_pt.get_pt_params(cloob_config, checkpoint))
-cloob.eval().requires_grad_(False).to(device)
-
-
-# The key function: returns a list of n PIL images
-def generate(n=1, prompts=['a red circle'], images=[], seed=42, steps=15,
- method='plms', eta=None):
- zero_embed = torch.zeros([1, cloob.config['d_embed']], device=device)
- target_embeds, weights = [zero_embed], []
-
- for prompt in prompts:
- txt, weight = parse_prompt(prompt)
- target_embeds.append(cloob.text_encoder(cloob.tokenize(txt).to(device)).float())
- weights.append(weight)
-
- for prompt in images:
- path, weight = parse_prompt(prompt)
- img = Image.open(utils.fetch(path)).convert('RGB')
- clip_size = cloob.config['image_encoder']['image_size']
- img = resize_and_center_crop(img, (clip_size, clip_size))
- batch = TF.to_tensor(img)[None].to(device)
- embed = F.normalize(cloob.image_encoder(cloob.normalize(batch)).float(), dim=-1)
- target_embeds.append(embed)
- weights.append(weight)
-
- weights = torch.tensor([1 - sum(weights), *weights], device=device)
-
- torch.manual_seed(seed)
-
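- # Classifier-free-guidance-style wrapper: run the diffusion model once per conditioning embedding (including the unconditional zero embedding) and combine the predictions using the prompt weights.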
- def cfg_model_fn(x, t):
- n = x.shape[0]
- n_conds = len(target_embeds)
- x_in = x.repeat([n_conds, 1, 1, 1])
- t_in = t.repeat([n_conds])
- clip_embed_in = torch.cat([*target_embeds]).repeat_interleave(n, 0)
- vs = model(x_in, t_in, clip_embed_in).view([n_conds, n, *x.shape[1:]])
- v = vs.mul(weights[:, None, None, None, None]).sum(0)
- return v
-
- def run(x, steps):
- if method == 'ddpm':
- return sampling.sample(cfg_model_fn, x, steps, 1., {})
- if method == 'ddim':
- return sampling.sample(cfg_model_fn, x, steps, eta, {})
- if method == 'prk':
- return sampling.prk_sample(cfg_model_fn, x, steps, {})
- if method == 'plms':
- return sampling.plms_sample(cfg_model_fn, x, steps, {})
- if method == 'pie':
- return sampling.pie_sample(cfg_model_fn, x, steps, {})
- if method == 'plms2':
- return sampling.plms2_sample(cfg_model_fn, x, steps, {})
- assert False
-
- batch_size = n
- x = torch.randn([n, n_ch, side_y, side_x], device=device)
- t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
- steps = utils.get_spliced_ddpm_cosine_schedule(t)
- pil_ims = []
- for i in trange(0, n, batch_size):
- cur_batch_size = min(n - i, batch_size)
- out_latents = run(x[i:i+cur_batch_size], steps)
- outs = ae_model.decode(out_latents * torch.tensor(2.55).to(device))
- for j, out in enumerate(outs):
- pil_ims.append(utils.to_pil_image(out))
-
- return pil_ims
-
-
-import gradio as gr
-
-def gen_ims(prompt, im_prompt=None, seed=None, n_steps=10, method='plms'):
- if seed is None:
- seed = random.randint(0, 10000)
- print(prompt, im_prompt, seed, n_steps)
- prompts = [prompt]
- im_prompts = []
- if im_prompt is not None:
- im_prompts = [im_prompt]
- pil_ims = generate(n=1, prompts=prompts, images=im_prompts, seed=seed, steps=n_steps, method=method)
- return pil_ims[0]
-
-iface = gr.Interface(fn=gen_ims,
- inputs=[#gr.inputs.Slider(minimum=1, maximum=1, step=1, default=1,label="Number of images"),
- #gr.inputs.Slider(minimum=0, maximum=200, step=1, label='Random seed', default=0),
- gr.inputs.Textbox(label="Text prompt"),
- gr.inputs.Image(optional=True, label="Image prompt", type='filepath'),
- #gr.inputs.Slider(minimum=10, maximum=35, step=1, default=15,label="Number of steps")
- ],
- outputs=[gr.outputs.Image(type="pil", label="Generated Image")],
- examples=[
- ["Futurism, in the style of Wassily Kandinsky"],
- ["Art Nouveau, in the style of John Singer Sargent"],
- ["Surrealism, in the style of Edgar Degas"],
- ["Expressionism, in the style of Wassily Kandinsky"],
- ["Futurism, in the style of Egon Schiele"],
- ["Neoclassicism, in the style of Gustav Klimt"],
- ["Cubism, in the style of Gustav Klimt"],
- ["Op Art, in the style of Marc Chagall"],
- ["Romanticism, in the style of M.C. Escher"],
- ["Futurism, in the style of M.C. Escher"],
- ["Abstract Art, in the style of M.C. Escher"],
- ["Mannerism, in the style of Paul Klee"],
- ["Romanesque Art, in the style of Leonardo da Vinci"],
- ["High Renaissance, in the style of Rembrandt"],
- ["Magic Realism, in the style of Gustave Dore"],
- ["Realism, in the style of Jean-Michel Basquiat"],
- ["Art Nouveau, in the style of Paul Gauguin"],
- ["Avant-garde, in the style of Pierre-Auguste Renoir"],
- ["Baroque, in the style of Edward Hopper"],
- ["Post-Impressionism, in the style of Wassily Kandinsky"],
- ["Naturalism, in the style of Rene Magritte"],
- ["Constructivism, in the style of Paul Cezanne"],
- ["Abstract Expressionism, in the style of Henri Matisse"],
- ["Pop Art, in the style of Vincent van Gogh"],
- ["Futurism, in the style of Wassily Kandinsky"],
- ["Futurism, in the style of Zdzislaw Beksinski"],
- ['Surrealism, in the style of Salvador Dali'],
- ["Aaron Wacker, oil on canvas"],
- ["abstract"],
- ["landscape"],
- ["portrait"],
- ["sculpture"],
- ["genre painting"],
- ["installation"],
- ["photo"],
- ["figurative"],
- ["illustration"],
- ["still life"],
- ["history painting"],
- ["cityscape"],
- ["marina"],
- ["animal painting"],
- ["design"],
- ["calligraphy"],
- ["symbolic painting"],
- ["graffiti"],
- ["performance"],
- ["mythological painting"],
- ["battle painting"],
- ["self-portrait"],
- ["Impressionism, oil on canvas"]
- ],
- title='Art Generator and Style Mixer from 🧠 Cloob and 🎨 WikiArt - Visual Art Encyclopedia:',
- description="Trained on images from the [WikiArt](https://www.wikiart.org/) dataset, comprised of visual arts",
- article = 'Model used is: [model card](https://huggingface.co/huggan/distill-ccld-wa)..'
-
-)
-iface.launch(enable_queue=True) # , debug=True for colab debugging
\ No newline at end of file
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/you.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/you.py
deleted file mode 100644
index 02985ed14d4848c2de20a99b4771d208286a2558..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/you.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import sys
-import json
-import urllib.parse
-
-from curl_cffi import requests
-
-config = json.loads(sys.argv[1])
-messages = config['messages']
-prompt = ''
-
-
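-# Convert the OpenAI-style role/content messages into the question/answer pairs that you.com's streaming chat endpoint expects in its "chat" query parameter.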
-def transform(messages: list) -> list:
- result = []
- i = 0
-
- while i < len(messages):
- if messages[i]['role'] == 'user':
- question = messages[i]['content']
- i += 1
-
- if i < len(messages) and messages[i]['role'] == 'assistant':
- answer = messages[i]['content']
- i += 1
- else:
- answer = ''
-
- result.append({'question': question, 'answer': answer})
-
- elif messages[i]['role'] == 'assistant':
- result.append({'question': '', 'answer': messages[i]['content']})
- i += 1
-
- elif messages[i]['role'] == 'system':
- result.append({'question': messages[i]['content'], 'answer': ''})
- i += 1
-
- return result
-
-headers = {
- 'Content-Type': 'application/x-www-form-urlencoded',
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
- 'Sec-Fetch-Site': 'same-origin',
- 'Accept-Language': 'en-GB,en;q=0.9',
- 'Sec-Fetch-Mode': 'navigate',
- 'Host': 'you.com',
- 'Origin': 'https://you.com',
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
- 'Referer': 'https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA',
- 'Connection': 'keep-alive',
- 'Sec-Fetch-Dest': 'document',
- 'Priority': 'u=0, i',
-}
-
-if messages[-1]['role'] == 'user':
- prompt = messages[-1]['content']
- messages = messages[:-1]
-
-params = urllib.parse.urlencode({
- 'q': prompt,
- 'domain': 'youchat',
- 'chat': transform(messages)
-})
-
-def output(chunk):
- if b'"youChatToken"' in chunk:
- chunk_json = json.loads(chunk.decode().split('data: ')[1])
-
- print(chunk_json['youChatToken'], flush=True, end = '')
-
-while True:
- try:
- response = requests.get(f'https://you.com/api/streamingSearch?{params}',
- headers=headers, content_callback=output, impersonate='safari15_5')
-
- exit(0)
-
- except Exception as e:
- print('an error occurred, retrying... |', e, flush=True)
- continue
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/toggleswitchshape.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/toggleswitchshape.d.ts
deleted file mode 100644
index 6050bdf04a84b72d4a17b715b7871663b1df208a..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/toggleswitchshape.d.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-import ToggleSwitchShape from './gameobjects/shape/toggleswitch/ToggleSwitchShape';
-export default ToggleSwitchShape;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/puff/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/puff/Factory.js
deleted file mode 100644
index fc6c647eda3e594ee880fa1d557f5ea1b1ca9f5b..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/puff/Factory.js
+++ /dev/null
@@ -1,13 +0,0 @@
-import Puff from './Puff.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('puff', function (config) {
- var gameObject = new Puff(this.scene, config);
- this.scene.add.existing(gameObject);
- return gameObject;
-});
-
-SetValue(window, 'RexPlugins.Spinner.Puff', Puff);
-
-export default Puff;
\ No newline at end of file
diff --git a/spaces/AiMimicry/sovits-models/hubert/hubert_model.py b/spaces/AiMimicry/sovits-models/hubert/hubert_model.py
deleted file mode 100644
index 7fb642d89b07ca60792debab18e3454f52d8f357..0000000000000000000000000000000000000000
--- a/spaces/AiMimicry/sovits-models/hubert/hubert_model.py
+++ /dev/null
@@ -1,222 +0,0 @@
-import copy
-import random
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as t_func
-from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
-
-
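-# HuBERT encoder: convolutional feature extractor plus Transformer encoder; exposes 256-dim projected features and cosine-similarity logits over the label embeddings.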
-class Hubert(nn.Module):
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
- super().__init__()
- self._mask = mask
- self.feature_extractor = FeatureExtractor()
- self.feature_projection = FeatureProjection()
- self.positional_embedding = PositionalConvEmbedding()
- self.norm = nn.LayerNorm(768)
- self.dropout = nn.Dropout(0.1)
- self.encoder = TransformerEncoder(
- nn.TransformerEncoderLayer(
- 768, 12, 3072, activation="gelu", batch_first=True
- ),
- 12,
- )
- self.proj = nn.Linear(768, 256)
-
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
-
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- mask = None
- if self.training and self._mask:
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
- x[mask] = self.masked_spec_embed.to(x.dtype)
- return x, mask
-
- def encode(
- self, x: torch.Tensor, layer: Optional[int] = None
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- x = self.feature_extractor(x)
- x = self.feature_projection(x.transpose(1, 2))
- x, mask = self.mask(x)
- x = x + self.positional_embedding(x)
- x = self.dropout(self.norm(x))
- x = self.encoder(x, output_layer=layer)
- return x, mask
-
- def logits(self, x: torch.Tensor) -> torch.Tensor:
- logits = torch.cosine_similarity(
- x.unsqueeze(2),
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
- dim=-1,
- )
- return logits / 0.1
-
- def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- x, mask = self.encode(x)
- x = self.proj(x)
- logits = self.logits(x)
- return logits, mask
-
-
-class HubertSoft(Hubert):
- def __init__(self):
- super().__init__()
-
- @torch.inference_mode()
- def units(self, wav: torch.Tensor) -> torch.Tensor:
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
- x, _ = self.encode(wav)
- return self.proj(x)
-
-
-class FeatureExtractor(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
- self.norm0 = nn.GroupNorm(512, 512)
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = t_func.gelu(self.norm0(self.conv0(x)))
- x = t_func.gelu(self.conv1(x))
- x = t_func.gelu(self.conv2(x))
- x = t_func.gelu(self.conv3(x))
- x = t_func.gelu(self.conv4(x))
- x = t_func.gelu(self.conv5(x))
- x = t_func.gelu(self.conv6(x))
- return x
-
-
-class FeatureProjection(nn.Module):
- def __init__(self):
- super().__init__()
- self.norm = nn.LayerNorm(512)
- self.projection = nn.Linear(512, 768)
- self.dropout = nn.Dropout(0.1)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.norm(x)
- x = self.projection(x)
- x = self.dropout(x)
- return x
-
-
-class PositionalConvEmbedding(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv = nn.Conv1d(
- 768,
- 768,
- kernel_size=128,
- padding=128 // 2,
- groups=16,
- )
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.conv(x.transpose(1, 2))
- x = t_func.gelu(x[:, :, :-1])
- return x.transpose(1, 2)
-
-
-class TransformerEncoder(nn.Module):
- def __init__(
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
- ) -> None:
- super(TransformerEncoder, self).__init__()
- self.layers = nn.ModuleList(
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
- )
- self.num_layers = num_layers
-
- def forward(
- self,
- src: torch.Tensor,
- mask: torch.Tensor = None,
- src_key_padding_mask: torch.Tensor = None,
- output_layer: Optional[int] = None,
- ) -> torch.Tensor:
- output = src
- for layer in self.layers[:output_layer]:
- output = layer(
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
- )
- return output
-
-
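-# SpecAugment-style span masking: sample span start indices uniformly and mark mask_length consecutive frames per sampled span.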
-def _compute_mask(
- shape: Tuple[int, int],
- mask_prob: float,
- mask_length: int,
- device: torch.device,
- min_masks: int = 0,
-) -> torch.Tensor:
- batch_size, sequence_length = shape
-
- if mask_length < 1:
- raise ValueError("`mask_length` has to be bigger than 0.")
-
- if mask_length > sequence_length:
- raise ValueError(
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
- )
-
- # compute number of masked spans in batch
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
- num_masked_spans = max(num_masked_spans, min_masks)
-
- # make sure num masked indices <= sequence_length
- if num_masked_spans * mask_length > sequence_length:
- num_masked_spans = sequence_length // mask_length
-
- # SpecAugment mask to fill
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
-
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
- uniform_dist = torch.ones(
- (batch_size, sequence_length - (mask_length - 1)), device=device
- )
-
- # get random indices to mask
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
-
- # expand masked indices to masked spans
- mask_indices = (
- mask_indices.unsqueeze(dim=-1)
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- offsets = (
- torch.arange(mask_length, device=device)[None, None, :]
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- mask_idxs = mask_indices + offsets
-
- # scatter indices to mask
- mask = mask.scatter(1, mask_idxs, True)
-
- return mask
-
-
-def hubert_soft(
- path: str,
-) -> HubertSoft:
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
- Args:
- path (str): path of a pretrained model
- """
- hubert = HubertSoft()
- checkpoint = torch.load(path)
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
- hubert.load_state_dict(checkpoint)
- hubert.eval()
- return hubert
diff --git a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/monotonic_align/setup.py b/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/monotonic_align/setup.py
deleted file mode 100644
index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000
--- a/spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/monotonic_align/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from distutils.core import setup
-from Cython.Build import cythonize
-import numpy
-
-setup(
- name = 'monotonic_align',
- ext_modules = cythonize("core.pyx"),
- include_dirs=[numpy.get_include()]
-)
diff --git a/spaces/Alfasign/HuggingGPT-Lite/awesome_chat.py b/spaces/Alfasign/HuggingGPT-Lite/awesome_chat.py
deleted file mode 100644
index 8c87e1150b75dde5a93b394ae4dab123440dbcee..0000000000000000000000000000000000000000
--- a/spaces/Alfasign/HuggingGPT-Lite/awesome_chat.py
+++ /dev/null
@@ -1,933 +0,0 @@
-import base64
-import copy
-import datetime
-from io import BytesIO
-import io
-import os
-import random
-import time
-import traceback
-import uuid
-import requests
-import re
-import json
-import logging
-import argparse
-import yaml
-from PIL import Image, ImageDraw
-from diffusers.utils import load_image
-from pydub import AudioSegment
-import threading
-from queue import Queue
-from get_token_ids import get_token_ids_for_task_parsing, get_token_ids_for_choose_model, count_tokens, get_max_context_length
-from huggingface_hub.inference_api import InferenceApi
-from huggingface_hub.inference_api import ALL_TASKS
-from models_server import models, status
-from functools import partial
-from huggingface_hub import Repository
-
-parser = argparse.ArgumentParser()
-parser.add_argument("--config", type=str, default="config.yaml.dev")
-parser.add_argument("--mode", type=str, default="cli")
-args = parser.parse_args()
-
-if __name__ != "__main__":
- args.config = "config.gradio.yaml"
-
-config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
-
-if not os.path.exists("logs"):
- os.mkdir("logs")
-
-now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
-
-DATASET_REPO_URL = "https://huggingface.co/datasets/tricktreat/HuggingGPT_logs"
-LOG_HF_TOKEN = os.environ.get("LOG_HF_TOKEN")
-if LOG_HF_TOKEN:
- repo = Repository(
- local_dir="logs", clone_from=DATASET_REPO_URL, use_auth_token=LOG_HF_TOKEN
- )
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.CRITICAL)
-
-handler = logging.StreamHandler()
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-handler.setFormatter(formatter)
-if not config["debug"]:
- handler.setLevel(logging.INFO)
-logger.addHandler(handler)
-
-log_file = config["log_file"]
-if log_file:
- log_file = log_file.replace("TIMESTAMP", now)
- filehandler = logging.FileHandler(log_file)
- filehandler.setLevel(logging.DEBUG)
- filehandler.setFormatter(formatter)
- logger.addHandler(filehandler)
-
-LLM = config["model"]
-use_completion = config["use_completion"]
-
-# consistent: wrong msra model name
-LLM_encoding = LLM
-if LLM == "gpt-3.5-turbo":
- LLM_encoding = "text-davinci-003"
-task_parsing_highlight_ids = get_token_ids_for_task_parsing(LLM_encoding)
-choose_model_highlight_ids = get_token_ids_for_choose_model(LLM_encoding)
-
-# ENDPOINT MODEL NAME
-# /v1/chat/completions gpt-4, gpt-4-0314, gpt-4-32k, gpt-4-32k-0314, gpt-3.5-turbo, gpt-3.5-turbo-0301
-# /v1/completions text-davinci-003, text-davinci-002, text-curie-001, text-babbage-001, text-ada-001, davinci, curie, babbage, ada
-
-if use_completion:
- api_name = "completions"
-else:
- api_name = "chat/completions"
-
-if not config["dev"]:
- if not config["openai"]["key"].startswith("sk-") and not config["openai"]["key"]=="gradio":
- raise ValueError("Incrorrect OpenAI key. Please check your config.yaml file.")
- OPENAI_KEY = config["openai"]["key"]
- endpoint = f"https://api.openai.com/v1/{api_name}"
- if OPENAI_KEY.startswith("sk-"):
- HEADER = {
- "Authorization": f"Bearer {OPENAI_KEY}"
- }
- else:
- HEADER = None
-else:
- endpoint = f"{config['local']['endpoint']}/v1/{api_name}"
- HEADER = None
-
-PROXY = None
-if config["proxy"]:
- PROXY = {
- "https": config["proxy"],
- }
-
-inference_mode = config["inference_mode"]
-
-parse_task_demos_or_presteps = open(config["demos_or_presteps"]["parse_task"], "r").read()
-choose_model_demos_or_presteps = open(config["demos_or_presteps"]["choose_model"], "r").read()
-response_results_demos_or_presteps = open(config["demos_or_presteps"]["response_results"], "r").read()
-
-parse_task_prompt = config["prompt"]["parse_task"]
-choose_model_prompt = config["prompt"]["choose_model"]
-response_results_prompt = config["prompt"]["response_results"]
-
-parse_task_tprompt = config["tprompt"]["parse_task"]
-choose_model_tprompt = config["tprompt"]["choose_model"]
-response_results_tprompt = config["tprompt"]["response_results"]
-
-MODELS = [json.loads(line) for line in open("data/p0_models.jsonl", "r").readlines()]
-MODELS_MAP = {}
-for model in MODELS:
- tag = model["task"]
- if tag not in MODELS_MAP:
- MODELS_MAP[tag] = []
- MODELS_MAP[tag].append(model)
-METADATAS = {}
-for model in MODELS:
- METADATAS[model["id"]] = model
-
-def convert_chat_to_completion(data):
- messages = data.pop('messages', [])
- tprompt = ""
- if messages[0]['role'] == "system":
- tprompt = messages[0]['content']
- messages = messages[1:]
- final_prompt = ""
- for message in messages:
- if message['role'] == "user":
- final_prompt += (""+ "user" + "\n" + message['content'] + "\n")
- elif message['role'] == "assistant":
- final_prompt += (""+ "assistant" + "\n" + message['content'] + "\n")
- else:
- final_prompt += (""+ "system" + "\n" + message['content'] + "\n")
- final_prompt = tprompt + final_prompt
- final_prompt = final_prompt + "assistant"
- data["prompt"] = final_prompt
- data['stop'] = data.get('stop', [""])
- data['max_tokens'] = data.get('max_tokens', max(get_max_context_length(LLM) - count_tokens(LLM_encoding, final_prompt), 1))
- return data
-
-def send_request(data):
- global HEADER
- openaikey = data.pop("openaikey")
- if use_completion:
- data = convert_chat_to_completion(data)
- if openaikey and openaikey.startswith("sk-"):
- HEADER = {
- "Authorization": f"Bearer {openaikey}"
- }
-
- response = requests.post(endpoint, json=data, headers=HEADER, proxies=PROXY)
- logger.debug(response.text.strip())
- if "choices" not in response.json():
- return response.json()
- if use_completion:
- return response.json()["choices"][0]["text"].strip()
- else:
- return response.json()["choices"][0]["message"]["content"].strip()
-
-def replace_slot(text, entries):
- for key, value in entries.items():
- if not isinstance(value, str):
- value = str(value)
- text = text.replace("{{" + key +"}}", value.replace('"', "'").replace('\n', ""))
- return text
-
-def find_json(s):
- s = s.replace("\'", "\"")
- start = s.find("{")
- end = s.rfind("}")
- res = s[start:end+1]
- res = res.replace("\n", "")
- return res
-
-def field_extract(s, field):
- try:
- field_rep = re.compile(f'{field}.*?:.*?"(.*?)"', re.IGNORECASE)
- extracted = field_rep.search(s).group(1).replace("\"", "\'")
- except:
- field_rep = re.compile(f'{field}:\ *"(.*?)"', re.IGNORECASE)
- extracted = field_rep.search(s).group(1).replace("\"", "\'")
- return extracted
-
-def get_id_reason(choose_str):
- reason = field_extract(choose_str, "reason")
- id = field_extract(choose_str, "id")
- choose = {"id": id, "reason": reason}
- return id.strip(), reason.strip(), choose
-
-def record_case(success, **args):
- if not success:
- return
- f = open(f"logs/log_success_{now}.jsonl", "a")
- log = args
- f.write(json.dumps(log) + "\n")
- f.close()
- if LOG_HF_TOKEN:
- commit_url = repo.push_to_hub(blocking=False)
-
-def image_to_bytes(img_url):
- img_byte = io.BytesIO()
- type = img_url.split(".")[-1]
- load_image(img_url).save(img_byte, format="png")
- img_data = img_byte.getvalue()
- return img_data
-
-def resource_has_dep(command):
- args = command["args"]
- for _, v in args.items():
- if "" in v:
- return True
- return False
-
-def fix_dep(tasks):
- for task in tasks:
- args = task["args"]
- task["dep"] = []
- for k, v in args.items():
- if "" in v:
- dep_task_id = int(v.split("-")[1])
- if dep_task_id not in task["dep"]:
- task["dep"].append(dep_task_id)
- if len(task["dep"]) == 0:
- task["dep"] = [-1]
- return tasks
-
-def unfold(tasks):
- flag_unfold_task = False
- try:
- for task in tasks:
- for key, value in task["args"].items():
- if "" in value:
- generated_items = value.split(",")
- if len(generated_items) > 1:
- flag_unfold_task = True
- for item in generated_items:
- new_task = copy.deepcopy(task)
- dep_task_id = int(item.split("-")[1])
- new_task["dep"] = [dep_task_id]
- new_task["args"][key] = item
- tasks.append(new_task)
- tasks.remove(task)
- except Exception as e:
- print(e)
- traceback.print_exc()
- logger.debug("unfold task failed.")
-
- if flag_unfold_task:
- logger.debug(f"unfold tasks: {tasks}")
-
- return tasks
-
-def chitchat(messages, openaikey=None):
- data = {
- "model": LLM,
- "messages": messages,
- "openaikey": openaikey
- }
- return send_request(data)
-
-def parse_task(context, input, openaikey=None):
- demos_or_presteps = parse_task_demos_or_presteps
- messages = json.loads(demos_or_presteps)
- messages.insert(0, {"role": "system", "content": parse_task_tprompt})
-
- # cut chat logs
- start = 0
- while start <= len(context):
- history = context[start:]
- prompt = replace_slot(parse_task_prompt, {
- "input": input,
- "context": history
- })
- messages.append({"role": "user", "content": prompt})
- history_text = "<im_end>\nuser<im_start>".join([m["content"] for m in messages])
- num = count_tokens(LLM_encoding, history_text)
- if get_max_context_length(LLM) - num > 800:
- break
- messages.pop()
- start += 2
-
- logger.debug(messages)
- data = {
- "model": LLM,
- "messages": messages,
- "temperature": 0,
- "logit_bias": {item: config["logit_bias"]["parse_task"] for item in task_parsing_highlight_ids},
- "openaikey": openaikey
- }
- return send_request(data)
-
-def choose_model(input, task, metas, openaikey = None):
- prompt = replace_slot(choose_model_prompt, {
- "input": input,
- "task": task,
- "metas": metas,
- })
- demos_or_presteps = replace_slot(choose_model_demos_or_presteps, {
- "input": input,
- "task": task,
- "metas": metas
- })
- messages = json.loads(demos_or_presteps)
- messages.insert(0, {"role": "system", "content": choose_model_tprompt})
- messages.append({"role": "user", "content": prompt})
- logger.debug(messages)
- data = {
- "model": LLM,
- "messages": messages,
- "temperature": 0,
- "logit_bias": {item: config["logit_bias"]["choose_model"] for item in choose_model_highlight_ids}, # 5
- "openaikey": openaikey
- }
- return send_request(data)
-
-
-def response_results(input, results, openaikey=None):
- results = [v for k, v in sorted(results.items(), key=lambda item: item[0])]
- prompt = replace_slot(response_results_prompt, {
- "input": input,
- })
- demos_or_presteps = replace_slot(response_results_demos_or_presteps, {
- "input": input,
- "processes": results
- })
- messages = json.loads(demos_or_presteps)
- messages.insert(0, {"role": "system", "content": response_results_tprompt})
- messages.append({"role": "user", "content": prompt})
- logger.debug(messages)
- data = {
- "model": LLM,
- "messages": messages,
- "temperature": 0,
- "openaikey": openaikey
- }
- return send_request(data)
-
-def huggingface_model_inference(model_id, data, task, huggingfacetoken=None):
- if huggingfacetoken is None:
- HUGGINGFACE_HEADERS = {}
- else:
- HUGGINGFACE_HEADERS = {
- "Authorization": f"Bearer {huggingfacetoken}",
- }
- task_url = f"https://api-inference.huggingface.co/models/{model_id}" # InferenceApi does not yet support some tasks
- inference = InferenceApi(repo_id=model_id, token=huggingfacetoken)
-
- # NLP tasks
- if task == "question-answering":
- inputs = {"question": data["text"], "context": (data["context"] if "context" in data else "" )}
- result = inference(inputs)
- if task == "sentence-similarity":
- inputs = {"source_sentence": data["text1"], "target_sentence": data["text2"]}
- result = inference(inputs)
- if task in ["text-classification", "token-classification", "text2text-generation", "summarization", "translation", "conversational", "text-generation"]:
- inputs = data["text"]
- result = inference(inputs)
-
- # CV tasks
- if task == "visual-question-answering" or task == "document-question-answering":
- img_url = data["image"]
- text = data["text"]
- img_data = image_to_bytes(img_url)
- img_base64 = base64.b64encode(img_data).decode("utf-8")
- json_data = {}
- json_data["inputs"] = {}
- json_data["inputs"]["question"] = text
- json_data["inputs"]["image"] = img_base64
- result = requests.post(task_url, headers=HUGGINGFACE_HEADERS, json=json_data).json()
- # result = inference(inputs) # not support
-
- if task == "image-to-image":
- img_url = data["image"]
- img_data = image_to_bytes(img_url)
- # result = inference(data=img_data) # not support
- HUGGINGFACE_HEADERS["Content-Length"] = str(len(img_data))
- r = requests.post(task_url, headers=HUGGINGFACE_HEADERS, data=img_data)
- result = r.json()
- if "path" in result:
- result["generated image"] = result.pop("path")
-
- if task == "text-to-image":
- inputs = data["text"]
- img = inference(inputs)
- name = str(uuid.uuid4())[:4]
- img.save(f"public/images/{name}.png")
- result = {}
- result["generated image"] = f"/images/{name}.png"
-
- if task == "image-segmentation":
- img_url = data["image"]
- img_data = image_to_bytes(img_url)
- image = Image.open(BytesIO(img_data))
- predicted = inference(data=img_data)
- colors = []
- for i in range(len(predicted)):
- colors.append((random.randint(100, 255), random.randint(100, 255), random.randint(100, 255), 155))
- for i, pred in enumerate(predicted):
- label = pred["label"]
- mask = pred.pop("mask").encode("utf-8")
- mask = base64.b64decode(mask)
- mask = Image.open(BytesIO(mask), mode='r')
- mask = mask.convert('L')
-
- layer = Image.new('RGBA', mask.size, colors[i])
- image.paste(layer, (0, 0), mask)
- name = str(uuid.uuid4())[:4]
- image.save(f"public/images/{name}.jpg")
- result = {}
- result["generated image with segmentation mask"] = f"/images/{name}.jpg"
- result["predicted"] = predicted
-
- if task == "object-detection":
- img_url = data["image"]
- img_data = image_to_bytes(img_url)
- predicted = inference(data=img_data)
- image = Image.open(BytesIO(img_data))
- draw = ImageDraw.Draw(image)
- labels = list(item['label'] for item in predicted)
- color_map = {}
- for label in labels:
- if label not in color_map:
- color_map[label] = (random.randint(0, 255), random.randint(0, 100), random.randint(0, 255))
- for label in predicted:
- box = label["box"]
- draw.rectangle(((box["xmin"], box["ymin"]), (box["xmax"], box["ymax"])), outline=color_map[label["label"]], width=2)
- draw.text((box["xmin"]+5, box["ymin"]-15), label["label"], fill=color_map[label["label"]])
- name = str(uuid.uuid4())[:4]
- image.save(f"public/images/{name}.jpg")
- result = {}
- result["generated image with predicted box"] = f"/images/{name}.jpg"
- result["predicted"] = predicted
-
- if task in ["image-classification"]:
- img_url = data["image"]
- img_data = image_to_bytes(img_url)
- result = inference(data=img_data)
-
- if task == "image-to-text":
- img_url = data["image"]
- img_data = image_to_bytes(img_url)
- HUGGINGFACE_HEADERS["Content-Length"] = str(len(img_data))
- r = requests.post(task_url, headers=HUGGINGFACE_HEADERS, data=img_data)
- result = {}
- if "generated_text" in r.json()[0]:
- result["generated text"] = r.json()[0].pop("generated_text")
-
- # AUDIO tasks
- if task == "text-to-speech":
- inputs = data["text"]
- response = inference(inputs, raw_response=True)
- # response = requests.post(task_url, headers=HUGGINGFACE_HEADERS, json={"inputs": text})
- name = str(uuid.uuid4())[:4]
- with open(f"public/audios/{name}.flac", "wb") as f:
- f.write(response.content)
- result = {"generated audio": f"/audios/{name}.flac"}
- if task in ["automatic-speech-recognition", "audio-to-audio", "audio-classification"]:
- audio_url = data["audio"]
- audio_data = requests.get(audio_url, timeout=10).content
- response = inference(data=audio_data, raw_response=True)
- result = response.json()
- if task == "audio-to-audio":
- content = None
- type = None
- for k, v in result[0].items():
- if k == "blob":
- content = base64.b64decode(v.encode("utf-8"))
- if k == "content-type":
- type = "audio/flac".split("/")[-1]
- audio = AudioSegment.from_file(BytesIO(content))
- name = str(uuid.uuid4())[:4]
- audio.export(f"public/audios/{name}.{type}", format=type)
- result = {"generated audio": f"/audios/{name}.{type}"}
- return result
-
-def local_model_inference(model_id, data, task):
- inference = partial(models, model_id)
- # controlnet
- if model_id.startswith("lllyasviel/sd-controlnet-"):
- img_url = data["image"]
- text = data["text"]
- results = inference({"img_url": img_url, "text": text})
- if "path" in results:
- results["generated image"] = results.pop("path")
- return results
- if model_id.endswith("-control"):
- img_url = data["image"]
- results = inference({"img_url": img_url})
- if "path" in results:
- results["generated image"] = results.pop("path")
- return results
-
- if task == "text-to-video":
- results = inference(data)
- if "path" in results:
- results["generated video"] = results.pop("path")
- return results
-
- # NLP tasks
- if task == "question-answering" or task == "sentence-similarity":
- results = inference(json=data)
- return results
- if task in ["text-classification", "token-classification", "text2text-generation", "summarization", "translation", "conversational", "text-generation"]:
- results = inference(json=data)
- return results
-
- # CV tasks
- if task == "depth-estimation":
- img_url = data["image"]
- results = inference({"img_url": img_url})
- if "path" in results:
- results["generated depth image"] = results.pop("path")
- return results
- if task == "image-segmentation":
- img_url = data["image"]
- results = inference({"img_url": img_url})
- results["generated image with segmentation mask"] = results.pop("path")
- return results
- if task == "image-to-image":
- img_url = data["image"]
- results = inference({"img_url": img_url})
- if "path" in results:
- results["generated image"] = results.pop("path")
- return results
- if task == "text-to-image":
- results = inference(data)
- if "path" in results:
- results["generated image"] = results.pop("path")
- return results
- if task == "object-detection":
- img_url = data["image"]
- predicted = inference({"img_url": img_url})
- if "error" in predicted:
- return predicted
- image = load_image(img_url)
- draw = ImageDraw.Draw(image)
- labels = list(item['label'] for item in predicted)
- color_map = {}
- for label in labels:
- if label not in color_map:
- color_map[label] = (random.randint(0, 255), random.randint(0, 100), random.randint(0, 255))
- for label in predicted:
- box = label["box"]
- draw.rectangle(((box["xmin"], box["ymin"]), (box["xmax"], box["ymax"])), outline=color_map[label["label"]], width=2)
- draw.text((box["xmin"]+5, box["ymin"]-15), label["label"], fill=color_map[label["label"]])
- name = str(uuid.uuid4())[:4]
- image.save(f"public/images/{name}.jpg")
- results = {}
- results["generated image with predicted box"] = f"/images/{name}.jpg"
- results["predicted"] = predicted
- return results
- if task in ["image-classification", "image-to-text", "document-question-answering", "visual-question-answering"]:
- img_url = data["image"]
- text = None
- if "text" in data:
- text = data["text"]
- results = inference({"img_url": img_url, "text": text})
- return results
- # AUDIO tasks
- if task == "text-to-speech":
- results = inference(data)
- if "path" in results:
- results["generated audio"] = results.pop("path")
- return results
- if task in ["automatic-speech-recognition", "audio-to-audio", "audio-classification"]:
- audio_url = data["audio"]
- results = inference({"audio_url": audio_url})
- return results
-
-
-def model_inference(model_id, data, hosted_on, task, huggingfacetoken=None):
- if huggingfacetoken:
- HUGGINGFACE_HEADERS = {
- "Authorization": f"Bearer {huggingfacetoken}",
- }
- else:
- HUGGINGFACE_HEADERS = None
- if hosted_on == "unknown":
- r = status(model_id)
- logger.debug("Local Server Status: " + str(r))
- if "loaded" in r and r["loaded"]:
- hosted_on = "local"
- else:
- huggingfaceStatusUrl = f"https://api-inference.huggingface.co/status/{model_id}"
- r = requests.get(huggingfaceStatusUrl, headers=HUGGINGFACE_HEADERS, proxies=PROXY)
- logger.debug("Huggingface Status: " + str(r.json()))
- if "loaded" in r and r["loaded"]:
- hosted_on = "huggingface"
- try:
- if hosted_on == "local":
- inference_result = local_model_inference(model_id, data, task)
- elif hosted_on == "huggingface":
- inference_result = huggingface_model_inference(model_id, data, task, huggingfacetoken)
- except Exception as e:
- print(e)
- traceback.print_exc()
- inference_result = {"error":{"message": str(e)}}
- return inference_result
-
-
-def get_model_status(model_id, url, headers, queue = None):
- endpoint_type = "huggingface" if "huggingface" in url else "local"
- if "huggingface" in url:
- r = requests.get(url, headers=headers, proxies=PROXY).json()
- else:
- r = status(model_id)
- if "loaded" in r and r["loaded"]:
- if queue:
- queue.put((model_id, True, endpoint_type))
- return True
- else:
- if queue:
- queue.put((model_id, False, None))
- return False
-
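-# Probe each candidate model in its own thread and collect those reported as loaded, grouped by endpoint ("local" vs. the Hugging Face Inference API), stopping once topk are available.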
-def get_avaliable_models(candidates, topk=10, huggingfacetoken = None):
- all_available_models = {"local": [], "huggingface": []}
- threads = []
- result_queue = Queue()
- HUGGINGFACE_HEADERS = {
- "Authorization": f"Bearer {huggingfacetoken}",
- }
- for candidate in candidates:
- model_id = candidate["id"]
-
- if inference_mode != "local":
- huggingfaceStatusUrl = f"https://api-inference.huggingface.co/status/{model_id}"
- thread = threading.Thread(target=get_model_status, args=(model_id, huggingfaceStatusUrl, HUGGINGFACE_HEADERS, result_queue))
- threads.append(thread)
- thread.start()
-
- if inference_mode != "huggingface" and config["local_deployment"] != "minimal":
- thread = threading.Thread(target=get_model_status, args=(model_id, "", {}, result_queue))
- threads.append(thread)
- thread.start()
-
- result_count = len(threads)
- while result_count:
- model_id, status, endpoint_type = result_queue.get()
- if status and model_id not in all_available_models:
- all_available_models[endpoint_type].append(model_id)
- if len(all_available_models["local"] + all_available_models["huggingface"]) >= topk:
- break
- result_count -= 1
-
- for thread in threads:
- thread.join()
-
- return all_available_models
-
-def collect_result(command, choose, inference_result):
- result = {"task": command}
- result["inference result"] = inference_result
- result["choose model result"] = choose
- logger.debug(f"inference result: {inference_result}")
- return result
-
-
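-# Execute a single parsed task: substitute generated resources from dependency tasks, choose an available model (with ControlNet/ChatGPT special cases), run inference and record the result.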
-def run_task(input, command, results, openaikey = None, huggingfacetoken = None):
- id = command["id"]
- args = command["args"]
- task = command["task"]
- deps = command["dep"]
- if deps[0] != -1:
- dep_tasks = [results[dep] for dep in deps]
- else:
- dep_tasks = []
-
- logger.debug(f"Run task: {id} - {task}")
- logger.debug("Deps: " + json.dumps(dep_tasks))
-
- if deps[0] != -1:
- if "image" in args and "-" in args["image"]:
- resource_id = int(args["image"].split("-")[1])
- if "generated image" in results[resource_id]["inference result"]:
- args["image"] = results[resource_id]["inference result"]["generated image"]
- if "audio" in args and "-" in args["audio"]:
- resource_id = int(args["audio"].split("-")[1])
- if "generated audio" in results[resource_id]["inference result"]:
- args["audio"] = results[resource_id]["inference result"]["generated audio"]
- if "text" in args and "-" in args["text"]:
- resource_id = int(args["text"].split("-")[1])
- if "generated text" in results[resource_id]["inference result"]:
- args["text"] = results[resource_id]["inference result"]["generated text"]
-
- text = image = audio = None
- for dep_task in dep_tasks:
- if "generated text" in dep_task["inference result"]:
- text = dep_task["inference result"]["generated text"]
- logger.debug("Detect the generated text of dependency task (from results):" + text)
- elif "text" in dep_task["task"]["args"]:
- text = dep_task["task"]["args"]["text"]
- logger.debug("Detect the text of dependency task (from args): " + text)
- if "generated image" in dep_task["inference result"]:
- image = dep_task["inference result"]["generated image"]
- logger.debug("Detect the generated image of dependency task (from results): " + image)
- elif "image" in dep_task["task"]["args"]:
- image = dep_task["task"]["args"]["image"]
- logger.debug("Detect the image of dependency task (from args): " + image)
- if "generated audio" in dep_task["inference result"]:
- audio = dep_task["inference result"]["generated audio"]
- logger.debug("Detect the generated audio of dependency task (from results): " + audio)
- elif "audio" in dep_task["task"]["args"]:
- audio = dep_task["task"]["args"]["audio"]
- logger.debug("Detect the audio of dependency task (from args): " + audio)
-
- if "image" in args and "" in args["image"]:
- if image:
- args["image"] = image
- if "audio" in args and "" in args["audio"]:
- if audio:
- args["audio"] = audio
- if "text" in args and "" in args["text"]:
- if text:
- args["text"] = text
-
- for resource in ["image", "audio"]:
- if resource in args and not args[resource].startswith("public/") and len(args[resource]) > 0 and not args[resource].startswith("http"):
- args[resource] = f"public/{args[resource]}"
-
- if "-text-to-image" in command['task'] and "text" not in args:
- logger.debug("control-text-to-image task, but text is empty, so we use control-generation instead.")
- control = task.split("-")[0]
-
- if control == "seg":
- task = "image-segmentation"
- command['task'] = task
- elif control == "depth":
- task = "depth-estimation"
- command['task'] = task
- else:
- task = f"{control}-control"
-
- command["args"] = args
- logger.debug(f"parsed task: {command}")
-
- if task.endswith("-text-to-image") or task.endswith("-control"):
- if inference_mode != "huggingface":
- if task.endswith("-text-to-image"):
- control = task.split("-")[0]
- best_model_id = f"lllyasviel/sd-controlnet-{control}"
- else:
- best_model_id = task
- hosted_on = "local"
- reason = "ControlNet is the best model for this task."
- choose = {"id": best_model_id, "reason": reason}
- logger.debug(f"chosen model: {choose}")
- else:
- logger.warning(f"Task {command['task']} is not available. ControlNet need to be deployed locally.")
- record_case(success=False, **{"input": input, "task": command, "reason": f"Task {command['task']} is not available. ControlNet need to be deployed locally.", "op":"message"})
- inference_result = {"error": f"service related to ControlNet is not available."}
- results[id] = collect_result(command, "", inference_result)
- return False
- elif task in ["summarization", "translation", "conversational", "text-generation", "text2text-generation"]: # ChatGPT Can do
- best_model_id = "ChatGPT"
- reason = "ChatGPT performs well on some NLP tasks as well."
- choose = {"id": best_model_id, "reason": reason}
- messages = [{
- "role": "user",
- "content": f"[ {input} ] contains a task in JSON format {command}, 'task' indicates the task type and 'args' indicates the arguments required for the task. Don't explain the task to me, just help me do it and give me the result. The result must be in text form without any urls."
- }]
- response = chitchat(messages, openaikey)
- results[id] = collect_result(command, choose, {"response": response})
- return True
- else:
- if task not in MODELS_MAP:
- logger.warning(f"no available models on {task} task.")
- record_case(success=False, **{"input": input, "task": command, "reason": f"task not support: {command['task']}", "op":"message"})
- inference_result = {"error": f"{command['task']} not found in available tasks."}
- results[id] = collect_result(command, "", inference_result)
- return False
-
- candidates = MODELS_MAP[task][:20]
- all_avaliable_models = get_avaliable_models(candidates, config["num_candidate_models"], huggingfacetoken)
- all_avaliable_model_ids = all_avaliable_models["local"] + all_avaliable_models["huggingface"]
- logger.debug(f"avaliable models on {command['task']}: {all_avaliable_models}")
-
- if len(all_avaliable_model_ids) == 0:
- logger.warning(f"no available models on {command['task']}")
- record_case(success=False, **{"input": input, "task": command, "reason": f"no available models: {command['task']}", "op":"message"})
- inference_result = {"error": f"no available models on {command['task']} task."}
- results[id] = collect_result(command, "", inference_result)
- return False
-
- if len(all_avaliable_model_ids) == 1:
- best_model_id = all_avaliable_model_ids[0]
- hosted_on = "local" if best_model_id in all_avaliable_models["local"] else "huggingface"
- reason = "Only one model available."
- choose = {"id": best_model_id, "reason": reason}
- logger.debug(f"chosen model: {choose}")
- else:
- cand_models_info = [
- {
- "id": model["id"],
- "inference endpoint": all_avaliable_models.get(
- "local" if model["id"] in all_avaliable_models["local"] else "huggingface"
- ),
- "likes": model.get("likes"),
- "description": model.get("description", "")[:config["max_description_length"]],
- "language": model.get("language"),
- "tags": model.get("tags"),
- }
- for model in candidates
- if model["id"] in all_avaliable_model_ids
- ]
-
- choose_str = choose_model(input, command, cand_models_info, openaikey)
- logger.debug(f"chosen model: {choose_str}")
- try:
- choose = json.loads(choose_str)
- reason = choose["reason"]
- best_model_id = choose["id"]
- hosted_on = "local" if best_model_id in all_avaliable_models["local"] else "huggingface"
- except Exception as e:
- logger.warning(f"the response [ {choose_str} ] is not a valid JSON, try to find the model id and reason in the response.")
- choose_str = find_json(choose_str)
- best_model_id, reason, choose = get_id_reason(choose_str)
- hosted_on = "local" if best_model_id in all_avaliable_models["local"] else "huggingface"
- inference_result = model_inference(best_model_id, args, hosted_on, command['task'], huggingfacetoken)
-
- if "error" in inference_result:
- logger.warning(f"Inference error: {inference_result['error']}")
- record_case(success=False, **{"input": input, "task": command, "reason": f"inference error: {inference_result['error']}", "op":"message"})
- results[id] = collect_result(command, choose, inference_result)
- return False
-
- results[id] = collect_result(command, choose, inference_result)
- return True
-
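-# Top-level HuggingGPT flow: parse the request into tasks, run them concurrently in dependency order, then ask the LLM to turn the collected results into a response.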
-def chat_huggingface(messages, openaikey = None, huggingfacetoken = None, return_planning = False, return_results = False):
- start = time.time()
- context = messages[:-1]
- input = messages[-1]["content"]
- logger.info("*"*80)
- logger.info(f"input: {input}")
-
- task_str = parse_task(context, input, openaikey)
- logger.info(task_str)
-
- if "error" in task_str:
- return str(task_str), {}
- else:
- task_str = task_str.strip()
-
- try:
- tasks = json.loads(task_str)
- except Exception as e:
- logger.debug(e)
- response = chitchat(messages, openaikey)
- record_case(success=False, **{"input": input, "task": task_str, "reason": "task parsing fail", "op":"chitchat"})
- return response, {}
-
- if task_str == "[]": # using LLM response for empty task
- record_case(success=False, **{"input": input, "task": [], "reason": "task parsing fail: empty", "op": "chitchat"})
- response = chitchat(messages, openaikey)
- return response, {}
-
- if len(tasks)==1 and tasks[0]["task"] in ["summarization", "translation", "conversational", "text-generation", "text2text-generation"]:
- record_case(success=True, **{"input": input, "task": tasks, "reason": "task parsing fail: empty", "op": "chitchat"})
- response = chitchat(messages, openaikey)
- best_model_id = "ChatGPT"
- reason = "ChatGPT performs well on some NLP tasks as well."
- choose = {"id": best_model_id, "reason": reason}
- return response, collect_result(tasks[0], choose, {"response": response})
-
-
- tasks = unfold(tasks)
- tasks = fix_dep(tasks)
- logger.debug(tasks)
-
- if return_planning:
- return tasks
-
- results = {}
- threads = []
- tasks = tasks[:]
- d = dict()
- retry = 0
- while True:
- num_threads = len(threads)
- for task in tasks:
- dep = task["dep"]
- # logger.debug(f"d.keys(): {d.keys()}, dep: {dep}")
- for dep_id in dep:
- if dep_id >= task["id"]:
- task["dep"] = [-1]
- dep = [-1]
- break
- if len(list(set(dep).intersection(d.keys()))) == len(dep) or dep[0] == -1:
- tasks.remove(task)
- thread = threading.Thread(target=run_task, args=(input, task, d, openaikey, huggingfacetoken))
- thread.start()
- threads.append(thread)
- if num_threads == len(threads):
- time.sleep(0.5)
- retry += 1
- if retry > 80:
- logger.debug("User has waited too long, Loop break.")
- break
- if len(tasks) == 0:
- break
- for thread in threads:
- thread.join()
-
- results = d.copy()
-
- logger.debug(results)
- if return_results:
- return results
-
- response = response_results(input, results, openaikey).strip()
-
- end = time.time()
- during = end - start
-
- answer = {"message": response}
- record_case(success=True, **{"input": input, "task": task_str, "results": results, "response": response, "during": during, "op":"response"})
- logger.info(f"response: {response}")
- return response, results
diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/criteria/localitly_regulizer.py b/spaces/Amrrs/DragGan-Inversion/PTI/criteria/localitly_regulizer.py
deleted file mode 100644
index 09a5a40d44153bd0110d22b2d9a4d50970cf7515..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/PTI/criteria/localitly_regulizer.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import torch
-import numpy as np
-import wandb  # used for optional logging in ball_holder_loss_lazy
-from PTI.criteria import l2_loss
-from PTI.configs import hyperparameters
-from PTI.configs import global_config
-
-
-class Space_Regulizer:
- def __init__(self, original_G, lpips_net):
- self.original_G = original_G
- self.morphing_regulizer_alpha = hyperparameters.regulizer_alpha
- self.lpips_loss = lpips_net
-
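- # Move fixed_w a step of size regulizer_alpha toward new_w_code along the normalized interpolation direction.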
- def get_morphed_w_code(self, new_w_code, fixed_w):
- interpolation_direction = new_w_code - fixed_w
- interpolation_direction_norm = torch.norm(interpolation_direction, p=2)
- direction_to_move = hyperparameters.regulizer_alpha * \
- interpolation_direction / interpolation_direction_norm
- result_w = fixed_w + direction_to_move
- self.morphing_regulizer_alpha * fixed_w + \
- (1 - self.morphing_regulizer_alpha) * new_w_code
-
- return result_w
-
- def get_image_from_ws(self, w_codes, G):
- return torch.cat([G.synthesis(w_code, noise_mode='none', force_fp32=True) for w_code in w_codes])
-
- def ball_holder_loss_lazy(self, new_G, num_of_sampled_latents, w_batch, use_wandb=False):
- loss = 0.0
-
- z_samples = np.random.randn(
- num_of_sampled_latents, self.original_G.z_dim)
- w_samples = self.original_G.mapping(torch.from_numpy(z_samples).to(global_config.device), None,
- truncation_psi=0.5)
- territory_indicator_ws = [self.get_morphed_w_code(
- w_code.unsqueeze(0), w_batch) for w_code in w_samples]
-
- for w_code in territory_indicator_ws:
- new_img = new_G.synthesis(
- w_code, noise_mode='none', force_fp32=True)
- with torch.no_grad():
- old_img = self.original_G.synthesis(
- w_code, noise_mode='none', force_fp32=True)
-
- if hyperparameters.regulizer_l2_lambda > 0:
- l2_loss_val = l2_loss.l2_loss(old_img, new_img)
- if use_wandb:
- wandb.log({f'space_regulizer_l2_loss_val': l2_loss_val.detach().cpu()},
- step=global_config.training_step)
- loss += l2_loss_val * hyperparameters.regulizer_l2_lambda
-
- if hyperparameters.regulizer_lpips_lambda > 0:
- loss_lpips = self.lpips_loss(old_img, new_img)
- loss_lpips = torch.mean(torch.squeeze(loss_lpips))
- if use_wandb:
- wandb.log({f'space_regulizer_lpips_loss_val': loss_lpips.detach().cpu()},
- step=global_config.training_step)
- loss += loss_lpips * hyperparameters.regulizer_lpips_lambda
-
- return loss / len(territory_indicator_ws)
-
- def space_regulizer_loss(self, new_G, w_batch, use_wandb):
- ret_val = self.ball_holder_loss_lazy(
- new_G, hyperparameters.latent_ball_num_of_samples, w_batch, use_wandb)
- return ret_val
diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.h b/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.h
deleted file mode 100644
index 524c804122a2582e20e2e4e9c49267e1a1b6db60..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/filtered_lrelu.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-#include <cuda_runtime.h>
-
-//------------------------------------------------------------------------
-// CUDA kernel parameters.
-
-struct filtered_lrelu_kernel_params
-{
- // These parameters decide which kernel to use.
- int up; // upsampling ratio (1, 2, 4)
- int down; // downsampling ratio (1, 2, 4)
- int2 fuShape; // [size, 1] | [size, size]
- int2 fdShape; // [size, 1] | [size, size]
-
- int _dummy; // Alignment.
-
- // Rest of the parameters.
- const void* x; // Input tensor.
- void* y; // Output tensor.
- const void* b; // Bias tensor.
- unsigned char* s; // Sign tensor in/out. NULL if unused.
- const float* fu; // Upsampling filter.
- const float* fd; // Downsampling filter.
-
- int2 pad0; // Left/top padding.
- float gain; // Additional gain factor.
- float slope; // Leaky ReLU slope on negative side.
- float clamp; // Clamp after nonlinearity.
- int flip; // Filter kernel flip for gradient computation.
-
- int tilesXdim; // Original number of horizontal output tiles.
- int tilesXrep; // Number of horizontal tiles per CTA.
- int blockZofs; // Block z offset to support large minibatch, channel dimensions.
-
- int4 xShape; // [width, height, channel, batch]
- int4 yShape; // [width, height, channel, batch]
- int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused.
- int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
- int swLimit; // Active width of sign tensor in bytes.
-
- longlong4 xStride; // Strides of all tensors except signs, same component order as shapes.
- longlong4 yStride; //
- int64_t bStride; //
- longlong3 fuStride; //
- longlong3 fdStride; //
-};
-
-struct filtered_lrelu_act_kernel_params
-{
- void* x; // Input/output, modified in-place.
- unsigned char* s; // Sign tensor in/out. NULL if unused.
-
- float gain; // Additional gain factor.
- float slope; // Leaky ReLU slope on negative side.
- float clamp; // Clamp after nonlinearity.
-
- int4 xShape; // [width, height, channel, batch]
- longlong4 xStride; // Input/output tensor strides, same order as in shape.
- int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused.
- int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel specialization.
-
-struct filtered_lrelu_kernel_spec
-{
- void* setup; // Function for filter kernel setup.
- void* exec; // Function for main operation.
- int2 tileOut; // Width/height of launch tile.
- int numWarps; // Number of warps per thread block, determines launch block size.
- int xrep; // For processing multiple horizontal tiles per thread block.
- int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants.
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel selection.
-
-template <class T, class index_t, bool signWrite, bool signRead> filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB);
-template <class T, bool signWrite, bool signRead> void* choose_filtered_lrelu_act_kernel(void);
-template <bool signT> cudaError_t copy_filters(cudaStream_t stream);
-
-//------------------------------------------------------------------------
\ No newline at end of file
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_inpaint_img2img.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_inpaint_img2img.py
deleted file mode 100644
index bad1df0e13fb55ef57f4d0ebade910a506409187..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_inpaint_img2img.py
+++ /dev/null
@@ -1,1119 +0,0 @@
-# Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
-
-import inspect
-from typing import Any, Callable, Dict, List, Optional, Union
-
-import numpy as np
-import PIL.Image
-import torch
-import torch.nn.functional as F
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
-from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging
-from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
-from diffusers.schedulers import KarrasDiffusionSchedulers
-from diffusers.utils import (
- PIL_INTERPOLATION,
- is_accelerate_available,
- is_accelerate_version,
- randn_tensor,
- replace_example_docstring,
-)
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```py
- >>> import numpy as np
- >>> import torch
- >>> from PIL import Image
- >>> from stable_diffusion_controlnet_inpaint_img2img import StableDiffusionControlNetInpaintImg2ImgPipeline
-
- >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
- >>> from diffusers import ControlNetModel, UniPCMultistepScheduler
- >>> from diffusers.utils import load_image
-
- >>> def ade_palette():
- return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
- [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
- [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
- [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
- [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
- [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
- [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
- [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
- [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
- [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
- [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
- [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
- [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
- [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
- [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
- [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
- [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
- [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
- [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
- [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
- [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
- [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
- [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
- [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
- [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
- [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
- [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
- [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
- [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
- [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
- [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
- [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
- [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
- [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
- [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
- [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
- [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
- [102, 255, 0], [92, 0, 255]]
-
- >>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
- >>> image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
-
- >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16)
-
- >>> pipe = StableDiffusionControlNetInpaintImg2ImgPipeline.from_pretrained(
- "runwayml/stable-diffusion-inpainting", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
- )
-
- >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
- >>> pipe.enable_xformers_memory_efficient_attention()
- >>> pipe.enable_model_cpu_offload()
-
- >>> def image_to_seg(image):
- pixel_values = image_processor(image, return_tensors="pt").pixel_values
- with torch.no_grad():
- outputs = image_segmentor(pixel_values)
- seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
- color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3
- palette = np.array(ade_palette())
- for label, color in enumerate(palette):
- color_seg[seg == label, :] = color
- color_seg = color_seg.astype(np.uint8)
- seg_image = Image.fromarray(color_seg)
- return seg_image
-
- >>> image = load_image(
- "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
- )
-
- >>> mask_image = load_image(
- "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
- )
-
- >>> controlnet_conditioning_image = image_to_seg(image)
-
- >>> image = pipe(
- "Face of a yellow cat, high resolution, sitting on a park bench",
- image,
- mask_image,
- controlnet_conditioning_image,
- num_inference_steps=20,
- ).images[0]
-
- >>> image.save("out.png")
- ```
-"""
-
-
-def prepare_image(image):
- if isinstance(image, torch.Tensor):
- # Batch single image
- if image.ndim == 3:
- image = image.unsqueeze(0)
-
- image = image.to(dtype=torch.float32)
- else:
- # preprocess image
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
- image = [image]
-
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
- image = np.concatenate(image, axis=0)
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
- image = np.concatenate([i[None, :] for i in image], axis=0)
-
- image = image.transpose(0, 3, 1, 2)
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
- return image
-
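-# Rough usage sketch (added for illustration, not part of the original file): a single
-# 512x512 RGB PIL image comes back as a float32 tensor of shape (1, 3, 512, 512) with
-# pixel values rescaled from [0, 255] to [-1, 1], e.g.
-#
-#   >>> img = PIL.Image.new("RGB", (512, 512), color=(255, 255, 255))
-#   >>> out = prepare_image(img)
-#   >>> out.shape, float(out.max())
-#   (torch.Size([1, 3, 512, 512]), 1.0)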
-
-def prepare_mask_image(mask_image):
- if isinstance(mask_image, torch.Tensor):
- if mask_image.ndim == 2:
- # Batch and add channel dim for single mask
- mask_image = mask_image.unsqueeze(0).unsqueeze(0)
- elif mask_image.ndim == 3 and mask_image.shape[0] == 1:
- # Single mask, the 0'th dimension is considered to be
- # the existing batch size of 1
- mask_image = mask_image.unsqueeze(0)
- elif mask_image.ndim == 3 and mask_image.shape[0] != 1:
- # Batch of mask, the 0'th dimension is considered to be
- # the batching dimension
- mask_image = mask_image.unsqueeze(1)
-
- # Binarize mask
- mask_image[mask_image < 0.5] = 0
- mask_image[mask_image >= 0.5] = 1
- else:
- # preprocess mask
- if isinstance(mask_image, (PIL.Image.Image, np.ndarray)):
- mask_image = [mask_image]
-
- if isinstance(mask_image, list) and isinstance(mask_image[0], PIL.Image.Image):
- mask_image = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask_image], axis=0)
- mask_image = mask_image.astype(np.float32) / 255.0
- elif isinstance(mask_image, list) and isinstance(mask_image[0], np.ndarray):
- mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0)
-
- mask_image[mask_image < 0.5] = 0
- mask_image[mask_image >= 0.5] = 1
- mask_image = torch.from_numpy(mask_image)
-
- return mask_image
-
-
-def prepare_controlnet_conditioning_image(
- controlnet_conditioning_image, width, height, batch_size, num_images_per_prompt, device, dtype
-):
- if not isinstance(controlnet_conditioning_image, torch.Tensor):
- if isinstance(controlnet_conditioning_image, PIL.Image.Image):
- controlnet_conditioning_image = [controlnet_conditioning_image]
-
- if isinstance(controlnet_conditioning_image[0], PIL.Image.Image):
- controlnet_conditioning_image = [
- np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :]
- for i in controlnet_conditioning_image
- ]
- controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0)
- controlnet_conditioning_image = np.array(controlnet_conditioning_image).astype(np.float32) / 255.0
- controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2)
- controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image)
- elif isinstance(controlnet_conditioning_image[0], torch.Tensor):
- controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0)
-
- image_batch_size = controlnet_conditioning_image.shape[0]
-
- if image_batch_size == 1:
- repeat_by = batch_size
- else:
- # image batch size is the same as prompt batch size
- repeat_by = num_images_per_prompt
-
- controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0)
-
- controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype)
-
- return controlnet_conditioning_image
-
-
-class StableDiffusionControlNetInpaintImg2ImgPipeline(DiffusionPipeline):
- """
- Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
- """
-
- _optional_components = ["safety_checker", "feature_extractor"]
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- controlnet: ControlNetModel,
- scheduler: KarrasDiffusionSchedulers,
- safety_checker: StableDiffusionSafetyChecker,
- feature_extractor: CLIPImageProcessor,
- requires_safety_checker: bool = True,
- ):
- super().__init__()
-
- if safety_checker is None and requires_safety_checker:
- logger.warning(
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
- )
-
- if safety_checker is not None and feature_extractor is None:
- raise ValueError(
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
- )
-
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- controlnet=controlnet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
- self.register_to_config(requires_safety_checker=requires_safety_checker)
-
- def enable_vae_slicing(self):
- r"""
- Enable sliced VAE decoding.
-
- When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
- steps. This is useful to save some memory and allow larger batch sizes.
- """
- self.vae.enable_slicing()
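- # Usage sketch (assumed, not from the original file): call `pipe.enable_vae_slicing()`
- # once before generating so that large batches are decoded slice-by-slice instead of in
- # a single VAE forward pass.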
-
- def disable_vae_slicing(self):
- r"""
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
- computing decoding in one step.
- """
- self.vae.disable_slicing()
-
- def enable_sequential_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
- text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a
- `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
- Note that offloading happens on a submodule basis. Memory savings are higher than with
- `enable_model_cpu_offload`, but performance is lower.
- """
- if is_accelerate_available():
- from accelerate import cpu_offload
- else:
- raise ImportError("Please install accelerate via `pip install accelerate`")
-
- device = torch.device(f"cuda:{gpu_id}")
-
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:
- cpu_offload(cpu_offloaded_model, device)
-
- if self.safety_checker is not None:
- cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
-
- def enable_model_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
- """
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
- from accelerate import cpu_offload_with_hook
- else:
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
- device = torch.device(f"cuda:{gpu_id}")
-
- hook = None
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
- if self.safety_checker is not None:
- # the safety checker can offload the vae again
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
-
- # the control net hook has to be manually offloaded as it alternates with the unet
- cpu_offload_with_hook(self.controlnet, device)
-
- # We'll offload the last model manually.
- self.final_offload_hook = hook
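- # Minimal usage sketch (assumes a CUDA device and accelerate>=0.17.0; values are
- # illustrative and mirror the module-level example docstring, not this method):
- #
- #   >>> pipe = StableDiffusionControlNetInpaintImg2ImgPipeline.from_pretrained(
- #   ...     "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16
- #   ... )
- #   >>> pipe.enable_model_cpu_offload()  # sub-models move to the GPU only while their forward runs
- #   >>> image = pipe(prompt, image, mask_image, controlnet_conditioning_image).images[0]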
-
- @property
- def _execution_device(self):
- r"""
- Returns the device on which the pipeline's models will be executed. After calling
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
- hooks.
- """
- if not hasattr(self.unet, "_hf_hook"):
- return self.device
- for module in self.unet.modules():
- if (
- hasattr(module, "_hf_hook")
- and hasattr(module._hf_hook, "execution_device")
- and module._hf_hook.execution_device is not None
- ):
- return torch.device(module._hf_hook.execution_device)
- return self.device
-
- def _encode_prompt(
- self,
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt=None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- ):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
- prompt to be encoded
- device: (`torch.device`):
- torch device
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
- argument.
- """
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- if prompt_embeds is None:
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
- text_input_ids, untruncated_ids
- ):
- removed_text = self.tokenizer.batch_decode(
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
- )
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = text_inputs.attention_mask.to(device)
- else:
- attention_mask = None
-
- prompt_embeds = self.text_encoder(
- text_input_ids.to(device),
- attention_mask=attention_mask,
- )
- prompt_embeds = prompt_embeds[0]
-
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
- bs_embed, seq_len, _ = prompt_embeds.shape
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance and negative_prompt_embeds is None:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_tokens = negative_prompt
-
- max_length = prompt_embeds.shape[1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pt",
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = uncond_input.attention_mask.to(device)
- else:
- attention_mask = None
-
- negative_prompt_embeds = self.text_encoder(
- uncond_input.input_ids.to(device),
- attention_mask=attention_mask,
- )
- negative_prompt_embeds = negative_prompt_embeds[0]
-
- if do_classifier_free_guidance:
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = negative_prompt_embeds.shape[1]
-
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-
- return prompt_embeds
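- # Shape sketch (77 tokens and 768 hidden dims are typical Stable Diffusion v1.x values,
- # assumed here rather than taken from this file): with batch_size=1, num_images_per_prompt=2
- # and classifier-free guidance enabled, the returned prompt_embeds has shape (4, 77, 768),
- # the negative embeddings stacked in front of the positive ones along the batch axis.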
-
- def run_safety_checker(self, image, device, dtype):
- if self.safety_checker is not None:
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
- image, has_nsfw_concept = self.safety_checker(
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
- )
- else:
- has_nsfw_concept = None
- return image, has_nsfw_concept
-
- def decode_latents(self, latents):
- latents = 1 / self.vae.config.scaling_factor * latents
- image = self.vae.decode(latents).sample
- image = (image / 2 + 0.5).clamp(0, 1)
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
- return image
-
- def prepare_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
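- # Concrete effect (assumption about the schedulers, not stated in this file): for
- # DDIMScheduler the `eta` entry is forwarded to `scheduler.step`, while for schedulers
- # whose `step` signature lacks `eta` (e.g. UniPCMultistepScheduler) it is dropped here.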
-
- def check_inputs(
- self,
- prompt,
- image,
- mask_image,
- controlnet_conditioning_image,
- height,
- width,
- callback_steps,
- negative_prompt=None,
- prompt_embeds=None,
- negative_prompt_embeds=None,
- strength=None,
- ):
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- if prompt is not None and prompt_embeds is not None:
- raise ValueError(
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
- " only forward one of the two."
- )
- elif prompt is None and prompt_embeds is None:
- raise ValueError(
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
- )
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if negative_prompt is not None and negative_prompt_embeds is not None:
- raise ValueError(
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
- )
-
- if prompt_embeds is not None and negative_prompt_embeds is not None:
- if prompt_embeds.shape != negative_prompt_embeds.shape:
- raise ValueError(
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
- f" {negative_prompt_embeds.shape}."
- )
-
- controlnet_cond_image_is_pil = isinstance(controlnet_conditioning_image, PIL.Image.Image)
- controlnet_cond_image_is_tensor = isinstance(controlnet_conditioning_image, torch.Tensor)
- controlnet_cond_image_is_pil_list = isinstance(controlnet_conditioning_image, list) and isinstance(
- controlnet_conditioning_image[0], PIL.Image.Image
- )
- controlnet_cond_image_is_tensor_list = isinstance(controlnet_conditioning_image, list) and isinstance(
- controlnet_conditioning_image[0], torch.Tensor
- )
-
- if (
- not controlnet_cond_image_is_pil
- and not controlnet_cond_image_is_tensor
- and not controlnet_cond_image_is_pil_list
- and not controlnet_cond_image_is_tensor_list
- ):
- raise TypeError(
- "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
- )
-
- if controlnet_cond_image_is_pil:
- controlnet_cond_image_batch_size = 1
- elif controlnet_cond_image_is_tensor:
- controlnet_cond_image_batch_size = controlnet_conditioning_image.shape[0]
- elif controlnet_cond_image_is_pil_list:
- controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
- elif controlnet_cond_image_is_tensor_list:
- controlnet_cond_image_batch_size = len(controlnet_conditioning_image)
-
- if prompt is not None and isinstance(prompt, str):
- prompt_batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- prompt_batch_size = len(prompt)
- elif prompt_embeds is not None:
- prompt_batch_size = prompt_embeds.shape[0]
-
- if controlnet_cond_image_batch_size != 1 and controlnet_cond_image_batch_size != prompt_batch_size:
- raise ValueError(
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {controlnet_cond_image_batch_size}, prompt batch size: {prompt_batch_size}"
- )
-
- if isinstance(image, torch.Tensor) and not isinstance(mask_image, torch.Tensor):
- raise TypeError("if `image` is a tensor, `mask_image` must also be a tensor")
-
- if isinstance(image, PIL.Image.Image) and not isinstance(mask_image, PIL.Image.Image):
- raise TypeError("if `image` is a PIL image, `mask_image` must also be a PIL image")
-
- if isinstance(image, torch.Tensor):
- if image.ndim != 3 and image.ndim != 4:
- raise ValueError("`image` must have 3 or 4 dimensions")
-
- if mask_image.ndim != 2 and mask_image.ndim != 3 and mask_image.ndim != 4:
- raise ValueError("`mask_image` must have 2, 3, or 4 dimensions")
-
- if image.ndim == 3:
- image_batch_size = 1
- image_channels, image_height, image_width = image.shape
- elif image.ndim == 4:
- image_batch_size, image_channels, image_height, image_width = image.shape
-
- if mask_image.ndim == 2:
- mask_image_batch_size = 1
- mask_image_channels = 1
- mask_image_height, mask_image_width = mask_image.shape
- elif mask_image.ndim == 3:
- mask_image_channels = 1
- mask_image_batch_size, mask_image_height, mask_image_width = mask_image.shape
- elif mask_image.ndim == 4:
- mask_image_batch_size, mask_image_channels, mask_image_height, mask_image_width = mask_image.shape
-
- if image_channels != 3:
- raise ValueError("`image` must have 3 channels")
-
- if mask_image_channels != 1:
- raise ValueError("`mask_image` must have 1 channel")
-
- if image_batch_size != mask_image_batch_size:
- raise ValueError("`image` and `mask_image` mush have the same batch sizes")
-
- if image_height != mask_image_height or image_width != mask_image_width:
- raise ValueError("`image` and `mask_image` must have the same height and width dimensions")
-
- if image.min() < -1 or image.max() > 1:
- raise ValueError("`image` should be in range [-1, 1]")
-
- if mask_image.min() < 0 or mask_image.max() > 1:
- raise ValueError("`mask_image` should be in range [0, 1]")
- else:
- mask_image_channels = 1
- image_channels = 3
-
- single_image_latent_channels = self.vae.config.latent_channels
-
- total_latent_channels = single_image_latent_channels * 2 + mask_image_channels
-
- if total_latent_channels != self.unet.config.in_channels:
- raise ValueError(
- f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received"
- f" non inpainting latent channels: {single_image_latent_channels},"
- f" mask channels: {mask_image_channels}, and masked image channels: {single_image_latent_channels}."
- f" Please verify the config of `pipeline.unet` and the `mask_image` and `image` inputs."
- )
-
- if strength < 0 or strength > 1:
- raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
-
- def get_timesteps(self, num_inference_steps, strength, device):
- # get the original timestep using init_timestep
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
- t_start = max(num_inference_steps - init_timestep, 0)
- timesteps = self.scheduler.timesteps[t_start:]
-
- return timesteps, num_inference_steps - t_start
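- # Worked example (derived from the code above): with num_inference_steps=50 and
- # strength=0.8, init_timestep = min(int(50 * 0.8), 50) = 40 and t_start = 50 - 40 = 10,
- # so the first 10 scheduler timesteps are skipped and 40 denoising steps remain.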
-
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
- raise ValueError(
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
- )
-
- image = image.to(device=device, dtype=dtype)
-
- batch_size = batch_size * num_images_per_prompt
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- if isinstance(generator, list):
- init_latents = [
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
- ]
- init_latents = torch.cat(init_latents, dim=0)
- else:
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
-
- init_latents = self.vae.config.scaling_factor * init_latents
-
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
- raise ValueError(
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
- )
- else:
- # duplicate init_latents so a single init image can serve the full requested batch
- init_latents = torch.cat([init_latents] * max(batch_size // init_latents.shape[0], 1), dim=0)
-
- shape = init_latents.shape
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-
- # get latents
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
- latents = init_latents
-
- return latents
-
- def prepare_mask_latents(self, mask_image, batch_size, height, width, dtype, device, do_classifier_free_guidance):
- # resize the mask to latents shape as we concatenate the mask to the latents
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
- # and half precision
- mask_image = F.interpolate(mask_image, size=(height // self.vae_scale_factor, width // self.vae_scale_factor))
- mask_image = mask_image.to(device=device, dtype=dtype)
-
- # duplicate mask for each generation per prompt, using mps friendly method
- if mask_image.shape[0] < batch_size:
- if not batch_size % mask_image.shape[0] == 0:
- raise ValueError(
- "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
- f" a total batch size of {batch_size}, but {mask_image.shape[0]} masks were passed. Make sure the number"
- " of masks that you pass is divisible by the total requested batch size."
- )
- mask_image = mask_image.repeat(batch_size // mask_image.shape[0], 1, 1, 1)
-
- mask_image = torch.cat([mask_image] * 2) if do_classifier_free_guidance else mask_image
-
- mask_image_latents = mask_image
-
- return mask_image_latents
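- # Worked example (assuming the usual vae_scale_factor of 8 for Stable Diffusion
- # checkpoints): a 512x512 mask is resized to 64x64 to match the latent grid, and with
- # classifier-free guidance enabled it is duplicated once more along the batch dimension.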
-
- def prepare_masked_image_latents(
- self, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
- ):
- masked_image = masked_image.to(device=device, dtype=dtype)
-
- # encode the mask image into latents space so we can concatenate it to the latents
- if isinstance(generator, list):
- masked_image_latents = [
- self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
- for i in range(batch_size)
- ]
- masked_image_latents = torch.cat(masked_image_latents, dim=0)
- else:
- masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
- masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
-
- # duplicate masked_image_latents for each generation per prompt, using mps friendly method
- if masked_image_latents.shape[0] < batch_size:
- if not batch_size % masked_image_latents.shape[0] == 0:
- raise ValueError(
- "The passed images and the required batch size don't match. Images are supposed to be duplicated"
- f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
- " Make sure the number of images that you pass is divisible by the total requested batch size."
- )
- masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
-
- masked_image_latents = (
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
- )
-
- # aligning device to prevent device errors when concating it with the latent model input
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
- return masked_image_latents
-
- def _default_height_width(self, height, width, image):
- if isinstance(image, list):
- image = image[0]
-
- if height is None:
- if isinstance(image, PIL.Image.Image):
- height = image.height
- elif isinstance(image, torch.Tensor):
- height = image.shape[2]
-
- height = (height // 8) * 8 # round down to nearest multiple of 8
-
- if width is None:
- if isinstance(image, PIL.Image.Image):
- width = image.width
- elif isinstance(image, torch.Tensor):
- width = image.shape[3]
-
- width = (width // 8) * 8 # round down to nearest multiple of 8
-
- return height, width
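- # Worked example (derived from the code above): a 769x513 PIL conditioning image yields
- # height = (513 // 8) * 8 = 512 and width = (769 // 8) * 8 = 768, keeping both dimensions
- # compatible with the VAE/UNet downsampling.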
-
- @torch.no_grad()
- @replace_example_docstring(EXAMPLE_DOC_STRING)
- def __call__(
- self,
- prompt: Union[str, List[str]] = None,
- image: Union[torch.Tensor, PIL.Image.Image] = None,
- mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
- controlnet_conditioning_image: Union[
- torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]
- ] = None,
- strength: float = 0.8,
- height: Optional[int] = None,
- width: Optional[int] = None,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: int = 1,
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- controlnet_conditioning_scale: float = 1.0,
- ):
- r"""
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
- instead.
- image (`torch.Tensor` or `PIL.Image.Image`):
- `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
- be masked out with `mask_image` and repainted according to `prompt`.
- mask_image (`torch.Tensor` or `PIL.Image.Image`):
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
- repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
- to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
- instead of 3, so the expected shape would be `(B, H, W, 1)`.
- controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
- The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
- the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
- also be accepted as an image. The control image is automatically resized to fit the output image.
- strength (`float`, *optional*):
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
- be maximum and the denoising process will run for the full number of iterations specified in
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The width in pixels of the generated image.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
- [`schedulers.DDIMScheduler`], will be ignored for others.
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
- to make generation deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will be generated by sampling using the supplied random `generator`.
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
- argument.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generated image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
- plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
- cross_attention_kwargs (`dict`, *optional*):
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
- `self.processor` in
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
- controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
- to the residual in the original unet.
-
- Examples:
-
- Returns:
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
- When returning a tuple, the first element is a list with the generated images, and the second element is a
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
- (nsfw) content, according to the `safety_checker`.
- """
- # 0. Default height and width to unet
- height, width = self._default_height_width(height, width, controlnet_conditioning_image)
-
- # 1. Check inputs. Raise error if not correct
- self.check_inputs(
- prompt,
- image,
- mask_image,
- controlnet_conditioning_image,
- height,
- width,
- callback_steps,
- negative_prompt,
- prompt_embeds,
- negative_prompt_embeds,
- strength,
- )
-
- # 2. Define call parameters
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- device = self._execution_device
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # 3. Encode input prompt
- prompt_embeds = self._encode_prompt(
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt,
- prompt_embeds=prompt_embeds,
- negative_prompt_embeds=negative_prompt_embeds,
- )
-
- # 4. Prepare mask, image, and controlnet_conditioning_image
- image = prepare_image(image)
-
- mask_image = prepare_mask_image(mask_image)
-
- controlnet_conditioning_image = prepare_controlnet_conditioning_image(
- controlnet_conditioning_image,
- width,
- height,
- batch_size * num_images_per_prompt,
- num_images_per_prompt,
- device,
- self.controlnet.dtype,
- )
-
- masked_image = image * (mask_image < 0.5)
-
- # 5. Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
-
- # 6. Prepare latent variables
- latents = self.prepare_latents(
- image,
- latent_timestep,
- batch_size,
- num_images_per_prompt,
- prompt_embeds.dtype,
- device,
- generator,
- )
-
- mask_image_latents = self.prepare_mask_latents(
- mask_image,
- batch_size * num_images_per_prompt,
- height,
- width,
- prompt_embeds.dtype,
- device,
- do_classifier_free_guidance,
- )
-
- masked_image_latents = self.prepare_masked_image_latents(
- masked_image,
- batch_size * num_images_per_prompt,
- height,
- width,
- prompt_embeds.dtype,
- device,
- generator,
- do_classifier_free_guidance,
- )
-
- if do_classifier_free_guidance:
- controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
-
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 8. Denoising loop
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- non_inpainting_latent_model_input = (
- torch.cat([latents] * 2) if do_classifier_free_guidance else latents
- )
-
- non_inpainting_latent_model_input = self.scheduler.scale_model_input(
- non_inpainting_latent_model_input, t
- )
-
- inpainting_latent_model_input = torch.cat(
- [non_inpainting_latent_model_input, mask_image_latents, masked_image_latents], dim=1
- )
-
- down_block_res_samples, mid_block_res_sample = self.controlnet(
- non_inpainting_latent_model_input,
- t,
- encoder_hidden_states=prompt_embeds,
- controlnet_cond=controlnet_conditioning_image,
- return_dict=False,
- )
-
- down_block_res_samples = [
- down_block_res_sample * controlnet_conditioning_scale
- for down_block_res_sample in down_block_res_samples
- ]
- mid_block_res_sample *= controlnet_conditioning_scale
-
- # predict the noise residual
- noise_pred = self.unet(
- inpainting_latent_model_input,
- t,
- encoder_hidden_states=prompt_embeds,
- cross_attention_kwargs=cross_attention_kwargs,
- down_block_additional_residuals=down_block_res_samples,
- mid_block_additional_residual=mid_block_res_sample,
- ).sample
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # call the callback, if provided
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
- progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- # If we do sequential model offloading, let's offload unet and controlnet
- # manually for max memory savings
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.unet.to("cpu")
- self.controlnet.to("cpu")
- torch.cuda.empty_cache()
-
- if output_type == "latent":
- image = latents
- has_nsfw_concept = None
- elif output_type == "pil":
- # 8. Post-processing
- image = self.decode_latents(latents)
-
- # 9. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-
- # 10. Convert to PIL
- image = self.numpy_to_pil(image)
- else:
- # 8. Post-processing
- image = self.decode_latents(latents)
-
- # 9. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-
- # Offload last model to CPU
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.final_offload_hook.offload()
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/README.md b/spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/README.md
deleted file mode 100644
index c756507ab772cf40a23d706a591cb7bf7da2e4eb..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Fast R-CNN
-
-## Introduction
-
-[ALGORITHM]
-
-```latex
-@inproceedings{girshick2015fast,
- title={Fast r-cnn},
- author={Girshick, Ross},
- booktitle={Proceedings of the IEEE international conference on computer vision},
- year={2015}
-}
-```
-
-## Results and models
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py
deleted file mode 100644
index 3a2a510689308e556af803968a641dcf2594fe77..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py'
-# learning policy
-lr_config = dict(step=[16, 22])
-runner = dict(type='EpochBasedRunner', max_epochs=24)
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/wider_face/ssd300_wider_face.py b/spaces/Andy1621/uniformer_image_detection/configs/wider_face/ssd300_wider_face.py
deleted file mode 100644
index 5a3eb38df3dc75af176cc6972af88e76124ba4dc..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/wider_face/ssd300_wider_face.py
+++ /dev/null
@@ -1,18 +0,0 @@
-_base_ = [
- '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py',
- '../_base_/default_runtime.py'
-]
-model = dict(bbox_head=dict(num_classes=1))
-# optimizer
-optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=1000,
- warmup_ratio=0.001,
- step=[16, 20])
-# runtime settings
-runner = dict(type='EpochBasedRunner', max_epochs=24)
-log_config = dict(interval=1)
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fast_rcnn.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fast_rcnn.py
deleted file mode 100644
index 3d6e242767b927ed37198b6bc7862abecef99a33..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fast_rcnn.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from ..builder import DETECTORS
-from .two_stage import TwoStageDetector
-
-
-@DETECTORS.register_module()
-class FastRCNN(TwoStageDetector):
- """Implementation of `Fast R-CNN `_"""
-
- def __init__(self,
- backbone,
- roi_head,
- train_cfg,
- test_cfg,
- neck=None,
- pretrained=None):
- super(FastRCNN, self).__init__(
- backbone=backbone,
- neck=neck,
- roi_head=roi_head,
- train_cfg=train_cfg,
- test_cfg=test_cfg,
- pretrained=pretrained)
-
- def forward_test(self, imgs, img_metas, proposals, **kwargs):
- """
- Args:
- imgs (List[Tensor]): the outer list indicates test-time
- augmentations and inner Tensor should have a shape NxCxHxW,
- which contains all images in the batch.
- img_metas (List[List[dict]]): the outer list indicates test-time
- augs (multiscale, flip, etc.) and the inner list indicates
- images in a batch.
- proposals (List[List[Tensor]]): the outer list indicates test-time
- augs (multiscale, flip, etc.) and the inner list indicates
- images in a batch. The Tensor should have a shape Px4, where
- P is the number of proposals.
- """
- for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
- if not isinstance(var, list):
- raise TypeError(f'{name} must be a list, but got {type(var)}')
-
- num_augs = len(imgs)
- if num_augs != len(img_metas):
- raise ValueError(f'num of augmentations ({len(imgs)}) '
- f'!= num of image meta ({len(img_metas)})')
-
- if num_augs == 1:
- return self.simple_test(imgs[0], img_metas[0], proposals[0],
- **kwargs)
- else:
- # TODO: support test-time augmentation
- raise NotImplementedError
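-
- # Rough usage sketch (names are illustrative, not from the original file): for a single
- # test-time augmentation the call looks like
- #   detector.forward_test([batch_imgs_nchw], [[img_meta_dict]], [[proposals_px4]])
- # where the outer lists index augmentations and the inner lists index images in the batch.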
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/pisa_roi_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/pisa_roi_head.py
deleted file mode 100644
index e01113629837eb9c065ba40cd4025899b7bd0172..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/pisa_roi_head.py
+++ /dev/null
@@ -1,159 +0,0 @@
-from mmdet.core import bbox2roi
-from ..builder import HEADS
-from ..losses.pisa_loss import carl_loss, isr_p
-from .standard_roi_head import StandardRoIHead
-
-
-@HEADS.register_module()
-class PISARoIHead(StandardRoIHead):
- r"""The RoI head for `Prime Sample Attention in Object Detection
- `_."""
-
- def forward_train(self,
- x,
- img_metas,
- proposal_list,
- gt_bboxes,
- gt_labels,
- gt_bboxes_ignore=None,
- gt_masks=None):
- """Forward function for training.
-
- Args:
- x (list[Tensor]): List of multi-level img features.
- img_metas (list[dict]): List of image info dict where each dict
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
- For details on the values of these keys see
- `mmdet/datasets/pipelines/formatting.py:Collect`.
- proposal_list (list[Tensors]): List of region proposals.
- gt_bboxes (list[Tensor]): Each item are the truth boxes for each
- image in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): Class indices corresponding to each box
- gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
- boxes can be ignored when computing the loss.
- gt_masks (None | Tensor) : True segmentation masks for each box
- used if the architecture supports a segmentation task.
-
- Returns:
- dict[str, Tensor]: a dictionary of loss components
- """
- # assign gts and sample proposals
- if self.with_bbox or self.with_mask:
- num_imgs = len(img_metas)
- if gt_bboxes_ignore is None:
- gt_bboxes_ignore = [None for _ in range(num_imgs)]
- sampling_results = []
- neg_label_weights = []
- for i in range(num_imgs):
- assign_result = self.bbox_assigner.assign(
- proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
- gt_labels[i])
- sampling_result = self.bbox_sampler.sample(
- assign_result,
- proposal_list[i],
- gt_bboxes[i],
- gt_labels[i],
- feats=[lvl_feat[i][None] for lvl_feat in x])
- # neg label weight is obtained by sampling when using ISR-N
- neg_label_weight = None
- if isinstance(sampling_result, tuple):
- sampling_result, neg_label_weight = sampling_result
- sampling_results.append(sampling_result)
- neg_label_weights.append(neg_label_weight)
-
- losses = dict()
- # bbox head forward and loss
- if self.with_bbox:
- bbox_results = self._bbox_forward_train(
- x,
- sampling_results,
- gt_bboxes,
- gt_labels,
- img_metas,
- neg_label_weights=neg_label_weights)
- losses.update(bbox_results['loss_bbox'])
-
- # mask head forward and loss
- if self.with_mask:
- mask_results = self._mask_forward_train(x, sampling_results,
- bbox_results['bbox_feats'],
- gt_masks, img_metas)
- losses.update(mask_results['loss_mask'])
-
- return losses
-
- def _bbox_forward(self, x, rois):
- """Box forward function used in both training and testing."""
- # TODO: a more flexible way to decide which feature maps to use
- bbox_feats = self.bbox_roi_extractor(
- x[:self.bbox_roi_extractor.num_inputs], rois)
- if self.with_shared_head:
- bbox_feats = self.shared_head(bbox_feats)
- cls_score, bbox_pred = self.bbox_head(bbox_feats)
-
- bbox_results = dict(
- cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
- return bbox_results
-
- def _bbox_forward_train(self,
- x,
- sampling_results,
- gt_bboxes,
- gt_labels,
- img_metas,
- neg_label_weights=None):
- """Run forward function and calculate loss for box head in training."""
- rois = bbox2roi([res.bboxes for res in sampling_results])
-
- bbox_results = self._bbox_forward(x, rois)
-
- bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
- gt_labels, self.train_cfg)
-
- # neg_label_weights obtained by sampler is image-wise, mapping back to
- # the corresponding location in label weights
- if neg_label_weights[0] is not None:
- label_weights = bbox_targets[1]
- cur_num_rois = 0
- for i in range(len(sampling_results)):
- num_pos = sampling_results[i].pos_inds.size(0)
- num_neg = sampling_results[i].neg_inds.size(0)
- label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +
- num_neg] = neg_label_weights[i]
- cur_num_rois += num_pos + num_neg
-
- cls_score = bbox_results['cls_score']
- bbox_pred = bbox_results['bbox_pred']
-
- # Apply ISR-P
- isr_cfg = self.train_cfg.get('isr', None)
- if isr_cfg is not None:
- bbox_targets = isr_p(
- cls_score,
- bbox_pred,
- bbox_targets,
- rois,
- sampling_results,
- self.bbox_head.loss_cls,
- self.bbox_head.bbox_coder,
- **isr_cfg,
- num_class=self.bbox_head.num_classes)
- loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
- *bbox_targets)
-
- # Add CARL Loss
- carl_cfg = self.train_cfg.get('carl', None)
- if carl_cfg is not None:
- loss_carl = carl_loss(
- cls_score,
- bbox_targets[0],
- bbox_pred,
- bbox_targets[2],
- self.bbox_head.loss_bbox,
- **carl_cfg,
- num_class=self.bbox_head.num_classes)
- loss_bbox.update(loss_carl)
-
- bbox_results.update(loss_bbox=loss_bbox)
- return bbox_results
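-
- # Configuration sketch (keys follow the PISA paper's terminology; the exact values are
- # illustrative assumptions, not taken from this file): ISR-P and CARL are enabled by
- # adding entries to this RoI head's train_cfg, e.g.
- #   train_cfg = dict(isr=dict(k=2, bias=0), carl=dict(k=1, bias=0.2), ...)
- # and both are skipped entirely when those keys are absent, as the `get` calls above show.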
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/encnet_r50-d8.py b/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/encnet_r50-d8.py
deleted file mode 100644
index be777123a886503172a95fe0719e956a147bbd68..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/encnet_r50-d8.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='EncHead',
- in_channels=[512, 1024, 2048],
- in_index=(1, 2, 3),
- channels=512,
- num_codes=32,
- use_se_loss=True,
- add_lateral=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_se_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
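-
-# --- Illustrative usage sketch (not part of the original config) ---
-# With the mmsegmentation 0.x API this file is typically consumed roughly as
-# follows; the import paths and file path below are assumptions, not taken
-# from this repository.
-#
-#   from mmcv import Config
-#   from mmseg.models import build_segmentor
-#
-#   cfg = Config.fromfile('configs/_base_/models/encnet_r50-d8.py')
-#   model = build_segmentor(cfg.model)  # EncoderDecoder with EncHead + auxiliary FCNHead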
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py
deleted file mode 100644
index a9bab32b52ca41155062c7655986ed84677a8280..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = './fcn_hr18_512x1024_80k_cityscapes.py'
-model = dict(
- pretrained='open-mmlab://msra/hrnetv2_w48',
- backbone=dict(
- extra=dict(
- stage2=dict(num_channels=(48, 96)),
- stage3=dict(num_channels=(48, 96, 192)),
- stage4=dict(num_channels=(48, 96, 192, 384)))),
- decode_head=dict(
- in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))
diff --git a/spaces/Aniquel/WizApp/README.md b/spaces/Aniquel/WizApp/README.md
deleted file mode 100644
index c35ada5d3aebf480928d778d9b54d9736ea783dc..0000000000000000000000000000000000000000
--- a/spaces/Aniquel/WizApp/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: WizApp
-emoji: ⚡
-colorFrom: yellow
-colorTo: red
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
-license: gpl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/RWKV-model.md b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/RWKV-model.md
deleted file mode 100644
index 88f13fa56e0567bf3442b21c1d2a1cdd56d29647..0000000000000000000000000000000000000000
--- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/RWKV-model.md
+++ /dev/null
@@ -1,72 +0,0 @@
-> RWKV: RNN with Transformer-level LLM Performance
->
-> It combines the best of RNN and transformer - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding (using the final hidden state).
-
-https://github.com/BlinkDL/RWKV-LM
-
-https://github.com/BlinkDL/ChatRWKV
-
-## Using RWKV in the web UI
-
-### Hugging Face weights
-
-Simply download the weights from https://huggingface.co/RWKV and load them as you would for any other model.
-
-There is a bug in transformers==4.29.2 that prevents RWKV from being loaded in 8-bit mode. You can install the dev branch to solve this bug: `pip install git+https://github.com/huggingface/transformers`
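-
-As a rough sketch of that path (the repository name below is an example; substitute whichever RWKV checkpoint you actually downloaded):
-
-```
-from transformers import AutoTokenizer, AutoModelForCausalLM
-
-model_id = "RWKV/rwkv-4-169m-pile"  # example repo id, pick the size you want
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(model_id)
-
-inputs = tokenizer("Hello, my name is", return_tensors="pt")
-print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
-```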
-
-### Original .pth weights
-
-The instructions below are from before RWKV was supported in transformers, and they are kept for legacy purposes. The old implementation is possibly faster, but it lacks the full range of samplers that the transformers library offers.
-
-#### 0. Install the RWKV library
-
-```
-pip install rwkv
-```
-
-`0.7.3` was the last version that I tested. If you experience any issues, try ```pip install rwkv==0.7.3```.
-
-#### 1. Download the model
-
-It is available in different sizes:
-
-* https://huggingface.co/BlinkDL/rwkv-4-pile-3b/
-* https://huggingface.co/BlinkDL/rwkv-4-pile-7b/
-* https://huggingface.co/BlinkDL/rwkv-4-pile-14b/
-
-There are also older releases with smaller sizes like:
-
-* https://huggingface.co/BlinkDL/rwkv-4-pile-169m/resolve/main/RWKV-4-Pile-169M-20220807-8023.pth
-
-Download the chosen `.pth` and put it directly in the `models` folder.
-
-#### 2. Download the tokenizer
-
-[20B_tokenizer.json](https://raw.githubusercontent.com/BlinkDL/ChatRWKV/main/v2/20B_tokenizer.json)
-
-Also put it directly in the `models` folder. Make sure to not rename it. It should be called `20B_tokenizer.json`.
-
-#### 3. Launch the web UI
-
-No additional steps are required. Just launch it as you would with any other model.
-
-```
-python server.py --listen --no-stream --model RWKV-4-Pile-169M-20220807-8023.pth
-```
-
-#### Setting a custom strategy
-
-It is possible to have very fine control over the offloading and precision for the model with the `--rwkv-strategy` flag. Possible values include:
-
-```
-"cpu fp32" # CPU mode
-"cuda fp16" # GPU mode with float16 precision
-"cuda fp16 *30 -> cpu fp32" # GPU+CPU offloading. The higher the number after *, the higher the GPU allocation.
-"cuda fp16i8" # GPU mode with 8-bit precision
-```
-
-See the README for the PyPI package for more details: https://pypi.org/project/rwkv/
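-
-For reference, the same strategy string is what the `rwkv` library itself accepts when used directly. A minimal sketch, assuming `rwkv==0.7.3` and the example file name from step 1 (path handling may differ slightly between library versions):
-
-```
-import os
-os.environ["RWKV_JIT_ON"] = "1"   # as in the upstream examples
-os.environ["RWKV_CUDA_ON"] = "0"  # set to "1" only if the custom CUDA kernel is built
-
-from rwkv.model import RWKV
-
-model = RWKV(model="models/RWKV-4-Pile-169M-20220807-8023.pth",
-             strategy="cuda fp16 *30 -> cpu fp32")
-```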
-
-#### Compiling the CUDA kernel
-
-You can compile the CUDA kernel for the model with `--rwkv-cuda-on`. This should improve the performance a lot but I haven't been able to get it to work yet.
diff --git a/spaces/AnonAndDesu/Desu_Proxy/Dockerfile b/spaces/AnonAndDesu/Desu_Proxy/Dockerfile
deleted file mode 100644
index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000
--- a/spaces/AnonAndDesu/Desu_Proxy/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM node:18-bullseye-slim
-RUN apt-get update && \
- apt-get install -y git
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-WORKDIR /app
-RUN npm install
-COPY Dockerfile greeting.md* .env* ./
-RUN npm run build
-EXPOSE 7860
-ENV NODE_ENV=production
-CMD [ "npm", "start" ]
diff --git a/spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/geometry.py b/spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/geometry.py
deleted file mode 100644
index 207e98fded56c0e7e63d63626ddace65b910bf9c..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/geometry.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-
-def coords_grid(b, h, w, homogeneous=False, device=None):
- y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W]
-
- stacks = [x, y]
-
- if homogeneous:
- ones = torch.ones_like(x) # [H, W]
- stacks.append(ones)
-
- grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W]
-
- grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W]
-
- if device is not None:
- grid = grid.to(device)
-
- return grid
-
-
-def generate_window_grid(h_min, h_max, w_min, w_max, len_h, len_w, device=None):
- assert device is not None
-
- x, y = torch.meshgrid([torch.linspace(w_min, w_max, len_w, device=device),
- torch.linspace(h_min, h_max, len_h, device=device)],
- )
- grid = torch.stack((x, y), -1).transpose(0, 1).float() # [H, W, 2]
-
- return grid
-
-
-def normalize_coords(coords, h, w):
- # coords: [B, H, W, 2]
- c = torch.Tensor([(w - 1) / 2., (h - 1) / 2.]).float().to(coords.device)
- return (coords - c) / c # [-1, 1]
-
-
-def bilinear_sample(img, sample_coords, mode='bilinear', padding_mode='zeros', return_mask=False):
- # img: [B, C, H, W]
- # sample_coords: [B, 2, H, W] in image scale
- if sample_coords.size(1) != 2: # [B, H, W, 2]
- sample_coords = sample_coords.permute(0, 3, 1, 2)
-
- b, _, h, w = sample_coords.shape
-
- # Normalize to [-1, 1]
- x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1
- y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1
-
- grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2]
-
- img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True)
-
- if return_mask:
- mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1) # [B, H, W]
-
- return img, mask
-
- return img
-
-
-def flow_warp(feature, flow, mask=False, padding_mode='zeros'):
- b, c, h, w = feature.size()
- assert flow.size(1) == 2
-
- grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W]
-
- return bilinear_sample(feature, grid, padding_mode=padding_mode,
- return_mask=mask)
-
-
-def forward_backward_consistency_check(fwd_flow, bwd_flow,
- alpha=0.01,
- beta=0.5
- ):
- # fwd_flow, bwd_flow: [B, 2, H, W]
- # alpha and beta values are following UnFlow (https://arxiv.org/abs/1711.07837)
- assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4
- assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2
- flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W]
-
- warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W]
- warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W]
-
- diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W]
- diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1)
-
- threshold = alpha * flow_mag + beta
-
- fwd_occ = (diff_fwd > threshold).float() # [B, H, W]
- bwd_occ = (diff_bwd > threshold).float()
-
- return fwd_occ, bwd_occ
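-
-
-# --- Minimal self-check sketch (not part of the original file) ---
-# Warping a feature map with an all-zero flow should return it unchanged, and a
-# zero forward/backward flow pair should produce no occlusions.
-if __name__ == '__main__':
-    feat = torch.randn(1, 8, 16, 16)
-    zero_flow = torch.zeros(1, 2, 16, 16)
-
-    warped = flow_warp(feat, zero_flow)
-    assert torch.allclose(warped, feat, atol=1e-5)
-
-    fwd_occ, bwd_occ = forward_backward_consistency_check(zero_flow, zero_flow)
-    assert fwd_occ.sum() == 0 and bwd_occ.sum() == 0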
diff --git a/spaces/Ariharasudhan/YoloV5/utils/aws/resume.py b/spaces/Ariharasudhan/YoloV5/utils/aws/resume.py
deleted file mode 100644
index b21731c979a121ab8227280351b70d6062efd983..0000000000000000000000000000000000000000
--- a/spaces/Ariharasudhan/YoloV5/utils/aws/resume.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Resume all interrupted trainings in yolov5/ dir including DDP trainings
-# Usage: $ python utils/aws/resume.py
-
-import os
-import sys
-from pathlib import Path
-
-import torch
-import yaml
-
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[2] # YOLOv5 root directory
-if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
-
-port = 0 # --master_port
-path = Path('').resolve()
-for last in path.rglob('*/**/last.pt'):
- ckpt = torch.load(last)
- if ckpt['optimizer'] is None:
- continue
-
- # Load opt.yaml
- with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
- opt = yaml.safe_load(f)
-
- # Get device count
- d = opt['device'].split(',') # devices
- nd = len(d) # number of devices
- ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
-
- if ddp: # multi-GPU
- port += 1
- cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
- else: # single-GPU
- cmd = f'python train.py --resume {last}'
-
- cmd += ' > /dev/null 2>&1 &' # redirect output to /dev/null and run in the background
- print(cmd)
- os.system(cmd)
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel.py
deleted file mode 100644
index 064811ad11bb07b2b7bc8e30ec6c03f21997d6b2..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import logging
-import os
-from typing import Optional
-
-from pip._vendor.pyproject_hooks import BuildBackendHookCaller
-
-from pip._internal.utils.subprocess import runner_with_spinner_message
-
-logger = logging.getLogger(__name__)
-
-
-def build_wheel_pep517(
- name: str,
- backend: BuildBackendHookCaller,
- metadata_directory: str,
- tempd: str,
-) -> Optional[str]:
- """Build one InstallRequirement using the PEP 517 build process.
-
- Returns path to wheel if successfully built. Otherwise, returns None.
- """
- assert metadata_directory is not None
- try:
- logger.debug("Destination directory: %s", tempd)
-
- runner = runner_with_spinner_message(
- f"Building wheel for {name} (pyproject.toml)"
- )
- with backend.subprocess_runner(runner):
- wheel_name = backend.build_wheel(
- tempd,
- metadata_directory=metadata_directory,
- )
- except Exception:
- logger.error("Failed building wheel for %s", name)
- return None
- return os.path.join(tempd, wheel_name)
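-
-
-# --- Illustrative call sketch (not part of the original file) ---
-# Callers construct the PEP 517 hook wrapper for the source tree and hand it to
-# build_wheel_pep517; the directory names and backend string below are
-# placeholders, not real pip internals.
-#
-#   backend = BuildBackendHookCaller("/path/to/srcdir", "setuptools.build_meta")
-#   wheel_path = build_wheel_pep517(
-#       name="example-pkg",
-#       backend=backend,
-#       metadata_directory="/tmp/meta",
-#       tempd="/tmp/wheelbuild",
-#   )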
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__init__.py
deleted file mode 100644
index 7ecf7eee35fb95b726f035fac9793055dd94ade8..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__init__.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""
- pygments.formatters
- ~~~~~~~~~~~~~~~~~~~
-
- Pygments formatters.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys
-import types
-from fnmatch import fnmatch
-from os.path import basename
-
-from pip._vendor.pygments.formatters._mapping import FORMATTERS
-from pip._vendor.pygments.plugin import find_plugin_formatters
-from pip._vendor.pygments.util import ClassNotFound
-
-__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
- 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
-
-_formatter_cache = {} # classes by name
-
-def _load_formatters(module_name):
- """Load a formatter (and all others in the module too)."""
- mod = __import__(module_name, None, None, ['__all__'])
- for formatter_name in mod.__all__:
- cls = getattr(mod, formatter_name)
- _formatter_cache[cls.name] = cls
-
-
-def get_all_formatters():
- """Return a generator for all formatter classes."""
- # NB: this returns formatter classes, not info like get_all_lexers().
- for info in FORMATTERS.values():
- if info[1] not in _formatter_cache:
- _load_formatters(info[0])
- yield _formatter_cache[info[1]]
- for _, formatter in find_plugin_formatters():
- yield formatter
-
-
-def find_formatter_class(alias):
- """Lookup a formatter by alias.
-
- Returns None if not found.
- """
- for module_name, name, aliases, _, _ in FORMATTERS.values():
- if alias in aliases:
- if name not in _formatter_cache:
- _load_formatters(module_name)
- return _formatter_cache[name]
- for _, cls in find_plugin_formatters():
- if alias in cls.aliases:
- return cls
-
-
-def get_formatter_by_name(_alias, **options):
- """Lookup and instantiate a formatter by alias.
-
- Raises ClassNotFound if not found.
- """
- cls = find_formatter_class(_alias)
- if cls is None:
- raise ClassNotFound("no formatter found for name %r" % _alias)
- return cls(**options)
-
-
-def load_formatter_from_file(filename, formattername="CustomFormatter",
- **options):
- """Load a formatter from a file.
-
- This method expects a file located relative to the current working
- directory, which contains a class named CustomFormatter. By default,
- it expects the Formatter to be named CustomFormatter; you can specify
- your own class name as the second argument to this function.
-
- Users should be very careful with the input, because this method
- is equivalent to running eval on the input file.
-
- Raises ClassNotFound if there are any problems importing the Formatter.
-
- .. versionadded:: 2.2
- """
- try:
- # This empty dict will contain the namespace for the exec'd file
- custom_namespace = {}
- with open(filename, 'rb') as f:
- exec(f.read(), custom_namespace)
- # Retrieve the class `formattername` from that namespace
- if formattername not in custom_namespace:
- raise ClassNotFound('no valid %s class found in %s' %
- (formattername, filename))
- formatter_class = custom_namespace[formattername]
- # And finally instantiate it with the options
- return formatter_class(**options)
- except OSError as err:
- raise ClassNotFound('cannot read %s: %s' % (filename, err))
- except ClassNotFound:
- raise
- except Exception as err:
- raise ClassNotFound('error when loading custom formatter: %s' % err)
-
-
-def get_formatter_for_filename(fn, **options):
- """Lookup and instantiate a formatter by filename pattern.
-
- Raises ClassNotFound if not found.
- """
- fn = basename(fn)
- for modname, name, _, filenames, _ in FORMATTERS.values():
- for filename in filenames:
- if fnmatch(fn, filename):
- if name not in _formatter_cache:
- _load_formatters(modname)
- return _formatter_cache[name](**options)
- for cls in find_plugin_formatters():
- for filename in cls.filenames:
- if fnmatch(fn, filename):
- return cls(**options)
- raise ClassNotFound("no formatter found for file name %r" % fn)
-
-
-class _automodule(types.ModuleType):
- """Automatically import formatters."""
-
- def __getattr__(self, name):
- info = FORMATTERS.get(name)
- if info:
- _load_formatters(info[0])
- cls = _formatter_cache[info[1]]
- setattr(self, name, cls)
- return cls
- raise AttributeError(name)
-
-
-oldmod = sys.modules[__name__]
-newmod = _automodule(__name__)
-newmod.__dict__.update(oldmod.__dict__)
-sys.modules[__name__] = newmod
-del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
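-
-
-# --- Illustrative usage sketch (not part of the original file) ---
-# The lookup helpers above are normally combined with a lexer and
-# pygments.highlight(); outside of pip this would be the plain `pygments`
-# package rather than the vendored copy.
-#
-#   from pip._vendor.pygments import highlight
-#   from pip._vendor.pygments.lexers import PythonLexer
-#   from pip._vendor.pygments.formatters import get_formatter_by_name
-#
-#   formatter = get_formatter_by_name("terminal")
-#   print(highlight("print('hi')", PythonLexer(), formatter))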
diff --git a/spaces/AvaterClasher/Food_Classifier_Moni/model.py b/spaces/AvaterClasher/Food_Classifier_Moni/model.py
deleted file mode 100644
index 52c2696c874740179528f0bdae8ce87b774a138f..0000000000000000000000000000000000000000
--- a/spaces/AvaterClasher/Food_Classifier_Moni/model.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import torch
-import torchvision
-
-from torch import nn
-
-
-def create_effnetb2_model(num_classes:int=3,
- seed:int=42):
- """Creates an EfficientNetB2 feature extractor model and transforms.
-
- Args:
- num_classes (int, optional): number of classes in the classifier head.
- Defaults to 3.
- seed (int, optional): random seed value. Defaults to 42.
-
- Returns:
- model (torch.nn.Module): EffNetB2 feature extractor model.
- transforms (torchvision.transforms): EffNetB2 image transforms.
- """
- # Create EffNetB2 pretrained weights, transforms and model
- weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
- transforms = weights.transforms()
- model = torchvision.models.efficientnet_b2(weights=weights)
-
- # Freeze all layers in base model
- for param in model.parameters():
- param.requires_grad = False
-
- # Change classifier head with random seed for reproducibility
- torch.manual_seed(seed)
- model.classifier = nn.Sequential(
- nn.Dropout(p=0.3, inplace=True),
- nn.Linear(in_features=1408, out_features=num_classes),
- )
-
- return model, transforms
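-
-
-# --- Illustrative inference sketch (not part of the original file); the class
-# count and the random input stand in for real data. ---
-if __name__ == "__main__":
-    model, transforms = create_effnetb2_model(num_classes=3)
-    model.eval()
-
-    dummy_image = torch.rand(3, 288, 288)         # stand-in for a real image tensor
-    batch = transforms(dummy_image).unsqueeze(0)   # EffNetB2 preprocessing + batch dim
-    with torch.inference_mode():
-        probs = torch.softmax(model(batch), dim=1)
-    print(probs.shape)  # torch.Size([1, 3])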
diff --git a/spaces/Awesimo/jojogan/e4e/criteria/moco_loss.py b/spaces/Awesimo/jojogan/e4e/criteria/moco_loss.py
deleted file mode 100644
index 8fb13fbd426202cff9014c876c85b0d5c4ec6a9d..0000000000000000000000000000000000000000
--- a/spaces/Awesimo/jojogan/e4e/criteria/moco_loss.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from configs.paths_config import model_paths
-
-
-class MocoLoss(nn.Module):
-
- def __init__(self, opts):
- super(MocoLoss, self).__init__()
- print("Loading MOCO model from path: {}".format(model_paths["moco"]))
- self.model = self.__load_model()
- self.model.eval()
- for param in self.model.parameters():
- param.requires_grad = False
-
- @staticmethod
- def __load_model():
- import torchvision.models as models
- model = models.__dict__["resnet50"]()
- # freeze all layers but the last fc
- for name, param in model.named_parameters():
- if name not in ['fc.weight', 'fc.bias']:
- param.requires_grad = False
- checkpoint = torch.load(model_paths['moco'], map_location="cpu")
- state_dict = checkpoint['state_dict']
- # rename moco pre-trained keys
- for k in list(state_dict.keys()):
- # retain only encoder_q up to before the embedding layer
- if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
- # remove prefix
- state_dict[k[len("module.encoder_q."):]] = state_dict[k]
- # delete renamed or unused k
- del state_dict[k]
- msg = model.load_state_dict(state_dict, strict=False)
- assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
- # remove output layer
- model = nn.Sequential(*list(model.children())[:-1]).cuda()
- return model
-
- def extract_feats(self, x):
- x = F.interpolate(x, size=224)
- x_feats = self.model(x)
- x_feats = nn.functional.normalize(x_feats, dim=1)
- x_feats = x_feats.squeeze()
- return x_feats
-
- def forward(self, y_hat, y, x):
- n_samples = x.shape[0]
- x_feats = self.extract_feats(x)
- y_feats = self.extract_feats(y)
- y_hat_feats = self.extract_feats(y_hat)
- y_feats = y_feats.detach()
- loss = 0
- sim_improvement = 0
- sim_logs = []
- count = 0
- for i in range(n_samples):
- diff_target = y_hat_feats[i].dot(y_feats[i])
- diff_input = y_hat_feats[i].dot(x_feats[i])
- diff_views = y_feats[i].dot(x_feats[i])
- sim_logs.append({'diff_target': float(diff_target),
- 'diff_input': float(diff_input),
- 'diff_views': float(diff_views)})
- loss += 1 - diff_target
- sim_diff = float(diff_target) - float(diff_views)
- sim_improvement += sim_diff
- count += 1
-
- return loss / count, sim_improvement / count, sim_logs
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/rotated_coco_evaluation.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/rotated_coco_evaluation.py
deleted file mode 100644
index ea6d1b381dcf106339a03f08577df673ad439c46..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/evaluation/rotated_coco_evaluation.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import itertools
-import json
-import numpy as np
-import os
-import torch
-from pycocotools.cocoeval import COCOeval, maskUtils
-
-from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated
-from detectron2.utils.file_io import PathManager
-
-from .coco_evaluation import COCOEvaluator
-
-
-class RotatedCOCOeval(COCOeval):
- @staticmethod
- def is_rotated(box_list):
- if type(box_list) == np.ndarray:
- return box_list.shape[1] == 5
- elif type(box_list) == list:
- if box_list == []: # cannot decide the box_dim
- return False
- return np.all(
- np.array(
- [
- (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray))
- for obj in box_list
- ]
- )
- )
- return False
-
- @staticmethod
- def boxlist_to_tensor(boxlist, output_box_dim):
- if type(boxlist) == np.ndarray:
- box_tensor = torch.from_numpy(boxlist)
- elif type(boxlist) == list:
- if boxlist == []:
- return torch.zeros((0, output_box_dim), dtype=torch.float32)
- else:
- box_tensor = torch.FloatTensor(boxlist)
- else:
- raise Exception("Unrecognized boxlist type")
-
- input_box_dim = box_tensor.shape[1]
- if input_box_dim != output_box_dim:
- if input_box_dim == 4 and output_box_dim == 5:
- box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
- else:
- raise Exception(
- "Unable to convert from {}-dim box to {}-dim box".format(
- input_box_dim, output_box_dim
- )
- )
- return box_tensor
-
- def compute_iou_dt_gt(self, dt, gt, is_crowd):
- if self.is_rotated(dt) or self.is_rotated(gt):
- # TODO: take is_crowd into consideration
- assert all(c == 0 for c in is_crowd)
- dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5))
- gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5))
- return pairwise_iou_rotated(dt, gt)
- else:
- # This is the same as the classical COCO evaluation
- return maskUtils.iou(dt, gt, is_crowd)
-
- def computeIoU(self, imgId, catId):
- p = self.params
- if p.useCats:
- gt = self._gts[imgId, catId]
- dt = self._dts[imgId, catId]
- else:
- gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
- dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
- if len(gt) == 0 and len(dt) == 0:
- return []
- inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
- dt = [dt[i] for i in inds]
- if len(dt) > p.maxDets[-1]:
- dt = dt[0 : p.maxDets[-1]]
-
- assert p.iouType == "bbox", "unsupported iouType for iou computation"
-
- g = [g["bbox"] for g in gt]
- d = [d["bbox"] for d in dt]
-
- # compute iou between each dt and gt region
- iscrowd = [int(o["iscrowd"]) for o in gt]
-
- # Note: this function is copied from cocoeval.py in cocoapi
- # and the major difference is here.
- ious = self.compute_iou_dt_gt(d, g, iscrowd)
- return ious
-
-
-class RotatedCOCOEvaluator(COCOEvaluator):
- """
- Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs,
- with rotated boxes support.
- Note: this uses IOU only and does not consider angle differences.
- """
-
- def process(self, inputs, outputs):
- """
- Args:
- inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
- It is a list of dict. Each dict corresponds to an image and
- contains keys like "height", "width", "file_name", "image_id".
- outputs: the outputs of a COCO model. It is a list of dicts with key
- "instances" that contains :class:`Instances`.
- """
- for input, output in zip(inputs, outputs):
- prediction = {"image_id": input["image_id"]}
-
- if "instances" in output:
- instances = output["instances"].to(self._cpu_device)
-
- prediction["instances"] = self.instances_to_json(instances, input["image_id"])
- if "proposals" in output:
- prediction["proposals"] = output["proposals"].to(self._cpu_device)
- self._predictions.append(prediction)
-
- def instances_to_json(self, instances, img_id):
- num_instance = len(instances)
- if num_instance == 0:
- return []
-
- boxes = instances.pred_boxes.tensor.numpy()
- if boxes.shape[1] == 4:
- boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
- boxes = boxes.tolist()
- scores = instances.scores.tolist()
- classes = instances.pred_classes.tolist()
-
- results = []
- for k in range(num_instance):
- result = {
- "image_id": img_id,
- "category_id": classes[k],
- "bbox": boxes[k],
- "score": scores[k],
- }
-
- results.append(result)
- return results
-
- def _eval_predictions(self, predictions, img_ids=None): # img_ids: unused
- """
- Evaluate predictions on the given tasks.
- Fill self._results with the metrics of the tasks.
- """
- self._logger.info("Preparing results for COCO format ...")
- coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
-
- # unmap the category ids for COCO
- if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
- reverse_id_mapping = {
- v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
- }
- for result in coco_results:
- result["category_id"] = reverse_id_mapping[result["category_id"]]
-
- if self._output_dir:
- file_path = os.path.join(self._output_dir, "coco_instances_results.json")
- self._logger.info("Saving results to {}".format(file_path))
- with PathManager.open(file_path, "w") as f:
- f.write(json.dumps(coco_results))
- f.flush()
-
- if not self._do_evaluation:
- self._logger.info("Annotations are not available for evaluation.")
- return
-
- self._logger.info("Evaluating predictions ...")
-
- assert self._tasks is None or set(self._tasks) == {
- "bbox"
- }, "[RotatedCOCOEvaluator] Only bbox evaluation is supported"
- coco_eval = (
- self._evaluate_predictions_on_coco(self._coco_api, coco_results)
- if len(coco_results) > 0
- else None # cocoapi does not handle empty results very well
- )
-
- task = "bbox"
- res = self._derive_coco_results(
- coco_eval, task, class_names=self._metadata.get("thing_classes")
- )
- self._results[task] = res
-
- def _evaluate_predictions_on_coco(self, coco_gt, coco_results):
- """
- Evaluate the coco results using COCOEval API.
- """
- assert len(coco_results) > 0
-
- coco_dt = coco_gt.loadRes(coco_results)
-
- # Only bbox is supported for now
- coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox")
-
- coco_eval.evaluate()
- coco_eval.accumulate()
- coco_eval.summarize()
-
- return coco_eval
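-
-
-# --- Illustrative usage sketch (not part of the original file) ---
-# The evaluator plugs into detectron2's standard evaluation loop; the dataset
-# name, config and model below are placeholders.
-#
-#   from detectron2.data import build_detection_test_loader
-#   from detectron2.evaluation import inference_on_dataset
-#
-#   evaluator = RotatedCOCOEvaluator("my_rotated_val", output_dir="./output")
-#   loader = build_detection_test_loader(cfg, "my_rotated_val")
-#   results = inference_on_dataset(model, loader, evaluator)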
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/visualize_json_results.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/visualize_json_results.py
deleted file mode 100644
index 472190e0b3b38b55773795915badbb5bc4599d42..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/visualize_json_results.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import argparse
-import json
-import numpy as np
-import os
-from collections import defaultdict
-import cv2
-import tqdm
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.structures import Boxes, BoxMode, Instances
-from detectron2.utils.file_io import PathManager
-from detectron2.utils.logger import setup_logger
-from detectron2.utils.visualizer import Visualizer
-
-
-def create_instances(predictions, image_size):
- ret = Instances(image_size)
-
- score = np.asarray([x["score"] for x in predictions])
- chosen = (score > args.conf_threshold).nonzero()[0]
- score = score[chosen]
- bbox = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4)
- bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
-
- labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen])
-
- ret.scores = score
- ret.pred_boxes = Boxes(bbox)
- ret.pred_classes = labels
-
- try:
- ret.pred_masks = [predictions[i]["segmentation"] for i in chosen]
- except KeyError:
- pass
- return ret
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- description="A script that visualizes the json predictions from COCO or LVIS dataset."
- )
- parser.add_argument("--input", required=True, help="JSON file produced by the model")
- parser.add_argument("--output", required=True, help="output directory")
- parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val")
- parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold")
- args = parser.parse_args()
-
- logger = setup_logger()
-
- with PathManager.open(args.input, "r") as f:
- predictions = json.load(f)
-
- pred_by_image = defaultdict(list)
- for p in predictions:
- pred_by_image[p["image_id"]].append(p)
-
- dicts = list(DatasetCatalog.get(args.dataset))
- metadata = MetadataCatalog.get(args.dataset)
- if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
-
- def dataset_id_map(ds_id):
- return metadata.thing_dataset_id_to_contiguous_id[ds_id]
-
- elif "lvis" in args.dataset:
- # LVIS results are in the same format as COCO results, but have a different
- # mapping from dataset category id to contiguous category id in [0, #categories - 1]
- def dataset_id_map(ds_id):
- return ds_id - 1
-
- else:
- raise ValueError("Unsupported dataset: {}".format(args.dataset))
-
- os.makedirs(args.output, exist_ok=True)
-
- for dic in tqdm.tqdm(dicts):
- img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1]
- basename = os.path.basename(dic["file_name"])
-
- predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2])
- vis = Visualizer(img, metadata)
- vis_pred = vis.draw_instance_predictions(predictions).get_image()
-
- vis = Visualizer(img, metadata)
- vis_gt = vis.draw_dataset_dict(dic).get_image()
-
- concat = np.concatenate((vis_pred, vis_gt), axis=1)
- cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])
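-
-
-# --- Illustrative invocation (not part of the original file); paths are examples:
-#   python visualize_json_results.py --input output/coco_instances_results.json \
-#       --output viz/ --dataset coco_2017_val --conf-threshold 0.5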
diff --git a/spaces/Bart92/RVC_HF/tools/calc_rvc_model_similarity.py b/spaces/Bart92/RVC_HF/tools/calc_rvc_model_similarity.py
deleted file mode 100644
index 42496e088e51dc5162d0714470c2226f696e260c..0000000000000000000000000000000000000000
--- a/spaces/Bart92/RVC_HF/tools/calc_rvc_model_similarity.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# This code references https://huggingface.co/JosephusCheung/ASimilarityCalculatior/blob/main/qwerty.py
-# Fill in the path of the model to be queried and the root directory of the reference models; the script will then report the similarity between the query model and every reference model.
-import os
-import logging
-
-logger = logging.getLogger(__name__)
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-def cal_cross_attn(to_q, to_k, to_v, rand_input):
- hidden_dim, embed_dim = to_q.shape
- attn_to_q = nn.Linear(hidden_dim, embed_dim, bias=False)
- attn_to_k = nn.Linear(hidden_dim, embed_dim, bias=False)
- attn_to_v = nn.Linear(hidden_dim, embed_dim, bias=False)
- attn_to_q.load_state_dict({"weight": to_q})
- attn_to_k.load_state_dict({"weight": to_k})
- attn_to_v.load_state_dict({"weight": to_v})
-
- return torch.einsum(
- "ik, jk -> ik",
- F.softmax(
- torch.einsum("ij, kj -> ik", attn_to_q(rand_input), attn_to_k(rand_input)),
- dim=-1,
- ),
- attn_to_v(rand_input),
- )
-
-
-def model_hash(filename):
- try:
- with open(filename, "rb") as file:
- import hashlib
-
- m = hashlib.sha256()
-
- file.seek(0x100000)
- m.update(file.read(0x10000))
- return m.hexdigest()[0:8]
- except FileNotFoundError:
- return "NOFILE"
-
-
-def eval(model, n, input):
- qk = f"enc_p.encoder.attn_layers.{n}.conv_q.weight"
- uk = f"enc_p.encoder.attn_layers.{n}.conv_k.weight"
- vk = f"enc_p.encoder.attn_layers.{n}.conv_v.weight"
- atoq, atok, atov = model[qk][:, :, 0], model[uk][:, :, 0], model[vk][:, :, 0]
-
- attn = cal_cross_attn(atoq, atok, atov, input)
- return attn
-
-
-def main(path, root):
- torch.manual_seed(114514)
- model_a = torch.load(path, map_location="cpu")["weight"]
-
- logger.info("Query:\t\t%s\t%s" % (path, model_hash(path)))
-
- map_attn_a = {}
- map_rand_input = {}
- for n in range(6):
- hidden_dim, embed_dim, _ = model_a[
- f"enc_p.encoder.attn_layers.{n}.conv_v.weight"
- ].shape
- rand_input = torch.randn([embed_dim, hidden_dim])
-
- map_attn_a[n] = eval(model_a, n, rand_input)
- map_rand_input[n] = rand_input
-
- del model_a
-
- for name in sorted(list(os.listdir(root))):
- path = "%s/%s" % (root, name)
- model_b = torch.load(path, map_location="cpu")["weight"]
-
- sims = []
- for n in range(6):
- attn_a = map_attn_a[n]
- attn_b = eval(model_b, n, map_rand_input[n])
-
- sim = torch.mean(torch.cosine_similarity(attn_a, attn_b))
- sims.append(sim)
-
- logger.info(
- "Reference:\t%s\t%s\t%s"
- % (path, model_hash(path), f"{torch.mean(torch.stack(sims)) * 1e2:.2f}%")
- )
-
-
-if __name__ == "__main__":
- query_path = r"assets\weights\mi v3.pth"
- reference_root = r"assets\weights"
- main(query_path, reference_root)
diff --git a/spaces/Benson/text-generation/Examples/Choo Choo Charles Juego Completo.md b/spaces/Benson/text-generation/Examples/Choo Choo Charles Juego Completo.md
deleted file mode 100644
index 7a57cc175aa9809c4b2c6aed3842e973f1a00af8..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Choo Choo Charles Juego Completo.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-
-# Choo Choo Charles: A Survival Horror Game with a Spider Train
-
-If you are looking for an exciting and terrifying game that will keep you on the edge of your seat, you may want to check out Choo Choo Charles. This is a survival horror game that pits you against an evil spider train named Charles, who hunts you across an open-world island. You have to use your own train, which you can upgrade and customize, to fight back and survive. In this article, we will tell you everything you need to know about Choo Choo Charles, including what it is, how to play it, and why you should play it.
-
-Choo Choo Charles is a survival horror game released on December 9, 2022 by Two Star Games, an independent game studio based in Canada. The game is available on Steam for Windows PC and has received very positive reviews from players and critics alike. It is inspired by Charlie the Choo-Choo, the 2016 children's book by Stephen King drawn from his Dark Tower series.
-
-## The story and setting of the game
-
-The game takes place on an island inhabited by a monstrous spider train named Charles, who has been terrorizing its people for years. You are one of the survivors who has managed to find an old train that still runs, and you decide to use it to explore the island and find a way to stop Charles. Along the way, you will meet other survivors who offer you missions, items, and information in exchange for your help. You will also uncover the secrets and mysteries of the island and its history.
-
-## The gameplay and features of the game
-
-The game also lets you upgrade your train with various parts and accessories that you can find or buy from other survivors. You can improve your train's speed, durability, fuel efficiency, storage capacity, and more. You can also customize your train's appearance with different colors, decals, flags, horns, lights, and more. You can even name your train and give it a personality.
-
-The game has a day-night cycle and a dynamic weather system that affect the gameplay and atmosphere. During the day you can see more clearly and travel faster, but Charles can also locate you more easily. At night you can hide better and sneak around, but Charles can also surprise you more easily. The weather can shift from sunny to rainy to foggy to stormy, creating different challenges and opportunities.
-
-## The reception and feedback of the game
-
-If you are interested in playing Choo Choo Charles, here are some tips on how to get started and enjoy the game.
-
-## The main quest line and side missions
-
-The game has a main quest that follows your journey to find and stop Charles. You will have to complete various objectives, such as finding clues, recruiting allies, sabotaging Charles's tracks, and facing Charles in epic battles. The main quest line takes you to different locations on the island, such as towns, farms, mines, forests, mountains, and more.
-
-The game also has many side missions you can take on to earn extra rewards such as money, items, parts, weapons, and information. You can find side missions by talking to other survivors, exploring the island, or listening to the radio. Some side missions include helping other survivors with their problems, gathering resources, hunting animals, finding treasure, destroying Charles's minions, and more.
-
-## Train upgrades and weapons
-
-The game lets you upgrade your train with various parts and accessories that you can find or buy from other survivors. You can improve your train's speed, durability, fuel efficiency, storage capacity, and more. You can also customize your train's appearance with different colors, decals, flags, horns, lights, and more. You can even name your train and give it a personality.
-
-The game also lets you equip your train with different weapons that you can use to fight Charles. You can choose from cannons, machine guns, rockets, flamethrowers, and more. Each weapon has its own advantages and disadvantages, such as range, damage, accuracy, reload time, ammo capacity, and more. You have to balance your weapons according to your playstyle and strategy.
-
-## Tips and tricks for surviving Charles
-
-If you are still not convinced that Choo Choo Charles is a game worth playing, here are some reasons why you should give it a chance:
-
-## The unique and original concept of the game
-
-Choo Choo Charles is a game that stands out from the crowd with its unique and original concept. How many games can you think of that feature a giant spider train as the main antagonist? The game is a creative and innovative mix of genres such as survival horror, action-adventure, open world, and sandbox. It offers a fresh and exciting experience that you will not find anywhere else.
-
-## The immersive, atmospheric graphics and sound of the game
-
-Choo Choo Charles is a game that immerses you in its world with stunning graphics and sound. It has a realistic, detailed art style that shows off the beauty and diversity of the island, along with a dynamic lighting and shadow system that creates a dramatic, cinematic effect. The game also has excellent sound design that heightens the mood and atmosphere, with realistic ambient sounds such as wind, rain, birds, and animals, plus a terrifying and thrilling soundtrack that accompanies your encounters with Charles.
-
-## The challenging, rewarding difficulty and replay value of the game
-
-## Conclusion
-
-Choo Choo Charles is a survival horror game that will keep you hooked with its unique and original concept, immersive and atmospheric graphics and sound, challenging and rewarding difficulty, and replay value. If you are looking for an exciting and terrifying game that will make you scream, laugh, cry, and cheer, then you should play Choo Choo Charles.
-
-## Frequently asked questions
-
-Here are some frequently asked questions about Choo Choo Charles:
-
-Q: How long is the game?
-
-A: That depends on how you play, but on average it takes about 10 hours to complete the main quest line. However, there are many side missions and secrets to discover that can extend your playtime.
-
-Q: Is the game multiplayer?
-
-A: No, the game is currently single-player only. However, the developers have said they may consider adding multiplayer features in the future if there is enough demand.
-
-Q: Is the game scary?
-
-A: Yes, the game is very scary. It has plenty of jump scares, gore, violence, suspense, tension, and horror elements that will make you scream or shiver. However, it also has plenty of humor, charm, fun, and adventure that will make you smile or laugh.
-
-Q: Is the game suitable for children?
-
-A: No, the game is not suitable for children. It contains a lot of mature content, such as blood, violence, language, and horror, that is not appropriate for young audiences. The game is rated M for Mature by the ESRB, meaning it is suitable for ages 17 and up.
-
-Q: Where can I buy the game?
-
-A: You can buy the game on Steam for Windows PC. It costs $19.99 USD, but you can get it at a reduced price during sales or promotions. You can also follow the game on Steam to be notified when it goes on sale or is updated.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Cmo Descargar Llamada De Deber Warzone Mvil Apk.md b/spaces/Benson/text-generation/Examples/Cmo Descargar Llamada De Deber Warzone Mvil Apk.md
deleted file mode 100644
index 16cba4d734a925bf6b348a2ef9eaa6040547c35b..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cmo Descargar Llamada De Deber Warzone Mvil Apk.md
+++ /dev/null
@@ -1,165 +0,0 @@
-
-
-
-
-
-
-
-
-
-# How to Download the Call of Duty Warzone Mobile APK
-
-Are you a fan of the Call of Duty franchise and want to experience the thrill of battle royale on your mobile device? If so, you should definitely check out the Call of Duty Warzone Mobile APK, the latest addition to the popular FPS series. Call of Duty Warzone Mobile APK is a standalone game that lets you enjoy the full Warzone experience on your smartphone or tablet. You can play solo or team up with your friends on a massive map of Verdansk, where you have to loot, shoot, and survive against up to 150 players. You can also use contracts, killstreaks, and vehicles to gain an advantage over your enemies. Call of Duty Warzone Mobile APK has not been officially released yet, but you can pre-register now to get early access and exclusive rewards. In this article, we will show you how to download the Call of Duty Warzone Mobile APK from various sources and how to play like a pro. Let's get started!
-
-## How to Pre-Register for the Call of Duty Warzone Mobile APK
-
-One of the easiest ways to get the Call of Duty Warzone Mobile APK is to pre-register on the Google Play Store or the official website. By pre-registering, you will be among the first to be notified when the game is available to download. You will also receive some special rewards, such as skins, weapons, and in-game currency. Here is how to pre-register for the Call of Duty Warzone Mobile APK.
-
-To pre-register on the Google Play Store, follow these steps:
-
-1. Open the Google Play Store app on your device.
-2. Search for "Call of Duty Warzone Mobile" or click on this link.
-3. Tap the "Pre-register" button.
-4. Confirm your pre-registration by tapping "OK".
-5. You will see a message that says "You're registered".
-
-That's it! You have successfully pre-registered for the Call of Duty Warzone Mobile APK on the Google Play Store. You will receive a notification when the game is ready to download.
-
-### How to pre-register on the official website
-
-To pre-register on the official website, follow these steps:
-
-1. Open your browser and go to this link.
-2. Enter your email address and tap "Submit".
-3. You will see a message that says "Thank you for registering".
-4. You will also receive a confirmation email from Activision.
-
-That's it! You have successfully pre-registered for the Call of Duty Warzone Mobile APK on the official website. You will receive an email when the game is available to download.
-
-### What are the benefits of pre-registering?
-
-By pre-registering for the Call of Duty Warzone Mobile APK, you will enjoy several benefits:
-
-- You will be among the first to download and play the game.
-- You will receive exclusive rewards, such as skins, weapons, and in-game currency.
-- You will be able to take part in beta tests and provide feedback to improve the game.
-- You will be able to join the community and share your thoughts and experiences with other players.
-
-So, what are you waiting for? Pre-register now and get ready for the best battle royale experience on your mobile device!
-
-## How to Download the Call of Duty Warzone Mobile APK from Other Sources
-
-If you do not want to wait for the official release of the Call of Duty Warzone Mobile APK, you can also download it from other sources. However, you should be careful when downloading APK files from unknown or untrusted websites, as they may contain malware or viruses that can harm your device. We recommend that you use reliable, trustworthy sources such as Uptodown or APKCombo. Here is how to download the Call of Duty Warzone Mobile APK from these sources.
-
-### How to download from Uptodown
-
-1. Open your browser and go to this link.
-2. Tap the "Download" button.
-3. You will see a pop-up asking you to choose a download method. Tap "Download APK".
-4. The APK file will start downloading automatically.
-5. You can check the download progress in the notification bar.
-
-### How to download from APKCombo
-
-To download from APKCombo, follow these steps:
-
-1. Open your browser and go to this link.
-2. Tap the "Download" button.
-3. You will see a pop-up asking you to choose a download method. Tap "APK Downloader".
-4. You will see a list of available versions. Choose the latest one and tap "Download".
-5. The APK file will start downloading automatically.
-6. You can check the download progress in the notification bar.
-
-### How to install the APK file on your device
-
-After downloading the APK file from either source, you need to install it on your device. To do so, follow these steps:
-
-1. Go to your device settings and enable the "Unknown sources" or "Install unknown apps" option. This allows you to install apps from sources other than the Google Play Store.
-2. Locate the APK file in your file manager or downloads folder and tap on it.
-3. You will see a pop-up asking you to confirm the installation. Tap "Install".
-4. The installation process will take a few seconds.
-5. Once the installation is complete, you will see a message that says "App installed".
-6. Now you can open the app and enjoy the game.
-
-## How to Play Call of Duty Warzone Mobile APK
-
-Now that you have downloaded and installed the Call of Duty Warzone Mobile APK, you are ready to play. Here are some steps to help you get started.
-
-### How to create an account and log in
-
-1. Open the app and tap "Sign Up" or "Log In".
-2. If you have an Activision account, enter your email and password and tap "Log In".
-3. If you do not have an Activision account, tap "Create Account" and fill in the required details. You can also sign up with your Facebook, Google, or Apple account.
-4. Accept the terms of service and privacy policy and tap "Continue".
-5. You will see a message that says "Account created" or "Logged in successfully".
-6. Now you can access the game menu and customize your profile.
-
-### How to customize the settings and controls
-
-Before joining a match, you may want to customize the settings and controls to suit your preferences. You can adjust various options, such as graphics, sound, sensitivity, layout, and more. Here is how:
-
-1. From the game menu, tap the gear icon in the top right corner.
-2. You will see a list of tabs, such as General, Graphics, Audio, Controls, etc.
-3. Tap any tab and explore the available options.
-4. Make any changes to your liking and tap "Apply" or "Save".
-5. You can also restore the defaults by tapping "Reset".
-
-### How to join a match and play with your friends
-
-To join a match, you can play solo or team up with your friends. You can also choose between different modes, such as Battle Royale, Plunder, Resurgence, etc. Here is how:
-
-1. From the game menu, tap the mode icon in the top left corner.
-2. You will see a list of modes, such as Battle Royale, Plunder, Resurgence, etc.
-3. Tap any mode and select your preferred options, such as squad size, map size, fill or no fill, etc.
-4. If you want to play with your friends, tap the invite icon in the bottom right corner.
-5. You will see a list of your friends who are online or offline.
-6. Tap any friend and send them an invitation.
-7. Once you are ready, tap "Start Match" or "Play" at the bottom center.
-8. You will be matched with other players and enter the game lobby.
-9. You can chat with your teammates using voice or text chat.
-10. You can also change your loadout, operator, skin, weapon, etc. by tapping the icons in the bottom left corner.
-11. When the match starts, you will deploy from a plane over Verdansk.
-12. You can choose where to land by opening your map and marking a location.
-13. You can also follow your teammates or the squad leader by tapping their name or icon.
-14. Once you land, you have to loot, shoot, and survive against other players and the closing gas circle.
-15. You can use contracts, killstreaks, and vehicles to gain an advantage over your enemies.
-16. You can also revive your teammates or buy them back at buy stations.
-17. The last team or player standing wins the match.
-
-## Tips and Tricks for Call of Duty Warzone Mobile APK
-
-Playing the Call of Duty Warzone Mobile APK can be challenging and fun, but also frustrating and competitive. To improve your performance and skills, you need to practice and learn some tips and tricks. Here are some of them.
-
-### How to improve your performance and skills
-
-To improve your performance and skills, you should do the following:
-
-- Choose a loadout, operator, skin, weapon, and so on that suits your playstyle and strategy.
-- Adjust your settings and controls to optimize your graphics, sound, sensitivity, layout, etc.
-- Practice your aim, movement, and tactics in the training or practice mode.
-- Watch tutorials, guides, and gameplay videos from other players or streamers.
-- Learn from your mistakes and analyze your stats and replays.
-- Stay up to date with the latest news, updates, and events for the game.
-
-Contracts, killstreaks, and vehicles are some of the features that make Call of Duty Warzone Mobile APK unique and exciting. You can use them to gain an advantage over your enemies. Here is how:
-
-- Contracts are missions that you can find and activate on the map. They give you rewards such as money, loot, intel, etc. There are different types of contracts, such as Bounty, Scavenger, Recon, Most Wanted, etc. Choose the ones that suit your situation and objective.
-- Killstreaks are special abilities that you can use once you have enough money or points. They include airstrikes, UAVs, cluster strikes, etc. You can buy them at buy stations or find them in loot boxes. Use them wisely and strategically to eliminate or distract your enemies.
-- Vehicles are modes of transport that you can use to move around the map faster and more safely. They include helicopters, trucks, ATVs, etc. You can find them in various places or call them in from buy stations. Be careful when using them, as they make noise and attract attention.
-
-### How to survive in Verdansk and win the battle royale
-
-Verdansk is the main map of Call of Duty Warzone Mobile APK. It is a huge, diverse map with many locations, such as Downtown, the Airport, the Stadium, the Prison, and more. To survive in Verdansk and win the battle royale, you should do the following:
-
-- Choose a good landing spot that has enough loot and cover.
-- Loot as much as you can, but do not get greedy or distracted.
-- Avoid unnecessary fights and engagements unless you have a clear advantage or objective.
-- Stay alert and aware of your surroundings and enemies.
-- Use the ping system and voice chat to communicate with your teammates.
-- Move with the gas circle and avoid getting caught outside of it.
-- Pick your battles wisely and know when to fight or flee.
-- Do not forget to revive or buy back your teammates if they are down or dead.
-- Play smart and have fun!
-
-## Conclusion
-
-In conclusion, Call of Duty Warzone Mobile APK is a great game that offers an exciting, immersive battle royale experience on your mobile device. You can download it from various sources or pre-register now to get early access and exclusive rewards. You can play solo or with your friends across different modes and maps, use contracts, killstreaks, and vehicles to strengthen your game and strategy, and sharpen your performance and skills by following a few tips and tricks. If you are looking for a fun, challenging game that will keep you hooked for hours, you should definitely try the Call of Duty Warzone Mobile APK. Download it now and join the warzone!
-
-## FAQs
-
-Here are some frequently asked questions about the Call of Duty Warzone Mobile APK:
-
-Q1: Is Call of Duty Warzone Mobile APK free to play?
-
-A1: Yes, Call of Duty Warzone Mobile APK is free to play with optional in-game purchases. You can download it from various sources or pre-register now to get early access and exclusive rewards.
-
-Q2: What are the minimum device specifications for Call of Duty Warzone Mobile APK?
-
-A2: You need an Adreno 618 or better GPU and 6GB of RAM or more to play Call of Duty Warzone Mobile APK. You also need a stable internet connection and enough storage space.
-
-Q3: When will Call of Duty Warzone Mobile APK be officially released?
-
-A3: The official release date of Call of Duty Warzone Mobile APK has not been announced yet, but you can pre-register now to be notified when it is available. You can also follow the official social media accounts or website for the latest news and updates.
-
-Q4: Can I play Call of Duty Warzone Mobile APK with players on other platforms?
-
-Q5: Can I transfer my progress from Call of Duty Warzone on PC or console to Call of Duty Warzone Mobile APK?
-
-A5: Yes, you can transfer your progress from Call of Duty Warzone on PC or console to Call of Duty Warzone Mobile APK. You just need to use your existing Activision account, or create a new one, to sync your progress across all devices.
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py
deleted file mode 100644
index 7ecf7eee35fb95b726f035fac9793055dd94ade8..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/__init__.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""
- pygments.formatters
- ~~~~~~~~~~~~~~~~~~~
-
- Pygments formatters.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys
-import types
-from fnmatch import fnmatch
-from os.path import basename
-
-from pip._vendor.pygments.formatters._mapping import FORMATTERS
-from pip._vendor.pygments.plugin import find_plugin_formatters
-from pip._vendor.pygments.util import ClassNotFound
-
-__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
- 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
-
-_formatter_cache = {} # classes by name
-
-def _load_formatters(module_name):
- """Load a formatter (and all others in the module too)."""
- mod = __import__(module_name, None, None, ['__all__'])
- for formatter_name in mod.__all__:
- cls = getattr(mod, formatter_name)
- _formatter_cache[cls.name] = cls
-
-
-def get_all_formatters():
- """Return a generator for all formatter classes."""
- # NB: this returns formatter classes, not info like get_all_lexers().
- for info in FORMATTERS.values():
- if info[1] not in _formatter_cache:
- _load_formatters(info[0])
- yield _formatter_cache[info[1]]
- for _, formatter in find_plugin_formatters():
- yield formatter
-
-
-def find_formatter_class(alias):
- """Lookup a formatter by alias.
-
- Returns None if not found.
- """
- for module_name, name, aliases, _, _ in FORMATTERS.values():
- if alias in aliases:
- if name not in _formatter_cache:
- _load_formatters(module_name)
- return _formatter_cache[name]
- for _, cls in find_plugin_formatters():
- if alias in cls.aliases:
- return cls
-
-
-def get_formatter_by_name(_alias, **options):
- """Lookup and instantiate a formatter by alias.
-
- Raises ClassNotFound if not found.
- """
- cls = find_formatter_class(_alias)
- if cls is None:
- raise ClassNotFound("no formatter found for name %r" % _alias)
- return cls(**options)
-
-
-def load_formatter_from_file(filename, formattername="CustomFormatter",
- **options):
- """Load a formatter from a file.
-
- This method expects a file located relative to the current working
- directory, which contains a class named CustomFormatter. By default,
- it expects the Formatter to be named CustomFormatter; you can specify
- your own class name as the second argument to this function.
-
- Users should be very careful with the input, because this method
- is equivalent to running eval on the input file.
-
- Raises ClassNotFound if there are any problems importing the Formatter.
-
- .. versionadded:: 2.2
- """
- try:
- # This empty dict will contain the namespace for the exec'd file
- custom_namespace = {}
- with open(filename, 'rb') as f:
- exec(f.read(), custom_namespace)
- # Retrieve the class `formattername` from that namespace
- if formattername not in custom_namespace:
- raise ClassNotFound('no valid %s class found in %s' %
- (formattername, filename))
- formatter_class = custom_namespace[formattername]
- # And finally instantiate it with the options
- return formatter_class(**options)
- except OSError as err:
- raise ClassNotFound('cannot read %s: %s' % (filename, err))
- except ClassNotFound:
- raise
- except Exception as err:
- raise ClassNotFound('error when loading custom formatter: %s' % err)
-
-
-def get_formatter_for_filename(fn, **options):
- """Lookup and instantiate a formatter by filename pattern.
-
- Raises ClassNotFound if not found.
- """
- fn = basename(fn)
- for modname, name, _, filenames, _ in FORMATTERS.values():
- for filename in filenames:
- if fnmatch(fn, filename):
- if name not in _formatter_cache:
- _load_formatters(modname)
- return _formatter_cache[name](**options)
- for cls in find_plugin_formatters():
- for filename in cls.filenames:
- if fnmatch(fn, filename):
- return cls(**options)
- raise ClassNotFound("no formatter found for file name %r" % fn)
-
-
-class _automodule(types.ModuleType):
- """Automatically import formatters."""
-
- def __getattr__(self, name):
- info = FORMATTERS.get(name)
- if info:
- _load_formatters(info[0])
- cls = _formatter_cache[info[1]]
- setattr(self, name, cls)
- return cls
- raise AttributeError(name)
-
-
-oldmod = sys.modules[__name__]
-newmod = _automodule(__name__)
-newmod.__dict__.update(oldmod.__dict__)
-sys.modules[__name__] = newmod
-del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
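For context on the file removed above: its lookup helpers are what callers normally use to obtain a formatter. Below is a minimal usage sketch (not part of the deleted file), written against the upstream pygments package rather than pip's vendored copy; the "html" alias and the file name are only illustrative.

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import get_formatter_by_name, get_formatter_for_filename

# Look up and instantiate a formatter by alias; raises ClassNotFound for unknown names.
html_formatter = get_formatter_by_name("html", linenos=True)

# Alternatively, pick a formatter from a target file name pattern (e.g. "*.html").
file_formatter = get_formatter_for_filename("report.html")

# Render a small snippet with the chosen formatter.
print(highlight("print('hello')", PythonLexer(), html_formatter)[:120])
```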
diff --git a/spaces/CVH-vn1210/make_hair/minigpt4/datasets/builders/base_dataset_builder.py b/spaces/CVH-vn1210/make_hair/minigpt4/datasets/builders/base_dataset_builder.py
deleted file mode 100644
index 8885b2c2de676b717081a512230ed6f90a0064ce..0000000000000000000000000000000000000000
--- a/spaces/CVH-vn1210/make_hair/minigpt4/datasets/builders/base_dataset_builder.py
+++ /dev/null
@@ -1,235 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import logging
-import os
-import shutil
-import warnings
-
-from omegaconf import OmegaConf
-import torch.distributed as dist
-from torchvision.datasets.utils import download_url
-
-import minigpt4.common.utils as utils
-from minigpt4.common.dist_utils import is_dist_avail_and_initialized, is_main_process
-from minigpt4.common.registry import registry
-from minigpt4.processors.base_processor import BaseProcessor
-
-
-
-class BaseDatasetBuilder:
- train_dataset_cls, eval_dataset_cls = None, None
-
- def __init__(self, cfg=None):
- super().__init__()
-
- if cfg is None:
- # help to create datasets from default config.
- self.config = load_dataset_config(self.default_config_path())
- elif isinstance(cfg, str):
- self.config = load_dataset_config(cfg)
- else:
- # when called from task.build_dataset()
- self.config = cfg
-
- self.data_type = self.config.data_type
-
- self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
- self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()}
-
- def build_datasets(self):
- # download, split, etc...
- # only called on 1 GPU/TPU in distributed
-
- if is_main_process():
- self._download_data()
-
- if is_dist_avail_and_initialized():
- dist.barrier()
-
- # at this point, all the annotations and image/videos should be all downloaded to the specified locations.
- logging.info("Building datasets...")
- datasets = self.build() # dataset['train'/'val'/'test']
-
- return datasets
-
- def build_processors(self):
- vis_proc_cfg = self.config.get("vis_processor")
- txt_proc_cfg = self.config.get("text_processor")
-
- if vis_proc_cfg is not None:
- vis_train_cfg = vis_proc_cfg.get("train")
- vis_eval_cfg = vis_proc_cfg.get("eval")
-
- self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg)
- self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg)
-
- if txt_proc_cfg is not None:
- txt_train_cfg = txt_proc_cfg.get("train")
- txt_eval_cfg = txt_proc_cfg.get("eval")
-
- self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg)
- self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg)
-
- @staticmethod
- def _build_proc_from_cfg(cfg):
- return (
- registry.get_processor_class(cfg.name).from_config(cfg)
- if cfg is not None
- else None
- )
-
- @classmethod
- def default_config_path(cls, type="default"):
- return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])
-
- def _download_data(self):
- self._download_ann()
- self._download_vis()
-
- def _download_ann(self):
- """
- Download annotation files if necessary.
- All the vision-language datasets should have annotations of unified format.
-
- storage_path can be:
- (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.
- (2) basename/dirname: will be suffixed with base name of URL if dirname is provided.
-
- Local annotation paths should be relative.
- """
- anns = self.config.build_info.annotations
-
- splits = anns.keys()
-
- cache_root = registry.get_path("cache_root")
-
- for split in splits:
- info = anns[split]
-
- urls, storage_paths = info.get("url", None), info.storage
-
- if isinstance(urls, str):
- urls = [urls]
- if isinstance(storage_paths, str):
- storage_paths = [storage_paths]
-
- assert len(urls) == len(storage_paths)
-
- for url_or_filename, storage_path in zip(urls, storage_paths):
- # if storage_path is relative, make it full by prefixing with cache_root.
- if not os.path.isabs(storage_path):
- storage_path = os.path.join(cache_root, storage_path)
-
- dirname = os.path.dirname(storage_path)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- if os.path.isfile(url_or_filename):
- src, dst = url_or_filename, storage_path
- if not os.path.exists(dst):
- shutil.copyfile(src=src, dst=dst)
- else:
- logging.info("Using existing file {}.".format(dst))
- else:
- if os.path.isdir(storage_path):
- # if only dirname is provided, suffix with basename of URL.
- raise ValueError(
- "Expecting storage_path to be a file path, got directory {}".format(
- storage_path
- )
- )
- else:
- filename = os.path.basename(storage_path)
-
- download_url(url=url_or_filename, root=dirname, filename=filename)
-
- def _download_vis(self):
-
- storage_path = self.config.build_info.get(self.data_type).storage
- storage_path = utils.get_cache_path(storage_path)
-
- if not os.path.exists(storage_path):
- warnings.warn(
- f"""
- The specified path {storage_path} for visual inputs does not exist.
- Please provide a correct path to the visual inputs or
- refer to datasets/download_scripts/README.md for downloading instructions.
- """
- )
-
- def build(self):
- """
- Create by split datasets inheriting torch.utils.data.Datasets.
-
- # build() can be dataset-specific. Overwrite to customize.
- """
- self.build_processors()
-
- build_info = self.config.build_info
-
- ann_info = build_info.annotations
- vis_info = build_info.get(self.data_type)
-
- datasets = dict()
- for split in ann_info.keys():
- if split not in ["train", "val", "test"]:
- continue
-
- is_train = split == "train"
-
- # processors
- vis_processor = (
- self.vis_processors["train"]
- if is_train
- else self.vis_processors["eval"]
- )
- text_processor = (
- self.text_processors["train"]
- if is_train
- else self.text_processors["eval"]
- )
-
- # annotation path
- ann_paths = ann_info.get(split).storage
- if isinstance(ann_paths, str):
- ann_paths = [ann_paths]
-
- abs_ann_paths = []
- for ann_path in ann_paths:
- if not os.path.isabs(ann_path):
- ann_path = utils.get_cache_path(ann_path)
- abs_ann_paths.append(ann_path)
- ann_paths = abs_ann_paths
-
- # visual data storage path
- vis_path = os.path.join(vis_info.storage, split)
-
- if not os.path.isabs(vis_path):
- # vis_path = os.path.join(utils.get_cache_path(), vis_path)
- vis_path = utils.get_cache_path(vis_path)
-
- if not os.path.exists(vis_path):
- warnings.warn("storage path {} does not exist.".format(vis_path))
-
- # create datasets
- dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls
- datasets[split] = dataset_cls(
- vis_processor=vis_processor,
- text_processor=text_processor,
- ann_paths=ann_paths,
- vis_root=vis_path,
- )
-
- return datasets
-
-
-def load_dataset_config(cfg_path):
- cfg = OmegaConf.load(cfg_path).datasets
- cfg = cfg[list(cfg.keys())[0]]
-
- return cfg
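As a rough illustration of how the builder above is meant to be extended (this sketch is not part of the deleted file; the builder name, config path, and the assumption that a BaseDataset class exists under minigpt4.datasets.datasets.base_dataset are hypothetical placeholders):

```python
from minigpt4.common.registry import registry
from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from minigpt4.datasets.datasets.base_dataset import BaseDataset  # assumed dataset class

@registry.register_builder("my_caption")  # illustrative name
class MyCaptionBuilder(BaseDatasetBuilder):
    # Dataset classes instantiated by build() for the train and eval splits.
    train_dataset_cls = BaseDataset
    eval_dataset_cls = BaseDataset

    # default_config_path() resolves this mapping; the YAML must define
    # data_type plus build_info (annotation URLs/paths and visual storage).
    DATASET_CONFIG_DICT = {"default": "configs/datasets/my_caption/defaults.yaml"}

# builder = MyCaptionBuilder()
# datasets = builder.build_datasets()  # e.g. {"train": ..., "val": ..., "test": ...}
```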
diff --git a/spaces/CVPR/LIVE/scene.cpp b/spaces/CVPR/LIVE/scene.cpp
deleted file mode 100644
index 1799c962146fbca169594e73f304daa76aa36d0b..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/scene.cpp
+++ /dev/null
@@ -1,1035 +0,0 @@
-#include "scene.h"
-#include "aabb.h"
-#include "cuda_utils.h"
-#include "filter.h"
-#include "shape.h"
-#include
-#include
-#include
-#include
-#include
-
-size_t align(size_t s) {
- auto a = alignof(std::max_align_t);
- return ((s + a - 1) / a) * a;
-}
-
-template <typename T>
-void allocate(bool use_gpu, T **p) {
- if (use_gpu) {
-#ifdef __NVCC__
- checkCuda(cudaMallocManaged(p, sizeof(T)));
-#else
- throw std::runtime_error("diffvg not compiled with GPU");
- assert(false);
-#endif
- } else {
- *p = (T*)malloc(sizeof(T));
- }
-}
-
-template <typename T>
-void allocate(bool use_gpu, size_t size, T **p) {
- if (use_gpu) {
-#ifdef __NVCC__
- checkCuda(cudaMallocManaged(p, size * sizeof(T)));
-#else
- throw std::runtime_error("diffvg not compiled with GPU");
- assert(false);
-#endif
- } else {
- *p = (T*)malloc(size * sizeof(T));
- }
-}
-
-void copy_and_init_shapes(Scene &scene,
- const std::vector<const Shape *> &shape_list) {
- for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) {
- switch (shape_list[shape_id]->type) {
- case ShapeType::Circle: {
- Circle *p = (Circle *)scene.shapes[shape_id].ptr;
- const Circle *p_ = (const Circle*)(shape_list[shape_id]->ptr);
- *p = *p_;
- Circle *d_p = (Circle *)scene.d_shapes[shape_id].ptr;
- d_p->radius = 0;
- d_p->center = Vector2f{0, 0};
- break;
- } case ShapeType::Ellipse: {
- Ellipse *p = (Ellipse *)scene.shapes[shape_id].ptr;
- const Ellipse *p_ = (const Ellipse*)(shape_list[shape_id]->ptr);
- *p = *p_;
- Ellipse *d_p = (Ellipse *)scene.d_shapes[shape_id].ptr;
- d_p->radius = Vector2f{0, 0};
- d_p->center = Vector2f{0, 0};
- break;
- } case ShapeType::Path: {
- Path *p = (Path *)scene.shapes[shape_id].ptr;
- const Path *p_ = (const Path*)(shape_list[shape_id]->ptr);
- p->num_points = p_->num_points;
- p->num_base_points = p_->num_base_points;
- for (int i = 0; i < p_->num_base_points; i++) {
- p->num_control_points[i] = p_->num_control_points[i];
- }
- for (int i = 0; i < 2 * p_->num_points; i++) {
- p->points[i] = p_->points[i];
- }
- p->is_closed = p_->is_closed;
- p->use_distance_approx = p_->use_distance_approx;
- Path *d_p = (Path *)scene.d_shapes[shape_id].ptr;
- d_p->num_points = p_->num_points;
- d_p->num_base_points = p_->num_base_points;
- for (int i = 0; i < 2 * p_->num_points; i++) {
- d_p->points[i] = 0;
- }
- d_p->is_closed = p_->is_closed;
- if (p_->thickness != nullptr) {
- for (int i = 0; i < p_->num_points; i++) {
- p->thickness[i] = p_->thickness[i];
- d_p->thickness[i] = 0;
- }
- }
- d_p->use_distance_approx = p_->use_distance_approx;
- break;
- } case ShapeType::Rect: {
- Rect *p = (Rect *)scene.shapes[shape_id].ptr;
- const Rect *p_ = (const Rect*)(shape_list[shape_id]->ptr);
- *p = *p_;
- Rect *d_p = (Rect *)scene.d_shapes[shape_id].ptr;
- d_p->p_min = Vector2f{0, 0};
- d_p->p_max = Vector2f{0, 0};
- break;
- } default: {
- assert(false);
- break;
- }
- }
- scene.shapes[shape_id].type = shape_list[shape_id]->type;
- scene.shapes[shape_id].stroke_width = shape_list[shape_id]->stroke_width;
- scene.d_shapes[shape_id].type = shape_list[shape_id]->type;
- scene.d_shapes[shape_id].stroke_width = 0;
- }
-}
-
-std::vector<float>
-compute_shape_length(const std::vector<const Shape *> &shape_list) {
- int num_shapes = (int)shape_list.size();
- std::vector<float> shape_length_list(num_shapes, 0.f);
- for (int shape_id = 0; shape_id < num_shapes; shape_id++) {
- auto shape_length = 0.f;
- switch (shape_list[shape_id]->type) {
- case ShapeType::Circle: {
- const Circle *p_ = (const Circle*)(shape_list[shape_id]->ptr);
- shape_length += float(2.f * M_PI) * p_->radius;
- break;
- } case ShapeType::Ellipse: {
- const Ellipse *p_ = (const Ellipse*)(shape_list[shape_id]->ptr);
- // https://en.wikipedia.org/wiki/Ellipse#Circumference
- // Ramanujan's ellipse circumference approximation
- auto a = p_->radius.x;
- auto b = p_->radius.y;
- shape_length += float(M_PI) * (3 * (a + b) - sqrt((3 * a + b) * (a + 3 * b)));
- break;
- } case ShapeType::Path: {
- const Path *p_ = (const Path*)(shape_list[shape_id]->ptr);
- auto length = 0.f;
- auto point_id = 0;
- for (int i = 0; i < p_->num_base_points; i++) {
- if (p_->num_control_points[i] == 0) {
- // Straight line
- auto i0 = point_id;
- assert(i0 < p_->num_points);
- auto i1 = (i0 + 1) % p_->num_points;
- point_id += 1;
- auto p0 = Vector2f{p_->points[2 * i0], p_->points[2 * i0 + 1]};
- auto p1 = Vector2f{p_->points[2 * i1], p_->points[2 * i1 + 1]};
- length += distance(p1, p0);
- } else if (p_->num_control_points[i] == 1) {
- // Quadratic Bezier curve
- auto i0 = point_id;
- auto i1 = i0 + 1;
- auto i2 = (i0 + 2) % p_->num_points;
- point_id += 2;
- auto p0 = Vector2f{p_->points[2 * i0], p_->points[2 * i0 + 1]};
- auto p1 = Vector2f{p_->points[2 * i1], p_->points[2 * i1 + 1]};
- auto p2 = Vector2f{p_->points[2 * i2], p_->points[2 * i2 + 1]};
- auto eval = [&](float t) -> Vector2f {
- auto tt = 1 - t;
- return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2;
- };
- // We use 3-point samples to approximate the length
- auto v0 = p0;
- auto v1 = eval(0.5f);
- auto v2 = p2;
- length += distance(v1, v0) + distance(v1, v2);
- } else if (p_->num_control_points[i] == 2) {
- // Cubic Bezier curve
- auto i0 = point_id;
- auto i1 = i0 + 1;
- auto i2 = i0 + 2;
- auto i3 = (i0 + 3) % p_->num_points;
- point_id += 3;
- auto p0 = Vector2f{p_->points[2 * i0], p_->points[2 * i0 + 1]};
- auto p1 = Vector2f{p_->points[2 * i1], p_->points[2 * i1 + 1]};
- auto p2 = Vector2f{p_->points[2 * i2], p_->points[2 * i2 + 1]};
- auto p3 = Vector2f{p_->points[2 * i3], p_->points[2 * i3 + 1]};
- auto eval = [&](float t) -> Vector2f {
- auto tt = 1 - t;
- return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3;
- };
- // We use 4-point samples to approximate the length
- auto v0 = p0;
- auto v1 = eval(1.f/3.f);
- auto v2 = eval(2.f/3.f);
- auto v3 = p3;
- length += distance(v1, v0) + distance(v1, v2) + distance(v2, v3);
- } else {
- assert(false);
- }
- }
- assert(isfinite(length));
- shape_length += length;
- break;
- } case ShapeType::Rect: {
- const Rect *p_ = (const Rect*)(shape_list[shape_id]->ptr);
- shape_length += 2 * (p_->p_max.x - p_->p_min.x + p_->p_max.y - p_->p_min.y);
- break;
- } default: {
- assert(false);
- break;
- }
- }
- assert(isfinite(shape_length));
- shape_length_list[shape_id] = shape_length;
- }
- return shape_length_list;
-}
-
-void build_shape_cdfs(Scene &scene,
- const std::vector<const ShapeGroup *> &shape_group_list,
- const std::vector<float> &shape_length_list) {
- int sample_id = 0;
- for (int shape_group_id = 0; shape_group_id < (int)shape_group_list.size(); shape_group_id++) {
- const ShapeGroup *shape_group = shape_group_list[shape_group_id];
- for (int i = 0; i < shape_group->num_shapes; i++) {
- int shape_id = shape_group->shape_ids[i];
- float length = shape_length_list[shape_id];
- scene.sample_shape_id[sample_id] = shape_id;
- if (sample_id == 0) {
- scene.sample_shapes_cdf[sample_id] = length;
- } else {
- scene.sample_shapes_cdf[sample_id] = length +
- scene.sample_shapes_cdf[sample_id - 1];
- }
- assert(isfinite(length));
- scene.sample_shapes_pmf[sample_id] = length;
- scene.sample_group_id[sample_id] = shape_group_id;
- sample_id++;
- }
- }
- assert(sample_id == scene.num_total_shapes);
- auto normalization = scene.sample_shapes_cdf[scene.num_total_shapes - 1];
- if (normalization <= 0) {
- char buf[256];
- sprintf(buf, "The total length of the shape boundaries in the scene is equal or less than 0. Length = %f", normalization);
- throw std::runtime_error(buf);
- }
- if (!isfinite(normalization)) {
- char buf[256];
- sprintf(buf, "The total length of the shape boundaries in the scene is not a number. Length = %f", normalization);
- throw std::runtime_error(buf);
- }
- assert(normalization > 0);
- for (int sample_id = 0; sample_id < scene.num_total_shapes; sample_id++) {
- scene.sample_shapes_cdf[sample_id] /= normalization;
- scene.sample_shapes_pmf[sample_id] /= normalization;
- }
-}
-
-void build_path_cdfs(Scene &scene,
- const std::vector<const Shape *> &shape_list,
- const std::vector<float> &shape_length_list) {
- for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) {
- if (shape_list[shape_id]->type == ShapeType::Path) {
- const Path &path = shape_list[shape_id]->as_path();
- float *pmf = scene.path_length_pmf[shape_id];
- float *cdf = scene.path_length_cdf[shape_id];
- int *point_id_map = scene.path_point_id_map[shape_id];
- auto path_length = shape_length_list[shape_id];
- auto inv_length = 1.f / path_length;
- auto point_id = 0;
- for (int i = 0; i < path.num_base_points; i++) {
- point_id_map[i] = point_id;
- if (path.num_control_points[i] == 0) {
- // Straight line
- auto i0 = point_id;
- auto i1 = (i0 + 1) % path.num_points;
- point_id += 1;
- auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
- auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
- auto d = distance(p0, p1) * inv_length;
- pmf[i] = d;
- if (i == 0) {
- cdf[i] = d;
- } else {
- cdf[i] = d + cdf[i - 1];
- }
- } else if (path.num_control_points[i] == 1) {
- // Quadratic Bezier curve
- auto i0 = point_id;
- auto i1 = i0 + 1;
- auto i2 = (i0 + 2) % path.num_points;
- point_id += 2;
- auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
- auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
- auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]};
- auto eval = [&](float t) -> Vector2f {
- auto tt = 1 - t;
- return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2;
- };
- // We use 3-point samples to approximate the length
- auto v0 = p0;
- auto v1 = eval(0.5f);
- auto v2 = p2;
- auto d = (distance(v0, v1) + distance(v1, v2)) * inv_length;
- pmf[i] = d;
- if (i == 0) {
- cdf[i] = d;
- } else {
- cdf[i] = d + cdf[i - 1];
- }
- } else if (path.num_control_points[i] == 2) {
- // Cubic Bezier curve
- auto i0 = point_id;
- auto i1 = point_id + 1;
- auto i2 = point_id + 2;
- auto i3 = (point_id + 3) % path.num_points;
- point_id += 3;
- auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
- auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
- auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]};
- auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]};
- auto eval = [&](float t) -> Vector2f {
- auto tt = 1 - t;
- return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3;
- };
- // We use 4-point samples to approximate the length
- auto v0 = p0;
- auto v1 = eval(1.f/3.f);
- auto v2 = eval(2.f/3.f);
- auto v3 = p3;
- auto d = (distance(v1, v0) + distance(v1, v2) + distance(v2, v3)) * inv_length;
- pmf[i] = d;
- if (i == 0) {
- cdf[i] = d;
- } else {
- cdf[i] = d + cdf[i - 1];
- }
- } else {
- assert(false);
- }
- }
- }
- }
-}
-
-void copy_and_init_shape_groups(Scene &scene,
- const std::vector<const ShapeGroup *> &shape_group_list) {
- for (int group_id = 0; group_id < scene.num_shape_groups; group_id++) {
- const ShapeGroup *shape_group = shape_group_list[group_id];
- auto copy_and_init_color = [&](const ColorType &color_type, void *color_ptr, void *target_ptr, void *d_target_ptr) {
- switch (color_type) {
- case ColorType::Constant: {
- Constant *c = (Constant*)target_ptr;
- Constant *d_c = (Constant*)d_target_ptr;
- const Constant *c_ = (const Constant*)color_ptr;
- *c = *c_;
- d_c->color = Vector4{0, 0, 0, 0};
- break;
- } case ColorType::LinearGradient: {
- LinearGradient *c = (LinearGradient*)target_ptr;
- LinearGradient *d_c = (LinearGradient*)d_target_ptr;
- const LinearGradient *c_ = (const LinearGradient*)color_ptr;
- c->begin = c_->begin;
- c->end = c_->end;
- c->num_stops = c_->num_stops;
- for (int i = 0; i < c_->num_stops; i++) {
- c->stop_offsets[i] = c_->stop_offsets[i];
- }
- for (int i = 0; i < 4 * c_->num_stops; i++) {
- c->stop_colors[i] = c_->stop_colors[i];
- }
- d_c->begin = Vector2f{0, 0};
- d_c->end = Vector2f{0, 0};
- d_c->num_stops = c_->num_stops;
- for (int i = 0; i < c_->num_stops; i++) {
- d_c->stop_offsets[i] = 0;
- }
- for (int i = 0; i < 4 * c_->num_stops; i++) {
- d_c->stop_colors[i] = 0;
- }
- break;
- } case ColorType::RadialGradient: {
- RadialGradient *c = (RadialGradient*)target_ptr;
- RadialGradient *d_c = (RadialGradient*)d_target_ptr;
- const RadialGradient *c_ = (const RadialGradient*)color_ptr;
- c->center = c_->center;
- c->radius = c_->radius;
- c->num_stops = c_->num_stops;
- for (int i = 0; i < c_->num_stops; i++) {
- c->stop_offsets[i] = c_->stop_offsets[i];
- }
- for (int i = 0; i < 4 * c_->num_stops; i++) {
- c->stop_colors[i] = c_->stop_colors[i];
- }
- d_c->center = Vector2f{0, 0};
- d_c->radius = Vector2f{0, 0};
- d_c->num_stops = c_->num_stops;
- for (int i = 0; i < c_->num_stops; i++) {
- d_c->stop_offsets[i] = 0;
- }
- for (int i = 0; i < 4 * c_->num_stops; i++) {
- d_c->stop_colors[i] = 0;
- }
- break;
- } default: {
- assert(false);
- }
- }
- };
- for (int i = 0; i < shape_group->num_shapes; i++) {
- scene.shape_groups[group_id].shape_ids[i] = shape_group->shape_ids[i];
- }
- scene.shape_groups[group_id].num_shapes = shape_group->num_shapes;
- scene.shape_groups[group_id].use_even_odd_rule = shape_group->use_even_odd_rule;
- scene.shape_groups[group_id].canvas_to_shape = shape_group->canvas_to_shape;
- scene.shape_groups[group_id].shape_to_canvas = shape_group->shape_to_canvas;
- scene.d_shape_groups[group_id].shape_ids = nullptr;
- scene.d_shape_groups[group_id].num_shapes = shape_group->num_shapes;
- scene.d_shape_groups[group_id].use_even_odd_rule = shape_group->use_even_odd_rule;
- scene.d_shape_groups[group_id].canvas_to_shape = Matrix3x3f{};
- scene.d_shape_groups[group_id].shape_to_canvas = Matrix3x3f{};
-
- scene.shape_groups[group_id].fill_color_type = shape_group->fill_color_type;
- scene.d_shape_groups[group_id].fill_color_type = shape_group->fill_color_type;
- if (shape_group->fill_color != nullptr) {
- copy_and_init_color(shape_group->fill_color_type,
- shape_group->fill_color,
- scene.shape_groups[group_id].fill_color,
- scene.d_shape_groups[group_id].fill_color);
- }
- scene.shape_groups[group_id].stroke_color_type = shape_group->stroke_color_type;
- scene.d_shape_groups[group_id].stroke_color_type = shape_group->stroke_color_type;
- if (shape_group->stroke_color != nullptr) {
- copy_and_init_color(shape_group->stroke_color_type,
- shape_group->stroke_color,
- scene.shape_groups[group_id].stroke_color,
- scene.d_shape_groups[group_id].stroke_color);
- }
- }
-}
-
-DEVICE uint32_t morton2D(const Vector2f &p, int canvas_width, int canvas_height) {
- auto scene_bounds = Vector2f{canvas_width, canvas_height};
- auto pp = p / scene_bounds;
- TVector2 pp_i{pp.x * 1023, pp.y * 1023};
- return (expand_bits(pp_i.x) << 1u) |
- (expand_bits(pp_i.y) << 0u);
-}
-
-template <bool sort = true>
-void build_bvh(const Scene &scene, BVHNode *nodes, int num_primitives) {
- auto bvh_size = 2 * num_primitives - 1;
- if (bvh_size > 1) {
- if (sort) {
- // Sort by Morton code
- std::sort(nodes, nodes + num_primitives,
- [&] (const BVHNode &n0, const BVHNode &n1) {
- auto p0 = 0.5f * (n0.box.p_min + n0.box.p_max);
- auto p1 = 0.5f * (n1.box.p_min + n1.box.p_max);
- auto m0 = morton2D(p0, scene.canvas_width, scene.canvas_height);
- auto m1 = morton2D(p1, scene.canvas_width, scene.canvas_height);
- return m0 < m1;
- });
- }
- for (int i = num_primitives; i < bvh_size; i++) {
- nodes[i] = BVHNode{-1, -1, AABB{}, 0.f};
- }
- int prev_beg = 0;
- int prev_end = num_primitives;
- // For handling odd number of nodes at a level
- int leftover = prev_end % 2 == 0 ? -1 : prev_end - 1;
- while (prev_end - prev_beg >= 1 || leftover != -1) {
- int length = (prev_end - prev_beg) / 2;
- if ((prev_end - prev_beg) % 2 == 1 && leftover != -1 &&
- leftover != prev_end - 1) {
- length += 1;
- }
- for (int i = 0; i < length; i++) {
- BVHNode node;
- node.child0 = prev_beg + 2 * i;
- node.child1 = prev_beg + 2 * i + 1;
- if (node.child1 >= prev_end) {
- assert(leftover != -1);
- node.child1 = leftover;
- leftover = -1;
- }
- AABB child0_box = nodes[node.child0].box;
- AABB child1_box = nodes[node.child1].box;
- node.box = merge(child0_box, child1_box);
- node.max_radius = std::max(nodes[node.child0].max_radius,
- nodes[node.child1].max_radius);
- nodes[prev_end + i] = node;
- }
- if (length == 1 && leftover == -1) {
- break;
- }
- prev_beg = prev_end;
- prev_end = prev_beg + length;
- if (length % 2 == 1 && leftover == -1) {
- leftover = prev_end - 1;
- }
- }
- }
- assert(nodes[2 * num_primitives - 2].child0 != -1);
-}
-
-void compute_bounding_boxes(Scene &scene,
- const std::vector<const Shape *> &shape_list,
- const std::vector<const ShapeGroup *> &shape_group_list) {
- for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) {
- switch (shape_list[shape_id]->type) {
- case ShapeType::Circle: {
- const Circle *p = (const Circle*)(shape_list[shape_id]->ptr);
- scene.shapes_bbox[shape_id] = AABB{p->center - p->radius,
- p->center + p->radius};
- break;
- } case ShapeType::Ellipse: {
- const Ellipse *p = (const Ellipse*)(shape_list[shape_id]->ptr);
- scene.shapes_bbox[shape_id] = AABB{p->center - p->radius,
- p->center + p->radius};
- break;
- } case ShapeType::Path: {
- const Path *p = (const Path*)(shape_list[shape_id]->ptr);
- AABB box;
- if (p->num_points > 0) {
- box = AABB{Vector2f{p->points[0], p->points[1]},
- Vector2f{p->points[0], p->points[1]}};
- }
- for (int i = 1; i < p->num_points; i++) {
- box = merge(box, Vector2f{p->points[2 * i], p->points[2 * i + 1]});
- }
- scene.shapes_bbox[shape_id] = box;
- std::vector<AABB> boxes(p->num_base_points);
- std::vector<float> thickness(p->num_base_points);
- std::vector<int> first_point_id(p->num_base_points);
- auto r = shape_list[shape_id]->stroke_width;
- auto point_id = 0;
- for (int i = 0; i < p->num_base_points; i++) {
- first_point_id[i] = point_id;
- if (p->num_control_points[i] == 0) {
- // Straight line
- auto i0 = point_id;
- auto i1 = (i0 + 1) % p->num_points;
- point_id += 1;
- auto p0 = Vector2f{p->points[2 * i0], p->points[2 * i0 + 1]};
- auto p1 = Vector2f{p->points[2 * i1], p->points[2 * i1 + 1]};
- boxes[i] = AABB();
- boxes[i] = merge(boxes[i], p0);
- boxes[i] = merge(boxes[i], p1);
- auto r0 = r;
- auto r1 = r;
- // override radius if path has thickness
- if (p->thickness != nullptr) {
- r0 = p->thickness[i0];
- r1 = p->thickness[i1];
- }
- thickness[i] = max(r0, r1);
- } else if (p->num_control_points[i] == 1) {
- // Quadratic Bezier curve
- auto i0 = point_id;
- auto i1 = i0 + 1;
- auto i2 = (i0 + 2) % p->num_points;
- point_id += 2;
- auto p0 = Vector2f{p->points[2 * i0], p->points[2 * i0 + 1]};
- auto p1 = Vector2f{p->points[2 * i1], p->points[2 * i1 + 1]};
- auto p2 = Vector2f{p->points[2 * i2], p->points[2 * i2 + 1]};
- boxes[i] = AABB();
- boxes[i] = merge(boxes[i], p0);
- boxes[i] = merge(boxes[i], p1);
- boxes[i] = merge(boxes[i], p2);
- auto r0 = r;
- auto r1 = r;
- auto r2 = r;
- // override radius if path has thickness
- if (p->thickness != nullptr) {
- r0 = p->thickness[i0];
- r1 = p->thickness[i1];
- r2 = p->thickness[i2];
- }
- thickness[i] = max(max(r0, r1), r2);
- } else if (p->num_control_points[i] == 2) {
- // Cubic Bezier curve
- auto i0 = point_id;
- auto i1 = i0 + 1;
- auto i2 = i0 + 2;
- auto i3 = (i0 + 3) % p->num_points;
- point_id += 3;
- auto p0 = Vector2f{p->points[2 * i0], p->points[2 * i0 + 1]};
- auto p1 = Vector2f{p->points[2 * i1], p->points[2 * i1 + 1]};
- auto p2 = Vector2f{p->points[2 * i2], p->points[2 * i2 + 1]};
- auto p3 = Vector2f{p->points[2 * i3], p->points[2 * i3 + 1]};
- boxes[i] = AABB();
- boxes[i] = merge(boxes[i], p0);
- boxes[i] = merge(boxes[i], p1);
- boxes[i] = merge(boxes[i], p2);
- boxes[i] = merge(boxes[i], p3);
- auto r0 = r;
- auto r1 = r;
- auto r2 = r;
- auto r3 = r;
- // override radius if path has thickness
- if (p->thickness != nullptr) {
- r0 = p->thickness[i0];
- r1 = p->thickness[i1];
- r2 = p->thickness[i2];
- r3 = p->thickness[i3];
- }
- thickness[i] = max(max(max(r0, r1), r2), r3);
- } else {
- assert(false);
- }
- }
- // Sort the boxes by y
- std::vector<int> idx(boxes.size());
- std::iota(idx.begin(), idx.end(), 0);
- std::sort(idx.begin(), idx.end(), [&](int i0, int i1) {
- const AABB &b0 = boxes[i0];
- const AABB &b1 = boxes[i1];
- auto b0y = 0.5f * (b0.p_min.y + b0.p_max.y);
- auto b1y = 0.5f * (b1.p_min.y + b1.p_max.y);
- return b0y < b1y;
- });
- BVHNode *nodes = scene.path_bvhs[shape_id];
- for (int i = 0; i < (int)idx.size(); i++) {
- nodes[i] = BVHNode{idx[i],
- -(first_point_id[idx[i]]+1),
- boxes[idx[i]],
- thickness[idx[i]]};
- }
- build_bvh(scene, nodes, boxes.size());
- break;
- } case ShapeType::Rect: {
- const Rect *p = (const Rect*)(shape_list[shape_id]->ptr);
- scene.shapes_bbox[shape_id] = AABB{p->p_min, p->p_max};
- break;
- } default: {
- assert(false);
- break;
- }
- }
- }
-
- for (int shape_group_id = 0; shape_group_id < (int)shape_group_list.size(); shape_group_id++) {
- const ShapeGroup *shape_group = shape_group_list[shape_group_id];
- // Build a BVH for each shape group
- BVHNode *nodes = scene.shape_groups_bvh_nodes[shape_group_id];
- for (int i = 0; i < shape_group->num_shapes; i++) {
- auto shape_id = shape_group->shape_ids[i];
- auto r = shape_group->stroke_color == nullptr ? 0 : shape_list[shape_id]->stroke_width;
- nodes[i] = BVHNode{shape_id,
- -1,
- scene.shapes_bbox[shape_id],
- r};
- }
- build_bvh(scene, nodes, shape_group->num_shapes);
- }
-
- BVHNode *nodes = scene.bvh_nodes;
- for (int shape_group_id = 0; shape_group_id < (int)shape_group_list.size(); shape_group_id++) {
- const ShapeGroup *shape_group = shape_group_list[shape_group_id];
- auto max_radius = shape_list[shape_group->shape_ids[0]]->stroke_width;
- if (shape_list[shape_group->shape_ids[0]]->type == ShapeType::Path) {
- const Path *p = (const Path*)(shape_list[shape_group->shape_ids[0]]->ptr);
- if (p->thickness != nullptr) {
- const BVHNode *nodes = scene.path_bvhs[shape_group->shape_ids[0]];
- max_radius = nodes[0].max_radius;
- }
- }
- for (int i = 1; i < shape_group->num_shapes; i++) {
- auto shape_id = shape_group->shape_ids[i];
- auto shape = shape_list[shape_id];
- auto r = shape->stroke_width;
- if (shape->type == ShapeType::Path) {
- const Path *p = (const Path*)(shape_list[shape_id]->ptr);
- if (p->thickness != nullptr) {
- const BVHNode *nodes = scene.path_bvhs[shape_id];
- r = nodes[0].max_radius;
- }
- }
- max_radius = std::max(max_radius, r);
- }
- // Fetch group bbox from BVH
- auto bbox = scene.shape_groups_bvh_nodes[shape_group_id][2 * shape_group->num_shapes - 2].box;
- // Transform box from local to world space
- nodes[shape_group_id].child0 = shape_group_id;
- nodes[shape_group_id].child1 = -1;
- nodes[shape_group_id].box = transform(shape_group->shape_to_canvas, bbox);
- if (shape_group->stroke_color == nullptr) {
- nodes[shape_group_id].max_radius = 0;
- } else {
- nodes[shape_group_id].max_radius = max_radius;
- }
- }
- build_bvh(scene, nodes, shape_group_list.size());
-}
-
-template <bool alloc_mode>
-size_t allocate_buffers(Scene &scene,
- const std::vector<const Shape *> &shape_list,
- const std::vector<const ShapeGroup *> &shape_group_list) {
- auto num_shapes = shape_list.size();
- auto num_shape_groups = shape_group_list.size();
-
- size_t buffer_size = 0;
- if (alloc_mode) scene.shapes = (Shape*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Shape) * num_shapes);
- if (alloc_mode) scene.d_shapes = (Shape*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Shape) * num_shapes);
- if (alloc_mode) scene.shape_groups = (ShapeGroup*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(ShapeGroup) * num_shape_groups);
- if (alloc_mode) scene.d_shape_groups = (ShapeGroup*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(ShapeGroup) * num_shape_groups);
- if (alloc_mode) scene.sample_shapes_cdf = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * scene.num_total_shapes);
- if (alloc_mode) scene.sample_shapes_pmf = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * scene.num_total_shapes);
- if (alloc_mode) scene.sample_shape_id = (int*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(int) * scene.num_total_shapes);
- if (alloc_mode) scene.sample_group_id = (int*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(int) * scene.num_total_shapes);
- if (alloc_mode) scene.shapes_length = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * num_shapes);
- if (alloc_mode) scene.path_length_cdf = (float**)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float*) * num_shapes);
- if (alloc_mode) scene.path_length_pmf = (float**)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float*) * num_shapes);
- if (alloc_mode) scene.path_point_id_map = (int**)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(int*) * num_shapes);
- if (alloc_mode) scene.filter = (Filter*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Filter));
- if (alloc_mode) scene.d_filter = (DFilter*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(DFilter));
- if (alloc_mode) scene.shapes_bbox = (AABB*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(AABB) * num_shapes);
- if (alloc_mode) scene.path_bvhs = (BVHNode**)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(BVHNode*) * num_shapes);
- if (alloc_mode) scene.shape_groups_bvh_nodes = (BVHNode**)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(BVHNode*) * num_shape_groups);
- if (alloc_mode) scene.bvh_nodes = (BVHNode*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(BVHNode) * (2 * num_shape_groups - 1));
-
- if (alloc_mode) {
- for (int i = 0; i < num_shapes; i++) {
- scene.path_length_cdf[i] = nullptr;
- scene.path_length_pmf[i] = nullptr;
- scene.path_point_id_map[i] = nullptr;
- scene.path_bvhs[i] = nullptr;
- }
- }
-
- for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) {
- switch (shape_list[shape_id]->type) {
- case ShapeType::Circle: {
- if (alloc_mode) scene.shapes[shape_id].ptr = (Circle*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Circle)); // scene.shapes[shape_id].ptr
- if (alloc_mode) scene.d_shapes[shape_id].ptr = (Circle*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Circle)); // scene.d_shapes[shape_id].ptr
- break;
- } case ShapeType::Ellipse: {
- if (alloc_mode) scene.shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Ellipse)); // scene.shapes[shape_id].ptr
- if (alloc_mode) scene.d_shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Ellipse)); // scene.d_shapes[shape_id].ptr
- break;
- } case ShapeType::Path: {
- if (alloc_mode) scene.shapes[shape_id].ptr = (Path*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Path)); // scene.shapes[shape_id].ptr
- if (alloc_mode) scene.d_shapes[shape_id].ptr = (Path*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Path)); // scene.d_shapes[shape_id].ptr
-
- const Path *p_ = (const Path*)(shape_list[shape_id]->ptr);
- Path *p = nullptr, *d_p = nullptr;
- if (alloc_mode) p = (Path*)scene.shapes[shape_id].ptr;
- if (alloc_mode) d_p = (Path*)scene.d_shapes[shape_id].ptr;
- if (alloc_mode) p->num_control_points = (int*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(int) * p_->num_base_points); // p->num_control_points
- if (alloc_mode) p->points = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * (2 * p_->num_points)); // p->points
- if (alloc_mode) d_p->points = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * (2 * p_->num_points)); // d_p->points
- if (p_->thickness != nullptr) {
- if (alloc_mode) p->thickness = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * p_->num_points); // p->thickness
- if (alloc_mode) d_p->thickness = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * p_->num_points); // d_p->thickness
- } else {
- if (alloc_mode) p->thickness = nullptr;
- if (alloc_mode) d_p->thickness = nullptr;
- }
- if (alloc_mode) scene.path_length_pmf[shape_id] = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * p_->num_base_points); // scene.path_length_pmf
- if (alloc_mode) scene.path_length_cdf[shape_id] = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * p_->num_base_points); // scene.path_length_cdf
- if (alloc_mode) scene.path_point_id_map[shape_id] = (int*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(int) * p_->num_base_points); // scene.path_point_id_map
- if (alloc_mode) scene.path_bvhs[shape_id] = (BVHNode*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(BVHNode) * (2 * p_->num_base_points - 1));
- break;
- } case ShapeType::Rect: {
- if (alloc_mode) scene.shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Rect)); // scene.shapes[shape_id].ptr
- if (alloc_mode) scene.d_shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Rect)); // scene.d_shapes[shape_id].ptr
- break;
- } default: {
- assert(false);
- break;
- }
- }
- }
-
- for (int group_id = 0; group_id < scene.num_shape_groups; group_id++) {
- const ShapeGroup *shape_group = shape_group_list[group_id];
- if (shape_group->fill_color != nullptr) {
- switch (shape_group->fill_color_type) {
- case ColorType::Constant: {
- if (alloc_mode) scene.shape_groups[group_id].fill_color = (Constant*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Constant)); // color
- if (alloc_mode) scene.d_shape_groups[group_id].fill_color = (Constant*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Constant)); // d_color
- break;
- } case ColorType::LinearGradient: {
- if (alloc_mode) scene.shape_groups[group_id].fill_color = (LinearGradient*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(LinearGradient)); // color
- if (alloc_mode) scene.d_shape_groups[group_id].fill_color = (LinearGradient*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(LinearGradient)); // d_color
-
- const LinearGradient *c_ = (const LinearGradient *)shape_group->fill_color;
- LinearGradient *c = nullptr, *d_c = nullptr;
- if (alloc_mode) c = (LinearGradient *)scene.shape_groups[group_id].fill_color;
- if (alloc_mode) d_c = (LinearGradient *)scene.d_shape_groups[group_id].fill_color;
- if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets
- if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors
- if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets
- if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors
- break;
- } case ColorType::RadialGradient: {
- if (alloc_mode) scene.shape_groups[group_id].fill_color = (RadialGradient*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(RadialGradient)); // color
- if (alloc_mode) scene.d_shape_groups[group_id].fill_color = (RadialGradient*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(RadialGradient)); // d_color
-
- const RadialGradient *c_ = (const RadialGradient *)shape_group->fill_color;
- RadialGradient *c = nullptr, *d_c = nullptr;
- if (alloc_mode) c = (RadialGradient *)scene.shape_groups[group_id].fill_color;
- if (alloc_mode) d_c = (RadialGradient *)scene.d_shape_groups[group_id].fill_color;
- if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets
- if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors
- if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets
- if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors
- break;
- } default: {
- assert(false);
- }
- }
- } else {
- if (alloc_mode) scene.shape_groups[group_id].fill_color = nullptr;
- if (alloc_mode) scene.d_shape_groups[group_id].fill_color = nullptr;
- }
- if (shape_group->stroke_color != nullptr) {
- switch (shape_group->stroke_color_type) {
- case ColorType::Constant: {
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = (Constant*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Constant)); // color
- if (alloc_mode) scene.d_shape_groups[group_id].stroke_color = (Constant*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(Constant)); // d_color
- break;
- } case ColorType::LinearGradient: {
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = (LinearGradient*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(LinearGradient)); // color
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = (LinearGradient*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(LinearGradient)); // d_color
-
- const LinearGradient *c_ = (const LinearGradient *)shape_group->stroke_color;
- LinearGradient *c = nullptr, *d_c = nullptr;
- if (alloc_mode) c = (LinearGradient *)scene.shape_groups[group_id].stroke_color;
- if (alloc_mode) d_c = (LinearGradient *)scene.d_shape_groups[group_id].stroke_color;
- if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets
- if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors
- if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets
- if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors
- break;
- } case ColorType::RadialGradient: {
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = (RadialGradient*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(RadialGradient)); // color
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = (RadialGradient*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(RadialGradient)); // d_color
-
- const RadialGradient *c_ = (const RadialGradient *)shape_group->stroke_color;
- RadialGradient *c = nullptr, *d_c = nullptr;
- if (alloc_mode) c = (RadialGradient *)scene.shape_groups[group_id].stroke_color;
- if (alloc_mode) d_c = (RadialGradient *)scene.d_shape_groups[group_id].stroke_color;
- if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets
- if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors
- if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets
- if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors
- break;
- } default: {
- assert(false);
- }
- }
- } else {
- if (alloc_mode) scene.shape_groups[group_id].stroke_color = nullptr;
- if (alloc_mode) scene.d_shape_groups[group_id].stroke_color = nullptr;
- }
- if (alloc_mode) scene.shape_groups[group_id].shape_ids = (int*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(int) * shape_group->num_shapes); // shape_group->shape_ids
- if (alloc_mode) scene.shape_groups_bvh_nodes[group_id] = (BVHNode*)&scene.buffer[buffer_size];
- buffer_size += align(sizeof(BVHNode) * (2 * shape_group->num_shapes - 1)); // scene.shape_groups_bvh_nodes[group_id]
- }
- return buffer_size;
-}
-
-Scene::Scene(int canvas_width,
- int canvas_height,
- const std::vector<const Shape *> &shape_list,
- const std::vector<const ShapeGroup *> &shape_group_list,
- const Filter &filter,
- bool use_gpu,
- int gpu_index)
- : canvas_width(canvas_width),
- canvas_height(canvas_height),
- num_shapes(shape_list.size()),
- num_shape_groups(shape_group_list.size()),
- use_gpu(use_gpu),
- gpu_index(gpu_index) {
- if (num_shapes == 0) {
- return;
- }
- // Shape group may reuse some of the shapes,
- // record the total number of shapes.
- int num_total_shapes = 0;
- for (const ShapeGroup *sg : shape_group_list) {
- num_total_shapes += sg->num_shapes;
- }
- this->num_total_shapes = num_total_shapes;
-
- // Memory initialization
-#ifdef __NVCC__
- int old_device_id = -1;
-#endif
- if (use_gpu) {
-#ifdef __NVCC__
- checkCuda(cudaGetDevice(&old_device_id));
- if (gpu_index != -1) {
- checkCuda(cudaSetDevice(gpu_index));
- }
-#else
- throw std::runtime_error("diffvg not compiled with GPU");
- assert(false);
-#endif
- }
-
- size_t buffer_size = allocate_buffers<false>(*this, shape_list, shape_group_list);
- // Allocate a huge buffer for everything
- allocate(use_gpu, buffer_size, &buffer);
- // memset(buffer, 111, buffer_size);
- // Actually distribute the buffer
- allocate_buffers<true>(*this, shape_list, shape_group_list);
- copy_and_init_shapes(*this, shape_list);
- copy_and_init_shape_groups(*this, shape_group_list);
-
- std::vector<float> shape_length_list = compute_shape_length(shape_list);
- // Copy shape_length
- if (use_gpu) {
-#ifdef __NVCC__
- checkCuda(cudaMemcpy(this->shapes_length, &shape_length_list[0], num_shapes * sizeof(float), cudaMemcpyHostToDevice));
-#else
- throw std::runtime_error("diffvg not compiled with GPU");
- assert(false);
-#endif
- } else {
- memcpy(this->shapes_length, &shape_length_list[0], num_shapes * sizeof(float));
- }
- build_shape_cdfs(*this, shape_group_list, shape_length_list);
- build_path_cdfs(*this, shape_list, shape_length_list);
- compute_bounding_boxes(*this, shape_list, shape_group_list);
-
- // Filter initialization
- *(this->filter) = filter;
- this->d_filter->radius = 0;
-
- if (use_gpu) {
-#ifdef __NVCC__
- if (old_device_id != -1) {
- checkCuda(cudaSetDevice(old_device_id));
- }
-#else
- throw std::runtime_error("diffvg not compiled with GPU");
- assert(false);
-#endif
- }
-}
-
-Scene::~Scene() {
- if (num_shapes == 0) {
- return;
- }
- if (use_gpu) {
-#ifdef __NVCC__
- int old_device_id = -1;
- checkCuda(cudaGetDevice(&old_device_id));
- if (gpu_index != -1) {
- checkCuda(cudaSetDevice(gpu_index));
- }
-
- checkCuda(cudaFree(buffer));
-
- checkCuda(cudaSetDevice(old_device_id));
-#else
- // Don't throw because C++ don't want a destructor to throw.
- std::cerr << "diffvg not compiled with GPU";
- exit(1);
-#endif
- } else {
- free(buffer);
- }
-}
-
-Shape Scene::get_d_shape(int shape_id) const {
- return d_shapes[shape_id];
-}
-
-ShapeGroup Scene::get_d_shape_group(int group_id) const {
- return d_shape_groups[group_id];
-}
-
-float Scene::get_d_filter_radius() const {
- return d_filter->radius;
-}
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/execution_policy.h b/spaces/CVPR/LIVE/thrust/thrust/system/cpp/execution_policy.h
deleted file mode 100644
index 3bf521be348f834fe71f0a754425a9c2438a1526..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/execution_policy.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-/*! \file thrust/system/cpp/execution_policy.h
- * \brief Execution policies for Thrust's standard C++ system.
- */
-
-#include
-
-// get the execution policies definitions first
-#include
-
-// get the definition of par
-#include
-
-// now get all the algorithm definitions
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-
-// define these entities here for the purpose of Doxygenating them
-// they are actually defined elsewhere
-#if 0
-namespace thrust
-{
-namespace system
-{
-namespace cpp
-{
-
-
-/*! \addtogroup execution_policies
- * \{
- */
-
-
-/*! \p thrust::system::cpp::execution_policy is the base class for all Thrust parallel execution
- * policies which are derived from Thrust's standard C++ backend system.
- */
-template<typename DerivedPolicy>
-struct execution_policy : thrust::execution_policy<DerivedPolicy>
-{};
-
-
-/*! \p thrust::system::cpp::tag is a type representing Thrust's standard C++ backend system in C++'s type system.
- * Iterators "tagged" with a type which is convertible to \p cpp::tag assert that they may be
- * "dispatched" to algorithm implementations in the \p cpp system.
- */
-struct tag : thrust::system::cpp::execution_policy<tag> { unspecified };
-
-
-/*!
- * \p thrust::system::cpp::par is the parallel execution policy associated with Thrust's standard
- * C++ backend system.
- *
- * Instead of relying on implicit algorithm dispatch through iterator system tags, users may
- * directly target Thrust's C++ backend system by providing \p thrust::cpp::par as an algorithm
- * parameter.
- *
- * Explicit dispatch can be useful in avoiding the introduction of data copies into containers such
- * as \p thrust::cpp::vector.
- *
- * The type of \p thrust::cpp::par is implementation-defined.
- *
- * The following code snippet demonstrates how to use \p thrust::cpp::par to explicitly dispatch an
- * invocation of \p thrust::for_each to the standard C++ backend system:
- *
- * \code
- * #include
- * #include
- * #include
- *
- * struct printf_functor
- * {
- * __host__ __device__
- * void operator()(int x)
- * {
- * printf("%d\n", x);
- * }
- * };
- * ...
- * int vec[3];
- * vec[0] = 0; vec[1] = 1; vec[2] = 2;
- *
- * thrust::for_each(thrust::cpp::par, vec.begin(), vec.end(), printf_functor());
- *
- * // 0 1 2 is printed to standard output in some unspecified order
- * \endcode
- */
-static const unspecified par;
-
-
-/*! \}
- */
-
-
-} // end cpp
-} // end system
-} // end thrust
-#endif
-
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/uninitialized_fill.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/uninitialized_fill.h
deleted file mode 100644
index 65e59fae5dce223c35403adc364a3e1748687923..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/uninitialized_fill.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include
-
-// this system has no special uninitialized_fill functions
-
diff --git a/spaces/Catspin/2_ai_chat/README.md b/spaces/Catspin/2_ai_chat/README.md
deleted file mode 100644
index a99d5fabe5e7b3edaf5728cadb329fc79622a8f5..0000000000000000000000000000000000000000
--- a/spaces/Catspin/2_ai_chat/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Boxai
-emoji: 👀
-colorFrom: purple
-colorTo: yellow
-sdk: static
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attrs/exceptions.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attrs/exceptions.py
deleted file mode 100644
index bd9efed202ab1cdf57a9e99cb4e094ef6f38d0c0..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attrs/exceptions.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-from attr.exceptions import * # noqa
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_textwrap.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_textwrap.py
deleted file mode 100644
index b47dcbd4264e86715adfae1c5124c288b67a983e..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/click/_textwrap.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import textwrap
-import typing as t
-from contextlib import contextmanager
-
-
-class TextWrapper(textwrap.TextWrapper):
- def _handle_long_word(
- self,
- reversed_chunks: t.List[str],
- cur_line: t.List[str],
- cur_len: int,
- width: int,
- ) -> None:
- space_left = max(width - cur_len, 1)
-
- if self.break_long_words:
- last = reversed_chunks[-1]
- cut = last[:space_left]
- res = last[space_left:]
- cur_line.append(cut)
- reversed_chunks[-1] = res
- elif not cur_line:
- cur_line.append(reversed_chunks.pop())
-
- @contextmanager
- def extra_indent(self, indent: str) -> t.Iterator[None]:
- old_initial_indent = self.initial_indent
- old_subsequent_indent = self.subsequent_indent
- self.initial_indent += indent
- self.subsequent_indent += indent
-
- try:
- yield
- finally:
- self.initial_indent = old_initial_indent
- self.subsequent_indent = old_subsequent_indent
-
- def indent_only(self, text: str) -> str:
- rv = []
-
- for idx, line in enumerate(text.splitlines()):
- indent = self.initial_indent
-
- if idx > 0:
- indent = self.subsequent_indent
-
- rv.append(f"{indent}{line}")
-
- return "\n".join(rv)
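The `extra_indent` context manager and `indent_only` helper above are easiest to see in a tiny usage sketch. This is illustrative only and assumes the internal module is importable as `click._textwrap` (a private path that may move between Click releases):

```python
# Minimal sketch of the TextWrapper helpers defined above.
# Assumption: the private module path click._textwrap is importable.
from click._textwrap import TextWrapper

wrapper = TextWrapper(width=30, initial_indent="", subsequent_indent="")
text = "first line\nsecond line"

with wrapper.extra_indent("    "):
    # Inside the context both indents are temporarily extended by four spaces.
    print(wrapper.indent_only(text))

# After the context exits the original indents are restored.
print(wrapper.indent_only(text))
```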
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/libarchive.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/libarchive.py
deleted file mode 100644
index de862b111d8ffa5141c8ace34849193e105d6460..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/libarchive.py
+++ /dev/null
@@ -1,217 +0,0 @@
-from __future__ import absolute_import, division, print_function
-
-from contextlib import contextmanager
-from ctypes import (
- CFUNCTYPE,
- POINTER,
- c_int,
- c_longlong,
- c_void_p,
- cast,
- create_string_buffer,
-)
-
-import libarchive
-import libarchive.ffi as ffi
-
-from fsspec import open_files
-from fsspec.archive import AbstractArchiveFileSystem
-from fsspec.implementations.memory import MemoryFile
-from fsspec.utils import DEFAULT_BLOCK_SIZE
-
-# Libarchive requires seekable files or memory only for certain archive
-# types. However, since we read the directory first to cache the contents
-# and also allow random access to any file, the file-like object needs
-# to be seekable no matter what.
-
-# Seek call-backs (not provided in the libarchive python wrapper)
-SEEK_CALLBACK = CFUNCTYPE(c_longlong, c_int, c_void_p, c_longlong, c_int)
-read_set_seek_callback = ffi.ffi(
- "read_set_seek_callback", [ffi.c_archive_p, SEEK_CALLBACK], c_int, ffi.check_int
-)
-new_api = hasattr(ffi, "NO_OPEN_CB")
-
-
-@contextmanager
-def custom_reader(file, format_name="all", filter_name="all", block_size=ffi.page_size):
- """Read an archive from a seekable file-like object.
-
-    The `file` object must support the standard `readinto` and `seek` methods.
- """
- buf = create_string_buffer(block_size)
- buf_p = cast(buf, c_void_p)
-
- def read_func(archive_p, context, ptrptr):
- # readinto the buffer, returns number of bytes read
- length = file.readinto(buf)
- # write the address of the buffer into the pointer
- ptrptr = cast(ptrptr, POINTER(c_void_p))
- ptrptr[0] = buf_p
- # tell libarchive how much data was written into the buffer
- return length
-
- def seek_func(archive_p, context, offset, whence):
- file.seek(offset, whence)
-        # tell libarchive the current position
- return file.tell()
-
- read_cb = ffi.READ_CALLBACK(read_func)
- seek_cb = SEEK_CALLBACK(seek_func)
-
- if new_api:
- open_cb = ffi.NO_OPEN_CB
- close_cb = ffi.NO_CLOSE_CB
- else:
- open_cb = libarchive.read.OPEN_CALLBACK(ffi.VOID_CB)
- close_cb = libarchive.read.CLOSE_CALLBACK(ffi.VOID_CB)
-
- with libarchive.read.new_archive_read(format_name, filter_name) as archive_p:
- read_set_seek_callback(archive_p, seek_cb)
- ffi.read_open(archive_p, None, open_cb, read_cb, close_cb)
- yield libarchive.read.ArchiveRead(archive_p)
-
-
-class LibArchiveFileSystem(AbstractArchiveFileSystem):
- """Compressed archives as a file-system (read-only)
-
- Supports the following formats:
-    tar, pax, cpio, ISO9660, zip, mtree, shar, ar, raw, xar, lha/lzh, rar,
-    Microsoft CAB, 7-Zip, WARC
-
- See the libarchive documentation for further restrictions.
- https://www.libarchive.org/
-
-    Keeps the file object open while the instance lives. It only works with
-    seekable file-like objects. If the filesystem does not support this kind
-    of file object, it is recommended to cache the file locally first.
-
- This class is pickleable, but not necessarily thread-safe (depends on the
- platform). See libarchive documentation for details.
- """
-
- root_marker = ""
- protocol = "libarchive"
- cachable = False
-
- def __init__(
- self,
- fo="",
- mode="r",
- target_protocol=None,
- target_options=None,
- block_size=DEFAULT_BLOCK_SIZE,
- **kwargs,
- ):
- """
- Parameters
- ----------
- fo: str or file-like
-            Contains the archive, and must exist. If a str, will fetch file using
- :meth:`~fsspec.open_files`, which must return one file exactly.
- mode: str
- Currently, only 'r' accepted
- target_protocol: str (optional)
- If ``fo`` is a string, this value can be used to override the
- FS protocol inferred from a URL
- target_options: dict (optional)
- Kwargs passed when instantiating the target FS, if ``fo`` is
- a string.
- """
- super().__init__(self, **kwargs)
- if mode != "r":
- raise ValueError("Only read from archive files accepted")
- if isinstance(fo, str):
- files = open_files(fo, protocol=target_protocol, **(target_options or {}))
- if len(files) != 1:
- raise ValueError(
- 'Path "{}" did not resolve to exactly'
- 'one file: "{}"'.format(fo, files)
- )
- fo = files[0]
- self.of = fo
- self.fo = fo.__enter__() # the whole instance is a context
- self.block_size = block_size
- self.dir_cache = None
-
- @contextmanager
- def _open_archive(self):
- self.fo.seek(0)
- with custom_reader(self.fo, block_size=self.block_size) as arc:
- yield arc
-
- @classmethod
- def _strip_protocol(cls, path):
- # file paths are always relative to the archive root
- return super()._strip_protocol(path).lstrip("/")
-
- def _get_dirs(self):
- fields = {
- "name": "pathname",
- "size": "size",
- "created": "ctime",
- "mode": "mode",
- "uid": "uid",
- "gid": "gid",
- "mtime": "mtime",
- }
-
- if self.dir_cache is not None:
- return
-
- self.dir_cache = {}
- list_names = []
- with self._open_archive() as arc:
- for entry in arc:
- if not entry.isdir and not entry.isfile:
- # Skip symbolic links, fifo entries, etc.
- continue
- self.dir_cache.update(
- {
- dirname
- + "/": {"name": dirname + "/", "size": 0, "type": "directory"}
-                        for dirname in self._all_dirnames({entry.name})
- }
- )
- f = {key: getattr(entry, fields[key]) for key in fields}
- f["type"] = "directory" if entry.isdir else "file"
- list_names.append(entry.name)
-
- self.dir_cache[f["name"]] = f
- # libarchive does not seem to return an entry for the directories (at least
- # not in all formats), so get the directories names from the files names
- self.dir_cache.update(
- {
- dirname + "/": {"name": dirname + "/", "size": 0, "type": "directory"}
- for dirname in self._all_dirnames(list_names)
- }
- )
-
- def _open(
- self,
- path,
- mode="rb",
- block_size=None,
- autocommit=True,
- cache_options=None,
- **kwargs,
- ):
- path = self._strip_protocol(path)
- if mode != "rb":
- raise NotImplementedError
-
- data = bytes()
- with self._open_archive() as arc:
- for entry in arc:
- if entry.pathname != path:
- continue
-
- if entry.size == 0:
- # empty file, so there are no blocks
- break
-
- for block in entry.get_blocks(entry.size):
- data = block
- break
- else:
- raise ValueError
- return MemoryFile(fs=self, path=path, data=data)
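For orientation, here is a hedged usage sketch of the filesystem defined above. It assumes the `libarchive-c` bindings are installed and that a local `archive.tar` with a member `docs/readme.txt` exists; both names are purely illustrative:

```python
# Sketch only: "archive.tar" and the member path are illustrative assumptions.
import fsspec

fs = fsspec.filesystem("libarchive", fo="archive.tar")

# The listing is built once by _get_dirs() and cached in dir_cache.
print(fs.ls("", detail=False))

# _open() reads the member eagerly and hands back an in-memory file object.
with fs.open("docs/readme.txt", "rb") as f:
    print(f.read()[:80])
```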
diff --git a/spaces/DaleChen/AutoGPT/autogpt/commands/times.py b/spaces/DaleChen/AutoGPT/autogpt/commands/times.py
deleted file mode 100644
index 3c9b8a4fc67a251c9e81a8c4a725cd1e25fcbebe..0000000000000000000000000000000000000000
--- a/spaces/DaleChen/AutoGPT/autogpt/commands/times.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from datetime import datetime
-
-
-def get_datetime() -> str:
- """Return the current date and time
-
- Returns:
- str: The current date and time
- """
- return "Current date and time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
diff --git a/spaces/DanielPinsk/StableDiffusion/stablediffusionui_tom_riddle_edition.py b/spaces/DanielPinsk/StableDiffusion/stablediffusionui_tom_riddle_edition.py
deleted file mode 100644
index 0d725350c1b57fa1f9245256f3a837207ca5e600..0000000000000000000000000000000000000000
--- a/spaces/DanielPinsk/StableDiffusion/stablediffusionui_tom_riddle_edition.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# -*- coding: utf-8 -*-
-"""StableDiffusionUI_Tom_Riddle_Edition.ipynb
-
-Automatically generated by Colaboratory.
-
-Original file is located at
- https://colab.research.google.com/github/WASasquatch/StableDiffusionUI-TomRiddle/blob/main/StableDiffusionUI_Tom_Riddle_Edition.ipynb
-
-# StableDiffusionUI - Tom Riddle Edition  [](https://github.com/WASasquatch/StableDiffusionUI-TomRiddle)
-
-Adapted from: https://colab.research.google.com/drive/1AfAmwLMd_Vx33O9IwY2TmO9wKZ8ABRRa
-
-Cleaned up for front-end use by **WAS**
-
-## Stability.AI Model Terms of Use
-
-**By using this Notebook, you agree to the following Terms of Use, and license**
-
-This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
-
-The CreativeML OpenRAIL License specifies:
-1. You can't use the model to deliberately produce or share illegal or harmful outputs or content
-2. CompVis claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
-3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
-
-Please read the full license here: https://huggingface.co/spaces/CompVis/stable-diffusion-license
-"""
-
-#@title Check GPU Allocation
-!nvidia-smi
-
-import os, subprocess, time
-#@title Setup Installations
-#@markdown Installation will restart the colab at the end. This is OK. Just run '**Launch WebUI**' when it's restarted.
-CLEAR_SETUP_LOG = True #@param{type:'boolean'}
-ALLOW_COLAB_RESTART = True #@param{type: 'boolean'}
-WD = '/content/stable-diffusion-webui'
-
-def clear():
- from IPython.display import clear_output; return clear_output()
-
-def fetch_bytes(url_or_path):
- if str(url_or_path).startswith('http://') or str(url_or_path).startswith('https://'):
- from urllib.request import urlopen
- return urlopen(url_or_path)
- return open(url_or_path, 'r')
-
-def packages():
- import sys, subprocess
- return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
-
-res = ''
-print('⏳ Installing Stable Diffusion WebUI ...')
-os.chdir('/content/')
-if not os.path.exists(WD):
- res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/AUTOMATIC1111/stable-diffusion-webui'], stdout=subprocess.PIPE).stdout.decode('utf-8')
- os.chdir(WD)
- res += subprocess.run(['pip', '-q', 'install', '-r', 'requirements.txt'], stdout=subprocess.PIPE).stdout.decode('utf-8')
-print(res)
-print('✅ Stable Diffusion WebUI installed.')
-
-#os.chdir(WD)
-if not os.path.exists('repositories'):
- os.makedirs('repositories')
-
-res = ''
-print('⏳ Cloning repositories...')
-if not os.path.exists(f'{WD}/repositories/stable-diffusion'):
- res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/CompVis/stable-diffusion.git', f'{WD}/repositories/stable-diffusion'], stdout=subprocess.PIPE).stdout.decode('utf-8')
-if not os.path.exists(f'{WD}/repositories/taming-transformers'):
- res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/CompVis/taming-transformers.git', f'{WD}/repositories/taming-transformers'], stdout=subprocess.PIPE).stdout.decode('utf-8')
-if not os.path.exists(f'{WD}/repositories/CodeFormer'):
- os.chdir(f'{WD}/repositories')
- res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/sczhou/CodeFormer.git'], stdout=subprocess.PIPE).stdout.decode('utf-8')
- res += subprocess.run(['pip', '-q', 'install', '-r', f'{WD}/repositories/CodeFormer/requirements.txt'], stdout=subprocess.PIPE).stdout.decode('utf-8')
-print(res)
-print('✅ Repositories cloned.')
-
-os.chdir(WD)
-
-# get a token from https://huggingface.co/settings/tokens
-try:
- with fetch_bytes('https://raw.githubusercontent.com/WASasquatch/easydiffusion/main/key.txt') as f:
- k = f.read().decode('utf-8').split(':'); hu = k[0].strip(); ht = k[1].strip()
-except OSError as e:
- raise e
-#user_token = "" #@param {type:"string"}
-print('⏳ Downloading model ...')
-if not os.path.exists('model.ckpt'):
- print(subprocess.run(['wget', '-q', '--show-progress', f'https://{hu}:{ht}@huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt', '-O', 'model.ckpt'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
- if os.path.exists('model.ckpt'):
- print('✅ Model downloaded.')
- else:
- print('⚠️ Unable to download the model!')
-else:
- print('✅ Model downloaded.')
-
-if not os.path.exists('GFPGANv1.3.pth'):
- print(subprocess.run(['wget', '-q', '--show-progress', 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth', '-O', 'GFPGANv1.3.pth'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
-
-os.chdir(WD)
-
-subprocess.run(['git', 'pull', '--quiet'], stdout=subprocess.DEVNULL)
-
-if CLEAR_SETUP_LOG:
- clear()
- print('♻️ Setup finished! Restarting Colab Environment in 5 seconds ...')
- time.sleep(5)
-
-if ALLOW_COLAB_RESTART:
-  os.kill(os.getpid(), 9) # This will crash Colab (required, everything will still be intact so don't worry)
-
-"""Launch WebUI. You will get a link to nnn.gradio.app, follow it."""
-
-#@title Launch WebUI
-import sys, os
-
-WD = '/content/stable-diffusion-webui'
-sys.argv = ["--opt-split-attention"]
-os.chdir(WD)
-
-import webui
-import modules.ui
-import modules.txt2img
-import modules.img2img
-import modules.extras
-
-demo = modules.ui.create_ui(
- txt2img=webui.wrap_gradio_gpu_call(modules.txt2img.txt2img),
- img2img=webui.wrap_gradio_gpu_call(modules.img2img.img2img),
- run_extras=webui.wrap_gradio_gpu_call(modules.extras.run_extras),
- run_pnginfo=modules.extras.run_pnginfo
-)
-
-demo.launch(share=True)
\ No newline at end of file
diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/preprocessing/__init__.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/preprocessing/__init__.py
deleted file mode 100644
index 11899be3b477996772cd1ae754815056f22ca205..0000000000000000000000000000000000000000
--- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/preprocessing/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-@date: 2021/7/5
-@description:
-"""
diff --git a/spaces/DragGan/DragGan-Inversion/torch_utils/ops/filtered_lrelu.h b/spaces/DragGan/DragGan-Inversion/torch_utils/ops/filtered_lrelu.h
deleted file mode 100644
index 2c403e3f275f472315662321cad54dd0dbc56d00..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/torch_utils/ops/filtered_lrelu.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-#include <cuda_runtime.h>
-
-//------------------------------------------------------------------------
-// CUDA kernel parameters.
-
-struct filtered_lrelu_kernel_params
-{
- // These parameters decide which kernel to use.
- int up; // upsampling ratio (1, 2, 4)
- int down; // downsampling ratio (1, 2, 4)
- int2 fuShape; // [size, 1] | [size, size]
- int2 fdShape; // [size, 1] | [size, size]
-
- int _dummy; // Alignment.
-
- // Rest of the parameters.
- const void* x; // Input tensor.
- void* y; // Output tensor.
- const void* b; // Bias tensor.
- unsigned char* s; // Sign tensor in/out. NULL if unused.
- const float* fu; // Upsampling filter.
- const float* fd; // Downsampling filter.
-
- int2 pad0; // Left/top padding.
- float gain; // Additional gain factor.
- float slope; // Leaky ReLU slope on negative side.
- float clamp; // Clamp after nonlinearity.
- int flip; // Filter kernel flip for gradient computation.
-
- int tilesXdim; // Original number of horizontal output tiles.
- int tilesXrep; // Number of horizontal tiles per CTA.
- int blockZofs; // Block z offset to support large minibatch, channel dimensions.
-
- int4 xShape; // [width, height, channel, batch]
- int4 yShape; // [width, height, channel, batch]
- int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused.
- int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
- int swLimit; // Active width of sign tensor in bytes.
-
- longlong4 xStride; // Strides of all tensors except signs, same component order as shapes.
- longlong4 yStride; //
- int64_t bStride; //
- longlong3 fuStride; //
- longlong3 fdStride; //
-};
-
-struct filtered_lrelu_act_kernel_params
-{
- void* x; // Input/output, modified in-place.
- unsigned char* s; // Sign tensor in/out. NULL if unused.
-
- float gain; // Additional gain factor.
- float slope; // Leaky ReLU slope on negative side.
- float clamp; // Clamp after nonlinearity.
-
- int4 xShape; // [width, height, channel, batch]
- longlong4 xStride; // Input/output tensor strides, same order as in shape.
- int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused.
- int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor.
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel specialization.
-
-struct filtered_lrelu_kernel_spec
-{
- void* setup; // Function for filter kernel setup.
- void* exec; // Function for main operation.
- int2 tileOut; // Width/height of launch tile.
- int numWarps; // Number of warps per thread block, determines launch block size.
- int xrep; // For processing multiple horizontal tiles per thread block.
- int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants.
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel selection.
-
-template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB);
-template void* choose_filtered_lrelu_act_kernel(void);
-template cudaError_t copy_filters(cudaStream_t stream);
-
-//------------------------------------------------------------------------
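The structs above only carry kernel parameters; the operation they configure is a fused bias add and leaky ReLU with extra gain and clamping, wrapped between an upsampling and a downsampling filter. A rough Python reference of just the activation part, useful for intuition only (the function name and default values are illustrative assumptions, not NVIDIA's API):

```python
# Unoptimized reference of the activation that the `slope`, `gain` and `clamp`
# parameters above describe. Sketch for intuition; not the CUDA implementation.
import torch
import torch.nn.functional as F

def lrelu_bias_gain_clamp(x, b=None, slope=0.2, gain=2.0 ** 0.5, clamp=256.0):
    if b is not None:
        x = x + b.reshape(1, -1, 1, 1)  # per-channel bias over [N, C, H, W]
    y = F.leaky_relu(x, negative_slope=slope) * gain
    return y.clamp(-clamp, clamp)

x = torch.randn(1, 3, 8, 8)
print(lrelu_bias_gain_clamp(x, torch.zeros(3)).shape)  # torch.Size([1, 3, 8, 8])
```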
diff --git a/spaces/Epoching/GLIDE_Inpaint/glide_text2im/gaussian_diffusion.py b/spaces/Epoching/GLIDE_Inpaint/glide_text2im/gaussian_diffusion.py
deleted file mode 100644
index 1c0f97783e7a336390324516f2ba8e89d1dcfaf1..0000000000000000000000000000000000000000
--- a/spaces/Epoching/GLIDE_Inpaint/glide_text2im/gaussian_diffusion.py
+++ /dev/null
@@ -1,639 +0,0 @@
-"""
-Simplified from https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/gaussian_diffusion.py.
-"""
-
-import math
-
-import numpy as np
-import torch as th
-
-
-def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
- betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
- warmup_time = int(num_diffusion_timesteps * warmup_frac)
- betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
- return betas
-
-
-def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
- """
- This is the deprecated API for creating beta schedules.
-
- See get_named_beta_schedule() for the new library of schedules.
- """
- if beta_schedule == "quad":
- betas = (
- np.linspace(
- beta_start ** 0.5,
- beta_end ** 0.5,
- num_diffusion_timesteps,
- dtype=np.float64,
- )
- ** 2
- )
- elif beta_schedule == "linear":
- betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
- elif beta_schedule == "warmup10":
- betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1)
- elif beta_schedule == "warmup50":
- betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5)
- elif beta_schedule == "const":
- betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
- elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1
- betas = 1.0 / np.linspace(
- num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64
- )
- else:
- raise NotImplementedError(beta_schedule)
- assert betas.shape == (num_diffusion_timesteps,)
- return betas
-
-
-def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
- """
- Get a pre-defined beta schedule for the given name.
-
- The beta schedule library consists of beta schedules which remain similar
- in the limit of num_diffusion_timesteps.
- Beta schedules may be added, but should not be removed or changed once
- they are committed to maintain backwards compatibility.
- """
- if schedule_name == "linear":
-        # Linear schedule from Ho et al., extended to work for any number of
- # diffusion steps.
- scale = 1000 / num_diffusion_timesteps
- return get_beta_schedule(
- "linear",
- beta_start=scale * 0.0001,
- beta_end=scale * 0.02,
- num_diffusion_timesteps=num_diffusion_timesteps,
- )
- elif schedule_name == "squaredcos_cap_v2":
- return betas_for_alpha_bar(
- num_diffusion_timesteps,
- lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
- )
- else:
- raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function,
- which defines the cumulative product of (1-beta) over time from t = [0,1].
-
- :param num_diffusion_timesteps: the number of betas to produce.
- :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
- produces the cumulative product of (1-beta) up to that
- part of the diffusion process.
- :param max_beta: the maximum beta to use; use values lower than 1 to
- prevent singularities.
- """
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return np.array(betas)
-
-
-class GaussianDiffusion:
- """
- Utilities for training and sampling diffusion models.
-
-    Originally ported from this codebase:
- https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
-
- :param betas: a 1-D numpy array of betas for each diffusion timestep,
- starting at T and going to 1.
- """
-
- def __init__(
- self,
- *,
- betas,
- ):
- # Use float64 for accuracy.
- betas = np.array(betas, dtype=np.float64)
- self.betas = betas
- assert len(betas.shape) == 1, "betas must be 1-D"
- assert (betas > 0).all() and (betas <= 1).all()
-
- self.num_timesteps = int(betas.shape[0])
-
- alphas = 1.0 - betas
- self.alphas_cumprod = np.cumprod(alphas, axis=0)
- self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
- self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
- assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
- self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
- self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
- self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
- self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
-
- # calculations for posterior q(x_{t-1} | x_t, x_0)
- self.posterior_variance = (
- betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
- )
- # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
- self.posterior_log_variance_clipped = np.log(
- np.append(self.posterior_variance[1], self.posterior_variance[1:])
- )
- self.posterior_mean_coef1 = (
- betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
- )
- self.posterior_mean_coef2 = (
- (1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
- )
-
- def q_mean_variance(self, x_start, t):
- """
- Get the distribution q(x_t | x_0).
-
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
- """
- mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
- variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
- log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
- return mean, variance, log_variance
-
- def q_sample(self, x_start, t, noise=None):
- """
- Diffuse the data for a given number of diffusion steps.
-
- In other words, sample from q(x_t | x_0).
-
- :param x_start: the initial data batch.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :param noise: if specified, the split-out normal noise.
- :return: A noisy version of x_start.
- """
- if noise is None:
- noise = th.randn_like(x_start)
- assert noise.shape == x_start.shape
- return (
- _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
- + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
- )
-
- def q_posterior_mean_variance(self, x_start, x_t, t):
- """
- Compute the mean and variance of the diffusion posterior:
-
- q(x_{t-1} | x_t, x_0)
-
- """
- assert x_start.shape == x_t.shape
- posterior_mean = (
- _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
- + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
- )
- posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
- posterior_log_variance_clipped = _extract_into_tensor(
- self.posterior_log_variance_clipped, t, x_t.shape
- )
- assert (
- posterior_mean.shape[0]
- == posterior_variance.shape[0]
- == posterior_log_variance_clipped.shape[0]
- == x_start.shape[0]
- )
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
- def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
- """
- Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
- the initial x, x_0.
-
- :param model: the model, which takes a signal and a batch of timesteps
- as input.
- :param x: the [N x C x ...] tensor at time t.
- :param t: a 1-D Tensor of timesteps.
- :param clip_denoised: if True, clip the denoised signal into [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample. Applies before
- clip_denoised.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :return: a dict with the following keys:
- - 'mean': the model mean output.
- - 'variance': the model variance output.
- - 'log_variance': the log of 'variance'.
- - 'pred_xstart': the prediction for x_0.
- """
- if model_kwargs is None:
- model_kwargs = {}
-
- B, C = x.shape[:2]
- assert t.shape == (B,)
- model_output = model(x, t, **model_kwargs)
- if isinstance(model_output, tuple):
- model_output, extra = model_output
- else:
- extra = None
-
- assert model_output.shape == (B, C * 2, *x.shape[2:])
- model_output, model_var_values = th.split(model_output, C, dim=1)
- min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
- max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
- # The model_var_values is [-1, 1] for [min_var, max_var].
- frac = (model_var_values + 1) / 2
- model_log_variance = frac * max_log + (1 - frac) * min_log
- model_variance = th.exp(model_log_variance)
-
- def process_xstart(x):
- if denoised_fn is not None:
- x = denoised_fn(x)
- if clip_denoised:
- return x.clamp(-1, 1)
- return x
-
- pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))
- model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
-
- assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
- return {
- "mean": model_mean,
- "variance": model_variance,
- "log_variance": model_log_variance,
- "pred_xstart": pred_xstart,
- "extra": extra,
- }
-
- def _predict_xstart_from_eps(self, x_t, t, eps):
- assert x_t.shape == eps.shape
- return (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
- )
-
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
- return (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
- ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
-
- def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
- """
- Compute the mean for the previous step, given a function cond_fn that
- computes the gradient of a conditional log probability with respect to
- x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
- condition on y.
-
- This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
- """
- gradient = cond_fn(x, t, **model_kwargs)
- new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
- return new_mean
-
- def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
- """
- Compute what the p_mean_variance output would have been, should the
- model's score function be conditioned by cond_fn.
-
- See condition_mean() for details on cond_fn.
-
- Unlike condition_mean(), this instead uses the conditioning strategy
- from Song et al (2020).
- """
- alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
-
- eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
- eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)
-
- out = p_mean_var.copy()
- out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
- out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
- return out
-
- def p_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- ):
- """
- Sample x_{t-1} from the model at the given timestep.
-
- :param model: the model to sample from.
- :param x: the current tensor at x_{t-1}.
- :param t: the value of t, starting at 0 for the first diffusion step.
- :param clip_denoised: if True, clip the x_start prediction to [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample.
- :param cond_fn: if not None, this is a gradient function that acts
- similarly to the model.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :return: a dict containing the following keys:
- - 'sample': a random sample from the model.
- - 'pred_xstart': a prediction of x_0.
- """
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- noise = th.randn_like(x)
- nonzero_mask = (
- (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
- ) # no noise when t == 0
- if cond_fn is not None:
- out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
- sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
- return {"sample": sample, "pred_xstart": out["pred_xstart"]}
-
- def p_sample_loop(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- ):
- """
- Generate samples from the model.
-
- :param model: the model module.
- :param shape: the shape of the samples, (N, C, H, W).
- :param noise: if specified, the noise from the encoder to sample.
- Should be of the same shape as `shape`.
- :param clip_denoised: if True, clip x_start predictions to [-1, 1].
- :param denoised_fn: if not None, a function which applies to the
- x_start prediction before it is used to sample.
- :param cond_fn: if not None, this is a gradient function that acts
- similarly to the model.
- :param model_kwargs: if not None, a dict of extra keyword arguments to
- pass to the model. This can be used for conditioning.
- :param device: if specified, the device to create the samples on.
- If not specified, use a model parameter's device.
- :param progress: if True, show a tqdm progress bar.
- :return: a non-differentiable batch of samples.
- """
- final = None
- for sample in self.p_sample_loop_progressive(
- model,
- shape,
- noise=noise,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- ):
- final = sample
- return final["sample"]
-
- def p_sample_loop_progressive(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- ):
- """
- Generate samples from the model and yield intermediate samples from
- each timestep of diffusion.
-
- Arguments are the same as p_sample_loop().
- Returns a generator over dicts, where each dict is the return value of
- p_sample().
- """
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- if noise is not None:
- img = noise
- else:
- img = th.randn(*shape, device=device)
- indices = list(range(self.num_timesteps))[::-1]
-
- if progress:
- # Lazy import so that we don't depend on tqdm.
- from tqdm.auto import tqdm
-
- indices = tqdm(indices)
-
- for i in indices:
- t = th.tensor([i] * shape[0], device=device)
- with th.no_grad():
- out = self.p_sample(
- model,
- img,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- )
- yield out
- img = out["sample"]
-
- def ddim_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- eta=0.0,
- ):
- """
- Sample x_{t-1} from the model using DDIM.
-
- Same usage as p_sample().
- """
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- if cond_fn is not None:
- out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
-
- # Usually our model outputs epsilon, but we re-derive it
- # in case we used x_start or x_prev prediction.
- eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
-
- alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
- alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
- sigma = (
- eta
- * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
- * th.sqrt(1 - alpha_bar / alpha_bar_prev)
- )
- # Equation 12.
- noise = th.randn_like(x)
- mean_pred = (
- out["pred_xstart"] * th.sqrt(alpha_bar_prev)
- + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
- )
- nonzero_mask = (
- (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
- ) # no noise when t == 0
- sample = mean_pred + nonzero_mask * sigma * noise
- return {"sample": sample, "pred_xstart": out["pred_xstart"]}
-
- def ddim_reverse_sample(
- self,
- model,
- x,
- t,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- eta=0.0,
- ):
- """
- Sample x_{t+1} from the model using DDIM reverse ODE.
- """
- assert eta == 0.0, "Reverse ODE only for deterministic path"
- out = self.p_mean_variance(
- model,
- x,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- model_kwargs=model_kwargs,
- )
- if cond_fn is not None:
- out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
- # Usually our model outputs epsilon, but we re-derive it
- # in case we used x_start or x_prev prediction.
- eps = (
- _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- - out["pred_xstart"]
- ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
- alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
-
- # Equation 12. reversed
- mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
-
- return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
-
- def ddim_sample_loop(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- eta=0.0,
- ):
- """
- Generate samples from the model using DDIM.
-
- Same usage as p_sample_loop().
- """
- final = None
- for sample in self.ddim_sample_loop_progressive(
- model,
- shape,
- noise=noise,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- device=device,
- progress=progress,
- eta=eta,
- ):
- final = sample
- return final["sample"]
-
- def ddim_sample_loop_progressive(
- self,
- model,
- shape,
- noise=None,
- clip_denoised=True,
- denoised_fn=None,
- cond_fn=None,
- model_kwargs=None,
- device=None,
- progress=False,
- eta=0.0,
- ):
- """
- Use DDIM to sample from the model and yield intermediate samples from
- each timestep of DDIM.
-
- Same usage as p_sample_loop_progressive().
- """
- if device is None:
- device = next(model.parameters()).device
- assert isinstance(shape, (tuple, list))
- if noise is not None:
- img = noise
- else:
- img = th.randn(*shape, device=device)
- indices = list(range(self.num_timesteps))[::-1]
-
- if progress:
- # Lazy import so that we don't depend on tqdm.
- from tqdm.auto import tqdm
-
- indices = tqdm(indices)
-
- for i in indices:
- t = th.tensor([i] * shape[0], device=device)
- with th.no_grad():
- out = self.ddim_sample(
- model,
- img,
- t,
- clip_denoised=clip_denoised,
- denoised_fn=denoised_fn,
- cond_fn=cond_fn,
- model_kwargs=model_kwargs,
- eta=eta,
- )
- yield out
- img = out["sample"]
-
-
-def _extract_into_tensor(arr, timesteps, broadcast_shape):
- """
- Extract values from a 1-D numpy array for a batch of indices.
-
- :param arr: the 1-D numpy array.
- :param timesteps: a tensor of indices into the array to extract.
- :param broadcast_shape: a larger shape of K dimensions with the batch
- dimension equal to the length of timesteps.
- :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
- """
- res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
- while len(res.shape) < len(broadcast_shape):
- res = res[..., None]
- return res + th.zeros(broadcast_shape, device=timesteps.device)
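A small sanity-check sketch of the schedule helpers and the `GaussianDiffusion` constructor above. The step count of 100 is an arbitrary illustration, and the snippet assumes it runs in the same module so the names defined above are in scope:

```python
# Sketch: build a named beta schedule and exercise q_sample on dummy data.
import torch as th

betas = get_named_beta_schedule("squaredcos_cap_v2", 100)
assert betas.shape == (100,)
assert (betas > 0).all() and (betas <= 0.999).all()

diffusion = GaussianDiffusion(betas=betas)

x_start = th.zeros(2, 3, 8, 8)
t = th.tensor([0, 99])            # one early and one late timestep
x_t = diffusion.q_sample(x_start, t)
print(x_t.shape)                  # torch.Size([2, 3, 8, 8]); mostly noise at t=99
```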
diff --git a/spaces/EuroPython2022/BayesCap/src/app.py b/spaces/EuroPython2022/BayesCap/src/app.py
deleted file mode 100644
index c023d500800237aca29dfa2145b8d45d8c1c4ed7..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/BayesCap/src/app.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import numpy as np
-import random
-import matplotlib.pyplot as plt
-from matplotlib import cm
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision.models as models
-from torch.utils.data import Dataset, DataLoader
-from torchvision import transforms
-from torchvision.transforms.functional import InterpolationMode as IMode
-
-from PIL import Image
-
-from ds import *
-from losses import *
-from networks_SRGAN import *
-from utils import *
-import utils  # the module itself is referenced below as utils.image2tensor
-
-
-NetG = Generator()
-model_parameters = filter(lambda p: True, NetG.parameters())
-params = sum([np.prod(p.size()) for p in model_parameters])
-print("Number of Parameters:",params)
-NetC = BayesCap(in_channels=3, out_channels=3)
-
-
-NetG = Generator()
-NetG.load_state_dict(torch.load('../ckpt/srgan-ImageNet-bc347d67.pth', map_location='cuda:0'))
-NetG.to('cuda')
-NetG.eval()
-
-NetC = BayesCap(in_channels=3, out_channels=3)
-NetC.load_state_dict(torch.load('../ckpt/BayesCap_SRGAN_best.pth', map_location='cuda:0'))
-NetC.to('cuda')
-NetC.eval()
-
-def tensor01_to_pil(xt):
- r = transforms.ToPILImage(mode='RGB')(xt.squeeze())
- return r
-
-
-def predict(img):
- """
- img: image
- """
- image_size = (256,256)
- upscale_factor = 4
- lr_transforms = transforms.Resize((image_size[0]//upscale_factor, image_size[1]//upscale_factor), interpolation=IMode.BICUBIC, antialias=True)
- # lr_transforms = transforms.Resize((128, 128), interpolation=IMode.BICUBIC, antialias=True)
-
- img = Image.fromarray(np.array(img))
- img = lr_transforms(img)
- lr_tensor = utils.image2tensor(img, range_norm=False, half=False)
-
- device = 'cuda'
- dtype = torch.cuda.FloatTensor
- xLR = lr_tensor.to(device).unsqueeze(0)
- xLR = xLR.type(dtype)
- # pass them through the network
- with torch.no_grad():
- xSR = NetG(xLR)
- xSRC_mu, xSRC_alpha, xSRC_beta = NetC(xSR)
-
- a_map = (1/(xSRC_alpha[0] + 1e-5)).to('cpu').data
- b_map = xSRC_beta[0].to('cpu').data
- u_map = (a_map**2)*(torch.exp(torch.lgamma(3/(b_map + 1e-2)))/torch.exp(torch.lgamma(1/(b_map + 1e-2))))
-
-
- x_LR = tensor01_to_pil(xLR.to('cpu').data.clip(0,1).transpose(0,2).transpose(0,1))
-
- x_mean = tensor01_to_pil(xSR.to('cpu').data.clip(0,1).transpose(0,2).transpose(0,1))
-
- #im = Image.fromarray(np.uint8(cm.gist_earth(myarray)*255))
-
- a_map = torch.clamp(a_map, min=0, max=0.1)
- a_map = (a_map - a_map.min())/(a_map.max() - a_map.min())
- x_alpha = Image.fromarray(np.uint8(cm.inferno(a_map.transpose(0,2).transpose(0,1).squeeze())*255))
-
- b_map = torch.clamp(b_map, min=0.45, max=0.75)
- b_map = (b_map - b_map.min())/(b_map.max() - b_map.min())
- x_beta = Image.fromarray(np.uint8(cm.cividis(b_map.transpose(0,2).transpose(0,1).squeeze())*255))
-
- u_map = torch.clamp(u_map, min=0, max=0.15)
- u_map = (u_map - u_map.min())/(u_map.max() - u_map.min())
- x_uncer = Image.fromarray(np.uint8(cm.hot(u_map.transpose(0,2).transpose(0,1).squeeze())*255))
-
- return x_LR, x_mean, x_alpha, x_beta, x_uncer
-
-import gradio as gr
-
-title = "BayesCap"
-description = "BayesCap: Bayesian Identity Cap for Calibrated Uncertainty in Frozen Neural Networks (ECCV 2022)"
-article = "BayesCap: Bayesian Identity Cap for Calibrated Uncertainty in Frozen Neural Networks | Github Repo"
-
-
-gr.Interface(
- fn=predict,
-    inputs=gr.inputs.Image(type='pil', label="Original"),
- outputs=[
- gr.outputs.Image(type='pil', label="Low-res"),
- gr.outputs.Image(type='pil', label="Super-res"),
- gr.outputs.Image(type='pil', label="Alpha"),
- gr.outputs.Image(type='pil', label="Beta"),
- gr.outputs.Image(type='pil', label="Uncertainty")
- ],
- title=title,
- description=description,
- article=article,
- examples=[
- ["../demo_examples/baby.png"],
- ["../demo_examples/bird.png"]
- ]
-).launch(share=True)
\ No newline at end of file
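The uncertainty map computed in `predict` is the variance of a generalized Gaussian, u = (1/α)² · Γ(3/β) / Γ(1/β), built from the per-pixel α and β maps. A standalone numeric check of that formula with made-up scalar values (the small epsilons used in the app code are omitted here):

```python
# Numeric check of the generalized-Gaussian variance used for the uncertainty map.
# Values are invented; the app applies this per pixel to whole maps.
import torch

a = torch.tensor(0.5)   # plays the role of 1 / alpha in the app code
b = torch.tensor(1.0)   # beta

u = (a ** 2) * (torch.exp(torch.lgamma(3 / b)) / torch.exp(torch.lgamma(1 / b)))
# Gamma(3) = 2 and Gamma(1) = 1, so u = 0.25 * 2 = 0.5
print(u)  # tensor(0.5000)
```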
diff --git "a/spaces/Fengbinbin/gpt-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" "b/spaces/Fengbinbin/gpt-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py"
deleted file mode 100644
index efada619a6fe121cba28a18f92b3c4a0de4c88bc..0000000000000000000000000000000000000000
--- "a/spaces/Fengbinbin/gpt-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py"
+++ /dev/null
@@ -1,175 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
-fast_debug = False
-
-class PaperFileGroup():
- def __init__(self):
- self.file_paths = []
- self.file_contents = []
- self.sp_file_contents = []
- self.sp_file_index = []
- self.sp_file_tag = []
-
- # count_token
- from request_llm.bridge_all import model_info
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
- def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
- self.get_token_num = get_token_num
-
- def run_file_split(self, max_token_limit=1900):
- """
-        Split the long text into smaller pieces
- """
- for index, file_content in enumerate(self.file_contents):
- if self.get_token_num(file_content) < max_token_limit:
- self.sp_file_contents.append(file_content)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index])
- else:
- from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
- segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
- for j, segment in enumerate(segments):
- self.sp_file_contents.append(segment)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
-
- print('Segmentation: done')
-
-def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
- import time, os, re
- from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-
-    # <-------- Read the LaTeX files and remove all comments in them ---------->
- pfg = PaperFileGroup()
-
- for index, fp in enumerate(file_manifest):
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
- file_content = f.read()
-            # Regular expression that matches LaTeX comments
-            comment_pattern = r'%.*'
-            # Find comments with the regex and replace them with empty strings
-            clean_tex_content = re.sub(comment_pattern, '', file_content)
-            # Record the text with comments removed
- pfg.file_paths.append(fp)
- pfg.file_contents.append(clean_tex_content)
-
-    # <-------- Split LaTeX files that are too long ---------->
- pfg.run_file_split(max_token_limit=1024)
- n_split = len(pfg.sp_file_contents)
-
-    # <-------- Extract the abstract ---------->
- # if language == 'en':
- # abs_extract_inputs = f"Please write an abstract for this paper"
-
-    # # Single thread: fetch the paper's meta information
- # paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
- # inputs=abs_extract_inputs,
- # inputs_show_user=f"正在抽取摘要信息。",
- # llm_kwargs=llm_kwargs,
- # chatbot=chatbot, history=[],
- # sys_prompt="Your job is to collect information from materials。",
- # )
-
-    # <-------- Start the multi-threaded translation ---------->
- if language == 'en->zh':
- inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
- elif language == 'zh->en':
- inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
-
- gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
- inputs_array=inputs_array,
- inputs_show_user_array=inputs_show_user_array,
- llm_kwargs=llm_kwargs,
- chatbot=chatbot,
- history_array=[[""] for _ in range(n_split)],
- sys_prompt_array=sys_prompt_array,
-        # max_workers=5, # maximum parallel load allowed by OpenAI
- scroller_max_len = 80
- )
-
-    # <-------- Collect the results and exit ---------->
- create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
- res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
- history = gpt_response_collection
- chatbot.append((f"{fp}完成了吗?", res))
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-
-
-
-
-@CatchException
-def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    # Basic information: function and contributors
- chatbot.append([
- "函数插件功能?",
- "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-    # Try to import dependencies; if any are missing, suggest how to install them
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
-    history = [] # clear the history to avoid input overflow
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
-
-
-
-
-
-@CatchException
-def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    # Basic information: function and contributors
- chatbot.append([
- "函数插件功能?",
- "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
-    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
-    # Try to import dependencies; if any are missing, suggest how to install them
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
-    history = [] # clear the history to avoid input overflow
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
-        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
\ No newline at end of file
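To make the splitting step above concrete, here is a hedged sketch of `PaperFileGroup.run_file_split` on a single in-memory LaTeX fragment. It assumes the gpt-academic package layout (`request_llm.bridge_all` and `crazy_functions.crazy_utils`) is importable, so it only runs inside that environment; the file path and content are invented:

```python
# Sketch: split one LaTeX source into token-limited fragments.
# Assumption: running inside the gpt-academic environment so that the
# tokenizer in request_llm.bridge_all is available to PaperFileGroup.
pfg = PaperFileGroup()
pfg.file_paths = ["main.tex"]                              # illustrative path
pfg.file_contents = ["\\section{Intro}\n" + "word " * 5000]

pfg.run_file_split(max_token_limit=1024)

# Fragments longer than the limit get ".part-<j>.tex" appended to their tag.
for tag, frag in zip(pfg.sp_file_tag, pfg.sp_file_contents):
    print(tag, len(frag))
```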
diff --git a/spaces/FlippFuzz/whisper-webui/src/segments.py b/spaces/FlippFuzz/whisper-webui/src/segments.py
deleted file mode 100644
index ec2650dceade5d0b2022264f6419115eab085aea..0000000000000000000000000000000000000000
--- a/spaces/FlippFuzz/whisper-webui/src/segments.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from typing import Any, Dict, List
-
-import copy
-
-def merge_timestamps(timestamps: List[Dict[str, Any]], merge_window: float = 5, max_merge_size: float = 30, padding_left: float = 1, padding_right: float = 1):
- result = []
-
- if len(timestamps) == 0:
- return result
- if max_merge_size is None:
- return timestamps
-
- if padding_left is None:
- padding_left = 0
- if padding_right is None:
- padding_right = 0
-
- processed_time = 0
- current_segment = None
-
- for i in range(len(timestamps)):
- next_segment = timestamps[i]
-
- delta = next_segment['start'] - processed_time
-
- # Note that segments can still be longer than the max merge size, they just won't be merged in that case
- if current_segment is None or (merge_window is not None and delta > merge_window) \
- or next_segment['end'] - current_segment['start'] > max_merge_size:
- # Finish the current segment
- if current_segment is not None:
- # Add right padding
- finish_padding = min(padding_right, delta / 2) if delta < padding_left + padding_right else padding_right
- current_segment['end'] += finish_padding
- delta -= finish_padding
-
- result.append(current_segment)
-
- # Start a new segment
- current_segment = copy.deepcopy(next_segment)
-
- # Pad the segment
- current_segment['start'] = current_segment['start'] - min(padding_left, delta)
- processed_time = current_segment['end']
-
- else:
- # Merge the segment
- current_segment['end'] = next_segment['end']
- processed_time = current_segment['end']
-
- # Add the last segment
- if current_segment is not None:
- current_segment['end'] += padding_right
- result.append(current_segment)
-
- return result
\ No newline at end of file
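A short worked example clarifies how `merge_timestamps` applies the merge window and the left/right padding (the timestamps are invented):

```python
# Worked example with invented timestamps (seconds).
segments = [
    {"start": 2.0, "end": 4.0},
    {"start": 5.0, "end": 7.0},    # gap of 1s < merge_window, merged with the first
    {"start": 60.0, "end": 62.0},  # far away, starts a new merged segment
]

merged = merge_timestamps(segments, merge_window=5, max_merge_size=30,
                          padding_left=1, padding_right=1)
print(merged)
# [{'start': 1.0, 'end': 8.0}, {'start': 59.0, 'end': 63.0}]
```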
diff --git a/spaces/FoxMeo/fire-detector/utils/aws/resume.py b/spaces/FoxMeo/fire-detector/utils/aws/resume.py
deleted file mode 100644
index 338685b19c19ddb47aa2fde22a535a8efcf17802..0000000000000000000000000000000000000000
--- a/spaces/FoxMeo/fire-detector/utils/aws/resume.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Resume all interrupted trainings in yolor/ dir including DDP trainings
-# Usage: $ python utils/aws/resume.py
-
-import os
-import sys
-from pathlib import Path
-
-import torch
-import yaml
-
-sys.path.append('./') # to run '$ python *.py' files in subdirectories
-
-port = 0 # --master_port
-path = Path('').resolve()
-for last in path.rglob('*/**/last.pt'):
- ckpt = torch.load(last)
- if ckpt['optimizer'] is None:
- continue
-
- # Load opt.yaml
- with open(last.parent.parent / 'opt.yaml') as f:
- opt = yaml.load(f, Loader=yaml.SafeLoader)
-
- # Get device count
- d = opt['device'].split(',') # devices
- nd = len(d) # number of devices
- ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
-
- if ddp: # multi-GPU
- port += 1
- cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
- else: # single-GPU
- cmd = f'python train.py --resume {last}'
-
-    cmd += ' > /dev/null 2>&1 &' # redirect output to /dev/null and run in the background
- print(cmd)
- os.system(cmd)
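For reference, a hedged sketch of the command string this script would print for one hypothetical checkpoint (the device string and checkpoint path below are invented):

```python
# Illustration only: the device list and checkpoint path are invented.
nd = len('0,1'.split(','))          # 2 devices -> DDP branch
last = 'runs/train/exp/weights/last.pt'
port = 1
cmd = (f'python -m torch.distributed.launch --nproc_per_node {nd} '
       f'--master_port {port} train.py --resume {last}')
cmd += ' > /dev/null 2>&1 &'
print(cmd)
# python -m torch.distributed.launch --nproc_per_node 2 --master_port 1 train.py
#   --resume runs/train/exp/weights/last.pt > /dev/null 2>&1 &
```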
diff --git a/spaces/Frederick/Clause_Segmentation_and_Classification/app.py b/spaces/Frederick/Clause_Segmentation_and_Classification/app.py
deleted file mode 100644
index 3a245175e8c068fc747f93c18e3c2f9eb8df00f1..0000000000000000000000000000000000000000
--- a/spaces/Frederick/Clause_Segmentation_and_Classification/app.py
+++ /dev/null
@@ -1,248 +0,0 @@
-import torch
-import numpy as np
-import sys
-import os
-from transformers import RobertaTokenizer, AutoModelForTokenClassification, RobertaForSequenceClassification
-import spacy
-import tokenizations
-from numpy import asarray
-from numpy import savetxt, loadtxt
-import numpy as np
-import json
-from copy import deepcopy
-# from sty import fg, bg, ef, rs, RgbBg, Style
-import re
-from tqdm import tqdm
-import gradio as gr
-from matplotlib import pyplot as plt
-import seaborn as sns
-
-os.system("python -m spacy download en_core_web_sm")
-nlp = spacy.load("en_core_web_sm")
-tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
-clause_model = AutoModelForTokenClassification.from_pretrained("./clause_model_512", num_labels=3)
-classification_model = RobertaForSequenceClassification.from_pretrained("./classfication_model", num_labels=18)
-
-
-labels2attrs = {
- "##BOUNDED EVENT (SPECIFIC)": ("specific", "dynamic", "episodic"),
- "##BOUNDED EVENT (GENERIC)": ("generic", "dynamic", "episodic"),
- "##UNBOUNDED EVENT (SPECIFIC)": ("specific", "dynamic", "static"), # This should be (static, or habitual)
- "##UNBOUNDED EVENT (GENERIC)": ("generic", "dynamic", "static"),
- "##BASIC STATE": ("specific", "stative", "static"),
- "##COERCED STATE (SPECIFIC)": ("specific", "dynamic", "static"),
- "##COERCED STATE (GENERIC)": ("generic", "dynamic", "static"),
- "##PERFECT COERCED STATE (SPECIFIC)": ("specific", "dynamic", "episodic"),
- "##PERFECT COERCED STATE (GENERIC)": ("generic", "dynamic", "episodic"),
-    "##GENERIC SENTENCE (DYNAMIC)": ("generic", "dynamic", "habitual"), # habitual counts as unbounded
- "##GENERIC SENTENCE (STATIC)": ("generic", "stative", "static"), # The car is red now (static)
- "##GENERIC SENTENCE (HABITUAL)": ("generic", "stative", "habitual"), # I go to the gym regularly (habitual)
- "##GENERALIZING SENTENCE (DYNAMIC)": ("specific", "dynamic", "habitual"),
- "##GENERALIZING SENTENCE (STATIVE)": ("specific", "stative", "habitual"),
- "##QUESTION": ("NA", "NA", "NA"),
- "##IMPERATIVE": ("NA", "NA", "NA"),
- "##NONSENSE": ("NA", "NA", "NA"),
- "##OTHER": ("NA", "NA", "NA"),
-}
-
-label2index = {l:i for l,i in zip(labels2attrs.keys(), np.arange(len(labels2attrs)))}
-index2label = {i:l for l,i in label2index.items()}
-
-def auto_split(text):
- doc = nlp(text)
- current_len = 0
- snippets = []
- current_snippet = ""
- for sent in doc.sents:
- text = sent.text
- words = text.split()
- if current_len + len(words) > 200:
- snippets.append(current_snippet)
- current_snippet = text
- current_len = len(words)
- else:
- current_snippet += " " + text
- current_len += len(words)
- snippets.append(current_snippet) # the leftover part.
- return snippets
-
-
-def majority_vote(array):
- unique, counts = np.unique(np.array(array), return_counts=True)
- return unique[np.argmax(counts)]
-
-def get_pred_clause_labels(text, words):
- model_inputs = tokenizer(text, padding='max_length', max_length=512, truncation=True, return_tensors='pt')
- roberta_tokens = (tokenizer.convert_ids_to_tokens(model_inputs['input_ids'][0]))
- a2b, b2a = tokenizations.get_alignments(words, roberta_tokens)
- logits = clause_model(**model_inputs)[0]
- tagging = logits.argmax(-1)[0].numpy()
- pred_labels = []
- for alignment in a2b: # spacy token index to roberta_token index
- if len(alignment) == 0: pred_labels.append(1)
- elif len(alignment) == 1: pred_labels.append(tagging[alignment[0]])
- else:
- pred_labels.append(majority_vote([tagging[a] for a in alignment]))
- assert len(pred_labels) == len(words)
- return pred_labels
-
-def seg_clause(text):
- words = text.strip().split()
- labels = get_pred_clause_labels(text, words)
- segmented_clauses = []
- prev_label = 2
- current_clause = None
- for cur_token, cur_label in zip(words, labels):
- if prev_label == 2: current_clause = []
- if current_clause is not None: current_clause.append(cur_token)
-
- if cur_label == 2:
- if prev_label in [0, 1]:
- segmented_clauses.append(deepcopy(current_clause)) ## 0 1 1 1 1 2 0 1 1
- current_clause = None
- prev_label = cur_label
-
- if current_clause is not None and len(current_clause) != 0: # append leftover
- segmented_clauses.append(deepcopy(current_clause))
- return [" ".join(clause) for clause in segmented_clauses if clause is not None]
-
-# def pretty_print_segmented_clause(segmented_clauses):
-# np.random.seed(42)
-# bg.orange = Style(RgbBg(255, 150, 50))
-# bg.purple = Style(RgbBg(180, 130, 225))
-# colors = [bg.red, bg.orange, bg.yellow, bg.green, bg.blue, bg.purple]
-# prev_color = 0
-# to_print = []
-# for cl in segmented_clauses:
-# color_choice = np.random.choice(np.delete(np.arange(len(colors)), prev_color))
-# prev_color = color_choice
-# colored_cl = colors[color_choice] + cl + bg.rs
-# to_print.append(colored_cl)
-# print(*to_print, sep=" ")
-
-
-def get_pred_classification_labels(clauses, batch_size=32):
- clause2labels = []
- for i in range(0, len(clauses), batch_size):
- batch_examples = clauses[i : i + batch_size]
- model_inputs = tokenizer(batch_examples, padding='max_length', max_length=128, truncation=True, return_tensors='pt')
- logits = classification_model(**model_inputs)[0]
- pred_labels = logits.argmax(-1).numpy()
- pred_labels = [index2label[l] for l in pred_labels]
- clause2labels.extend([(s, labels2attrs[l],) for s,l in zip(batch_examples, pred_labels)])
- return clause2labels
-
-def label_visualization(clause2labels):
- total_clauses = len(clause2labels)
- aspect_labels, genericity_labels, boundedness_labels = [], [], []
- for _, labels in clause2labels:
- labels = tuple(labels)
- print(labels)
-
- genericity_label = labels[0]
- aspect_label = labels[1]
- boundedness_label = labels[2]
- aspect_labels.append(aspect_label)
- genericity_labels.append(genericity_label)
- boundedness_labels.append(boundedness_label)
- aspect_dict = {"Dynamic": aspect_labels.count("dynamic"), "Stative": aspect_labels.count("stative"), "NA": aspect_labels.count("NA")}
- genericity_dict = {"Generic": genericity_labels.count("generic"), "Specific": genericity_labels.count("specific"), "NA": genericity_labels.count("NA")}
- boundedness_dict = {"Static": boundedness_labels.count("static"), "Episodic": boundedness_labels.count("episodic"), "Habitual": boundedness_labels.count("habitual"), "NA": boundedness_labels.count("NA")}
- print(aspect_dict, genericity_dict, boundedness_dict)
- fig, axs = plt.subplots(1, 3, figsize=(10, 6,))
- fig.tight_layout(pad=5.0)
- dict_aspect = {k : float(v / total_clauses) for k, v in aspect_dict.items() if v != 0}
- dict_genericity = {k : float(v / total_clauses) for k, v in genericity_dict.items() if v != 0}
- dict_boundedness = {k : float(v / total_clauses) for k, v in boundedness_dict.items() if v != 0}
- print(dict_aspect)
- print(list(dict_aspect.values()), len(dict_aspect.keys()), list(dict_aspect.keys()))
- axs[0].pie(list(dict_aspect.values()), colors = sns.color_palette('pastel')[0:len(dict_aspect.keys())],
- labels=dict_aspect.keys(), autopct='%.0f%%', normalize=True )
- axs[0].set_title("Aspect")
- axs[1].pie(list(dict_genericity.values()), colors = sns.color_palette('pastel')[3: 3 + len(dict_genericity.keys())],
- labels=dict_genericity.keys(), autopct='%.0f%%', normalize=True)
- axs[1].set_title("Genericity")
- axs[2].pie(list(dict_boundedness.values()), colors = sns.color_palette('pastel')[6: 6 + len(dict_boundedness.keys())],
- labels=dict_boundedness.keys(), autopct='%.0f%%', normalize=True)
- axs[2].set_title("Boundedness")
- return fig
-
-def run_pipeline(text):
- snippets = auto_split(text)
- print(snippets)
- all_clauses = []
- for s in snippets:
- segmented_clauses = seg_clause(s)
- all_clauses.extend(segmented_clauses)
-
- clause2labels = get_pred_classification_labels(all_clauses)
- output_clauses = [(c, str(i + 1)) for i, c in enumerate(all_clauses)]
- figure = label_visualization(clause2labels)
- clause2labels = [(k,str(v),) for k, v in clause2labels]
- return output_clauses, clause2labels, figure
-
-
-# with open("pipeline_outputs.jsonl", "w") as fw:
-# with open("all_text.txt", "r") as f:
-# lines = f.readlines()
-# print(f"Totally detected {len(lines)} documents.")
-# for text in tqdm(lines):
-# snippets = auto_split(text)
-# all_clauses = []
-# for s in snippets:
-# segmented_clauses = seg_clause(s)
-# all_clauses.extend(segmented_clauses)
-# # pretty_print_segmented_clause(segmented_clauses)
-
-# clause2labels = get_pred_classification_labels(all_clauses)
-# json.dump(clause2labels, fw)
-# fw.write("\n")
-
-color_panel_1 = ["red", "green", "yellow", "DodgerBlue", "orange", "DarkSalmon", "pink", "cyan", "gold", "aqua", "violet"]
-index_colormap = {str(i) : color_panel_1[i % len(color_panel_1)] for i in np.arange(1, 100000)}
-color_panel_2 = ["Gray", "DodgerBlue", "Wheat", "OliveDrab", "DarkKhaki", "DarkSalmon", "Orange", "Gold", "Aqua", "Tomato", "Violet"]
-str_attrs = sorted([str(v) for v in set(labels2attrs.values())])
-# print(str_attrs, len(str_attrs), len(color_panel_2))
-assert len(str_attrs) == len(color_panel_2)
-attr_colormap = {a:c for a, c in zip(str_attrs, color_panel_2)}
-# attr_colormap = {
-# ("specific", "dynamic", "episodic"):
-# ("generic", "dynamic", "episodic"):
-# ("specific", "dynamic", "static"):
-# ("generic", "dynamic", "static"):
-# ("specific", "stative", "static"):
-# ("specific", "dynamic", "static"):
-# ("generic", "dynamic", "static"):
-# ("specific", "dynamic", "episodic"):
-# ("generic", "dynamic", "episodic"):
-# ("generic", "dynamic", "habitual"):
-# ("generic", "stative", "static"):
-# ("generic", "stative", "habitual"):
-# ("specific", "dynamic", "habitual"):
-# ("specific", "stative", "habitual"):
-# ("NA", "NA", "NA"):
-# }
-
-
-demo = gr.Interface(
- fn=run_pipeline,
- inputs=["text"],
- outputs= [
- gr.HighlightedText(
- label="Clause Segmentation",
- show_label=True,
- combine_adjacent=False,
- ).style(color_map = index_colormap),
-
- gr.HighlightedText(
- label="Attribute Classification",
- show_label=True,
- show_legend=True,
- combine_adjacent=False,
- ).style(color_map=attr_colormap),
-
- gr.Plot(),
- ]
-)
-
-demo.launch()
\ No newline at end of file
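The word-level clause tags in `get_pred_clause_labels` come from aligning spaCy words to RoBERTa sub-tokens and taking a majority vote over each word's sub-token predictions. The following self-contained sketch shows that step with a hypothetical alignment and tag sequence; no model or tokenizer is required.

```python
import numpy as np

def majority_vote(values):
    unique, counts = np.unique(np.array(values), return_counts=True)
    return int(unique[np.argmax(counts)])

# Hypothetical alignment: spaCy word index -> list of RoBERTa sub-token indices.
word_to_subwords = [[0], [1, 2], [], [3, 4, 5]]
subword_tags = [0, 1, 2, 1, 1, 2]  # per-sub-token predictions from the tagger

word_labels = []
for alignment in word_to_subwords:
    if len(alignment) == 0:
        word_labels.append(1)                      # unaligned word: default tag
    elif len(alignment) == 1:
        word_labels.append(subword_tags[alignment[0]])
    else:
        word_labels.append(majority_vote([subword_tags[i] for i in alignment]))

print(word_labels)  # [0, 1, 1, 1]
```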
diff --git a/spaces/GeorgeOrville/bingo/src/components/providers.tsx b/spaces/GeorgeOrville/bingo/src/components/providers.tsx
deleted file mode 100644
index 892226412d80fe0b05211911b9e245cd22876460..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/components/providers.tsx
+++ /dev/null
@@ -1,15 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import { ThemeProvider as NextThemesProvider } from 'next-themes'
-import { ThemeProviderProps } from 'next-themes/dist/types'
-
-import { TooltipProvider } from '@/components/ui/tooltip'
-
-export function Providers({ children, ...props }: ThemeProviderProps) {
- return (
- <NextThemesProvider {...props}>
- <TooltipProvider>{children}</TooltipProvider>
- </NextThemesProvider>
- )
-}
diff --git a/spaces/GeorgeOrville/bingo/src/components/ui/sheet.tsx b/spaces/GeorgeOrville/bingo/src/components/ui/sheet.tsx
deleted file mode 100644
index c9f5ce0f81a91067bb013e988a07eb1e6bf6953b..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/components/ui/sheet.tsx
+++ /dev/null
@@ -1,122 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as SheetPrimitive from '@radix-ui/react-dialog'
-
-import { cn } from '@/lib/utils'
-import { IconClose } from '@/components/ui/icons'
-
-const Sheet = SheetPrimitive.Root
-
-const SheetTrigger = SheetPrimitive.Trigger
-
-const SheetClose = SheetPrimitive.Close
-
-const SheetPortal = ({
- className,
- children,
- ...props
-}: SheetPrimitive.DialogPortalProps) => (
-
- {children}
-
-)
-SheetPortal.displayName = SheetPrimitive.Portal.displayName
-
-const SheetOverlay = React.forwardRef<
- React.ElementRef,
- React.ComponentPropsWithoutRef
->(({ className, children, ...props }, ref) => (
-
-))
-SheetOverlay.displayName = SheetPrimitive.Overlay.displayName
-
-const SheetContent = React.forwardRef<
- React.ElementRef,
- React.ComponentPropsWithoutRef
->(({ className, children, ...props }, ref) => (
-
-
- {children}
-
-
- Close
-
-
-
-))
-SheetContent.displayName = SheetPrimitive.Content.displayName
-
-const SheetHeader = ({
- className,
- ...props
-}: React.HTMLAttributes) => (
-
-)
-SheetHeader.displayName = 'SheetHeader'
-
-const SheetFooter = ({
- className,
- ...props
-}: React.HTMLAttributes) => (
-
-)
-SheetFooter.displayName = 'SheetFooter'
-
-const SheetTitle = React.forwardRef<
- React.ElementRef,
- React.ComponentPropsWithoutRef
->(({ className, ...props }, ref) => (
-
-))
-SheetTitle.displayName = SheetPrimitive.Title.displayName
-
-const SheetDescription = React.forwardRef<
- React.ElementRef,
- React.ComponentPropsWithoutRef
->(({ className, ...props }, ref) => (
-
-))
-SheetDescription.displayName = SheetPrimitive.Description.displayName
-
-export {
- Sheet,
- SheetTrigger,
- SheetClose,
- SheetContent,
- SheetHeader,
- SheetFooter,
- SheetTitle,
- SheetDescription
-}
diff --git a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/tools/kalign.py b/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/tools/kalign.py
deleted file mode 100644
index fc4e58a43205c138b7f29c07f39a87ea741d2656..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/tools/kalign.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2021 DeepMind Technologies Limited
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""A Python wrapper for Kalign."""
-import os
-import subprocess
-from typing import Sequence
-
-from absl import logging
-
-from alphafold.data.tools import utils
-# Internal import (7716).
-
-
-def _to_a3m(sequences: Sequence[str]) -> str:
- """Converts sequences to an a3m file."""
- names = ['sequence %d' % i for i in range(1, len(sequences) + 1)]
- a3m = []
- for sequence, name in zip(sequences, names):
- a3m.append(u'>' + name + u'\n')
- a3m.append(sequence + u'\n')
- return ''.join(a3m)
-
-
-class Kalign:
- """Python wrapper of the Kalign binary."""
-
- def __init__(self, *, binary_path: str):
- """Initializes the Python Kalign wrapper.
-
- Args:
- binary_path: The path to the Kalign binary.
-
- Raises:
- RuntimeError: If the Kalign binary is not found at the given path.
- """
- self.binary_path = binary_path
-
- def align(self, sequences: Sequence[str]) -> str:
- """Aligns the sequences and returns the alignment in A3M string.
-
- Args:
- sequences: A list of query sequence strings. The sequences have to be at
- least 6 residues long (Kalign requires this). Note that the order in
- which you give the sequences might alter the output slightly as
- a different alignment tree might be constructed.
-
- Returns:
- A string with the alignment in a3m format.
-
- Raises:
- RuntimeError: If Kalign fails.
- ValueError: If any of the sequences is less than 6 residues long.
- """
- logging.info('Aligning %d sequences', len(sequences))
-
- for s in sequences:
- if len(s) < 6:
- raise ValueError('Kalign requires all sequences to be at least 6 '
- 'residues long. Got %s (%d residues).' % (s, len(s)))
-
- with utils.tmpdir_manager(base_dir='/tmp') as query_tmp_dir:
- input_fasta_path = os.path.join(query_tmp_dir, 'input.fasta')
- output_a3m_path = os.path.join(query_tmp_dir, 'output.a3m')
-
- with open(input_fasta_path, 'w') as f:
- f.write(_to_a3m(sequences))
-
- cmd = [
- self.binary_path,
- '-i', input_fasta_path,
- '-o', output_a3m_path,
- '-format', 'fasta',
- ]
-
- logging.info('Launching subprocess "%s"', ' '.join(cmd))
- process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
-
- with utils.timing('Kalign query'):
- stdout, stderr = process.communicate()
- retcode = process.wait()
- logging.info('Kalign stdout:\n%s\n\nstderr:\n%s\n',
- stdout.decode('utf-8'), stderr.decode('utf-8'))
-
- if retcode:
- raise RuntimeError('Kalign failed\nstdout:\n%s\n\nstderr:\n%s\n'
- % (stdout.decode('utf-8'), stderr.decode('utf-8')))
-
- with open(output_a3m_path) as f:
- a3m = f.read()
-
- return a3m
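A hedged usage sketch for the wrapper above; it assumes the `alphafold` package is importable and that a working Kalign binary exists at the path given (both are assumptions, not guaranteed by this file).

```python
from alphafold.data.tools import kalign

runner = kalign.Kalign(binary_path='/usr/bin/kalign')  # binary path is an assumption
# Each query must be at least 6 residues long, per the check in align().
a3m = runner.align(['MKTAYIAKQRQISFVK', 'MKTAYIAKQRQISFVR'])
print(a3m)
```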
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/gcnet_r50-d8.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/gcnet_r50-d8.py
deleted file mode 100644
index 3d2ad69f5c22adfe79d5fdabf920217628987166..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/models/gcnet_r50-d8.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='GCHead',
- in_channels=2048,
- in_index=3,
- channels=512,
- ratio=1 / 4.,
- pooling_type='att',
- fusion_types=('channel_add', ),
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
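Such `_base_` files are plain Python configs that downstream configs inherit and override. A small sketch of how one is typically loaded and tweaked, assuming an mmcv version that still exposes `Config` at the top level; the file path is illustrative.

```python
from mmcv import Config

cfg = Config.fromfile('configs/_base_/models/gcnet_r50-d8.py')  # illustrative path
cfg.model.decode_head.num_classes = 21  # override for a different dataset
print(cfg.model.decode_head.type)       # 'GCHead'
```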
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/aspp_head.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/aspp_head.py
deleted file mode 100644
index 6332ab120ceb040f18ab962c74a97dd23bb17caa..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/aspp_head.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import torch
-import torch.nn as nn
-from mmcv.cnn import ConvModule
-
-from mmseg.ops import resize
-from ..builder import HEADS
-from .decode_head import BaseDecodeHead
-
-
-class ASPPModule(nn.ModuleList):
- """Atrous Spatial Pyramid Pooling (ASPP) Module.
-
- Args:
- dilations (tuple[int]): Dilation rate of each layer.
- in_channels (int): Input channels.
- channels (int): Channels after modules, before conv_seg.
- conv_cfg (dict|None): Config of conv layers.
- norm_cfg (dict|None): Config of norm layers.
- act_cfg (dict): Config of activation layers.
- """
-
- def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg,
- act_cfg):
- super(ASPPModule, self).__init__()
- self.dilations = dilations
- self.in_channels = in_channels
- self.channels = channels
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- for dilation in dilations:
- self.append(
- ConvModule(
- self.in_channels,
- self.channels,
- 1 if dilation == 1 else 3,
- dilation=dilation,
- padding=0 if dilation == 1 else dilation,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
-
- def forward(self, x):
- """Forward function."""
- aspp_outs = []
- for aspp_module in self:
- aspp_outs.append(aspp_module(x))
-
- return aspp_outs
-
-
-@HEADS.register_module()
-class ASPPHead(BaseDecodeHead):
- """Rethinking Atrous Convolution for Semantic Image Segmentation.
-
- This head is the implementation of `DeepLabV3
- <https://arxiv.org/abs/1706.05587>`_.
-
- Args:
- dilations (tuple[int]): Dilation rates for ASPP module.
- Default: (1, 6, 12, 18).
- """
-
- def __init__(self, dilations=(1, 6, 12, 18), **kwargs):
- super(ASPPHead, self).__init__(**kwargs)
- assert isinstance(dilations, (list, tuple))
- self.dilations = dilations
- self.image_pool = nn.Sequential(
- nn.AdaptiveAvgPool2d(1),
- ConvModule(
- self.in_channels,
- self.channels,
- 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg))
- self.aspp_modules = ASPPModule(
- dilations,
- self.in_channels,
- self.channels,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.bottleneck = ConvModule(
- (len(dilations) + 1) * self.channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
- aspp_outs = [
- resize(
- self.image_pool(x),
- size=x.size()[2:],
- mode='bilinear',
- align_corners=self.align_corners)
- ]
- aspp_outs.extend(self.aspp_modules(x))
- aspp_outs = torch.cat(aspp_outs, dim=1)
- output = self.bottleneck(aspp_outs)
- output = self.cls_seg(output)
- return output
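The bottleneck's input width follows directly from the concatenation in `forward`: one global image-pool branch plus one branch per dilation, each producing `self.channels` feature maps. A quick check of that bookkeeping with the default dilations and an illustrative channel count:

```python
dilations = (1, 6, 12, 18)  # default ASPP dilation rates
channels = 512              # per-branch output channels (illustrative)

concat_channels = (len(dilations) + 1) * channels  # +1 for the image-pool branch
print(concat_channels)  # 2560 -> in_channels of the 3x3 bottleneck ConvModule
```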
diff --git a/spaces/Grezz/generate_human_motion/VQ-Trans/utils/paramUtil.py b/spaces/Grezz/generate_human_motion/VQ-Trans/utils/paramUtil.py
deleted file mode 100644
index a9f1708b85ca80a9051cb3675cec9b999a0d0e2b..0000000000000000000000000000000000000000
--- a/spaces/Grezz/generate_human_motion/VQ-Trans/utils/paramUtil.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import numpy as np
-
-# Define a kinematic tree for the skeletal structure
-kit_kinematic_chain = [[0, 11, 12, 13, 14, 15], [0, 16, 17, 18, 19, 20], [0, 1, 2, 3, 4], [3, 5, 6, 7], [3, 8, 9, 10]]
-
-kit_raw_offsets = np.array(
- [
- [0, 0, 0],
- [0, 1, 0],
- [0, 1, 0],
- [0, 1, 0],
- [0, 1, 0],
- [1, 0, 0],
- [0, -1, 0],
- [0, -1, 0],
- [-1, 0, 0],
- [0, -1, 0],
- [0, -1, 0],
- [1, 0, 0],
- [0, -1, 0],
- [0, -1, 0],
- [0, 0, 1],
- [0, 0, 1],
- [-1, 0, 0],
- [0, -1, 0],
- [0, -1, 0],
- [0, 0, 1],
- [0, 0, 1]
- ]
-)
-
-t2m_raw_offsets = np.array([[0,0,0],
- [1,0,0],
- [-1,0,0],
- [0,1,0],
- [0,-1,0],
- [0,-1,0],
- [0,1,0],
- [0,-1,0],
- [0,-1,0],
- [0,1,0],
- [0,0,1],
- [0,0,1],
- [0,1,0],
- [1,0,0],
- [-1,0,0],
- [0,0,1],
- [0,-1,0],
- [0,-1,0],
- [0,-1,0],
- [0,-1,0],
- [0,-1,0],
- [0,-1,0]])
-
-t2m_kinematic_chain = [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]]
-t2m_left_hand_chain = [[20, 22, 23, 24], [20, 34, 35, 36], [20, 25, 26, 27], [20, 31, 32, 33], [20, 28, 29, 30]]
-t2m_right_hand_chain = [[21, 43, 44, 45], [21, 46, 47, 48], [21, 40, 41, 42], [21, 37, 38, 39], [21, 49, 50, 51]]
-
-
-kit_tgt_skel_id = '03950'
-
-t2m_tgt_skel_id = '000021'
-
diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/model_builder.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/model_builder.py
deleted file mode 100644
index 1152fea39ad52c5d5dda0bd5e5d926b4940fd682..0000000000000000000000000000000000000000
--- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/utils/model_builder.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# --------------------------------------------------------
-# Based on timm and MAE-priv code bases
-# https://github.com/rwightman/pytorch-image-models/tree/master/timm
-# https://github.com/BUPT-PRIV/MAE-priv
-# --------------------------------------------------------
-
-from .registry import is_model_in_modules, model_entrypoint
-
-
-def split_model_name(model_name):
- model_split = model_name.split(':', 1)
- if len(model_split) == 1:
- return '', model_split[0]
- else:
- source_name, model_name = model_split
- assert source_name in ('timm', 'hf_hub')
- return source_name, model_name
-
-
-def safe_model_name(model_name, remove_source=True):
- def make_safe(name):
- return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_')
-
- if remove_source:
- model_name = split_model_name(model_name)[-1]
- return make_safe(model_name)
-
-
-def create_model(
- model_name,
- pretrained=False,
- checkpoint_path='',
- scriptable=None,
- exportable=None,
- no_jit=None,
- **kwargs):
- """Create a model
-
- Args:
- model_name (str): name of model to instantiate
- pretrained (bool): load pretrained ImageNet-1k weights if true
- checkpoint_path (str): path of checkpoint to load after model is initialized
- scriptable (bool): set layer config so that model is jit scriptable (not working for all models yet)
- exportable (bool): set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet)
- no_jit (bool): set layer config so that model doesn't utilize jit scripted layers (so far activations only)
-
- Keyword Args:
- drop_rate (float): dropout rate for training (default: 0.0)
- global_pool (str): global pool type (default: 'avg')
- **: other kwargs are model specific
- """
- source_name, model_name = split_model_name(model_name)
-
- # Only EfficientNet and MobileNetV3 models have support for batchnorm params or drop_connect_rate passed as args
- is_efficientnet = is_model_in_modules(model_name, ['efficientnet', 'mobilenetv3'])
- if not is_efficientnet:
- kwargs.pop('bn_tf', None)
- kwargs.pop('bn_momentum', None)
- kwargs.pop('bn_eps', None)
-
- # handle backwards compat with drop_connect -> drop_path change
- drop_connect_rate = kwargs.pop('drop_connect_rate', None)
- if drop_connect_rate is not None and kwargs.get('drop_path_rate', None) is None:
- print("WARNING: 'drop_connect' as an argument is deprecated, please use 'drop_path'."
- " Setting drop_path to %f." % drop_connect_rate)
- kwargs['drop_path_rate'] = drop_connect_rate
-
- # Parameters that aren't supported by all models or are intended to only override model defaults if set
- # should default to None in command line args/cfg. Remove them if they are present and not set so that
- # non-supporting models don't break and default args remain in effect.
- kwargs = {k: v for k, v in kwargs.items() if v is not None}
-
- create_fn = model_entrypoint(model_name)
- model = create_fn(**kwargs)
-
- return model
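A hedged usage sketch for `create_model`: the model name below is a hypothetical placeholder that would need to be registered in this repo's registry, and note that this trimmed builder does not forward `pretrained` or `checkpoint_path` to the entrypoint.

```python
from utils.model_builder import create_model

# 'multivit_base' is a placeholder name, not guaranteed to exist in the registry.
model = create_model('multivit_base', drop_path_rate=0.1)
print(type(model).__name__)
```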
diff --git a/spaces/HaoFeng2019/DocGeoNet/seg.py b/spaces/HaoFeng2019/DocGeoNet/seg.py
deleted file mode 100644
index eea31f965a38a37e9208d3b0fcb3bdaf166a634a..0000000000000000000000000000000000000000
--- a/spaces/HaoFeng2019/DocGeoNet/seg.py
+++ /dev/null
@@ -1,567 +0,0 @@
-import torch
-import torch.nn as nn
-from torchvision import models
-import torch.nn.functional as F
-import numpy as np
-
-
-class sobel_net(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv_opx = nn.Conv2d(1, 1, 3, bias=False)
- self.conv_opy = nn.Conv2d(1, 1, 3, bias=False)
- sobel_kernelx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype='float32').reshape((1, 1, 3, 3))
- sobel_kernely = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype='float32').reshape((1, 1, 3, 3))
- self.conv_opx.weight.data = torch.from_numpy(sobel_kernelx)
- self.conv_opy.weight.data = torch.from_numpy(sobel_kernely)
-
- for p in self.parameters():
- p.requires_grad = False
-
- def forward(self, im): # input rgb
- x = (0.299 * im[:, 0, :, :] + 0.587 * im[:, 1, :, :] + 0.114 * im[:, 2, :, :]).unsqueeze(1) # rgb2gray
- gradx = self.conv_opx(x)
- grady = self.conv_opy(x)
-
- x = (gradx ** 2 + grady ** 2) ** 0.5
- x = (x - x.min()) / (x.max() - x.min())
- x = F.pad(x, (1, 1, 1, 1))
-
- x = torch.cat([im, x], dim=1)
- return x
-
-
-class REBNCONV(nn.Module):
- def __init__(self, in_ch=3, out_ch=3, dirate=1):
- super(REBNCONV, self).__init__()
-
- self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate)
- self.bn_s1 = nn.BatchNorm2d(out_ch)
- self.relu_s1 = nn.ReLU(inplace=True)
-
- def forward(self, x):
- hx = x
- xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
-
- return xout
-
-
-## upsample tensor 'src' to have the same spatial size as tensor 'tar'
-def _upsample_like(src, tar):
- src = F.interpolate(src, size=tar.shape[2:], mode='bilinear', align_corners=False)
-
- return src
-
-
-### RSU-7 ###
-class RSU7(nn.Module): # UNet07DRES(nn.Module):
-
- def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
- super(RSU7, self).__init__()
-
- self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
-
- self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
- self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1)
-
- self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2)
-
- self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)
-
- def forward(self, x):
- hx = x
- hxin = self.rebnconvin(hx)
-
- hx1 = self.rebnconv1(hxin)
- hx = self.pool1(hx1)
-
- hx2 = self.rebnconv2(hx)
- hx = self.pool2(hx2)
-
- hx3 = self.rebnconv3(hx)
- hx = self.pool3(hx3)
-
- hx4 = self.rebnconv4(hx)
- hx = self.pool4(hx4)
-
- hx5 = self.rebnconv5(hx)
- hx = self.pool5(hx5)
-
- hx6 = self.rebnconv6(hx)
-
- hx7 = self.rebnconv7(hx6)
-
- hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1))
- hx6dup = _upsample_like(hx6d, hx5)
-
- hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1))
- hx5dup = _upsample_like(hx5d, hx4)
-
- hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
- hx4dup = _upsample_like(hx4d, hx3)
-
- hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
- hx3dup = _upsample_like(hx3d, hx2)
-
- hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
- hx2dup = _upsample_like(hx2d, hx1)
-
- hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
-
- return hx1d + hxin
-
-
-### RSU-6 ###
-class RSU6(nn.Module): # UNet06DRES(nn.Module):
-
- def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
- super(RSU6, self).__init__()
-
- self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
-
- self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
- self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
-
- self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2)
-
- self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)
-
- def forward(self, x):
- hx = x
-
- hxin = self.rebnconvin(hx)
-
- hx1 = self.rebnconv1(hxin)
- hx = self.pool1(hx1)
-
- hx2 = self.rebnconv2(hx)
- hx = self.pool2(hx2)
-
- hx3 = self.rebnconv3(hx)
- hx = self.pool3(hx3)
-
- hx4 = self.rebnconv4(hx)
- hx = self.pool4(hx4)
-
- hx5 = self.rebnconv5(hx)
-
- hx6 = self.rebnconv6(hx5)
-
- hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1))
- hx5dup = _upsample_like(hx5d, hx4)
-
- hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
- hx4dup = _upsample_like(hx4d, hx3)
-
- hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
- hx3dup = _upsample_like(hx3d, hx2)
-
- hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
- hx2dup = _upsample_like(hx2d, hx1)
-
- hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
-
- return hx1d + hxin
-
-
-### RSU-5 ###
-class RSU5(nn.Module): # UNet05DRES(nn.Module):
-
- def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
- super(RSU5, self).__init__()
-
- self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
-
- self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
- self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
-
- self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2)
-
- self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)
-
- def forward(self, x):
- hx = x
-
- hxin = self.rebnconvin(hx)
-
- hx1 = self.rebnconv1(hxin)
- hx = self.pool1(hx1)
-
- hx2 = self.rebnconv2(hx)
- hx = self.pool2(hx2)
-
- hx3 = self.rebnconv3(hx)
- hx = self.pool3(hx3)
-
- hx4 = self.rebnconv4(hx)
-
- hx5 = self.rebnconv5(hx4)
-
- hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1))
- hx4dup = _upsample_like(hx4d, hx3)
-
- hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
- hx3dup = _upsample_like(hx3d, hx2)
-
- hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
- hx2dup = _upsample_like(hx2d, hx1)
-
- hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
-
- return hx1d + hxin
-
-
-### RSU-4 ###
-class RSU4(nn.Module): # UNet04DRES(nn.Module):
-
- def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
- super(RSU4, self).__init__()
-
- self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
-
- self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
- self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
- self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
-
- self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2)
-
- self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
- self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)
-
- def forward(self, x):
- hx = x
-
- hxin = self.rebnconvin(hx)
-
- hx1 = self.rebnconv1(hxin)
- hx = self.pool1(hx1)
-
- hx2 = self.rebnconv2(hx)
- hx = self.pool2(hx2)
-
- hx3 = self.rebnconv3(hx)
-
- hx4 = self.rebnconv4(hx3)
-
- hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))
- hx3dup = _upsample_like(hx3d, hx2)
-
- hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
- hx2dup = _upsample_like(hx2d, hx1)
-
- hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
-
- return hx1d + hxin
-
-
-### RSU-4F ###
-class RSU4F(nn.Module): # UNet04FRES(nn.Module):
-
- def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
- super(RSU4F, self).__init__()
-
- self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
-
- self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
- self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2)
- self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4)
-
- self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8)
-
- self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4)
- self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2)
- self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)
-
- def forward(self, x):
- hx = x
-
- hxin = self.rebnconvin(hx)
-
- hx1 = self.rebnconv1(hxin)
- hx2 = self.rebnconv2(hx1)
- hx3 = self.rebnconv3(hx2)
-
- hx4 = self.rebnconv4(hx3)
-
- hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))
- hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1))
- hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1))
-
- return hx1d + hxin
-
-
-##### U^2-Net ####
-class U2NET(nn.Module):
-
- def __init__(self, in_ch=3, out_ch=1):
- super(U2NET, self).__init__()
- self.edge = sobel_net()
-
- self.stage1 = RSU7(in_ch, 32, 64)
- self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage2 = RSU6(64, 32, 128)
- self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage3 = RSU5(128, 64, 256)
- self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage4 = RSU4(256, 128, 512)
- self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage5 = RSU4F(512, 256, 512)
- self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage6 = RSU4F(512, 256, 512)
-
- # decoder
- self.stage5d = RSU4F(1024, 256, 512)
- self.stage4d = RSU4(1024, 128, 256)
- self.stage3d = RSU5(512, 64, 128)
- self.stage2d = RSU6(256, 32, 64)
- self.stage1d = RSU7(128, 16, 64)
-
- self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
- self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
- self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
- self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
- self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
- self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
-
- self.outconv = nn.Conv2d(6, out_ch, 1)
-
- def forward(self, x):
- x = self.edge(x)
- hx = x
-
- # stage 1
- hx1 = self.stage1(hx)
- hx = self.pool12(hx1)
-
- # stage 2
- hx2 = self.stage2(hx)
- hx = self.pool23(hx2)
-
- # stage 3
- hx3 = self.stage3(hx)
- hx = self.pool34(hx3)
-
- # stage 4
- hx4 = self.stage4(hx)
- hx = self.pool45(hx4)
-
- # stage 5
- hx5 = self.stage5(hx)
- hx = self.pool56(hx5)
-
- # stage 6
- hx6 = self.stage6(hx)
- hx6up = _upsample_like(hx6, hx5)
-
- # -------------------- decoder --------------------
- hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))
- hx5dup = _upsample_like(hx5d, hx4)
-
- hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))
- hx4dup = _upsample_like(hx4d, hx3)
-
- hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))
- hx3dup = _upsample_like(hx3d, hx2)
-
- hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))
- hx2dup = _upsample_like(hx2d, hx1)
-
- hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))
-
- # side output
- d1 = self.side1(hx1d)
-
- d2 = self.side2(hx2d)
- d2 = _upsample_like(d2, d1)
-
- d3 = self.side3(hx3d)
- d3 = _upsample_like(d3, d1)
-
- d4 = self.side4(hx4d)
- d4 = _upsample_like(d4, d1)
-
- d5 = self.side5(hx5d)
- d5 = _upsample_like(d5, d1)
-
- d6 = self.side6(hx6)
- d6 = _upsample_like(d6, d1)
-
- d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))
-
- return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(
- d4), torch.sigmoid(d5), torch.sigmoid(d6)
-
-
-### U^2-Net small ###
-class U2NETP(nn.Module):
-
- def __init__(self, in_ch=3, out_ch=1):
- super(U2NETP, self).__init__()
-
- self.stage1 = RSU7(in_ch, 16, 64)
- self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage2 = RSU6(64, 16, 64)
- self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage3 = RSU5(64, 16, 64)
- self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage4 = RSU4(64, 16, 64)
- self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage5 = RSU4F(64, 16, 64)
- self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
-
- self.stage6 = RSU4F(64, 16, 64)
-
- # decoder
- self.stage5d = RSU4F(128, 16, 64)
- self.stage4d = RSU4(128, 16, 64)
- self.stage3d = RSU5(128, 16, 64)
- self.stage2d = RSU6(128, 16, 64)
- self.stage1d = RSU7(128, 16, 64)
-
- self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
- self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
- self.side3 = nn.Conv2d(64, out_ch, 3, padding=1)
- self.side4 = nn.Conv2d(64, out_ch, 3, padding=1)
- self.side5 = nn.Conv2d(64, out_ch, 3, padding=1)
- self.side6 = nn.Conv2d(64, out_ch, 3, padding=1)
-
- self.outconv = nn.Conv2d(6, out_ch, 1)
-
- def forward(self, x):
- hx = x
-
- # stage 1
- hx1 = self.stage1(hx)
- hx = self.pool12(hx1)
-
- # stage 2
- hx2 = self.stage2(hx)
- hx = self.pool23(hx2)
-
- # stage 3
- hx3 = self.stage3(hx)
- hx = self.pool34(hx3)
-
- # stage 4
- hx4 = self.stage4(hx)
- hx = self.pool45(hx4)
-
- # stage 5
- hx5 = self.stage5(hx)
- hx = self.pool56(hx5)
-
- # stage 6
- hx6 = self.stage6(hx)
- hx6up = _upsample_like(hx6, hx5)
-
- # decoder
- hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))
- hx5dup = _upsample_like(hx5d, hx4)
-
- hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))
- hx4dup = _upsample_like(hx4d, hx3)
-
- hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))
- hx3dup = _upsample_like(hx3d, hx2)
-
- hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))
- hx2dup = _upsample_like(hx2d, hx1)
-
- hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))
-
- # side output
- d1 = self.side1(hx1d)
-
- d2 = self.side2(hx2d)
- d2 = _upsample_like(d2, d1)
-
- d3 = self.side3(hx3d)
- d3 = _upsample_like(d3, d1)
-
- d4 = self.side4(hx4d)
- d4 = _upsample_like(d4, d1)
-
- d5 = self.side5(hx5d)
- d5 = _upsample_like(d5, d1)
-
- d6 = self.side6(hx6)
- d6 = _upsample_like(d6, d1)
-
- d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1))
-
- return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(
- d4), torch.sigmoid(d5), torch.sigmoid(d6)
-
-
-def get_parameter_number(net):
- total_num = sum(p.numel() for p in net.parameters())
- trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
- return {'Total': total_num, 'Trainable': trainable_num}
-
-
-if __name__ == '__main__':
- net = U2NET(4, 1)#.cuda()
- print(get_parameter_number(net)) # 69090500 parameters; 69442032 after adding attention
- with torch.no_grad():
- inputs = torch.zeros(1, 3, 256, 256)#.cuda()
- outs = net(inputs)
- print(outs[0].shape) # torch.Size([1, 1, 256, 256]) for the fused output d0
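For completeness, a small inference sketch for the lighter `U2NETP` variant defined above, using random weights and an illustrative input; in the real app a trained checkpoint would be loaded before thresholding the fused map.

```python
import torch
from seg import U2NETP  # the module above

net = U2NETP(3, 1)
net.eval()
with torch.no_grad():
    x = torch.rand(1, 3, 256, 256)  # normalized RGB input, illustrative
    d0, *side_maps = net(x)         # seven sigmoid maps; d0 is the fused one
    mask = (d0 > 0.5).float()       # threshold the fused map into a binary mask
print(mask.shape)                    # torch.Size([1, 1, 256, 256])
```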
diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/tts_infer/__init__.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/tts_infer/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/HuggingFaceH4/open_llm_leaderboard/Makefile b/spaces/HuggingFaceH4/open_llm_leaderboard/Makefile
deleted file mode 100644
index b5685772804c8af4235a8504dc6752bfc9ae5d1d..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceH4/open_llm_leaderboard/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-.PHONY: style quality
-
-
-style:
- python -m black --line-length 119 .
- python -m isort .
- ruff check --fix .
-
-
-quality:
- python -m black --check --line-length 119 .
- python -m isort --check-only .
- ruff check .
diff --git a/spaces/HuggingFaceH4/reward-modeling-chat-ui/utils.py b/spaces/HuggingFaceH4/reward-modeling-chat-ui/utils.py
deleted file mode 100644
index 01e46f8c0a8f941495266a81ebd183c05a2f9950..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceH4/reward-modeling-chat-ui/utils.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import re
-
-
-def wrap_html_code(text):
- pattern = r"<.*?>"
- matches = re.findall(pattern, text)
- if len(matches) > 0:
- return f"```{text}```"
- else:
- return text
-
-
-def get_full_text(response):
- output = ""
- for resp in response:
- if resp.token.special:
- continue
- output += resp.token.text
- return output
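A short usage sketch of the helpers above; `wrap_html_code` only fences text that contains HTML-like tags, so ordinary messages pass through unchanged.

```python
from utils import wrap_html_code  # the module above

print(wrap_html_code("plain text"))   # returned unchanged
print(wrap_html_code("<b>bold</b>"))  # wrapped in a fenced code block
```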
diff --git a/spaces/HugoDzz/super-godot-galaxy/build/_app/immutable/nodes/2.ae94ff6d.js b/spaces/HugoDzz/super-godot-galaxy/build/_app/immutable/nodes/2.ae94ff6d.js
deleted file mode 100644
index 8e6915c41e6214769062c620a0a684995ad1ee7e..0000000000000000000000000000000000000000
--- a/spaces/HugoDzz/super-godot-galaxy/build/_app/immutable/nodes/2.ae94ff6d.js
+++ /dev/null
@@ -1,25 +0,0 @@
-import{S as Zr,i as Jr,s as ti,k as U,l as Y,m as G,h as I,n as A,p as ke,b as ie,J as rn,H as Ar,K as nn,a as bt,q as _t,c as wt,r as ct,L as ei,G as D,g as Ce,f as sn,d as Ze,M as an,I as on,o as un,y as fn,z as ln,A as hn,B as _n,N as cn,O as dn,u as pn,v as mn}from"../chunks/index.9af7eb9c.js";import{p as gn}from"../chunks/stores.be116e24.js";function Mt(u){if(u===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return u}function ri(u,t){u.prototype=Object.create(t.prototype),u.prototype.constructor=u,u.__proto__=t}/*!
- * GSAP 3.12.2
- * https://greensock.com
- *
- * @license Copyright 2008-2023, GreenSock. All rights reserved.
- * Subject to the terms at https://greensock.com/standard-license or for
- * Club GreenSock members, the agreement issued with that membership.
- * @author: Jack Doyle, jack@greensock.com
-*/var ut={autoSleep:120,force3D:"auto",nullTargetWarn:1,units:{lineHeight:""}},oe={duration:.5,overwrite:!1,delay:0},dr,J,W,dt=1e8,B=1/dt,Je=Math.PI*2,yn=Je/4,xn=0,ii=Math.sqrt,vn=Math.cos,Tn=Math.sin,K=function(t){return typeof t=="string"},$=function(t){return typeof t=="function"},At=function(t){return typeof t=="number"},pr=function(t){return typeof t>"u"},Ot=function(t){return typeof t=="object"},rt=function(t){return t!==!1},mr=function(){return typeof window<"u"},Oe=function(t){return $(t)||K(t)},ni=typeof ArrayBuffer=="function"&&ArrayBuffer.isView||function(){},tt=Array.isArray,tr=/(?:-?\.?\d|\.)+/gi,si=/[-+=.]*\d+[.e\-+]*\d*[e\-+]*\d*/g,ee=/[-+=.]*\d+[.e-]*\d*[a-z%]*/g,Xe=/[-+=.]*\d+\.?\d*(?:e-|e\+)?\d*/gi,ai=/[+-]=-?[.\d]+/,oi=/[^,'"\[\]\s]+/gi,bn=/^[+\-=e\s\d]*\d+[.\d]*([a-z]*|%)\s*$/i,X,ht,er,gr,ft={},Ee={},ui,fi=function(t){return(Ee=Qt(t,ft))&&st},yr=function(t,e){return console.warn("Invalid property",t,"set to",e,"Missing plugin? gsap.registerPlugin()")},Re=function(t,e){return!e&&console.warn(t)},li=function(t,e){return t&&(ft[t]=e)&&Ee&&(Ee[t]=e)||ft},ye=function(){return 0},wn={suppressEvents:!0,isStart:!0,kill:!1},Me={suppressEvents:!0,kill:!1},Sn={suppressEvents:!0},xr={},Lt=[],rr={},hi,at={},qe={},Er=30,De=[],vr="",Tr=function(t){var e=t[0],r,i;if(Ot(e)||$(e)||(t=[t]),!(r=(e._gsap||{}).harness)){for(i=De.length;i--&&!De[i].targetTest(e););r=De[i]}for(i=t.length;i--;)t[i]&&(t[i]._gsap||(t[i]._gsap=new zi(t[i],r)))||t.splice(i,1);return t},$t=function(t){return t._gsap||Tr(pt(t))[0]._gsap},_i=function(t,e,r){return(r=t[e])&&$(r)?t[e]():pr(r)&&t.getAttribute&&t.getAttribute(e)||r},it=function(t,e){return(t=t.split(",")).forEach(e)||t},j=function(t){return Math.round(t*1e5)/1e5||0},Q=function(t){return Math.round(t*1e7)/1e7||0},ne=function(t,e){var r=e.charAt(0),i=parseFloat(e.substr(2));return t=parseFloat(t),r==="+"?t+i:r==="-"?t-i:r==="*"?t*i:t/i},Pn=function(t,e){for(var r=e.length,i=0;t.indexOf(e[i])<0&&++ia;)s=s._prev;return s?(e._next=s._next,s._next=e):(e._next=t[r],t[r]=e),e._next?e._next._prev=e:t[i]=e,e._prev=s,e.parent=e._dp=t,e},Ne=function(t,e,r,i){r===void 0&&(r="_first"),i===void 0&&(i="_last");var n=e._prev,s=e._next;n?n._next=s:t[r]===e&&(t[r]=s),s?s._prev=n:t[i]===e&&(t[i]=n),e._next=e._prev=e.parent=null},Vt=function(t,e){t.parent&&(!e||t.parent.autoRemoveChildren)&&t.parent.remove&&t.parent.remove(t),t._act=0},jt=function(t,e){if(t&&(!e||e._end>t._dur||e._start<0))for(var r=t;r;)r._dirty=1,r=r.parent;return t},Cn=function(t){for(var e=t.parent;e&&e.parent;)e._dirty=1,e.totalDuration(),e=e.parent;return t},ir=function(t,e,r,i){return t._startAt&&(J?t._startAt.revert(Me):t.vars.immediateRender&&!t.vars.autoRevert||t._startAt.render(e,!0,i))},Mn=function u(t){return!t||t._ts&&u(t.parent)},Fr=function(t){return t._repeat?ue(t._tTime,t=t.duration()+t._rDelay)*t:0},ue=function(t,e){var r=Math.floor(t/=e);return t&&r===t?r-1:r},Ie=function(t,e){return(t-e._start)*e._ts+(e._ts>=0?0:e._dirty?e.totalDuration():e._tDur)},Ue=function(t){return t._end=Q(t._start+(t._tDur/Math.abs(t._ts||t._rts||B)||0))},Ye=function(t,e){var r=t._dp;return r&&r.smoothChildTiming&&t._ts&&(t._start=Q(r._time-(t._ts>0?e/t._ts:((t._dirty?t.totalDuration():t._tDur)-e)/-t._ts)),Ue(t),r._dirty||jt(r,t)),t},gi=function(t,e){var r;if((e._time||!e._dur&&e._initted||e._startB)&&e.render(r,!0)),jt(t,e)._dp&&t._initted&&t._time>=t._dur&&t._ts){if(t._dur=0&&r.totalTime(r._tTime),r=r._dp;t._zTime=-B}},St=function(t,e,r,i){return 
e.parent&&Vt(e),e._start=Q((At(r)?r:r||t!==X?lt(t,r,e):t._time)+e._delay),e._end=Q(e._start+(e.totalDuration()/Math.abs(e.timeScale())||0)),mi(t,e,"_first","_last",t._sort?"_start":0),nr(e)||(t._recent=e),i||gi(t,e),t._ts<0&&Ye(t,t._tTime),t},yi=function(t,e){return(ft.ScrollTrigger||yr("scrollTrigger",e))&&ft.ScrollTrigger.create(e,t)},xi=function(t,e,r,i,n){if(wr(t,e,n),!t._initted)return 1;if(!r&&t._pt&&!J&&(t._dur&&t.vars.lazy!==!1||!t._dur&&t.vars.lazy)&&hi!==ot.frame)return Lt.push(t),t._lazy=[n,i],1},Dn=function u(t){var e=t.parent;return e&&e._ts&&e._initted&&!e._lock&&(e.rawTime()<0||u(e))},nr=function(t){var e=t.data;return e==="isFromStart"||e==="isStart"},An=function(t,e,r,i){var n=t.ratio,s=e<0||!e&&(!t._start&&Dn(t)&&!(!t._initted&&nr(t))||(t._ts<0||t._dp._ts<0)&&!nr(t))?0:1,a=t._rDelay,o=0,f,l,c;if(a&&t._repeat&&(o=Pe(0,t._tDur,e),l=ue(o,a),t._yoyo&&l&1&&(s=1-s),l!==ue(t._tTime,a)&&(n=1-s,t.vars.repeatRefresh&&t._initted&&t.invalidate())),s!==n||J||i||t._zTime===B||!e&&t._zTime){if(!t._initted&&xi(t,e,i,r,o))return;for(c=t._zTime,t._zTime=e||(r?B:0),r||(r=e&&!c),t.ratio=s,t._from&&(s=1-s),t._time=0,t._tTime=o,f=t._pt;f;)f.r(s,f.d),f=f._next;e<0&&ir(t,e,r,!0),t._onUpdate&&!r&&mt(t,"onUpdate"),o&&t._repeat&&!r&&t.parent&&mt(t,"onRepeat"),(e>=t._tDur||e<0)&&t.ratio===s&&(s&&Vt(t,1),!r&&!J&&(mt(t,s?"onComplete":"onReverseComplete",!0),t._prom&&t._prom()))}else t._zTime||(t._zTime=e)},En=function(t,e,r){var i;if(r>e)for(i=t._first;i&&i._start<=r;){if(i.data==="isPause"&&i._start>e)return i;i=i._next}else for(i=t._last;i&&i._start>=r;){if(i.data==="isPause"&&i._start0&&!i&&Ye(t,t._tTime=t._tDur*a),t.parent&&Ue(t),r||jt(t.parent,t),t},zr=function(t){return t instanceof et?jt(t):fe(t,t._dur)},Rn={_start:0,endTime:ye,totalDuration:ye},lt=function u(t,e,r){var i=t.labels,n=t._recent||Rn,s=t.duration()>=dt?n.endTime(!1):t._dur,a,o,f;return K(e)&&(isNaN(e)||e in i)?(o=e.charAt(0),f=e.substr(-1)==="%",a=e.indexOf("="),o==="<"||o===">"?(a>=0&&(e=e.replace(/=/,"")),(o==="<"?n._start:n.endTime(n._repeat>=0))+(parseFloat(e.substr(1))||0)*(f?(a<0?n:r).totalDuration()/100:1)):a<0?(e in i||(i[e]=s),i[e]):(o=parseFloat(e.charAt(a-1)+e.substr(a+1)),f&&r&&(o=o/100*(tt(r)?r[0]:r).totalDuration()),a>1?u(t,e.substr(0,a-1),r)+o:s+o)):e==null?s:+e},me=function(t,e,r){var i=At(e[1]),n=(i?2:1)+(t<2?0:1),s=e[n],a,o;if(i&&(s.duration=e[1]),s.parent=r,t){for(a=s,o=r;o&&!("immediateRender"in a);)a=o.vars.defaults||{},o=rt(o.vars.inherit)&&o.parent;s.immediateRender=rt(a.immediateRender),t<2?s.runBackwards=1:s.startAt=e[n-1]}return new H(e[0],s,e[n+1])},Ut=function(t,e){return t||t===0?e(t):e},Pe=function(t,e,r){return re?e:r},Z=function(t,e){return!K(t)||!(e=bn.exec(t))?"":e[1]},Fn=function(t,e,r){return Ut(r,function(i){return Pe(t,e,i)})},sr=[].slice,vi=function(t,e){return t&&Ot(t)&&"length"in t&&(!e&&!t.length||t.length-1 in t&&Ot(t[0]))&&!t.nodeType&&t!==ht},zn=function(t,e,r){return r===void 0&&(r=[]),t.forEach(function(i){var n;return K(i)&&!e||vi(i,1)?(n=r).push.apply(n,pt(i)):r.push(i)})||r},pt=function(t,e,r){return W&&!e&&W.selector?W.selector(t):K(t)&&!r&&(er||!le())?sr.call((e||gr).querySelectorAll(t),0):tt(t)?zn(t,r):vi(t)?sr.call(t,0):t?[t]:[]},ar=function(t){return t=pt(t)[0]||Re("Invalid scope")||{},function(e){var r=t.current||t.nativeElement||t;return pt(e,r.querySelectorAll?r:r===t?Re("Invalid scope")||gr.createElement("div"):t)}},Ti=function(t){return t.sort(function(){return .5-Math.random()})},bi=function(t){if($(t))return t;var 
e=Ot(t)?t:{each:t},r=Ht(e.ease),i=e.from||0,n=parseFloat(e.base)||0,s={},a=i>0&&i<1,o=isNaN(i)||a,f=e.axis,l=i,c=i;return K(i)?l=c={center:.5,edges:.5,end:1}[i]||0:!a&&o&&(l=i[0],c=i[1]),function(_,d,p){var h=(p||e).length,m=s[h],x,g,y,T,v,S,P,w,b;if(!m){if(b=e.grid==="auto"?0:(e.grid||[1,dt])[1],!b){for(P=-dt;P<(P=p[b++].getBoundingClientRect().left)&&bP&&(P=v),vh?h-1:f?f==="y"?h/b:b:Math.max(b,h/b))||0)*(i==="edges"?-1:1),m.b=h<0?n-h:n,m.u=Z(e.amount||e.each)||0,r=r&&h<0?Ei(r):r}return h=(m[_]-m.min)/m.max||0,Q(m.b+(r?r(h):h)*m.v)+m.u}},or=function(t){var e=Math.pow(10,((t+"").split(".")[1]||"").length);return function(r){var i=Q(Math.round(parseFloat(r)/t)*t*e);return(i-i%1)/e+(At(r)?0:Z(r))}},wi=function(t,e){var r=tt(t),i,n;return!r&&Ot(t)&&(i=r=t.radius||dt,t.values?(t=pt(t.values),(n=!At(t[0]))&&(i*=i)):t=or(t.increment)),Ut(e,r?$(t)?function(s){return n=t(s),Math.abs(n-s)<=i?n:s}:function(s){for(var a=parseFloat(n?s.x:s),o=parseFloat(n?s.y:0),f=dt,l=0,c=t.length,_,d;c--;)n?(_=t[c].x-a,d=t[c].y-o,_=_*_+d*d):_=Math.abs(t[c]-a),_i?n-s:s)})},xe=function(t){for(var e=0,r="",i,n,s,a;~(i=t.indexOf("random(",e));)s=t.indexOf(")",i),a=t.charAt(i+7)==="[",n=t.substr(i+7,s-i-7).match(a?oi:tr),r+=t.substr(e,i-e)+Si(a?n:+n[0],a?0:+n[1],+n[2]||1e-5),e=s+1;return r+t.substr(e,t.length-e)},ki=function(t,e,r,i,n){var s=e-t,a=i-r;return Ut(n,function(o){return r+((o-t)/s*a||0)})},Un=function u(t,e,r,i){var n=isNaN(t+e)?0:function(d){return(1-d)*t+d*e};if(!n){var s=K(t),a={},o,f,l,c,_;if(r===!0&&(i=1)&&(r=null),s)t={p:t},e={p:e};else if(tt(t)&&!tt(e)){for(l=[],c=t.length,_=c-2,f=1;f(a=Math.abs(a))&&(o=s,n=a);return o},mt=function(t,e,r){var i=t.vars,n=i[e],s=W,a=t._ctx,o,f,l;if(n)return o=i[e+"Params"],f=i.callbackScope||t,r&&Lt.length&&Fe(),a&&(W=a),l=o?n.apply(f,o):n.call(f),W=s,l},ce=function(t){return Vt(t),t.scrollTrigger&&t.scrollTrigger.kill(!!J),t.progress()<1&&mt(t,"onInterrupt"),t},re,Oi=[],Ci=function(t){if(mr()&&t){t=!t.name&&t.default||t;var e=t.name,r=$(t),i=e&&!r&&t.init?function(){this._props=[]}:t,n={init:ye,render:kr,add:br,kill:is,modifier:rs,rawVars:0},s={targetTest:0,get:0,getSetter:Pr,aliases:{},register:0};if(le(),t!==i){if(at[e])return;gt(i,gt(ze(t,n),s)),Qt(i.prototype,Qt(n,ze(t,s))),at[i.prop=e]=i,t.targetTest&&(De.push(i),xr[e]=1),e=(e==="css"?"CSS":e.charAt(0).toUpperCase()+e.substr(1))+"Plugin"}li(e,i),t.register&&t.register(st,i,nt)}else t&&Oi.push(t)},L=255,de={aqua:[0,L,L],lime:[0,L,0],silver:[192,192,192],black:[0,0,0],maroon:[128,0,0],teal:[0,128,128],blue:[0,0,L],navy:[0,0,128],white:[L,L,L],olive:[128,128,0],yellow:[L,L,0],orange:[L,165,0],gray:[128,128,128],purple:[128,0,128],green:[0,128,0],red:[L,0,0],pink:[L,192,203],cyan:[0,L,L],transparent:[L,L,L,0]},Ge=function(t,e,r){return t+=t<0?1:t>1?-1:0,(t*6<1?e+(r-e)*t*6:t<.5?r:t*3<2?e+(r-e)*(2/3-t)*6:e)*L+.5|0},Mi=function(t,e,r){var i=t?At(t)?[t>>16,t>>8&L,t&L]:0:de.black,n,s,a,o,f,l,c,_,d,p;if(!i){if(t.substr(-1)===","&&(t=t.substr(0,t.length-1)),de[t])i=de[t];else if(t.charAt(0)==="#"){if(t.length<6&&(n=t.charAt(1),s=t.charAt(2),a=t.charAt(3),t="#"+n+n+s+s+a+a+(t.length===5?t.charAt(4)+t.charAt(4):"")),t.length===9)return i=parseInt(t.substr(1,6),16),[i>>16,i>>8&L,i&L,parseInt(t.substr(7),16)/255];t=parseInt(t.substr(1),16),i=[t>>16,t>>8&L,t&L]}else if(t.substr(0,3)==="hsl"){if(i=p=t.match(tr),!e)o=+i[0]%360/360,f=+i[1]/100,l=+i[2]/100,s=l<=.5?l*(f+1):l+f-l*f,n=l*2-s,i.length>3&&(i[3]*=1),i[0]=Ge(o+1/3,n,s),i[1]=Ge(o,n,s),i[2]=Ge(o-1/3,n,s);else if(~t.indexOf("="))return 
i=t.match(si),r&&i.length<4&&(i[3]=1),i}else i=t.match(tr)||de.transparent;i=i.map(Number)}return e&&!p&&(n=i[0]/L,s=i[1]/L,a=i[2]/L,c=Math.max(n,s,a),_=Math.min(n,s,a),l=(c+_)/2,c===_?o=f=0:(d=c-_,f=l>.5?d/(2-c-_):d/(c+_),o=c===n?(s-a)/d+(st&&(r+=x-e),i+=x,v=i-r,y=v-s,(y>0||g)&&(S=++c.frame,_=v-c.time*1e3,c.time=v=v/1e3,s+=y+(y>=n?4:n-y),T=1),g||(o=f(h)),T)for(d=0;d=x&&d--},_listeners:a},c}(),le=function(){return!ve&&ot.wake()},F={},Xn=/^[\d.\-M][\d.\-,\s]/,qn=/["']/g,Gn=function(t){for(var e={},r=t.substr(1,t.length-3).split(":"),i=r[0],n=1,s=r.length,a,o,f;n1&&r.config?r.config.apply(null,~t.indexOf("{")?[Gn(e[1])]:Wn(t).split(",").map(di)):F._CE&&Xn.test(t)?F._CE("",t):r},Ei=function(t){return function(e){return 1-t(1-e)}},Ri=function u(t,e){for(var r=t._first,i;r;)r instanceof et?u(r,e):r.vars.yoyoEase&&(!r._yoyo||!r._repeat)&&r._yoyo!==e&&(r.timeline?u(r.timeline,e):(i=r._ease,r._ease=r._yEase,r._yEase=i,r._yoyo=e)),r=r._next},Ht=function(t,e){return t&&($(t)?t:F[t]||$n(t))||e},Zt=function(t,e,r,i){r===void 0&&(r=function(o){return 1-e(1-o)}),i===void 0&&(i=function(o){return o<.5?e(o*2)/2:1-e((1-o)*2)/2});var n={easeIn:e,easeOut:r,easeInOut:i},s;return it(t,function(a){F[a]=ft[a]=n,F[s=a.toLowerCase()]=r;for(var o in n)F[s+(o==="easeIn"?".in":o==="easeOut"?".out":".inOut")]=F[a+"."+o]=n[o]}),n},Fi=function(t){return function(e){return e<.5?(1-t(1-e*2))/2:.5+t((e-.5)*2)/2}},We=function u(t,e,r){var i=e>=1?e:1,n=(r||(t?.3:.45))/(e<1?e:1),s=n/Je*(Math.asin(1/i)||0),a=function(l){return l===1?1:i*Math.pow(2,-10*l)*Tn((l-s)*n)+1},o=t==="out"?a:t==="in"?function(f){return 1-a(1-f)}:Fi(a);return n=Je/n,o.config=function(f,l){return u(t,f,l)},o},$e=function u(t,e){e===void 0&&(e=1.70158);var r=function(s){return s?--s*s*((e+1)*s+e)+1:0},i=t==="out"?r:t==="in"?function(n){return 1-r(1-n)}:Fi(r);return i.config=function(n){return u(t,n)},i};it("Linear,Quad,Cubic,Quart,Quint,Strong",function(u,t){var e=t<5?t+1:t;Zt(u+",Power"+(e-1),t?function(r){return Math.pow(r,e)}:function(r){return r},function(r){return 1-Math.pow(1-r,e)},function(r){return r<.5?Math.pow(r*2,e)/2:1-Math.pow((1-r)*2,e)/2})});F.Linear.easeNone=F.none=F.Linear.easeIn;Zt("Elastic",We("in"),We("out"),We());(function(u,t){var e=1/t,r=2*e,i=2.5*e,n=function(a){return a0?r+(r+this._rDelay)*this._repeat:r):this.totalDuration()&&this._dur},t.totalDuration=function(r){return arguments.length?(this._dirty=0,fe(this,this._repeat<0?r:(r-this._repeat*this._rDelay)/(this._repeat+1))):this._tDur},t.totalTime=function(r,i){if(le(),!arguments.length)return this._tTime;var n=this._dp;if(n&&n.smoothChildTiming&&this._ts){for(Ye(this,r),!n._dp||n.parent||gi(n,this);n&&n.parent;)n.parent._time!==n._start+(n._ts>=0?n._tTime/n._ts:(n.totalDuration()-n._tTime)/-n._ts)&&n.totalTime(n._tTime,!0),n=n.parent;!this.parent&&this._dp.autoRemoveChildren&&(this._ts>0&&r0||!this._tDur&&!r)&&St(this._dp,this,this._start-this._delay)}return(this._tTime!==r||!this._dur&&!i||this._initted&&Math.abs(this._zTime)===B||!r&&!this._initted&&(this.add||this._ptLookup))&&(this._ts||(this._pTime=r),ci(this,r,i)),this},t.time=function(r,i){return arguments.length?this.totalTime(Math.min(this.totalDuration(),r+Fr(this))%(this._dur+this._rDelay)||(r?this._dur:0),i):this._time},t.totalProgress=function(r,i){return arguments.length?this.totalTime(this.totalDuration()*r,i):this.totalDuration()?Math.min(1,this._tTime/this._tDur):this.ratio},t.progress=function(r,i){return 
arguments.length?this.totalTime(this.duration()*(this._yoyo&&!(this.iteration()&1)?1-r:r)+Fr(this),i):this.duration()?Math.min(1,this._time/this._dur):this.ratio},t.iteration=function(r,i){var n=this.duration()+this._rDelay;return arguments.length?this.totalTime(this._time+(r-1)*n,i):this._repeat?ue(this._tTime,n)+1:1},t.timeScale=function(r){if(!arguments.length)return this._rts===-B?0:this._rts;if(this._rts===r)return this;var i=this.parent&&this._ts?Ie(this.parent._time,this):this._tTime;return this._rts=+r||0,this._ts=this._ps||r===-B?0:this._rts,this.totalTime(Pe(-Math.abs(this._delay),this._tDur,i),!0),Ue(this),Cn(this)},t.paused=function(r){return arguments.length?(this._ps!==r&&(this._ps=r,r?(this._pTime=this._tTime||Math.max(-this._delay,this.rawTime()),this._ts=this._act=0):(le(),this._ts=this._rts,this.totalTime(this.parent&&!this.parent.smoothChildTiming?this.rawTime():this._tTime||this._pTime,this.progress()===1&&Math.abs(this._zTime)!==B&&(this._tTime-=B)))),this):this._ps},t.startTime=function(r){if(arguments.length){this._start=r;var i=this.parent||this._dp;return i&&(i._sort||!this.parent)&&St(i,this,r-this._delay),this}return this._start},t.endTime=function(r){return this._start+(rt(r)?this.totalDuration():this.duration())/Math.abs(this._ts||1)},t.rawTime=function(r){var i=this.parent||this._dp;return i?r&&(!this._ts||this._repeat&&this._time&&this.totalProgress()<1)?this._tTime%(this._dur+this._rDelay):this._ts?Ie(i.rawTime(r),this):this._tTime:this._tTime},t.revert=function(r){r===void 0&&(r=Sn);var i=J;return J=r,(this._initted||this._startAt)&&(this.timeline&&this.timeline.revert(r),this.totalTime(-.01,r.suppressEvents)),this.data!=="nested"&&r.kill!==!1&&this.kill(),J=i,this},t.globalTime=function(r){for(var i=this,n=arguments.length?r:i.rawTime();i;)n=i._start+n/(i._ts||1),i=i._dp;return!this.parent&&this._sat?this._sat.vars.immediateRender?-1/0:this._sat.globalTime(r):n},t.repeat=function(r){return arguments.length?(this._repeat=r===1/0?-2:r,zr(this)):this._repeat===-2?1/0:this._repeat},t.repeatDelay=function(r){if(arguments.length){var i=this._time;return this._rDelay=r,zr(this),i?this.time(i):this}return this._rDelay},t.yoyo=function(r){return arguments.length?(this._yoyo=r,this):this._yoyo},t.seek=function(r,i){return this.totalTime(lt(this,r),rt(i))},t.restart=function(r,i){return this.play().totalTime(r?-this._delay:0,rt(i))},t.play=function(r,i){return r!=null&&this.seek(r,i),this.reversed(!1).paused(!1)},t.reverse=function(r,i){return r!=null&&this.seek(r||this.totalDuration(),i),this.reversed(!0).paused(!1)},t.pause=function(r,i){return r!=null&&this.seek(r,i),this.paused(!0)},t.resume=function(){return this.paused(!1)},t.reversed=function(r){return arguments.length?(!!r!==this.reversed()&&this.timeScale(-this._rts||(r?-B:0)),this):this._rts<0},t.invalidate=function(){return this._initted=this._act=0,this._zTime=-B,this},t.isActive=function(){var r=this.parent||this._dp,i=this._start,n;return!!(!r||this._ts&&this._initted&&r.isActive()&&(n=r.rawTime(!0))>=i&&n1?(i?(s[r]=i,n&&(s[r+"Params"]=n),r==="onUpdate"&&(this._onUpdate=i)):delete s[r],this):s[r]},t.then=function(r){var i=this;return new Promise(function(n){var s=$(r)?r:pi,a=function(){var 
f=i.then;i.then=null,$(s)&&(s=s(i))&&(s.then||s===i)&&(i.then=f),n(s),i.then=f};i._initted&&i.totalProgress()===1&&i._ts>=0||!i._tTime&&i._ts<0?a():i._prom=a})},t.kill=function(){ce(this)},u}();gt(Te.prototype,{_time:0,_start:0,_end:0,_tTime:0,_tDur:0,_dirty:0,_repeat:0,_yoyo:!1,parent:null,_initted:!1,_rDelay:0,_ts:1,_dp:0,ratio:0,_zTime:-B,_prom:0,_ps:!1,_rts:1});var et=function(u){ri(t,u);function t(r,i){var n;return r===void 0&&(r={}),n=u.call(this,r)||this,n.labels={},n.smoothChildTiming=!!r.smoothChildTiming,n.autoRemoveChildren=!!r.autoRemoveChildren,n._sort=rt(r.sortChildren),X&&St(r.parent||X,Mt(n),i),r.reversed&&n.reverse(),r.paused&&n.paused(!0),r.scrollTrigger&&yi(Mt(n),r.scrollTrigger),n}var e=t.prototype;return e.to=function(i,n,s){return me(0,arguments,this),this},e.from=function(i,n,s){return me(1,arguments,this),this},e.fromTo=function(i,n,s,a){return me(2,arguments,this),this},e.set=function(i,n,s){return n.duration=0,n.parent=this,pe(n).repeatDelay||(n.repeat=0),n.immediateRender=!!n.immediateRender,new H(i,n,lt(this,s),1),this},e.call=function(i,n,s){return St(this,H.delayedCall(0,i,n),s)},e.staggerTo=function(i,n,s,a,o,f,l){return s.duration=n,s.stagger=s.stagger||a,s.onComplete=f,s.onCompleteParams=l,s.parent=this,new H(i,s,lt(this,o)),this},e.staggerFrom=function(i,n,s,a,o,f,l){return s.runBackwards=1,pe(s).immediateRender=rt(s.immediateRender),this.staggerTo(i,n,s,a,o,f,l)},e.staggerFromTo=function(i,n,s,a,o,f,l,c){return a.startAt=s,pe(a).immediateRender=rt(a.immediateRender),this.staggerTo(i,n,a,o,f,l,c)},e.render=function(i,n,s){var a=this._time,o=this._dirty?this.totalDuration():this._tDur,f=this._dur,l=i<=0?0:Q(i),c=this._zTime<0!=i<0&&(this._initted||!f),_,d,p,h,m,x,g,y,T,v,S,P;if(this!==X&&l>o&&i>=0&&(l=o),l!==this._tTime||s||c){if(a!==this._time&&f&&(l+=this._time-a,i+=this._time-a),_=l,T=this._start,y=this._ts,x=!y,c&&(f||(a=this._zTime),(i||!n)&&(this._zTime=i)),this._repeat){if(S=this._yoyo,m=f+this._rDelay,this._repeat<-1&&i<0)return this.totalTime(m*100+i,n,s);if(_=Q(l%m),l===o?(h=this._repeat,_=f):(h=~~(l/m),h&&h===l/m&&(_=f,h--),_>f&&(_=f)),v=ue(this._tTime,m),!a&&this._tTime&&v!==h&&this._tTime-v*m-this._dur<=0&&(v=h),S&&h&1&&(_=f-_,P=1),h!==v&&!this._lock){var w=S&&v&1,b=w===(S&&h&1);if(h=a&&i>=0)for(d=this._first;d;){if(p=d._next,(d._act||_>=d._start)&&d._ts&&g!==d){if(d.parent!==this)return this.render(i,n,s);if(d.render(d._ts>0?(_-d._start)*d._ts:(d._dirty?d.totalDuration():d._tDur)+(_-d._start)*d._ts,n,s),_!==this._time||!this._ts&&!x){g=0,p&&(l+=this._zTime=-B);break}}d=p}else{d=this._last;for(var k=i<0?i:_;d;){if(p=d._prev,(d._act||k<=d._end)&&d._ts&&g!==d){if(d.parent!==this)return this.render(i,n,s);if(d.render(d._ts>0?(k-d._start)*d._ts:(d._dirty?d.totalDuration():d._tDur)+(k-d._start)*d._ts,n,s||J&&(d._initted||d._startAt)),_!==this._time||!this._ts&&!x){g=0,p&&(l+=this._zTime=k?-B:B);break}}d=p}}if(g&&!n&&(this.pause(),g.render(_>=a?0:-B)._zTime=_>=a?1:-1,this._ts))return this._start=T,Ue(this),this.render(i,n,s);this._onUpdate&&!n&&mt(this,"onUpdate",!0),(l===o&&this._tTime>=this.totalDuration()||!l&&a)&&(T===this._start||Math.abs(y)!==Math.abs(this._ts))&&(this._lock||((i||!f)&&(l===o&&this._ts>0||!l&&this._ts<0)&&Vt(this,1),!n&&!(i<0&&!a)&&(l||a||!o)&&(mt(this,l===o&&i>=0?"onComplete":"onReverseComplete",!0),this._prom&&!(l0)&&this._prom())))}return this},e.add=function(i,n){var s=this;if(At(n)||(n=lt(this,n,i)),!(i instanceof Te)){if(tt(i))return i.forEach(function(a){return s.add(a,n)}),this;if(K(i))return 
this.addLabel(i,n);if($(i))i=H.delayedCall(0,i);else return this}return this!==i?St(this,i,n):this},e.getChildren=function(i,n,s,a){i===void 0&&(i=!0),n===void 0&&(n=!0),s===void 0&&(s=!0),a===void 0&&(a=-dt);for(var o=[],f=this._first;f;)f._start>=a&&(f instanceof H?n&&o.push(f):(s&&o.push(f),i&&o.push.apply(o,f.getChildren(!0,n,s)))),f=f._next;return o},e.getById=function(i){for(var n=this.getChildren(1,1,1),s=n.length;s--;)if(n[s].vars.id===i)return n[s]},e.remove=function(i){return K(i)?this.removeLabel(i):$(i)?this.killTweensOf(i):(Ne(this,i),i===this._recent&&(this._recent=this._last),jt(this))},e.totalTime=function(i,n){return arguments.length?(this._forcing=1,!this._dp&&this._ts&&(this._start=Q(ot.time-(this._ts>0?i/this._ts:(this.totalDuration()-i)/-this._ts))),u.prototype.totalTime.call(this,i,n),this._forcing=0,this):this._tTime},e.addLabel=function(i,n){return this.labels[i]=lt(this,n),this},e.removeLabel=function(i){return delete this.labels[i],this},e.addPause=function(i,n,s){var a=H.delayedCall(0,n||ye,s);return a.data="isPause",this._hasPause=1,St(this,a,lt(this,i))},e.removePause=function(i){var n=this._first;for(i=lt(this,i);n;)n._start===i&&n.data==="isPause"&&Vt(n),n=n._next},e.killTweensOf=function(i,n,s){for(var a=this.getTweensOf(i,s),o=a.length;o--;)Ft!==a[o]&&a[o].kill(i,n);return this},e.getTweensOf=function(i,n){for(var s=[],a=pt(i),o=this._first,f=At(n),l;o;)o instanceof H?Pn(o._targets,a)&&(f?(!Ft||o._initted&&o._ts)&&o.globalTime(0)<=n&&o.globalTime(o.totalDuration())>n:!n||o.isActive())&&s.push(o):(l=o.getTweensOf(a,n)).length&&s.push.apply(s,l),o=o._next;return s},e.tweenTo=function(i,n){n=n||{};var s=this,a=lt(s,i),o=n,f=o.startAt,l=o.onStart,c=o.onStartParams,_=o.immediateRender,d,p=H.to(s,gt({ease:n.ease||"none",lazy:!1,immediateRender:!1,time:a,overwrite:"auto",duration:n.duration||Math.abs((a-(f&&"time"in f?f.time:s._time))/s.timeScale())||B,onStart:function(){if(s.pause(),!d){var m=n.duration||Math.abs((a-(f&&"time"in f?f.time:s._time))/s.timeScale());p._dur!==m&&fe(p,m,0,1).render(p._time,!0,!0),d=1}l&&l.apply(p,c||[])}},n));return _?p.render(0):p},e.tweenFromTo=function(i,n,s){return this.tweenTo(n,gt({startAt:{time:lt(this,i)}},s))},e.recent=function(){return this._recent},e.nextLabel=function(i){return i===void 0&&(i=this._time),Ir(this,lt(this,i))},e.previousLabel=function(i){return i===void 0&&(i=this._time),Ir(this,lt(this,i),1)},e.currentLabel=function(i){return arguments.length?this.seek(i,!0):this.previousLabel(this._time+B)},e.shiftChildren=function(i,n,s){s===void 0&&(s=0);for(var a=this._first,o=this.labels,f;a;)a._start>=s&&(a._start+=i,a._end+=i),a=a._next;if(n)for(f in o)o[f]>=s&&(o[f]+=i);return jt(this)},e.invalidate=function(i){var n=this._first;for(this._lock=0;n;)n.invalidate(i),n=n._next;return u.prototype.invalidate.call(this,i)},e.clear=function(i){i===void 0&&(i=!0);for(var n=this._first,s;n;)s=n._next,this.remove(n),n=s;return this._dp&&(this._time=this._tTime=this._pTime=0),i&&(this.labels={}),jt(this)},e.totalDuration=function(i){var n=0,s=this,a=s._last,o=dt,f,l,c;if(arguments.length)return 
s.timeScale((s._repeat<0?s.duration():s.totalDuration())/(s.reversed()?-i:i));if(s._dirty){for(c=s.parent;a;)f=a._prev,a._dirty&&a.totalDuration(),l=a._start,l>o&&s._sort&&a._ts&&!s._lock?(s._lock=1,St(s,a,l-a._delay,1)._lock=0):o=l,l<0&&a._ts&&(n-=l,(!c&&!s._dp||c&&c.smoothChildTiming)&&(s._start+=l/s._ts,s._time-=l,s._tTime-=l),s.shiftChildren(-l,!1,-1/0),o=0),a._end>n&&a._ts&&(n=a._end),a=f;fe(s,s===X&&s._time>n?s._time:n,1,1),s._dirty=0}return s._tDur},t.updateRoot=function(i){if(X._ts&&(ci(X,Ie(i,X)),hi=ot.frame),ot.frame>=Er){Er+=ut.autoSleep||120;var n=X._first;if((!n||!n._ts)&&ut.autoSleep&&ot._listeners.length<2){for(;n&&!n._ts;)n=n._next;n||ot.sleep()}}},t}(Te);gt(et.prototype,{_lock:0,_hasPause:0,_forcing:0});var jn=function(t,e,r,i,n,s,a){var o=new nt(this._pt,t,e,0,1,Ui,null,n),f=0,l=0,c,_,d,p,h,m,x,g;for(o.b=r,o.e=i,r+="",i+="",(x=~i.indexOf("random("))&&(i=xe(i)),s&&(g=[r,i],s(g,t,e),r=g[0],i=g[1]),_=r.match(Xe)||[];c=Xe.exec(i);)p=c[0],h=i.substring(f,c.index),d?d=(d+1)%5:h.substr(-5)==="rgba("&&(d=1),p!==_[l++]&&(m=parseFloat(_[l-1])||0,o._pt={_next:o._pt,p:h||l===1?h:",",s:m,c:p.charAt(1)==="="?ne(m,p)-m:parseFloat(p)-m,m:d&&d<4?Math.round:0},f=Xe.lastIndex);return o.c=f")}),T.duration();else{S={};for(w in p)w==="ease"||w==="easeEach"||Zn(w,p[w],S,p.easeEach);for(w in S)for(R=S[w].sort(function(M,V){return M.t-V.t}),E=0,v=0;vo-B&&!l?o:if&&(_=f)),x=this._yoyo&&p&1,x&&(T=this._yEase,_=f-_),m=ue(this._tTime,h),_===a&&!s&&this._initted)return this._tTime=c,this;p!==m&&(y&&this._yEase&&Ri(y,x),this.vars.repeatRefresh&&!x&&!this._lock&&(this._lock=s=1,this.render(Q(h*p),!0).invalidate()._lock=0))}if(!this._initted){if(xi(this,l?i:_,s,n,c))return this._tTime=0,this;if(a!==this._time)return this;if(f!==this._dur)return this.render(i,n,s)}if(this._tTime=c,this._time=_,!this._act&&this._ts&&(this._act=1,this._lazy=0),this.ratio=g=(T||this._ease)(_/f),this._from&&(this.ratio=g=1-g),_&&!a&&!n&&!p&&(mt(this,"onStart"),this._tTime!==c))return this;for(d=this._pt;d;)d.r(g,d.d),d=d._next;y&&y.render(i<0?i:!_&&x?-B:y._dur*y._ease(_/this._dur),n,s)||this._startAt&&(this._zTime=i),this._onUpdate&&!n&&(l&&ir(this,i,n,s),mt(this,"onUpdate")),this._repeat&&p!==m&&this.vars.onRepeat&&!n&&this.parent&&mt(this,"onRepeat"),(c===this._tDur||!c)&&this._tTime===c&&(l&&!this._onUpdate&&ir(this,i,!0,!0),(i||!f)&&(c===this._tDur&&this._ts>0||!c&&this._ts<0)&&Vt(this,1),!n&&!(l&&!a)&&(c||a||x)&&(mt(this,c===o?"onComplete":"onReverseComplete",!0),this._prom&&!(c0)&&this._prom()))}return this},e.targets=function(){return this._targets},e.invalidate=function(i){return(!i||!this.vars.runBackwards)&&(this._startAt=0),this._pt=this._op=this._onUpdate=this._lazy=this.ratio=0,this._ptLookup=[],this.timeline&&this.timeline.invalidate(i),u.prototype.invalidate.call(this,i)},e.resetTo=function(i,n,s,a){ve||ot.wake(),this._ts||this.play();var o=Math.min(this._dur,(this._dp._time-this._start)*this._ts),f;return this._initted||wr(this,o),f=this._ease(o/this._dur),Kn(this,i,n,s,a,f,o)?this.resetTo(i,n,s,a):(Ye(this,0),this.parent||mi(this._dp,this,"_first","_last",this._dp._sort?"_start":0),this.render(0))},e.kill=function(i,n){if(n===void 0&&(n="all"),!i&&(!n||n==="all"))return this._lazy=this._pt=0,this.parent?ce(this):this;if(this.timeline){var s=this.timeline.totalDuration();return this.timeline.killTweensOf(i,n,Ft&&Ft.vars.overwrite!==!0)._first||ce(this),this.parent&&s!==this.timeline.totalDuration()&&fe(this,this._dur*this.timeline._tDur/s,0,1),this}var 
a=this._targets,o=i?pt(i):a,f=this._ptLookup,l=this._pt,c,_,d,p,h,m,x;if((!n||n==="all")&&On(a,o))return n==="all"&&(this._pt=0),ce(this);for(c=this._op=this._op||[],n!=="all"&&(K(n)&&(h={},it(n,function(g){return h[g]=1}),n=h),n=Qn(a,n)),x=a.length;x--;)if(~o.indexOf(a[x])){_=f[x],n==="all"?(c[x]=n,p=_,d={}):(d=c[x]=c[x]||{},p=n);for(h in p)m=_&&_[h],m&&((!("kill"in m.d)||m.d.kill(h)===!0)&&Ne(this,m,"_pt"),delete _[h]),d!=="all"&&(d[h]=1)}return this._initted&&!this._pt&&l&&ce(this),this},t.to=function(i,n){return new t(i,n,arguments[2])},t.from=function(i,n){return me(1,arguments)},t.delayedCall=function(i,n,s,a){return new t(n,0,{immediateRender:!1,lazy:!1,overwrite:!1,delay:i,onComplete:n,onReverseComplete:n,onCompleteParams:s,onReverseCompleteParams:s,callbackScope:a})},t.fromTo=function(i,n,s){return me(2,arguments)},t.set=function(i,n){return n.duration=0,n.repeatDelay||(n.repeat=0),new t(i,n)},t.killTweensOf=function(i,n,s){return X.killTweensOf(i,n,s)},t}(Te);gt(H.prototype,{_targets:[],_lazy:0,_startAt:0,_op:0,_onInit:0});it("staggerTo,staggerFrom,staggerFromTo",function(u){H[u]=function(){var t=new et,e=sr.call(arguments,0);return e.splice(u==="staggerFromTo"?5:4,0,0),t[u].apply(t,e)}});var Sr=function(t,e,r){return t[e]=r},Vi=function(t,e,r){return t[e](r)},Jn=function(t,e,r,i){return t[e](i.fp,r)},ts=function(t,e,r){return t.setAttribute(e,r)},Pr=function(t,e){return $(t[e])?Vi:pr(t[e])&&t.setAttribute?ts:Sr},Ni=function(t,e){return e.set(e.t,e.p,Math.round((e.s+e.c*t)*1e6)/1e6,e)},es=function(t,e){return e.set(e.t,e.p,!!(e.s+e.c*t),e)},Ui=function(t,e){var r=e._pt,i="";if(!t&&e.b)i=e.b;else if(t===1&&e.e)i=e.e;else{for(;r;)i=r.p+(r.m?r.m(r.s+r.c*t):Math.round((r.s+r.c*t)*1e4)/1e4)+i,r=r._next;i+=e.c}e.set(e.t,e.p,i,e)},kr=function(t,e){for(var r=e._pt;r;)r.r(t,r.d),r=r._next},rs=function(t,e,r,i){for(var n=this._pt,s;n;)s=n._next,n.p===i&&n.modifier(t,e,r),n=s},is=function(t){for(var e=this._pt,r,i;e;)i=e._next,e.p===t&&!e.op||e.op===t?Ne(this,e,"_pt"):e.dep||(r=1),e=i;return!r},ns=function(t,e,r,i){i.mSet(t,e,i.m.call(i.tween,r,i.mt),i)},Yi=function(t){for(var e=t._pt,r,i,n,s;e;){for(r=e._next,i=n;i&&i.pr>e.pr;)i=i._next;(e._prev=i?i._prev:s)?e._prev._next=e:n=e,(e._next=i)?i._prev=e:s=e,e=r}t._pt=n},nt=function(){function u(e,r,i,n,s,a,o,f,l){this.t=r,this.s=n,this.c=s,this.p=i,this.r=a||Ni,this.d=o||this,this.set=f||Sr,this.pr=l||0,this._next=e,e&&(e._prev=this)}var t=u.prototype;return t.modifier=function(r,i,n){this.mSet=this.mSet||this.set,this.set=ns,this.m=r,this.mt=n,this.tween=i},u}();it(vr+"parent,duration,ease,delay,overwrite,runBackwards,startAt,yoyo,immediateRender,repeat,repeatDelay,data,paused,reversed,lazy,callbackScope,stringFilter,id,yoyoEase,stagger,inherit,repeatRefresh,keyframes,autoRevert,scrollTrigger",function(u){return xr[u]=1});ft.TweenMax=ft.TweenLite=H;ft.TimelineLite=ft.TimelineMax=et;X=new et({sortChildren:!1,defaults:oe,autoRemoveChildren:!0,id:"root",smoothChildTiming:!0});ut.stringFilter=Ai;var Kt=[],Ae={},ss=[],Br=0,as=0,je=function(t){return(Ae[t]||ss).map(function(e){return e()})},fr=function(){var t=Date.now(),e=[];t-Br>2&&(je("matchMediaInit"),Kt.forEach(function(r){var i=r.queries,n=r.conditions,s,a,o,f;for(a in i)s=ht.matchMedia(i[a]).matches,s&&(o=1),s!==n[a]&&(n[a]=s,f=1);f&&(r.revert(),o&&e.push(r))}),je("matchMediaRevert"),e.forEach(function(r){return r.onMatch(r)}),Br=t,je("matchMedia"))},Xi=function(){function u(e,r){this.selector=r&&ar(r),this.data=[],this._r=[],this.isReverted=!1,this.id=as++,e&&this.add(e)}var 
t=u.prototype;return t.add=function(r,i,n){$(r)&&(n=i,i=r,r=$);var s=this,a=function(){var f=W,l=s.selector,c;return f&&f!==s&&f.data.push(s),n&&(s.selector=ar(n)),W=s,c=i.apply(s,arguments),$(c)&&s._r.push(c),W=f,s.selector=l,s.isReverted=!1,c};return s.last=a,r===$?a(s):r?s[r]=a:a},t.ignore=function(r){var i=W;W=null,r(this),W=i},t.getTweens=function(){var r=[];return this.data.forEach(function(i){return i instanceof u?r.push.apply(r,i.getTweens()):i instanceof H&&!(i.parent&&i.parent.data==="nested")&&r.push(i)}),r},t.clear=function(){this._r.length=this.data.length=0},t.kill=function(r,i){var n=this;if(r){var s=this.getTweens();this.data.forEach(function(o){o.data==="isFlip"&&(o.revert(),o.getChildren(!0,!0,!1).forEach(function(f){return s.splice(s.indexOf(f),1)}))}),s.map(function(o){return{g:o.globalTime(0),t:o}}).sort(function(o,f){return f.g-o.g||-1/0}).forEach(function(o){return o.t.revert(r)}),this.data.forEach(function(o){return!(o instanceof H)&&o.revert&&o.revert(r)}),this._r.forEach(function(o){return o(r,n)}),this.isReverted=!0}else this.data.forEach(function(o){return o.kill&&o.kill()});if(this.clear(),i)for(var a=Kt.length;a--;)Kt[a].id===this.id&&Kt.splice(a,1)},t.revert=function(r){this.kill(r||{})},u}(),os=function(){function u(e){this.contexts=[],this.scope=e}var t=u.prototype;return t.add=function(r,i,n){Ot(r)||(r={matches:r});var s=new Xi(0,n||this.scope),a=s.conditions={},o,f,l;W&&!s.selector&&(s.selector=W.selector),this.contexts.push(s),i=s.add("onMatch",i),s.queries=r;for(f in r)f==="all"?l=1:(o=ht.matchMedia(r[f]),o&&(Kt.indexOf(s)<0&&Kt.push(s),(a[f]=o.matches)&&(l=1),o.addListener?o.addListener(fr):o.addEventListener("change",fr)));return l&&i(s),this},t.revert=function(r){this.kill(r||{})},t.kill=function(r){this.contexts.forEach(function(i){return i.kill(r,!0)})},u}(),Le={registerPlugin:function(){for(var t=arguments.length,e=new Array(t),r=0;r1){var i=t.map(function(l){return st.quickSetter(l,e,r)}),n=i.length;return function(l){for(var c=n;c--;)i[c](l)}}t=t[0]||{};var s=at[e],a=$t(t),o=a.harness&&(a.harness.aliases||{})[e]||e,f=s?function(l){var c=new s;re._pt=0,c.init(t,r?l+r:l,re,0,[t]),c.render(1,c),re._pt&&kr(1,re)}:a.set(t,o);return s?f:function(l){return f(t,o,r?l+r:l,a,1)}},quickTo:function(t,e,r){var i,n=st.to(t,Qt((i={},i[e]="+=0.1",i.paused=!0,i),r||{})),s=function(o,f,l){return n.resetTo(e,o,f,l)};return s.tween=n,s},isTweening:function(t){return X.getTweensOf(t,!0).length>0},defaults:function(t){return t&&t.ease&&(t.ease=Ht(t.ease,oe.ease)),Rr(oe,t||{})},config:function(t){return Rr(ut,t||{})},registerEffect:function(t){var e=t.name,r=t.effect,i=t.plugins,n=t.defaults,s=t.extendTimeline;(i||"").split(",").forEach(function(a){return a&&!at[a]&&!ft[a]&&Re(e+" effect requires "+a+" plugin.")}),qe[e]=function(a,o,f){return r(pt(a),gt(o||{},n),f)},s&&(et.prototype[e]=function(a,o,f){return this.add(qe[e](a,Ot(o)?o:(f=o)&&{},this),f)})},registerEase:function(t,e){F[t]=Ht(e)},parseEase:function(t,e){return arguments.length?Ht(t,e):F},getById:function(t){return X.getById(t)},exportRoot:function(t,e){t===void 0&&(t={});var r=new et(t),i,n;for(r.smoothChildTiming=rt(t.smoothChildTiming),X.remove(r),r._dp=0,r._time=r._tTime=X._time,i=X._first;i;)n=i._next,(e||!(!i._dur&&i instanceof H&&i.vars.onComplete===i._targets[0]))&&St(r,i,i._start-i._delay),i=n;return St(X,r,0),r},context:function(t,e){return t?new Xi(t,e):W},matchMedia:function(t){return new os(t)},matchMediaRefresh:function(){return Kt.forEach(function(t){var e=t.conditions,r,i;for(i in 
e)e[i]&&(e[i]=!1,r=1);r&&t.revert()})||fr()},addEventListener:function(t,e){var r=Ae[t]||(Ae[t]=[]);~r.indexOf(e)||r.push(e)},removeEventListener:function(t,e){var r=Ae[t],i=r&&r.indexOf(e);i>=0&&r.splice(i,1)},utils:{wrap:Vn,wrapYoyo:Nn,distribute:bi,random:Si,snap:wi,normalize:Bn,getUnit:Z,clamp:Fn,splitColor:Mi,toArray:pt,selector:ar,mapRange:ki,pipe:In,unitize:Ln,interpolate:Un,shuffle:Ti},install:fi,effects:qe,ticker:ot,updateRoot:et.updateRoot,plugins:at,globalTimeline:X,core:{PropTween:nt,globals:li,Tween:H,Timeline:et,Animation:Te,getCache:$t,_removeLinkedListItem:Ne,reverting:function(){return J},context:function(t){return t&&W&&(W.data.push(t),t._ctx=W),W},suppressOverwrites:function(t){return dr=t}}};it("to,from,fromTo,delayedCall,set,killTweensOf",function(u){return Le[u]=H[u]});ot.add(et.updateRoot);re=Le.to({},{duration:0});var us=function(t,e){for(var r=t._pt;r&&r.p!==e&&r.op!==e&&r.fp!==e;)r=r._next;return r},fs=function(t,e){var r=t._targets,i,n,s;for(i in e)for(n=r.length;n--;)s=t._ptLookup[n][i],s&&(s=s.d)&&(s._pt&&(s=us(s,i)),s&&s.modifier&&s.modifier(e[i],t,r[n],i))},He=function(t,e){return{name:t,rawVars:1,init:function(i,n,s){s._onInit=function(a){var o,f;if(K(n)&&(o={},it(n,function(l){return o[l]=1}),n=o),e){o={};for(f in n)o[f]=e(n[f]);n=o}fs(a,n)}}}},st=Le.registerPlugin({name:"attr",init:function(t,e,r,i,n){var s,a,o;this.tween=r;for(s in e)o=t.getAttribute(s)||"",a=this.add(t,"setAttribute",(o||0)+"",e[s],i,n,0,0,s),a.op=s,a.b=o,this._props.push(s)},render:function(t,e){for(var r=e._pt;r;)J?r.set(r.t,r.p,r.b,r):r.r(t,r.d),r=r._next}},{name:"endArray",init:function(t,e){for(var r=e.length;r--;)this.add(t,r,t[r]||0,e[r],0,0,0,0,0,1)}},He("roundProps",or),He("modifiers"),He("snap",wi))||Le;H.version=et.version=st.version="3.12.2";ui=1;mr()&&le();F.Power0;F.Power1;F.Power2;F.Power3;F.Power4;F.Linear;F.Quad;F.Cubic;F.Quart;F.Quint;F.Strong;F.Elastic;F.Back;F.SteppedEase;F.Bounce;F.Sine;F.Expo;F.Circ;/*!
- * CSSPlugin 3.12.2
- * https://greensock.com
- *
- * Copyright 2008-2023, GreenSock. All rights reserved.
- * Subject to the terms at https://greensock.com/standard-license or for
- * Club GreenSock members, the agreement issued with that membership.
- * @author: Jack Doyle, jack@greensock.com
-*/var Vr,zt,se,Or,Wt,Nr,Cr,ls=function(){return typeof window<"u"},Et={},Gt=180/Math.PI,ae=Math.PI/180,te=Math.atan2,Ur=1e8,Mr=/([A-Z])/g,hs=/(left|right|width|margin|padding|x)/i,_s=/[\s,\(]\S/,Pt={autoAlpha:"opacity,visibility",scale:"scaleX,scaleY",alpha:"opacity"},lr=function(t,e){return e.set(e.t,e.p,Math.round((e.s+e.c*t)*1e4)/1e4+e.u,e)},cs=function(t,e){return e.set(e.t,e.p,t===1?e.e:Math.round((e.s+e.c*t)*1e4)/1e4+e.u,e)},ds=function(t,e){return e.set(e.t,e.p,t?Math.round((e.s+e.c*t)*1e4)/1e4+e.u:e.b,e)},ps=function(t,e){var r=e.s+e.c*t;e.set(e.t,e.p,~~(r+(r<0?-.5:.5))+e.u,e)},qi=function(t,e){return e.set(e.t,e.p,t?e.e:e.b,e)},Gi=function(t,e){return e.set(e.t,e.p,t!==1?e.b:e.e,e)},ms=function(t,e,r){return t.style[e]=r},gs=function(t,e,r){return t.style.setProperty(e,r)},ys=function(t,e,r){return t._gsap[e]=r},xs=function(t,e,r){return t._gsap.scaleX=t._gsap.scaleY=r},vs=function(t,e,r,i,n){var s=t._gsap;s.scaleX=s.scaleY=r,s.renderTransform(n,s)},Ts=function(t,e,r,i,n){var s=t._gsap;s[e]=r,s.renderTransform(n,s)},q="transform",yt=q+"Origin",bs=function u(t,e){var r=this,i=this.target,n=i.style;if(t in Et&&n){if(this.tfm=this.tfm||{},t!=="transform")t=Pt[t]||t,~t.indexOf(",")?t.split(",").forEach(function(s){return r.tfm[s]=Dt(i,s)}):this.tfm[t]=i._gsap.x?i._gsap[t]:Dt(i,t);else return Pt.transform.split(",").forEach(function(s){return u.call(r,s,e)});if(this.props.indexOf(q)>=0)return;i._gsap.svg&&(this.svgo=i.getAttribute("data-svg-origin"),this.props.push(yt,e,"")),t=q}(n||e)&&this.props.push(t,e,n[t])},Wi=function(t){t.translate&&(t.removeProperty("translate"),t.removeProperty("scale"),t.removeProperty("rotate"))},ws=function(){var t=this.props,e=this.target,r=e.style,i=e._gsap,n,s;for(n=0;n=0?Yr[s]:"")+t},_r=function(){ls()&&window.document&&(Vr=window,zt=Vr.document,se=zt.documentElement,Wt=hr("div")||{style:{}},hr("div"),q=he(q),yt=q+"Origin",Wt.style.cssText="border-width:0;line-height:0;position:absolute;padding:0",ji=!!he("perspective"),Cr=st.core.reverting,Or=1)},Ke=function u(t){var e=hr("svg",this.ownerSVGElement&&this.ownerSVGElement.getAttribute("xmlns")||"http://www.w3.org/2000/svg"),r=this.parentNode,i=this.nextSibling,n=this.style.cssText,s;if(se.appendChild(e),e.appendChild(this),this.style.display="block",t)try{s=this.getBBox(),this._gsapBBox=this.getBBox,this.getBBox=u}catch{}else this._gsapBBox&&(s=this._gsapBBox());return r&&(i?r.insertBefore(this,i):r.appendChild(this)),se.removeChild(e),this.style.cssText=n,s},Xr=function(t,e){for(var r=e.length;r--;)if(t.hasAttribute(e[r]))return t.getAttribute(e[r])},Hi=function(t){var e;try{e=t.getBBox()}catch{e=Ke.call(t,!0)}return e&&(e.width||e.height)||t.getBBox===Ke||(e=Ke.call(t,!0)),e&&!e.width&&!e.x&&!e.y?{x:+Xr(t,["x","cx","x1"])||0,y:+Xr(t,["y","cy","y1"])||0,width:0,height:0}:e},Ki=function(t){return!!(t.getCTM&&(!t.parentNode||t.ownerSVGElement)&&Hi(t))},be=function(t,e){if(e){var r=t.style;e in Et&&e!==yt&&(e=q),r.removeProperty?((e.substr(0,2)==="ms"||e.substr(0,6)==="webkit")&&(e="-"+e),r.removeProperty(e.replace(Mr,"-$1").toLowerCase())):r.removeAttribute(e)}},It=function(t,e,r,i,n,s){var a=new nt(t._pt,e,r,0,1,s?Gi:qi);return t._pt=a,a.b=i,a.e=n,t._props.push(r),a},qr={deg:1,rad:1,turn:1},Ss={grid:1,flex:1},Nt=function u(t,e,r,i){var n=parseFloat(r)||0,s=(r+"").trim().substr((n+"").length)||"px",a=Wt.style,o=hs.test(e),f=t.tagName.toLowerCase()==="svg",l=(f?"client":"offset")+(o?"Width":"Height"),c=100,_=i==="px",d=i==="%",p,h,m,x;return 
i===s||!n||qr[i]||qr[s]?n:(s!=="px"&&!_&&(n=u(t,e,r,"px")),x=t.getCTM&&Ki(t),(d||s==="%")&&(Et[e]||~e.indexOf("adius"))?(p=x?t.getBBox()[o?"width":"height"]:t[l],j(d?n/p*c:n/100*p)):(a[o?"width":"height"]=c+(_?s:i),h=~e.indexOf("adius")||i==="em"&&t.appendChild&&!f?t:t.parentNode,x&&(h=(t.ownerSVGElement||{}).parentNode),(!h||h===zt||!h.appendChild)&&(h=zt.body),m=h._gsap,m&&d&&m.width&&o&&m.time===ot.time&&!m.uncache?j(n/m.width*c):((d||s==="%")&&!Ss[kt(h,"display")]&&(a.position=kt(t,"position")),h===t&&(a.position="static"),h.appendChild(Wt),p=Wt[l],h.removeChild(Wt),a.position="absolute",o&&d&&(m=$t(h),m.time=ot.time,m.width=h[l]),j(_?p*n/c:p&&n?c/p*n:0))))},Dt=function(t,e,r,i){var n;return Or||_r(),e in Pt&&e!=="transform"&&(e=Pt[e],~e.indexOf(",")&&(e=e.split(",")[0])),Et[e]&&e!=="transform"?(n=Se(t,i),n=e!=="transformOrigin"?n[e]:n.svg?n.origin:Ve(kt(t,yt))+" "+n.zOrigin+"px"):(n=t.style[e],(!n||n==="auto"||i||~(n+"").indexOf("calc("))&&(n=Be[e]&&Be[e](t,e,r)||kt(t,e)||_i(t,e)||(e==="opacity"?1:0))),r&&!~(n+"").trim().indexOf(" ")?Nt(t,e,n,r)+r:n},Ps=function(t,e,r,i){if(!r||r==="none"){var n=he(e,t,1),s=n&&kt(t,n,1);s&&s!==r?(e=n,r=s):e==="borderColor"&&(r=kt(t,"borderTopColor"))}var a=new nt(this._pt,t.style,e,0,1,Ui),o=0,f=0,l,c,_,d,p,h,m,x,g,y,T,v;if(a.b=r,a.e=i,r+="",i+="",i==="auto"&&(t.style[e]=i,i=kt(t,e)||i,t.style[e]=r),l=[r,i],Ai(l),r=l[0],i=l[1],_=r.match(ee)||[],v=i.match(ee)||[],v.length){for(;c=ee.exec(i);)m=c[0],g=i.substring(o,c.index),p?p=(p+1)%5:(g.substr(-5)==="rgba("||g.substr(-5)==="hsla(")&&(p=1),m!==(h=_[f++]||"")&&(d=parseFloat(h)||0,T=h.substr((d+"").length),m.charAt(1)==="="&&(m=ne(d,m)+T),x=parseFloat(m),y=m.substr((x+"").length),o=ee.lastIndex-y.length,y||(y=y||ut.units[e]||T,o===i.length&&(i+=y,a.e+=y)),T!==y&&(d=Nt(t,e,h,y)||0),a._pt={_next:a._pt,p:g||f===1?g:",",s:d,c:x-d,m:p&&p<4||e==="zIndex"?Math.round:0});a.c=o-1;)a=n[f],Et[a]&&(o=1,a=a==="transformOrigin"?yt:q),be(r,a);o&&(be(r,q),s&&(s.svg&&r.removeAttribute("transform"),Se(r,1),s.uncache=1,Wi(i)))}},Be={clearProps:function(t,e,r,i,n){if(n.data!=="isFromStart"){var s=t._pt=new nt(t._pt,e,r,0,0,Os);return s.u=i,s.pr=-10,s.tween=n,t._props.push(r),1}}},we=[1,0,0,1,0,0],Qi={},Zi=function(t){return t==="matrix(1, 0, 0, 1, 0, 0)"||t==="none"||!t},Wr=function(t){var e=kt(t,q);return Zi(e)?we:e.substr(7).match(si).map(j)},Dr=function(t,e){var r=t._gsap||$t(t),i=t.style,n=Wr(t),s,a,o,f;return r.svg&&t.getAttribute("transform")?(o=t.transform.baseVal.consolidate().matrix,n=[o.a,o.b,o.c,o.d,o.e,o.f],n.join(",")==="1,0,0,1,0,0"?we:n):(n===we&&!t.offsetParent&&t!==se&&!r.svg&&(o=i.display,i.display="block",s=t.parentNode,(!s||!t.offsetParent)&&(f=1,a=t.nextElementSibling,se.appendChild(t)),n=Wr(t),o?i.display=o:be(t,"display"),f&&(a?s.insertBefore(t,a):s?s.appendChild(t):se.removeChild(t))),e&&n.length>6?[n[0],n[1],n[4],n[5],n[12],n[13]]:n)},cr=function(t,e,r,i,n,s){var a=t._gsap,o=n||Dr(t,!0),f=a.xOrigin||0,l=a.yOrigin||0,c=a.xOffset||0,_=a.yOffset||0,d=o[0],p=o[1],h=o[2],m=o[3],x=o[4],g=o[5],y=e.split(" "),T=parseFloat(y[0])||0,v=parseFloat(y[1])||0,S,P,w,b;r?o!==we&&(P=d*m-p*h)&&(w=T*(m/P)+v*(-h/P)+(h*g-m*x)/P,b=T*(-p/P)+v*(d/P)-(d*g-p*x)/P,T=w,v=b):(S=Hi(t),T=S.x+(~y[0].indexOf("%")?T/100*S.width:T),v=S.y+(~(y[1]||y[0]).indexOf("%")?v/100*S.height:v)),i||i!==!1&&a.smooth?(x=T-f,g=v-l,a.xOffset=c+(x*d+g*h)-x,a.yOffset=_+(x*p+g*m)-g):a.xOffset=a.yOffset=0,a.xOrigin=T,a.yOrigin=v,a.smooth=!!i,a.origin=e,a.originIsAbsolute=!!r,t.style[yt]="0px 
0px",s&&(It(s,a,"xOrigin",f,T),It(s,a,"yOrigin",l,v),It(s,a,"xOffset",c,a.xOffset),It(s,a,"yOffset",_,a.yOffset)),t.setAttribute("data-svg-origin",T+" "+v)},Se=function(t,e){var r=t._gsap||new zi(t);if("x"in r&&!e&&!r.uncache)return r;var i=t.style,n=r.scaleX<0,s="px",a="deg",o=getComputedStyle(t),f=kt(t,yt)||"0",l,c,_,d,p,h,m,x,g,y,T,v,S,P,w,b,k,z,E,R,O,C,M,V,N,xt,vt,Rt,Tt,Jt,Ct,Yt;return l=c=_=h=m=x=g=y=T=0,d=p=1,r.svg=!!(t.getCTM&&Ki(t)),o.translate&&((o.translate!=="none"||o.scale!=="none"||o.rotate!=="none")&&(i[q]=(o.translate!=="none"?"translate3d("+(o.translate+" 0 0").split(" ").slice(0,3).join(", ")+") ":"")+(o.rotate!=="none"?"rotate("+o.rotate+") ":"")+(o.scale!=="none"?"scale("+o.scale.split(" ").join(",")+") ":"")+(o[q]!=="none"?o[q]:"")),i.scale=i.rotate=i.translate="none"),P=Dr(t,r.svg),r.svg&&(r.uncache?(N=t.getBBox(),f=r.xOrigin-N.x+"px "+(r.yOrigin-N.y)+"px",V=""):V=!e&&t.getAttribute("data-svg-origin"),cr(t,V||f,!!V||r.originIsAbsolute,r.smooth!==!1,P)),v=r.xOrigin||0,S=r.yOrigin||0,P!==we&&(z=P[0],E=P[1],R=P[2],O=P[3],l=C=P[4],c=M=P[5],P.length===6?(d=Math.sqrt(z*z+E*E),p=Math.sqrt(O*O+R*R),h=z||E?te(E,z)*Gt:0,g=R||O?te(R,O)*Gt+h:0,g&&(p*=Math.abs(Math.cos(g*ae))),r.svg&&(l-=v-(v*z+S*R),c-=S-(v*E+S*O))):(Yt=P[6],Jt=P[7],vt=P[8],Rt=P[9],Tt=P[10],Ct=P[11],l=P[12],c=P[13],_=P[14],w=te(Yt,Tt),m=w*Gt,w&&(b=Math.cos(-w),k=Math.sin(-w),V=C*b+vt*k,N=M*b+Rt*k,xt=Yt*b+Tt*k,vt=C*-k+vt*b,Rt=M*-k+Rt*b,Tt=Yt*-k+Tt*b,Ct=Jt*-k+Ct*b,C=V,M=N,Yt=xt),w=te(-R,Tt),x=w*Gt,w&&(b=Math.cos(-w),k=Math.sin(-w),V=z*b-vt*k,N=E*b-Rt*k,xt=R*b-Tt*k,Ct=O*k+Ct*b,z=V,E=N,R=xt),w=te(E,z),h=w*Gt,w&&(b=Math.cos(w),k=Math.sin(w),V=z*b+E*k,N=C*b+M*k,E=E*b-z*k,M=M*b-C*k,z=V,C=N),m&&Math.abs(m)+Math.abs(h)>359.9&&(m=h=0,x=180-x),d=j(Math.sqrt(z*z+E*E+R*R)),p=j(Math.sqrt(M*M+Yt*Yt)),w=te(C,M),g=Math.abs(w)>2e-4?w*Gt:0,T=Ct?1/(Ct<0?-Ct:Ct):0),r.svg&&(V=t.getAttribute("transform"),r.forceCSS=t.setAttribute("transform","")||!Zi(kt(t,q)),V&&t.setAttribute("transform",V))),Math.abs(g)>90&&Math.abs(g)<270&&(n?(d*=-1,g+=h<=0?180:-180,h+=h<=0?180:-180):(p*=-1,g+=g<=0?180:-180)),e=e||r.uncache,r.x=l-((r.xPercent=l&&(!e&&r.xPercent||(Math.round(t.offsetWidth/2)===Math.round(-l)?-50:0)))?t.offsetWidth*r.xPercent/100:0)+s,r.y=c-((r.yPercent=c&&(!e&&r.yPercent||(Math.round(t.offsetHeight/2)===Math.round(-c)?-50:0)))?t.offsetHeight*r.yPercent/100:0)+s,r.z=_+s,r.scaleX=j(d),r.scaleY=j(p),r.rotation=j(h)+a,r.rotationX=j(m)+a,r.rotationY=j(x)+a,r.skewX=g+a,r.skewY=y+a,r.transformPerspective=T+s,(r.zOrigin=parseFloat(f.split(" ")[2])||0)&&(i[yt]=Ve(f)),r.xOffset=r.yOffset=0,r.force3D=ut.force3D,r.renderTransform=r.svg?Ms:ji?Ji:Cs,r.uncache=0,r},Ve=function(t){return(t=t.split(" "))[0]+" "+t[1]},Qe=function(t,e,r){var i=Z(e);return j(parseFloat(e)+parseFloat(Nt(t,"x",r+"px",i)))+i},Cs=function(t,e){e.z="0px",e.rotationY=e.rotationX="0deg",e.force3D=0,Ji(t,e)},Xt="0deg",_e="0px",qt=") ",Ji=function(t,e){var r=e||this,i=r.xPercent,n=r.yPercent,s=r.x,a=r.y,o=r.z,f=r.rotation,l=r.rotationY,c=r.rotationX,_=r.skewX,d=r.skewY,p=r.scaleX,h=r.scaleY,m=r.transformPerspective,x=r.force3D,g=r.target,y=r.zOrigin,T="",v=x==="auto"&&t&&t!==1||x===!0;if(y&&(c!==Xt||l!==Xt)){var S=parseFloat(l)*ae,P=Math.sin(S),w=Math.cos(S),b;S=parseFloat(c)*ae,b=Math.cos(S),s=Qe(g,s,P*b*-y),a=Qe(g,a,-Math.sin(S)*-y),o=Qe(g,o,w*b*-y+y)}m!==_e&&(T+="perspective("+m+qt),(i||n)&&(T+="translate("+i+"%, "+n+"%) "),(v||s!==_e||a!==_e||o!==_e)&&(T+=o!==_e||v?"translate3d("+s+", "+a+", "+o+") ":"translate("+s+", 
"+a+qt),f!==Xt&&(T+="rotate("+f+qt),l!==Xt&&(T+="rotateY("+l+qt),c!==Xt&&(T+="rotateX("+c+qt),(_!==Xt||d!==Xt)&&(T+="skew("+_+", "+d+qt),(p!==1||h!==1)&&(T+="scale("+p+", "+h+qt),g.style[q]=T||"translate(0, 0)"},Ms=function(t,e){var r=e||this,i=r.xPercent,n=r.yPercent,s=r.x,a=r.y,o=r.rotation,f=r.skewX,l=r.skewY,c=r.scaleX,_=r.scaleY,d=r.target,p=r.xOrigin,h=r.yOrigin,m=r.xOffset,x=r.yOffset,g=r.forceCSS,y=parseFloat(s),T=parseFloat(a),v,S,P,w,b;o=parseFloat(o),f=parseFloat(f),l=parseFloat(l),l&&(l=parseFloat(l),f+=l,o+=l),o||f?(o*=ae,f*=ae,v=Math.cos(o)*c,S=Math.sin(o)*c,P=Math.sin(o-f)*-_,w=Math.cos(o-f)*_,f&&(l*=ae,b=Math.tan(f-l),b=Math.sqrt(1+b*b),P*=b,w*=b,l&&(b=Math.tan(l),b=Math.sqrt(1+b*b),v*=b,S*=b)),v=j(v),S=j(S),P=j(P),w=j(w)):(v=c,w=_,S=P=0),(y&&!~(s+"").indexOf("px")||T&&!~(a+"").indexOf("px"))&&(y=Nt(d,"x",s,"px"),T=Nt(d,"y",a,"px")),(p||h||m||x)&&(y=j(y+p-(p*v+h*P)+m),T=j(T+h-(p*S+h*w)+x)),(i||n)&&(b=d.getBBox(),y=j(y+i/100*b.width),T=j(T+n/100*b.height)),b="matrix("+v+","+S+","+P+","+w+","+y+","+T+")",d.setAttribute("transform",b),g&&(d.style[q]=b)},Ds=function(t,e,r,i,n){var s=360,a=K(n),o=parseFloat(n)*(a&&~n.indexOf("rad")?Gt:1),f=o-i,l=i+f+"deg",c,_;return a&&(c=n.split("_")[1],c==="short"&&(f%=s,f!==f%(s/2)&&(f+=f<0?s:-s)),c==="cw"&&f<0?f=(f+s*Ur)%s-~~(f/s)*s:c==="ccw"&&f>0&&(f=(f-s*Ur)%s-~~(f/s)*s)),t._pt=_=new nt(t._pt,e,r,i,f,cs),_.e=l,_.u="deg",t._props.push(r),_},$r=function(t,e){for(var r in e)t[r]=e[r];return t},As=function(t,e,r){var i=$r({},r._gsap),n="perspective,force3D,transformOrigin,svgOrigin",s=r.style,a,o,f,l,c,_,d,p;i.svg?(f=r.getAttribute("transform"),r.setAttribute("transform",""),s[q]=e,a=Se(r,1),be(r,q),r.setAttribute("transform",f)):(f=getComputedStyle(r)[q],s[q]=e,a=Se(r,1),s[q]=f);for(o in Et)f=i[o],l=a[o],f!==l&&n.indexOf(o)<0&&(d=Z(f),p=Z(l),c=d!==p?Nt(r,o,f,p):parseFloat(f),_=parseFloat(l),t._pt=new nt(t._pt,a,o,c,_-c,lr),t._pt.u=p||0,t._props.push(o));$r(a,i)};it("padding,margin,Width,Radius",function(u,t){var e="Top",r="Right",i="Bottom",n="Left",s=(t<3?[e,r,i,n]:[e+n,e+r,i+r,i+n]).map(function(a){return t<2?u+a:"border"+a+u});Be[t>1?"border"+u:u]=function(a,o,f,l,c){var _,d;if(arguments.length<4)return _=s.map(function(p){return Dt(a,p,f)}),d=_.join(" "),d.split(_[0]).length===5?_[0]:d;_=(l+"").split(" "),d={},s.forEach(function(p,h){return d[p]=_[h]=_[h]||_[(h-1)/2|0]}),a.init(o,d,c)}});var tn={name:"css",register:_r,targetTest:function(t){return t.style&&t.nodeType},init:function(t,e,r,i,n){var s=this._props,a=t.style,o=r.vars.startAt,f,l,c,_,d,p,h,m,x,g,y,T,v,S,P,w;Or||_r(),this.styles=this.styles||$i(t),w=this.styles.props,this.tween=r;for(h in e)if(h!=="autoRound"&&(l=e[h],!(at[h]&&Ii(h,e,r,i,t,n)))){if(d=typeof l,p=Be[h],d==="function"&&(l=l.call(r,i,t,n),d=typeof l),d==="string"&&~l.indexOf("random(")&&(l=xe(l)),p)p(this,t,h,l,r)&&(P=1);else if(h.substr(0,2)==="--")f=(getComputedStyle(t).getPropertyValue(h)+"").trim(),l+="",Bt.lastIndex=0,Bt.test(f)||(m=Z(f),x=Z(l)),x?m!==x&&(f=Nt(t,h,f,x)+x):m&&(l+=m),this.add(a,"setProperty",f,l,i,n,0,0,h),s.push(h),w.push(h,0,a[h]);else if(d!=="undefined"){if(o&&h in o?(f=typeof o[h]=="function"?o[h].call(r,i,t,n):o[h],K(f)&&~f.indexOf("random(")&&(f=xe(f)),Z(f+"")||(f+=ut.units[h]||Z(Dt(t,h))||""),(f+"").charAt(1)==="="&&(f=Dt(t,h))):f=Dt(t,h),_=parseFloat(f),g=d==="string"&&l.charAt(1)==="="&&l.substr(0,2),g&&(l=l.substr(2)),c=parseFloat(l),h in 
Pt&&(h==="autoAlpha"&&(_===1&&Dt(t,"visibility")==="hidden"&&c&&(_=0),w.push("visibility",0,a.visibility),It(this,a,"visibility",_?"inherit":"hidden",c?"inherit":"hidden",!c)),h!=="scale"&&h!=="transform"&&(h=Pt[h],~h.indexOf(",")&&(h=h.split(",")[0]))),y=h in Et,y){if(this.styles.save(h),T||(v=t._gsap,v.renderTransform&&!e.parseTransform||Se(t,e.parseTransform),S=e.smoothOrigin!==!1&&v.smooth,T=this._pt=new nt(this._pt,a,q,0,1,v.renderTransform,v,0,-1),T.dep=1),h==="scale")this._pt=new nt(this._pt,v,"scaleY",v.scaleY,(g?ne(v.scaleY,g+c):c)-v.scaleY||0,lr),this._pt.u=0,s.push("scaleY",h),h+="X";else if(h==="transformOrigin"){w.push(yt,0,a[yt]),l=ks(l),v.svg?cr(t,l,0,S,0,this):(x=parseFloat(l.split(" ")[2])||0,x!==v.zOrigin&&It(this,v,"zOrigin",v.zOrigin,x),It(this,a,h,Ve(f),Ve(l)));continue}else if(h==="svgOrigin"){cr(t,l,1,S,0,this);continue}else if(h in Qi){Ds(this,v,h,_,g?ne(_,g+l):l);continue}else if(h==="smoothOrigin"){It(this,v,"smooth",v.smooth,l);continue}else if(h==="force3D"){v[h]=l;continue}else if(h==="transform"){As(this,l,t);continue}}else h in a||(h=he(h)||h);if(y||(c||c===0)&&(_||_===0)&&!_s.test(l)&&h in a)m=(f+"").substr((_+"").length),c||(c=0),x=Z(l)||(h in ut.units?ut.units[h]:m),m!==x&&(_=Nt(t,h,f,x)),this._pt=new nt(this._pt,y?v:a,h,_,(g?ne(_,g+c):c)-_,!y&&(x==="px"||h==="zIndex")&&e.autoRound!==!1?ps:lr),this._pt.u=x||0,m!==x&&x!=="%"&&(this._pt.b=f,this._pt.r=ds);else if(h in a)Ps.call(this,t,h,f,g?g+l:l);else if(h in t)this.add(t,h,f||t[h],g?g+l:l,i,n);else if(h!=="parseTransform"){yr(h,l);continue}y||(h in a?w.push(h,0,a[h]):w.push(h,1,f||t[h])),s.push(h)}}P&&Yi(this)},render:function(t,e){if(e.tween._time||!Cr())for(var r=e._pt;r;)r.r(t,r.d),r=r._next;else e.styles.revert()},get:Dt,aliases:Pt,getSetter:function(t,e,r){var i=Pt[e];return i&&i.indexOf(",")<0&&(e=i),e in Et&&e!==yt&&(t._gsap.x||Dt(t,"x"))?r&&Nr===r?e==="scale"?xs:ys:(Nr=r||{})&&(e==="scale"?vs:Ts):t.style&&!pr(t.style[e])?ms:~e.indexOf("-")?gs:Pr(t,e)},core:{_removeProperty:be,_getMatrix:Dr}};st.utils.checkPrefix=he;st.core.getStyleSaver=$i;(function(u,t,e,r){var i=it(u+","+t+","+e,function(n){Et[n]=1});it(t,function(n){ut.units[n]="deg",Qi[n]=1}),Pt[i[13]]=u+","+t,it(r,function(n){var s=n.split(":");Pt[s[1]]=i[s[0]]})})("x,y,z,scale,scaleX,scaleY,xPercent,yPercent","rotation,rotationX,rotationY,skewX,skewY","transform,transformOrigin,svgOrigin,force3D,smoothOrigin,transformPerspective","0:translateX,1:translateY,2:translateZ,8:rotate,8:rotationZ,8:rotateZ,9:rotateX,10:rotateY");it("x,y,z,top,right,bottom,left,width,height,fontSize,padding,margin,perspective",function(u){ut.units[u]="px"});st.registerPlugin(tn);var en=st.registerPlugin(tn)||st;en.core.Tween;function Es(u){let t,e,r;return{c(){t=U("div"),this.h()},l(i){t=Y(i,"DIV",{class:!0,style:!0}),G(t).forEach(I),this.h()},h(){A(t,"class","absolute w-[2px] h-[2px] bg-blue-400 rounded-full rotate-45"),ke(t,"top",u[0].start.y+"px"),ke(t,"left",u[0].start.x+"px")},m(i,n){ie(i,t,n),e||(r=rn(u[1].call(null,t)),e=!0)},p(i,[n]){n&1&&ke(t,"top",i[0].start.y+"px"),n&1&&ke(t,"left",i[0].start.x+"px")},i:Ar,o:Ar,d(i){i&&I(t),e=!1,r()}}}function Rs(u,t,e){let{star:r}=t;const i=nn();function n(s){let a=r.end.x-r.start.x,o=r.end.y-r.start.y;en.to(s,{translateX:a,translateY:o,height:4,width:4,opacity:0,scaleY:100,duration:1.5,backgroundColor:"white",ease:"power1.inOut",onComplete:()=>{i("remove",r)}})}return u.$$set=s=>{"star"in s&&e(0,r=s.star)},[r,n]}class Fs extends Zr{constructor(t){super(),Jr(this,t,Rs,Es,ti,{star:0})}}function jr(u,t,e){const 
r=u.slice();return r[7]=t[e],r}function Hr(u){let t,e;return t=new Fs({props:{star:u[7]}}),t.$on("remove",u[4]),{c(){fn(t.$$.fragment)},l(r){ln(t.$$.fragment,r)},m(r,i){hn(t,r,i),e=!0},p(r,i){const n={};i&4&&(n.star=r[7]),t.$set(n)},i(r){e||(Ce(t.$$.fragment,r),e=!0)},o(r){Ze(t.$$.fragment,r),e=!1},d(r){_n(t,r)}}}function Kr(u){let t,e,r,i,n,s,a,o,f,l,c,_,d,p,h,m,x;return{c(){t=U("div"),e=U("iframe"),i=bt(),n=U("div"),s=bt(),a=U("div"),o=bt(),f=U("div"),l=bt(),c=U("div"),_=bt(),d=U("div"),p=U("p"),h=_t("ZQD to move SPACE to jump. "),m=U("a"),x=_t("Full shaders game demo"),this.h()},l(g){t=Y(g,"DIV",{class:!0});var y=G(t);e=Y(y,"IFRAME",{src:!0,frameborder:!0,title:!0,height:!0,width:!0,class:!0}),G(e).forEach(I),i=wt(y),n=Y(y,"DIV",{class:!0}),G(n).forEach(I),s=wt(y),a=Y(y,"DIV",{class:!0}),G(a).forEach(I),o=wt(y),f=Y(y,"DIV",{class:!0}),G(f).forEach(I),l=wt(y),c=Y(y,"DIV",{class:!0}),G(c).forEach(I),y.forEach(I),_=wt(g),d=Y(g,"DIV",{class:!0});var T=G(d);p=Y(T,"P",{});var v=G(p);h=ct(v,"ZQD to move SPACE to jump. "),m=Y(v,"A",{href:!0,target:!0,class:!0});var S=G(m);x=ct(S,"Full shaders game demo"),S.forEach(I),v.forEach(I),T.forEach(I),this.h()},h(){ei(e.src,r="smg/index.html")||A(e,"src",r),A(e,"frameborder","0"),A(e,"title","Spaceship Drift"),A(e,"height","512"),A(e,"width","768"),A(e,"class",""),A(n,"class","h-[3px] bg-[#0C0F19] w-[3px] z-10 absolute -top-[3px] -left-[3px]"),A(a,"class","h-[3px] bg-[#0C0F19] w-[3px] z-10 absolute -bottom-[3px] -left-[3px]"),A(f,"class","h-[3px] bg-[#0C0F19] w-[3px] z-10 absolute -top-[3px] -right-[3px]"),A(c,"class","h-[3px] bg-[#0C0F19] w-[3px] z-10 absolute -bottom-[3px] -right-[3px]"),A(t,"class","relative mt-6 border-slate-800 border-[3px]"),A(m,"href","https://x.com/HugoDuprez/status/1712093324528541831?s=20"),A(m,"target","_blank"),A(m,"class","underline"),A(d,"class","flex flex-row justify-center items-center text-[9px] mt-4 text-slate-500")},m(g,y){ie(g,t,y),D(t,e),D(t,i),D(t,n),D(t,s),D(t,a),D(t,o),D(t,f),D(t,l),D(t,c),ie(g,_,y),ie(g,d,y),D(d,p),D(p,h),D(p,m),D(m,x)},d(g){g&&I(t),g&&I(_),g&&I(d)}}}function Qr(u){let t,e,r,i,n,s,a=u[1]?"Copied!":"Copy the link for later",o,f,l;return{c(){t=U("div"),e=U("p"),r=_t("Looks like you're on mobile! Please visit on your laptop."),i=bt(),n=U("button"),s=U("p"),o=_t(a),this.h()},l(c){t=Y(c,"DIV",{class:!0});var _=G(t);e=Y(_,"P",{class:!0});var d=G(e);r=ct(d,"Looks like you're on mobile! 
Please visit on your laptop."),d.forEach(I),i=wt(_),n=Y(_,"BUTTON",{class:!0});var p=G(n);s=Y(p,"P",{class:!0});var h=G(s);o=ct(h,a),h.forEach(I),p.forEach(I),_.forEach(I),this.h()},h(){A(e,"class","text-xs text-slate-500 mt-6"),A(s,"class","mt-1"),A(n,"class","flex flex-row justify-center items-center px-3 py-5 text-xs w-full bg-slate-800 mt-6"),A(t,"class","flex flex-col justify-center items-center mt-10 text-center")},m(c,_){ie(c,t,_),D(t,e),D(e,r),D(t,i),D(t,n),D(n,s),D(s,o),f||(l=cn(n,"click",dn(u[3])),f=!0)},p(c,_){_&2&&a!==(a=c[1]?"Copied!":"Copy the link for later")&&pn(o,a)},d(c){c&&I(t),f=!1,l()}}}function zs(u){let t,e,r,i,n,s,a,o,f,l,c,_,d,p,h,m,x,g,y,T,v,S,P,w,b=u[2],k=[];for(let O=0;OZe(k[O],1,1,()=>{k[O]=null});let E=!u[0]&&Kr(),R=u[0]&&Qr(u);return{c(){t=U("div"),e=U("div"),r=U("img"),n=bt();for(let O=0;Oe(5,r=l));let i=!1,n=!1,s=[];un(()=>{window.innerWidth<768&&e(0,i=!0),setInterval(()=>{o()},5e3)});function a(){navigator.clipboard.writeText(r.url.toString()),e(1,n=!0)}function o(){const l=Is(800,1500),c=135*(Math.PI/180),_=Math.random()*window.innerWidth,d=Math.random()*window.innerHeight,p=_+Math.cos(c)*l,h=d+Math.sin(c)*l;e(2,s=[...s,{start:{x:_,y:d},end:{x:p,y:h}}])}function f(l){e(2,s=s.filter(c=>c!==l.detail))}return[i,n,s,a,f]}class Ns extends Zr{constructor(t){super(),Jr(this,t,Ls,zs,ti,{})}}export{Ns as component};
diff --git a/spaces/ICML2022/OFA/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py b/spaces/ICML2022/OFA/fairseq/examples/latent_depth/latent_depth_src/models/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ICML2022/OFA/fairseq/examples/translation_moe/translation_moe_src/__init__.py b/spaces/ICML2022/OFA/fairseq/examples/translation_moe/translation_moe_src/__init__.py
deleted file mode 100644
index c0abe53e973b4bb31cfb062708965d002c79b6e7..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/translation_moe/translation_moe_src/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from . import translation_moe # noqa
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py
deleted file mode 100644
index e7465bc889fd1ba6ca2c60905a2eb6ff5cc62b9d..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py
+++ /dev/null
@@ -1,488 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Tuple, List
-
-import torch
-import torch.nn.functional as F
-from fairseq.models import FairseqEncoder
-from fairseq.models.speech_to_text import (
- ConvTransformerEncoder,
-)
-from fairseq.models.speech_to_text.utils import attention_suppression
-from fairseq.models.speech_to_text.utils import (
- lengths_to_encoder_padding_mask,
- segments_to_sequence,
- sequence_to_segments,
-)
-from fairseq.modules import MultiheadAttention, TransformerEncoderLayer
-from torch import nn, Tensor
-
-# ------------------------------------------------------------------------------
-# AugmentedMemoryConvTransformerEncoder
-# ------------------------------------------------------------------------------
-
-
-class AugmentedMemoryConvTransformerEncoder(ConvTransformerEncoder):
- def __init__(self, args):
- super().__init__(args)
-
- args.encoder_stride = self.stride()
-
- self.left_context = args.left_context // args.encoder_stride
-
- self.right_context = args.right_context // args.encoder_stride
-
- self.left_context_after_stride = args.left_context // args.encoder_stride
- self.right_context_after_stride = args.right_context // args.encoder_stride
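- # The contexts are specified in input frames; dividing by the conv stride
- # converts them to frames after subsampling.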
-
- self.transformer_layers = nn.ModuleList([])
- self.transformer_layers.extend(
- [
- AugmentedMemoryTransformerEncoderLayer(args)
- for i in range(args.encoder_layers)
- ]
- )
-
- def stride(self):
- # Hard-coded for now; it should be inferred from the conv layers in the future.
- stride = 4
- return stride
-
- def forward(self, src_tokens, src_lengths, states=None):
- """Encode input sequence.
- :param torch.Tensor src_tokens: input tensor (batch, time, feature)
- :param torch.Tensor src_lengths: lengths of the input sequences
- :param states: optional per-layer streaming states (memory banks etc.)
- :return: last layer's encoder states, output lengths, and updated states
- :rtype Tuple[torch.Tensor, torch.Tensor, List[dict]]:
- """
- bsz, max_seq_len, _ = src_tokens.size()
- x = (
- src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
- .transpose(1, 2)
- .contiguous()
- )
- x = self.conv(x)
- bsz, _, output_seq_len, _ = x.size()
- x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
- x = self.out(x)
- x = self.embed_scale * x
-
- subsampling_factor = 1.0 * max_seq_len / output_seq_len
- input_lengths = torch.max(
- (src_lengths.float() / subsampling_factor).ceil().long(),
- x.size(0) * src_lengths.new_ones([src_lengths.size(0)]).long(),
- )
-
- encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
- input_lengths, batch_first=True
- )
-
- # TODO: fix positional embedding
- positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
-
- x += positions
- x = F.dropout(x, p=self.dropout, training=self.training)
-
- # State to store memory banks etc.
- if states is None:
- states = [
- {"memory_banks": None, "encoder_states": None}
- for i in range(len(self.transformer_layers))
- ]
-
- for i, layer in enumerate(self.transformer_layers):
- # x size:
- # (self.left_size + self.segment_size + self.right_size)
- # / self.stride, num_heads, dim
- # TODO: Consider mask here
- x = layer(x, states[i])
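- # Cache only the central segment for the next chunk: the left/right context
- # frames are stripped before storing this layer's encoder states.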
- states[i]["encoder_states"] = x[
- self.left_context_after_stride : -self.right_context_after_stride
- ]
-
- lengths = (
- (
- ~encoder_padding_mask[
- :, self.left_context_after_stride : -self.right_context_after_stride
- ]
- )
- .sum(dim=1, keepdim=True)
- .long()
- )
-
- return states[-1]["encoder_states"], lengths, states
-
-
-# ------------------------------------------------------------------------------
-# AugmentedMemoryTransformerEncoderLayer
-# ------------------------------------------------------------------------------
-class AugmentedMemoryTransformerEncoderLayer(TransformerEncoderLayer):
- def __init__(self, args):
- super().__init__(args)
-
- self.left_context = args.left_context // args.encoder_stride
- self.right_context = args.right_context // args.encoder_stride
-
- def forward(self, x, state):
-
- length, batch_size, x_dim = x.size()
-
- residual = x
-
- if self.normalize_before:
- x = self.self_attn_layer_norm(x)
-
- # init_state
- if state.get("memory_banks", None) is None:
- state["memory_banks"] = []
-
- # TODO: research a new sum_query method
- seg_start = self.left_context
- seg_end = length - self.right_context
- if seg_start < seg_end:
- summarization_query = torch.mean(x[seg_start:seg_end], keepdim=True, dim=0)
- else:
- summarization_query = x.new_zeros(1, batch_size, x_dim)
-
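- # The mean over the central segment acts as a summarization query; the
- # attention module turns its output into the next memory bank.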
- x = torch.cat([x, summarization_query], dim=0)
-
- x = self.self_attn(input_and_summary=x, state=state)
-
- x = self.dropout_module(x)
- x = residual + x
-
- if not self.normalize_before:
- x = self.self_attn_layer_norm(x)
-
- residual = x
- if self.normalize_before:
- x = self.final_layer_norm(x)
-
- x = self.activation_fn(self.fc1(x))
- x = self.activation_dropout_module(x)
- x = self.fc2(x)
- x = self.dropout_module(x)
- x = residual + x
- if not self.normalize_before:
- x = self.final_layer_norm(x)
-
- return x
-
- def build_self_attention(self, embed_dim, args):
- return AugmentedMemoryMultiheadAttention(
- embed_dim=embed_dim,
- num_heads=args.encoder_attention_heads,
- dropout=args.attention_dropout,
- self_attention=True,
- q_noise=self.quant_noise,
- qn_block_size=self.quant_noise_block_size,
- tanh_on_mem=True,
- max_memory_size=args.max_memory_size,
- )
-
-
-# ------------------------------------------------------------------------------
-# AugmentedMemoryMultiheadAttention
-# ------------------------------------------------------------------------------
-class AugmentedMemoryMultiheadAttention(MultiheadAttention):
- """
- Augmented Memory Attention from
- Streaming Transformer-based Acoustic Models
- Using Self-attention with Augmented Memory
- https://arxiv.org/abs/2005.08042
- """
-
- def __init__(
- self,
- embed_dim,
- num_heads,
- kdim=None,
- vdim=None,
- dropout=0.0,
- bias=True,
- add_bias_kv=False,
- add_zero_attn=False,
- self_attention=False,
- encoder_decoder_attention=False,
- q_noise=0.0,
- qn_block_size=8,
- tanh_on_mem=False,
- memory_dim=None,
- std_scale=0.5, # 0.5 based on https://arxiv.org/abs/2005.09137
- max_memory_size=-1,
- disable_mem_on_mem_attn=True,
- ):
- super().__init__(
- embed_dim,
- num_heads,
- kdim,
- vdim,
- dropout,
- bias,
- add_bias_kv,
- add_zero_attn,
- self_attention,
- encoder_decoder_attention,
- q_noise,
- qn_block_size,
- )
-
- self.memory_dim = memory_dim if memory_dim is not None else embed_dim
- self.std_scale = std_scale
- self.disable_mem_on_mem_attn = disable_mem_on_mem_attn
-
- # This Operator was used for factorization in PySpeech
- self.v2e = lambda x: x
-
- if tanh_on_mem:
- self.squash_mem = torch.tanh
- self.nonlinear_squash_mem = True
- else:
- self.squash_mem = lambda x: x
- self.nonlinear_squash_mem = False
-
- self.max_memory_size = max_memory_size
-
- def forward(self, input_and_summary, state):
- """
- input_and_summary: encoder states of the current segment (with left/right
- context), plus one summarization query appended at the end
-
- """
-
- length, batch_size, _ = input_and_summary.shape
- length = length - 1 # exclude the summarization query (last index)
-
- memory = state["memory_banks"]
- # TODO: positional embedding on memory
-
- if self.max_memory_size > -1 and len(memory) > self.max_memory_size:
- # TODO: need to fix here
- if self.max_memory_size == 0:
- memory = memory.new_zeros(1, memory.size(1), self.memory_dim)
- else:
- memory = memory[-self.max_memory_size :]
-
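- # Keys/values are built from the cached memory banks plus the current segment
- # (without the summarization query); the query side keeps the summarization
- # vector so that its output can become the next memory bank.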
- memory_and_input = torch.cat(memory + [input_and_summary[:-1]], dim=0)
- input_and_sum_query = input_and_summary
-
- q = self.q_proj(self.v2e(input_and_sum_query))
- k = self.k_proj(self.v2e(memory_and_input))
- v = self.v_proj(self.v2e(memory_and_input))
-
- q = (
- q.contiguous()
- .view(-1, batch_size * self.num_heads, self.head_dim)
- .transpose(0, 1)
- * self.scaling
- )
- k = (
- k.contiguous()
- .view(-1, batch_size * self.num_heads, self.head_dim)
- .transpose(0, 1)
- )
-
- v = (
- v.contiguous()
- .view(-1, batch_size * self.num_heads, self.head_dim)
- .transpose(0, 1)
- )
-
- attention_weights = torch.bmm(q, k.transpose(1, 2))
-
- if self.disable_mem_on_mem_attn:
- attention_weights = self.suppress_mem_on_mem_attention(
- batch_size, self.num_heads, len(memory), attention_weights
- )
-
- if self.std_scale is not None:
- attention_weights = attention_suppression(attention_weights, self.std_scale)
-
- assert list(attention_weights.shape) == [
- batch_size * self.num_heads,
- length + 1,
- length + len(memory),
- ]
-
- attention_weights = torch.nn.functional.softmax(
- attention_weights.float(), dim=-1
- ).type_as(attention_weights)
-
- attention_probs = self.dropout_module(attention_weights)
-
- # [B*n_head, T+1, T+mem] x [B*n_head, T+mem, d_head] -> [B*n_head, T+1, d_head]
- attention = torch.bmm(attention_probs, v)
-
- assert list(attention.shape) == [
- batch_size * self.num_heads,
- length + 1,
- self.head_dim,
- ]
-
- attention = (
- attention.transpose(0, 1)
- .contiguous()
- .view(length + 1, batch_size, self.embed_dim)
- )
-
- output_and_memory = self.out_proj(attention)
-
- next_m = output_and_memory[-1:]
- next_m = self.squash_mem(next_m)
- output = output_and_memory[:-1]
-
- state["memory_banks"].append(next_m)
-
- return output
-
- def suppress_mem_on_mem_attention(
- self, B: int, num_heads: int, mem_size: int, attention_weight: Tensor
- ):
- """
- Arguments:
- - B: batch size
- - num_heads: number of attention heads
- - mem_size: size of memory bank
- - attention_weight: a [B*num_heads, T + 1, T + mem_size] tensor
-
- Return:
- modified attention_weight with [B*num_heads, -1, :mem_size] = -inf
- """
- attention_weight[:, -1, :mem_size] = float("-inf")
- return attention_weight
-
-
-# ------------------------------------------------------------------------------
-# SequenceEncoder
-# ------------------------------------------------------------------------------
-class SequenceEncoder(FairseqEncoder):
- """
- SequenceEncoder encodes sequences.
-
- More specifically, `src_tokens` and `src_lengths` in `forward()` should
- describe a batch of "complete" sequences rather than segments.
-
- Segment-by-segment inference can be triggered by `segment_size`:
- 1) `segment_size` is None:
- SequenceEncoder treats the input sequence as one single segment.
- 2) `segment_size` is not None (some int instead):
- SequenceEncoder does the following:
- 1. breaks the input sequence into several segments
- 2. runs inference on each segment and collects the outputs
- 3. concatenates the segment outputs into the output sequence.
- Note that `segment_size` should not include the additional left/right
- context. For example, for LC-BLSTM inference with a middle chunk size of
- 100 and a right context of 20, `segment_size` should be 100.
- """
-
- def __init__(self, args, module):
- super().__init__(None)
-
- self.module = module
- self.input_time_axis = 1
- self.output_time_axis = 0
- self.segment_size = args.segment_size
- self.left_context = args.left_context
- self.right_context = args.right_context
-
- def forward(
- self,
- src_tokens: Tensor,
- src_lengths: Tensor,
- states=None,
- ):
-
- seg_src_tokens_lengths = sequence_to_segments(
- sequence=src_tokens,
- time_axis=self.input_time_axis,
- lengths=src_lengths,
- segment_size=self.segment_size,
- extra_left_context=self.left_context,
- extra_right_context=self.right_context,
- )
-
- seg_encoder_states_lengths: List[Tuple[Tensor, Tensor]] = []
-
- for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
- (seg_encoder_states, seg_enc_lengths, states) = self.module(
- seg_src_tokens,
- seg_src_lengths,
- states=states,
- )
-
- seg_encoder_states_lengths.append((seg_encoder_states, seg_enc_lengths))
-
- encoder_out, enc_lengths = segments_to_sequence(
- segments=seg_encoder_states_lengths, time_axis=self.output_time_axis
- )
-
- encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
- enc_lengths, batch_first=True
- )
-
- if not encoder_padding_mask.any():
- encoder_padding_mask = None
-
- return {
- "encoder_out": [encoder_out],
- "encoder_padding_mask": [encoder_padding_mask],
- "encoder_embedding": [],
- "encoder_states": [states],
- "src_tokens": [],
- "src_lengths": [],
- }
-
- def incremental_encode(
- self,
- seg_src_tokens: Tensor,
- seg_src_lengths: Tensor,
- states=None,
- ):
- """
- Unlike forward(), this function takes already-segmented speech as input
- and appends the new encoder states to the previous states.
- """
- (seg_encoder_states, seg_enc_lengths, states) = self.module(
- seg_src_tokens,
- seg_src_lengths,
- states=states,
- )
- return seg_encoder_states, seg_enc_lengths, states
-
-
-# ------------------------------------------------------------------------------
-# Augmented memory model decorator
-# ------------------------------------------------------------------------------
-def augmented_memory(klass):
- class StreamSeq2SeqModel(klass):
- @staticmethod
- def add_args(parser):
- super(StreamSeq2SeqModel, StreamSeq2SeqModel).add_args(parser)
- parser.add_argument(
- "--segment-size", type=int, required=True, help="Length of the segment."
- )
- parser.add_argument(
- "--left-context",
- type=int,
- default=0,
- help="Left context for the segment.",
- )
- parser.add_argument(
- "--right-context",
- type=int,
- default=0,
- help="Right context for the segment.",
- )
- parser.add_argument(
- "--max-memory-size",
- type=int,
- default=-1,
- help="Right context for the segment.",
- )
-
- StreamSeq2SeqModel.__name__ = klass.__name__
- return StreamSeq2SeqModel
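-
-
-# Usage sketch (illustrative; `MyStreamingModel` is a placeholder name): the
-# decorator wraps a ConvTransformer-style model class, keeps its name, and adds
-# the streaming CLI arguments defined above.
-#
-#     @augmented_memory
-#     class MyStreamingModel(...):  # any ConvTransformer-style fairseq model
-#         ...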
diff --git a/spaces/IntelligenzaArtificiale/ChatGLM-6B-Int4-API-OpenAI-Compatible/Dockerfile b/spaces/IntelligenzaArtificiale/ChatGLM-6B-Int4-API-OpenAI-Compatible/Dockerfile
deleted file mode 100644
index e68afebed9e8f941e10aedff43c9b5314d6be596..0000000000000000000000000000000000000000
--- a/spaces/IntelligenzaArtificiale/ChatGLM-6B-Int4-API-OpenAI-Compatible/Dockerfile
+++ /dev/null
@@ -1,22 +0,0 @@
-FROM python:3.10
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-RUN useradd -m -u 1000 user
-USER user
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-WORKDIR $HOME/app
-
-COPY --chown=user . $HOME/app
-
-ENV TRANSFORMERS_CACHE=./.cache/
-
-EXPOSE 8000
-
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000","--reload","--app-dir","."]
diff --git a/spaces/JackBAI/MassageMateNLP/models/bert/force/README.md b/spaces/JackBAI/MassageMateNLP/models/bert/force/README.md
deleted file mode 100644
index 70832a58813895f8014a318eeca3f6a30c1a1f71..0000000000000000000000000000000000000000
--- a/spaces/JackBAI/MassageMateNLP/models/bert/force/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-license: apache-2.0
-tags:
-- generated_from_trainer
-metrics:
-- accuracy
-model-index:
-- name: force
- results: []
----
-
-
-
-# force
-
-This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on an unknown dataset.
-It achieves the following results on the evaluation set:
-- Loss: 0.2742
-- Accuracy: 0.9134
-
-## Model description
-
-More information needed
-
-## Intended uses & limitations
-
-More information needed
-
-## Training and evaluation data
-
-More information needed
-
-## Training procedure
-
-### Training hyperparameters
-
-The following hyperparameters were used during training:
-- learning_rate: 2e-05
-- train_batch_size: 128
-- eval_batch_size: 8
-- seed: 42
-- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
-- lr_scheduler_type: linear
-- num_epochs: 12.0
-
-### Training results
-
-
-
-### Framework versions
-
-- Transformers 4.25.1
-- Pytorch 1.11.0+cu113
-- Datasets 2.8.0
-- Tokenizers 0.13.2
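
The removed model card describes a sequence-classification fine-tune of `albert-base-v2`. A minimal loading sketch with `transformers` is shown below; the local checkpoint path is hypothetical, and the label names would come from the deleted model's config.

```python
from transformers import pipeline

# "models/bert/force" is a hypothetical local path to the deleted checkpoint.
clf = pipeline("text-classification", model="models/bert/force")
print(clf("Please press a little harder on the shoulders."))
```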
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/gui_v1.py b/spaces/Kangarroar/ApplioRVC-Inference/gui_v1.py
deleted file mode 100644
index becba80cdda6987c1ad70c89e68a4e3a4da44639..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/gui_v1.py
+++ /dev/null
@@ -1,708 +0,0 @@
-import os
-import logging
-import sys
-from dotenv import load_dotenv
-
-load_dotenv()
-
-os.environ["OMP_NUM_THREADS"] = "4"
-if sys.platform == "darwin":
- os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-import multiprocessing
-
-logger = logging.getLogger(__name__)
-
-
-class Harvest(multiprocessing.Process):
- def __init__(self, inp_q, opt_q):
- multiprocessing.Process.__init__(self)
- self.inp_q = inp_q
- self.opt_q = opt_q
-
- def run(self):
- import numpy as np
- import pyworld
-
- while 1:
- idx, x, res_f0, n_cpu, ts = self.inp_q.get()
- f0, t = pyworld.harvest(
- x.astype(np.double),
- fs=16000,
- f0_ceil=1100,
- f0_floor=50,
- frame_period=10,
- )
- res_f0[idx] = f0
- if len(res_f0.keys()) >= n_cpu:
- self.opt_q.put(ts)
-
-
-if __name__ == "__main__":
- import json
- import multiprocessing
- import re
- import threading
- import time
- import traceback
- from multiprocessing import Queue, cpu_count
- from queue import Empty
-
- import librosa
- from tools.torchgate import TorchGate
- import numpy as np
- import PySimpleGUI as sg
- import sounddevice as sd
- import torch
- import torch.nn.functional as F
- import torchaudio.transforms as tat
-
- import tools.rvc_for_realtime as rvc_for_realtime
- from i18n.i18n import I18nAuto
-
- i18n = I18nAuto()
- device = rvc_for_realtime.config.device
- # device = torch.device(
- # "cuda"
- # if torch.cuda.is_available()
- # else ("mps" if torch.backends.mps.is_available() else "cpu")
- # )
- current_dir = os.getcwd()
- inp_q = Queue()
- opt_q = Queue()
- n_cpu = min(cpu_count(), 8)
- for _ in range(n_cpu):
- Harvest(inp_q, opt_q).start()
-
- class GUIConfig:
- def __init__(self) -> None:
- self.pth_path: str = ""
- self.index_path: str = ""
- self.pitch: int = 0
- self.samplerate: int = 40000
- self.block_time: float = 1.0 # s
- self.buffer_num: int = 1
- self.threhold: int = -60
- self.crossfade_time: float = 0.04
- self.extra_time: float = 2.0
- self.I_noise_reduce = False
- self.O_noise_reduce = False
- self.rms_mix_rate = 0.0
- self.index_rate = 0.3
- self.n_cpu = min(n_cpu, 6)
- self.f0method = "harvest"
- self.sg_input_device = ""
- self.sg_output_device = ""
-
- class GUI:
- def __init__(self) -> None:
- self.config = GUIConfig()
- self.flag_vc = False
-
- self.launcher()
-
- def load(self):
- input_devices, output_devices, _, _ = self.get_devices()
- try:
- with open("configs/config.json", "r") as j:
- data = json.load(j)
- data["pm"] = data["f0method"] == "pm"
- data["harvest"] = data["f0method"] == "harvest"
- data["crepe"] = data["f0method"] == "crepe"
- data["rmvpe"] = data["f0method"] == "rmvpe"
- except:
- with open("configs/config.json", "w") as j:
- data = {
- "pth_path": " ",
- "index_path": " ",
- "sg_input_device": input_devices[sd.default.device[0]],
- "sg_output_device": output_devices[sd.default.device[1]],
- "threhold": "-60",
- "pitch": "0",
- "index_rate": "0",
- "rms_mix_rate": "0",
- "block_time": "0.25",
- "crossfade_length": "0.04",
- "extra_time": "2",
- "f0method": "rmvpe",
- }
- data["pm"] = data["f0method"] == "pm"
- data["harvest"] = data["f0method"] == "harvest"
- data["crepe"] = data["f0method"] == "crepe"
- data["rmvpe"] = data["f0method"] == "rmvpe"
- return data
-
- def launcher(self):
- data = self.load()
- sg.theme("LightBlue3")
- input_devices, output_devices, _, _ = self.get_devices()
- layout = [
- [
- sg.Frame(
- title=i18n("加载模型"),
- layout=[
- [
- sg.Input(
- default_text=data.get("pth_path", ""),
- key="pth_path",
- ),
- sg.FileBrowse(
- i18n("选择.pth文件"),
- initial_folder=os.path.join(
- os.getcwd(), "assets/weights"
- ),
- file_types=((". pth"),),
- ),
- ],
- [
- sg.Input(
- default_text=data.get("index_path", ""),
- key="index_path",
- ),
- sg.FileBrowse(
- i18n("选择.index文件"),
- initial_folder=os.path.join(os.getcwd(), "logs"),
- file_types=((". index"),),
- ),
- ],
- ],
- )
- ],
- [
- sg.Frame(
- layout=[
- [
- sg.Text(i18n("输入设备")),
- sg.Combo(
- input_devices,
- key="sg_input_device",
- default_value=data.get("sg_input_device", ""),
- ),
- ],
- [
- sg.Text(i18n("输出设备")),
- sg.Combo(
- output_devices,
- key="sg_output_device",
- default_value=data.get("sg_output_device", ""),
- ),
- ],
- [sg.Button(i18n("重载设备列表"), key="reload_devices")],
- ],
- title=i18n("音频设备(请使用同种类驱动)"),
- )
- ],
- [
- sg.Frame(
- layout=[
- [
- sg.Text(i18n("响应阈值")),
- sg.Slider(
- range=(-60, 0),
- key="threhold",
- resolution=1,
- orientation="h",
- default_value=data.get("threhold", "-60"),
- enable_events=True,
- ),
- ],
- [
- sg.Text(i18n("音调设置")),
- sg.Slider(
- range=(-24, 24),
- key="pitch",
- resolution=1,
- orientation="h",
- default_value=data.get("pitch", "0"),
- enable_events=True,
- ),
- ],
- [
- sg.Text(i18n("Index Rate")),
- sg.Slider(
- range=(0.0, 1.0),
- key="index_rate",
- resolution=0.01,
- orientation="h",
- default_value=data.get("index_rate", "0"),
- enable_events=True,
- ),
- ],
- [
- sg.Text(i18n("响度因子")),
- sg.Slider(
- range=(0.0, 1.0),
- key="rms_mix_rate",
- resolution=0.01,
- orientation="h",
- default_value=data.get("rms_mix_rate", "0"),
- enable_events=True,
- ),
- ],
- [
- sg.Text(i18n("音高算法")),
- sg.Radio(
- "pm",
- "f0method",
- key="pm",
- default=data.get("pm", "") == True,
- enable_events=True,
- ),
- sg.Radio(
- "harvest",
- "f0method",
- key="harvest",
- default=data.get("harvest", "") == True,
- enable_events=True,
- ),
- sg.Radio(
- "crepe",
- "f0method",
- key="crepe",
- default=data.get("crepe", "") == True,
- enable_events=True,
- ),
- sg.Radio(
- "rmvpe",
- "f0method",
- key="rmvpe",
- default=data.get("rmvpe", "") == True,
- enable_events=True,
- ),
- ],
- ],
- title=i18n("常规设置"),
- ),
- sg.Frame(
- layout=[
- [
- sg.Text(i18n("采样长度")),
- sg.Slider(
- range=(0.05, 2.4),
- key="block_time",
- resolution=0.01,
- orientation="h",
- default_value=data.get("block_time", "0.25"),
- enable_events=True,
- ),
- ],
- [
- sg.Text(i18n("harvest进程数")),
- sg.Slider(
- range=(1, n_cpu),
- key="n_cpu",
- resolution=1,
- orientation="h",
- default_value=data.get(
- "n_cpu", min(self.config.n_cpu, n_cpu)
- ),
- enable_events=True,
- ),
- ],
- [
- sg.Text(i18n("淡入淡出长度")),
- sg.Slider(
- range=(0.01, 0.15),
- key="crossfade_length",
- resolution=0.01,
- orientation="h",
- default_value=data.get("crossfade_length", "0.04"),
- enable_events=True,
- ),
- ],
- [
- sg.Text(i18n("额外推理时长")),
- sg.Slider(
- range=(0.05, 5.00),
- key="extra_time",
- resolution=0.01,
- orientation="h",
- default_value=data.get("extra_time", "2.0"),
- enable_events=True,
- ),
- ],
- [
- sg.Checkbox(
- i18n("输入降噪"),
- key="I_noise_reduce",
- enable_events=True,
- ),
- sg.Checkbox(
- i18n("输出降噪"),
- key="O_noise_reduce",
- enable_events=True,
- ),
- ],
- ],
- title=i18n("性能设置"),
- ),
- ],
- [
- sg.Button(i18n("开始音频转换"), key="start_vc"),
- sg.Button(i18n("停止音频转换"), key="stop_vc"),
- sg.Text(i18n("推理时间(ms):")),
- sg.Text("0", key="infer_time"),
- ],
- ]
- self.window = sg.Window("RVC - GUI", layout=layout, finalize=True)
- self.event_handler()
-
- def event_handler(self):
- while True:
- event, values = self.window.read()
- if event == sg.WINDOW_CLOSED:
- self.flag_vc = False
- exit()
- if event == "reload_devices":
- prev_input = self.window["sg_input_device"].get()
- prev_output = self.window["sg_output_device"].get()
- input_devices, output_devices, _, _ = self.get_devices(update=True)
- if prev_input not in input_devices:
- self.config.sg_input_device = input_devices[0]
- else:
- self.config.sg_input_device = prev_input
- self.window["sg_input_device"].Update(values=input_devices)
- self.window["sg_input_device"].Update(
- value=self.config.sg_input_device
- )
- if prev_output not in output_devices:
- self.config.sg_output_device = output_devices[0]
- else:
- self.config.sg_output_device = prev_output
- self.window["sg_output_device"].Update(values=output_devices)
- self.window["sg_output_device"].Update(
- value=self.config.sg_output_device
- )
- if event == "start_vc" and self.flag_vc == False:
- if self.set_values(values) == True:
- logger.info("Use CUDA: %s", torch.cuda.is_available())
- self.start_vc()
- settings = {
- "pth_path": values["pth_path"],
- "index_path": values["index_path"],
- "sg_input_device": values["sg_input_device"],
- "sg_output_device": values["sg_output_device"],
- "threhold": values["threhold"],
- "pitch": values["pitch"],
- "rms_mix_rate": values["rms_mix_rate"],
- "index_rate": values["index_rate"],
- "block_time": values["block_time"],
- "crossfade_length": values["crossfade_length"],
- "extra_time": values["extra_time"],
- "n_cpu": values["n_cpu"],
- "f0method": ["pm", "harvest", "crepe", "rmvpe"][
- [
- values["pm"],
- values["harvest"],
- values["crepe"],
- values["rmvpe"],
- ].index(True)
- ],
- }
- with open("configs/config.json", "w") as j:
- json.dump(settings, j)
- if event == "stop_vc" and self.flag_vc == True:
- self.flag_vc = False
-
- # Parameter hot update
- if event == "threhold":
- self.config.threhold = values["threhold"]
- elif event == "pitch":
- self.config.pitch = values["pitch"]
- if hasattr(self, "rvc"):
- self.rvc.change_key(values["pitch"])
- elif event == "index_rate":
- self.config.index_rate = values["index_rate"]
- if hasattr(self, "rvc"):
- self.rvc.change_index_rate(values["index_rate"])
- elif event == "rms_mix_rate":
- self.config.rms_mix_rate = values["rms_mix_rate"]
- elif event in ["pm", "harvest", "crepe", "rmvpe"]:
- self.config.f0method = event
- elif event == "I_noise_reduce":
- self.config.I_noise_reduce = values["I_noise_reduce"]
- elif event == "O_noise_reduce":
- self.config.O_noise_reduce = values["O_noise_reduce"]
- elif event != "start_vc" and self.flag_vc == True:
- # Other parameters do not support hot update
- self.flag_vc = False
-
- def set_values(self, values):
- if len(values["pth_path"].strip()) == 0:
- sg.popup(i18n("请选择pth文件"))
- return False
- if len(values["index_path"].strip()) == 0:
- sg.popup(i18n("请选择index文件"))
- return False
- pattern = re.compile("[^\x00-\x7F]+")
- if pattern.findall(values["pth_path"]):
- sg.popup(i18n("pth文件路径不可包含中文"))
- return False
- if pattern.findall(values["index_path"]):
- sg.popup(i18n("index文件路径不可包含中文"))
- return False
- self.set_devices(values["sg_input_device"], values["sg_output_device"])
- self.config.pth_path = values["pth_path"]
- self.config.index_path = values["index_path"]
- self.config.threhold = values["threhold"]
- self.config.pitch = values["pitch"]
- self.config.block_time = values["block_time"]
- self.config.crossfade_time = values["crossfade_length"]
- self.config.extra_time = values["extra_time"]
- self.config.I_noise_reduce = values["I_noise_reduce"]
- self.config.O_noise_reduce = values["O_noise_reduce"]
- self.config.rms_mix_rate = values["rms_mix_rate"]
- self.config.index_rate = values["index_rate"]
- self.config.n_cpu = values["n_cpu"]
- self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][
- [
- values["pm"],
- values["harvest"],
- values["crepe"],
- values["rmvpe"],
- ].index(True)
- ]
- return True
-
- def start_vc(self):
- torch.cuda.empty_cache()
- self.flag_vc = True
- self.rvc = rvc_for_realtime.RVC(
- self.config.pitch,
- self.config.pth_path,
- self.config.index_path,
- self.config.index_rate,
- self.config.n_cpu,
- inp_q,
- opt_q,
- device,
- self.rvc if hasattr(self, "rvc") else None
- )
- self.config.samplerate = self.rvc.tgt_sr
- self.zc = self.rvc.tgt_sr // 100
- self.block_frame = int(np.round(self.config.block_time * self.config.samplerate / self.zc)) * self.zc
- self.block_frame_16k = 160 * self.block_frame // self.zc
- self.crossfade_frame = int(np.round(self.config.crossfade_time * self.config.samplerate / self.zc)) * self.zc
- self.sola_search_frame = self.zc
- self.extra_frame = int(np.round(self.config.extra_time * self.config.samplerate / self.zc)) * self.zc
- self.input_wav: torch.Tensor = torch.zeros(
- self.extra_frame
- + self.crossfade_frame
- + self.sola_search_frame
- + self.block_frame,
- device=device,
- dtype=torch.float32,
- )
- self.input_wav_res: torch.Tensor= torch.zeros(160 * self.input_wav.shape[0] // self.zc, device=device,dtype=torch.float32)
- self.pitch: np.ndarray = np.zeros(
- self.input_wav.shape[0] // self.zc,
- dtype="int32",
- )
- self.pitchf: np.ndarray = np.zeros(
- self.input_wav.shape[0] // self.zc,
- dtype="float64",
- )
- self.sola_buffer: torch.Tensor = torch.zeros(
- self.crossfade_frame, device=device, dtype=torch.float32
- )
- self.nr_buffer: torch.Tensor = self.sola_buffer.clone()
- self.output_buffer: torch.Tensor = self.input_wav.clone()
- self.res_buffer: torch.Tensor = torch.zeros(2 * self.zc, device=device,dtype=torch.float32)
- self.valid_rate = 1 - (self.extra_frame - 1) / self.input_wav.shape[0]
- self.fade_in_window: torch.Tensor = (
- torch.sin(
- 0.5
- * np.pi
- * torch.linspace(
- 0.0,
- 1.0,
- steps=self.crossfade_frame,
- device=device,
- dtype=torch.float32,
- )
- )
- ** 2
- )
- self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
- self.resampler = tat.Resample(
- orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32
- ).to(device)
- self.tg = TorchGate(sr=self.config.samplerate, n_fft=4*self.zc, prop_decrease=0.9).to(device)
- thread_vc = threading.Thread(target=self.soundinput)
- thread_vc.start()
-
- def soundinput(self):
- """
-            Accept audio input from the sound device stream.
- """
- channels = 1 if sys.platform == "darwin" else 2
- with sd.Stream(
- channels=channels,
- callback=self.audio_callback,
- blocksize=self.block_frame,
- samplerate=self.config.samplerate,
- dtype="float32",
- ):
- while self.flag_vc:
- time.sleep(self.config.block_time)
- logger.debug("Audio block passed.")
- logger.debug("ENDing VC")
-
- def audio_callback(
- self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
- ):
- """
- 音频处理
- """
- start_time = time.perf_counter()
- indata = librosa.to_mono(indata.T)
- if self.config.threhold > -60:
- rms = librosa.feature.rms(
- y=indata, frame_length=4*self.zc, hop_length=self.zc
- )
- db_threhold = (
- librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold
- )
- for i in range(db_threhold.shape[0]):
- if db_threhold[i]:
- indata[i * self.zc : (i + 1) * self.zc] = 0
- self.input_wav[: -self.block_frame] = self.input_wav[self.block_frame :].clone()
- self.input_wav[-self.block_frame: ] = torch.from_numpy(indata).to(device)
- self.input_wav_res[ : -self.block_frame_16k] = self.input_wav_res[self.block_frame_16k :].clone()
- # input noise reduction and resampling
- if self.config.I_noise_reduce:
- input_wav = self.input_wav[-self.crossfade_frame -self.block_frame-2*self.zc: ]
- input_wav = self.tg(input_wav.unsqueeze(0), self.input_wav.unsqueeze(0))[0, 2*self.zc:]
- input_wav[: self.crossfade_frame] *= self.fade_in_window
- input_wav[: self.crossfade_frame] += self.nr_buffer * self.fade_out_window
- self.nr_buffer[:] = input_wav[-self.crossfade_frame: ]
- input_wav = torch.cat((self.res_buffer[:], input_wav[: self.block_frame]))
- self.res_buffer[:] = input_wav[-2*self.zc: ]
- self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(input_wav)[160: ]
- else:
- self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(self.input_wav[-self.block_frame-2*self.zc: ])[160: ]
- # infer
- f0_extractor_frame = self.block_frame_16k + 800
- if self.config.f0method == 'rmvpe':
- f0_extractor_frame = 5120 * ((f0_extractor_frame - 1) // 5120 + 1)
- infer_wav = self.rvc.infer(
- self.input_wav_res,
- self.input_wav_res[-f0_extractor_frame :].cpu().numpy(),
- self.block_frame_16k,
- self.valid_rate,
- self.pitch,
- self.pitchf,
- self.config.f0method,
- )
- infer_wav = infer_wav[
- -self.crossfade_frame - self.sola_search_frame - self.block_frame :
- ]
- # output noise reduction
- if self.config.O_noise_reduce:
- self.output_buffer[: -self.block_frame] = self.output_buffer[self.block_frame :].clone()
- self.output_buffer[-self.block_frame: ] = infer_wav[-self.block_frame:]
- infer_wav = self.tg(infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)).squeeze(0)
- # volume envelop mixing
- if self.config.rms_mix_rate < 1:
- rms1 = librosa.feature.rms(
- y=self.input_wav_res[-160*infer_wav.shape[0]//self.zc :].cpu().numpy(),
- frame_length=640,
- hop_length=160,
- )
- rms1 = torch.from_numpy(rms1).to(device)
- rms1 = F.interpolate(
- rms1.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True,
- )[0,0,:-1]
- rms2 = librosa.feature.rms(
- y=infer_wav[:].cpu().numpy(), frame_length=4*self.zc, hop_length=self.zc
- )
- rms2 = torch.from_numpy(rms2).to(device)
- rms2 = F.interpolate(
- rms2.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True,
- )[0,0,:-1]
- rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3)
- infer_wav *= torch.pow(rms1 / rms2, torch.tensor(1 - self.config.rms_mix_rate))
- # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
- conv_input = infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame]
- cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :])
- cor_den = torch.sqrt(
- F.conv1d(conv_input ** 2, torch.ones(1, 1, self.crossfade_frame, device=device)) + 1e-8)
- if sys.platform == "darwin":
- _, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0])
- sola_offset = sola_offset.item()
- else:
- sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
- logger.debug("sola_offset = %d", int(sola_offset))
- infer_wav = infer_wav[sola_offset: sola_offset + self.block_frame + self.crossfade_frame]
- infer_wav[: self.crossfade_frame] *= self.fade_in_window
- infer_wav[: self.crossfade_frame] += self.sola_buffer *self.fade_out_window
- self.sola_buffer[:] = infer_wav[-self.crossfade_frame:]
- if sys.platform == "darwin":
- outdata[:] = infer_wav[:-self.crossfade_frame].cpu().numpy()[:, np.newaxis]
- else:
- outdata[:] = infer_wav[:-self.crossfade_frame].repeat(2, 1).t().cpu().numpy()
- total_time = time.perf_counter() - start_time
- self.window["infer_time"].update(int(total_time * 1000))
- logger.info("Infer time: %.2f", total_time)
-
- def get_devices(self, update: bool = True):
- """获取设备列表"""
- if update:
- sd._terminate()
- sd._initialize()
- devices = sd.query_devices()
- hostapis = sd.query_hostapis()
- for hostapi in hostapis:
- for device_idx in hostapi["devices"]:
- devices[device_idx]["hostapi_name"] = hostapi["name"]
- input_devices = [
- f"{d['name']} ({d['hostapi_name']})"
- for d in devices
- if d["max_input_channels"] > 0
- ]
- output_devices = [
- f"{d['name']} ({d['hostapi_name']})"
- for d in devices
- if d["max_output_channels"] > 0
- ]
- input_devices_indices = [
- d["index"] if "index" in d else d["name"]
- for d in devices
- if d["max_input_channels"] > 0
- ]
- output_devices_indices = [
- d["index"] if "index" in d else d["name"]
- for d in devices
- if d["max_output_channels"] > 0
- ]
- return (
- input_devices,
- output_devices,
- input_devices_indices,
- output_devices_indices,
- )
-
- def set_devices(self, input_device, output_device):
- """设置输出设备"""
- (
- input_devices,
- output_devices,
- input_device_indices,
- output_device_indices,
- ) = self.get_devices()
- sd.default.device[0] = input_device_indices[
- input_devices.index(input_device)
- ]
- sd.default.device[1] = output_device_indices[
- output_devices.index(output_device)
- ]
- logger.info(
- "Input device: %s:%s", str(sd.default.device[0]), input_device
- )
- logger.info(
- "Output device: %s:%s", str(sd.default.device[1]), output_device
- )
-
- gui = GUI()
\ No newline at end of file
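
One of the denser parts of the deleted GUI is the SOLA (synchronized overlap-add) stitching in `audio_callback`: the tail of the previous output block is slid over the head of the new block, and the offset with the highest normalized cross-correlation is chosen before crossfading. The standalone sketch below reproduces just that offset search on synthetic tensors; the sizes are made up for illustration.

```python
import torch
import torch.nn.functional as F

crossfade = 256    # samples crossfaded between consecutive blocks (made up)
search = 128       # extra samples searched for the best alignment (made up)

new_chunk = torch.randn(crossfade + search + 1024)    # freshly inferred audio
sola_buffer = torch.randn(crossfade)                  # tail of the previous output block

# Normalized cross-correlation between the buffer and the head of the new chunk.
conv_input = new_chunk[None, None, : crossfade + search]
cor_nom = F.conv1d(conv_input, sola_buffer[None, None, :])
cor_den = torch.sqrt(
    F.conv1d(conv_input ** 2, torch.ones(1, 1, crossfade)) + 1e-8
)
sola_offset = int(torch.argmax(cor_nom[0, 0] / cor_den[0, 0]))
print("best offset:", sola_offset)   # crossfading would start at this sample
```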
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer_batch_rvc.py b/spaces/Kangarroar/ApplioRVC-Inference/infer_batch_rvc.py
deleted file mode 100644
index 15c862a3d6bf815fa68003cc7054b694cae50c2a..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/infer_batch_rvc.py
+++ /dev/null
@@ -1,215 +0,0 @@
-"""
-v1
-runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "E:\codes\py39\RVC-beta\output" "E:\codes\py39\test-20230416b\weights\mi-test.pth" 0.66 cuda:0 True 3 0 1 0.33
-v2
-runtime\python.exe myinfer-v2-0528.py 0 "E:\codes\py39\RVC-beta\todo-songs" "E:\codes\py39\test-20230416b\logs\mi-test-v2\aadded_IVF677_Flat_nprobe_1_v2.index" harvest "E:\codes\py39\RVC-beta\output_v2" "E:\codes\py39\test-20230416b\weights\mi-test-v2.pth" 0.66 cuda:0 True 3 0 1 0.33
-"""
-import os, sys, pdb, torch
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-import sys
-import torch
-import tqdm as tq
-from multiprocessing import cpu_count
-
-
-class Config:
- def __init__(self, device, is_half):
- self.device = device
- self.is_half = is_half
- self.n_cpu = 0
- self.gpu_name = None
- self.gpu_mem = None
- self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
-
- def device_config(self) -> tuple:
- if torch.cuda.is_available():
- i_device = int(self.device.split(":")[-1])
- self.gpu_name = torch.cuda.get_device_name(i_device)
- if (
- ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
- or "P40" in self.gpu_name.upper()
- or "1060" in self.gpu_name
- or "1070" in self.gpu_name
- or "1080" in self.gpu_name
- ):
- print("16系/10系显卡和P40强制单精度")
- self.is_half = False
- for config_file in ["32k.json", "40k.json", "48k.json"]:
- with open(f"configs/{config_file}", "r") as f:
- strr = f.read().replace("true", "false")
- with open(f"configs/{config_file}", "w") as f:
- f.write(strr)
- with open("infer/modules/train/preprocess.py", "r") as f:
- strr = f.read().replace("3.7", "3.0")
- with open("infer/modules/train/preprocess.py", "w") as f:
- f.write(strr)
- else:
- self.gpu_name = None
- self.gpu_mem = int(
- torch.cuda.get_device_properties(i_device).total_memory
- / 1024
- / 1024
- / 1024
- + 0.4
- )
- if self.gpu_mem <= 4:
- with open("infer/modules/train/preprocess.py", "r") as f:
- strr = f.read().replace("3.7", "3.0")
- with open("infer/modules/train/preprocess.py", "w") as f:
- f.write(strr)
- elif torch.backends.mps.is_available():
- print("没有发现支持的N卡, 使用MPS进行推理")
- self.device = "mps"
- else:
- print("没有发现支持的N卡, 使用CPU进行推理")
- self.device = "cpu"
-            self.is_half = False  # half precision is not supported on CPU
-
- if self.n_cpu == 0:
- self.n_cpu = cpu_count()
-
- if self.is_half:
-            # settings for 6 GB of GPU memory
- x_pad = 3
- x_query = 10
- x_center = 60
- x_max = 65
- else:
-            # settings for 5 GB of GPU memory
- x_pad = 1
- x_query = 6
- x_center = 38
- x_max = 41
-
-        if self.gpu_mem is not None and self.gpu_mem <= 4:
- x_pad = 1
- x_query = 5
- x_center = 30
- x_max = 32
-
- return x_pad, x_query, x_center, x_max
-
-
-f0up_key = sys.argv[1]
-input_path = sys.argv[2]
-index_path = sys.argv[3]
-f0method = sys.argv[4] # harvest or pm
-opt_path = sys.argv[5]
-model_path = sys.argv[6]
-index_rate = float(sys.argv[7])
-device = sys.argv[8]
-is_half = sys.argv[9].lower() != "false"
-filter_radius = int(sys.argv[10])
-resample_sr = int(sys.argv[11])
-rms_mix_rate = float(sys.argv[12])
-protect = float(sys.argv[13])
-print(sys.argv)
-config = Config(device, is_half)
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from infer.modules.vc.modules import VC
-from lib.infer_pack.models import (
- SynthesizerTrnMs256NSFsid,
- SynthesizerTrnMs256NSFsid_nono,
- SynthesizerTrnMs768NSFsid,
- SynthesizerTrnMs768NSFsid_nono,
-)
-from infer.lib.audio import load_audio
-from fairseq import checkpoint_utils
-from scipy.io import wavfile
-
-hubert_model = None
-
-
-def load_hubert():
- global hubert_model
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
- ["hubert_base.pt"],
- suffix="",
- )
- hubert_model = models[0]
- hubert_model = hubert_model.to(device)
- if is_half:
- hubert_model = hubert_model.half()
- else:
- hubert_model = hubert_model.float()
- hubert_model.eval()
-
-
-def vc_single(sid, input_audio, f0_up_key, f0_file, f0_method, file_index, index_rate):
- global tgt_sr, net_g, vc, hubert_model, version
- if input_audio is None:
- return "You need to upload an audio", None
- f0_up_key = int(f0_up_key)
- audio = load_audio(input_audio, 16000)
- times = [0, 0, 0]
-    if hubert_model is None:
- load_hubert()
- if_f0 = cpt.get("f0", 1)
- # audio_opt=vc.pipeline(hubert_model,net_g,sid,audio,times,f0_up_key,f0_method,file_index,file_big_npy,index_rate,if_f0,f0_file=f0_file)
- audio_opt = vc.pipeline(
- hubert_model,
- net_g,
- sid,
- audio,
- input_audio,
- times,
- f0_up_key,
- f0_method,
- file_index,
- index_rate,
- if_f0,
- filter_radius,
- tgt_sr,
- resample_sr,
- rms_mix_rate,
- version,
- protect,
- f0_file=f0_file,
- )
- print(times)
- return audio_opt
-
-
-def get_vc(model_path):
- global n_spk, tgt_sr, net_g, vc, cpt, device, is_half, version
- print("loading pth %s" % model_path)
- cpt = torch.load(model_path, map_location="cpu")
- tgt_sr = cpt["config"][-1]
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
- if_f0 = cpt.get("f0", 1)
- version = cpt.get("version", "v1")
- if version == "v1":
- if if_f0 == 1:
- net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
- else:
- net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
- elif version == "v2":
- if if_f0 == 1: #
- net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
- else:
- net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
- del net_g.enc_q
-    print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line the weights do not load cleanly, oddly enough
- net_g.eval().to(device)
- if is_half:
- net_g = net_g.half()
- else:
- net_g = net_g.float()
- vc = VC(tgt_sr, config)
- n_spk = cpt["config"][-3]
- # return {"visible": True,"maximum": n_spk, "__type__": "update"}
-
-
-get_vc(model_path)
-audios = os.listdir(input_path)
-for file in tq.tqdm(audios):
- if file.endswith(".wav"):
- file_path = input_path + "/" + file
- wav_opt = vc_single(
- 0, file_path, f0up_key, None, f0method, index_path, index_rate
- )
- out_path = opt_path + "/" + file
- wavfile.write(out_path, tgt_sr, wav_opt)
diff --git a/spaces/KashiwaByte/SparkDebate-V2.0/README.md b/spaces/KashiwaByte/SparkDebate-V2.0/README.md
deleted file mode 100644
index 8f0fa0349bfd6dd61065ae09a96bcfebee1ca1ce..0000000000000000000000000000000000000000
--- a/spaces/KashiwaByte/SparkDebate-V2.0/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: SparkDebate
-emoji: 🌖
-colorFrom: pink
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Kevin676/Clone-Your-Voice/synthesizer/utils/plot.py b/spaces/Kevin676/Clone-Your-Voice/synthesizer/utils/plot.py
deleted file mode 100644
index a470d169ddf1ab977d22f649454c74a07ba70adf..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/Clone-Your-Voice/synthesizer/utils/plot.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import numpy as np
-
-
-def split_title_line(title_text, max_words=5):
- """
-    Split a title string into lines containing at most `max_words` words each.
- """
- seq = title_text.split()
- return "\n".join([" ".join(seq[i:i + max_words]) for i in range(0, len(seq), max_words)])
-
-
-def plot_alignment(alignment, path, title=None, split_title=False, max_len=None):
- import matplotlib
- matplotlib.use("Agg")
- import matplotlib.pyplot as plt
-
- if max_len is not None:
- alignment = alignment[:, :max_len]
-
- fig = plt.figure(figsize=(8, 6))
- ax = fig.add_subplot(111)
-
- im = ax.imshow(
- alignment,
- aspect="auto",
- origin="lower",
- interpolation="none")
- fig.colorbar(im, ax=ax)
- xlabel = "Decoder timestep"
-
- if split_title:
- title = split_title_line(title)
-
- plt.xlabel(xlabel)
- plt.title(title)
- plt.ylabel("Encoder timestep")
- plt.tight_layout()
- plt.savefig(path, format="png")
- plt.close()
-
-
-def plot_spectrogram(pred_spectrogram, path, title=None, split_title=False, target_spectrogram=None, max_len=None, auto_aspect=False):
- import matplotlib
- matplotlib.use("Agg")
- import matplotlib.pyplot as plt
-
- if max_len is not None:
- target_spectrogram = target_spectrogram[:max_len]
- pred_spectrogram = pred_spectrogram[:max_len]
-
- if split_title:
- title = split_title_line(title)
-
- fig = plt.figure(figsize=(10, 8))
- # Set common labels
- fig.text(0.5, 0.18, title, horizontalalignment="center", fontsize=16)
-
- #target spectrogram subplot
- if target_spectrogram is not None:
- ax1 = fig.add_subplot(311)
- ax2 = fig.add_subplot(312)
-
- if auto_aspect:
- im = ax1.imshow(np.rot90(target_spectrogram), aspect="auto", interpolation="none")
- else:
- im = ax1.imshow(np.rot90(target_spectrogram), interpolation="none")
- ax1.set_title("Target Mel-Spectrogram")
- fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
- ax2.set_title("Predicted Mel-Spectrogram")
- else:
- ax2 = fig.add_subplot(211)
-
- if auto_aspect:
- im = ax2.imshow(np.rot90(pred_spectrogram), aspect="auto", interpolation="none")
- else:
- im = ax2.imshow(np.rot90(pred_spectrogram), interpolation="none")
- fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
-
- plt.tight_layout()
- plt.savefig(path, format="png")
- plt.close()
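
A hedged usage sketch for the two deleted helpers, with random arrays standing in for a real attention alignment and mel-spectrograms; it assumes the module is still importable from its original `synthesizer.utils.plot` path.

```python
import numpy as np
# Assumes the deleted module is importable from the repository root.
from synthesizer.utils.plot import plot_alignment, plot_spectrogram

alignment = np.random.rand(80, 200)     # (encoder steps, decoder steps)
pred_mel = np.random.rand(400, 80)      # (frames, n_mels)
target_mel = np.random.rand(400, 80)

plot_alignment(alignment, "alignment.png", title="step 10k alignment")
plot_spectrogram(
    pred_mel, "mel.png", title="step 10k",
    target_spectrogram=target_mel, auto_aspect=True,
)
```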
diff --git a/spaces/Lajonbot/Chatbot-Share/utils/audio_utils.py b/spaces/Lajonbot/Chatbot-Share/utils/audio_utils.py
deleted file mode 100644
index d033238d801315b705748ce8c947b355d97fb763..0000000000000000000000000000000000000000
--- a/spaces/Lajonbot/Chatbot-Share/utils/audio_utils.py
+++ /dev/null
@@ -1,247 +0,0 @@
-STR_CLIP_ID = 'clip_id'
-STR_AUDIO_SIGNAL = 'audio_signal'
-STR_TARGET_VECTOR = 'target_vector'
-
-
-STR_CH_FIRST = 'channels_first'
-STR_CH_LAST = 'channels_last'
-
-import io
-import os
-import tqdm
-import logging
-import subprocess
-from typing import Tuple
-from pathlib import Path
-
-# import librosa
-import numpy as np
-import soundfile as sf
-
-import itertools
-from numpy.fft import irfft
-
-def _resample_load_ffmpeg(path: str, sample_rate: int, downmix_to_mono: bool) -> Tuple[np.ndarray, int]:
- """
-    Decoding, downmixing, and downsampling by ffmpeg.
- Returns a channel-first audio signal.
-
- Args:
-        path: audio file path
-        sample_rate: target sampling rate (None keeps the file's native rate)
-        downmix_to_mono: if True, downmix the audio to a single channel
-
- Returns:
- (audio signal, sample rate)
- """
-
- def _decode_resample_by_ffmpeg(filename, sr):
- """decode, downmix, and resample audio file"""
- channel_cmd = '-ac 1 ' if downmix_to_mono else '' # downmixing option
- resampling_cmd = f'-ar {str(sr)}' if sr else '' # downsampling option
- cmd = f"ffmpeg -i \"{filename}\" {channel_cmd} {resampling_cmd} -f wav -"
- p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = p.communicate()
- return out
-
- src, sr = sf.read(io.BytesIO(_decode_resample_by_ffmpeg(path, sr=sample_rate)))
- return src.T, sr
-
-
-def _resample_load_librosa(path: str, sample_rate: int, downmix_to_mono: bool, **kwargs) -> Tuple[np.ndarray, int]:
- """
- Decoding, downmixing, and downsampling by librosa.
- Returns a channel-first audio signal.
- """
-    import librosa  # imported here because the module-level import is commented out above
-    src, sr = librosa.load(path, sr=sample_rate, mono=downmix_to_mono, **kwargs)
- return src, sr
-
-
-def load_audio(
- path: str or Path,
- ch_format: str,
- sample_rate: int = None,
- downmix_to_mono: bool = False,
- resample_by: str = 'ffmpeg',
- **kwargs,
-) -> Tuple[np.ndarray, int]:
- """A wrapper of librosa.load that:
- - forces the returned audio to be 2-dim,
- - defaults to sr=None, and
- - defaults to downmix_to_mono=False.
-
- The audio decoding is done by `audioread` or `soundfile` package and ultimately, often by ffmpeg.
- The resampling is done by `librosa`'s child package `resampy`.
-
- Args:
- path: audio file path
- ch_format: one of 'channels_first' or 'channels_last'
- sample_rate: target sampling rate. if None, use the rate of the audio file
- downmix_to_mono:
- resample_by (str): 'librosa' or 'ffmpeg'. it decides backend for audio decoding and resampling.
- **kwargs: keyword args for librosa.load - offset, duration, dtype, res_type.
-
- Returns:
- (audio, sr) tuple
- """
- if ch_format not in (STR_CH_FIRST, STR_CH_LAST):
- raise ValueError(f'ch_format is wrong here -> {ch_format}')
-
- if os.stat(path).st_size > 8000:
- if resample_by == 'librosa':
- src, sr = _resample_load_librosa(path, sample_rate, downmix_to_mono, **kwargs)
- elif resample_by == 'ffmpeg':
- src, sr = _resample_load_ffmpeg(path, sample_rate, downmix_to_mono)
- else:
-            raise NotImplementedError(f'resample_by: "{resample_by}" is not supported yet')
- else:
- raise ValueError('Given audio is too short!')
- return src, sr
-
- # if src.ndim == 1:
- # src = np.expand_dims(src, axis=0)
- # # now always 2d and channels_first
-
- # if ch_format == STR_CH_FIRST:
- # return src, sr
- # else:
- # return src.T, sr
-
-def ms(x):
- """Mean value of signal `x` squared.
- :param x: Dynamic quantity.
- :returns: Mean squared of `x`.
- """
- return (np.abs(x)**2.0).mean()
-
-def normalize(y, x=None):
- """normalize power in y to a (standard normal) white noise signal.
- Optionally normalize to power in signal `x`.
- #The mean power of a Gaussian with :math:`\\mu=0` and :math:`\\sigma=1` is 1.
- """
- if x is not None:
- x = ms(x)
- else:
- x = 1.0
- return y * np.sqrt(x / ms(y))
-
-def noise(N, color='white', state=None):
- """Noise generator.
- :param N: Amount of samples.
- :param color: Color of noise.
- :param state: State of PRNG.
- :type state: :class:`np.random.RandomState`
- """
- try:
- return _noise_generators[color](N, state)
- except KeyError:
- raise ValueError("Incorrect color.")
-
-def white(N, state=None):
- """
- White noise.
- :param N: Amount of samples.
- :param state: State of PRNG.
- :type state: :class:`np.random.RandomState`
-    White noise has a constant power density. Its narrowband spectrum is therefore flat.
- The power in white noise will increase by a factor of two for each octave band,
- and therefore increases with 3 dB per octave.
- """
- state = np.random.RandomState() if state is None else state
- return state.randn(N)
-
-def pink(N, state=None):
- """
- Pink noise.
- :param N: Amount of samples.
- :param state: State of PRNG.
- :type state: :class:`np.random.RandomState`
- Pink noise has equal power in bands that are proportionally wide.
- Power density decreases with 3 dB per octave.
- """
- state = np.random.RandomState() if state is None else state
- uneven = N % 2
- X = state.randn(N // 2 + 1 + uneven) + 1j * state.randn(N // 2 + 1 + uneven)
- S = np.sqrt(np.arange(len(X)) + 1.) # +1 to avoid divide by zero
- y = (irfft(X / S)).real
- if uneven:
- y = y[:-1]
- return normalize(y)
-
-def blue(N, state=None):
- """
- Blue noise.
- :param N: Amount of samples.
- :param state: State of PRNG.
- :type state: :class:`np.random.RandomState`
- Power increases with 6 dB per octave.
- Power density increases with 3 dB per octave.
- """
- state = np.random.RandomState() if state is None else state
- uneven = N % 2
- X = state.randn(N // 2 + 1 + uneven) + 1j * state.randn(N // 2 + 1 + uneven)
- S = np.sqrt(np.arange(len(X))) # Filter
- y = (irfft(X * S)).real
- if uneven:
- y = y[:-1]
- return normalize(y)
-
-def brown(N, state=None):
- """
-    Brown (red) noise.
- :param N: Amount of samples.
- :param state: State of PRNG.
- :type state: :class:`np.random.RandomState`
-    Power decreases with 3 dB per octave.
- Power density decreases with 6 dB per octave.
- """
- state = np.random.RandomState() if state is None else state
- uneven = N % 2
- X = state.randn(N // 2 + 1 + uneven) + 1j * state.randn(N // 2 + 1 + uneven)
- S = (np.arange(len(X)) + 1) # Filter
- y = (irfft(X / S)).real
- if uneven:
- y = y[:-1]
- return normalize(y)
-
-def violet(N, state=None):
- """
-    Violet noise.
- :param N: Amount of samples.
- :param state: State of PRNG.
- :type state: :class:`np.random.RandomState`
- Power increases with +9 dB per octave.
- Power density increases with +6 dB per octave.
- """
- state = np.random.RandomState() if state is None else state
- uneven = N % 2
- X = state.randn(N // 2 + 1 + uneven) + 1j * state.randn(N // 2 + 1 + uneven)
- S = (np.arange(len(X))) # Filter
- y = (irfft(X * S)).real
- if uneven:
- y = y[:-1]
- return normalize(y)
-
-_noise_generators = {
- 'white': white,
- 'pink': pink,
- 'blue': blue,
- 'brown': brown,
- 'violet': violet,
-}
-
-def noise_generator(N=44100, color='white', state=None):
- """Noise generator.
- :param N: Amount of unique samples to generate.
- :param color: Color of noise.
- Generate `N` amount of unique samples and cycle over these samples.
- """
- #yield from itertools.cycle(noise(N, color)) # Python 3.3
- for sample in itertools.cycle(noise(N, color, state)):
- yield sample
-
-def heaviside(N):
- """Heaviside.
-    Returns the value 0 for `N < 0`, 1 for `N > 0`, and 1/2 for `N = 0`.
- """
- return 0.5 * (np.sign(N) + 1)
\ No newline at end of file
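
The removed module's colored-noise helpers are self-contained, so a quick usage sketch is easy to give: draw one second of pink noise directly, then pull a finite number of samples from the cycling generator with `itertools.islice`. The import path assumes the file's original location under `utils/`.

```python
import itertools
import numpy as np
# Assumes the deleted module is importable from the repository root.
from utils.audio_utils import ms, pink, noise_generator

pink_second = pink(44100)                          # 1 s of pink noise at 44.1 kHz
print(pink_second.shape, float(ms(pink_second)))   # mean square is normalized to ~1.0

gen = noise_generator(N=44100, color='white')
first_1024 = np.fromiter(itertools.islice(gen, 1024), dtype=float)
print(first_1024.shape)
```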
diff --git a/spaces/LucasCodeBreak/MusicGen/audiocraft/quantization/__init__.py b/spaces/LucasCodeBreak/MusicGen/audiocraft/quantization/__init__.py
deleted file mode 100644
index 836d6eb518978480c6b95d6f29ce4f84a9428793..0000000000000000000000000000000000000000
--- a/spaces/LucasCodeBreak/MusicGen/audiocraft/quantization/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-# flake8: noqa
-from .vq import ResidualVectorQuantizer
-from .base import BaseQuantizer, DummyQuantizer, QuantizedResult
diff --git a/spaces/MMYang/microsoft-BioGPT-Large/README.md b/spaces/MMYang/microsoft-BioGPT-Large/README.md
deleted file mode 100644
index a592681c7f23d65945708b4457564abaaa552a58..0000000000000000000000000000000000000000
--- a/spaces/MMYang/microsoft-BioGPT-Large/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Microsoft BioGPT Large
-emoji: 👁
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
-duplicated_from: ZaddyMattty/microsoft-BioGPT-Large
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Makiing/coolb-in-gtest/src/components/button-scroll-to-bottom.tsx b/spaces/Makiing/coolb-in-gtest/src/components/button-scroll-to-bottom.tsx
deleted file mode 100644
index b68ab9c0e48320c356e51a52d11b9ca63909e6c5..0000000000000000000000000000000000000000
--- a/spaces/Makiing/coolb-in-gtest/src/components/button-scroll-to-bottom.tsx
+++ /dev/null
@@ -1,34 +0,0 @@
-'use client'
-
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-import { useAtBottom } from '@/lib/hooks/use-at-bottom'
-import { Button, type ButtonProps } from '@/components/ui/button'
-import { IconArrowDown } from '@/components/ui/icons'
-
-export function ButtonScrollToBottom({ className, ...props }: ButtonProps) {
- const isAtBottom = useAtBottom()
-
- return (
-
- )
-}
diff --git a/spaces/Marshalls/testmtd/feature_extraction/generate_ddc_features.py b/spaces/Marshalls/testmtd/feature_extraction/generate_ddc_features.py
deleted file mode 100644
index 80ce8a9241eba428f01c427f87085340faa33eff..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/feature_extraction/generate_ddc_features.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import numpy as np
-from pathlib import Path
-import json
-import os.path
-import sys
-import argparse
-import time
-import json, pickle
-import torch
-from math import ceil
-from scipy import signal
-
-THIS_DIR = os.path.dirname(os.path.abspath(__file__))
-ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, os.pardir))
-DATA_DIR = os.path.join(ROOT_DIR, 'data')
-sys.path.append(ROOT_DIR)
-
-PAD_STATE = 0
-START_STATE = 1
-END_STATE = 2
-EMPTY_STATE = 3
-NUM_SPECIAL_STATES = 4
-
-HUMAN_DELTA = 0.125 # minimum block separation that we are happy with
-
-#from models import create_model
-from models.ddc_model import DDCModel
-from feature_extraction.utils import distribute_tasks,ResampleLinear1D
-
-parser = argparse.ArgumentParser(description='Get DDC features from song features')
-parser.add_argument("data_path", type=str, help="Directory contining Beat Saber level folders")
-parser.add_argument('--checkpoints_dir', type=str)
-parser.add_argument('--audio_format', type=str, default="wav")
-parser.add_argument('--experiment_name', type=str)
-parser.add_argument('--peak_threshold', type=float, default=0.0148)
-parser.add_argument('--fps', type=float, default=60)
-parser.add_argument('--checkpoint', type=str, default="latest")
-parser.add_argument('--temperature', type=float, default=1.00)
-parser.add_argument('--cuda', action="store_true")
-parser.add_argument("--step_size", metavar='', type=float, default=0.01666666666)
-parser.add_argument("--replace_existing", action="store_true")
-
-args = parser.parse_args()
-
-# makes arguments into global variables of the same name, used later in the code
-globals().update(vars(args))
-data_path = Path(data_path)
-
-## distributing tasks accross nodes ##
-from mpi4py import MPI
-comm = MPI.COMM_WORLD
-rank = comm.Get_rank()
-size = comm.Get_size()
-print(rank)
-#print("creating {} of size {}".format(feature_name, feature_size))
-
-experiment_name = args.experiment_name+"/"
-checkpoint = args.checkpoint
-temperature=args.temperature
-
-from pathlib import Path
-
-''' LOAD MODEL, OPTS, AND WEIGHTS'''
-
-##loading opt object from experiment
-opt = json.loads(open(ROOT_DIR.__str__()+"/feature_extraction/"+experiment_name+"opt.json","r").read())#read json file
-# we assume we have 1 GPU in generating machine :P
-if args.cuda:
- opt["gpu_ids"] = [0]
-else:
- opt["gpu_ids"] = []
-opt["checkpoints_dir"] = args.checkpoints_dir
-# print(opt["checkpoints_dir"])
-opt["load_iter"] = int(checkpoint)
-if args.cuda:
- opt["cuda"] = True
-else:
- opt["cuda"] = False
-opt["experiment_name"] = args.experiment_name.split("/")[0]
-if "dropout" not in opt: #for older experiments
- opt["dropout"] = 0.0
-# construct opt Struct object
-class Struct:
- def __init__(self, **entries):
- self.__dict__.update(entries)
-opt = Struct(**opt)
-
-assert opt.binarized
-
-#model = create_model(opt)
-model = DDCModel(opt)
-#model.setup()
-receptive_field = 1
-
-checkpoint = "iter_"+checkpoint
-model.load_networks(checkpoint)  # load model in /block_placement_ddc2/iter_130000_net.pth
-
-#assuming mp3 for now. TODO: generalize
-candidate_feature_files = sorted(data_path.glob('**/*'+audio_format+'_multi_mel_80.npy'), key=lambda path: path.parent.__str__())
-tasks = distribute_tasks(candidate_feature_files,rank,size)
-
-for i in tasks:
- path = candidate_feature_files[i]
- features_file = str(path)+"_"+"ddc_hidden"+".npy"
- if replace_existing or not os.path.isfile(features_file):
- print(path)
-
- sr = opt.sampling_rate
- hop = int(opt.step_size*sr)
- features = np.load(path)
-
- #generate level
- # first_samples basically works as a padding, for the first few outputs, which don't have any "past part" of the song to look at.
- first_samples = torch.full((1,opt.output_channels,receptive_field//2),START_STATE,dtype=torch.float)
- print(features.shape)
- features, peak_probs = model.generate_features(features)
- peak_probs = peak_probs[0,:,-1].cpu().detach().numpy()
- features = features.cpu().detach().numpy()
- features = features[0]
- features = ResampleLinear1D(features,int(np.floor(features.shape[0]*0.01*fps)))
- # features = downsample_signal(features[0], 0.01666666666667/0.01)
- print(features.shape)
- np.save(features_file,features)
- window = signal.hamming(ceil(HUMAN_DELTA/opt.step_size))
- smoothed_peaks = np.convolve(peak_probs,window,mode='same')
-
- thresholded_peaks = smoothed_peaks*(smoothed_peaks>args.peak_threshold)
- peaks = signal.find_peaks(thresholded_peaks)[0]
- print("number of peaks", len(peaks))
diff --git a/spaces/MattyWhite/ChatGPT-ImageCaptioner2/detic/data/transforms/custom_transform.py b/spaces/MattyWhite/ChatGPT-ImageCaptioner2/detic/data/transforms/custom_transform.py
deleted file mode 100644
index 3cc28b6b313dc084394ec5c9686169176987a44b..0000000000000000000000000000000000000000
--- a/spaces/MattyWhite/ChatGPT-ImageCaptioner2/detic/data/transforms/custom_transform.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-# Part of the code is from https://github.com/rwightman/efficientdet-pytorch/blob/master/effdet/data/transforms.py
-# Modified by Xingyi Zhou
-# The original code is under Apache-2.0 License
-import numpy as np
-import torch
-import torch.nn.functional as F
-from fvcore.transforms.transform import (
- CropTransform,
- HFlipTransform,
- NoOpTransform,
- Transform,
- TransformList,
-)
-from PIL import Image
-
-try:
- import cv2 # noqa
-except ImportError:
- # OpenCV is an optional dependency at the moment
- pass
-
-__all__ = [
- "EfficientDetResizeCropTransform",
-]
-
-class EfficientDetResizeCropTransform(Transform):
- """
- """
-
- def __init__(self, scaled_h, scaled_w, offset_y, offset_x, img_scale, \
- target_size, interp=None):
- """
- Args:
-            scaled_h, scaled_w (int): image size after scaling by ``img_scale``
-            offset_y, offset_x (int): top-left corner of the crop window
-            img_scale (float): uniform scale factor applied to the original image
-            target_size (tuple): (height, width) of the cropped output
-            interp: PIL interpolation method, defaults to bilinear.
- """
- # TODO decide on PIL vs opencv
- super().__init__()
- if interp is None:
- interp = Image.BILINEAR
- self._set_attributes(locals())
-
- def apply_image(self, img, interp=None):
- assert len(img.shape) <= 4
-
- if img.dtype == np.uint8:
- pil_image = Image.fromarray(img)
- interp_method = interp if interp is not None else self.interp
- pil_image = pil_image.resize((self.scaled_w, self.scaled_h), interp_method)
- ret = np.asarray(pil_image)
- right = min(self.scaled_w, self.offset_x + self.target_size[1])
- lower = min(self.scaled_h, self.offset_y + self.target_size[0])
- if len(ret.shape) <= 3:
- ret = ret[self.offset_y: lower, self.offset_x: right]
- else:
- ret = ret[..., self.offset_y: lower, self.offset_x: right, :]
- else:
- # PIL only supports uint8
- img = torch.from_numpy(img)
- shape = list(img.shape)
- shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
- img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
- _PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: "bilinear", Image.BICUBIC: "bicubic"}
- mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp]
- img = F.interpolate(img, (self.scaled_h, self.scaled_w), mode=mode, align_corners=False)
- shape[:2] = (self.scaled_h, self.scaled_w)
- ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
- right = min(self.scaled_w, self.offset_x + self.target_size[1])
- lower = min(self.scaled_h, self.offset_y + self.target_size[0])
- if len(ret.shape) <= 3:
- ret = ret[self.offset_y: lower, self.offset_x: right]
- else:
- ret = ret[..., self.offset_y: lower, self.offset_x: right, :]
- return ret
-
-
- def apply_coords(self, coords):
- coords[:, 0] = coords[:, 0] * self.img_scale
- coords[:, 1] = coords[:, 1] * self.img_scale
- coords[:, 0] -= self.offset_x
- coords[:, 1] -= self.offset_y
- return coords
-
-
- def apply_segmentation(self, segmentation):
- segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
- return segmentation
-
-
- def inverse(self):
- raise NotImplementedError
-
-
- def inverse_apply_coords(self, coords):
- coords[:, 0] += self.offset_x
- coords[:, 1] += self.offset_y
- coords[:, 0] = coords[:, 0] / self.img_scale
- coords[:, 1] = coords[:, 1] / self.img_scale
- return coords
-
-
- def inverse_apply_box(self, box: np.ndarray) -> np.ndarray:
- """
- """
- idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()
- coords = np.asarray(box).reshape(-1, 4)[:, idxs].reshape(-1, 2)
- coords = self.inverse_apply_coords(coords).reshape((-1, 4, 2))
- minxy = coords.min(axis=1)
- maxxy = coords.max(axis=1)
- trans_boxes = np.concatenate((minxy, maxxy), axis=1)
- return trans_boxes
\ No newline at end of file
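
Since `apply_coords` and `inverse_apply_coords` in the deleted transform are exact inverses of each other (scale then shift, and shift back then unscale), a quick round-trip check makes a useful sanity sketch; the constructor arguments below are made-up numbers, and the import assumes the module's original location.

```python
import numpy as np
# Assumes the deleted module is importable from the repository root.
from detic.data.transforms.custom_transform import EfficientDetResizeCropTransform

t = EfficientDetResizeCropTransform(
    scaled_h=640, scaled_w=960, offset_y=32, offset_x=48,
    img_scale=0.5, target_size=(512, 768),
)
pts = np.array([[100.0, 200.0], [300.0, 400.0]])
mapped = t.apply_coords(pts.copy())                 # original -> transformed coordinates
restored = t.inverse_apply_coords(mapped.copy())    # transformed -> original coordinates
print(np.allclose(restored, pts))                   # expected: True
```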
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py
deleted file mode 100644
index f451e08ad2eb0732dcb806b1851eb978d4acf136..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='PSPHead',
- in_channels=2048,
- in_index=3,
- channels=512,
- pool_scales=(1, 2, 3, 6),
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
diff --git a/spaces/MiloSobral/PortiloopDemo/setup.py b/spaces/MiloSobral/PortiloopDemo/setup.py
deleted file mode 100644
index aefd84a9291acad596c695020c7afc32b3c02ff3..0000000000000000000000000000000000000000
--- a/spaces/MiloSobral/PortiloopDemo/setup.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from setuptools import setup, find_packages
-import io
-
-
-def is_coral():
- try:
- with io.open('/sys/firmware/devicetree/base/model', 'r') as m:
- line = m.read().lower()
- if 'phanbell' in line or "coral" in line: return True
- except Exception: pass
- return False
-
-requirements_list = ['wheel',
- 'pyEDFLib',
- 'numpy',
- 'portilooplot',
- 'ipywidgets',
- 'python-periphery',
- 'scipy',
- 'matplotlib']
-
-if is_coral():
- requirements_list += ['spidev',
- 'pylsl-coral',
- 'pyalsaaudio']
-else:
- requirements_list += ['gradio',
- 'tensorflow',
- 'pyxdf',
- 'wonambi']
-
-
-setup(
- name='portiloop',
- version='0.0.1',
- packages=[package for package in find_packages()],
- description='Portiloop software library',
- install_requires=requirements_list,
-)
diff --git a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/prt_render.py b/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/prt_render.py
deleted file mode 100644
index 92c8a6257f776ab0c803a78a3af7c43a4333c3f9..0000000000000000000000000000000000000000
--- a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/prt_render.py
+++ /dev/null
@@ -1,350 +0,0 @@
-import numpy as np
-import random
-
-from .framework import *
-from .cam_render import CamRender
-
-class PRTRender(CamRender):
- def __init__(self, width=1600, height=1200, name='PRT Renderer', uv_mode=False, ms_rate=1, egl=False):
- program_files = ['prt.vs', 'prt.fs'] if not uv_mode else ['prt_uv.vs', 'prt_uv.fs']
- CamRender.__init__(self, width, height, name, program_files=program_files, color_size=8, ms_rate=ms_rate, egl=egl)
-
- # WARNING: this differs from vertex_buffer and vertex_data in Render
- self.vert_buffer = {}
- self.vert_data = {}
-
- self.norm_buffer = {}
- self.norm_data = {}
-
- self.tan_buffer = {}
- self.tan_data = {}
-
- self.btan_buffer = {}
- self.btan_data = {}
-
- self.prt1_buffer = {}
- self.prt1_data = {}
- self.prt2_buffer = {}
- self.prt2_data = {}
- self.prt3_buffer = {}
- self.prt3_data = {}
-
- self.uv_buffer = {}
- self.uv_data = {}
-
- self.render_texture_mat = {}
-
- self.vertex_dim = {}
- self.n_vertices = {}
-
- self.norm_mat_unif = glGetUniformLocation(self.program, 'NormMat')
- self.normalize_matrix = np.eye(4)
-
- self.shcoeff_unif = glGetUniformLocation(self.program, 'SHCoeffs')
- self.shcoeffs = np.zeros((9,3))
- self.shcoeffs[0,:] = 1.0
- #self.shcoeffs[1:,:] = np.random.rand(8,3)
-
- self.hasAlbedoUnif = glGetUniformLocation(self.program, 'hasAlbedoMap')
- self.hasNormalUnif = glGetUniformLocation(self.program, 'hasNormalMap')
-
- self.analyticUnif = glGetUniformLocation(self.program, 'analytic')
- self.analytic = False
-
- self.rot_mat_unif = glGetUniformLocation(self.program, 'RotMat')
- self.rot_matrix = np.eye(3)
-
- def set_texture(self, mat_name, smplr_name, texture):
- # texture_image: H x W x 3
- width = texture.shape[1]
- height = texture.shape[0]
- texture = np.flip(texture, 0)
-        img_data = np.frombuffer(texture.tobytes(), dtype=np.uint8)
-
- if mat_name not in self.render_texture_mat:
- self.render_texture_mat[mat_name] = {}
- if smplr_name in self.render_texture_mat[mat_name].keys():
- glDeleteTextures([self.render_texture_mat[mat_name][smplr_name]])
- del self.render_texture_mat[mat_name][smplr_name]
- self.render_texture_mat[mat_name][smplr_name] = glGenTextures(1)
- glActiveTexture(GL_TEXTURE0)
-
- glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
- glBindTexture(GL_TEXTURE_2D, self.render_texture_mat[mat_name][smplr_name])
-
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, img_data)
-
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 3)
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
-
- glGenerateMipmap(GL_TEXTURE_2D)
-
- def set_albedo(self, texture_image, mat_name='all'):
- self.set_texture(mat_name, 'AlbedoMap', texture_image)
-
- def set_normal_map(self, texture_image, mat_name='all'):
- self.set_texture(mat_name, 'NormalMap', texture_image)
-
- def set_mesh(self, vertices, faces, norms, faces_nml, uvs, faces_uvs, prt, faces_prt, tans, bitans, mat_name='all'):
- self.vert_data[mat_name] = vertices[faces.reshape([-1])]
- self.n_vertices[mat_name] = self.vert_data[mat_name].shape[0]
- self.vertex_dim[mat_name] = self.vert_data[mat_name].shape[1]
-
- if mat_name not in self.vert_buffer.keys():
- self.vert_buffer[mat_name] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[mat_name])
- glBufferData(GL_ARRAY_BUFFER, self.vert_data[mat_name], GL_STATIC_DRAW)
-
- self.uv_data[mat_name] = uvs[faces_uvs.reshape([-1])]
- if mat_name not in self.uv_buffer.keys():
- self.uv_buffer[mat_name] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.uv_buffer[mat_name])
- glBufferData(GL_ARRAY_BUFFER, self.uv_data[mat_name], GL_STATIC_DRAW)
-
- self.norm_data[mat_name] = norms[faces_nml.reshape([-1])]
- if mat_name not in self.norm_buffer.keys():
- self.norm_buffer[mat_name] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer[mat_name])
- glBufferData(GL_ARRAY_BUFFER, self.norm_data[mat_name], GL_STATIC_DRAW)
-
- self.tan_data[mat_name] = tans[faces_nml.reshape([-1])]
- if mat_name not in self.tan_buffer.keys():
- self.tan_buffer[mat_name] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.tan_buffer[mat_name])
- glBufferData(GL_ARRAY_BUFFER, self.tan_data[mat_name], GL_STATIC_DRAW)
-
- self.btan_data[mat_name] = bitans[faces_nml.reshape([-1])]
- if mat_name not in self.btan_buffer.keys():
- self.btan_buffer[mat_name] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.btan_buffer[mat_name])
- glBufferData(GL_ARRAY_BUFFER, self.btan_data[mat_name], GL_STATIC_DRAW)
-
- self.prt1_data[mat_name] = prt[faces_prt.reshape([-1])][:,:3]
- self.prt2_data[mat_name] = prt[faces_prt.reshape([-1])][:,3:6]
- self.prt3_data[mat_name] = prt[faces_prt.reshape([-1])][:,6:]
-
- if mat_name not in self.prt1_buffer.keys():
- self.prt1_buffer[mat_name] = glGenBuffers(1)
- if mat_name not in self.prt2_buffer.keys():
- self.prt2_buffer[mat_name] = glGenBuffers(1)
- if mat_name not in self.prt3_buffer.keys():
- self.prt3_buffer[mat_name] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.prt1_buffer[mat_name])
- glBufferData(GL_ARRAY_BUFFER, self.prt1_data[mat_name], GL_STATIC_DRAW)
- glBindBuffer(GL_ARRAY_BUFFER, self.prt2_buffer[mat_name])
- glBufferData(GL_ARRAY_BUFFER, self.prt2_data[mat_name], GL_STATIC_DRAW)
- glBindBuffer(GL_ARRAY_BUFFER, self.prt3_buffer[mat_name])
- glBufferData(GL_ARRAY_BUFFER, self.prt3_data[mat_name], GL_STATIC_DRAW)
-
- glBindBuffer(GL_ARRAY_BUFFER, 0)
-
- def set_mesh_mtl(self, vertices, faces, norms, faces_nml, uvs, faces_uvs, tans, bitans, prt):
- for key in faces:
- self.vert_data[key] = vertices[faces[key].reshape([-1])]
- self.n_vertices[key] = self.vert_data[key].shape[0]
- self.vertex_dim[key] = self.vert_data[key].shape[1]
-
- if key not in self.vert_buffer.keys():
- self.vert_buffer[key] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[key])
- glBufferData(GL_ARRAY_BUFFER, self.vert_data[key], GL_STATIC_DRAW)
-
- self.uv_data[key] = uvs[faces_uvs[key].reshape([-1])]
- if key not in self.uv_buffer.keys():
- self.uv_buffer[key] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.uv_buffer[key])
- glBufferData(GL_ARRAY_BUFFER, self.uv_data[key], GL_STATIC_DRAW)
-
- self.norm_data[key] = norms[faces_nml[key].reshape([-1])]
- if key not in self.norm_buffer.keys():
- self.norm_buffer[key] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer[key])
- glBufferData(GL_ARRAY_BUFFER, self.norm_data[key], GL_STATIC_DRAW)
-
- self.tan_data[key] = tans[faces_nml[key].reshape([-1])]
- if key not in self.tan_buffer.keys():
- self.tan_buffer[key] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.tan_buffer[key])
- glBufferData(GL_ARRAY_BUFFER, self.tan_data[key], GL_STATIC_DRAW)
-
- self.btan_data[key] = bitans[faces_nml[key].reshape([-1])]
- if key not in self.btan_buffer.keys():
- self.btan_buffer[key] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.btan_buffer[key])
- glBufferData(GL_ARRAY_BUFFER, self.btan_data[key], GL_STATIC_DRAW)
-
- self.prt1_data[key] = prt[faces[key].reshape([-1])][:,:3]
- self.prt2_data[key] = prt[faces[key].reshape([-1])][:,3:6]
- self.prt3_data[key] = prt[faces[key].reshape([-1])][:,6:]
-
- if key not in self.prt1_buffer.keys():
- self.prt1_buffer[key] = glGenBuffers(1)
- if key not in self.prt2_buffer.keys():
- self.prt2_buffer[key] = glGenBuffers(1)
- if key not in self.prt3_buffer.keys():
- self.prt3_buffer[key] = glGenBuffers(1)
- glBindBuffer(GL_ARRAY_BUFFER, self.prt1_buffer[key])
- glBufferData(GL_ARRAY_BUFFER, self.prt1_data[key], GL_STATIC_DRAW)
- glBindBuffer(GL_ARRAY_BUFFER, self.prt2_buffer[key])
- glBufferData(GL_ARRAY_BUFFER, self.prt2_data[key], GL_STATIC_DRAW)
- glBindBuffer(GL_ARRAY_BUFFER, self.prt3_buffer[key])
- glBufferData(GL_ARRAY_BUFFER, self.prt3_data[key], GL_STATIC_DRAW)
-
- glBindBuffer(GL_ARRAY_BUFFER, 0)
-
- def cleanup(self):
-
- glBindBuffer(GL_ARRAY_BUFFER, 0)
- for key in self.vert_data:
- glDeleteBuffers(1, [self.vert_buffer[key]])
- glDeleteBuffers(1, [self.norm_buffer[key]])
- glDeleteBuffers(1, [self.uv_buffer[key]])
-
- glDeleteBuffers(1, [self.tan_buffer[key]])
- glDeleteBuffers(1, [self.btan_buffer[key]])
- glDeleteBuffers(1, [self.prt1_buffer[key]])
- glDeleteBuffers(1, [self.prt2_buffer[key]])
- glDeleteBuffers(1, [self.prt3_buffer[key]])
-
- for smplr in self.render_texture_mat.get(key, {}):  # a material may have no textures bound
- glDeleteTextures([self.render_texture_mat[key][smplr]])
-
- self.vert_buffer = {}
- self.vert_data = {}
-
- self.norm_buffer = {}
- self.norm_data = {}
-
- self.tan_buffer = {}
- self.tan_data = {}
-
- self.btan_buffer = {}
- self.btan_data = {}
-
- self.prt1_buffer = {}
- self.prt1_data = {}
-
- self.prt2_buffer = {}
- self.prt2_data = {}
-
- self.prt3_buffer = {}
- self.prt3_data = {}
-
- self.uv_buffer = {}
- self.uv_data = {}
-
- self.render_texture_mat = {}
-
- self.vertex_dim = {}
- self.n_vertices = {}
-
- def randomize_sh(self):
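- # keep the ambient (l=0) SH coefficient fixed at 0.8 and randomize the remaining 8 coefficients per RGB channel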
- self.shcoeffs[0,:] = 0.8
- self.shcoeffs[1:,:] = 1.0*np.random.rand(8,3)
-
- def set_sh(self, sh):
- self.shcoeffs = sh
-
- def set_norm_mat(self, scale, center):
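- # build a 4x4 affine that maps v -> scale * (v - center), used to normalize the mesh before rendering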
- N = np.eye(4)
- N[:3, :3] = scale*np.eye(3)
- N[:3, 3] = -scale*center
-
- self.normalize_matrix = N
-
- def draw(self):
- self.draw_init()
-
- glDisable(GL_BLEND)
- #glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
- glEnable(GL_MULTISAMPLE)
-
- glUseProgram(self.program)
- glUniformMatrix4fv(self.norm_mat_unif, 1, GL_FALSE, self.normalize_matrix.transpose())
- glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE, self.model_view_matrix.transpose())
- glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE, self.projection_matrix.transpose())
-
- if 'AlbedoMap' in self.render_texture_mat['all']:
- glUniform1ui(self.hasAlbedoUnif, GLuint(1))
- else:
- glUniform1ui(self.hasAlbedoUnif, GLuint(0))
-
- if 'NormalMap' in self.render_texture_mat['all']:
- glUniform1ui(self.hasNormalUnif, GLuint(1))
- else:
- glUniform1ui(self.hasNormalUnif, GLuint(0))
-
- glUniform1ui(self.analyticUnif, GLuint(1) if self.analytic else GLuint(0))
-
- glUniform3fv(self.shcoeff_unif, 9, self.shcoeffs)
-
- glUniformMatrix3fv(self.rot_mat_unif, 1, GL_FALSE, self.rot_matrix.transpose())
-
- for mat in self.vert_buffer:
- # Handle vertex buffer
- glBindBuffer(GL_ARRAY_BUFFER, self.vert_buffer[mat])
- glEnableVertexAttribArray(0)
- glVertexAttribPointer(0, self.vertex_dim[mat], GL_DOUBLE, GL_FALSE, 0, None)
-
- # Handle normal buffer
- glBindBuffer(GL_ARRAY_BUFFER, self.norm_buffer[mat])
- glEnableVertexAttribArray(1)
- glVertexAttribPointer(1, 3, GL_DOUBLE, GL_FALSE, 0, None)
-
- # Handle uv buffer
- glBindBuffer(GL_ARRAY_BUFFER, self.uv_buffer[mat])
- glEnableVertexAttribArray(2)
- glVertexAttribPointer(2, 2, GL_DOUBLE, GL_FALSE, 0, None)
-
- # Handle tan buffer
- glBindBuffer(GL_ARRAY_BUFFER, self.tan_buffer[mat])
- glEnableVertexAttribArray(3)
- glVertexAttribPointer(3, 3, GL_DOUBLE, GL_FALSE, 0, None)
-
- # Handle btan buffer
- glBindBuffer(GL_ARRAY_BUFFER, self.btan_buffer[mat])
- glEnableVertexAttribArray(4)
- glVertexAttribPointer(4, 3, GL_DOUBLE, GL_FALSE, 0, None)
-
- # Handle PRT buffers (the 9 SH coefficients are split across three vec3 attributes)
- glBindBuffer(GL_ARRAY_BUFFER, self.prt1_buffer[mat])
- glEnableVertexAttribArray(5)
- glVertexAttribPointer(5, 3, GL_DOUBLE, GL_FALSE, 0, None)
-
- glBindBuffer(GL_ARRAY_BUFFER, self.prt2_buffer[mat])
- glEnableVertexAttribArray(6)
- glVertexAttribPointer(6, 3, GL_DOUBLE, GL_FALSE, 0, None)
-
- glBindBuffer(GL_ARRAY_BUFFER, self.prt3_buffer[mat])
- glEnableVertexAttribArray(7)
- glVertexAttribPointer(7, 3, GL_DOUBLE, GL_FALSE, 0, None)
-
- for i, smplr in enumerate(self.render_texture_mat[mat]):
- glActiveTexture(GL_TEXTURE0 + i)
- glBindTexture(GL_TEXTURE_2D, self.render_texture_mat[mat][smplr])
- glUniform1i(glGetUniformLocation(self.program, smplr), i)
-
- glDrawArrays(GL_TRIANGLES, 0, self.n_vertices[mat])
-
- glDisableVertexAttribArray(7)
- glDisableVertexAttribArray(6)
- glDisableVertexAttribArray(5)
- glDisableVertexAttribArray(4)
- glDisableVertexAttribArray(3)
- glDisableVertexAttribArray(2)
- glDisableVertexAttribArray(1)
- glDisableVertexAttribArray(0)
-
- glBindBuffer(GL_ARRAY_BUFFER, 0)
-
- glUseProgram(0)
-
- glDisable(GL_BLEND)
- glDisable(GL_MULTISAMPLE)
-
- self.draw_end()
diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/panet/panet_resnet18_fpem-ffm_600e_icdar2015.py b/spaces/Mountchicken/MAERec-Gradio/configs/textdet/panet/panet_resnet18_fpem-ffm_600e_icdar2015.py
deleted file mode 100644
index 1f5bf0e22d13c7bc79c83024a73182ae46cc3ffa..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/panet/panet_resnet18_fpem-ffm_600e_icdar2015.py
+++ /dev/null
@@ -1,35 +0,0 @@
-_base_ = [
- '../_base_/datasets/icdar2015.py',
- '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_adam_600e.py',
- '_base_panet_resnet18_fpem-ffm.py',
-]
-
-default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=20), )
-
-# dataset settings
-icdar2015_textdet_train = _base_.icdar2015_textdet_train
-icdar2015_textdet_test = _base_.icdar2015_textdet_test
-# pipeline settings
-icdar2015_textdet_train.pipeline = _base_.train_pipeline
-icdar2015_textdet_test.pipeline = _base_.test_pipeline
-
-train_dataloader = dict(
- batch_size=64,
- num_workers=8,
- persistent_workers=True,
- sampler=dict(type='DefaultSampler', shuffle=True),
- dataset=icdar2015_textdet_train)
-val_dataloader = dict(
- batch_size=1,
- num_workers=4,
- persistent_workers=True,
- sampler=dict(type='DefaultSampler', shuffle=False),
- dataset=icdar2015_textdet_test)
-test_dataloader = val_dataloader
-
-val_evaluator = dict(
- type='HmeanIOUMetric', pred_score_thrs=dict(start=0.3, stop=1, step=0.05))
-test_evaluator = val_evaluator
-
-auto_scale_lr = dict(base_batch_size=64)
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/recognizers/aster.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/recognizers/aster.py
deleted file mode 100644
index ce6535448af0473fefee4d4289c88df36bf16707..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/recognizers/aster.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from mmocr.registry import MODELS
-from .encoder_decoder_recognizer import EncoderDecoderRecognizer
-
-
-@MODELS.register_module()
-class ASTER(EncoderDecoderRecognizer):
- """Implement `ASTER: An Attentional Scene Text Recognizer with Flexible
- Rectification.
-
- self.resume_from_checkpoint = resume_from_checkpoint if resume_from_checkpoint > 0 else None
- self.seed = seed
- self.debug = debug
- # model and optm
- self.task = None
- self.optimizers = []
-
- # trainer state
- self.testing = False
- self.global_step = 0
- self.current_epoch = 0
- self.total_batches = 0
-
- # configure checkpoint
- self.monitor_key = monitor_key
- self.num_ckpt_keep = num_ckpt_keep
- self.save_best = save_best
- self.monitor_op = np.less if monitor_mode == 'min' else np.greater
- self.best_val_results = np.Inf if monitor_mode == 'min' else -np.Inf
- self.mode = 'min'
-
- # allow int, string and gpu list
- self.all_gpu_ids = [
- int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",") if x != '']
- self.num_gpus = len(self.all_gpu_ids)
- self.on_gpu = self.num_gpus > 0
- self.root_gpu = 0
- logging.info(f'GPU available: {torch.cuda.is_available()}, GPU used: {self.all_gpu_ids}')
- self.use_ddp = self.num_gpus > 1
- self.proc_rank = 0
- # Tensorboard logging
- self.log_save_interval = log_save_interval
- self.val_check_interval = val_check_interval
- self.tb_log_interval = tb_log_interval
- self.amp = amp
- self.amp_scalar = GradScaler()
-
- def test(self, task_cls):
- self.testing = True
- self.fit(task_cls)
-
- def fit(self, task_cls):
- if len(self.all_gpu_ids) > 1:
- mp.spawn(self.ddp_run, nprocs=self.num_gpus, args=(task_cls, copy.deepcopy(hparams)))
- else:
- self.task = task_cls()
- self.task.trainer = self
- self.run_single_process(self.task)
- return 1
-
- def ddp_run(self, gpu_idx, task_cls, hparams_):
- hparams.update(hparams_)
- self.proc_rank = gpu_idx
- self.init_ddp_connection(self.proc_rank, self.num_gpus)
- if dist.get_rank() != 0 and not self.debug:
- sys.stdout = open(os.devnull, "w")
- sys.stderr = open(os.devnull, "w")
- task = task_cls()
- task.trainer = self
- torch.cuda.set_device(gpu_idx)
- self.root_gpu = gpu_idx
- self.task = task
- self.run_single_process(task)
-
- def run_single_process(self, task):
- """Sanity check a few things before starting actual training.
-
- :param task:
- """
- # build model, optm and load checkpoint
- if self.proc_rank == 0:
- self.save_terminal_logs()
- if not self.testing:
- self.save_codes()
-
- model = task.build_model()
- if model is not None:
- task.layers = model
- checkpoint, _ = get_last_checkpoint(self.work_dir, self.resume_from_checkpoint)
- if checkpoint is not None:
- self.restore_weights(checkpoint)
- elif self.on_gpu:
- task.cuda(self.root_gpu)
- if not self.testing:
- self.optimizers = task.configure_optimizers()
- self.first_epoch = True
- if checkpoint is not None:
- self.restore_opt_state(checkpoint)
- del checkpoint
- # clear cache after restore
- if self.on_gpu:
- torch.cuda.empty_cache()
-
- if self.use_ddp:
- self.task = self.configure_ddp(self.task)
- dist.barrier()
-
- task_ref = self.get_task_ref()
- task_ref.trainer = self
- task_ref.testing = self.testing
- # link up experiment object
- if self.proc_rank == 0:
- task_ref.build_tensorboard(save_dir=self.work_dir, name='tb_logs')
- else:
- os.makedirs('tmp', exist_ok=True)
- task_ref.build_tensorboard(save_dir='tmp', name='tb_tmp')
- self.logger = task_ref.logger
- try:
- if self.testing:
- self.run_evaluation(test=True)
- else:
- self.train()
- except KeyboardInterrupt as e:
- traceback.print_exc()
- task_ref.on_keyboard_interrupt()
-
- ####################
- # valid and test
- ####################
- def run_evaluation(self, test=False):
- eval_results = self.evaluate(self.task, test, tqdm_desc='Valid' if not test else 'test',
- max_batches=hparams['eval_max_batches'])
- if eval_results is not None and 'tb_log' in eval_results:
- tb_log_output = eval_results['tb_log']
- self.log_metrics_to_tb(tb_log_output)
- if self.proc_rank == 0 and not test:
- self.save_checkpoint(epoch=self.current_epoch, logs=eval_results)
-
- def evaluate(self, task, test=False, tqdm_desc='Valid', max_batches=None):
- if max_batches == -1:
- max_batches = None
- # enable eval mode
- task.zero_grad()
- task.eval()
- torch.set_grad_enabled(False)
-
- task_ref = self.get_task_ref()
- if test:
- ret = task_ref.test_start()
- if ret == 'EXIT':
- return
- else:
- task_ref.validation_start()
- outputs = []
- dataloader = task_ref.test_dataloader() if test else task_ref.val_dataloader()
- pbar = tqdm.tqdm(dataloader, desc=tqdm_desc, total=max_batches, dynamic_ncols=True, unit='step',
- disable=self.root_gpu > 0)
- # iterate over the evaluation dataloader
- for batch_idx, batch in enumerate(pbar):
- if batch is None: # pragma: no cover
- continue
- # stop short when on fast_dev_run (sets max_batch=1)
- if max_batches is not None and batch_idx >= max_batches:
- break
-
- # make dataloader_idx arg in validation_step optional
- if self.on_gpu:
- batch = move_to_cuda(batch, self.root_gpu)
- args = [batch, batch_idx]
- if self.use_ddp:
- output = task(*args)
- else:
- if test:
- output = task_ref.test_step(*args)
- else:
- output = task_ref.validation_step(*args)
- # track outputs for collation
- outputs.append(output)
- # give model a chance to do something with the outputs (and method defined)
- if test:
- eval_results = task_ref.test_end(outputs)
- else:
- eval_results = task_ref.validation_end(outputs)
- # enable train mode again
- task.train()
- torch.set_grad_enabled(True)
- return eval_results
-
- ####################
- # train
- ####################
- def train(self):
- task_ref = self.get_task_ref()
- task_ref.on_train_start()
- if self.num_sanity_val_steps > 0:
- # run tiny validation (if validation defined) to make sure program won't crash during val
- self.evaluate(self.task, False, 'Sanity Val', max_batches=self.num_sanity_val_steps)
- # clear cache before training
- if self.on_gpu:
- torch.cuda.empty_cache()
- dataloader = task_ref.train_dataloader()
- epoch = self.current_epoch
- # run all epochs
- while True:
- # set seed for distributed sampler (enables shuffling for each epoch)
- if self.use_ddp and hasattr(dataloader.sampler, 'set_epoch'):
- dataloader.sampler.set_epoch(epoch)
- # update training progress in trainer and model
- task_ref.current_epoch = epoch
- self.current_epoch = epoch
- # total batches includes multiple val checks
- self.batch_loss_value = 0 # accumulated grads
- # before epoch hook
- task_ref.on_epoch_start()
-
- # run epoch
- train_pbar = tqdm.tqdm(dataloader, initial=self.global_step, total=float('inf'),
- dynamic_ncols=True, unit='step', disable=self.root_gpu > 0)
- for batch_idx, batch in enumerate(train_pbar):
- if self.global_step % self.val_check_interval == 0 and not self.first_epoch:
- self.run_evaluation()
- pbar_metrics, tb_metrics = self.run_training_batch(batch_idx, batch)
- train_pbar.set_postfix(**pbar_metrics)
- self.first_epoch = False
- # when metrics should be logged
- if (self.global_step + 1) % self.tb_log_interval == 0:
- # logs user requested information to logger
- self.log_metrics_to_tb(tb_metrics)
-
- self.global_step += 1
- task_ref.global_step = self.global_step
- if self.global_step > self.max_updates:
- print("| Training end..")
- break
- # epoch end hook
- task_ref.on_epoch_end()
- epoch += 1
- if self.global_step > self.max_updates:
- break
- task_ref.on_train_end()
-
- def run_training_batch(self, batch_idx, batch):
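- # one training step: loop over all optimizers, scale the loss for gradient accumulation, and only step each optimizer every accumulate_grad_batches updates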
- if batch is None:
- return {}, {}  # match the (pbar_metrics, tb_metrics) tuple expected by the caller
- all_progress_bar_metrics = []
- all_log_metrics = []
- task_ref = self.get_task_ref()
- for opt_idx, optimizer in enumerate(self.optimizers):
- if optimizer is None:
- continue
- # make sure only the gradients of the current optimizer's parameters are calculated
- # in the training step to prevent dangling gradients in multiple-optimizer setup.
- if len(self.optimizers) > 1:
- for param in task_ref.parameters():
- param.requires_grad = False
- for group in optimizer.param_groups:
- for param in group['params']:
- param.requires_grad = True
-
- # forward pass
- with autocast(enabled=self.amp):
- if self.on_gpu:
- batch = move_to_cuda(copy.copy(batch), self.root_gpu)
- args = [batch, batch_idx, opt_idx]
- if self.use_ddp:
- output = self.task(*args)
- else:
- output = task_ref.training_step(*args)
- loss = output['loss']
- if loss is None:
- continue
- progress_bar_metrics = output['progress_bar']
- log_metrics = output['tb_log']
- # accumulate loss
- loss = loss / self.accumulate_grad_batches
-
- # backward pass
- if loss.requires_grad:
- if self.amp:
- self.amp_scalar.scale(loss).backward()
- else:
- loss.backward()
-
- # track progress bar metrics
- all_log_metrics.append(log_metrics)
- all_progress_bar_metrics.append(progress_bar_metrics)
-
- if loss is None:
- continue
-
- # nan grads
- if self.print_nan_grads:
- has_nan_grad = False
- for name, param in task_ref.named_parameters():
- if (param.grad is not None) and torch.isnan(param.grad.float()).any():
- print("| NaN params: ", name, param, param.grad)
- has_nan_grad = True
- if has_nan_grad:
- exit(0)
-
- # gradient update with accumulated gradients
- if (self.global_step + 1) % self.accumulate_grad_batches == 0:
- task_ref.on_before_optimization(opt_idx)
- if self.amp:
- self.amp_scalar.step(optimizer)
- self.amp_scalar.update()
- else:
- optimizer.step()
- optimizer.zero_grad()
- task_ref.on_after_optimization(self.current_epoch, batch_idx, optimizer, opt_idx)
-
- # collapse all metrics into one dict
- all_progress_bar_metrics = {k: v for d in all_progress_bar_metrics for k, v in d.items()}
- all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}
- return all_progress_bar_metrics, all_log_metrics
-
- ####################
- # load and save checkpoint
- ####################
- def restore_weights(self, checkpoint):
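- # checkpoints store one state_dict per named child module (see dump_checkpoint), so each submodule is restored separately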
- # load model state
- task_ref = self.get_task_ref()
-
- for k, v in checkpoint['state_dict'].items():
- getattr(task_ref, k).load_state_dict(v)
-
- if self.on_gpu:
- task_ref.cuda(self.root_gpu)
- # load training state (affects trainer only)
- self.best_val_results = checkpoint['checkpoint_callback_best']
- self.global_step = checkpoint['global_step']
- self.current_epoch = checkpoint['epoch']
- task_ref.global_step = self.global_step
-
- # wait for all models to restore weights
- if self.use_ddp:
- # wait for all processes to catch up
- dist.barrier()
-
- def restore_opt_state(self, checkpoint):
- if self.testing:
- return
- # restore the optimizers
- optimizer_states = checkpoint['optimizer_states']
- for optimizer, opt_state in zip(self.optimizers, optimizer_states):
- if optimizer is None:
- return
- try:
- optimizer.load_state_dict(opt_state)
- # move optimizer to GPU 1 weight at a time
- if self.on_gpu:
- for state in optimizer.state.values():
- for k, v in state.items():
- if isinstance(v, torch.Tensor):
- state[k] = v.cuda(self.root_gpu)
- except ValueError:
- print("| WARMING: optimizer parameters not match !!!")
- try:
- if dist.is_initialized() and dist.get_rank() > 0:
- return
- except Exception as e:
- print(e)
- return
- did_restore = True
- return did_restore
-
- def save_checkpoint(self, epoch, logs=None):
- monitor_op = self.monitor_op  # respect monitor_mode instead of hard-coding np.less
- ckpt_path = f'{self.work_dir}/model_ckpt_steps_{self.global_step}.ckpt'
- logging.info(f'Epoch {epoch:05d}@{self.global_step}: saving model to {ckpt_path}')
- self._atomic_save(ckpt_path)
- for old_ckpt in get_all_ckpts(self.work_dir)[self.num_ckpt_keep:]:
- remove_file(old_ckpt)
- logging.info(f'Delete ckpt: {os.path.basename(old_ckpt)}')
- current = None
- if logs is not None and self.monitor_key in logs:
- current = logs[self.monitor_key]
- if current is not None and self.save_best:
- if monitor_op(current, self.best_val_results):
- best_filepath = f'{self.work_dir}/model_ckpt_best.pt'
- self.best_val_results = current
- logging.info(
- f'Epoch {epoch:05d}@{self.global_step}: {self.monitor_key} reached {current:0.5f}. '
- f'Saving model to {best_filepath}')
- self._atomic_save(best_filepath)
-
- def _atomic_save(self, filepath):
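- # write to a temporary ".part" file and rename it into place, so an interrupted save never leaves a corrupt checkpoint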
- checkpoint = self.dump_checkpoint()
- tmp_path = str(filepath) + ".part"
- torch.save(checkpoint, tmp_path, _use_new_zipfile_serialization=False)
- os.replace(tmp_path, filepath)
-
- def dump_checkpoint(self):
- checkpoint = {'epoch': self.current_epoch, 'global_step': self.global_step,
- 'checkpoint_callback_best': self.best_val_results}
- # save optimizers
- optimizer_states = []
- for i, optimizer in enumerate(self.optimizers):
- if optimizer is not None:
- optimizer_states.append(optimizer.state_dict())
-
- checkpoint['optimizer_states'] = optimizer_states
- task_ref = self.get_task_ref()
- checkpoint['state_dict'] = {
- k: v.state_dict() for k, v in task_ref.named_children() if len(list(v.parameters())) > 0}
- return checkpoint
-
- ####################
- # DDP
- ####################
- def configure_ddp(self, task):
- task = DDP(task, device_ids=[self.root_gpu], find_unused_parameters=True)
- random.seed(self.seed)
- np.random.seed(self.seed)
- return task
-
- def init_ddp_connection(self, proc_rank, world_size):
- root_node = '127.0.0.1'
- root_node = self.resolve_root_node_address(root_node)
- os.environ['MASTER_ADDR'] = root_node
- dist.init_process_group('nccl', rank=proc_rank, world_size=world_size)
-
- def resolve_root_node_address(self, root_node):
- if '[' in root_node:
- name = root_node.split('[')[0]
- number = root_node.split(',')[0]
- if '-' in number:
- number = number.split('-')[0]
- number = re.sub('[^0-9]', '', number)
- root_node = name + number
- return root_node
-
- ####################
- # utils
- ####################
- def get_task_ref(self):
- from utils.commons.base_task import BaseTask
- task: BaseTask = self.task.module if isinstance(self.task, DDP) else self.task
- return task
-
- def log_metrics_to_tb(self, metrics, step=None):
- """Logs the metric dict passed in.
-
- :param metrics:
- """
- # turn all tensors to scalars
- scalar_metrics = self.metrics_to_scalars(metrics)
-
- step = step if step is not None else self.global_step
- # log actual metrics
- if self.proc_rank == 0:
- self.log_metrics(self.logger, scalar_metrics, step=step)
-
- @staticmethod
- def log_metrics(logger, metrics, step=None):
- for k, v in metrics.items():
- if isinstance(v, torch.Tensor):
- v = v.item()
- logger.add_scalar(k, v, step)
-
- def metrics_to_scalars(self, metrics):
- new_metrics = {}
- for k, v in metrics.items():
- if isinstance(v, torch.Tensor):
- v = v.item()
-
- if type(v) is dict:
- v = self.metrics_to_scalars(v)
-
- new_metrics[k] = v
-
- return new_metrics
-
- def save_terminal_logs(self):
- t = datetime.now().strftime('%Y%m%d%H%M%S')
- os.makedirs(f'{self.work_dir}/terminal_logs', exist_ok=True)
- Tee(f'{self.work_dir}/terminal_logs/log_{t}.txt', 'w')
-
- def save_codes(self):
- if len(hparams['save_codes']) > 0:
- t = datetime.now().strftime('%Y%m%d%H%M%S')
- code_dir = f'{self.work_dir}/codes/{t}'
- subprocess.check_call(f'mkdir -p "{code_dir}"', shell=True)
- for c in hparams['save_codes']:
- if os.path.exists(c):
- subprocess.check_call(
- f'rsync -aR '
- f'--include="*.py" '
- f'--include="*.yaml" '
- f'--exclude="__pycache__" '
- f'--include="*/" '
- f'--exclude="*" '
- f'"./{c}" "{code_dir}/"',
- shell=True)
- print(f"| Copied codes to {code_dir}.")
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/__init__.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/README.md b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/README.md
deleted file mode 100644
index 53134ec553f8bb5bbd4d299a69f0e8fbb4176083..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/README.md
+++ /dev/null
@@ -1,395 +0,0 @@
-# Object Detection Models on TensorFlow 2
-
-**Note**: This repository is still under construction.
-More features and instructions will be added soon.
-
-## Prerequisite
-To get started, download the code from TensorFlow models GitHub repository or
-use the pre-installed Google Cloud VM.
-
-```bash
-git clone https://github.com/tensorflow/models.git
-```
-
-Next, make sure to use TensorFlow 2.1+ on Google Cloud. Also, here are
-a few packages you need to install to get started:
-
-```bash
-sudo apt-get install -y python-tk && \
-pip3 install -r ~/models/official/requirements.txt
-```
-
-## Train RetinaNet on TPU
-
-### Train a vanilla ResNet-50 based RetinaNet.
-
-```bash
-TPU_NAME=""
-MODEL_DIR=""
-RESNET_CHECKPOINT=""
-TRAIN_FILE_PATTERN=""
-EVAL_FILE_PATTERN=""
-VAL_JSON_FILE=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=tpu \
- --tpu="${TPU_NAME?}" \
- --model_dir="${MODEL_DIR?}" \
- --mode=train \
- --params_override="{ type: retinanet, train: { checkpoint: { path: ${RESNET_CHECKPOINT?}, prefix: resnet50/ }, train_file_pattern: ${TRAIN_FILE_PATTERN?} }, eval: { val_json_file: ${VAL_JSON_FILE?}, eval_file_pattern: ${EVAL_FILE_PATTERN?} } }"
-```
-
-The pre-trained ResNet-50 checkpoint can be downloaded [here](https://storage.cloud.google.com/cloud-tpu-checkpoints/model-garden-vision/detection/resnet50-2018-02-07.tar.gz).
-
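-For reference, a minimal way to fetch and unpack it on the VM (illustrative; any
-equivalent download method works) is:
-
-```bash
-wget https://storage.cloud.google.com/cloud-tpu-checkpoints/model-garden-vision/detection/resnet50-2018-02-07.tar.gz
-tar -xzf resnet50-2018-02-07.tar.gz
-# set RESNET_CHECKPOINT to the extracted checkpoint directory
-```
-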
-Note: The ResNet implementation under
-[detection/](https://github.com/tensorflow/models/tree/master/official/vision/detection)
-is currently different from the one under
-[classification/](https://github.com/tensorflow/models/tree/master/official/vision/image_classification),
-so the checkpoints are not compatible.
-We will unify the implementation soon.
-
-
-
-### Train a custom RetinaNet using the config file.
-
-First, create a YAML config file, e.g. *my_retinanet.yaml*. This file specifies
-the parameters to be overridden, which should at least include the following
-fields.
-
-```YAML
-# my_retinanet.yaml
-type: 'retinanet'
-train:
- train_file_pattern:
-eval:
- eval_file_pattern:
- val_json_file:
-```
-
-Once the YAML config file is created, you can launch the training using the
-following command.
-
-```bash
-TPU_NAME=""
-MODEL_DIR=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=tpu \
- --tpu="${TPU_NAME?}" \
- --model_dir="${MODEL_DIR?}" \
- --mode=train \
- --config_file="my_retinanet.yaml"
-```
-
-## Train RetinaNet on GPU
-
-Training on GPU is similar to that on TPU. The major change is the strategy
-type (use "[mirrored](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy)" for multiple GPU and
-"[one_device](https://www.tensorflow.org/api_docs/python/tf/distribute/OneDeviceStrategy)" for single GPU).
-
-Multi-GPU example (assuming there are 8 GPUs connected to the host):
-
-```bash
-MODEL_DIR=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=mirrored \
- --num_gpus=8 \
- --model_dir="${MODEL_DIR?}" \
- --mode=train \
- --config_file="my_retinanet.yaml"
-```
-
-Single-GPU example:
-
-```bash
-MODEL_DIR=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=one_device \
- --num_gpus=1 \
- --model_dir="${MODEL_DIR?}" \
- --mode=train \
- --config_file="my_retinanet.yaml"
-```
-
-An example with inline configuration (YAML or JSON format):
-
-```
-python3 ~/models/official/vision/detection/main.py \
- --model_dir= \
- --strategy_type=one_device \
- --num_gpus=1 \
- --mode=train \
- --params_override="eval:
- eval_file_pattern:
- batch_size: 8
- val_json_file:
-predict:
- predict_batch_size: 8
-architecture:
- use_bfloat16: False
-train:
- total_steps: 1
- batch_size: 8
- train_file_pattern:
-use_tpu: False
-"
-```
-
----
-
-## Train Mask R-CNN on TPU
-
-### Train a vanilla ResNet-50 based Mask R-CNN.
-
-```bash
-TPU_NAME=""
-MODEL_DIR=""
-RESNET_CHECKPOINT=""
-TRAIN_FILE_PATTERN=""
-EVAL_FILE_PATTERN=""
-VAL_JSON_FILE=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=tpu \
- --tpu=${TPU_NAME} \
- --model_dir=${MODEL_DIR} \
- --mode=train \
- --model=mask_rcnn \
- --params_override="{train: { checkpoint: { path: ${RESNET_CHECKPOINT}, prefix: resnet50/ }, train_file_pattern: ${TRAIN_FILE_PATTERN} }, eval: { val_json_file: ${VAL_JSON_FILE}, eval_file_pattern: ${EVAL_FILE_PATTERN} } }"
-```
-
-The pre-trained ResNet-50 checkpoint can be downloaded [here](https://storage.cloud.google.com/cloud-tpu-checkpoints/model-garden-vision/detection/resnet50-2018-02-07.tar.gz).
-
-Note: The ResNet implementation under
-[detection/](https://github.com/tensorflow/models/tree/master/official/vision/detection)
-is currently different from the one under
-[classification/](https://github.com/tensorflow/models/tree/master/official/vision/image_classification),
-so the checkpoints are not compatible.
-We will unify the implementation soon.
-
-
-### Train a custom Mask R-CNN using the config file.
-
-First, create a YAML config file, e.g. *my_maskrcnn.yaml*.
-This file specifies the parameters to be overridden,
-which should at least include the following fields.
-
-```YAML
-# my_maskrcnn.yaml
-train:
- train_file_pattern:
-eval:
- eval_file_pattern:
- val_json_file:
-```
-
-Once the YAML config file is created, you can launch the training using the
-following command.
-
-```bash
-TPU_NAME=""
-MODEL_DIR=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=tpu \
- --tpu=${TPU_NAME} \
- --model_dir=${MODEL_DIR} \
- --mode=train \
- --model=mask_rcnn \
- --config_file="my_maskrcnn.yaml"
-```
-
-## Train Mask R-CNN on GPU
-
-Training on GPU is similar to that on TPU. The major change is the strategy type
-(use
-"[mirrored](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy)"
-for multiple GPUs and
-"[one_device](https://www.tensorflow.org/api_docs/python/tf/distribute/OneDeviceStrategy)"
-for a single GPU).
-
-Multi-GPU example (assuming there are 8 GPUs connected to the host):
-
-```bash
-MODEL_DIR=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=mirrored \
- --num_gpus=8 \
- --model_dir=${MODEL_DIR} \
- --mode=train \
- --model=mask_rcnn \
- --config_file="my_maskrcnn.yaml"
-```
-
-Single-GPU example:
-
-```bash
-MODEL_DIR=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=one_device \
- --num_gpus=1 \
- --model_dir=${MODEL_DIR} \
- --mode=train \
- --model=mask_rcnn \
- --config_file="my_maskrcnn.yaml"
-```
-
-An example with inline configuration (YAML or JSON format):
-
-```
-python3 ~/models/official/vision/detection/main.py \
- --model_dir= \
- --strategy_type=one_device \
- --num_gpus=1 \
- --mode=train \
- --model=mask_rcnn \
- --params_override="eval:
- eval_file_pattern:
- batch_size: 8
- val_json_file:
-predict:
- predict_batch_size: 8
-architecture:
- use_bfloat16: False
-train:
- total_steps: 1000
- batch_size: 8
- train_file_pattern:
-use_tpu: False
-"
-```
-
-## Train ShapeMask on TPU
-
-### Train a ResNet-50 based ShapeMask.
-
-```bash
-TPU_NAME=""
-MODEL_DIR=""
-RESNET_CHECKPOINT=""
-TRAIN_FILE_PATTERN=""
-EVAL_FILE_PATTERN=""
-VAL_JSON_FILE=""
-SHAPE_PRIOR_PATH=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=tpu \
- --tpu=${TPU_NAME} \
- --model_dir=${MODEL_DIR} \
- --mode=train \
- --model=shapemask \
- --params_override="{train: { checkpoint: { path: ${RESNET_CHECKPOINT}, prefix: resnet50/ }, train_file_pattern: ${TRAIN_FILE_PATTERN} }, eval: { val_json_file: ${VAL_JSON_FILE}, eval_file_pattern: ${EVAL_FILE_PATTERN} } shapemask_head: {use_category_for_mask: true, shape_prior_path: ${SHAPE_PRIOR_PATH}} }"
-```
-
-The pre-trained ResNet-50 checkpoint can be downloaded [here](https://storage.cloud.google.com/cloud-tpu-checkpoints/model-garden-vision/detection/resnet50-2018-02-07.tar.gz).
-
-The shape priors can be downloaded [here](https://storage.googleapis.com/cloud-tpu-checkpoints/shapemask/kmeans_class_priors_91x20x32x32.npy).
-
-
-### Train a custom ShapeMask using the config file.
-
-First, create a YAML config file, e.g. *my_shapemask.yaml*.
-This file specifies the parameters to be overridden:
-
-```YAML
-# my_shapemask.yaml
-train:
- train_file_pattern:
- total_steps:
- batch_size:
-eval:
- eval_file_pattern:
- val_json_file:
- batch_size:
-shapemask_head:
- shape_prior_path:
-```
-
-Once the YAML config file is created, you can launch the training using the
-following command.
-
-```bash
-TPU_NAME=""
-MODEL_DIR=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=tpu \
- --tpu=${TPU_NAME} \
- --model_dir=${MODEL_DIR} \
- --mode=train \
- --model=shapemask \
- --config_file="my_shapemask.yaml"
-```
-
-## Train ShapeMask on GPU
-
-Training on GPU is similar to that on TPU. The major change is the strategy type
-(use
-"[mirrored](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy)"
-for multiple GPUs and
-"[one_device](https://www.tensorflow.org/api_docs/python/tf/distribute/OneDeviceStrategy)"
-for a single GPU).
-
-Multi-GPU example (assuming there are 8 GPUs connected to the host):
-
-```bash
-MODEL_DIR=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=mirrored \
- --num_gpus=8 \
- --model_dir=${MODEL_DIR} \
- --mode=train \
- --model=shapemask \
- --config_file="my_shapemask.yaml"
-```
-
-A single-GPU example:
-
-```bash
-MODEL_DIR=""
-python3 ~/models/official/vision/detection/main.py \
- --strategy_type=one_device \
- --num_gpus=1 \
- --model_dir=${MODEL_DIR} \
- --mode=train \
- --model=shapemask \
- --config_file="my_shapemask.yaml"
-```
-
-
-An example with inline configuration (YAML or JSON format):
-
-```
-python3 ~/models/official/vision/detection/main.py \
- --model_dir= \
- --strategy_type=one_device \
- --num_gpus=1 \
- --mode=train \
- --model=shapemask \
- --params_override="eval:
- eval_file_pattern:
- batch_size: 8
- val_json_file:
-train:
- total_steps: 1000
- batch_size: 8
- train_file_pattern:
-use_tpu: False
-"
-```
-
-
-### Run the evaluation (after training)
-
-```
-python3 /usr/share/models/official/vision/detection/main.py \
- --strategy_type=tpu \
- --tpu=${TPU_NAME} \
- --model_dir=${MODEL_DIR} \
- --mode=eval \
- --model=shapemask \
- --params_override="{eval: { val_json_file: ${VAL_JSON_FILE}, eval_file_pattern: ${EVAL_FILE_PATTERN}, eval_samples: 5000 } }"
-```
-
-`MODEL_DIR` needs to point to the directory containing the trained ShapeMask model.
-Change `strategy_type=mirrored` and `num_gpus=1` to run on a GPU.
-
-Note: The JSON groundtruth file is useful for the [COCO dataset](http://cocodataset.org/#home) and can be
-downloaded from the [COCO website](http://cocodataset.org/#download). For custom datasets, it is unnecessary because the groundtruth can be included in the TFRecord files.
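-
-If you do need it (for example for COCO-style evaluation), one illustrative way to fetch
-the 2017 annotation archive, which contains `instances_val2017.json`, is:
-
-```bash
-wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
-unzip annotations_trainval2017.zip  # annotations/instances_val2017.json can then be used as VAL_JSON_FILE
-```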
-
-## References
-
-1. [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
- Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dollár. IEEE
- International Conference on Computer Vision (ICCV), 2017.
diff --git a/spaces/NMEX/rvc-hoyo-game/infer_pack/models_onnx.py b/spaces/NMEX/rvc-hoyo-game/infer_pack/models_onnx.py
deleted file mode 100644
index 3c5be53a572151820de7d82dfce84f2e2979ed56..0000000000000000000000000000000000000000
--- a/spaces/NMEX/rvc-hoyo-game/infer_pack/models_onnx.py
+++ /dev/null
@@ -1,760 +0,0 @@
-import math
-
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-
-from infer_pack import modules
-from infer_pack import attentions
-from infer_pack import commons
-from infer_pack.commons import init_weights, get_padding
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder256Sim(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- x = self.proj(x) * x_mask
- return x, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
- sine_amp: amplitude of sine waveform (default 0.1)
- noise_std: std of Gaussian noise (default 0.003)
- voiced_threshold: F0 threshold for U/V classification (default 0)
- flag_for_pulse: this SineGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
- rad_values = (f0_buf / self.sampling_rate) % 1  # taking mod 1 here means the n_har harmonic products cannot be optimized away in post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
- tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # applying mod 1 here would prevent the following cumsum from being optimized
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(
- 2, 1
- )
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
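-# Illustrative usage of SineGen (shapes inferred from the forward pass above; the
-# numbers are arbitrary, not taken from any config):
-#   gen = SineGen(samp_rate=40000, harmonic_num=0)
-#   f0 = torch.full((1, 100), 220.0)    # (batch, frames); 0 marks unvoiced frames
-#   sine, uv, noise = gen(f0, upp=400)  # upp = prod(upsample_rates), samples per frame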
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonic above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that the amplitude of noise in unvoiced segments is decided
- by sine_amp
- voiced_threshold: threshold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length, 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMs256NSFsidO(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
- if type(sr) == type("strr"):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
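- # inference-only path (used for ONNX export): encode the content features, sample the prior, run the flow in reverse, and decode with the f0-driven NSF generator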
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
diff --git a/spaces/NeuralInternet/Text-to-Video_Playground/README.md b/spaces/NeuralInternet/Text-to-Video_Playground/README.md
deleted file mode 100644
index 6e0d29c9d418f1a03ee2af1acc6f5c0e19debc22..0000000000000000000000000000000000000000
--- a/spaces/NeuralInternet/Text-to-Video_Playground/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Text-to-Video Playground
-emoji: 🚀
-colorFrom: pink
-colorTo: pink
-sdk: gradio
-sdk_version: 3.22.1
-app_file: app.py
-pinned: false
-duplicated_from: damo-vilab/modelscope-text-to-video-synthesis
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Nightwing25/AICoverGen/src/infer_pack/transforms.py b/spaces/Nightwing25/AICoverGen/src/infer_pack/transforms.py
deleted file mode 100644
index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000
--- a/spaces/Nightwing25/AICoverGen/src/infer_pack/transforms.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
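- # Return, for each input, the index of the bin (along the last dim) it falls into:
- # count how many bin edges are <= the input and subtract one. The eps nudges the
- # last edge upward so inputs exactly on the top boundary land in the final bin.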
- bin_locations[..., -1] += eps
- return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
-
-
-def unconstrained_rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails="linear",
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == "linear":
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError("{} tails are not implemented.".format(tails))
-
- (
- outputs[inside_interval_mask],
- logabsdet[inside_interval_mask],
- ) = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound,
- right=tail_bound,
- bottom=-tail_bound,
- top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- )
-
- return outputs, logabsdet
-
-
-def rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0.0,
- right=1.0,
- bottom=0.0,
- top=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError("Input to a transform is not within its domain")
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError("Minimal bin width too large for the number of bins")
- if min_bin_height * num_bins > 1.0:
- raise ValueError("Minimal bin height too large for the number of bins")
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- ) + input_heights * (input_delta - input_derivatives)
- b = input_heights * input_derivatives - (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- )
- c = -input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (
- input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
- )
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py
deleted file mode 100644
index 94bd71fb9c46a64a8b6e1960f47dfc43b78dda43..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
-
-from . import build_monotonic_attention
-
-from typing import Dict, Optional, List
-
-from torch import Tensor
-import torch
-
-
-class TransformerMonotonicEncoderLayer(TransformerEncoderLayer):
- def forward(self, x, encoder_padding_mask):
- seq_len, _, _ = x.size()
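- # Build a strictly upper-triangular mask filled with -inf so every position attends
- # only to itself and earlier positions, i.e. the source is encoded causally, which is
- # what allows incremental (simultaneous) encoding of a growing input prefix.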
- attn_mask = x.new_ones([seq_len, seq_len]).triu(1)
- attn_mask = attn_mask.masked_fill(attn_mask.bool(), float("-inf"))
- return super().forward(x, encoder_padding_mask, attn_mask)
-
-
-class TransformerMonotonicDecoderLayer(TransformerDecoderLayer):
- def __init__(self, args):
- super().__init__(args)
-
- assert args.simul_type is not None, "A --simul-type is needed."
- self.encoder_attn = build_monotonic_attention(args)
-
- def prune_incremental_state(
- self,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
- ):
- input_buffer = self.self_attn._get_input_buffer(incremental_state)
- for key in ["prev_key", "prev_value"]:
- input_buffer_key = input_buffer[key]
- assert input_buffer_key is not None
- if input_buffer_key.size(2) > 1:
- input_buffer[key] = input_buffer_key[:, :, :-1, :]
- else:
- typed_empty_dict: Dict[str, Optional[Tensor]] = {}
- input_buffer = typed_empty_dict
- break
- assert incremental_state is not None
- self.self_attn._set_input_buffer(incremental_state, input_buffer)
-
- def forward(
- self,
- x,
- encoder_out: Optional[Tensor] = None,
- encoder_padding_mask: Optional[Tensor] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- prev_self_attn_state: Optional[List[Tensor]] = None,
- prev_attn_state: Optional[List[Tensor]] = None,
- self_attn_mask: Optional[Tensor] = None,
- self_attn_padding_mask: Optional[Tensor] = None,
- need_attn: bool = False,
- need_head_weights: bool = False,
- ):
- """
- Args:
- x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
- encoder_padding_mask (ByteTensor, optional): binary
- ByteTensor of shape `(batch, src_len)` where padding
- elements are indicated by ``1``.
- need_attn (bool, optional): return attention weights
- need_head_weights (bool, optional): return attention weights
- for each head (default: return average over heads).
-
- Returns:
- encoded output of shape `(seq_len, batch, embed_dim)`
- """
- if need_head_weights:
- need_attn = True
-
- residual = x
- if self.normalize_before:
- x = self.self_attn_layer_norm(x)
- if prev_self_attn_state is not None:
- prev_key, prev_value = prev_self_attn_state[:2]
- saved_state: Dict[str, Optional[Tensor]] = {
- "prev_key": prev_key,
- "prev_value": prev_value,
- }
- if len(prev_self_attn_state) >= 3:
- saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
- assert incremental_state is not None
- self.self_attn._set_input_buffer(incremental_state, saved_state)
- _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
- if self.cross_self_attention and not (
- incremental_state is not None
- and _self_attn_input_buffer is not None
- and "prev_key" in _self_attn_input_buffer
- ):
- if self_attn_mask is not None:
- assert encoder_out is not None
- self_attn_mask = torch.cat(
- (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
- )
- if self_attn_padding_mask is not None:
- if encoder_padding_mask is None:
- assert encoder_out is not None
- encoder_padding_mask = self_attn_padding_mask.new_zeros(
- encoder_out.size(1), encoder_out.size(0)
- )
- self_attn_padding_mask = torch.cat(
- (encoder_padding_mask, self_attn_padding_mask), dim=1
- )
- assert encoder_out is not None
- y = torch.cat((encoder_out, x), dim=0)
- else:
- y = x
-
- x, attn = self.self_attn(
- query=x,
- key=y,
- value=y,
- key_padding_mask=self_attn_padding_mask,
- incremental_state=incremental_state,
- need_weights=False,
- attn_mask=self_attn_mask,
- )
- x = self.dropout_module(x)
- x = self.residual_connection(x, residual)
- if not self.normalize_before:
- x = self.self_attn_layer_norm(x)
-
- assert self.encoder_attn is not None
- residual = x
- if self.normalize_before:
- x = self.encoder_attn_layer_norm(x)
- if prev_attn_state is not None:
- prev_key, prev_value = prev_attn_state[:2]
- saved_state: Dict[str, Optional[Tensor]] = {
- "prev_key": prev_key,
- "prev_value": prev_value,
- }
- if len(prev_attn_state) >= 3:
- saved_state["prev_key_padding_mask"] = prev_attn_state[2]
- assert incremental_state is not None
- self.encoder_attn._set_input_buffer(incremental_state, saved_state)
-
- x, attn = self.encoder_attn(
- query=x,
- key=encoder_out,
- value=encoder_out,
- key_padding_mask=encoder_padding_mask,
- incremental_state=incremental_state,
- static_kv=True,
- need_weights=need_attn or (not self.training and self.need_attn),
- need_head_weights=need_head_weights,
- )
- x = self.dropout_module(x)
- x = self.residual_connection(x, residual)
- if not self.normalize_before:
- x = self.encoder_attn_layer_norm(x)
-
- residual = x
- if self.normalize_before:
- x = self.final_layer_norm(x)
-
- x = self.activation_fn(self.fc1(x))
- x = self.activation_dropout_module(x)
- x = self.fc2(x)
- x = self.dropout_module(x)
- x = self.residual_connection(x, residual)
- if not self.normalize_before:
- x = self.final_layer_norm(x)
- if self.onnx_trace and incremental_state is not None:
- saved_state = self.self_attn._get_input_buffer(incremental_state)
- assert saved_state is not None
- if self_attn_padding_mask is not None:
- self_attn_state = [
- saved_state["prev_key"],
- saved_state["prev_value"],
- saved_state["prev_key_padding_mask"],
- ]
- else:
- self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
- return x, attn, self_attn_state
- return x, attn, None
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/cmudict.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/cmudict.py
deleted file mode 100644
index 62bfef745c30a56f7b6605d9e3becfbc40edb50d..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/cmudict.py
+++ /dev/null
@@ -1,65 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-import re
-
-
-valid_symbols = [
- 'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
- 'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
- 'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
- 'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
- 'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
- 'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
- 'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
-]
-
-_valid_symbol_set = set(valid_symbols)
-
-
-class CMUDict:
- '''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
- def __init__(self, file_or_path, keep_ambiguous=True):
- if isinstance(file_or_path, str):
- with open(file_or_path, encoding='latin-1') as f:
- entries = _parse_cmudict(f)
- else:
- entries = _parse_cmudict(file_or_path)
- if not keep_ambiguous:
- entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
- self._entries = entries
-
-
- def __len__(self):
- return len(self._entries)
-
-
- def lookup(self, word):
- '''Returns list of ARPAbet pronunciations of the given word.'''
- return self._entries.get(word.upper())
-
-
-
-_alt_re = re.compile(r'\([0-9]+\)')
-
-
-def _parse_cmudict(file):
- cmudict = {}
- for line in file:
- if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
- parts = line.split(' ')
- word = re.sub(_alt_re, '', parts[0])
- pronunciation = _get_pronunciation(parts[1])
- if pronunciation:
- if word in cmudict:
- cmudict[word].append(pronunciation)
- else:
- cmudict[word] = [pronunciation]
- return cmudict
-
-
-def _get_pronunciation(s):
- parts = s.strip().split(' ')
- for part in parts:
- if part not in _valid_symbol_set:
- return None
- return ' '.join(parts)
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/fairseq_dropout.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/fairseq_dropout.py
deleted file mode 100644
index 3cddca77186f5ddd5cfb9c0ed6def9bafdf3bf1e..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/fairseq_dropout.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-from typing import List, Optional
-
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-logger = logging.getLogger(__name__)
-
-
-class FairseqDropout(nn.Module):
- def __init__(self, p, module_name=None):
- super().__init__()
- self.p = p
- self.module_name = module_name
- self.apply_during_inference = False
-
- def forward(self, x, inplace: bool = False):
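- # Dropout is active in training mode and, if apply_during_inference has been enabled
- # via make_generation_fast_(retain_dropout=True, ...), at inference time as well;
- # otherwise the input is returned unchanged.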
- if self.p > 0 and (self.training or self.apply_during_inference):
- return F.dropout(x, p=self.p, training=True, inplace=inplace)
- else:
- return x
-
- def make_generation_fast_(
- self,
- name: str,
- retain_dropout: bool = False,
- retain_dropout_modules: Optional[List[str]] = None,
- **kwargs
- ):
- if retain_dropout:
- if retain_dropout_modules is not None and self.module_name is None:
- logger.warning(
- "Cannot enable dropout during inference for module {} "
- "because module_name was not set".format(name)
- )
- elif (
- retain_dropout_modules is None # if None, apply to all modules
- or self.module_name in retain_dropout_modules
- ):
- logger.info(
- "Enabling dropout during inference for module: {}".format(name)
- )
- self.apply_during_inference = True
- else:
- logger.info("Disabling dropout for module: {}".format(name))
diff --git a/spaces/Omdena-Milan/milan-chapter-agrifoods/app.py b/spaces/Omdena-Milan/milan-chapter-agrifoods/app.py
deleted file mode 100644
index 100c8f3f3090e8a230d4ba24df6e169ac0ae4a78..0000000000000000000000000000000000000000
--- a/spaces/Omdena-Milan/milan-chapter-agrifoods/app.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import streamlit as st
-from PIL import Image
-import folium
-from streamlit_folium import st_folium
-
-st.set_page_config(layout="centered")
-image = Image.open('data/logo.png')
-image=image.resize((100,100))
-APP_SUB_TITLE = "by Omdena Milan chapter 👇 (https://omdena.com/local-chapters/milan-italy-chapter/)"
-with st.sidebar:
- logo = st.image(image)
- st.caption(APP_SUB_TITLE)
-
-
-st.title("AI for sustainable agriculture and food systems:Use of Satellite Imagery")
-
-st.markdown('''Over five weeks in October 2022, Omdena-Milan Local Chapter collaborators completed
-a local chapter challenge on applying Artificial Intelligence (AI) and satellite imagery for
-sustainable agri-food systems. To this end, the collaborators explored various machine learning (ML)
-and data science techniques and geographic information systems (GIS) methods. The team worked on
-several tasks, independent of one another but all addressing the same project goal. For demonstration,
-the team integrated all of them into one dashboard with good usability for non-technical decision-makers.
-''')
-
-
diff --git a/spaces/Omnibus/MusicGen/Makefile b/spaces/Omnibus/MusicGen/Makefile
deleted file mode 100644
index 5bfd89dd833d7448b21073eb6ee7cfac1d5157dd..0000000000000000000000000000000000000000
--- a/spaces/Omnibus/MusicGen/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-default: linter tests
-
-install:
- pip install -U pip
- pip install -U -e '.[dev]'
-
-linter:
- flake8 audiocraft && mypy audiocraft
- flake8 tests && mypy tests
-
-tests:
- coverage run -m pytest tests
- coverage report --include 'audiocraft/*'
-
-docs:
- pdoc3 --html -o docs -f audiocraft
-
-dist:
- python setup.py sdist
-
-.PHONY: linter tests docs dist
diff --git a/spaces/Omnibus/MusicGen/README.md b/spaces/Omnibus/MusicGen/README.md
deleted file mode 100644
index 3e22c7564834405991e00c12c99716b4d6beae9a..0000000000000000000000000000000000000000
--- a/spaces/Omnibus/MusicGen/README.md
+++ /dev/null
@@ -1,140 +0,0 @@
----
-title: "MusicGen"
-python_version: "3.9"
-tags:
- - "music generation"
- - "language models"
- - "LLMs"
-app_file: "app.py"
-emoji: 🎵
-colorFrom: white
-colorTo: blue
-sdk: gradio
-sdk_version: 3.34.0
-pinned: true
-license: "cc-by-nc-4.0"
----
-# Audiocraft
-
-
-
-
-Audiocraft is a PyTorch library for deep learning research on audio generation. At the moment, it contains the code for MusicGen, a state-of-the-art controllable text-to-music model.
-
-## MusicGen
-
-Audiocraft provides the code and models for MusicGen, [a simple and controllable model for music generation][arxiv]. MusicGen is a single-stage auto-regressive
-Transformer model trained over a 32kHz EnCodec tokenizer with 4 codebooks sampled at 50 Hz. Unlike existing methods like [MusicLM](https://arxiv.org/abs/2301.11325), MusicGen doesn't require a self-supervised semantic representation, and it generates
-all 4 codebooks in one pass. By introducing a small delay between the codebooks, we show we can predict
-them in parallel, thus having only 50 auto-regressive steps per second of audio.
-Check out our [sample page][musicgen_samples] or test the available demo!
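-
-Below is a rough, illustrative sketch of the codebook delay idea (this is not the Audiocraft implementation; the function name and tensor shapes are assumptions made purely for illustration): each codebook stream is shifted right by its index, so the model can emit one token per codebook at every step while each codebook still conditions on the ones before it.
-
-```python
-# Hypothetical sketch of a codebook delay pattern; not part of the Audiocraft API.
-import torch
-
-def apply_delay_pattern(codes: torch.Tensor, pad_id: int = -1) -> torch.Tensor:
-    """Shift codebook k right by k steps.
-
-    codes: (num_codebooks, num_frames) integer token ids.
-    Returns a (num_codebooks, num_frames + num_codebooks - 1) tensor padded with pad_id.
-    """
-    k, t = codes.shape
-    out = torch.full((k, t + k - 1), pad_id, dtype=codes.dtype)
-    for i in range(k):
-        out[i, i:i + t] = codes[i]
-    return out
-
-codes = torch.arange(8).reshape(4, 2)  # 4 codebooks, 2 frames of tokens
-print(apply_delay_pattern(codes))      # each row is delayed by one extra step
-```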
-
-
-
-
-
-
-
-
-
-We use 20K hours of licensed music to train MusicGen. Specifically, we rely on an internal dataset of 10K high-quality music tracks, and on the ShutterStock and Pond5 music data.
-
-## Installation
-Audiocraft requires Python 3.9, PyTorch 2.0.0, and a GPU with at least 16 GB of memory (for the medium-sized model). To install Audiocraft, you can run the following:
-
-```shell
-# Best to make sure you have torch installed first, in particular before installing xformers.
-# Don't run this if you already have PyTorch installed.
-pip install 'torch>=2.0'
-# Then proceed to one of the following
-pip install -U audiocraft # stable release
-pip install -U git+https://git@github.com/facebookresearch/audiocraft#egg=audiocraft # bleeding edge
-pip install -e . # or if you cloned the repo locally
-```
-
-## Usage
-We offer a number of ways to interact with MusicGen:
-1. A demo is available on the [`facebook/MusicGen` HuggingFace Space](https://huggingface.co/spaces/facebook/MusicGen) (huge thanks to all the HF team for their support).
-2. You can run the Gradio demo in Colab: [colab notebook](https://colab.research.google.com/drive/1fxGqfg96RBUvGxZ1XXN07s3DthrKUl4-?usp=sharing).
-3. You can use the gradio demo locally by running `python app.py`.
-4. You can play with MusicGen by running the jupyter notebook at [`demo.ipynb`](./demo.ipynb) locally (if you have a GPU).
-5. Finally, checkout [@camenduru Colab page](https://github.com/camenduru/MusicGen-colab) which is regularly
- updated with contributions from @camenduru and the community.
-
-## API
-
-We provide a simple API and 4 pre-trained models. The pre-trained models are:
-- `small`: 300M model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-small)
-- `medium`: 1.5B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-medium)
-- `melody`: 1.5B model, text to music and text+melody to music - [🤗 Hub](https://huggingface.co/facebook/musicgen-melody)
-- `large`: 3.3B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-large)
-
-We observe the best trade-off between quality and compute with the `medium` or `melody` model.
-In order to use MusicGen locally **you must have a GPU**. We recommend 16GB of memory, but smaller
-GPUs will be able to generate short sequences, or longer sequences with the `small` model.
-
-**Note**: Please make sure to have [ffmpeg](https://ffmpeg.org/download.html) installed when using newer version of `torchaudio`.
-You can install it with:
-```
-apt-get install ffmpeg
-```
-
-See below for a quick example of using the API.
-
-```python
-import torchaudio
-from audiocraft.models import MusicGen
-from audiocraft.data.audio import audio_write
-
-model = MusicGen.get_pretrained('melody')
-model.set_generation_params(duration=8) # generate 8 seconds.
-wav = model.generate_unconditional(4) # generates 4 unconditional audio samples
-descriptions = ['happy rock', 'energetic EDM', 'sad jazz']
-wav = model.generate(descriptions) # generates 3 samples.
-
-melody, sr = torchaudio.load('./assets/bach.mp3')
-# generates using the melody from the given audio and the provided descriptions.
-wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr)
-
-for idx, one_wav in enumerate(wav):
- # Will save under {idx}.wav, with loudness normalization at -14 db LUFS.
- audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True)
-```
-
-
-## Model Card
-
-See [the model card page](./MODEL_CARD.md).
-
-## FAQ
-
-#### Will the training code be released?
-
-Yes. We will soon release the training code for MusicGen and EnCodec.
-
-
-#### I need help on Windows
-
-@FurkanGozukara made a complete tutorial for [Audiocraft/MusicGen on Windows](https://youtu.be/v-YpvPkhdO4)
-
-#### I need help for running the demo on Colab
-
-Check [@camenduru tutorial on Youtube](https://www.youtube.com/watch?v=EGfxuTy9Eeo).
-
-
-## Citation
-```
-@article{copet2023simple,
- title={Simple and Controllable Music Generation},
- author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez},
- year={2023},
- journal={arXiv preprint arXiv:2306.05284},
-}
-```
-
-## License
-* The code in this repository is released under the MIT license as found in the [LICENSE file](LICENSE).
-* The weights in this repository are released under the CC-BY-NC 4.0 license as found in the [LICENSE_weights file](LICENSE_weights).
-
-[arxiv]: https://arxiv.org/abs/2306.05284
-[musicgen_samples]: https://ai.honu.io/papers/musicgen/
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/modeling/test_rpn.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/modeling/test_rpn.py
deleted file mode 100644
index f14faae56e580d3d4762d31273b9f65c5774346b..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/tests/modeling/test_rpn.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-import unittest
-import torch
-
-from detectron2.config import get_cfg
-from detectron2.export import scripting_with_instances
-from detectron2.layers import ShapeSpec
-from detectron2.modeling.backbone import build_backbone
-from detectron2.modeling.proposal_generator import RPN, build_proposal_generator
-from detectron2.modeling.proposal_generator.proposal_utils import (
- add_ground_truth_to_proposals,
- find_top_rpn_proposals,
-)
-from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
-from detectron2.utils.events import EventStorage
-
-logger = logging.getLogger(__name__)
-
-
-class RPNTest(unittest.TestCase):
- def get_gt_and_features(self):
- num_images = 2
- images_tensor = torch.rand(num_images, 20, 30)
- image_sizes = [(10, 10), (20, 30)]
- images = ImageList(images_tensor, image_sizes)
- image_shape = (15, 15)
- num_channels = 1024
- features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
- gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
- gt_instances = Instances(image_shape)
- gt_instances.gt_boxes = Boxes(gt_boxes)
- return (gt_instances, features, images, image_sizes)
-
- def test_rpn(self):
- torch.manual_seed(121)
- cfg = get_cfg()
- backbone = build_backbone(cfg)
- proposal_generator = RPN(cfg, backbone.output_shape())
- (gt_instances, features, images, image_sizes) = self.get_gt_and_features()
- with EventStorage(): # capture events in a new storage to discard them
- proposals, proposal_losses = proposal_generator(
- images, features, [gt_instances[0], gt_instances[1]]
- )
-
- expected_losses = {
- "loss_rpn_cls": torch.tensor(0.08011703193),
- "loss_rpn_loc": torch.tensor(0.101470276),
- }
- for name in expected_losses.keys():
- err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
- name, proposal_losses[name], expected_losses[name]
- )
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
-
- self.assertEqual(len(proposals), len(image_sizes))
- for proposal, im_size in zip(proposals, image_sizes):
- self.assertEqual(proposal.image_size, im_size)
-
- expected_proposal_box = torch.tensor([[0, 0, 10, 10], [7.2702, 0, 10, 10]])
- expected_objectness_logit = torch.tensor([0.1596, -0.0007])
- self.assertTrue(
- torch.allclose(proposals[0].proposal_boxes.tensor, expected_proposal_box, atol=1e-4)
- )
- self.assertTrue(
- torch.allclose(proposals[0].objectness_logits, expected_objectness_logit, atol=1e-4)
- )
-
- def verify_rpn(self, conv_dims, expected_conv_dims):
- torch.manual_seed(121)
- cfg = get_cfg()
- cfg.MODEL.RPN.CONV_DIMS = conv_dims
- backbone = build_backbone(cfg)
- proposal_generator = RPN(cfg, backbone.output_shape())
- for k, conv in enumerate(proposal_generator.rpn_head.conv):
- self.assertEqual(expected_conv_dims[k], conv.out_channels)
- return proposal_generator
-
- def test_rpn_larger_num_convs(self):
- conv_dims = [64, 64, 64, 64, 64]
- proposal_generator = self.verify_rpn(conv_dims, conv_dims)
- (gt_instances, features, images, image_sizes) = self.get_gt_and_features()
- with EventStorage(): # capture events in a new storage to discard them
- proposals, proposal_losses = proposal_generator(
- images, features, [gt_instances[0], gt_instances[1]]
- )
- expected_losses = {
- "loss_rpn_cls": torch.tensor(0.08122821152),
- "loss_rpn_loc": torch.tensor(0.10064548254),
- }
- for name in expected_losses.keys():
- err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
- name, proposal_losses[name], expected_losses[name]
- )
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
-
- def test_rpn_conv_dims_not_set(self):
- conv_dims = [-1, -1, -1]
- expected_conv_dims = [1024, 1024, 1024]
- self.verify_rpn(conv_dims, expected_conv_dims)
-
- def test_rpn_scriptability(self):
- cfg = get_cfg()
- proposal_generator = RPN(cfg, {"res4": ShapeSpec(channels=1024, stride=16)}).eval()
- num_images = 2
- images_tensor = torch.rand(num_images, 30, 40)
- image_sizes = [(32, 32), (30, 40)]
- images = ImageList(images_tensor, image_sizes)
- features = {"res4": torch.rand(num_images, 1024, 1, 2)}
-
- fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
- proposal_generator_ts = scripting_with_instances(proposal_generator, fields)
-
- proposals, _ = proposal_generator(images, features)
- proposals_ts, _ = proposal_generator_ts(images, features)
-
- for proposal, proposal_ts in zip(proposals, proposals_ts):
- self.assertEqual(proposal.image_size, proposal_ts.image_size)
- self.assertTrue(
- torch.equal(proposal.proposal_boxes.tensor, proposal_ts.proposal_boxes.tensor)
- )
- self.assertTrue(torch.equal(proposal.objectness_logits, proposal_ts.objectness_logits))
-
- def test_rrpn(self):
- torch.manual_seed(121)
- cfg = get_cfg()
- cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
- cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
- cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
- cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]]
- cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]]
- cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
- cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
- backbone = build_backbone(cfg)
- proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
- num_images = 2
- images_tensor = torch.rand(num_images, 20, 30)
- image_sizes = [(10, 10), (20, 30)]
- images = ImageList(images_tensor, image_sizes)
- image_shape = (15, 15)
- num_channels = 1024
- features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
- gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
- gt_instances = Instances(image_shape)
- gt_instances.gt_boxes = RotatedBoxes(gt_boxes)
- with EventStorage(): # capture events in a new storage to discard them
- proposals, proposal_losses = proposal_generator(
- images, features, [gt_instances[0], gt_instances[1]]
- )
-
- expected_losses = {
- "loss_rpn_cls": torch.tensor(0.04291602224),
- "loss_rpn_loc": torch.tensor(0.145077362),
- }
- for name in expected_losses.keys():
- err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
- name, proposal_losses[name], expected_losses[name]
- )
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
-
- expected_proposal_box = torch.tensor(
- [
- [-1.77999556, 0.78155339, 68.04367828, 14.78156471, 60.59333801],
- [13.82740974, -1.50282836, 34.67269897, 29.19676590, -3.81942749],
- [8.10392570, -0.99071521, 145.39100647, 32.13126373, 3.67242432],
- [5.00000000, 4.57370186, 10.00000000, 9.14740372, 0.89196777],
- ]
- )
-
- expected_objectness_logit = torch.tensor([0.10924313, 0.09881870, 0.07649877, 0.05858029])
-
- torch.set_printoptions(precision=8, sci_mode=False)
-
- self.assertEqual(len(proposals), len(image_sizes))
-
- proposal = proposals[0]
- # It seems that there's some randomness in the result across different machines:
- # This test can be run on a local machine for 100 times with exactly the same result,
- # However, a different machine might produce slightly different results,
- # thus the atol here.
- err_msg = "computed proposal boxes = {}, expected {}".format(
- proposal.proposal_boxes.tensor, expected_proposal_box
- )
- self.assertTrue(
- torch.allclose(proposal.proposal_boxes.tensor[:4], expected_proposal_box, atol=1e-5),
- err_msg,
- )
-
- err_msg = "computed objectness logits = {}, expected {}".format(
- proposal.objectness_logits, expected_objectness_logit
- )
- self.assertTrue(
- torch.allclose(proposal.objectness_logits[:4], expected_objectness_logit, atol=1e-5),
- err_msg,
- )
-
- def test_find_rpn_proposals_inf(self):
- N, Hi, Wi, A = 3, 3, 3, 3
- proposals = [torch.rand(N, Hi * Wi * A, 4)]
- pred_logits = [torch.rand(N, Hi * Wi * A)]
- pred_logits[0][1][3:5].fill_(float("inf"))
- find_top_rpn_proposals(proposals, pred_logits, [(10, 10)], 0.5, 1000, 1000, 0, False)
-
- def test_find_rpn_proposals_tracing(self):
- N, Hi, Wi, A = 3, 50, 50, 9
- proposal = torch.rand(N, Hi * Wi * A, 4)
- pred_logit = torch.rand(N, Hi * Wi * A)
-
- def func(proposal, logit, image_size):
- r = find_top_rpn_proposals(
- [proposal], [logit], [image_size], 0.7, 1000, 1000, 0, False
- )[0]
- size = r.image_size
- if not isinstance(size, torch.Tensor):
- size = torch.tensor(size)
- return (size, r.proposal_boxes.tensor, r.objectness_logits)
-
- other_inputs = []
- # test that it generalizes to other shapes
- for Hi, Wi, shp in [(30, 30, 60), (10, 10, 800)]:
- other_inputs.append(
- (
- torch.rand(N, Hi * Wi * A, 4),
- torch.rand(N, Hi * Wi * A),
- torch.tensor([shp, shp]),
- )
- )
- torch.jit.trace(
- func, (proposal, pred_logit, torch.tensor([100, 100])), check_inputs=other_inputs
- )
-
- def test_append_gt_to_proposal(self):
- proposals = Instances(
- (10, 10),
- **{
- "proposal_boxes": Boxes(torch.empty((0, 4))),
- "objectness_logits": torch.tensor([]),
- "custom_attribute": torch.tensor([]),
- }
- )
- gt_boxes = Boxes(torch.tensor([[0, 0, 1, 1]]))
-
- self.assertRaises(AssertionError, add_ground_truth_to_proposals, [gt_boxes], [proposals])
-
- gt_instances = Instances((10, 10))
- gt_instances.gt_boxes = gt_boxes
-
- self.assertRaises(
- AssertionError, add_ground_truth_to_proposals, [gt_instances], [proposals]
- )
-
- gt_instances.custom_attribute = torch.tensor([1])
- gt_instances.custom_attribute2 = torch.tensor([1])
- new_proposals = add_ground_truth_to_proposals([gt_instances], [proposals])[0]
-
- self.assertEqual(new_proposals.custom_attribute[0], 1)
- # new proposals should only include the attributes in proposals
- self.assertRaises(AttributeError, lambda: new_proposals.custom_attribute2)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/spaces/OptimalScale/Robin-33b/lmflow/pipeline/utils/raft_trainer.py b/spaces/OptimalScale/Robin-33b/lmflow/pipeline/utils/raft_trainer.py
deleted file mode 100644
index 0b69fa9fb2fa7abf6826eb8ec690b43503b12107..0000000000000000000000000000000000000000
--- a/spaces/OptimalScale/Robin-33b/lmflow/pipeline/utils/raft_trainer.py
+++ /dev/null
@@ -1,3782 +0,0 @@
-import contextlib
-import functools
-import glob
-import inspect
-import math
-import os
-import random
-import re
-import shutil
-import sys
-import time
-import warnings
-from collections.abc import Mapping
-from distutils.util import strtobool
-from pathlib import Path
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
-
-from tqdm.auto import tqdm
-
-
-# Integrations must be imported before ML frameworks:
-# isort: off
-from transformers.integrations import (
- default_hp_search_backend,
- get_reporting_integration_callbacks,
- hp_params,
- is_fairscale_available,
- is_optuna_available,
- is_ray_tune_available,
- is_sigopt_available,
- is_wandb_available,
- run_hp_search_optuna,
- run_hp_search_ray,
- run_hp_search_sigopt,
- run_hp_search_wandb,
-)
-
-# isort: on
-
-import numpy as np
-import torch
-import torch.distributed as dist
-from huggingface_hub import Repository, create_repo
-from packaging import version
-from torch import nn
-from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
-from torch.utils.data.distributed import DistributedSampler
-
-
-from transformers.configuration_utils import PretrainedConfig
-from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
-from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
-from transformers.deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
-from transformers.dependency_versions_check import dep_version_check
-from transformers.modelcard import TrainingSummary
-from transformers.modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model
-from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES
-from transformers.optimization import Adafactor, get_scheduler
-from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_10, is_torch_less_than_1_11
-from transformers.tokenization_utils_base import PreTrainedTokenizerBase
-from transformers.trainer_callback import (
- CallbackHandler,
- DefaultFlowCallback,
- PrinterCallback,
- ProgressCallback,
- TrainerCallback,
- TrainerControl,
- TrainerState,
-)
-from transformers.trainer_pt_utils import (
- DistributedLengthGroupedSampler,
- DistributedSamplerWithLoop,
- DistributedTensorGatherer,
- IterableDatasetShard,
- LabelSmoother,
- LengthGroupedSampler,
- SequentialDistributedSampler,
- ShardSampler,
- distributed_broadcast_scalars,
- distributed_concat,
- find_batch_size,
- get_module_class_from_name,
- get_parameter_names,
- nested_concat,
- nested_detach,
- nested_numpify,
- nested_truncate,
- nested_xla_mesh_reduce,
- reissue_pt_warnings,
-)
-from transformers.trainer_utils import (
- PREFIX_CHECKPOINT_DIR,
- BestRun,
- EvalLoopOutput,
- EvalPrediction,
- FSDPOption,
- HPSearchBackend,
- HubStrategy,
- IntervalStrategy,
- PredictionOutput,
- RemoveColumnsCollator,
- ShardedDDPOption,
- TrainerMemoryTracker,
- TrainOutput,
- default_compute_objective,
- default_hp_space,
- denumpify_detensorize,
- enable_full_determinism,
- find_executable_batch_size,
- get_last_checkpoint,
- has_length,
- number_of_arguments,
- seed_worker,
- set_seed,
- speed_metrics,
-)
-from transformers.training_args import OptimizerNames, ParallelMode, TrainingArguments
-from transformers.utils import (
- CONFIG_NAME,
- WEIGHTS_INDEX_NAME,
- WEIGHTS_NAME,
- can_return_loss,
- find_labels,
- get_full_repo_name,
- is_accelerate_available,
- is_apex_available,
- is_datasets_available,
- is_in_notebook,
- is_ipex_available,
- is_sagemaker_dp_enabled,
- is_sagemaker_mp_enabled,
- is_torch_compile_available,
- is_torch_neuroncore_available,
- is_torch_tpu_available,
- logging,
-)
-from transformers.utils.generic import ContextManagers
-
-
-_is_native_cpu_amp_available = is_torch_greater_or_equal_than_1_10
-
-DEFAULT_CALLBACKS = [DefaultFlowCallback]
-DEFAULT_PROGRESS_CALLBACK = ProgressCallback
-
-if is_in_notebook():
- from transformers.utils.notebook import NotebookProgressCallback
-
- DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
-
-if is_apex_available():
- from apex import amp
-
-if is_datasets_available():
- import datasets
-
-if is_torch_tpu_available(check_device=False):
- import torch_xla.core.xla_model as xm
- import torch_xla.debug.metrics as met
- import torch_xla.distributed.parallel_loader as pl
-
-if is_fairscale_available():
- dep_version_check("fairscale")
- import fairscale
- from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
- from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
- from fairscale.nn.wrap import auto_wrap
- from fairscale.optim import OSS
- from fairscale.optim.grad_scaler import ShardedGradScaler
-
-
-if is_sagemaker_mp_enabled():
- import smdistributed.modelparallel.torch as smp
- from smdistributed.modelparallel import __version__ as SMP_VERSION
-
- IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10")
-
- from transformers.trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
-else:
- IS_SAGEMAKER_MP_POST_1_10 = False
-
-
-skip_first_batches = None
-
-
-
-logger = logging.get_logger(__name__)
-
-
-# Name of the files used for checkpointing
-TRAINING_ARGS_NAME = "training_args.bin"
-TRAINER_STATE_NAME = "trainer_state.json"
-OPTIMIZER_NAME = "optimizer.pt"
-SCHEDULER_NAME = "scheduler.pt"
-SCALER_NAME = "scaler.pt"
-
-
-class RaftTrainer:
- """
- Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
- Args:
- model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):
- The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.
-
- [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use
- your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers
- models.
-
- args ([`TrainingArguments`], *optional*):
- The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the
- `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
- data_collator (`DataCollator`, *optional*):
- The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will
- default to [`default_data_collator`] if no `tokenizer` is provided, an instance of
- [`DataCollatorWithPadding`] otherwise.
- train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*):
- The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the
- `model.forward()` method are automatically removed.
- Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
- distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a
- `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
- manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
- sets the seed of the RNGs used.
- eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]), *optional*):
- The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
- `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
- dataset prepending the dictionary key to the metric name.
- tokenizer ([`PreTrainedTokenizerBase`], *optional*):
- The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
- maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
- interrupted training or reuse the fine-tuned model.
- model_init (`Callable[[], PreTrainedModel]`, *optional*):
- A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
- from a new instance of the model as given by this function.
- The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object, to
- be able to choose different architectures according to hyperparameters (such as layer count, sizes of
- inner layers, dropout probabilities, etc.).
- compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
- The function that will be used to compute metrics at evaluation. Must take an [`EvalPrediction`] and return
- a dictionary mapping metric names (strings) to metric values.
- callbacks (List of [`TrainerCallback`], *optional*):
- A list of callbacks to customize the training loop. Will add those to the list of default callbacks
- detailed in [here](callback).
- If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
- optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple
- containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model
- and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
- preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
- A function that preprocesses the logits right before caching them at each evaluation step. Must take two
- tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
- by this function will be reflected in the predictions received by `compute_metrics`.
- Note that the labels (second parameter) will be `None` if the dataset does not have them.
- Important attributes:
- - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
- subclass.
- - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
- original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
- the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
- model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
- - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
- data parallelism, this means some of the model layers are split on different GPUs).
- - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
- to `False` if model parallelism or DeepSpeed is used, or if the default
- `TrainingArguments.place_model_on_device` is overridden to return `False`.
- - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
- in `train`)
- """
-
- from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
-
- def __init__(
- self,
- model: Union[PreTrainedModel, nn.Module] = None,
- args: TrainingArguments = None,
- data_collator: Optional[DataCollator] = None,
- train_dataset: Optional[Dataset] = None,
- eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
- tokenizer: Optional[PreTrainedTokenizerBase] = None,
- model_init: Optional[Callable[[], PreTrainedModel]] = None,
- compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
- callbacks: Optional[List[TrainerCallback]] = None,
- optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
- preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
- ):
- ############
- self.save_counter = 0
- ##############
- if args is None:
- output_dir = "tmp_trainer"
- logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
- args = TrainingArguments(output_dir=output_dir)
- self.args = args
- # Seed must be set before instantiating the model when using model
- enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
- self.hp_name = None
- self.deepspeed = None
- self.is_in_train = False
-
- # memory metrics - must set up as early as possible
- self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
- self._memory_tracker.start()
-
- # set the correct log level depending on the node
- log_level = args.get_process_log_level()
- logging.set_verbosity(log_level)
-
- # force device and distributed setup init explicitly
- args._setup_devices
-
- if model is None:
- if model_init is not None:
- self.model_init = model_init
- model = self.call_model_init()
- else:
- raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
- else:
- if model_init is not None:
- warnings.warn(
- "`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will"
- " overwrite your model when calling the `train` method. This will become a fatal error in the next"
- " release.",
- FutureWarning,
- )
- self.model_init = model_init
-
- if model.__class__.__name__ in MODEL_MAPPING_NAMES:
- raise ValueError(
- f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only "
- "computes hidden states and does not accept any labels. You should choose a model with a head "
- "suitable for your task like any of the `AutoModelForXxx` listed at "
- "https://huggingface.co/docs/transformers/model_doc/auto."
- )
-
- if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
- self.is_model_parallel = True
- else:
- self.is_model_parallel = False
-
- # At this stage the model is already loaded
- if getattr(model, "is_loaded_in_8bit", False):
- if getattr(model, "_is_int8_training_enabled", False):
- logger.info(
- "The model is loaded in 8-bit precision. To train this model you need to add additional modules"
- " inside the model such as adapters using `peft` library and freeze the model weights. Please"
- " check "
- " the examples in https://github.com/huggingface/peft for more details."
- )
- else:
- raise ValueError(
- "The model you want to train is loaded in 8-bit precision. if you want to fine-tune an 8-bit"
- " model, please make sure that you have installed `bitsandbytes>=0.37.0`. "
- )
-
- # Setup Sharded DDP training
- self.sharded_ddp = None
- if len(args.sharded_ddp) > 0:
- if args.deepspeed:
- raise ValueError(
- "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
- )
- if len(args.fsdp) > 0:
- raise ValueError(
- "Using --sharded_ddp xxx together with --fsdp is not possible, deactivate one of those flags."
- )
-
- if args.local_rank == -1:
- raise ValueError("Using sharded DDP only works in distributed training.")
- elif not is_fairscale_available():
- raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
- elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
- raise ImportError(
- "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
- f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
- )
- elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
- self.sharded_ddp = ShardedDDPOption.SIMPLE
- elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
- self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
- elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
- self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
-
- self.fsdp = None
- if len(args.fsdp) > 0:
- if args.deepspeed:
- raise ValueError(
- "Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags."
- )
- if not args.fsdp_config["xla"] and args.local_rank == -1:
- raise ValueError("Using fsdp only works in distributed training.")
-
- # dep_version_check("torch>=1.12.0")
- # Would have to update setup.py with torch>=1.12.0
- # which isn't ideal given that it would force people not using FSDP to also use torch>=1.12.0;
- # below is the current alternative.
- if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"):
- raise ValueError("FSDP requires PyTorch >= 1.12.0")
-
- from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, ShardingStrategy
-
- if FSDPOption.FULL_SHARD in args.fsdp:
- self.fsdp = ShardingStrategy.FULL_SHARD
- elif FSDPOption.SHARD_GRAD_OP in args.fsdp:
- self.fsdp = ShardingStrategy.SHARD_GRAD_OP
- elif FSDPOption.NO_SHARD in args.fsdp:
- self.fsdp = ShardingStrategy.NO_SHARD
-
- self.backward_prefetch = BackwardPrefetch.BACKWARD_PRE
- if "backward_prefetch" in self.args.fsdp_config and "backward_pos" not in self.backward_prefetch:
- self.backward_prefetch = BackwardPrefetch.BACKWARD_POST
-
- self.forword_prefetch = False
- if self.args.fsdp_config.get("forword_prefect", False):
- self.forword_prefetch = True
-
- self.limit_all_gathers = False
- if self.args.fsdp_config.get("limit_all_gathers", False):
- self.limit_all_gathers = True
-
- # one place to sort out whether to place the model on device or not
- # postpone switching model to cuda when:
- # 1. MP - since we are trying to fit a much bigger than 1 gpu model
- # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
- # and we only use deepspeed for training at the moment
- # 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first
- # 4. Sharded DDP - same as MP
- # 5. FSDP - same as MP
- self.place_model_on_device = args.place_model_on_device
- if (
- self.is_model_parallel
- or args.deepspeed
- or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train)
- or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
- or (self.fsdp is not None)
- ):
- self.place_model_on_device = False
-
- default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
- self.data_collator = data_collator if data_collator is not None else default_collator
- self.train_dataset = train_dataset
- self.eval_dataset = eval_dataset
- self.tokenizer = tokenizer
-
- if self.place_model_on_device and not getattr(model, "is_loaded_in_8bit", False):
- self._move_model_to_device(model, args.device)
-
- # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
- if self.is_model_parallel:
- self.args._n_gpu = 1
-
- # later use `self.model is self.model_wrapped` to check if it's wrapped or not
- self.model_wrapped = model
- self.model = model
-
- self.compute_metrics = compute_metrics
- self.preprocess_logits_for_metrics = preprocess_logits_for_metrics
- self.optimizer, self.lr_scheduler = optimizers
- if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
- raise RuntimeError(
- "Passing a `model_init` is incompatible with providing the `optimizers` argument. "
- "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
- )
- if is_torch_tpu_available() and self.optimizer is not None:
- for param in self.model.parameters():
- model_device = param.device
- break
- for param_group in self.optimizer.param_groups:
- if len(param_group["params"]) > 0:
- optimizer_device = param_group["params"][0].device
- break
- if model_device != optimizer_device:
- raise ValueError(
- "The model and the optimizer parameters are not on the same device, which probably means you"
- " created an optimizer around your model **before** putting on the device and passing it to the"
- " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and"
- " `model.to(xm.xla_device())` is performed before the optimizer creation in your script."
- )
- if ((self.sharded_ddp is not None) or args.deepspeed or (self.fsdp is not None)) and (
- self.optimizer is not None or self.lr_scheduler is not None
- ):
- raise RuntimeError(
- "Passing `optimizers` is not allowed if Fairscale, Deepspeed or PyTorch FSDP is enabled."
- "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
- )
- default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
- callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
- self.callback_handler = CallbackHandler(
- callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
- )
- self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
-
- # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
- self._loggers_initialized = False
-
- # Create clone of distant repo and output directory if needed
- if self.args.push_to_hub:
- self.init_git_repo(at_init=True)
- # In case of pull, we need to make sure every process has the latest.
- if is_torch_tpu_available():
- xm.rendezvous("init git repo")
- elif args.local_rank != -1:
- dist.barrier()
-
- if self.args.should_save:
- os.makedirs(self.args.output_dir, exist_ok=True)
-
- if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
- raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
-
- if args.max_steps > 0:
- logger.info("max_steps is given, it will override any value given in num_train_epochs")
-
- if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0:
- raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
-
- if (
- train_dataset is not None
- and isinstance(train_dataset, torch.utils.data.IterableDataset)
- and args.group_by_length
- ):
- raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset")
-
- self._signature_columns = None
-
- # Mixed precision setup
- self.use_apex = False
- self.use_cuda_amp = False
- self.use_cpu_amp = False
-
- # Mixed precision setup for SageMaker Model Parallel
- if is_sagemaker_mp_enabled():
- # BF16 + model parallelism in SageMaker: currently not supported, raise an error
- if args.bf16:
- raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ")
-
- if IS_SAGEMAKER_MP_POST_1_10:
- # When there's mismatch between SMP config and trainer argument, use SMP config as truth
- if args.fp16 != smp.state.cfg.fp16:
-                    logger.warning(
-                        f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
-                        f"but FP16 provided in trainer argument is {args.fp16}, "
-                        f"setting to {smp.state.cfg.fp16}."
-                    )
- args.fp16 = smp.state.cfg.fp16
- else:
- # smp < 1.10 does not support fp16 in trainer.
- if hasattr(smp.state.cfg, "fp16"):
- logger.warning(
- f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
- "but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer."
- )
-
- if args.fp16 or args.bf16:
- if args.half_precision_backend == "auto":
- if args.device == torch.device("cpu"):
- if args.fp16:
- raise ValueError("Tried to use `fp16` but it is not supported on cpu")
- elif _is_native_cpu_amp_available:
- args.half_precision_backend = "cpu_amp"
- else:
- raise ValueError("Tried to use cpu amp but native cpu amp is not available")
- else:
- args.half_precision_backend = "cuda_amp"
-
- logger.info(f"Using {args.half_precision_backend} half precision backend")
-
- self.do_grad_scaling = False
- if (args.fp16 or args.bf16) and not (args.deepspeed or is_sagemaker_mp_enabled() or is_torch_tpu_available()):
- # deepspeed and SageMaker Model Parallel manage their own half precision
- if args.half_precision_backend == "cuda_amp":
- self.use_cuda_amp = True
- self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16
- # bf16 does not need grad scaling
- self.do_grad_scaling = self.amp_dtype == torch.float16
- if self.do_grad_scaling:
- if self.sharded_ddp is not None:
- self.scaler = ShardedGradScaler()
- elif self.fsdp is not None:
- from torch.distributed.fsdp.sharded_grad_scaler import (
- ShardedGradScaler as FSDPShardedGradScaler,
- )
-
- self.scaler = FSDPShardedGradScaler()
- elif is_torch_tpu_available():
- from torch_xla.amp import GradScaler
-
- self.scaler = GradScaler()
- else:
- self.scaler = torch.cuda.amp.GradScaler()
- elif args.half_precision_backend == "cpu_amp":
- self.use_cpu_amp = True
- self.amp_dtype = torch.bfloat16
- else:
- if not is_apex_available():
- raise ImportError(
- "Using FP16 with APEX but APEX is not installed, please refer to"
- " https://www.github.com/nvidia/apex."
- )
- self.use_apex = True
-
- # FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
- if (
- is_sagemaker_mp_enabled()
- and self.use_cuda_amp
- and args.max_grad_norm is not None
- and args.max_grad_norm > 0
- ):
- raise ValueError(
- "SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
- "along 'max_grad_norm': 0 in your hyperparameters."
- )
-
- # Label smoothing
- if self.args.label_smoothing_factor != 0:
- self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
- else:
- self.label_smoother = None
-
- self.state = TrainerState(
- is_local_process_zero=self.is_local_process_zero(),
- is_world_process_zero=self.is_world_process_zero(),
- )
-
- self.control = TrainerControl()
- # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
- # returned to 0 every time flos need to be logged
- self.current_flos = 0
- self.hp_search_backend = None
- self.use_tune_checkpoints = False
- default_label_names = find_labels(self.model.__class__)
- self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
- self.can_return_loss = can_return_loss(self.model.__class__)
- self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
-
- # Internal variables to keep track of the original batch size
- self._train_batch_size = args.train_batch_size
-
- # very last
- self._memory_tracker.stop_and_update_metrics()
-
- # torch.compile
- if args.torch_compile and not is_torch_compile_available():
- raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.")
-
- def add_callback(self, callback):
- """
- Add a callback to the current list of [`~transformer.TrainerCallback`].
- Args:
- callback (`type` or [`~transformer.TrainerCallback`]):
- A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
- first case, will instantiate a member of that class.
- """
- self.callback_handler.add_callback(callback)
-
- def pop_callback(self, callback):
- """
- Remove a callback from the current list of [`~transformer.TrainerCallback`] and returns it.
- If the callback is not found, returns `None` (and no error is raised).
- Args:
- callback (`type` or [`~transformer.TrainerCallback`]):
- A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
- first case, will pop the first member of that class found in the list of callbacks.
- Returns:
- [`~transformer.TrainerCallback`]: The callback removed, if found.
- """
- return self.callback_handler.pop_callback(callback)
-
- def remove_callback(self, callback):
- """
- Remove a callback from the current list of [`~transformer.TrainerCallback`].
- Args:
- callback (`type` or [`~transformer.TrainerCallback`]):
- A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
- first case, will remove the first member of that class found in the list of callbacks.
- """
- self.callback_handler.remove_callback(callback)
-
- def _move_model_to_device(self, model, device):
- model = model.to(device)
- # Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
- if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
- model.tie_weights()
-
- def _set_signature_columns_if_needed(self):
- if self._signature_columns is None:
- # Inspect model forward signature to keep only the arguments it accepts.
- signature = inspect.signature(self.model.forward)
- self._signature_columns = list(signature.parameters.keys())
- # Labels may be named label or label_ids, the default data collator handles that.
- self._signature_columns += list(set(["label", "label_ids"] + self.label_names))
-
- def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
- if not self.args.remove_unused_columns:
- return dataset
- self._set_signature_columns_if_needed()
- signature_columns = self._signature_columns
-
- ignored_columns = list(set(dataset.column_names) - set(signature_columns))
- if len(ignored_columns) > 0:
- dset_description = "" if description is None else f"in the {description} set"
- logger.info(
- f"The following columns {dset_description} don't have a corresponding argument in "
- f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
- f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, "
- " you can safely ignore this message."
- )
-
- columns = [k for k in signature_columns if k in dataset.column_names]
-
- if version.parse(datasets.__version__) < version.parse("1.4.0"):
- dataset.set_format(
- type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
- )
- return dataset
- else:
- return dataset.remove_columns(ignored_columns)
-
- def _get_collator_with_removed_columns(
- self, data_collator: Callable, description: Optional[str] = None
- ) -> Callable:
- """Wrap the data collator in a callable removing unused columns."""
- if not self.args.remove_unused_columns:
- return data_collator
- self._set_signature_columns_if_needed()
- signature_columns = self._signature_columns
-
- remove_columns_collator = RemoveColumnsCollator(
- data_collator=data_collator,
- signature_columns=signature_columns,
- logger=logger,
- description=description,
- model_name=self.model.__class__.__name__,
- )
- return remove_columns_collator
-
- def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
- if self.train_dataset is None or not has_length(self.train_dataset):
- return None
-
- generator = None
- if self.args.world_size <= 1:
- generator = torch.Generator()
- # for backwards compatibility, we generate a seed here (which is sampled from a generator seeded with
- # `args.seed`) if data_seed isn't provided.
- # Further on in this method, we default to `args.seed` instead.
- if self.args.data_seed is None:
- seed = int(torch.empty((), dtype=torch.int64).random_().item())
- else:
- seed = self.args.data_seed
- generator.manual_seed(seed)
-
- seed = self.args.data_seed if self.args.data_seed is not None else self.args.seed
-
- # Build the sampler.
- if self.args.group_by_length:
- if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
- lengths = (
- self.train_dataset[self.args.length_column_name]
- if self.args.length_column_name in self.train_dataset.column_names
- else None
- )
- else:
- lengths = None
- model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
- if self.args.world_size <= 1:
- return LengthGroupedSampler(
- self.args.train_batch_size * self.args.gradient_accumulation_steps,
- dataset=self.train_dataset,
- lengths=lengths,
- model_input_name=model_input_name,
- generator=generator,
- )
- else:
- return DistributedLengthGroupedSampler(
- self.args.train_batch_size * self.args.gradient_accumulation_steps,
- dataset=self.train_dataset,
- num_replicas=self.args.world_size,
- rank=self.args.process_index,
- lengths=lengths,
- model_input_name=model_input_name,
- seed=seed,
- )
-
- else:
- if self.args.world_size <= 1:
- return RandomSampler(self.train_dataset, generator=generator)
- elif (
- self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
- and not self.args.dataloader_drop_last
- ):
- # Use a loop for TPUs when drop_last is False to have all batches have the same size.
- return DistributedSamplerWithLoop(
- self.train_dataset,
- batch_size=self.args.per_device_train_batch_size,
- num_replicas=self.args.world_size,
- rank=self.args.process_index,
- seed=seed,
- )
- else:
- return DistributedSampler(
- self.train_dataset,
- num_replicas=self.args.world_size,
- rank=self.args.process_index,
- seed=seed,
- )
-
- def get_train_dataloader(self) -> DataLoader:
- """
- Returns the training [`~torch.utils.data.DataLoader`].
- Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
- training if necessary) otherwise.
- Subclass and override this method if you want to inject some custom behavior.
- """
- if self.train_dataset is None:
- raise ValueError("Trainer: training requires a train_dataset.")
-
- train_dataset = self.train_dataset
- data_collator = self.data_collator
- if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
- train_dataset = self._remove_unused_columns(train_dataset, description="training")
- else:
- data_collator = self._get_collator_with_removed_columns(data_collator, description="training")
-
- if isinstance(train_dataset, torch.utils.data.IterableDataset):
- if self.args.world_size > 1:
- train_dataset = IterableDatasetShard(
- train_dataset,
- batch_size=self._train_batch_size,
- drop_last=self.args.dataloader_drop_last,
- num_processes=self.args.world_size,
- process_index=self.args.process_index,
- )
-
- return DataLoader(
- train_dataset,
- batch_size=self._train_batch_size,
- collate_fn=data_collator,
- num_workers=self.args.dataloader_num_workers,
- pin_memory=self.args.dataloader_pin_memory,
- )
-
- train_sampler = self._get_train_sampler()
-
- return DataLoader(
- train_dataset,
- batch_size=self._train_batch_size,
- sampler=train_sampler,
- collate_fn=data_collator,
- drop_last=self.args.dataloader_drop_last,
- num_workers=self.args.dataloader_num_workers,
- pin_memory=self.args.dataloader_pin_memory,
- worker_init_fn=seed_worker,
- )
-
- def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]:
- # Deprecated code
- if self.args.use_legacy_prediction_loop:
- if is_torch_tpu_available():
- return SequentialDistributedSampler(
- eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
- )
- elif is_sagemaker_mp_enabled():
- return SequentialDistributedSampler(
- eval_dataset,
- num_replicas=smp.dp_size(),
- rank=smp.dp_rank(),
- batch_size=self.args.per_device_eval_batch_size,
- )
- elif self.args.local_rank != -1:
- return SequentialDistributedSampler(eval_dataset)
- else:
- return SequentialSampler(eval_dataset)
-
- if self.args.world_size <= 1:
- return SequentialSampler(eval_dataset)
- else:
- return ShardSampler(
- eval_dataset,
- batch_size=self.args.per_device_eval_batch_size,
- num_processes=self.args.world_size,
- process_index=self.args.process_index,
- )
-
- def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
- """
- Returns the evaluation [`~torch.utils.data.DataLoader`].
- Subclass and override this method if you want to inject some custom behavior.
- Args:
- eval_dataset (`torch.utils.data.Dataset`, *optional*):
- If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
- by the `model.forward()` method are automatically removed. It must implement `__len__`.
- """
- if eval_dataset is None and self.eval_dataset is None:
- raise ValueError("Trainer: evaluation requires an eval_dataset.")
- eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
- data_collator = self.data_collator
-
- if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
- eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
- else:
- data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation")
-
- if isinstance(eval_dataset, torch.utils.data.IterableDataset):
- if self.args.world_size > 1:
- eval_dataset = IterableDatasetShard(
- eval_dataset,
- batch_size=self.args.per_device_eval_batch_size,
- drop_last=self.args.dataloader_drop_last,
- num_processes=self.args.world_size,
- process_index=self.args.process_index,
- )
- return DataLoader(
- eval_dataset,
- batch_size=self.args.eval_batch_size,
- collate_fn=data_collator,
- num_workers=self.args.dataloader_num_workers,
- pin_memory=self.args.dataloader_pin_memory,
- )
-
- eval_sampler = self._get_eval_sampler(eval_dataset)
-
- return DataLoader(
- eval_dataset,
- sampler=eval_sampler,
- batch_size=self.args.eval_batch_size,
- collate_fn=data_collator,
- drop_last=self.args.dataloader_drop_last,
- num_workers=self.args.dataloader_num_workers,
- pin_memory=self.args.dataloader_pin_memory,
- )
-
- def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
- """
- Returns the test [`~torch.utils.data.DataLoader`].
- Subclass and override this method if you want to inject some custom behavior.
- Args:
- test_dataset (`torch.utils.data.Dataset`, *optional*):
- The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
- `model.forward()` method are automatically removed. It must implement `__len__`.
- """
- data_collator = self.data_collator
-
- if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
- test_dataset = self._remove_unused_columns(test_dataset, description="test")
- else:
- data_collator = self._get_collator_with_removed_columns(data_collator, description="test")
-
- if isinstance(test_dataset, torch.utils.data.IterableDataset):
- if self.args.world_size > 1:
- test_dataset = IterableDatasetShard(
- test_dataset,
- batch_size=self.args.eval_batch_size,
- drop_last=self.args.dataloader_drop_last,
- num_processes=self.args.world_size,
- process_index=self.args.process_index,
- )
- return DataLoader(
- test_dataset,
- batch_size=self.args.eval_batch_size,
- collate_fn=data_collator,
- num_workers=self.args.dataloader_num_workers,
- pin_memory=self.args.dataloader_pin_memory,
- )
-
- test_sampler = self._get_eval_sampler(test_dataset)
-
- # We use the same batch_size as for eval.
- return DataLoader(
- test_dataset,
- sampler=test_sampler,
- batch_size=self.args.eval_batch_size,
- collate_fn=data_collator,
- drop_last=self.args.dataloader_drop_last,
- num_workers=self.args.dataloader_num_workers,
- pin_memory=self.args.dataloader_pin_memory,
- )
-
- def create_optimizer_and_scheduler(self, num_training_steps: int):
- """
- Setup the optimizer and the learning rate scheduler.
- We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
- Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
- `create_scheduler`) in a subclass.
- """
- self.create_optimizer()
- if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16:
- # If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer
- optimizer = self.optimizer.optimizer
- else:
- optimizer = self.optimizer
- self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
-
- def create_optimizer(self):
- """
- Setup the optimizer.
- We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
- Trainer's init through `optimizers`, or subclass and override this method in a subclass.
- """
- opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
-
- if self.optimizer is None:
- decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
- decay_parameters = [name for name in decay_parameters if "bias" not in name]
- optimizer_grouped_parameters = [
- {
- "params": [
- p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
- ],
- "weight_decay": self.args.weight_decay,
- },
- {
- "params": [
- p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
- ],
- "weight_decay": 0.0,
- },
- ]
-
- optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)
-
- if self.sharded_ddp == ShardedDDPOption.SIMPLE:
- self.optimizer = OSS(
- params=optimizer_grouped_parameters,
- optim=optimizer_cls,
- **optimizer_kwargs,
- )
- else:
- self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
- if optimizer_cls.__name__ == "Adam8bit":
- import bitsandbytes
-
- manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
-
- skipped = 0
- for module in opt_model.modules():
- if isinstance(module, nn.Embedding):
- skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
- print(f"skipped {module}: {skipped/2**20}M params")
- manager.register_module_override(module, "weight", {"optim_bits": 32})
- logger.debug(f"bitsandbytes: will optimize {module} in fp32")
- print(f"skipped: {skipped/2**20}M params")
-
- if is_sagemaker_mp_enabled():
- self.optimizer = smp.DistributedOptimizer(self.optimizer)
-
- return self.optimizer
-
- @staticmethod
- def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]:
- """
- Returns the optimizer class and optimizer parameters based on the training arguments.
- Args:
- args (`transformers.training_args.TrainingArguments`):
- The training arguments for the training session.
- """
-
- # parse args.optim_args
- optim_args = {}
- if args.optim_args:
- for mapping in args.optim_args.replace(" ", "").split(","):
- key, value = mapping.split("=")
- optim_args[key] = value
-
- optimizer_kwargs = {"lr": args.learning_rate}
-
- adam_kwargs = {
- "betas": (args.adam_beta1, args.adam_beta2),
- "eps": args.adam_epsilon,
- }
- if args.optim == OptimizerNames.ADAFACTOR:
- optimizer_cls = Adafactor
- optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
- elif args.optim == OptimizerNames.ADAMW_HF:
- from transformers.optimization import AdamW
-
- optimizer_cls = AdamW
- optimizer_kwargs.update(adam_kwargs)
- elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]:
- from torch.optim import AdamW
-
- optimizer_cls = AdamW
- optimizer_kwargs.update(adam_kwargs)
- if args.optim == OptimizerNames.ADAMW_TORCH_FUSED:
- optimizer_kwargs.update({"fused": True})
- elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:
- try:
- from torch_xla.amp.syncfree import AdamW
-
- optimizer_cls = AdamW
- optimizer_kwargs.update(adam_kwargs)
- except ImportError:
- raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.")
- elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:
- try:
- from apex.optimizers import FusedAdam
-
- optimizer_cls = FusedAdam
- optimizer_kwargs.update(adam_kwargs)
- except ImportError:
- raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!")
- elif args.optim == OptimizerNames.ADAMW_BNB:
- try:
- from bitsandbytes.optim import Adam8bit
-
- optimizer_cls = Adam8bit
- optimizer_kwargs.update(adam_kwargs)
- except ImportError:
- raise ValueError("Trainer tried to instantiate bnb Adam8bit but bnb is not installed!")
- elif args.optim == OptimizerNames.ADAMW_ANYPRECISION:
- try:
- from torchdistx.optimizers import AnyPrecisionAdamW
-
- optimizer_cls = AnyPrecisionAdamW
- optimizer_kwargs.update(adam_kwargs)
-
- # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx.
- optimizer_kwargs.update(
- {
- "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")),
- "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")),
- "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")),
- "compensation_buffer_dtype": getattr(
- torch, optim_args.get("compensation_buffer_dtype", "bfloat16")
- ),
- }
- )
- except ImportError:
- raise ValueError("Please install https://github.com/pytorch/torchdistx")
- elif args.optim == OptimizerNames.SGD:
- optimizer_cls = torch.optim.SGD
- elif args.optim == OptimizerNames.ADAGRAD:
- optimizer_cls = torch.optim.Adagrad
- else:
- raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}")
- return optimizer_cls, optimizer_kwargs
-
- def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
- """
- Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
- passed as an argument.
- Args:
- num_training_steps (int): The number of training steps to do.
- """
- ############
- num_training_steps *= 3
- ############
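-        # NOTE: the multiplier above is specific to this modified Trainer; the stock `transformers.Trainer`
-        # does not triple `num_training_steps`. It presumably stretches the warmup/decay schedule so it can
-        # span several successive calls to `train()` instead of a single run.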
- if self.lr_scheduler is None:
- self.lr_scheduler = get_scheduler(
- self.args.lr_scheduler_type,
- optimizer=self.optimizer if optimizer is None else optimizer,
- num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
- num_training_steps=num_training_steps,
- )
- return self.lr_scheduler
-
- def num_examples(self, dataloader: DataLoader) -> int:
- """
- Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
- dataloader.dataset does not exist or has no length, estimates as best it can
- """
- try:
- dataset = dataloader.dataset
- # Special case for IterableDatasetShard, we need to dig deeper
- if isinstance(dataset, IterableDatasetShard):
- return len(dataloader.dataset.dataset)
- return len(dataloader.dataset)
- except (NameError, AttributeError, TypeError): # no dataset or length, estimate by length of dataloader
- return len(dataloader) * self.args.per_device_train_batch_size
-
- def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
- """HP search setup code"""
- self._trial = trial
-
- if self.hp_search_backend is None or trial is None:
- return
- if self.hp_search_backend == HPSearchBackend.OPTUNA:
- params = self.hp_space(trial)
- elif self.hp_search_backend == HPSearchBackend.RAY:
- params = trial
- params.pop("wandb", None)
- elif self.hp_search_backend == HPSearchBackend.SIGOPT:
- params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()}
- elif self.hp_search_backend == HPSearchBackend.WANDB:
- params = trial
-
- for key, value in params.items():
- if not hasattr(self.args, key):
- logger.warning(
- f"Trying to set {key} in the hyperparameter search but there is no corresponding field in"
- " `TrainingArguments`."
- )
- continue
- old_attr = getattr(self.args, key, None)
- # Casting value to the proper type
- if old_attr is not None:
- value = type(old_attr)(value)
- setattr(self.args, key, value)
- if self.hp_search_backend == HPSearchBackend.OPTUNA:
- logger.info(f"Trial: {trial.params}")
- if self.hp_search_backend == HPSearchBackend.SIGOPT:
- logger.info(f"SigOpt Assignments: {trial.assignments}")
- if self.hp_search_backend == HPSearchBackend.WANDB:
- logger.info(f"W&B Sweep parameters: {trial}")
- if self.args.deepspeed:
- # Rebuild the deepspeed config to reflect the updated training parameters
- from transformers.deepspeed import HfTrainerDeepSpeedConfig
-
- self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed)
- self.args.hf_deepspeed_config.trainer_config_process(self.args)
-
- def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]):
- if self.hp_search_backend is None or trial is None:
- return
- self.objective = self.compute_objective(metrics.copy())
- if self.hp_search_backend == HPSearchBackend.OPTUNA:
- import optuna
-
- trial.report(self.objective, step)
- if trial.should_prune():
- self.callback_handler.on_train_end(self.args, self.state, self.control)
- raise optuna.TrialPruned()
- elif self.hp_search_backend == HPSearchBackend.RAY:
- from ray import tune
-
- if self.control.should_save:
- self._tune_save_checkpoint()
- tune.report(objective=self.objective, **metrics)
-
- def _tune_save_checkpoint(self):
- from ray import tune
-
- if not self.use_tune_checkpoints:
- return
- with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
- output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
- self.save_model(output_dir, _internal_call=True)
- if self.args.should_save:
- self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
- torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
- torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
-
- def call_model_init(self, trial=None):
- model_init_argcount = number_of_arguments(self.model_init)
- if model_init_argcount == 0:
- model = self.model_init()
- elif model_init_argcount == 1:
- model = self.model_init(trial)
- else:
- raise RuntimeError("model_init should have 0 or 1 argument.")
-
- if model is None:
- raise RuntimeError("model_init should not return None.")
-
- return model
-
- def torch_jit_model_eval(self, model, dataloader, training=False):
- if not training:
- if dataloader is None:
- logger.warning("failed to use PyTorch jit mode due to current dataloader is none.")
- return model
- example_batch = next(iter(dataloader))
- example_batch = self._prepare_inputs(example_batch)
- try:
- jit_model = model.eval()
- with ContextManagers([self.autocast_smart_context_manager(cache_enabled=False), torch.no_grad()]):
- if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.14.0"):
- if isinstance(example_batch, dict):
- jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False)
- else:
- jit_model = torch.jit.trace(
- jit_model,
- example_kwarg_inputs={key: example_batch[key] for key in example_batch},
- strict=False,
- )
- else:
- jit_inputs = []
- for key in example_batch:
- example_tensor = torch.ones_like(example_batch[key])
- jit_inputs.append(example_tensor)
- jit_inputs = tuple(jit_inputs)
- jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False)
- jit_model = torch.jit.freeze(jit_model)
- with torch.no_grad():
- jit_model(**example_batch)
- jit_model(**example_batch)
- model = jit_model
- self.use_cpu_amp = False
- self.use_cuda_amp = False
- except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e:
- logger.warning(f"failed to use PyTorch jit mode due to: {e}.")
-
- return model
-
- def ipex_optimize_model(self, model, training=False, dtype=torch.float32):
- if not is_ipex_available():
- raise ImportError(
- "Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer"
- " to https://github.com/intel/intel-extension-for-pytorch."
- )
-
- import intel_extension_for_pytorch as ipex
-
- if not training:
- model.eval()
- dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype
- # conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings
- model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train)
- else:
- if not model.training:
- model.train()
- model, self.optimizer = ipex.optimize(
- model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1"
- )
-
- return model
-
- def _wrap_model(self, model, training=True, dataloader=None):
- if self.args.torch_compile:
- model = torch.compile(model, backend=self.args.torch_compile_backend, mode=self.args.torch_compile_mode)
-
- if self.args.use_ipex:
- dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32
- model = self.ipex_optimize_model(model, training, dtype=dtype)
-
- if is_sagemaker_mp_enabled():
- # Wrapping the base model twice in a DistributedModel will raise an error.
- if isinstance(self.model_wrapped, smp.model.DistributedModel):
- return self.model_wrapped
- return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
-
- # already initialized its own DDP and AMP
- if self.deepspeed:
- return self.deepspeed
-
- # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
- if unwrap_model(model) is not model:
- return model
-
- # Mixed precision training with apex (torch < 1.6)
- if self.use_apex and training:
- model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
-
- # Multi-gpu training (should be after apex fp16 initialization)
- if self.args.n_gpu > 1:
- model = nn.DataParallel(model)
-
- if self.args.jit_mode_eval:
- start_time = time.time()
- model = self.torch_jit_model_eval(model, dataloader, training)
- self.jit_compilation_time = round(time.time() - start_time, 4)
-
- # Note: in torch.distributed mode, there's no point in wrapping the model
- # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
- if not training:
- return model
-
- # Distributed training (should be after apex fp16 initialization)
- if self.sharded_ddp is not None:
- # Sharded DDP!
- if self.sharded_ddp == ShardedDDPOption.SIMPLE:
- model = ShardedDDP(model, self.optimizer)
- else:
- mixed_precision = self.args.fp16 or self.args.bf16
- cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
- zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
- # XXX: Breaking the self.model convention but I see no way around it for now.
- if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
- model = auto_wrap(model)
- self.model = model = FullyShardedDDP(
- model,
- mixed_precision=mixed_precision,
- reshard_after_forward=zero_3,
- cpu_offload=cpu_offload,
- ).to(self.args.device)
- # Distributed training using PyTorch FSDP
- elif self.fsdp is not None:
- if not self.args.fsdp_config["xla"]:
- # PyTorch FSDP!
- from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, MixedPrecision
- from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
- from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
-
- if FSDPOption.OFFLOAD in self.args.fsdp:
- cpu_offload = CPUOffload(offload_params=True)
- else:
- cpu_offload = CPUOffload(offload_params=False)
-
- auto_wrap_policy = None
-
- if FSDPOption.AUTO_WRAP in self.args.fsdp:
- if self.args.fsdp_config["fsdp_min_num_params"] > 0:
- auto_wrap_policy = functools.partial(
- size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
- )
- elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
- transformer_cls_to_wrap = set()
- for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
- transformer_cls = get_module_class_from_name(model, layer_class)
- if transformer_cls is None:
- raise Exception("Could not find the transformer layer class to wrap in the model.")
- else:
- transformer_cls_to_wrap.add(transformer_cls)
- auto_wrap_policy = functools.partial(
- transformer_auto_wrap_policy,
- # Transformer layer class to wrap
- transformer_layer_cls=transformer_cls_to_wrap,
- )
- mixed_precision_policy = None
- dtype = None
- if self.args.fp16:
- dtype = torch.float16
- elif self.args.bf16:
- dtype = torch.bfloat16
- if dtype is not None:
- mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
- if type(model) != FSDP:
- # XXX: Breaking the self.model convention but I see no way around it for now.
- self.model = model = FSDP(
- model,
- sharding_strategy=self.fsdp,
- cpu_offload=cpu_offload,
- auto_wrap_policy=auto_wrap_policy,
- mixed_precision=mixed_precision_policy,
- device_id=self.args.device,
- backward_prefetch=self.backward_prefetch,
- forward_prefetch=self.forword_prefetch,
- limit_all_gathers=self.limit_all_gathers,
- )
- else:
- try:
- from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP
- from torch_xla.distributed.fsdp import checkpoint_module
- from torch_xla.distributed.fsdp.wrap import (
- size_based_auto_wrap_policy,
- transformer_auto_wrap_policy,
- )
- except ImportError:
- raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.")
- auto_wrap_policy = None
- auto_wrapper_callable = None
- if self.args.fsdp_config["fsdp_min_num_params"] > 0:
- auto_wrap_policy = functools.partial(
- size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
- )
- elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
- transformer_cls_to_wrap = set()
- for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
- transformer_cls = get_module_class_from_name(model, layer_class)
- if transformer_cls is None:
- raise Exception("Could not find the transformer layer class to wrap in the model.")
- else:
- transformer_cls_to_wrap.add(transformer_cls)
- auto_wrap_policy = functools.partial(
- transformer_auto_wrap_policy,
- # Transformer layer class to wrap
- transformer_layer_cls=transformer_cls_to_wrap,
- )
- fsdp_kwargs = self.args.xla_fsdp_config
- if self.args.fsdp_config["xla_fsdp_grad_ckpt"]:
- # Apply gradient checkpointing to auto-wrapped sub-modules if specified
- def auto_wrapper_callable(m, *args, **kwargs):
- return FSDP(checkpoint_module(m), *args, **kwargs)
-
- # Wrap the base model with an outer FSDP wrapper
- self.model = model = FSDP(
- model,
- auto_wrap_policy=auto_wrap_policy,
- auto_wrapper_callable=auto_wrapper_callable,
- **fsdp_kwargs,
- )
-
-                # Patch `xm.optimizer_step` so that it does not reduce gradients in this case,
-                # as FSDP does not need gradient reduction over sharded parameters.
- def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}):
- loss = optimizer.step(**optimizer_args)
- if barrier:
- xm.mark_step()
- return loss
-
- xm.optimizer_step = patched_optimizer_step
- elif is_sagemaker_dp_enabled():
- model = nn.parallel.DistributedDataParallel(
- model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))]
- )
- elif self.args.local_rank != -1:
- kwargs = {}
- if self.args.ddp_find_unused_parameters is not None:
- kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters
- elif isinstance(model, PreTrainedModel):
- # find_unused_parameters breaks checkpointing as per
- # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
- kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing
- else:
- kwargs["find_unused_parameters"] = True
-
- if self.args.ddp_bucket_cap_mb is not None:
- kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb
- if is_torch_neuroncore_available():
- return model
- model = nn.parallel.DistributedDataParallel(
- model,
- device_ids=[self.args.local_rank] if self.args._n_gpu != 0 else None,
- output_device=self.args.local_rank if self.args._n_gpu != 0 else None,
- **kwargs,
- )
-
- return model
-
- def train(
- self,
- resume_from_checkpoint: Optional[Union[str, bool]] = None,
- trial: Union["optuna.Trial", Dict[str, Any]] = None,
- ignore_keys_for_eval: Optional[List[str]] = None,
-        is_first_time=False,
- **kwargs,
- ):
- """
- Main training entry point.
- Args:
- resume_from_checkpoint (`str` or `bool`, *optional*):
- If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
- `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
- of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
- trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
- The trial run or the hyperparameter dictionary for hyperparameter search.
-            ignore_keys_for_eval (`List[str]`, *optional*):
- A list of keys in the output of your model (if it is a dictionary) that should be ignored when
- gathering predictions for evaluation during the training.
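-            is_first_time (`bool`, *optional*, defaults to `False`):
-                Flag specific to this modified Trainer (not part of the stock `transformers` API). When `True`,
-                `train()` runs the one-time setup in `_inner_training_loop`; when `False`, it runs the actual
-                training loop in `_one_train`.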
- kwargs:
- Additional keyword arguments used to hide deprecated arguments
- """
- if resume_from_checkpoint is False:
- resume_from_checkpoint = None
-
- # memory metrics - must set up as early as possible
- self._memory_tracker.start()
-
- args = self.args
-
- #self.is_in_train = True
-
- # do_train is not a reliable argument, as it might not be set and .train() still called, so
- # the following is a workaround:
- if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:
- self._move_model_to_device(self.model, args.device)
-
- if "model_path" in kwargs:
- resume_from_checkpoint = kwargs.pop("model_path")
- warnings.warn(
- "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
- "instead.",
- FutureWarning,
- )
- if len(kwargs) > 0:
- raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
- # This might change the seed so needs to run first.
- self._hp_search_setup(trial)
- self._train_batch_size = self.args.train_batch_size
-
- # Model re-init
- model_reloaded = False
- if self.model_init is not None:
- # Seed must be set before instantiating the model when using model_init.
- enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
- self.model = self.call_model_init(trial)
- model_reloaded = True
- # Reinitializes optimizer and scheduler
- self.optimizer, self.lr_scheduler = None, None
-
- # Load potential model checkpoint
- if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
- resume_from_checkpoint = get_last_checkpoint(args.output_dir)
- if resume_from_checkpoint is None:
- raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
-
- if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled() and args.deepspeed is None:
- self._load_from_checkpoint(resume_from_checkpoint)
-
- # If model was re-initialized, put it on the right device and update self.model_wrapped
- if model_reloaded:
- if self.place_model_on_device:
- self._move_model_to_device(self.model, args.device)
- self.model_wrapped = self.model
- if is_first_time:
- inner_training_loop1 = find_executable_batch_size(
- self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
- )
- return inner_training_loop1(
- args=args,
- resume_from_checkpoint=resume_from_checkpoint,
- trial=trial,
- ignore_keys_for_eval=ignore_keys_for_eval,
- )
- else:
- inner_training_loop2 = find_executable_batch_size(
- self._one_train, self._train_batch_size, args.auto_find_batch_size
- )
- return inner_training_loop2(
- args=args,
- resume_from_checkpoint=resume_from_checkpoint,
- trial=trial,
- ignore_keys_for_eval=ignore_keys_for_eval,
- )
-
-
- def _one_train(
- self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
- ):
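-        # Fork-specific method: essentially the epoch/step loop lifted out of the stock `_inner_training_loop`.
-        # It assumes a prior `train(is_first_time=True)` call has already wrapped the model into
-        # `self.tmp_model` and created the optimizer and lr scheduler.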
- #print(self.lr_scheduler)
- #print(dir(self.lr_scheduler))
-
-
- self.state = TrainerState()
- self.state.is_hyper_param_search = trial is not None
- # Get dataloader
- self._train_batch_size = batch_size
- # Data loader and number of training steps
- train_dataloader = self.get_train_dataloader()
- #print("AAAAAAA", len(train_dataloader))
-
- total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
-
- len_dataloader = None
- if has_length(train_dataloader):
- len_dataloader = len(train_dataloader)
- num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps
- num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
- num_examples = self.num_examples(train_dataloader)
- if args.max_steps > 0:
- max_steps = args.max_steps
- num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
- args.max_steps % num_update_steps_per_epoch > 0
- )
- # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
- # the best we can do.
- num_train_samples = args.max_steps * total_train_batch_size
- else:
- max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
- num_train_epochs = math.ceil(args.num_train_epochs)
- num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs
- elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size
- max_steps = args.max_steps
- # Setting a very large number of epochs so we go as many times as necessary over the iterator.
- num_train_epochs = sys.maxsize
- num_update_steps_per_epoch = max_steps
- num_examples = total_train_batch_size * args.max_steps
- num_train_samples = args.max_steps * total_train_batch_size
- else:
- raise ValueError(
- "args.max_steps must be set to a positive value if dataloader does not have a length, was"
- f" {args.max_steps}"
- )
- ###########
- #num_train_epochs = 5
-
- # Train!
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {num_examples}")
- logger.info(f" Num Epochs = {num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {max_steps}")
- logger.info(
- f" Number of trainable parameters = {sum(p.numel() for p in self.tmp_model.parameters() if p.requires_grad)}"
- )
-
- self.state.epoch = 0
- start_time = time.time()
- epochs_trained = 0
- steps_trained_in_current_epoch = 0
- steps_trained_progress_bar = None
-
- # Update the references
- self.callback_handler.model = self.model
- self.callback_handler.optimizer = self.optimizer
- self.callback_handler.lr_scheduler = self.lr_scheduler
- self.callback_handler.train_dataloader = train_dataloader
- if self.hp_name is not None and self._trial is not None:
-            # use self._trial because the SigOpt/Optuna HPO only calls `_hp_search_setup(trial)` instead of passing
-            # the trial parameter to `train()` when using DDP.
- self.state.trial_name = self.hp_name(self._trial)
- if trial is not None:
- assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
- self.state.trial_params = hp_params(assignments)
- else:
- self.state.trial_params = None
- # This should be the same if the state has been saved but in case the training arguments changed, it's safer
- # to set this after the load.
- self.state.max_steps = max_steps
- self.state.num_train_epochs = num_train_epochs
- self.state.is_local_process_zero = self.is_local_process_zero()
- self.state.is_world_process_zero = self.is_world_process_zero()
-
- # tr_loss is a tensor to avoid synchronization of TPUs through .item()
- tr_loss = torch.tensor(0.0).to(args.device)
-        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
- self._total_loss_scalar = 0.0
- self._globalstep_last_logged = self.state.global_step
- #model.zero_grad()
- self.tmp_model.zero_grad()
-
- self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
-
- # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
- if not args.ignore_data_skip:
- #print("I skip!") called
- for epoch in range(epochs_trained):
- is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance(
- train_dataloader.sampler, RandomSampler
- )
- if is_torch_less_than_1_11 or not is_random_sampler:
- # We just need to begin an iteration to create the randomization of the sampler.
- # That was before PyTorch 1.11 however...
- for _ in train_dataloader:
- break
- else:
- # Otherwise we need to call the whooooole sampler cause there is some random operation added
- # AT THE VERY END!
- _ = list(train_dataloader.sampler)
-
- ###############
- #num_train_epochs = 10
- self.is_in_train = True
- #print("The number of epoches: ", num_train_epochs)
- #############
- total_batched_samples = 0
- for epoch in range(epochs_trained, num_train_epochs):
- if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
- train_dataloader.sampler.set_epoch(epoch)
- elif hasattr(train_dataloader, "dataset") and isinstance(train_dataloader.dataset, IterableDatasetShard):
- train_dataloader.dataset.set_epoch(epoch)
-
- if is_torch_tpu_available():
- parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
- epoch_iterator = parallel_loader
- else:
- epoch_iterator = train_dataloader
-
- # Reset the past mems state at the beginning of each epoch if necessary.
- if args.past_index >= 0:
- self._past = None
-
- steps_in_epoch = (
- len(epoch_iterator)
- if len_dataloader is not None
- else args.max_steps * args.gradient_accumulation_steps
- )
- self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
-
- if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:
- self._load_rng_state(resume_from_checkpoint)
-
- rng_to_sync = False
- steps_skipped = 0
- if skip_first_batches is not None and steps_trained_in_current_epoch > 0:
- epoch_iterator = skip_first_batches(epoch_iterator, steps_trained_in_current_epoch)
- steps_skipped = steps_trained_in_current_epoch
- steps_trained_in_current_epoch = 0
- rng_to_sync = True
-
- #print("The number of one epoch: ", len(epoch_iterator))
- step = -1
- for step, inputs in enumerate(epoch_iterator):
- total_batched_samples += 1
- if rng_to_sync:
- self._load_rng_state(resume_from_checkpoint)
- rng_to_sync = False
-
- # Skip past any already trained steps if resuming training
- if steps_trained_in_current_epoch > 0:
- steps_trained_in_current_epoch -= 1
- if steps_trained_progress_bar is not None:
- steps_trained_progress_bar.update(1)
- if steps_trained_in_current_epoch == 0:
- self._load_rng_state(resume_from_checkpoint)
- continue
- elif steps_trained_progress_bar is not None:
- steps_trained_progress_bar.close()
- steps_trained_progress_bar = None
-
- if step % args.gradient_accumulation_steps == 0:
- self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
-
- if (
- (total_batched_samples % args.gradient_accumulation_steps != 0)
- and args.local_rank != -1
- and args._no_sync_in_gradient_accumulation
- ):
- # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
- with self.tmp_model.no_sync():
- tr_loss_step = self.training_step(self.tmp_model, inputs)
- #with model.no_sync():
- #tr_loss_step = self.training_step(model, inputs)
- else:
- tr_loss_step = self.training_step(self.tmp_model, inputs)
-
- if (
- args.logging_nan_inf_filter
- and not is_torch_tpu_available()
- and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
- ):
- # if loss is nan or inf simply add the average of previous logged losses
- tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
- else:
- tr_loss += tr_loss_step
-
- self.current_flos += float(self.floating_point_ops(inputs))
-
- # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
- if self.deepspeed:
- self.deepspeed.step()
-
- if total_batched_samples % args.gradient_accumulation_steps == 0 or (
- # last step in epoch but step is always smaller than gradient_accumulation_steps
- steps_in_epoch <= args.gradient_accumulation_steps
- and (step + 1) == steps_in_epoch
- ):
- # Gradient clipping
- if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
- # deepspeed does its own clipping
-
- if self.do_grad_scaling:
- # Reduce gradients first for XLA
- if is_torch_tpu_available():
- gradients = xm._fetch_gradients(self.optimizer)
- xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
- # AMP: gradients need unscaling
- self.scaler.unscale_(self.optimizer)
-
- if is_sagemaker_mp_enabled() and args.fp16:
- self.optimizer.clip_master_grads(args.max_grad_norm)
- elif hasattr(self.optimizer, "clip_grad_norm"):
- # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
- self.optimizer.clip_grad_norm(args.max_grad_norm)
-                        elif hasattr(self.tmp_model, "clip_grad_norm_"):
-                            # Some models (like FullyShardedDDP) have a specific way to do gradient clipping
-                            self.tmp_model.clip_grad_norm_(args.max_grad_norm)
-                        else:
-                            # Revert to normal clipping otherwise, handling Apex or full precision
-                            nn.utils.clip_grad_norm_(
-                                amp.master_params(self.optimizer) if self.use_apex else self.tmp_model.parameters(),
-                                args.max_grad_norm,
-                            )
-
- # Optimizer step
- optimizer_was_run = True
- if self.deepspeed:
- pass # called outside the loop
- elif is_torch_tpu_available():
- if self.do_grad_scaling:
- self.scaler.step(self.optimizer)
- self.scaler.update()
- else:
- xm.optimizer_step(self.optimizer)
- elif self.do_grad_scaling:
- scale_before = self.scaler.get_scale()
- self.scaler.step(self.optimizer)
- self.scaler.update()
- scale_after = self.scaler.get_scale()
- optimizer_was_run = scale_before <= scale_after
- else:
- self.optimizer.step()
-
- if optimizer_was_run and not self.deepspeed:
- self.lr_scheduler.step()
-
- self.tmp_model.zero_grad()
- self.state.global_step += 1
- self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch
- self.control = self.callback_handler.on_step_end(args, self.state, self.control)
-
- self._maybe_log_save_evaluate(tr_loss, self.tmp_model, trial, epoch, ignore_keys_for_eval)
- else:
- self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
-
- if self.control.should_epoch_stop or self.control.should_training_stop:
- break
- if step < 0:
- logger.warning(
- "There seems to be not a single sample in your epoch_iterator, stopping training at step"
- f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
- f" num_steps ({max_steps}) higher than the number of available samples."
- )
- self.control.should_training_stop = True
-
- self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
- self._maybe_log_save_evaluate(tr_loss, self.tmp_model, trial, epoch, ignore_keys_for_eval)
-
- if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
- if is_torch_tpu_available():
- # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
- xm.master_print(met.metrics_report())
- else:
- logger.warning(
- "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
- "configured. Check your training configuration if this is unexpected."
- )
- if self.control.should_training_stop:
- break
-
- if args.past_index and hasattr(self, "_past"):
- # Clean the state at the end of training
- delattr(self, "_past")
-
- logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
- if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
-            # Wait for everyone to get here so we are sure the model has been saved by process 0.
- if is_torch_tpu_available():
- xm.rendezvous("load_best_model_at_end")
- elif args.local_rank != -1:
- dist.barrier()
- elif is_sagemaker_mp_enabled():
- smp.barrier()
-
- self._load_best_model()
-
- # add remaining tr_loss
- self._total_loss_scalar += tr_loss.item()
- train_loss = self._total_loss_scalar / self.state.global_step
-
- metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
- self.store_flos()
- metrics["total_flos"] = self.state.total_flos
- metrics["train_loss"] = train_loss
-
- self.is_in_train = False
-
- self._memory_tracker.stop_and_update_metrics(metrics)
-
- self.log(metrics)
-
- run_dir = self._get_output_dir(trial)
- checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir)
-
- # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save.
- if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1:
- for checkpoint in checkpoints_sorted:
- if checkpoint != self.state.best_model_checkpoint:
- logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
- shutil.rmtree(checkpoint)
-
- self.control = self.callback_handler.on_train_end(args, self.state, self.control)
-
- return TrainOutput(self.state.global_step, train_loss, metrics)
-
- def _inner_training_loop(
- self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
- ):
-        '''
-        0. This function serves to perform the one-time training setup.
-        1. Update `self.train_dataset` before calling this function.
-        '''
- # 1 Get dataloader
- self._train_batch_size = batch_size
- # Data loader and number of training steps
- train_dataloader = self.get_train_dataloader()
- total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
-
- len_dataloader = None
- if has_length(train_dataloader):
- len_dataloader = len(train_dataloader)
- num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps
- num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
- num_examples = self.num_examples(train_dataloader)
- if args.max_steps > 0:
- max_steps = args.max_steps
- num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
- args.max_steps % num_update_steps_per_epoch > 0
- )
- # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
- # the best we can do.
- num_train_samples = args.max_steps * total_train_batch_size
- else:
- max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
- num_train_epochs = math.ceil(args.num_train_epochs)
- num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs
- elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size
- max_steps = args.max_steps
- # Setting a very large number of epochs so we go as many times as necessary over the iterator.
- num_train_epochs = sys.maxsize
- num_update_steps_per_epoch = max_steps
- num_examples = total_train_batch_size * args.max_steps
- num_train_samples = args.max_steps * total_train_batch_size
- else:
- raise ValueError(
- "args.max_steps must be set to a positive value if dataloader does not have a length, was"
- f" {args.max_steps}"
- )
-
- if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
- if self.args.n_gpu > 1:
- # nn.DataParallel(model) replicates the model, creating new variables and module
- # references, so the hooks registered here no longer work on the other GPUs and break the debug module.
- raise ValueError(
- "Currently --debug underflow_overflow is not supported under DP. Please use DDP"
- " (torch.distributed.launch)."
- )
- else:
- debug_overflow = DebugUnderflowOverflow(self.model) # noqa
-
- delay_optimizer_creation = (
- self.sharded_ddp is not None
- and self.sharded_ddp != ShardedDDPOption.SIMPLE
- or is_sagemaker_mp_enabled()
- or self.fsdp is not None
- )
- if args.deepspeed:
- deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
- self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
- )
- self.model = deepspeed_engine.module
- self.model_wrapped = deepspeed_engine
- self.deepspeed = deepspeed_engine
- self.optimizer = optimizer
- self.lr_scheduler = lr_scheduler
- #print("I just create a optimizer here!") # called
- elif not delay_optimizer_creation:
- self.create_optimizer_and_scheduler(num_training_steps=max_steps)
-
- self.state = TrainerState()
- self.state.is_hyper_param_search = trial is not None
-
- # Activate gradient checkpointing if needed
- if args.gradient_checkpointing:
- self.model.gradient_checkpointing_enable()
-
- #model = self._wrap_model(self.model_wrapped)
- self.tmp_model = self._wrap_model(self.model_wrapped)
-
-
- #if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:
- # self._load_from_checkpoint(resume_from_checkpoint, model)
-
- # for the rest of this function `model` is the outside model, whether it was wrapped or not
- if self.tmp_model is not self.model:
- self.model_wrapped = self.tmp_model
-
- if delay_optimizer_creation:
- print("I create here!") # not called
- self.create_optimizer_and_scheduler(num_training_steps=max_steps)
-
- return True
- # Check if saved optimizer or scheduler states exist
- #self._load_optimizer_and_scheduler(resume_from_checkpoint)
-
- # important: at this point:
- # self.model is the Transformers Model
- # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
-
-
-
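- # Hedged usage sketch (not from the original file): the docstring above suggests this modified loop
- # runs one training pass per dataset, with `self.train_dataset` swapped in beforehand. `trainer` and
- # `next_dataset` below are hypothetical placeholders.
- #
- #     trainer.train_dataset = next_dataset
- #     trainer._inner_training_loop(
- #         batch_size=trainer.args.train_batch_size,
- #         args=trainer.args,
- #     )
-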
- def _get_output_dir(self, trial):
- if self.hp_search_backend is not None and trial is not None:
- if self.hp_search_backend == HPSearchBackend.OPTUNA:
- run_id = trial.number
- elif self.hp_search_backend == HPSearchBackend.RAY:
- from ray import tune
-
- run_id = tune.get_trial_id()
- elif self.hp_search_backend == HPSearchBackend.SIGOPT:
- run_id = trial.id
- elif self.hp_search_backend == HPSearchBackend.WANDB:
- import wandb
-
- run_id = wandb.run.id
- run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
- run_dir = os.path.join(self.args.output_dir, run_name)
- else:
- run_dir = self.args.output_dir
- return run_dir
-
- def _load_from_checkpoint(self, resume_from_checkpoint, model=None):
- if model is None:
- model = self.model
-
- if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)) and not os.path.isfile(
- os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME)
- ):
- raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
-
- logger.info(f"Loading model from {resume_from_checkpoint}.")
-
- if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
- config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
- checkpoint_version = config.transformers_version
- if checkpoint_version is not None and checkpoint_version != __version__:
- logger.warning(
- f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
- f"Transformers but your current version is {__version__}. This is not recommended and could "
- "yield to errors or unwanted behaviors."
- )
-
- if os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
- # If the model is on the GPU, it still works!
- if is_sagemaker_mp_enabled():
- if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")):
- # If the 'user_content.pt' file exists, load with the new smp api.
- # Checkpoint must have been saved with the new smp api.
- smp.resume_from_checkpoint(
- path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False
- )
- else:
- # If the 'user_content.pt' file does NOT exist, load with the old smp api.
- # Checkpoint must have been saved with the old smp api.
- if hasattr(self.args, "fp16") and self.args.fp16 is True:
- logger.warning(
- "Enabling FP16 and loading from smp < 1.10 checkpoint together is not suppported."
- )
- state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
- # Required for smp to not auto-translate state_dict from hf to smp (is already smp).
- state_dict["_smp_is_partial"] = False
- load_result = model.load_state_dict(state_dict, strict=True)
- # release memory
- del state_dict
- else:
- # We load the model state dict on the CPU to avoid an OOM error.
- state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
- # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
- # which takes *args instead of **kwargs
- load_result = model.load_state_dict(state_dict, False)
- # release memory
- del state_dict
- self._issue_warnings_after_load(load_result)
- else:
- # We load the sharded checkpoint
- load_result = load_sharded_checkpoint(model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled())
- if not is_sagemaker_mp_enabled():
- self._issue_warnings_after_load(load_result)
-
- def _load_best_model(self):
- logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).")
- best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
- model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
- if os.path.exists(best_model_path):
- if self.deepspeed:
- if self.model_wrapped is not None:
- # this removes the pre-hooks from the previous engine
- self.model_wrapped.destroy()
- self.model_wrapped = None
-
- # temp hack until Deepspeed fixes the problem with resume from an existing engine that did some stepping
- deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
- self,
- num_training_steps=self.args.max_steps,
- resume_from_checkpoint=self.state.best_model_checkpoint,
- )
- self.model = deepspeed_engine.module
- self.model_wrapped = deepspeed_engine
- self.deepspeed = deepspeed_engine
- self.optimizer = optimizer
- self.lr_scheduler = lr_scheduler
- else:
- if is_sagemaker_mp_enabled():
- if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")):
- # If the 'user_content.pt' file exists, load with the new smp api.
- # Checkpoint must have been saved with the new smp api.
- smp.resume_from_checkpoint(
- path=self.state.best_model_checkpoint,
- tag=WEIGHTS_NAME,
- partial=False,
- load_optimizer=False,
- )
- else:
- # If the 'user_content.pt' file does NOT exist, load with the old smp api.
- # Checkpoint must have been saved with the old smp api.
- state_dict = torch.load(best_model_path, map_location="cpu")
- state_dict["_smp_is_partial"] = False
- load_result = model.load_state_dict(state_dict, strict=True)
- else:
- # We load the model state dict on the CPU to avoid an OOM error.
- state_dict = torch.load(best_model_path, map_location="cpu")
- # If the model is on the GPU, it still works!
- # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
- # which takes *args instead of **kwargs
- load_result = model.load_state_dict(state_dict, False)
- if not is_sagemaker_mp_enabled():
- self._issue_warnings_after_load(load_result)
- elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)):
- load_result = load_sharded_checkpoint(
- model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled()
- )
- if not is_sagemaker_mp_enabled():
- self._issue_warnings_after_load(load_result)
- else:
- logger.warning(
- f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
- "on multiple nodes, you should activate `--save_on_each_node`."
- )
-
- def _issue_warnings_after_load(self, load_result):
- if len(load_result.missing_keys) != 0:
- if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(
- self.model._keys_to_ignore_on_save
- ):
- self.model.tie_weights()
- else:
- logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
- if len(load_result.unexpected_keys) != 0:
- logger.warning(
- f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}."
- )
-
- def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
- if self.control.should_log:
- if is_torch_tpu_available():
- xm.mark_step()
-
- logs: Dict[str, float] = {}
-
- # all_gather + mean() to get average loss over all processes
- tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
-
- # reset tr_loss to zero
- tr_loss -= tr_loss
-
- logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
- logs["learning_rate"] = self._get_learning_rate()
-
- self._total_loss_scalar += tr_loss_scalar
- self._globalstep_last_logged = self.state.global_step
- self.store_flos()
-
- self.log(logs)
-
- metrics = None
- if self.control.should_evaluate:
- if isinstance(self.eval_dataset, dict):
- for eval_dataset_name, eval_dataset in self.eval_dataset.items():
- metrics = self.evaluate(
- eval_dataset=eval_dataset,
- ignore_keys=ignore_keys_for_eval,
- metric_key_prefix=f"eval_{eval_dataset_name}",
- )
- else:
- metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
- self._report_to_hp_search(trial, self.state.global_step, metrics)
-
- if self.control.should_save:
- self._save_checkpoint(model, trial, metrics=metrics)
- self.control = self.callback_handler.on_save(self.args, self.state, self.control)
-
- def _load_rng_state(self, checkpoint):
- # Load RNG states from `checkpoint`
- if checkpoint is None:
- return
-
- if self.args.world_size > 1:
- process_index = self.args.process_index
- rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth")
- if not os.path.isfile(rng_file):
- logger.info(
- f"Didn't find an RNG file for process {process_index}, if you are resuming a training that "
- "wasn't launched in a distributed fashion, reproducibility is not guaranteed."
- )
- return
- else:
- rng_file = os.path.join(checkpoint, "rng_state.pth")
- if not os.path.isfile(rng_file):
- logger.info(
- "Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
- "fashion, reproducibility is not guaranteed."
- )
- return
-
- checkpoint_rng_state = torch.load(rng_file)
- random.setstate(checkpoint_rng_state["python"])
- np.random.set_state(checkpoint_rng_state["numpy"])
- torch.random.set_rng_state(checkpoint_rng_state["cpu"])
- if torch.cuda.is_available():
- if self.args.local_rank != -1:
- torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
- else:
- try:
- torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
- except Exception as e:
- logger.info(
- f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}"
- "\nThis won't yield the same results as if the training had not been interrupted."
- )
- if is_torch_tpu_available():
- xm.set_rng_state(checkpoint_rng_state["xla"])
-
- def _save_checkpoint(self, model, trial, metrics=None):
- # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
- # want to save except FullyShardedDDP.
- # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
-
- # Save model checkpoint
- #checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
- checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.save_counter}"
- ##########
- self.save_counter += 1
- ##########
- if self.hp_search_backend is None and trial is None:
- self.store_flos()
-
- run_dir = self._get_output_dir(trial=trial)
- output_dir = os.path.join(run_dir, checkpoint_folder)
- self.save_model(output_dir, _internal_call=True)
- if self.deepspeed:
- # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed
- # config `stage3_gather_16bit_weights_on_model_save` is True
- self.deepspeed.save_checkpoint(output_dir)
-
- # Save optimizer and scheduler
- if self.sharded_ddp == ShardedDDPOption.SIMPLE:
- self.optimizer.consolidate_state_dict()
-
- if is_torch_tpu_available():
- xm.rendezvous("saving_optimizer_states")
- xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
- with warnings.catch_warnings(record=True) as caught_warnings:
- xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
- reissue_pt_warnings(caught_warnings)
- elif is_sagemaker_mp_enabled():
- opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False)
- smp.barrier()
- if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state:
- smp.save(
- opt_state_dict,
- os.path.join(output_dir, OPTIMIZER_NAME),
- partial=True,
- v3=smp.state.cfg.shard_optimizer_state,
- )
- if self.args.should_save:
- with warnings.catch_warnings(record=True) as caught_warnings:
- torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
- reissue_pt_warnings(caught_warnings)
- if self.do_grad_scaling:
- torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
- elif self.args.should_save and not self.deepspeed:
- # deepspeed.save_checkpoint above saves model/optim/sched
- torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
- with warnings.catch_warnings(record=True) as caught_warnings:
- torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
- reissue_pt_warnings(caught_warnings)
- if self.do_grad_scaling:
- torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
-
- # Determine the new best metric / best model checkpoint
- if metrics is not None and self.args.metric_for_best_model is not None:
- metric_to_check = self.args.metric_for_best_model
- if not metric_to_check.startswith("eval_"):
- metric_to_check = f"eval_{metric_to_check}"
- metric_value = metrics[metric_to_check]
-
- operator = np.greater if self.args.greater_is_better else np.less
- if (
- self.state.best_metric is None
- or self.state.best_model_checkpoint is None
- or operator(metric_value, self.state.best_metric)
- ):
- self.state.best_metric = metric_value
- self.state.best_model_checkpoint = output_dir
-
- # Save the Trainer state
- if self.args.should_save:
- self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
-
- # Save RNG state in non-distributed training
- rng_states = {
- "python": random.getstate(),
- "numpy": np.random.get_state(),
- "cpu": torch.random.get_rng_state(),
- }
- if torch.cuda.is_available():
- if self.args.local_rank == -1:
- # In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
- rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
- else:
- rng_states["cuda"] = torch.cuda.random.get_rng_state()
-
- if is_torch_tpu_available():
- rng_states["xla"] = xm.get_rng_state()
-
- # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
- # not yet exist.
- os.makedirs(output_dir, exist_ok=True)
-
- if self.args.world_size <= 1:
- torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
- else:
- torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth"))
-
- if self.args.push_to_hub:
- self._push_from_checkpoint(output_dir)
-
- # Maybe delete some older checkpoints.
- if self.args.should_save:
- self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
-
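- # Worked example (illustrative, not from the original file): because the folder name above uses
- # `self.save_counter` instead of `self.state.global_step`, successive saves land in checkpoint-0,
- # checkpoint-1, checkpoint-2, ... regardless of how many optimization steps have run (assuming
- # `save_counter` starts at 0 elsewhere in this class).
-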
- def _load_optimizer_and_scheduler(self, checkpoint):
- """If optimizer and scheduler states exist, load them."""
- if checkpoint is None:
- return
-
- if self.deepspeed:
- # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
- return
-
- checkpoint_file_exists = (
- glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*")
- if is_sagemaker_mp_enabled()
- else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME))
- )
- if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)):
- # Load in optimizer and scheduler states
- if is_torch_tpu_available():
- # On TPU we have to take some extra precautions to properly load the states on the right device.
- optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu")
- with warnings.catch_warnings(record=True) as caught_warnings:
- lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu")
- reissue_pt_warnings(caught_warnings)
-
- xm.send_cpu_data_to_device(optimizer_state, self.args.device)
- xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
-
- self.optimizer.load_state_dict(optimizer_state)
- self.lr_scheduler.load_state_dict(lr_scheduler_state)
- else:
- map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
- if is_sagemaker_mp_enabled():
- if os.path.isfile(os.path.join(checkpoint, "user_content.pt")):
- # Optimizer checkpoint was saved with smp >= 1.10
- def opt_load_hook(mod, opt):
- opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
-
- else:
- # Optimizer checkpoint was saved with smp < 1.10
- def opt_load_hook(mod, opt):
- if IS_SAGEMAKER_MP_POST_1_10:
- opt.load_state_dict(
- smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True)
- )
- else:
- opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
-
- self.model_wrapped.register_post_step_hook(opt_load_hook)
- else:
- self.optimizer.load_state_dict(
- torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)
- )
- with warnings.catch_warnings(record=True) as caught_warnings:
- self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME)))
- reissue_pt_warnings(caught_warnings)
- if self.do_grad_scaling and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)):
- self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME)))
-
- def hyperparameter_search(
- self,
- hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
- compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
- n_trials: int = 20,
- direction: str = "minimize",
- backend: Optional[Union["str", HPSearchBackend]] = None,
- hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
- **kwargs,
- ) -> BestRun:
- """
- Launch a hyperparameter search using `optuna`, `Ray Tune`, or `SigOpt`. The optimized quantity is determined
- by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
- the sum of all metrics otherwise.
-
- To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
- reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
- subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
- optimizer/scheduler.
-
- Args:
- hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
- A function that defines the hyperparameter search space. Will default to
- [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
- [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
- compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
- A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
- method. Will default to [`~trainer_utils.default_compute_objective`].
- n_trials (`int`, *optional*, defaults to 20):
- The number of trial runs to test.
- direction (`str`, *optional*, defaults to `"minimize"`):
- Whether to optimize greater or lower objectives. Can be `"minimize"` or `"maximize"`; you should pick
- `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics.
- backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
- The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
- on which one is installed. If all are installed, will default to optuna.
- hp_name (`Callable[["optuna.Trial"], str]`, *optional*):
- A function that defines the trial/run name. Will default to None.
- kwargs (`Dict[str, Any]`, *optional*):
- Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more
- information see:
- - the documentation of
- [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
- - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
- - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)
- Returns:
- [`trainer_utils.BestRun`]: All the information about the best run. Experiment summary can be found in
- `run_summary` attribute for Ray backend.
- """
- if backend is None:
- backend = default_hp_search_backend()
- if backend is None:
- raise RuntimeError(
- "At least one of optuna or ray should be installed. "
- "To install optuna run `pip install optuna`. "
- "To install ray run `pip install ray[tune]`. "
- "To install sigopt run `pip install sigopt`."
- )
- backend = HPSearchBackend(backend)
- if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
- raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
- if backend == HPSearchBackend.RAY and not is_ray_tune_available():
- raise RuntimeError(
- "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
- )
- if backend == HPSearchBackend.SIGOPT and not is_sigopt_available():
- raise RuntimeError("You picked the sigopt backend, but it is not installed. Use `pip install sigopt`.")
- if backend == HPSearchBackend.WANDB and not is_wandb_available():
- raise RuntimeError("You picked the wandb backend, but it is not installed. Use `pip install wandb`.")
- self.hp_search_backend = backend
- if self.model_init is None:
- raise RuntimeError(
- "To use hyperparameter search, you need to pass your model through a model_init function."
- )
-
- self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
- self.hp_name = hp_name
- self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
-
- backend_dict = {
- HPSearchBackend.OPTUNA: run_hp_search_optuna,
- HPSearchBackend.RAY: run_hp_search_ray,
- HPSearchBackend.SIGOPT: run_hp_search_sigopt,
- HPSearchBackend.WANDB: run_hp_search_wandb,
- }
- best_run = backend_dict[backend](self, n_trials, direction, **kwargs)
-
- self.hp_search_backend = None
- return best_run
-
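- # Hedged usage sketch (not part of the original file): a minimal Optuna-backed search, assuming the
- # `Trainer` was built with `model_init` as the docstring requires; `my_hp_space` is a hypothetical helper.
- #
- #     def my_hp_space(trial):
- #         return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True)}
- #
- #     best_run = trainer.hyperparameter_search(
- #         hp_space=my_hp_space, n_trials=20, direction="minimize", backend="optuna"
- #     )
-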
- def log(self, logs: Dict[str, float]) -> None:
- """
- Log `logs` on the various objects watching training.
- Subclass and override this method to inject custom behavior.
- Args:
- logs (`Dict[str, float]`):
- The values to log.
- """
- if self.state.epoch is not None:
- logs["epoch"] = round(self.state.epoch, 2)
-
- output = {**logs, **{"step": self.state.global_step}}
- self.state.log_history.append(output)
- self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
-
- def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]:
- """
- Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
- """
- if isinstance(data, Mapping):
- return type(data)({k: self._prepare_input(v) for k, v in data.items()})
- elif isinstance(data, (tuple, list)):
- return type(data)(self._prepare_input(v) for v in data)
- elif isinstance(data, torch.Tensor):
- kwargs = {"device": self.args.device}
- if self.deepspeed and (torch.is_floating_point(data) or torch.is_complex(data)):
- # NLP models inputs are int/uint and those get adjusted to the right dtype of the
- # embedding. Other models such as wav2vec2's inputs are already float and thus
- # may need special handling to match the dtypes of the model
- kwargs.update({"dtype": self.args.hf_deepspeed_config.dtype()})
- return data.to(**kwargs)
- return data
-
- def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
- """
- Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
- handling potential state.
- """
- inputs = self._prepare_input(inputs)
- if len(inputs) == 0:
- raise ValueError(
- "The batch received was empty, your model won't be able to train on it. Double-check that your "
- f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}."
- )
- if self.args.past_index >= 0 and self._past is not None:
- inputs["mems"] = self._past
-
- return inputs
-
- def compute_loss_context_manager(self):
- """
- A helper wrapper to group together context managers.
- """
- return self.autocast_smart_context_manager()
-
- def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True):
- """
- A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
- arguments, depending on the situation.
- """
- if self.use_cuda_amp or self.use_cpu_amp:
- if is_torch_greater_or_equal_than_1_10:
- ctx_manager = (
- torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
- if self.use_cpu_amp
- else torch.cuda.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
- )
- else:
- ctx_manager = torch.cuda.amp.autocast()
- else:
- ctx_manager = contextlib.nullcontext() if sys.version_info >= (3, 7) else contextlib.suppress()
-
- return ctx_manager
-
- def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
- """
- Perform a training step on a batch of inputs.
- Subclass and override to inject custom behavior.
- Args:
- model (`nn.Module`):
- The model to train.
- inputs (`Dict[str, Union[torch.Tensor, Any]]`):
- The inputs and targets of the model.
- The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
- argument `labels`. Check your model's documentation for all accepted arguments.
- Return:
- `torch.Tensor`: The tensor with training loss on this batch.
- """
- model.train()
- inputs = self._prepare_inputs(inputs)
-
- if is_sagemaker_mp_enabled():
- loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
- return loss_mb.reduce_mean().detach().to(self.args.device)
-
- with self.compute_loss_context_manager():
- loss = self.compute_loss(model, inputs)
-
- if self.args.n_gpu > 1:
- loss = loss.mean() # mean() to average on multi-gpu parallel training
-
- if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
- # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
- loss = loss / self.args.gradient_accumulation_steps
-
- if self.do_grad_scaling:
- self.scaler.scale(loss).backward()
- elif self.use_apex:
- with amp.scale_loss(loss, self.optimizer) as scaled_loss:
- scaled_loss.backward()
- elif self.deepspeed:
- # loss gets scaled under gradient_accumulation_steps in deepspeed
- loss = self.deepspeed.backward(loss)
- else:
- loss.backward()
-
- return loss.detach()
-
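- # Hedged sketch (not part of the original file): the docstring invites subclassing to inject custom
- # behavior; a minimal override might drop a hypothetical extra column before the usual step.
- #
- #     class MyTrainer(Trainer):
- #         def training_step(self, model, inputs):
- #             inputs.pop("example_id", None)  # "example_id" is a made-up column name
- #             return super().training_step(model, inputs)
-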
- def compute_loss(self, model, inputs, return_outputs=False):
- """
- How the loss is computed by Trainer. By default, all models return the loss in the first element.
- Subclass and override for custom behavior.
- """
- if self.label_smoother is not None and "labels" in inputs:
- labels = inputs.pop("labels")
- else:
- labels = None
- outputs = model(**inputs)
- # Save past state if it exists
- # TODO: this needs to be fixed and made cleaner later.
- if self.args.past_index >= 0:
- self._past = outputs[self.args.past_index]
-
- if labels is not None:
- if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
- loss = self.label_smoother(outputs, labels, shift_labels=True)
- else:
- loss = self.label_smoother(outputs, labels)
- else:
- if isinstance(outputs, dict) and "loss" not in outputs:
- raise ValueError(
- "The model did not return a loss from the inputs, only the following keys: "
- f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
- )
- # We don't use .loss here since the model may return tuples instead of ModelOutput.
- loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
-
- return (loss, outputs) if return_outputs else loss
-
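- # Hedged sketch (not part of the original file): a weighted cross-entropy via a compute_loss override,
- # assuming a classification model exposing `logits` and a hypothetical `class_weights` tensor.
- #
- #     class WeightedLossTrainer(Trainer):
- #         def compute_loss(self, model, inputs, return_outputs=False):
- #             labels = inputs.pop("labels")
- #             outputs = model(**inputs)
- #             loss = nn.CrossEntropyLoss(weight=class_weights)(
- #                 outputs.logits.view(-1, outputs.logits.size(-1)), labels.view(-1)
- #             )
- #             return (loss, outputs) if return_outputs else loss
-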
- def is_local_process_zero(self) -> bool:
- """
- Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
- machines) main process.
- """
- return self.args.local_process_index == 0
-
- def is_world_process_zero(self) -> bool:
- """
- Whether or not this process is the global main process (when training in a distributed fashion on several
- machines, this is only going to be `True` for one process).
- """
- # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
- # process index.
- if is_sagemaker_mp_enabled():
- return smp.rank() == 0
- else:
- return self.args.process_index == 0
-
- def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
- """
- Will save the model, so you can reload it using `from_pretrained()`.
- Will only save from the main process.
- """
-
- if output_dir is None:
- output_dir = self.args.output_dir
-
- if is_torch_tpu_available():
- self._save_tpu(output_dir)
- elif is_sagemaker_mp_enabled():
- # Calling the state_dict needs to be done on the wrapped model and on all processes.
- os.makedirs(output_dir, exist_ok=True)
- state_dict = self.model_wrapped.state_dict()
- if self.args.should_save:
- self._save(output_dir, state_dict=state_dict)
- if IS_SAGEMAKER_MP_POST_1_10:
- # 'user_content.pt' indicates model state_dict saved with smp >= 1.10
- Path(os.path.join(output_dir, "user_content.pt")).touch()
- elif (
- ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp
- or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
- or self.fsdp is not None
- ):
- state_dict = self.model.state_dict()
-
- if self.args.should_save:
- self._save(output_dir, state_dict=state_dict)
- elif self.deepspeed:
- # this takes care of everything as long as we aren't under zero3
- if self.args.should_save:
- self._save(output_dir)
-
- if is_deepspeed_zero3_enabled():
- # It's too complicated to try to override different places where the weights dump gets
- # saved, so since under zero3 the file is bogus, simply delete it. The user should
- # either user deepspeed checkpoint to resume or to recover full weights use
- # zero_to_fp32.py stored in the checkpoint.
- if self.args.should_save:
- file = os.path.join(output_dir, WEIGHTS_NAME)
- if os.path.isfile(file):
- # logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
- os.remove(file)
-
- # now save the real model if stage3_gather_16bit_weights_on_model_save=True
- # if false it will not be saved.
- # This must be called on all ranks
- if not self.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME):
- logger.warning(
- "deepspeed.save_16bit_model didn't save the model, since"
- " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use"
- " zero_to_fp32.py to recover weights"
- )
- self.deepspeed.save_checkpoint(output_dir)
-
- elif self.args.should_save:
- self._save(output_dir)
-
- # Push to the Hub when `save_model` is called by the user.
- if self.args.push_to_hub and not _internal_call:
- self.push_to_hub(commit_message="Model save")
-
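- # Hedged usage sketch (not part of the original file): after `save_model`, the weights can be reloaded
- # with `from_pretrained`, as the docstring states; assumes a sequence-classification model and
- # `from transformers import AutoModelForSequenceClassification`. "my_output_dir" is a placeholder.
- #
- #     trainer.save_model("my_output_dir")
- #     reloaded = AutoModelForSequenceClassification.from_pretrained("my_output_dir")
-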
- def _save_tpu(self, output_dir: Optional[str] = None):
- output_dir = output_dir if output_dir is not None else self.args.output_dir
- logger.info(f"Saving model checkpoint to {output_dir}")
-
- if xm.is_master_ordinal():
- os.makedirs(output_dir, exist_ok=True)
- torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
-
- # Save a trained model and configuration using `save_pretrained()`.
- # They can then be reloaded using `from_pretrained()`
- xm.rendezvous("saving_checkpoint")
- if not isinstance(self.model, PreTrainedModel):
- if isinstance(unwrap_model(self.model), PreTrainedModel):
- unwrap_model(self.model).save_pretrained(
- output_dir,
- is_main_process=self.args.should_save,
- state_dict=self.model.state_dict(),
- save_function=xm.save,
- )
- else:
- logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
- state_dict = self.model.state_dict()
- xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
- else:
- self.model.save_pretrained(output_dir, is_main_process=self.args.should_save, save_function=xm.save)
- if self.tokenizer is not None and self.args.should_save:
- self.tokenizer.save_pretrained(output_dir)
-
- def _save(self, output_dir: Optional[str] = None, state_dict=None):
- # If we are executing this function, we are the process zero, so we don't check for that.
- output_dir = output_dir if output_dir is not None else self.args.output_dir
- os.makedirs(output_dir, exist_ok=True)
- logger.info(f"Saving model checkpoint to {output_dir}")
- # Save a trained model and configuration using `save_pretrained()`.
- # They can then be reloaded using `from_pretrained()`
- if not isinstance(self.model, PreTrainedModel):
- if isinstance(unwrap_model(self.model), PreTrainedModel):
- if state_dict is None:
- state_dict = self.model.state_dict()
- unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
- else:
- logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
- if state_dict is None:
- state_dict = self.model.state_dict()
- torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
- else:
- self.model.save_pretrained(output_dir, state_dict=state_dict)
- if self.tokenizer is not None:
- self.tokenizer.save_pretrained(output_dir)
-
- # Good practice: save your training arguments together with the trained model
- torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
-
- def store_flos(self):
- # Storing the number of floating-point operations that went into the model
- if self.args.local_rank != -1:
- self.state.total_flos += (
- distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item()
- )
- self.current_flos = 0
- else:
- self.state.total_flos += self.current_flos
- self.current_flos = 0
-
- def _sorted_checkpoints(
- self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
- ) -> List[str]:
- ordering_and_checkpoint_path = []
-
- glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
-
- for path in glob_checkpoints:
- if use_mtime:
- ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
- else:
- regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
- if regex_match is not None and regex_match.groups() is not None:
- ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
-
- checkpoints_sorted = sorted(ordering_and_checkpoint_path)
- checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
- # Make sure we don't delete the best model.
- if self.state.best_model_checkpoint is not None:
- best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
- for i in range(best_model_index, len(checkpoints_sorted) - 2):
- checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
- return checkpoints_sorted
-
- def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
- if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
- return
-
- # Check if we should delete older checkpoint(s)
- checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
- if len(checkpoints_sorted) <= self.args.save_total_limit:
- return
-
- # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
- # we don't do to allow resuming.
- save_total_limit = self.args.save_total_limit
- if (
- self.state.best_model_checkpoint is not None
- and self.args.save_total_limit == 1
- and checkpoints_sorted[-1] != self.state.best_model_checkpoint
- ):
- save_total_limit = 2
-
- number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
- checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
- for checkpoint in checkpoints_to_be_deleted:
- logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
- shutil.rmtree(checkpoint, ignore_errors=True)
-
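- # Worked example (illustrative, not from the original file): with save_total_limit=2 and sorted
- # checkpoints [checkpoint-1, checkpoint-2, checkpoint-3], one folder is deleted. If checkpoint-1 is
- # the best model, _sorted_checkpoints shifts it later in the list, so checkpoint-2 is removed instead
- # and the best checkpoint survives the rotation.
-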
- def evaluate(
- self,
- eval_dataset: Optional[Dataset] = None,
- ignore_keys: Optional[List[str]] = None,
- metric_key_prefix: str = "eval",
- ) -> Dict[str, float]:
- """
- Run evaluation and returns metrics.
- The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
- (pass it to the init `compute_metrics` argument).
- You can also subclass and override this method to inject custom behavior.
- Args:
- eval_dataset (`Dataset`, *optional*):
- Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
- not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
- method.
- ignore_keys (`List[str]`, *optional*):
- A list of keys in the output of your model (if it is a dictionary) that should be ignored when
- gathering predictions.
- metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
- An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
- "eval_bleu" if the prefix is "eval" (default)
- Returns:
- A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
- dictionary also contains the epoch number which comes from the training state.
- """
- # memory metrics - must set up as early as possible
- self._memory_tracker.start()
-
- eval_dataloader = self.get_eval_dataloader(eval_dataset)
- start_time = time.time()
-
- eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
- output = eval_loop(
- eval_dataloader,
- description="Evaluation",
- # No point gathering the predictions if there are no metrics, otherwise we defer to
- # self.args.prediction_loss_only
- prediction_loss_only=True if self.compute_metrics is None else None,
- ignore_keys=ignore_keys,
- metric_key_prefix=metric_key_prefix,
- )
-
- total_batch_size = self.args.eval_batch_size * self.args.world_size
- if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
- start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
- output.metrics.update(
- speed_metrics(
- metric_key_prefix,
- start_time,
- num_samples=output.num_samples,
- num_steps=math.ceil(output.num_samples / total_batch_size),
- )
- )
-
- self.log(output.metrics)
-
- if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
- # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
- xm.master_print(met.metrics_report())
-
- self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
-
- self._memory_tracker.stop_and_update_metrics(output.metrics)
-
- return output.metrics
-
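- # Hedged usage sketch (not part of the original file): evaluating on a held-out split with a custom
- # metric prefix, assuming a hypothetical `val_dataset`; the loss comes back under "val_loss".
- #
- #     metrics = trainer.evaluate(eval_dataset=val_dataset, metric_key_prefix="val")
- #     print(metrics["val_loss"])
-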
- def predict(
- self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
- ) -> PredictionOutput:
- """
- Run prediction and returns predictions and potential metrics.
- Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
- will also return metrics, like in `evaluate()`.
- Args:
- test_dataset (`Dataset`):
- Dataset to run the predictions on. If it is a `datasets.Dataset`, columns not accepted by the
- `model.forward()` method are automatically removed. Has to implement the method `__len__`
- ignore_keys (`List[str]`, *optional*):
- A list of keys in the output of your model (if it is a dictionary) that should be ignored when
- gathering predictions.
- metric_key_prefix (`str`, *optional*, defaults to `"test"`):
- An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
- "test_bleu" if the prefix is "test" (default)
-
- If your predictions or labels have different sequence lengths (for instance because you're doing dynamic padding
- in a token classification task) the predictions will be padded (on the right) to allow for concatenation into
- one array. The padding index is -100.
-
- Returns: *NamedTuple* A namedtuple with the following keys:
- - predictions (`np.ndarray`): The predictions on `test_dataset`.
- - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
- - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
- labels).
- """
- # memory metrics - must set up as early as possible
- self._memory_tracker.start()
-
- test_dataloader = self.get_test_dataloader(test_dataset)
- start_time = time.time()
-
- eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
- output = eval_loop(
- test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
- )
- total_batch_size = self.args.eval_batch_size * self.args.world_size
- if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
- start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
- output.metrics.update(
- speed_metrics(
- metric_key_prefix,
- start_time,
- num_samples=output.num_samples,
- num_steps=math.ceil(output.num_samples / total_batch_size),
- )
- )
-
- self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics)
- self._memory_tracker.stop_and_update_metrics(output.metrics)
-
- return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
-
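- # Hedged usage sketch (not part of the original file): running prediction on a hypothetical
- # `test_dataset` with a classification head; `output.predictions` holds the gathered logits.
- #
- #     output = trainer.predict(test_dataset)
- #     preds = output.predictions.argmax(axis=-1)
-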
- def evaluation_loop(
- self,
- dataloader: DataLoader,
- description: str,
- prediction_loss_only: Optional[bool] = None,
- ignore_keys: Optional[List[str]] = None,
- metric_key_prefix: str = "eval",
- ) -> EvalLoopOutput:
- """
- Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
- Works both with or without labels.
- """
- args = self.args
-
- prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
-
- # if eval is called w/o train init deepspeed here
- if args.deepspeed and not self.deepspeed:
- # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
- # from the checkpoint eventually
- deepspeed_engine, _, _ = deepspeed_init(
- self, num_training_steps=0, resume_from_checkpoint=None, inference=True
- )
- self.model = deepspeed_engine.module
- self.model_wrapped = deepspeed_engine
- self.deepspeed = deepspeed_engine
-
- model = self._wrap_model(self.model, training=False, dataloader=dataloader)
-
- # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
- # while ``train`` is running, cast it to the right dtype first and then put on device
- if not self.is_in_train:
- if args.fp16_full_eval:
- model = model.to(dtype=torch.float16, device=args.device)
- elif args.bf16_full_eval:
- model = model.to(dtype=torch.bfloat16, device=args.device)
-
- batch_size = self.args.eval_batch_size
-
- logger.info(f"***** Running {description} *****")
- if has_length(dataloader):
- logger.info(f" Num examples = {self.num_examples(dataloader)}")
- else:
- logger.info(" Num examples: Unknown")
- logger.info(f" Batch size = {batch_size}")
-
- model.eval()
-
- self.callback_handler.eval_dataloader = dataloader
- # Do this before wrapping.
- eval_dataset = getattr(dataloader, "dataset", None)
-
- if is_torch_tpu_available():
- dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
-
- if args.past_index >= 0:
- self._past = None
-
- # Initialize containers
- # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
- losses_host = None
- preds_host = None
- labels_host = None
- inputs_host = None
-
- # losses/preds/labels on CPU (final containers)
- all_losses = None
- all_preds = None
- all_labels = None
- all_inputs = None
- # Will be useful when we have an iterable dataset so don't know its length.
-
- observed_num_examples = 0
- # Main evaluation loop
- for step, inputs in enumerate(dataloader):
- # Update the observed num examples
- observed_batch_size = find_batch_size(inputs)
- if observed_batch_size is not None:
- observed_num_examples += observed_batch_size
- # For batch samplers, batch_size is not known by the dataloader in advance.
- if batch_size is None:
- batch_size = observed_batch_size
-
- # Prediction step
- loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
- inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
-
- if is_torch_tpu_available():
- xm.mark_step()
-
- # Update containers on host
- if loss is not None:
- losses = self._nested_gather(loss.repeat(batch_size))
- losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
- if labels is not None:
- labels = self._pad_across_processes(labels)
- labels = self._nested_gather(labels)
- labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
- if inputs_decode is not None:
- inputs_decode = self._pad_across_processes(inputs_decode)
- inputs_decode = self._nested_gather(inputs_decode)
- inputs_host = (
- inputs_decode
- if inputs_host is None
- else nested_concat(inputs_host, inputs_decode, padding_index=-100)
- )
- if logits is not None:
- logits = self._pad_across_processes(logits)
- logits = self._nested_gather(logits)
- if self.preprocess_logits_for_metrics is not None:
- logits = self.preprocess_logits_for_metrics(logits, labels)
- preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
- self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
-
- # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
- if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
- if losses_host is not None:
- losses = nested_numpify(losses_host)
- all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
- if preds_host is not None:
- logits = nested_numpify(preds_host)
- all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
- if inputs_host is not None:
- inputs_decode = nested_numpify(inputs_host)
- all_inputs = (
- inputs_decode
- if all_inputs is None
- else nested_concat(all_inputs, inputs_decode, padding_index=-100)
- )
- if labels_host is not None:
- labels = nested_numpify(labels_host)
- all_labels = (
- labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
- )
-
- # Set back to None to begin a new accumulation
- losses_host, preds_host, inputs_host, labels_host = None, None, None, None
-
- if args.past_index and hasattr(self, "_past"):
- # Clean the state at the end of the evaluation loop
- delattr(self, "_past")
-
- # Gather all remaining tensors and put them back on the CPU
- if losses_host is not None:
- losses = nested_numpify(losses_host)
- all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
- if preds_host is not None:
- logits = nested_numpify(preds_host)
- all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
- if inputs_host is not None:
- inputs_decode = nested_numpify(inputs_host)
- all_inputs = (
- inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100)
- )
- if labels_host is not None:
- labels = nested_numpify(labels_host)
- all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
-
- # Number of samples
- if has_length(eval_dataset):
- num_samples = len(eval_dataset)
- # The instance check is weird and does not actually check for the type, but whether the dataset has the right
- # methods. Therefore we need to make sure it also has the attribute.
- elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0:
- num_samples = eval_dataset.num_examples
- else:
- if has_length(dataloader):
- num_samples = self.num_examples(dataloader)
- else: # both len(dataloader.dataset) and len(dataloader) fail
- num_samples = observed_num_examples
- if num_samples == 0 and observed_num_examples > 0:
- num_samples = observed_num_examples
-
- # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
- # samples has been rounded to a multiple of batch_size, so we truncate.
- if all_losses is not None:
- all_losses = all_losses[:num_samples]
- if all_preds is not None:
- all_preds = nested_truncate(all_preds, num_samples)
- if all_labels is not None:
- all_labels = nested_truncate(all_labels, num_samples)
- if all_inputs is not None:
- all_inputs = nested_truncate(all_inputs, num_samples)
-
- # Metrics!
- if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
- if args.include_inputs_for_metrics:
- metrics = self.compute_metrics(
- EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs)
- )
- else:
- metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
- else:
- metrics = {}
-
- # To be JSON-serializable, we need to remove numpy types or zero-d tensors
- metrics = denumpify_detensorize(metrics)
-
- if all_losses is not None:
- metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
- if hasattr(self, "jit_compilation_time"):
- metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time
-
- # Prefix all keys with metric_key_prefix + '_'
- for key in list(metrics.keys()):
- if not key.startswith(f"{metric_key_prefix}_"):
- metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
-
- return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
-
- def _nested_gather(self, tensors, name=None):
- """
- Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
- concatenating them to `gathered`
- """
- if tensors is None:
- return
- if is_torch_tpu_available():
- if name is None:
- name = "nested_gather"
- tensors = nested_xla_mesh_reduce(tensors, name)
- elif is_sagemaker_mp_enabled():
- tensors = smp_gather(tensors)
- elif self.args.local_rank != -1:
- tensors = distributed_concat(tensors)
- return tensors
-
- # Copied from Accelerate.
- def _pad_across_processes(self, tensor, pad_index=-100):
- """
- Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
- they can safely be gathered.
- """
- if isinstance(tensor, (list, tuple)):
- return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
- elif isinstance(tensor, dict):
- return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
- elif not isinstance(tensor, torch.Tensor):
- raise TypeError(
- f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
- )
-
- if len(tensor.shape) < 2:
- return tensor
- # Gather all sizes
- size = torch.tensor(tensor.shape, device=tensor.device)[None]
- sizes = self._nested_gather(size).cpu()
-
- max_size = max(s[1] for s in sizes)
- # When extracting XLA graphs for compilation, max_size is 0,
- # so use inequality to avoid errors.
- if tensor.shape[1] >= max_size:
- return tensor
-
- # Then pad to the maximum size
- old_size = tensor.shape
- new_size = list(old_size)
- new_size[1] = max_size
- new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
- new_tensor[:, : old_size[1]] = tensor
- return new_tensor
-
- def prediction_step(
- self,
- model: nn.Module,
- inputs: Dict[str, Union[torch.Tensor, Any]],
- prediction_loss_only: bool,
- ignore_keys: Optional[List[str]] = None,
- ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
- """
- Perform an evaluation step on `model` using `inputs`.
- Subclass and override to inject custom behavior.
- Args:
- model (`nn.Module`):
- The model to evaluate.
- inputs (`Dict[str, Union[torch.Tensor, Any]]`):
- The inputs and targets of the model.
- The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
- argument `labels`. Check your model's documentation for all accepted arguments.
- prediction_loss_only (`bool`):
- Whether or not to return the loss only.
- ignore_keys (`List[str]`, *optional*):
- A list of keys in the output of your model (if it is a dictionary) that should be ignored when
- gathering predictions.
- Return:
- Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
- logits and labels (each being optional).
- """
- has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names)
- # For CLIP-like models capable of returning loss values.
- # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss`
- # is `True` in `model.forward`.
- return_loss = inputs.get("return_loss", None)
- if return_loss is None:
- return_loss = self.can_return_loss
- loss_without_labels = True if len(self.label_names) == 0 and return_loss else False
-
- inputs = self._prepare_inputs(inputs)
- if ignore_keys is None:
- if hasattr(self.model, "config"):
- ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
- else:
- ignore_keys = []
-
- # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
- if has_labels or loss_without_labels:
- labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
- if len(labels) == 1:
- labels = labels[0]
- else:
- labels = None
-
- with torch.no_grad():
- if is_sagemaker_mp_enabled():
- raw_outputs = smp_forward_only(model, inputs)
- if has_labels or loss_without_labels:
- if isinstance(raw_outputs, dict):
- loss_mb = raw_outputs["loss"]
- logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
- else:
- loss_mb = raw_outputs[0]
- logits_mb = raw_outputs[1:]
-
- loss = loss_mb.reduce_mean().detach().cpu()
- logits = smp_nested_concat(logits_mb)
- else:
- loss = None
- if isinstance(raw_outputs, dict):
- logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
- else:
- logits_mb = raw_outputs
- logits = smp_nested_concat(logits_mb)
- else:
- if has_labels or loss_without_labels:
- with self.compute_loss_context_manager():
- loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
- loss = loss.mean().detach()
-
- if isinstance(outputs, dict):
- logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
- else:
- logits = outputs[1:]
- else:
- loss = None
- with self.compute_loss_context_manager():
- outputs = model(**inputs)
- if isinstance(outputs, dict):
- logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
- else:
- logits = outputs
- # TODO: this needs to be fixed and made cleaner later.
- if self.args.past_index >= 0:
- self._past = outputs[self.args.past_index - 1]
-
- if prediction_loss_only:
- return (loss, None, None)
-
- logits = nested_detach(logits)
- if len(logits) == 1:
- logits = logits[0]
-
- return (loss, logits, labels)
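-
- # Hypothetical usage sketch (not in the original file): this is the per-batch contract
- # that the evaluation loop relies on. Assuming `trainer` is a configured Trainer and
- # `batch` is a dict with "input_ids", "attention_mask" and "labels":
- #   loss, logits, labels = trainer.prediction_step(trainer.model, batch, prediction_loss_only=False)
- #   # loss is a detached scalar (or None); logits and labels are detached tensors (or None)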
-
- def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
- """
- For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
- operations for every backward + forward pass. If using another model, either implement such a method in the
- model or subclass and override this method.
- Args:
- inputs (`Dict[str, Union[torch.Tensor, Any]]`):
- The inputs and targets of the model.
- Returns:
- `int`: The number of floating-point operations.
- """
- if hasattr(self.model, "floating_point_ops"):
- return self.model.floating_point_ops(inputs)
- else:
- return 0
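-
- # Rough illustration (an assumption, not taken from this file): for models inheriting
- # from PreTrainedModel the default estimate is on the order of
- # 6 * tokens_in_batch * non-embedding parameters per forward + backward pass, e.g.
- #   flops = trainer.floating_point_ops({"input_ids": torch.ones(8, 512, dtype=torch.long)})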
-
- def init_git_repo(self, at_init: bool = False):
- """
- Initializes a git repo in `self.args.hub_model_id`.
- Args:
- at_init (`bool`, *optional*, defaults to `False`):
- Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is
- `True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped
- out.
- """
- if not self.is_world_process_zero():
- return
- if self.args.hub_model_id is None:
- repo_name = Path(self.args.output_dir).absolute().name
- else:
- repo_name = self.args.hub_model_id
- if "/" not in repo_name:
- repo_name = get_full_repo_name(repo_name, token=self.args.hub_token)
-
- # Make sure the repo exists.
- create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True)
- try:
- self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)
- except EnvironmentError:
- if self.args.overwrite_output_dir and at_init:
- # Try again after wiping output_dir
- shutil.rmtree(self.args.output_dir)
- self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)
- else:
- raise
-
- self.repo.git_pull()
-
- # By default, ignore the checkpoint folders
- if (
- not os.path.exists(os.path.join(self.args.output_dir, ".gitignore"))
- and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS
- ):
- with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer:
- writer.writelines(["checkpoint-*/"])
-
- # Add "*.sagemaker" to .gitignore if using SageMaker
- if os.environ.get("SM_TRAINING_ENV"):
- self._add_sm_patterns_to_gitignore()
-
- self.push_in_progress = None
-
- def create_model_card(
- self,
- language: Optional[str] = None,
- license: Optional[str] = None,
- tags: Union[str, List[str], None] = None,
- model_name: Optional[str] = None,
- finetuned_from: Optional[str] = None,
- tasks: Union[str, List[str], None] = None,
- dataset_tags: Union[str, List[str], None] = None,
- dataset: Union[str, List[str], None] = None,
- dataset_args: Union[str, List[str], None] = None,
- ):
- """
- Creates a draft of a model card using the information available to the `Trainer`.
- Args:
- language (`str`, *optional*):
- The language of the model (if applicable)
- license (`str`, *optional*):
- The license of the model. Will default to the license of the pretrained model used, if the original
- model given to the `Trainer` comes from a repo on the Hub.
- tags (`str` or `List[str]`, *optional*):
- Some tags to be included in the metadata of the model card.
- model_name (`str`, *optional*):
- The name of the model.
- finetuned_from (`str`, *optional*):
- The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo
- of the original model given to the `Trainer` (if it comes from the Hub).
- tasks (`str` or `List[str]`, *optional*):
- One or several task identifiers, to be included in the metadata of the model card.
- dataset_tags (`str` or `List[str]`, *optional*):
- One or several dataset tags, to be included in the metadata of the model card.
- dataset (`str` or `List[str]`, *optional*):
- One or several dataset identifiers, to be included in the metadata of the model card.
- dataset_args (`str` or `List[str]`, *optional*):
- One or several dataset arguments, to be included in the metadata of the model card.
- """
- if not self.is_world_process_zero():
- return
-
- training_summary = TrainingSummary.from_trainer(
- self,
- language=language,
- license=license,
- tags=tags,
- model_name=model_name,
- finetuned_from=finetuned_from,
- tasks=tasks,
- dataset_tags=dataset_tags,
- dataset=dataset,
- dataset_args=dataset_args,
- )
- model_card = training_summary.to_model_card()
- with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
- f.write(model_card)
-
- def _push_from_checkpoint(self, checkpoint_folder):
- # Only push from one node.
- if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END:
- return
- # If we haven't finished the last push, we don't do this one.
- if self.push_in_progress is not None and not self.push_in_progress.is_done:
- return
-
- output_dir = self.args.output_dir
- # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder
- modeling_files = [CONFIG_NAME, WEIGHTS_NAME]
- for modeling_file in modeling_files:
- if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)):
- shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file))
- # Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure.
- if self.tokenizer is not None:
- self.tokenizer.save_pretrained(output_dir)
- # Same for the training arguments
- torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
-
- try:
- if self.args.hub_strategy == HubStrategy.CHECKPOINT:
- # Temporarily move the checkpoint just saved for the push
- tmp_checkpoint = os.path.join(output_dir, "last-checkpoint")
- # We have to remove the "last-checkpoint" dir if it exists, otherwise the checkpoint is moved as a
- # subfolder.
- if os.path.isdir(tmp_checkpoint):
- shutil.rmtree(tmp_checkpoint)
- shutil.move(checkpoint_folder, tmp_checkpoint)
-
- if self.args.save_strategy == IntervalStrategy.STEPS:
- commit_message = f"Training in progress, step {self.state.global_step}"
- else:
- commit_message = f"Training in progress, epoch {int(self.state.epoch)}"
- _, self.push_in_progress = self.repo.push_to_hub(
- commit_message=commit_message, blocking=False, auto_lfs_prune=True
- )
- finally:
- if self.args.hub_strategy == HubStrategy.CHECKPOINT:
- # Move back the checkpoint to its place
- shutil.move(tmp_checkpoint, checkpoint_folder)
-
- def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
- """
- Upload *self.model* and *self.tokenizer* to the 🤗 model hub on the repo *self.args.hub_model_id*.
- Parameters:
- commit_message (`str`, *optional*, defaults to `"End of training"`):
- Message to commit while pushing.
- blocking (`bool`, *optional*, defaults to `True`):
- Whether the function should return only when the `git push` has finished.
- kwargs:
- Additional keyword arguments passed along to [`~Trainer.create_model_card`].
- Returns:
- The URL of the commit of your model in the given repository if `blocking=True`, or a tuple with the URL of
- the commit and an object to track the progress of the push if `blocking=False`.
- """
- # If a user manually calls `push_to_hub` with `self.args.push_to_hub = False`, we try to create the repo but
- # it might fail.
- if not hasattr(self, "repo"):
- self.init_git_repo()
-
- model_name = kwargs.pop("model_name", None)
- if model_name is None and self.args.should_save:
- if self.args.hub_model_id is None:
- model_name = Path(self.args.output_dir).name
- else:
- model_name = self.args.hub_model_id.split("/")[-1]
-
- # Needs to be executed on all processes for TPU training, but will only save on the process determined by
- # self.args.should_save.
- self.save_model(_internal_call=True)
-
- # Only push from one node.
- if not self.is_world_process_zero():
- return
-
- # Cancel any async push in progress if blocking=True. The commits will all be pushed together.
- if blocking and self.push_in_progress is not None and not self.push_in_progress.is_done:
- self.push_in_progress._process.kill()
- self.push_in_progress = None
-
- git_head_commit_url = self.repo.push_to_hub(
- commit_message=commit_message, blocking=blocking, auto_lfs_prune=True
- )
- # Push the model card separately so it stays independent of the rest of the model
- if self.args.should_save:
- self.create_model_card(model_name=model_name, **kwargs)
- try:
- self.repo.push_to_hub(
- commit_message="update model card README.md", blocking=blocking, auto_lfs_prune=True
- )
- except EnvironmentError as exc:
- logger.error(f"Error pushing update to the model card. Please read logs and retry.\n${exc}")
-
- return git_head_commit_url
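-
- # Hypothetical usage sketch (not part of the original file), assuming the Trainer was
- # created with TrainingArguments(..., push_to_hub=True, hub_model_id="user/my-model"):
- #   trainer.train()
- #   trainer.push_to_hub(commit_message="End of training", blocking=True)
- #   # saves the model and tokenizer, pushes them to the Hub repo and uploads a model card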
-
- #
- # Deprecated code
- #
-
- def prediction_loop(
- self,
- dataloader: DataLoader,
- description: str,
- prediction_loss_only: Optional[bool] = None,
- ignore_keys: Optional[List[str]] = None,
- metric_key_prefix: str = "eval",
- ) -> EvalLoopOutput:
- """
- Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
- Works both with or without labels.
- """
- args = self.args
-
- if not has_length(dataloader):
- raise ValueError("dataloader must implement a working __len__")
-
- prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
-
- # if eval is called w/o train init deepspeed here
- if args.deepspeed and not self.deepspeed:
- # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
- # from the checkpoint eventually
- deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
- self.model = deepspeed_engine.module
- self.model_wrapped = deepspeed_engine
- self.deepspeed = deepspeed_engine
- # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
- # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
- # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
- deepspeed_engine.optimizer.optimizer = None
- deepspeed_engine.lr_scheduler = None
-
- model = self._wrap_model(self.model, training=False, dataloader=dataloader)
-
- # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
- # while ``train`` is running, cast it to the right dtype first and then put on device
- if not self.is_in_train:
- if args.fp16_full_eval:
- model = model.to(dtype=torch.float16, device=args.device)
- elif args.bf16_full_eval:
- model = model.to(dtype=torch.bfloat16, device=args.device)
-
- batch_size = dataloader.batch_size
- num_examples = self.num_examples(dataloader)
- logger.info(f"***** Running {description} *****")
- logger.info(f" Num examples = {num_examples}")
- logger.info(f" Batch size = {batch_size}")
- losses_host: torch.Tensor = None
- preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
- labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
- inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None
-
- world_size = max(1, args.world_size)
-
- eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
- if not prediction_loss_only:
- # The actual number of eval samples can be greater than num_examples in distributed settings (when we pass
- # a batch size to the sampler).
- make_multiple_of = None
- if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
- make_multiple_of = dataloader.sampler.batch_size
- preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
- labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
- inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
-
- model.eval()
-
- if is_torch_tpu_available():
- dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
-
- if args.past_index >= 0:
- self._past = None
-
- self.callback_handler.eval_dataloader = dataloader
-
- for step, inputs in enumerate(dataloader):
- loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
- inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
-
- if loss is not None:
- losses = loss.repeat(batch_size)
- losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
- if logits is not None:
- preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
- if labels is not None:
- labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
- if inputs_decode is not None:
- inputs_host = (
- inputs_decode
- if inputs_host is None
- else nested_concat(inputs_host, inputs_decode, padding_index=-100)
- )
- self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
-
- # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
- if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
- eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
- if not prediction_loss_only:
- preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
- labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
- inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
-
- # Set back to None to begin a new accumulation
- losses_host, preds_host, labels_host, inputs_host = None, None, None, None
-
- if args.past_index and hasattr(self, "_past"):
- # Clean the state at the end of the evaluation loop
- delattr(self, "_past")
-
- # Gather all remaining tensors and put them back on the CPU
- eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
- if not prediction_loss_only:
- preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
- labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
- inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
-
- eval_loss = eval_losses_gatherer.finalize()
- preds = preds_gatherer.finalize() if not prediction_loss_only else None
- label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
- inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None
-
- if self.compute_metrics is not None and preds is not None and label_ids is not None:
- if args.include_inputs_for_metrics:
- metrics = self.compute_metrics(
- EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids)
- )
- else:
- metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
- else:
- metrics = {}
-
- # To be JSON-serializable, we need to remove numpy types or zero-d tensors
- metrics = denumpify_detensorize(metrics)
-
- if eval_loss is not None:
- metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
-
- # Prefix all keys with metric_key_prefix + '_'
- for key in list(metrics.keys()):
- if not key.startswith(f"{metric_key_prefix}_"):
- metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
-
- return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples)
-
- def _gather_and_numpify(self, tensors, name):
- """
- Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
- concatenating them to `gathered`
- """
- if tensors is None:
- return
- if is_torch_tpu_available():
- tensors = nested_xla_mesh_reduce(tensors, name)
- elif is_sagemaker_mp_enabled():
- tensors = smp_gather(tensors)
- elif self.args.local_rank != -1:
- tensors = distributed_concat(tensors)
-
- return nested_numpify(tensors)
-
- def _add_sm_patterns_to_gitignore(self) -> None:
- """Add SageMaker Checkpointing patterns to .gitignore file."""
- # Make sure we only do this on the main process
- if not self.is_world_process_zero():
- return
-
- patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"]
-
- # Get current .gitignore content
- if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")):
- with open(os.path.join(self.repo.local_dir, ".gitignore"), "r") as f:
- current_content = f.read()
- else:
- current_content = ""
-
- # Add the patterns to .gitignore
- content = current_content
- for pattern in patterns:
- if pattern not in content:
- if content.endswith("\n"):
- content += pattern
- else:
- content += f"\n{pattern}"
-
- # Write the .gitignore file if it has changed
- if content != current_content:
- with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f:
- logger.debug(f"Writing .gitignore file. Content: {content}")
- f.write(content)
-
- self.repo.git_add(".gitignore")
-
- # avoid race condition with git status
- time.sleep(0.5)
-
- if not self.repo.is_repo_clean():
- self.repo.git_commit("Add *.sagemaker patterns to .gitignore.")
- self.repo.git_push()
diff --git a/spaces/Pengyey/bingo-chuchu/src/components/ui/select.tsx b/spaces/Pengyey/bingo-chuchu/src/components/ui/select.tsx
deleted file mode 100644
index 77f12c2996f541b97663de4c9e20ab34d4ec2fac..0000000000000000000000000000000000000000
--- a/spaces/Pengyey/bingo-chuchu/src/components/ui/select.tsx
+++ /dev/null
@@ -1,123 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as SelectPrimitive from '@radix-ui/react-select'
-
-import { cn } from '@/lib/utils'
-import {
- IconArrowDown,
- IconCheck,
- IconChevronUpDown
-} from '@/components/ui/icons'
-
-const Select = SelectPrimitive.Root
-
-const SelectGroup = SelectPrimitive.Group
-
-const SelectValue = SelectPrimitive.Value
-
-const SelectTrigger = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Trigger>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Trigger>
->(({ className, children, ...props }, ref) => (
-
- {children}
-
-
-
-
-))
-SelectTrigger.displayName = SelectPrimitive.Trigger.displayName
-
-const SelectContent = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Content>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Content>
->(({ className, children, position = 'popper', ...props }, ref) => (
-
-
-
- {children}
-
-
-
-))
-SelectContent.displayName = SelectPrimitive.Content.displayName
-
-const SelectLabel = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Label>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Label>
->(({ className, ...props }, ref) => (
-
-))
-SelectLabel.displayName = SelectPrimitive.Label.displayName
-
-const SelectItem = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Item>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Item>
->(({ className, children, ...props }, ref) => (
-
-
-
-
-
-
- {children}
-
-))
-SelectItem.displayName = SelectPrimitive.Item.displayName
-
-const SelectSeparator = React.forwardRef<
- React.ElementRef<typeof SelectPrimitive.Separator>,
- React.ComponentPropsWithoutRef<typeof SelectPrimitive.Separator>
->(({ className, ...props }, ref) => (
-
-))
-SelectSeparator.displayName = SelectPrimitive.Separator.displayName
-
-export {
- Select,
- SelectGroup,
- SelectValue,
- SelectTrigger,
- SelectContent,
- SelectLabel,
- SelectItem,
- SelectSeparator
-}
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/points_in_boxes.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/points_in_boxes.py
deleted file mode 100644
index 4003173a53052161dbcd687a2fa1d755642fdab8..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/points_in_boxes.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import torch
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', [
- 'points_in_boxes_part_forward', 'points_in_boxes_cpu_forward',
- 'points_in_boxes_all_forward'
-])
-
-
-def points_in_boxes_part(points, boxes):
- """Find the box in which each point is (CUDA).
-
- Args:
- points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate
- boxes (torch.Tensor): [B, T, 7],
- num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in
- LiDAR/DEPTH coordinate, (x, y, z) is the bottom center
-
- Returns:
- box_idxs_of_pts (torch.Tensor): (B, M), default background = -1
- """
- assert points.shape[0] == boxes.shape[0], \
- 'Points and boxes should have the same batch size, ' \
- f'but got {points.shape[0]} and {boxes.shape[0]}'
- assert boxes.shape[2] == 7, \
- 'boxes dimension should be 7, ' \
- f'but got unexpected shape {boxes.shape[2]}'
- assert points.shape[2] == 3, \
- 'points dimension should be 3, ' \
- f'but got unexpected shape {points.shape[2]}'
- batch_size, num_points, _ = points.shape
-
- box_idxs_of_pts = points.new_zeros((batch_size, num_points),
- dtype=torch.int).fill_(-1)
-
- # If manually put the tensor 'points' or 'boxes' on a device
- # which is not the current device, some temporary variables
- # will be created on the current device in the cuda op,
- # and the output will be incorrect.
- # Therefore, we force the current device to be the same
- # as the device of the tensors if it was not.
- # Please refer to https://github.com/open-mmlab/mmdetection3d/issues/305
- # for the incorrect output before the fix.
- points_device = points.get_device()
- assert points_device == boxes.get_device(), \
- 'Points and boxes should be put on the same device'
- if torch.cuda.current_device() != points_device:
- torch.cuda.set_device(points_device)
-
- ext_module.points_in_boxes_part_forward(boxes.contiguous(),
- points.contiguous(),
- box_idxs_of_pts)
-
- return box_idxs_of_pts
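-
- # Shape sketch (illustrative only, not from the original file): with 2 point clouds,
- # 4 points per cloud and 3 candidate boxes, all on the same CUDA device:
- #   points = torch.rand(2, 4, 3).cuda()
- #   boxes = torch.rand(2, 3, 7).cuda()
- #   idx = points_in_boxes_part(points, boxes)   # idx.shape == (2, 4), -1 = background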
-
-
-def points_in_boxes_cpu(points, boxes):
- """Find all boxes in which each point is (CPU). The CPU version of
- :meth:`points_in_boxes_all`.
-
- Args:
- points (torch.Tensor): [B, M, 3], [x, y, z] in
- LiDAR/DEPTH coordinate
- boxes (torch.Tensor): [B, T, 7],
- num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
- (x, y, z) is the bottom center.
-
- Returns:
- box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0.
- """
- assert points.shape[0] == boxes.shape[0], \
- 'Points and boxes should have the same batch size, ' \
- f'but got {points.shape[0]} and {boxes.shape[0]}'
- assert boxes.shape[2] == 7, \
- 'boxes dimension should be 7, ' \
- f'but got unexpected shape {boxes.shape[2]}'
- assert points.shape[2] == 3, \
- 'points dimension should be 3, ' \
- f'but got unexpected shape {points.shape[2]}'
- batch_size, num_points, _ = points.shape
- num_boxes = boxes.shape[1]
-
- point_indices = points.new_zeros((batch_size, num_boxes, num_points),
- dtype=torch.int)
- for b in range(batch_size):
- ext_module.points_in_boxes_cpu_forward(boxes[b].float().contiguous(),
- points[b].float().contiguous(),
- point_indices[b])
- point_indices = point_indices.transpose(1, 2)
-
- return point_indices
-
-
-def points_in_boxes_all(points, boxes):
- """Find all boxes in which each point is (CUDA).
-
- Args:
- points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate
- boxes (torch.Tensor): [B, T, 7],
- num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
- (x, y, z) is the bottom center.
-
- Returns:
- box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0.
- """
- assert boxes.shape[0] == points.shape[0], \
- 'Points and boxes should have the same batch size, ' \
- f'but got {boxes.shape[0]} and {points.shape[0]}'
- assert boxes.shape[2] == 7, \
- 'boxes dimension should be 7, ' \
- f'but got unexpected shape {boxes.shape[2]}'
- assert points.shape[2] == 3, \
- 'points dimension should be 3, ' \
- f'but got unexpected shape {points.shape[2]}'
- batch_size, num_points, _ = points.shape
- num_boxes = boxes.shape[1]
-
- box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes),
- dtype=torch.int).fill_(0)
-
- # Same reason as line 25-32
- points_device = points.get_device()
- assert points_device == boxes.get_device(), \
- 'Points and boxes should be put on the same device'
- if torch.cuda.current_device() != points_device:
- torch.cuda.set_device(points_device)
-
- ext_module.points_in_boxes_all_forward(boxes.contiguous(),
- points.contiguous(),
- box_idxs_of_pts)
-
- return box_idxs_of_pts
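-
- # Illustrative comparison (an assumption, not from the original file): for the same
- # `points`/`boxes` as in the sketch above, points_in_boxes_part returns one containing
- # box index per point with shape (B, M), while points_in_boxes_all (and its CPU
- # counterpart) return a (B, M, T) tensor with a 0/1 flag for every point-box pair.
- #   flags = points_in_boxes_all(points, boxes)   # flags.shape == (2, 4, 3)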
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/rpn/dyhead.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/rpn/dyhead.py
deleted file mode 100644
index 3114c73d7c81ecbbc2f2179877c83120212b4b93..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/rpn/dyhead.py
+++ /dev/null
@@ -1,377 +0,0 @@
-import math
-import torch
-import torch.nn.functional as F
-from torch import nn
-
-from .inference import make_atss_postprocessor
-from .loss import make_atss_loss_evaluator
-from .anchor_generator import make_anchor_generator_complex
-
-from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
-from maskrcnn_benchmark.layers import Scale, DYReLU, SELayer, ModulatedDeformConv
-from maskrcnn_benchmark.layers import NaiveSyncBatchNorm2d, FrozenBatchNorm2d
-from maskrcnn_benchmark.modeling.backbone.fbnet import *
-
-
-class h_sigmoid(nn.Module):
- def __init__(self, inplace=True, h_max=1):
- super(h_sigmoid, self).__init__()
- self.relu = nn.ReLU6(inplace=inplace)
- self.h_max = h_max
-
- def forward(self, x):
- return self.relu(x + 3) * self.h_max / 6
-
-
-class BoxCoder(object):
-
- def __init__(self, cfg):
- self.cfg = cfg
-
- def encode(self, gt_boxes, anchors):
- TO_REMOVE = 1 # TODO remove
- ex_widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
- ex_heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
- ex_ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
- ex_ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2
-
- gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + TO_REMOVE
- gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + TO_REMOVE
- gt_ctr_x = (gt_boxes[:, 2] + gt_boxes[:, 0]) / 2
- gt_ctr_y = (gt_boxes[:, 3] + gt_boxes[:, 1]) / 2
-
- wx, wy, ww, wh = (10., 10., 5., 5.)
- targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
- targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
- targets_dw = ww * torch.log(gt_widths / ex_widths)
- targets_dh = wh * torch.log(gt_heights / ex_heights)
- targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
-
- return targets
-
- def decode(self, preds, anchors):
- anchors = anchors.to(preds.dtype)
-
- TO_REMOVE = 1 # TODO remove
- widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
- heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
- ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
- ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2
-
- wx, wy, ww, wh = (10., 10., 5., 5.)
- dx = preds[:, 0::4] / wx
- dy = preds[:, 1::4] / wy
- dw = preds[:, 2::4] / ww
- dh = preds[:, 3::4] / wh
-
- # Prevent sending too large values into torch.exp()
- dw = torch.clamp(dw, max=math.log(1000. / 16))
- dh = torch.clamp(dh, max=math.log(1000. / 16))
-
- pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
- pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
- pred_w = torch.exp(dw) * widths[:, None]
- pred_h = torch.exp(dh) * heights[:, None]
-
- pred_boxes = torch.zeros_like(preds)
- pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1)
- pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1)
- pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1)
- pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1)
-
- return pred_boxes
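-
- # Quick sanity sketch (not from the original file): encoding a ground-truth box against
- # an anchor and decoding the resulting deltas should give the ground-truth box back.
- #   anchors = torch.tensor([[0., 0., 10., 10.]])
- #   gt = torch.tensor([[1., 1., 9., 9.]])
- #   coder = BoxCoder(cfg=None)
- #   deltas = coder.encode(gt, anchors)
- #   recovered = coder.decode(deltas, anchors)   # recovered ~= gt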
-
-
-class Conv3x3Norm(torch.nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- stride,
- groups=1,
- deformable=False,
- bn_type=None):
- super(Conv3x3Norm, self).__init__()
-
- if deformable:
- self.conv = ModulatedDeformConv(in_channels, out_channels, kernel_size=3, stride=stride, padding=1,
- groups=groups)
- else:
- self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups)
-
- if isinstance(bn_type, (list, tuple)):
- assert len(bn_type) == 2
- assert bn_type[0] == "gn"
- gn_group = bn_type[1]
- bn_type = bn_type[0]
-
- if bn_type == "bn":
- bn_op = nn.BatchNorm2d(out_channels)
- elif bn_type == "sbn":
- bn_op = nn.SyncBatchNorm(out_channels)
- elif bn_type == "nsbn":
- bn_op = NaiveSyncBatchNorm2d(out_channels)
- elif bn_type == "gn":
- bn_op = nn.GroupNorm(num_groups=gn_group, num_channels=out_channels)
- elif bn_type == "af":
- bn_op = FrozenBatchNorm2d(out_channels)
- if bn_type is not None:
- self.bn = bn_op
- else:
- self.bn = None
-
- def forward(self, input, **kwargs):
- x = self.conv(input, **kwargs)
- if self.bn:
- x = self.bn(x)
- return x
-
-
-class DyConv(torch.nn.Module):
- def __init__(self,
- in_channels=256,
- out_channels=256,
- conv_func=nn.Conv2d,
- use_dyfuse=True,
- use_dyrelu=False,
- use_deform=False
- ):
- super(DyConv, self).__init__()
-
- self.DyConv = nn.ModuleList()
- self.DyConv.append(conv_func(in_channels, out_channels, 1))
- self.DyConv.append(conv_func(in_channels, out_channels, 1))
- self.DyConv.append(conv_func(in_channels, out_channels, 2))
-
- if use_dyfuse:
- self.AttnConv = nn.Sequential(
- nn.AdaptiveAvgPool2d(1),
- nn.Conv2d(in_channels, 1, kernel_size=1),
- nn.ReLU(inplace=True))
- self.h_sigmoid = h_sigmoid()
- else:
- self.AttnConv = None
-
- if use_dyrelu:
- self.relu = DYReLU(in_channels, out_channels)
- else:
- self.relu = nn.ReLU()
-
- if use_deform:
- self.offset = nn.Conv2d(in_channels, 27, kernel_size=3, stride=1, padding=1)
- else:
- self.offset = None
-
- self.init_weights()
-
- def init_weights(self):
- for m in self.DyConv.modules():
- if isinstance(m, nn.Conv2d):
- nn.init.normal_(m.weight.data, 0, 0.01)
- if m.bias is not None:
- m.bias.data.zero_()
- if self.AttnConv is not None:
- for m in self.AttnConv.modules():
- if isinstance(m, nn.Conv2d):
- nn.init.normal_(m.weight.data, 0, 0.01)
- if m.bias is not None:
- m.bias.data.zero_()
-
- def forward(self, x):
- next_x = []
- for level, feature in enumerate(x):
-
- conv_args = dict()
- if self.offset is not None:
- offset_mask = self.offset(feature)
- offset = offset_mask[:, :18, :, :]
- mask = offset_mask[:, 18:, :, :].sigmoid()
- conv_args = dict(offset=offset, mask=mask)
-
- temp_fea = [self.DyConv[1](feature, **conv_args)]
-
- if level > 0:
- temp_fea.append(self.DyConv[2](x[level - 1], **conv_args))
- if level < len(x) - 1:
- temp_fea.append(F.upsample_bilinear(self.DyConv[0](x[level + 1], **conv_args),
- size=[feature.size(2), feature.size(3)]))
- mean_fea = torch.mean(torch.stack(temp_fea), dim=0, keepdim=False)
-
- if self.AttnConv is not None:
- attn_fea = []
- res_fea = []
- for fea in temp_fea:
- res_fea.append(fea)
- attn_fea.append(self.AttnConv(fea))
-
- res_fea = torch.stack(res_fea)
- spa_pyr_attn = self.h_sigmoid(torch.stack(attn_fea))
-
- mean_fea = torch.mean(res_fea * spa_pyr_attn, dim=0, keepdim=False)
-
- next_x.append(mean_fea)
-
- next_x = [self.relu(item) for item in next_x]
- return next_x
-
-
-class DyHead(torch.nn.Module):
- def __init__(self, cfg):
- super(DyHead, self).__init__()
- self.cfg = cfg
- num_classes = cfg.MODEL.DYHEAD.NUM_CLASSES - 1
- num_anchors = len(cfg.MODEL.RPN.ASPECT_RATIOS) * cfg.MODEL.RPN.SCALES_PER_OCTAVE
- in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
- channels = cfg.MODEL.DYHEAD.CHANNELS
- if cfg.MODEL.DYHEAD.USE_GN:
- bn_type = ['gn', cfg.MODEL.GROUP_NORM.NUM_GROUPS]
- elif cfg.MODEL.DYHEAD.USE_NSYNCBN:
- bn_type = 'nsbn'
- elif cfg.MODEL.DYHEAD.USE_SYNCBN:
- bn_type = 'sbn'
- else:
- bn_type = None
-
- use_dyrelu = cfg.MODEL.DYHEAD.USE_DYRELU
- use_dyfuse = cfg.MODEL.DYHEAD.USE_DYFUSE
- use_deform = cfg.MODEL.DYHEAD.USE_DFCONV
-
- if cfg.MODEL.DYHEAD.CONV_FUNC:
- conv_func = lambda i, o, s: eval(cfg.MODEL.DYHEAD.CONV_FUNC)(i, o, s, bn_type=bn_type)
- else:
- conv_func = lambda i, o, s: Conv3x3Norm(i, o, s, deformable=use_deform, bn_type=bn_type)
-
- dyhead_tower = []
- for i in range(cfg.MODEL.DYHEAD.NUM_CONVS):
- dyhead_tower.append(
- DyConv(
- in_channels if i == 0 else channels,
- channels,
- conv_func=conv_func,
- use_dyrelu=(use_dyrelu and in_channels == channels) if i == 0 else use_dyrelu,
- use_dyfuse=(use_dyfuse and in_channels == channels) if i == 0 else use_dyfuse,
- use_deform=(use_deform and in_channels == channels) if i == 0 else use_deform,
- )
- )
-
- self.add_module('dyhead_tower', nn.Sequential(*dyhead_tower))
- if cfg.MODEL.DYHEAD.COSINE_SCALE <= 0:
- self.cls_logits = nn.Conv2d(channels, num_anchors * num_classes, kernel_size=1)
- self.cls_logits_bias = None
- else:
- self.cls_logits = nn.Conv2d(channels, num_anchors * num_classes, kernel_size=1, bias=False)
- self.cls_logits_bias = nn.Parameter(torch.zeros(num_anchors * num_classes, requires_grad=True))
- self.cosine_scale = nn.Parameter(torch.ones(1) * cfg.MODEL.DYHEAD.COSINE_SCALE)
- self.bbox_pred = nn.Conv2d(channels, num_anchors * 4, kernel_size=1)
- self.centerness = nn.Conv2d(channels, num_anchors * 1, kernel_size=1)
-
- # initialization
- for modules in [self.cls_logits, self.bbox_pred,
- self.centerness]:
- for l in modules.modules():
- if isinstance(l, nn.Conv2d):
- torch.nn.init.normal_(l.weight, std=0.01)
- if hasattr(l, 'bias') and l.bias is not None:
- torch.nn.init.constant_(l.bias, 0)
-
- # initialize the bias for focal loss
- prior_prob = cfg.MODEL.DYHEAD.PRIOR_PROB
- bias_value = -math.log((1 - prior_prob) / prior_prob)
- if self.cls_logits_bias is None:
- torch.nn.init.constant_(self.cls_logits.bias, bias_value)
- else:
- torch.nn.init.constant_(self.cls_logits_bias, bias_value)
-
- self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(5)])
-
- def extract_feature(self, x):
- output = []
- for i in range(len(self.dyhead_tower)):
- x = self.dyhead_tower[i](x)
- output.append(x)
- return output
-
- def forward(self, x):
- logits = []
- bbox_reg = []
- centerness = []
-
- dyhead_tower = self.dyhead_tower(x)
-
- for l, feature in enumerate(x):
- if self.cls_logits_bias is None:
- logit = self.cls_logits(dyhead_tower[l])
- else:
- # CosineSimOutputLayers: https://github.com/ucbdrive/few-shot-object-detection/blob/master/fsdet/modeling/roi_heads/fast_rcnn.py#L448-L464
- # normalize the input x along the `channel` dimension
- x_norm = torch.norm(dyhead_tower[l], p=2, dim=1, keepdim=True).expand_as(dyhead_tower[l])
- x_normalized = dyhead_tower[l].div(x_norm + 1e-5)
- # normalize weight
- temp_norm = (
- torch.norm(self.cls_logits.weight.data, p=2, dim=1, keepdim=True)
- .expand_as(self.cls_logits.weight.data)
- )
- self.cls_logits.weight.data = self.cls_logits.weight.data.div(
- temp_norm + 1e-5
- )
- cos_dist = self.cls_logits(x_normalized)
- logit = self.cosine_scale * cos_dist + self.cls_logits_bias.reshape(1, len(self.cls_logits_bias), 1, 1)
- logits.append(logit)
-
- bbox_pred = self.scales[l](self.bbox_pred(dyhead_tower[l]))
- bbox_reg.append(bbox_pred)
-
- centerness.append(self.centerness(dyhead_tower[l]))
- return logits, bbox_reg, centerness
-
-
-class DyHeadModule(torch.nn.Module):
-
- def __init__(self, cfg):
- super(DyHeadModule, self).__init__()
- self.cfg = cfg
- self.head = DyHead(cfg)
- box_coder = BoxCoder(cfg)
- self.loss_evaluator = make_atss_loss_evaluator(cfg, box_coder)
- self.box_selector_train = make_atss_postprocessor(cfg, box_coder, is_train=True)
- self.box_selector_test = make_atss_postprocessor(cfg, box_coder, is_train=False)
- self.anchor_generator = make_anchor_generator_complex(cfg)
-
- def forward(self, images, features, targets=None):
- box_cls, box_regression, centerness = self.head(features)
- anchors = self.anchor_generator(images, features)
-
- if self.training:
- return self._forward_train(box_cls, box_regression, centerness, targets, anchors)
- else:
- return self._forward_test(box_cls, box_regression, centerness, anchors)
-
- def _forward_train(self, box_cls, box_regression, centerness, targets, anchors):
- loss_box_cls, loss_box_reg, loss_centerness, _, _, _, _ = self.loss_evaluator(
- box_cls, box_regression, centerness, targets, anchors
- )
- losses = {
- "loss_cls": loss_box_cls,
- "loss_reg": loss_box_reg,
- "loss_centerness": loss_centerness
- }
- if self.cfg.MODEL.RPN_ONLY:
- return None, losses
- else:
- # boxes = self.box_selector_train(box_cls, box_regression, centerness, anchors)
- boxes = self.box_selector_train(box_regression, centerness, anchors, box_cls)
- train_boxes = []
- # for b, a in zip(boxes, anchors):
- # a = cat_boxlist(a)
- # b.add_field("visibility", torch.ones(b.bbox.shape[0], dtype=torch.bool, device=b.bbox.device))
- # del b.extra_fields['scores']
- # del b.extra_fields['labels']
- # train_boxes.append(cat_boxlist([b, a]))
- for b, t in zip(boxes, targets):
- tb = t.copy_with_fields(["labels"])
- tb.add_field("scores", torch.ones(tb.bbox.shape[0], dtype=torch.bool, device=tb.bbox.device))
- train_boxes.append(cat_boxlist([b, tb]))
- return train_boxes, losses
-
- def _forward_test(self, box_cls, box_regression, centerness, anchors):
- boxes = self.box_selector_test(box_regression, centerness, anchors, box_cls)
- return boxes, {}
diff --git a/spaces/Pippoz/All_in_one/pages/fill_mask.py b/spaces/Pippoz/All_in_one/pages/fill_mask.py
deleted file mode 100644
index 77df450fa6a0ac21d2685e1f49549f6b8f359bf8..0000000000000000000000000000000000000000
--- a/spaces/Pippoz/All_in_one/pages/fill_mask.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import streamlit as st
-import time
-import pandas as pd
-import altair as alt
-from multipage import MultiPage
-from transformers import pipeline
-
-def app():
- st.markdown('## Mask Fill task')
- st.write('Write a sentence with a [MASK] gap to fill')
- st.markdown('## ')
-
-
- @st.cache(allow_output_mutation=True, suppress_st_warning=True, show_spinner=False)
- def get_model(model):
- return pipeline('fill-mask', model=model)
-
- def create_graph(answer):
- x_bar = [i['token_str'] for i in answer]
- y_bar = [i['score'] for i in answer]
- chart_data = pd.DataFrame(y_bar, index=x_bar)
- data = pd.melt(chart_data.reset_index(), id_vars=["index"])
- # Horizontal stacked bar chart
- chart = (
- alt.Chart(data)
- .mark_bar(color='#d7abf5')
- .encode(
- x=alt.X("index", type="nominal", title='',sort=alt.EncodingSortField(field="index", op="count", order='ascending')),
- y=alt.Y("value", type="quantitative", title="Score", sort='-x'),
- )
- )
- st.altair_chart(chart, use_container_width=True)
-
-
- col1, col2 = st.columns([2,1])
-
-
- with col1:
- prompt= st.text_area('Your prompt here',
- '''Who is Elon [MASK]?''')
-
- with col2:
- select_model = st.radio(
- "Select the model to use:",
- ('Bert cased', 'Bert Un-cased'), index = 1)
-
- if select_model == 'Bert cased':
- model = 'bert-base-cased'
- elif select_model == 'Bert Un-cased':
- model = 'bert-base-uncased'
-
- with st.spinner('Loading Model... (This may take a while)'):
- unmasker = get_model(model)
- st.success('Model loaded correctly!')
-
- gen = st.info('Generating Mask...')
- answer = unmasker(prompt)
- gen.empty()
-
- with col1:
- create_graph(answer)
-
-
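- # For reference (an illustrative note, not in the original app): the fill-mask pipeline
- # returns a list of candidate completions, each a dict with at least 'token_str' and
- # 'score', which is exactly what create_graph() plots, e.g.
- #   unmasker("Who is Elon [MASK]?")
- #   # -> [{'score': 0.87, 'token_str': '...', 'sequence': '...'}, ...]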
diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/mpd.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/mpd.py
deleted file mode 100644
index 8debd1fa72d77ca03df680facb60bdf79638cade..0000000000000000000000000000000000000000
--- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/mpd.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import typing as tp
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ...modules import NormConv2d
-from .base import MultiDiscriminator, MultiDiscriminatorOutputType
-
-
-def get_padding(kernel_size: int, dilation: int = 1) -> int:
- return int((kernel_size * dilation - dilation) / 2)
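-
- # Worked example (illustrative): get_padding(kernel_size=5, dilation=1) returns
- # (5*1 - 1) // 2 = 2, i.e. the "same" padding that keeps the temporal length unchanged
- # for stride-1 convolutions with odd kernel sizes.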
-
-
-class PeriodDiscriminator(nn.Module):
- """Period sub-discriminator.
-
- Args:
- period (int): Period between samples of audio.
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- n_layers (int): Number of convolutional layers.
- kernel_sizes (list of int): Kernel sizes for convolutions.
- stride (int): Stride for convolutions.
- filters (int): Initial number of filters in convolutions.
- filters_scale (int): Multiplier of number of filters as we increase depth.
- max_filters (int): Maximum number of filters.
- norm (str): Normalization method.
- activation (str): Activation function.
- activation_params (dict): Parameters to provide to the activation function.
- """
- def __init__(self, period: int, in_channels: int = 1, out_channels: int = 1,
- n_layers: int = 5, kernel_sizes: tp.List[int] = [5, 3], stride: int = 3,
- filters: int = 8, filters_scale: int = 4, max_filters: int = 1024,
- norm: str = 'weight_norm', activation: str = 'LeakyReLU',
- activation_params: dict = {'negative_slope': 0.2}):
- super().__init__()
- self.period = period
- self.n_layers = n_layers
- self.activation = getattr(torch.nn, activation)(**activation_params)
- self.convs = nn.ModuleList()
- in_chs = in_channels
- for i in range(self.n_layers):
- out_chs = min(filters * (filters_scale ** (i + 1)), max_filters)
- eff_stride = 1 if i == self.n_layers - 1 else stride
- self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=(kernel_sizes[0], 1), stride=(eff_stride, 1),
- padding=((kernel_sizes[0] - 1) // 2, 0), norm=norm))
- in_chs = out_chs
- self.conv_post = NormConv2d(in_chs, out_channels, kernel_size=(kernel_sizes[1], 1), stride=1,
- padding=((kernel_sizes[1] - 1) // 2, 0), norm=norm)
-
- def forward(self, x: torch.Tensor):
- fmap = []
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), 'reflect')
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for conv in self.convs:
- x = conv(x)
- x = self.activation(x)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- # x = torch.flatten(x, 1, -1)
-
- return x, fmap
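-
- # Illustrative note (not in the original file): the 1d -> 2d fold above turns a waveform
- # of length t into a (t // period, period) grid, reflect-padding first when needed.
- # Assuming period=3 and a mono batch of length 8:
- #   x = torch.randn(1, 1, 8)
- #   # padded by 1 sample to length 9, then viewed as (1, 1, 3, 3)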
-
-
-class MultiPeriodDiscriminator(MultiDiscriminator):
- """Multi-Period (MPD) Discriminator.
-
- Args:
- in_channels (int): Number of input channels.
- out_channels (int): Number of output channels.
- periods (Sequence[int]): Periods between samples of audio for the sub-discriminators.
- **kwargs: Additional args for `PeriodDiscriminator`
- """
- def __init__(self, in_channels: int = 1, out_channels: int = 1,
- periods: tp.Sequence[int] = [2, 3, 5, 7, 11], **kwargs):
- super().__init__()
- self.discriminators = nn.ModuleList([
- PeriodDiscriminator(p, in_channels, out_channels, **kwargs) for p in periods
- ])
-
- @property
- def num_discriminators(self):
- return len(self.discriminators)
-
- def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType:
- logits = []
- fmaps = []
- for disc in self.discriminators:
- logit, fmap = disc(x)
- logits.append(logit)
- fmaps.append(fmap)
- return logits, fmaps
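-
- # Hypothetical usage sketch (assumption: a 1-second mono batch at 16 kHz):
- #   mpd = MultiPeriodDiscriminator()
- #   logits, fmaps = mpd(torch.randn(4, 1, 16000))
- #   # len(logits) == len(fmaps) == 5, one entry per period in [2, 3, 5, 7, 11]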
diff --git a/spaces/RMXK/RVC_HFF/Applio-RVC-Fork/utils/clonerepo_experimental.py b/spaces/RMXK/RVC_HFF/Applio-RVC-Fork/utils/clonerepo_experimental.py
deleted file mode 100644
index b0ae02648c1307562cf48033908edcf2996db5e2..0000000000000000000000000000000000000000
--- a/spaces/RMXK/RVC_HFF/Applio-RVC-Fork/utils/clonerepo_experimental.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import os
-import subprocess
-import shutil
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from tqdm.notebook import tqdm
-from pathlib import Path
-import requests
-
-def run_script():
- def run_cmd(cmd):
- process = subprocess.run(cmd, shell=True, check=True, text=True)
- return process.stdout
-
- # Change the current directory to /content/
- os.chdir('/content/')
- print("Changing dir to /content/")
-
- # Your function to edit the file
- def edit_file(file_path):
- temp_file_path = "/tmp/temp_file.py"
- changes_made = False
- with open(file_path, "r") as file, open(temp_file_path, "w") as temp_file:
- previous_line = ""
- second_previous_line = ""
- for line in file:
- new_line = line.replace("value=160", "value=128")
- if new_line != line:
- print("Replaced 'value=160' with 'value=128'")
- changes_made = True
- line = new_line
-
- new_line = line.replace("crepe hop length: 160", "crepe hop length: 128")
- if new_line != line:
- print("Replaced 'crepe hop length: 160' with 'crepe hop length: 128'")
- changes_made = True
- line = new_line
-
- new_line = line.replace("value=0.88", "value=0.75")
- if new_line != line:
- print("Replaced 'value=0.88' with 'value=0.75'")
- changes_made = True
- line = new_line
-
- if "label=i18n(\"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络\")" in previous_line and "value=1," in line:
- new_line = line.replace("value=1,", "value=0.25,")
- if new_line != line:
- print("Replaced 'value=1,' with 'value=0.25,' based on the condition")
- changes_made = True
- line = new_line
-
- if "label=i18n(\"总训练轮数total_epoch\")" in previous_line and "value=20," in line:
- new_line = line.replace("value=20,", "value=500,")
- if new_line != line:
- print("Replaced 'value=20,' with 'value=500,' based on the condition for DEFAULT EPOCH")
- changes_made = True
- line = new_line
-
- if 'choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny"], # Fork Feature. Add Crepe-Tiny' in previous_line:
- if 'value="pm",' in line:
- new_line = line.replace('value="pm",', 'value="mangio-crepe",')
- if new_line != line:
- print("Replaced 'value=\"pm\",' with 'value=\"mangio-crepe\",' based on the condition")
- changes_made = True
- line = new_line
-
- new_line = line.replace('label=i18n("输入训练文件夹路径"), value="E:\\\\语音音频+标注\\\\米津玄师\\\\src"', 'label=i18n("输入训练文件夹路径"), value="/content/dataset/"')
- if new_line != line:
- print("Replaced 'label=i18n(\"输入训练文件夹路径\"), value=\"E:\\\\语音音频+标注\\\\米津玄师\\\\src\"' with 'label=i18n(\"输入训练文件夹路径\"), value=\"/content/dataset/\"'")
- changes_made = True
- line = new_line
-
- if 'label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),' in second_previous_line:
- if 'value=i18n("否"),' in line:
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
- if new_line != line:
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE ONLY LATEST")
- changes_made = True
- line = new_line
-
- if 'label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),' in second_previous_line:
- if 'value=i18n("否"),' in line:
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
- if new_line != line:
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE SMALL WEIGHTS")
- changes_made = True
- line = new_line
-
- temp_file.write(line)
- second_previous_line = previous_line
- previous_line = line
-
- # After finished, we replace the original file with the temp one
- import shutil
- shutil.move(temp_file_path, file_path)
-
- if changes_made:
- print("Changes made and file saved successfully.")
- else:
- print("No changes were needed.")
-
- # Define the repo path
- repo_path = '/content/Applio-RVC-Fork'
-
- def copy_all_files_in_directory(src_dir, dest_dir):
- # Iterate over all files in source directory
- for item in Path(src_dir).glob('*'):
- if item.is_file():
- # Copy each file to destination directory
- shutil.copy(item, dest_dir)
- else:
- # If it's a directory, make a new directory in the destination and copy the files recursively
- new_dest = Path(dest_dir) / item.name
- new_dest.mkdir(exist_ok=True)
- copy_all_files_in_directory(str(item), str(new_dest))
-
- def clone_and_copy_repo(repo_path):
- # New repository link
- new_repo_link = "https://github.com/IAHispano/Applio-RVC-Fork/"
- # Temporary path to clone the repository
- temp_repo_path = "/content/temp_Applio-RVC-Fork"
- # New folder name
- new_folder_name = "Applio-RVC-Fork"
-
- # Clone the latest code from the new repository to a temporary location
- run_cmd(f"git clone {new_repo_link} {temp_repo_path}")
- os.chdir(temp_repo_path)
-
- run_cmd(f"git checkout 3fa4dad3d8961e5ca2522e9e12c0b4ddb71ad402")
- run_cmd(f"git checkout f9e606c279cb49420597519b0a83b92be81e42e4")
- run_cmd(f"git checkout 9e305588844c5442d58add1061b29beeca89d679")
- run_cmd(f"git checkout bf92dc1eb54b4f28d6396a4d1820a25896cc9af8")
- run_cmd(f"git checkout c3810e197d3cb98039973b2f723edf967ecd9e61")
- run_cmd(f"git checkout a33159efd134c2413b0afe26a76b7dc87926d2de")
- run_cmd(f"git checkout 24e251fb62c662e39ac5cf9253cc65deb9be94ec")
- run_cmd(f"git checkout ad5667d3017e93232dba85969cddac1322ba2902")
- run_cmd(f"git checkout ce9715392cf52dd5a0e18e00d1b5e408f08dbf27")
- run_cmd(f"git checkout 7c7da3f2ac68f3bd8f3ad5ca5c700f18ab9f90eb")
- run_cmd(f"git checkout 4ac395eab101955e8960b50d772c26f592161764")
- run_cmd(f"git checkout b15b358702294c7375761584e5276c811ffab5e8")
- run_cmd(f"git checkout 1501793dc490982db9aca84a50647764caa66e51")
- run_cmd(f"git checkout 21f7faf57219c75e6ba837062350391a803e9ae2")
- run_cmd(f"git checkout b5eb689fbc409b49f065a431817f822f554cebe7")
- run_cmd(f"git checkout 7e02fae1ebf24cb151bf6cbe787d06734aa65862")
- run_cmd(f"git checkout 6aea5ea18ed0b9a1e03fa5d268d6bc3c616672a9")
- run_cmd(f"git checkout f0f9b25717e59116473fb42bd7f9252cfc32b398")
- run_cmd(f"git checkout b394de424088a81fc081224bc27338a8651ad3b2")
- run_cmd(f"git checkout f1999406a88b80c965d2082340f5ea2bfa9ab67a")
- run_cmd(f"git checkout d98a0fa8dc715308dfc73eac5c553b69c6ee072b")
- run_cmd(f"git checkout d73267a415fb0eba98477afa43ef71ffd82a7157")
- run_cmd(f"git checkout 1a03d01356ae79179e1fb8d8915dc9cc79925742")
- run_cmd(f"git checkout 81497bb3115e92c754300c9b3992df428886a3e9")
- run_cmd(f"git checkout c5af1f8edcf79cb70f065c0110e279e78e48caf9")
- run_cmd(f"git checkout cdb3c90109387fa4dfa92f53c3864c71170ffc77")
-
- # Edit the file here, before copying
- #edit_file(f"{temp_repo_path}/infer-web.py")
-
- # Copy all files from the cloned repository to the existing path
- copy_all_files_in_directory(temp_repo_path, repo_path)
- print(f"Copying all {new_folder_name} files from GitHub.")
-
- # Change working directory back to /content/
- os.chdir('/content/')
- print("Changed path back to /content/")
-
- # Remove the temporary cloned repository
- shutil.rmtree(temp_repo_path)
-
- # Call the function
- clone_and_copy_repo(repo_path)
-
- # Download the credentials file for RVC archive sheet
- os.makedirs('/content/Applio-RVC-Fork/stats/', exist_ok=True)
- run_cmd("wget -q https://cdn.discordapp.com/attachments/945486970883285045/1114717554481569802/peppy-generator-388800-07722f17a188.json -O /content/Applio-RVC-Fork/stats/peppy-generator-388800-07722f17a188.json")
-
- # Forcefully delete any existing torchcrepe dependencies downloaded from an earlier run just in case
- shutil.rmtree('/content/Applio-RVC-Fork/torchcrepe', ignore_errors=True)
- shutil.rmtree('/content/torchcrepe', ignore_errors=True)
-
- # Download the torchcrepe folder from the maxrmorrison/torchcrepe repository
- run_cmd("git clone https://github.com/maxrmorrison/torchcrepe.git")
- shutil.move('/content/torchcrepe/torchcrepe', '/content/Applio-RVC-Fork/')
- shutil.rmtree('/content/torchcrepe', ignore_errors=True) # Delete the torchcrepe repository folder
-
- # Change the current directory to /content/Applio-RVC-Fork
- os.chdir('/content/Applio-RVC-Fork')
- os.makedirs('pretrained', exist_ok=True)
- os.makedirs('uvr5_weights', exist_ok=True)
-
-def download_file(url, filepath):
- response = requests.get(url, stream=True)
- response.raise_for_status()
-
- with open(filepath, "wb") as file:
- for chunk in response.iter_content(chunk_size=8192):
- if chunk:
- file.write(chunk)
-
-def download_pretrained_models():
- pretrained_models = {
- "pretrained": [
- "D40k.pth",
- "G40k.pth",
- "f0D40k.pth",
- "f0G40k.pth"
- ],
- "pretrained_v2": [
- "D40k.pth",
- "G40k.pth",
- "f0D40k.pth",
- "f0G40k.pth",
- "f0G48k.pth",
- "f0D48k.pth"
- ],
- "uvr5_weights": [
- "HP2-人声vocals+非人声instrumentals.pth",
- "HP5-主旋律人声vocals+其他instrumentals.pth",
- "VR-DeEchoNormal.pth",
- "VR-DeEchoDeReverb.pth",
- "VR-DeEchoAggressive.pth",
- "HP5_only_main_vocal.pth",
- "HP3_all_vocals.pth",
- "HP2_all_vocals.pth"
- ]
- }
- part2 = "I"
- base_url = "https://huggingface.co/lj1995/VoiceConversionWebU" + part2 + "/resolve/main/"
- base_path = "/content/Applio-RVC-Fork/"
- base_pathm = base_path
-
- # Calculate total number of files to download
- total_files = sum(len(files) for files in pretrained_models.values()) + 1 # +1 for hubert_base.pt
-
- with tqdm(total=total_files, desc="Downloading files") as pbar:
- for folder, models in pretrained_models.items():
- folder_path = os.path.join(base_path, folder)
- os.makedirs(folder_path, exist_ok=True)
- for model in models:
- url = base_url + folder + "/" + model
- filepath = os.path.join(folder_path, model)
- download_file(url, filepath)
- pbar.update()
-
- # Download hubert_base.pt to the base path
- hubert_url = base_url + "hubert_base.pt"
- hubert_filepath = os.path.join(base_pathm, "hubert_base.pt")
- download_file(hubert_url, hubert_filepath)
- pbar.update()
-def clone_repository(run_download):
- with ThreadPoolExecutor(max_workers=2) as executor:
- executor.submit(run_script)
- if run_download:
- executor.submit(download_pretrained_models)
diff --git a/spaces/RMXK/RVC_HFF/utils/backups_test.py b/spaces/RMXK/RVC_HFF/utils/backups_test.py
deleted file mode 100644
index f3edf15811b5035ee82f21e54e87b7e87ce413eb..0000000000000000000000000000000000000000
--- a/spaces/RMXK/RVC_HFF/utils/backups_test.py
+++ /dev/null
@@ -1,138 +0,0 @@
-
-import os
-import shutil
-import hashlib
-import time
-
-LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
-WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
-GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup'
-
-def import_google_drive_backup():
- print("Importing Google Drive backup...")
- GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup' # change this to your Google Drive path
- LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
- WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
- weights_exist = False
- files_to_copy = []
- weights_to_copy = []
-
- def handle_files(root, files, is_weight_files=False):
- # `weights_exist` is defined in the enclosing function; declare it nonlocal so the
- # assignment below updates that flag instead of creating a new local variable.
- nonlocal weights_exist
- for filename in files:
- filepath = os.path.join(root, filename)
- if filename.endswith('.pth') and is_weight_files:
- weights_exist = True
- backup_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
- else:
- backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
- backup_folderpath = os.path.dirname(backup_filepath)
- if not os.path.exists(backup_folderpath):
- os.makedirs(backup_folderpath)
- print(f'Created folder: {backup_folderpath}', flush=True)
- if is_weight_files:
- weights_to_copy.append((filepath, backup_filepath))
- else:
- files_to_copy.append((filepath, backup_filepath))
-
- for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'logs')):
- handle_files(root, files)
-
- for root, dirs, files in os.walk(os.path.join(GOOGLE_DRIVE_PATH, 'weights')):
- handle_files(root, files, True)
-
- # Copy files in batches
- total_files = len(files_to_copy)
- start_time = time.time()
- for i, (source, dest) in enumerate(files_to_copy, start=1):
- with open(source, 'rb') as src, open(dest, 'wb') as dst:
- shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size
- # Report progress every 5 seconds or after every 100 files, whichever is less frequent
- if time.time() - start_time > 5 or i % 100 == 0:
- print(f'\rCopying file {i} of {total_files} ({i * 100 / total_files:.2f}%)', end="")
- start_time = time.time()
- print(f'\nImported {len(files_to_copy)} files from Google Drive backup')
-
- # Copy weights in batches
- total_weights = len(weights_to_copy)
- start_time = time.time()
- for i, (source, dest) in enumerate(weights_to_copy, start=1):
- with open(source, 'rb') as src, open(dest, 'wb') as dst:
- shutil.copyfileobj(src, dst, 1024*1024) # 1MB buffer size
- # Report progress every 5 seconds or after every 100 files, whichever is less frequent
- if time.time() - start_time > 5 or i % 100 == 0:
- print(f'\rCopying weight file {i} of {total_weights} ({i * 100 / total_weights:.2f}%)', end="")
- start_time = time.time()
- if weights_exist:
- print(f'\nImported {len(weights_to_copy)} weight files')
- print("Copied weights from Google Drive backup to local weights folder.")
- else:
- print("\nNo weights found in Google Drive backup.")
- print("Google Drive backup import completed.")
-
-def backup_files():
- print("\n Starting backup loop...")
- last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt')
- fully_updated = False # boolean to track if all files are up to date
- try:
- with open(last_backup_timestamps_path, 'r') as f:
- last_backup_timestamps = dict(line.strip().split(':') for line in f)
- except:
- last_backup_timestamps = {}
-
- while True:
- updated = False
- files_to_copy = []
- files_to_delete = []
-
- for root, dirs, files in os.walk(LOGS_FOLDER):
- for filename in files:
- if filename != 'last_backup_timestamps.txt':
- filepath = os.path.join(root, filename)
- if os.path.isfile(filepath):
- backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
- backup_folderpath = os.path.dirname(backup_filepath)
-
- if not os.path.exists(backup_folderpath):
- os.makedirs(backup_folderpath)
- print(f'Created backup folder: {backup_folderpath}', flush=True)
-
- # check if file has changed since last backup
- last_backup_timestamp = last_backup_timestamps.get(filepath)
- current_timestamp = os.path.getmtime(filepath)
- if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp:
- files_to_copy.append((filepath, backup_filepath)) # add to list of files to copy
- last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp
- updated = True
- fully_updated = False # if a file is updated, all files are not up to date
-
- # check if any files were deleted in Colab and delete them from the backup drive
- for filepath in list(last_backup_timestamps.keys()):
- if not os.path.exists(filepath):
- backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
- if os.path.exists(backup_filepath):
- files_to_delete.append(backup_filepath) # add to list of files to delete
- del last_backup_timestamps[filepath]
- updated = True
- fully_updated = False # if a file is deleted, all files are not up to date
-
- # Copy files in batches
- if files_to_copy:
- for source, dest in files_to_copy:
- shutil.copy2(source, dest)
- print(f'Copied or updated {len(files_to_copy)} files')
-
- # Delete files in batches
- if files_to_delete:
- for file in files_to_delete:
- os.remove(file)
- print(f'Deleted {len(files_to_delete)} files')
-
- if not updated and not fully_updated:
- print("Files are up to date.")
- fully_updated = True # if all files are up to date, set the boolean to True
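-            # copy_weights_folder_to_drive is assumed to be provided by the project's
-            # full backup utilities; it is not defined in this test module.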
- copy_weights_folder_to_drive()
-
- with open(last_backup_timestamps_path, 'w') as f:
- for filepath, timestamp in last_backup_timestamps.items():
- f.write(f'{filepath}:{timestamp}\n')
- time.sleep(15) # wait for 15 seconds before checking again
diff --git a/spaces/Raaniel/Keyword_demo/app.py b/spaces/Raaniel/Keyword_demo/app.py
deleted file mode 100644
index 758bdb1ccf8d55807660d6d82900439f288ea24e..0000000000000000000000000000000000000000
--- a/spaces/Raaniel/Keyword_demo/app.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import gradio as gr
-import requests
-import os
-from io import BytesIO
-
-
-def CustomKeywording(image_bytes, client_id = os.getenv("CLIENT_ID"), client_secret = os.getenv("CLIENT_SECRET")):
-
- data = {'data': image_bytes}
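-    # POST the raw image bytes to the EveryPixel keywords endpoint, requesting the
-    # top 10 suggested keywords; authentication uses the client id/secret pair.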
- keywords = requests.post('https://api.everypixel.com/v1/keywords', files=data, params={'num_keywords' : 10},
- auth=(client_id, client_secret)).json()
-
- if keywords['status'] != 'ok':
- raise Exception("Error: API returned status '{}'".format(keywords['status']))
-
- keywords_list = [keyword_dict['keyword'] for keyword_dict in keywords['keywords']]
-
- return ", ".join(keywords_list)
-
-def get_photo(image):
- if image is None:
- return "Please upload an image first, dingus!"
-
- with BytesIO() as output:
- image.save(output, format="PNG")
- image_bytes = output.getvalue()
-
- return CustomKeywording(image_bytes)
-
-desc = """The image keyword generator analyzes the visual features of an image to extract meaningful information,
-then generates accurate, descriptive keywords that help categorize and organize image collections,
-saving the time and effort of tagging images manually and aiding in the efficient management and use of visual assets.
-Built on the EveryPixel API. Get your free API key by registering on their website! https://labs.everypixel.com/api"""
-
-
-title = """🔥 Find your keywords! 🔥"""
-
-# demo = gradio.Interface(fn=get_photo, inputs = inputs,
-# outputs = "text", title = "Find your keywords!", description = desc,
-# theme=gradio.themes.Soft(primary_hue=gradio.themes.colors.neutral, secondary_hue=gradio.themes.colors.rose))
-
-with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.neutral, secondary_hue=gr.themes.colors.rose)) as demo:
- gr.HTML(title)
-    gr.HTML('''Duplicate the Space and run securely with your EveryPixel API Key''')
- gr.Markdown(desc)
- with gr.Row():
- with gr.Column():
- image = gr.Image(type = "pil", label = "Your image: ")
- btn = gr.Button("Generate keywords!")
- with gr.Column():
- output = gr.Textbox(label = "Keywords: ")
- btn.click(fn=get_photo, inputs=image, outputs=output)
- with gr.Accordion(label="Examples for Images:", open=True):
- examples = gr.Examples(examples=['batman.jpg', 'cat1.png', 'godzilla.jpg',
- 'hampter.jpg', 'room.png', 'science.jpg',
- 'woman1.png', 'woman2.jpg'], inputs=image)
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/sessions.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/sessions.py
deleted file mode 100644
index 6cb3b4dae397930fba60e4c08b25b9444783b6f7..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/sessions.py
+++ /dev/null
@@ -1,831 +0,0 @@
-"""
-requests.sessions
-~~~~~~~~~~~~~~~~~
-
-This module provides a Session object to manage and persist settings across
-requests (cookies, auth, proxies).
-"""
-import os
-import sys
-import time
-from collections import OrderedDict
-from datetime import timedelta
-
-from ._internal_utils import to_native_string
-from .adapters import HTTPAdapter
-from .auth import _basic_auth_str
-from .compat import Mapping, cookielib, urljoin, urlparse
-from .cookies import (
- RequestsCookieJar,
- cookiejar_from_dict,
- extract_cookies_to_jar,
- merge_cookies,
-)
-from .exceptions import (
- ChunkedEncodingError,
- ContentDecodingError,
- InvalidSchema,
- TooManyRedirects,
-)
-from .hooks import default_hooks, dispatch_hook
-
-# formerly defined here, reexposed here for backward compatibility
-from .models import ( # noqa: F401
- DEFAULT_REDIRECT_LIMIT,
- REDIRECT_STATI,
- PreparedRequest,
- Request,
-)
-from .status_codes import codes
-from .structures import CaseInsensitiveDict
-from .utils import ( # noqa: F401
- DEFAULT_PORTS,
- default_headers,
- get_auth_from_url,
- get_environ_proxies,
- get_netrc_auth,
- requote_uri,
- resolve_proxies,
- rewind_body,
- should_bypass_proxies,
- to_key_val_list,
-)
-
-# Preferred clock, based on which one is more accurate on a given system.
-if sys.platform == "win32":
- preferred_clock = time.perf_counter
-else:
- preferred_clock = time.time
-
-
-def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
- """Determines appropriate setting for a given request, taking into account
- the explicit setting on that request, and the setting in the session. If a
- setting is a dictionary, they will be merged together using `dict_class`
- """
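-    # e.g. merge_setting({'b': None}, {'a': 1, 'b': 2}) -> {'a': 1}: request-level
-    # values override session defaults, and a request value of None removes the key.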
-
- if session_setting is None:
- return request_setting
-
- if request_setting is None:
- return session_setting
-
- # Bypass if not a dictionary (e.g. verify)
- if not (
- isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping)
- ):
- return request_setting
-
- merged_setting = dict_class(to_key_val_list(session_setting))
- merged_setting.update(to_key_val_list(request_setting))
-
- # Remove keys that are set to None. Extract keys first to avoid altering
- # the dictionary during iteration.
- none_keys = [k for (k, v) in merged_setting.items() if v is None]
- for key in none_keys:
- del merged_setting[key]
-
- return merged_setting
-
-
-def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
- """Properly merges both requests and session hooks.
-
- This is necessary because when request_hooks == {'response': []}, the
- merge breaks Session hooks entirely.
- """
- if session_hooks is None or session_hooks.get("response") == []:
- return request_hooks
-
- if request_hooks is None or request_hooks.get("response") == []:
- return session_hooks
-
- return merge_setting(request_hooks, session_hooks, dict_class)
-
-
-class SessionRedirectMixin:
- def get_redirect_target(self, resp):
- """Receives a Response. Returns a redirect URI or ``None``"""
- # Due to the nature of how requests processes redirects this method will
- # be called at least once upon the original response and at least twice
- # on each subsequent redirect response (if any).
- # If a custom mixin is used to handle this logic, it may be advantageous
- # to cache the redirect location onto the response object as a private
- # attribute.
- if resp.is_redirect:
- location = resp.headers["location"]
- # Currently the underlying http module on py3 decode headers
- # in latin1, but empirical evidence suggests that latin1 is very
- # rarely used with non-ASCII characters in HTTP headers.
- # It is more likely to get UTF8 header rather than latin1.
- # This causes incorrect handling of UTF8 encoded location headers.
- # To solve this, we re-encode the location in latin1.
- location = location.encode("latin1")
- return to_native_string(location, "utf8")
- return None
-
- def should_strip_auth(self, old_url, new_url):
- """Decide whether Authorization header should be removed when redirecting"""
- old_parsed = urlparse(old_url)
- new_parsed = urlparse(new_url)
- if old_parsed.hostname != new_parsed.hostname:
- return True
- # Special case: allow http -> https redirect when using the standard
- # ports. This isn't specified by RFC 7235, but is kept to avoid
- # breaking backwards compatibility with older versions of requests
- # that allowed any redirects on the same host.
- if (
- old_parsed.scheme == "http"
- and old_parsed.port in (80, None)
- and new_parsed.scheme == "https"
- and new_parsed.port in (443, None)
- ):
- return False
-
- # Handle default port usage corresponding to scheme.
- changed_port = old_parsed.port != new_parsed.port
- changed_scheme = old_parsed.scheme != new_parsed.scheme
- default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
- if (
- not changed_scheme
- and old_parsed.port in default_port
- and new_parsed.port in default_port
- ):
- return False
-
- # Standard case: root URI must match
- return changed_port or changed_scheme
-
- def resolve_redirects(
- self,
- resp,
- req,
- stream=False,
- timeout=None,
- verify=True,
- cert=None,
- proxies=None,
- yield_requests=False,
- **adapter_kwargs,
- ):
- """Receives a Response. Returns a generator of Responses or Requests."""
-
- hist = [] # keep track of history
-
- url = self.get_redirect_target(resp)
- previous_fragment = urlparse(req.url).fragment
- while url:
- prepared_request = req.copy()
-
- # Update history and keep track of redirects.
- # resp.history must ignore the original request in this loop
- hist.append(resp)
- resp.history = hist[1:]
-
- try:
- resp.content # Consume socket so it can be released
- except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
- resp.raw.read(decode_content=False)
-
- if len(resp.history) >= self.max_redirects:
- raise TooManyRedirects(
- f"Exceeded {self.max_redirects} redirects.", response=resp
- )
-
- # Release the connection back into the pool.
- resp.close()
-
- # Handle redirection without scheme (see: RFC 1808 Section 4)
- if url.startswith("//"):
- parsed_rurl = urlparse(resp.url)
- url = ":".join([to_native_string(parsed_rurl.scheme), url])
-
- # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
- parsed = urlparse(url)
- if parsed.fragment == "" and previous_fragment:
- parsed = parsed._replace(fragment=previous_fragment)
- elif parsed.fragment:
- previous_fragment = parsed.fragment
- url = parsed.geturl()
-
- # Facilitate relative 'location' headers, as allowed by RFC 7231.
- # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
- # Compliant with RFC3986, we percent encode the url.
- if not parsed.netloc:
- url = urljoin(resp.url, requote_uri(url))
- else:
- url = requote_uri(url)
-
- prepared_request.url = to_native_string(url)
-
- self.rebuild_method(prepared_request, resp)
-
- # https://github.com/psf/requests/issues/1084
- if resp.status_code not in (
- codes.temporary_redirect,
- codes.permanent_redirect,
- ):
- # https://github.com/psf/requests/issues/3490
- purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding")
- for header in purged_headers:
- prepared_request.headers.pop(header, None)
- prepared_request.body = None
-
- headers = prepared_request.headers
- headers.pop("Cookie", None)
-
- # Extract any cookies sent on the response to the cookiejar
- # in the new request. Because we've mutated our copied prepared
- # request, use the old one that we haven't yet touched.
- extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
- merge_cookies(prepared_request._cookies, self.cookies)
- prepared_request.prepare_cookies(prepared_request._cookies)
-
- # Rebuild auth and proxy information.
- proxies = self.rebuild_proxies(prepared_request, proxies)
- self.rebuild_auth(prepared_request, resp)
-
- # A failed tell() sets `_body_position` to `object()`. This non-None
- # value ensures `rewindable` will be True, allowing us to raise an
- # UnrewindableBodyError, instead of hanging the connection.
- rewindable = prepared_request._body_position is not None and (
- "Content-Length" in headers or "Transfer-Encoding" in headers
- )
-
- # Attempt to rewind consumed file-like object.
- if rewindable:
- rewind_body(prepared_request)
-
- # Override the original request.
- req = prepared_request
-
- if yield_requests:
- yield req
- else:
-
- resp = self.send(
- req,
- stream=stream,
- timeout=timeout,
- verify=verify,
- cert=cert,
- proxies=proxies,
- allow_redirects=False,
- **adapter_kwargs,
- )
-
- extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
-
- # extract redirect url, if any, for the next loop
- url = self.get_redirect_target(resp)
- yield resp
-
- def rebuild_auth(self, prepared_request, response):
- """When being redirected we may want to strip authentication from the
- request to avoid leaking credentials. This method intelligently removes
- and reapplies authentication where possible to avoid credential loss.
- """
- headers = prepared_request.headers
- url = prepared_request.url
-
- if "Authorization" in headers and self.should_strip_auth(
- response.request.url, url
- ):
- # If we get redirected to a new host, we should strip out any
- # authentication headers.
- del headers["Authorization"]
-
- # .netrc might have more auth for us on our new host.
- new_auth = get_netrc_auth(url) if self.trust_env else None
- if new_auth is not None:
- prepared_request.prepare_auth(new_auth)
-
- def rebuild_proxies(self, prepared_request, proxies):
- """This method re-evaluates the proxy configuration by considering the
- environment variables. If we are redirected to a URL covered by
- NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
- proxy keys for this URL (in case they were stripped by a previous
- redirect).
-
- This method also replaces the Proxy-Authorization header where
- necessary.
-
- :rtype: dict
- """
- headers = prepared_request.headers
- scheme = urlparse(prepared_request.url).scheme
- new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)
-
- if "Proxy-Authorization" in headers:
- del headers["Proxy-Authorization"]
-
- try:
- username, password = get_auth_from_url(new_proxies[scheme])
- except KeyError:
- username, password = None, None
-
- if username and password:
- headers["Proxy-Authorization"] = _basic_auth_str(username, password)
-
- return new_proxies
-
- def rebuild_method(self, prepared_request, response):
- """When being redirected we may want to change the method of the request
- based on certain specs or browser behavior.
- """
- method = prepared_request.method
-
- # https://tools.ietf.org/html/rfc7231#section-6.4.4
- if response.status_code == codes.see_other and method != "HEAD":
- method = "GET"
-
- # Do what the browsers do, despite standards...
- # First, turn 302s into GETs.
- if response.status_code == codes.found and method != "HEAD":
- method = "GET"
-
- # Second, if a POST is responded to with a 301, turn it into a GET.
- # This bizarre behaviour is explained in Issue 1704.
- if response.status_code == codes.moved and method == "POST":
- method = "GET"
-
- prepared_request.method = method
-
-
-class Session(SessionRedirectMixin):
- """A Requests session.
-
- Provides cookie persistence, connection-pooling, and configuration.
-
- Basic Usage::
-
- >>> import requests
- >>> s = requests.Session()
- >>> s.get('https://httpbin.org/get')
-      <Response [200]>
-
- Or as a context manager::
-
- >>> with requests.Session() as s:
- ... s.get('https://httpbin.org/get')
-      <Response [200]>
- """
-
- __attrs__ = [
- "headers",
- "cookies",
- "auth",
- "proxies",
- "hooks",
- "params",
- "verify",
- "cert",
- "adapters",
- "stream",
- "trust_env",
- "max_redirects",
- ]
-
- def __init__(self):
-
- #: A case-insensitive dictionary of headers to be sent on each
-        #: :class:`Request <Request>` sent from this
-        #: :class:`Session <Session>`.
- self.headers = default_headers()
-
- #: Default Authentication tuple or object to attach to
-        #: :class:`Request <Request>`.
- self.auth = None
-
- #: Dictionary mapping protocol or protocol and host to the URL of the proxy
- #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
-        #: be used on each :class:`Request <Request>`.
- self.proxies = {}
-
- #: Event-handling hooks.
- self.hooks = default_hooks()
-
- #: Dictionary of querystring data to attach to each
-        #: :class:`Request <Request>`. The dictionary values may be lists for
- #: representing multivalued query parameters.
- self.params = {}
-
- #: Stream response content default.
- self.stream = False
-
- #: SSL Verification default.
- #: Defaults to `True`, requiring requests to verify the TLS certificate at the
- #: remote end.
- #: If verify is set to `False`, requests will accept any TLS certificate
- #: presented by the server, and will ignore hostname mismatches and/or
- #: expired certificates, which will make your application vulnerable to
- #: man-in-the-middle (MitM) attacks.
- #: Only set this to `False` for testing.
- self.verify = True
-
- #: SSL client certificate default, if String, path to ssl client
- #: cert file (.pem). If Tuple, ('cert', 'key') pair.
- self.cert = None
-
- #: Maximum number of redirects allowed. If the request exceeds this
- #: limit, a :class:`TooManyRedirects` exception is raised.
- #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
- #: 30.
- self.max_redirects = DEFAULT_REDIRECT_LIMIT
-
- #: Trust environment settings for proxy configuration, default
- #: authentication and similar.
- self.trust_env = True
-
- #: A CookieJar containing all currently outstanding cookies set on this
- #: session. By default it is a
-        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
- #: may be any other ``cookielib.CookieJar`` compatible object.
- self.cookies = cookiejar_from_dict({})
-
- # Default connection adapters.
- self.adapters = OrderedDict()
- self.mount("https://", HTTPAdapter())
- self.mount("http://", HTTPAdapter())
-
- def __enter__(self):
- return self
-
- def __exit__(self, *args):
- self.close()
-
- def prepare_request(self, request):
-        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
-        transmission and returns it. The :class:`PreparedRequest` has settings
-        merged from the :class:`Request <Request>` instance and those of the
- :class:`Session`.
-
- :param request: :class:`Request` instance to prepare with this
- session's settings.
- :rtype: requests.PreparedRequest
- """
- cookies = request.cookies or {}
-
- # Bootstrap CookieJar.
- if not isinstance(cookies, cookielib.CookieJar):
- cookies = cookiejar_from_dict(cookies)
-
- # Merge with session cookies
- merged_cookies = merge_cookies(
- merge_cookies(RequestsCookieJar(), self.cookies), cookies
- )
-
- # Set environment's basic authentication if not explicitly set.
- auth = request.auth
- if self.trust_env and not auth and not self.auth:
- auth = get_netrc_auth(request.url)
-
- p = PreparedRequest()
- p.prepare(
- method=request.method.upper(),
- url=request.url,
- files=request.files,
- data=request.data,
- json=request.json,
- headers=merge_setting(
- request.headers, self.headers, dict_class=CaseInsensitiveDict
- ),
- params=merge_setting(request.params, self.params),
- auth=merge_setting(auth, self.auth),
- cookies=merged_cookies,
- hooks=merge_hooks(request.hooks, self.hooks),
- )
- return p
-
- def request(
- self,
- method,
- url,
- params=None,
- data=None,
- headers=None,
- cookies=None,
- files=None,
- auth=None,
- timeout=None,
- allow_redirects=True,
- proxies=None,
- hooks=None,
- stream=None,
- verify=None,
- cert=None,
- json=None,
- ):
-        """Constructs a :class:`Request <Request>`, prepares it and sends it.
-        Returns :class:`Response <Response>` object.
-
- :param method: method for the new :class:`Request` object.
- :param url: URL for the new :class:`Request` object.
- :param params: (optional) Dictionary or bytes to be sent in the query
- string for the :class:`Request`.
- :param data: (optional) Dictionary, list of tuples, bytes, or file-like
- object to send in the body of the :class:`Request`.
- :param json: (optional) json to send in the body of the
- :class:`Request`.
- :param headers: (optional) Dictionary of HTTP Headers to send with the
- :class:`Request`.
- :param cookies: (optional) Dict or CookieJar object to send with the
- :class:`Request`.
- :param files: (optional) Dictionary of ``'filename': file-like-objects``
- for multipart encoding upload.
- :param auth: (optional) Auth tuple or callable to enable
- Basic/Digest/Custom HTTP Auth.
- :param timeout: (optional) How long to wait for the server to send
- data before giving up, as a float, or a :ref:`(connect timeout,
-            read timeout) <timeouts>` tuple.
- :type timeout: float or tuple
- :param allow_redirects: (optional) Set to True by default.
- :type allow_redirects: bool
- :param proxies: (optional) Dictionary mapping protocol or protocol and
- hostname to the URL of the proxy.
- :param stream: (optional) whether to immediately download the response
- content. Defaults to ``False``.
- :param verify: (optional) Either a boolean, in which case it controls whether we verify
- the server's TLS certificate, or a string, in which case it must be a path
- to a CA bundle to use. Defaults to ``True``. When set to
- ``False``, requests will accept any TLS certificate presented by
- the server, and will ignore hostname mismatches and/or expired
- certificates, which will make your application vulnerable to
- man-in-the-middle (MitM) attacks. Setting verify to ``False``
- may be useful during local development or testing.
- :param cert: (optional) if String, path to ssl client cert file (.pem).
- If Tuple, ('cert', 'key') pair.
- :rtype: requests.Response
- """
- # Create the Request.
- req = Request(
- method=method.upper(),
- url=url,
- headers=headers,
- files=files,
- data=data or {},
- json=json,
- params=params or {},
- auth=auth,
- cookies=cookies,
- hooks=hooks,
- )
- prep = self.prepare_request(req)
-
- proxies = proxies or {}
-
- settings = self.merge_environment_settings(
- prep.url, proxies, stream, verify, cert
- )
-
- # Send the request.
- send_kwargs = {
- "timeout": timeout,
- "allow_redirects": allow_redirects,
- }
- send_kwargs.update(settings)
- resp = self.send(prep, **send_kwargs)
-
- return resp
-
- def get(self, url, **kwargs):
- r"""Sends a GET request. Returns :class:`Response` object.
-
- :param url: URL for the new :class:`Request` object.
- :param \*\*kwargs: Optional arguments that ``request`` takes.
- :rtype: requests.Response
- """
-
- kwargs.setdefault("allow_redirects", True)
- return self.request("GET", url, **kwargs)
-
- def options(self, url, **kwargs):
-        r"""Sends an OPTIONS request. Returns :class:`Response` object.
-
- :param url: URL for the new :class:`Request` object.
- :param \*\*kwargs: Optional arguments that ``request`` takes.
- :rtype: requests.Response
- """
-
- kwargs.setdefault("allow_redirects", True)
- return self.request("OPTIONS", url, **kwargs)
-
- def head(self, url, **kwargs):
- r"""Sends a HEAD request. Returns :class:`Response` object.
-
- :param url: URL for the new :class:`Request` object.
- :param \*\*kwargs: Optional arguments that ``request`` takes.
- :rtype: requests.Response
- """
-
- kwargs.setdefault("allow_redirects", False)
- return self.request("HEAD", url, **kwargs)
-
- def post(self, url, data=None, json=None, **kwargs):
- r"""Sends a POST request. Returns :class:`Response` object.
-
- :param url: URL for the new :class:`Request` object.
- :param data: (optional) Dictionary, list of tuples, bytes, or file-like
- object to send in the body of the :class:`Request`.
- :param json: (optional) json to send in the body of the :class:`Request`.
- :param \*\*kwargs: Optional arguments that ``request`` takes.
- :rtype: requests.Response
- """
-
- return self.request("POST", url, data=data, json=json, **kwargs)
-
- def put(self, url, data=None, **kwargs):
- r"""Sends a PUT request. Returns :class:`Response` object.
-
- :param url: URL for the new :class:`Request` object.
- :param data: (optional) Dictionary, list of tuples, bytes, or file-like
- object to send in the body of the :class:`Request`.
- :param \*\*kwargs: Optional arguments that ``request`` takes.
- :rtype: requests.Response
- """
-
- return self.request("PUT", url, data=data, **kwargs)
-
- def patch(self, url, data=None, **kwargs):
- r"""Sends a PATCH request. Returns :class:`Response` object.
-
- :param url: URL for the new :class:`Request` object.
- :param data: (optional) Dictionary, list of tuples, bytes, or file-like
- object to send in the body of the :class:`Request`.
- :param \*\*kwargs: Optional arguments that ``request`` takes.
- :rtype: requests.Response
- """
-
- return self.request("PATCH", url, data=data, **kwargs)
-
- def delete(self, url, **kwargs):
- r"""Sends a DELETE request. Returns :class:`Response` object.
-
- :param url: URL for the new :class:`Request` object.
- :param \*\*kwargs: Optional arguments that ``request`` takes.
- :rtype: requests.Response
- """
-
- return self.request("DELETE", url, **kwargs)
-
- def send(self, request, **kwargs):
- """Send a given PreparedRequest.
-
- :rtype: requests.Response
- """
- # Set defaults that the hooks can utilize to ensure they always have
- # the correct parameters to reproduce the previous request.
- kwargs.setdefault("stream", self.stream)
- kwargs.setdefault("verify", self.verify)
- kwargs.setdefault("cert", self.cert)
- if "proxies" not in kwargs:
- kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env)
-
- # It's possible that users might accidentally send a Request object.
- # Guard against that specific failure case.
- if isinstance(request, Request):
- raise ValueError("You can only send PreparedRequests.")
-
- # Set up variables needed for resolve_redirects and dispatching of hooks
- allow_redirects = kwargs.pop("allow_redirects", True)
- stream = kwargs.get("stream")
- hooks = request.hooks
-
- # Get the appropriate adapter to use
- adapter = self.get_adapter(url=request.url)
-
- # Start time (approximately) of the request
- start = preferred_clock()
-
- # Send the request
- r = adapter.send(request, **kwargs)
-
- # Total elapsed time of the request (approximately)
- elapsed = preferred_clock() - start
- r.elapsed = timedelta(seconds=elapsed)
-
- # Response manipulation hooks
- r = dispatch_hook("response", hooks, r, **kwargs)
-
- # Persist cookies
- if r.history:
-
- # If the hooks create history then we want those cookies too
- for resp in r.history:
- extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
-
- extract_cookies_to_jar(self.cookies, request, r.raw)
-
- # Resolve redirects if allowed.
- if allow_redirects:
- # Redirect resolving generator.
- gen = self.resolve_redirects(r, request, **kwargs)
- history = [resp for resp in gen]
- else:
- history = []
-
- # Shuffle things around if there's history.
- if history:
- # Insert the first (original) request at the start
- history.insert(0, r)
- # Get the last request made
- r = history.pop()
- r.history = history
-
- # If redirects aren't being followed, store the response on the Request for Response.next().
- if not allow_redirects:
- try:
- r._next = next(
- self.resolve_redirects(r, request, yield_requests=True, **kwargs)
- )
- except StopIteration:
- pass
-
- if not stream:
- r.content
-
- return r
-
- def merge_environment_settings(self, url, proxies, stream, verify, cert):
- """
- Check the environment and merge it with some settings.
-
- :rtype: dict
- """
- # Gather clues from the surrounding environment.
- if self.trust_env:
- # Set environment's proxies.
- no_proxy = proxies.get("no_proxy") if proxies is not None else None
- env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
- for (k, v) in env_proxies.items():
- proxies.setdefault(k, v)
-
- # Look for requests environment configuration
- # and be compatible with cURL.
- if verify is True or verify is None:
- verify = (
- os.environ.get("REQUESTS_CA_BUNDLE")
- or os.environ.get("CURL_CA_BUNDLE")
- or verify
- )
-
- # Merge all the kwargs.
- proxies = merge_setting(proxies, self.proxies)
- stream = merge_setting(stream, self.stream)
- verify = merge_setting(verify, self.verify)
- cert = merge_setting(cert, self.cert)
-
- return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert}
-
- def get_adapter(self, url):
- """
- Returns the appropriate connection adapter for the given URL.
-
- :rtype: requests.adapters.BaseAdapter
- """
- for (prefix, adapter) in self.adapters.items():
-
- if url.lower().startswith(prefix.lower()):
- return adapter
-
- # Nothing matches :-/
- raise InvalidSchema(f"No connection adapters were found for {url!r}")
-
- def close(self):
- """Closes all adapters and as such the session"""
- for v in self.adapters.values():
- v.close()
-
- def mount(self, prefix, adapter):
- """Registers a connection adapter to a prefix.
-
- Adapters are sorted in descending order by prefix length.
- """
- self.adapters[prefix] = adapter
- keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
-
- for key in keys_to_move:
- self.adapters[key] = self.adapters.pop(key)
-
- def __getstate__(self):
- state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
- return state
-
- def __setstate__(self, state):
- for attr, value in state.items():
- setattr(self, attr, value)
-
-
-def session():
- """
- Returns a :class:`Session` for context-management.
-
- .. deprecated:: 1.0.0
-
- This method has been deprecated since version 1.0.0 and is only kept for
- backwards compatibility. New code should use :class:`~requests.sessions.Session`
- to create a session. This may be removed at a future date.
-
- :rtype: Session
- """
- return Session()
diff --git a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/InvISP/README.md b/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/InvISP/README.md
deleted file mode 100644
index 654d33dae8e00fcd61b6f38f8e2763ae87dfefa4..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/InvISP/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# Invertible Image Signal Processing
-
-
-
-
-
-**This repository includes official codes for "[Invertible Image Signal Processing (CVPR2021)](https://arxiv.org/abs/2103.15061)".**
-
-
-**Figure:** *Our framework*
-
-Unprocessed RAW data is a highly valuable image format for image editing and computer vision. However, since the file size of RAW data is huge, most users can only get access to processed and compressed sRGB images. To bridge this gap, we design an Invertible Image Signal Processing (InvISP) pipeline, which not only enables rendering visually appealing sRGB images but also allows recovering nearly perfect RAW data. Due to our framework's inherent reversibility, we can reconstruct realistic RAW data instead of synthesizing RAW data from sRGB images, without any memory overhead. We also integrate a differentiable JPEG compression simulator that empowers our framework to reconstruct RAW data from JPEG images. Extensive quantitative and qualitative experiments on two DSLR cameras demonstrate that our method obtains much higher quality in both rendered sRGB images and reconstructed RAW data than alternative methods.
-
-> **Invertible Image Signal Processing**
-> Yazhou Xing*, Zian Qian*, Qifeng Chen (* indicates joint first authors)
-> HKUST
-
-[[Paper](https://arxiv.org/abs/2103.15061)]
-[[Project Page](https://yzxing87.github.io/InvISP/index.html)]
-[[Technical Video (Coming soon)](https://yzxing87.github.io/TBA)]
-
-
-**Figure:** *Our results*
-
-
-## Known issue (10/2021)
-There are some errors in the bilinear demosaicing implementation of the Python library ``colour_demosaicing``. You can fix them by adding the 'constant' parameter to the convolve method in [this file](https://colour-demosaicing.readthedocs.io/en/latest/_modules/colour_demosaicing/bayer/demosaicing/bilinear.html#demosaicing_CFA_Bayer_bilinear) of your installed package. Otherwise the demosaicing results will fall outside their original range and the trained results will show incorrect colors.
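-
-As a quick, minimal sanity check (this snippet is only an illustration and is not part
-of our pipeline; it assumes the public `demosaicing_CFA_Bayer_bilinear` function and a
-synthetic mosaic), you can compare the value ranges before and after patching the package:
-
-```python
-import numpy as np
-from colour_demosaicing import demosaicing_CFA_Bayer_bilinear
-
-# synthetic Bayer mosaic in [0, 1]; substitute a real preprocessed RAW plane if you like
-cfa = np.random.rand(64, 64)
-rgb = demosaicing_CFA_Bayer_bilinear(cfa, pattern="RGGB")
-
-# after the 'constant' fix the output should stay close to the input range;
-# without it, border pixels can fall outside [cfa.min(), cfa.max()]
-print(f"input range:  [{cfa.min():.4f}, {cfa.max():.4f}]")
-print(f"output range: [{rgb.min():.4f}, {rgb.max():.4f}]")
-```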
-
-## Installation
-Clone this repo.
-```bash
-git clone https://github.com/yzxing87/Invertible-ISP.git
-cd Invertible-ISP/
-```
-
-We have tested our code on Ubuntu 18.04 LTS with PyTorch 1.4.0, CUDA 10.1 and cudnn7.6.5. Please install dependencies by
-```bash
-conda env create -f environment.yml
-```
-
-## Preparing datasets
-We use [MIT-Adobe FiveK Dataset](https://data.csail.mit.edu/graphics/fivek/) for training and evaluation. To reproduce our results, you need to first download the NIKON D700 and Canon EOS 5D subsets from their website. The images (DNG) can be downloaded by
-```bash
-cd data/
-bash data_preprocess.sh
-```
-The downloading may take a while. After downloading, we need to prepare the bilinearly demosaiced RAW and white balance parameters as network input, and ground truth sRGB (in JPEG format) as supervision.
-```bash
-python data_preprocess.py --camera="NIKON_D700"
-python data_preprocess.py --camera="Canon_EOS_5D"
-```
-The dataset will be organized into
-| Path | Size | Files | Format | Description
-| :--- | :--: | ----: | :----: | :----------
-| data | 585 GB | 1 | | Main folder
-| ├ Canon_EOS_5D | 448 GB | 1 | | Canon sub-folder
-| ├ NIKON_D700 | 137 GB | 1 | | NIKON sub-folder
-| ├ DNG | 2.9 GB | 487 | DNG | In-the-wild RAW.
-| ├ RAW | 133 GB | 487 | NPZ | Preprocessed RAW.
-| ├ RGB | 752 MB | 487 | JPG | Ground-truth RGB.
-| ├ NIKON_D700_train.txt | 1 KB | 1 | TXT | Training data split.
-| ├ NIKON_D700_test.txt | 5 KB | 1 | TXT | Test data split.
-
-## Training networks
-We specify the training arguments into `train.sh`. Simply run
-```bash
-cd ../
-bash train.sh
-```
-The checkpoints will be saved into `./exps/{exp_name}/checkpoint/`.
-
-## Test and evaluation
-### Use your trained model
-To reconstruct RAW from JPEG RGB, we first need to save the rendered RGB to disk and then run the test to recover the RAW.
-Original RAW images are too large to be tested directly on a single 2080 Ti GPU. We provide two ways to test the model.
-
-1. Subsampling the RAW for visualization purposes:
- ```bash
- python test_rgb.py --task=EXPERIMENT_NAME \
- --data_path="./data/" \
- --gamma \
- --camera=CAMERA_NAME \
- --out_path=OUTPUT_PATH \
- --ckpt=CKPT_PATH
- ```
-   After it finishes, run
- ```bash
- python test_raw.py --task=EXPERIMENT_NAME \
- --data_path="./data/" \
- --gamma \
- --camera=CAMERA_NAME \
- --out_path=OUTPUT_PATH \
- --ckpt=CKPT_PATH
- ```
-2. Splitting the RAW data into patches, for quantitative evaluation. Turn on the `--split_to_patch` argument (see `test.sh`). The PSNR and SSIM metrics can be obtained by
- ```bash
- python cal_metrics.py --path=PATH_TO_SAVED_PATCHES
- ```
-### Use our pretrained weights
-We also provide our trained models for reference. The checkpoints are placed in the `pretrained/` folder. Specify the correct PATH in `test.sh`, and you will get results similar to those in our paper. Please note that in the context of ISP, a trained model can only be applied to a specific camera. This is due to the camera-dependent proprietary raw color space and photo-finishing steps.
-
-
-## Citation
-
-```
-@inproceedings{xing21invertible,
- title = {Invertible Image Signal Processing},
- author = {Xing, Yazhou and Qian, Zian and Chen, Qifeng},
- booktitle = {CVPR},
- year = {2021}
-}
-```
-## Acknowledgement
-Part of the codes benefit from [DiffJPEG](https://github.com/mlomnitz/DiffJPEG) and [Invertible-Image-Rescaling](https://github.com/pkuxmq/Invertible-Image-Rescaling).
-
-## Contact
-Feel free to contact me if you have any questions. (Yazhou Xing, yzxing87@gmail.com)
diff --git a/spaces/Realcat/image-matching-webui/third_party/SGMNet/datadump/check_training_data.py b/spaces/Realcat/image-matching-webui/third_party/SGMNet/datadump/check_training_data.py
deleted file mode 100644
index 0b2df392358206d702b60d9d06d28e4f969f570a..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/SGMNet/datadump/check_training_data.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import argparse
-import os
-import numpy as np
-import h5py
-import cv2
-from numpy.core.numeric import indices
-import pyxis as px
-from tqdm import trange
-
-import sys
-
-ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
-sys.path.insert(0, ROOT_DIR)
-
-from utils import evaluation_utils, train_utils
-
-parser = argparse.ArgumentParser(description="checking training data.")
-parser.add_argument("--meta_dir", type=str, default="dataset/valid")
-parser.add_argument("--dataset_dir", type=str, default="dataset")
-parser.add_argument("--desc_dir", type=str, default="desc")
-parser.add_argument("--raw_dir", type=str, default="raw_data")
-parser.add_argument("--desc_suffix", type=str, default="_root_1000.hdf5")
-parser.add_argument("--vis_folder", type=str, default=None)
-args = parser.parse_args()
-
-
-if __name__ == "__main__":
- if args.vis_folder is not None and not os.path.exists(args.vis_folder):
- os.mkdir(args.vis_folder)
-
- pair_num_list = np.loadtxt(os.path.join(args.meta_dir, "pair_num.txt"), dtype=str)
- pair_seq_list, accu_pair_list = train_utils.parse_pair_seq(pair_num_list)
- total_pair = int(pair_num_list[0, 1])
- total_inlier_rate, total_corr_num, total_incorr_num = [], [], []
- pair_num_list = pair_num_list[1:]
-
- for index in trange(total_pair):
- seq = pair_seq_list[index]
- index_within_seq = index - accu_pair_list[seq]
- with h5py.File(os.path.join(args.dataset_dir, seq, "info.h5py"), "r") as data:
- corr = data["corr"][str(index_within_seq)][()]
- corr1, corr2 = corr[:, 0], corr[:, 1]
- incorr1, incorr2 = (
- data["incorr1"][str(index_within_seq)][()],
- data["incorr2"][str(index_within_seq)][()],
- )
- img_path1, img_path2 = (
- data["img_path1"][str(index_within_seq)][()][0].decode(),
- data["img_path2"][str(index_within_seq)][()][0].decode(),
- )
- img_name1, img_name2 = img_path1.split("/")[-1], img_path2.split("/")[-1]
- fea_path1, fea_path2 = os.path.join(
- args.desc_dir, seq, img_name1 + args.desc_suffix
- ), os.path.join(args.desc_dir, seq, img_name2 + args.desc_suffix)
- with h5py.File(fea_path1, "r") as fea1, h5py.File(fea_path2, "r") as fea2:
- desc1, kpt1 = fea1["descriptors"][()], fea1["keypoints"][()][:, :2]
- desc2, kpt2 = fea2["descriptors"][()], fea2["keypoints"][()][:, :2]
- sim_mat = desc1 @ desc2.T
- nn_index1, nn_index2 = np.argmax(sim_mat, axis=1), np.argmax(
- sim_mat, axis=0
- )
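-            # a ground-truth correspondence is counted as a correct NN match only if
-            # the match is mutual (cross-check passes) and the forward NN index agrees
-            # with the annotated correspondence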
- mask_mutual = (nn_index2[nn_index1] == np.arange(len(nn_index1)))[corr1]
- mask_inlier = nn_index1[corr1] == corr2
- mask_nn_correct = np.logical_and(mask_mutual, mask_inlier)
- # statistics
- total_inlier_rate.append(mask_nn_correct.mean())
- total_corr_num.append(len(corr1))
- total_incorr_num.append((len(incorr1) + len(incorr2)) / 2)
- # dump visualization
- if args.vis_folder is not None:
- # draw corr
- img1, img2 = cv2.imread(
- os.path.join(args.raw_dir, img_path1)
- ), cv2.imread(os.path.join(args.raw_dir, img_path2))
- corr1_pos, corr2_pos = np.take_along_axis(
- kpt1, corr1[:, np.newaxis], axis=0
- ), np.take_along_axis(kpt2, corr2[:, np.newaxis], axis=0)
- dis_corr = evaluation_utils.draw_match(img1, img2, corr1_pos, corr2_pos)
- cv2.imwrite(
- os.path.join(args.vis_folder, str(index) + ".png"), dis_corr
- )
- # draw incorr
- incorr1_pos, incorr2_pos = np.take_along_axis(
- kpt1, incorr1[:, np.newaxis], axis=0
- ), np.take_along_axis(kpt2, incorr2[:, np.newaxis], axis=0)
- dis_incorr1, dis_incorr2 = evaluation_utils.draw_points(
- img1, incorr1_pos
- ), evaluation_utils.draw_points(img2, incorr2_pos)
- cv2.imwrite(
- os.path.join(args.vis_folder, str(index) + "_incorr1.png"),
- dis_incorr1,
- )
- cv2.imwrite(
- os.path.join(args.vis_folder, str(index) + "_incorr2.png"),
- dis_incorr2,
- )
-
- print("NN matching accuracy: ", np.asarray(total_inlier_rate).mean())
- print("mean corr number: ", np.asarray(total_corr_num).mean())
- print("mean incorr number: ", np.asarray(total_incorr_num).mean())
diff --git a/spaces/Ritori/play_with_baby_llama2/train.py b/spaces/Ritori/play_with_baby_llama2/train.py
deleted file mode 100644
index 34248b87043d7d9843f3cf6ec8cf4b8d08155bea..0000000000000000000000000000000000000000
--- a/spaces/Ritori/play_with_baby_llama2/train.py
+++ /dev/null
@@ -1,331 +0,0 @@
-"""
-This training script can be run both on a single gpu in debug mode,
-and also in a larger training run with distributed data parallel (ddp).
-
-To run on a single GPU small debug run, example:
-$ python -m train.py --compile=False --eval_iters=10 --batch_size=8
-
-To run with DDP on 4 gpus on 1 node, example:
-$ torchrun --standalone --nproc_per_node=4 train.py
-
-To run with DDP on 4 gpus across 2 nodes, example:
-- Run on the first (master) node with example IP 123.456.123.456:
-$ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=123.456.123.456 --master_port=1234 train.py
-- Run on the worker node:
-$ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=123.456.123.456 --master_port=1234 train.py
-(If your cluster does not have Infiniband interconnect prepend NCCL_IB_DISABLE=1)
-"""
-
-import math
-import os
-import time
-from contextlib import nullcontext
-from datetime import datetime
-from functools import partial
-
-import torch
-from model import Transformer, ModelArgs
-from torch.distributed import destroy_process_group, init_process_group
-from torch.nn.parallel import DistributedDataParallel as DDP
-
-from tinystories import Task
-
-# -----------------------------------------------------------------------------
-# I/O
-out_dir = "out"
-eval_interval = 2000
-log_interval = 1
-eval_iters = 100
-eval_only = False # if True, script exits right after the first eval
-always_save_checkpoint = False # if True, always save a checkpoint after each eval
-init_from = "scratch" # 'scratch' or 'resume'
-# wandb logging
-wandb_log = False # disabled by default
-wandb_project = "llamac"
-wandb_run_name = "run" + datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
-# data
-batch_size = 128 # if gradient_accumulation_steps > 1, this is the micro-batch size
-max_seq_len = 256
-# model
-dim = 288
-n_layers = 6
-n_heads = 6
-multiple_of = 32
-dropout = 0.0
-# adamw optimizer
-gradient_accumulation_steps = 4 # used to simulate larger batch sizes
-learning_rate = 5e-4 # max learning rate
-max_iters = 100000 # total number of training iterations
-weight_decay = 1e-1
-beta1 = 0.9
-beta2 = 0.95
-grad_clip = 1.0 # clip gradients at this value, or disable if == 0.0
-# learning rate decay settings
-decay_lr = True # whether to decay the learning rate
-warmup_iters = 1000 # how many steps to warm up for
-# system
-device = "cuda" # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks
-dtype = "bfloat16" # float32|bfloat16|float16
-compile = True # use PyTorch 2.0 to compile the model to be faster
-# -----------------------------------------------------------------------------
-config_keys = [
- k
- for k, v in globals().items()
- if not k.startswith("_") and isinstance(v, (int, float, bool, str))
-]
-exec(open("configurator.py").read()) # overrides from command line or config file
-config = {k: globals()[k] for k in config_keys} # will be useful for logging
-# -----------------------------------------------------------------------------
-
-# fixing some hyperparams to sensible defaults
-lr_decay_iters = max_iters # should be ~= max_iters per Chinchilla
-min_lr = 0.0 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
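-# with the defaults above, the schedule in get_lr() warms up linearly for the first
-# 1000 iterations, then cosine-decays from 5e-4 down to min_lr over lr_decay_iters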
-
-# various inits, derived attributes, I/O setup
-ddp = int(os.environ.get("RANK", -1)) != -1 # is this a ddp run?
-if ddp:
- init_process_group(backend="nccl")
- ddp_rank = int(os.environ["RANK"])
- ddp_local_rank = int(os.environ["LOCAL_RANK"])
- ddp_world_size = int(os.environ["WORLD_SIZE"])
- device = f"cuda:{ddp_local_rank}"
- torch.cuda.set_device(device)
- master_process = ddp_rank == 0 # this process will do logging, checkpointing etc.
- seed_offset = ddp_rank # each process gets a different seed
- # world_size number of processes will be training simultaneously, so we can scale
- # down the desired gradient accumulation iterations per process proportionally
- assert gradient_accumulation_steps % ddp_world_size == 0
- gradient_accumulation_steps //= ddp_world_size
-else:
- # if not ddp, we are running on a single gpu, and one process
- master_process = True
- seed_offset = 0
- ddp_world_size = 1
-tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * max_seq_len
-if master_process:
- print(f"tokens per iteration will be: {tokens_per_iter:,}")
- print(f"breaks down as: {gradient_accumulation_steps} grad accum steps * {ddp_world_size} processes * {batch_size} batch size * {max_seq_len} max seq len")
-
-if master_process:
- os.makedirs(out_dir, exist_ok=True)
-torch.manual_seed(1337 + seed_offset)
-torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
-torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
-device_type = "cuda" if "cuda" in device else "cpu" # for later use in torch.autocast
-# note: float16 data type will automatically use a GradScaler
-ptdtype = {"float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16}[dtype]
-ctx = (
- nullcontext()
- if device_type == "cpu"
- else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
-)
-
-# task-specific setup
-iter_batches = partial(
- Task.iter_batches,
- batch_size=batch_size,
- max_seq_len=max_seq_len,
- device=device,
- num_workers=0,
-)
-
-# init these up here, can override if init_from='resume' (i.e. from a checkpoint)
-iter_num = 0
-best_val_loss = 1e9
-
-# model init
-model_args = dict(
- dim=dim,
- n_layers=n_layers,
- n_heads=n_heads,
- n_kv_heads=n_heads,
- vocab_size=32000,
- multiple_of=multiple_of,
- max_seq_len=max_seq_len,
- #dropout=dropout,
-) # start with model_args from command line
-if init_from == "scratch":
- # init a new model from scratch
- print("Initializing a new model from scratch")
- gptconf = ModelArgs(**model_args)
- model = Transformer(gptconf)
-elif init_from == "resume":
- print(f"Resuming training from {out_dir}")
- # resume training from a checkpoint.
- ckpt_path = os.path.join(out_dir, "ckpt.pt")
- checkpoint = torch.load(ckpt_path, map_location=device)
- checkpoint_model_args = checkpoint["model_args"]
- # force these config attributes to be equal otherwise we can't even resume training
- # the rest of the attributes (e.g. dropout) can stay as desired from command line
- for k in ["dim", "n_layers", "n_heads", "n_kv_heads", "vocab_size", "multiple_of", "max_seq_len"]:
- model_args[k] = checkpoint_model_args[k]
- # create the model
- gptconf = ModelArgs(**model_args)
- model = Transformer(gptconf)
- state_dict = checkpoint["model"]
- # fix the keys of the state dictionary :(
- # honestly no idea how checkpoints sometimes get this prefix, have to debug more
- unwanted_prefix = "_orig_mod."
- for k, v in list(state_dict.items()):
- if k.startswith(unwanted_prefix):
- state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k)
- model.load_state_dict(state_dict)
- iter_num = checkpoint["iter_num"]
- best_val_loss = checkpoint["best_val_loss"]
-model.to(device)
-
-# initialize a GradScaler. If enabled=False scaler is a no-op
-scaler = torch.cuda.amp.GradScaler(enabled=(dtype == "float16"))
-
-# optimizer
-optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type)
-if init_from == "resume":
- optimizer.load_state_dict(checkpoint["optimizer"])
-checkpoint = None # free up memory
-
-# compile the model
-if compile:
- print("compiling the model... (takes a ~minute)")
- unoptimized_model = model
- model = torch.compile(model) # requires PyTorch 2.0
-
-# wrap model into DDP container
-if ddp:
- # Ignore the `freqs_cis` buffer so that DDP does not broadcast it at
- # construction time since NCCL does not support `ComplexFloat`
- prefix = "_orig_mod." if compile else ""
- model._ddp_params_and_buffers_to_ignore = {prefix + "freqs_cis"}
- model = DDP(model, device_ids=[ddp_local_rank])
-
-# helps estimate an arbitrarily accurate loss over either split using many batches
-@torch.no_grad()
-def estimate_loss():
- out = {}
- model.eval()
- for split in ["train", "val"]:
- batch_iter = iter_batches(split)
- losses = torch.zeros(eval_iters) # keep on CPU
- for k in range(eval_iters):
- X, Y = next(batch_iter)
- with ctx:
- logits, loss = model(X, Y)
- losses[k] = loss.item()
- out[split] = losses.mean()
- model.train()
- return out
-
-# learning rate decay scheduler (cosine with warmup)
-def get_lr(it):
- # 1) linear warmup for warmup_iters steps
- if it < warmup_iters:
- return learning_rate * it / warmup_iters
- # 2) if it > lr_decay_iters, return min learning rate
- if it > lr_decay_iters:
- return min_lr
- # 3) in between, use cosine decay down to min learning rate
- decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
- assert 0 <= decay_ratio <= 1
- coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff ranges 0..1
- return min_lr + coeff * (learning_rate - min_lr)
-
-# logging
-if wandb_log and master_process:
- import wandb
- wandb.init(project=wandb_project, name=wandb_run_name, config=config)
-
-# training loop
-train_batch_iter = iter_batches("train")
-X, Y = next(train_batch_iter) # fetch the very first batch
-t0 = time.time()
-local_iter_num = 0 # number of iterations in the lifetime of this process
-raw_model = model.module if ddp else model # unwrap DDP container if needed
-running_mfu = -1.0
-while True:
- # determine and set the learning rate for this iteration
- lr = get_lr(iter_num) if decay_lr else learning_rate
- for param_group in optimizer.param_groups:
- param_group["lr"] = lr
-
- # evaluate the loss on train/val sets and write checkpoints
- if iter_num % eval_interval == 0 and master_process:
- losses = estimate_loss()
- print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}")
- if wandb_log:
- try:
- wandb.log(
- {
- "iter": iter_num,
- "tokens": iter_num * tokens_per_iter,
- "loss/train": losses["train"],
- "loss/val": losses["val"],
- "lr": lr,
- "mfu": running_mfu * 100, # convert to percentage
- }
- )
- except Exception as e:
- print(f"logging to wandb failed: {e}")
- if losses["val"] < best_val_loss or always_save_checkpoint:
- best_val_loss = losses["val"]
- if iter_num > 0:
- checkpoint = {
- "model": raw_model.state_dict(),
- "optimizer": optimizer.state_dict(),
- "model_args": model_args,
- "iter_num": iter_num,
- "best_val_loss": best_val_loss,
- "config": config,
- }
- print(f"saving checkpoint to {out_dir}")
- torch.save(checkpoint, os.path.join(out_dir, "ckpt.pt"))
- raw_model.export(os.path.join(out_dir, "model.bin"))
- if iter_num == 0 and eval_only:
- break
-
- # forward backward update, with optional gradient accumulation to simulate larger batch size
- # and using the GradScaler if data type is float16
- for micro_step in range(gradient_accumulation_steps):
- if ddp:
- # in DDP training we only need to sync gradients at the last micro step.
- # the official way to do this is with model.no_sync() context manager, but
- # I really dislike that this bloats the code and forces us to repeat code
- # looking at the source of that context manager, it just toggles this variable
- model.require_backward_grad_sync = micro_step == gradient_accumulation_steps - 1
- with ctx:
- logits, loss = model(X, Y)
- loss = loss / gradient_accumulation_steps
- # immediately async prefetch next batch while model is doing the forward pass on the GPU
- X, Y = next(train_batch_iter)
- # backward pass, with gradient scaling if training in fp16
- scaler.scale(loss).backward()
- # clip the gradient
- if grad_clip != 0.0:
- scaler.unscale_(optimizer)
- torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
- # step the optimizer and scaler if training in fp16
- scaler.step(optimizer)
- scaler.update()
- # flush the gradients as soon as we can, no need for this memory anymore
- optimizer.zero_grad(set_to_none=True)
-
- # timing and logging
- t1 = time.time()
- dt = t1 - t0
- t0 = t1
- if iter_num % log_interval == 0 and master_process:
- # get loss as float, scale up due to the divide above. note: this is a CPU-GPU sync point
- lossf = loss.item() * gradient_accumulation_steps
- if local_iter_num >= 5: # let the training loop settle a bit
- mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt)
- running_mfu = mfu if running_mfu == -1.0 else 0.9 * running_mfu + 0.1 * mfu
- print(
- f"{iter_num} | loss {lossf:.4f} | lr {lr:e} | {dt*1000:.2f}ms | mfu {running_mfu*100:.2f}%"
- )
- iter_num += 1
- local_iter_num += 1
-
- # termination conditions
- if iter_num > max_iters:
- break
-
-if ddp:
- destroy_process_group()
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py
deleted file mode 100644
index be777123a886503172a95fe0719e956a147bbd68..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# model settings
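-# EncNet over a dilated ResNet-50 backbone (strides (1, 2, 1, 1) and dilations
-# (1, 1, 2, 4) give an output stride of 8): the EncHead consumes the last three
-# backbone stages, learns 32 codewords and adds a 0.2-weighted SE loss, while an
-# auxiliary FCN head on the third stage contributes a 0.4-weighted cross-entropy loss.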
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='EncHead',
- in_channels=[512, 1024, 2048],
- in_index=(1, 2, 3),
- channels=512,
- num_codes=32,
- use_se_loss=True,
- add_lateral=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_se_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/backbones/trident_resnet.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/backbones/trident_resnet.py
deleted file mode 100644
index e6100132b0f4120585da8a309cba4488b4b0ea72..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/backbones/trident_resnet.py
+++ /dev/null
@@ -1,292 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as cp
-from mmcv.cnn import build_conv_layer, build_norm_layer, kaiming_init
-from torch.nn.modules.utils import _pair
-
-from mmdet.models.backbones.resnet import Bottleneck, ResNet
-from mmdet.models.builder import BACKBONES
-
-
-class TridentConv(nn.Module):
- """Trident Convolution Module.
-
- Args:
- in_channels (int): Number of channels in input.
- out_channels (int): Number of channels in output.
- kernel_size (int): Size of convolution kernel.
- stride (int, optional): Convolution stride. Default: 1.
- trident_dilations (tuple[int, int, int], optional): Dilations of
- different trident branch. Default: (1, 2, 3).
- test_branch_idx (int, optional): In inference, all 3 branches will
- be used if `test_branch_idx==-1`, otherwise only branch with
- index `test_branch_idx` will be used. Default: 1.
- bias (bool, optional): Whether to use bias in convolution or not.
- Default: False.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- trident_dilations=(1, 2, 3),
- test_branch_idx=1,
- bias=False):
- super(TridentConv, self).__init__()
- self.num_branch = len(trident_dilations)
- self.with_bias = bias
- self.test_branch_idx = test_branch_idx
- self.stride = _pair(stride)
- self.kernel_size = _pair(kernel_size)
- self.paddings = _pair(trident_dilations)
- self.dilations = trident_dilations
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.bias = bias
-
- self.weight = nn.Parameter(
- torch.Tensor(out_channels, in_channels, *self.kernel_size))
- if bias:
- self.bias = nn.Parameter(torch.Tensor(out_channels))
- else:
- self.bias = None
- self.init_weights()
-
- def init_weights(self):
- kaiming_init(self, distribution='uniform', mode='fan_in')
-
- def extra_repr(self):
- tmpstr = f'in_channels={self.in_channels}'
- tmpstr += f', out_channels={self.out_channels}'
- tmpstr += f', kernel_size={self.kernel_size}'
- tmpstr += f', num_branch={self.num_branch}'
- tmpstr += f', test_branch_idx={self.test_branch_idx}'
- tmpstr += f', stride={self.stride}'
- tmpstr += f', paddings={self.paddings}'
- tmpstr += f', dilations={self.dilations}'
- tmpstr += f', bias={self.bias}'
- return tmpstr
-
- def forward(self, inputs):
- if self.training or self.test_branch_idx == -1:
- outputs = [
- F.conv2d(input, self.weight, self.bias, self.stride, padding,
- dilation) for input, dilation, padding in zip(
- inputs, self.dilations, self.paddings)
- ]
- else:
- assert len(inputs) == 1
- outputs = [
- F.conv2d(inputs[0], self.weight, self.bias, self.stride,
- self.paddings[self.test_branch_idx],
- self.dilations[self.test_branch_idx])
- ]
-
- return outputs
-
-
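
The essential trick in `TridentConv` above is that every branch applies the same weight tensor and only the dilation (with matching padding) changes. A self-contained sketch of that weight sharing using plain `torch.nn.functional.conv2d`; the tensor sizes are arbitrary illustration values:

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 32, 32)       # a single input, reused for all three branches
weight = torch.randn(16, 8, 3, 3)   # one shared 3x3 kernel bank

# Each branch uses the same weight with a different dilation; padding == dilation
# keeps the spatial size unchanged for a 3x3 kernel at stride 1.
branches = [
    F.conv2d(x, weight, bias=None, stride=1, padding=d, dilation=d)
    for d in (1, 2, 3)
]
print([tuple(b.shape) for b in branches])  # three outputs of shape (1, 16, 32, 32)
```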
-# Since TridentNet is defined over ResNet50 and ResNet101, here we
-# only support TridentBottleneckBlock.
-class TridentBottleneck(Bottleneck):
- """BottleBlock for TridentResNet.
-
- Args:
- trident_dilations (tuple[int, int, int]): Dilations of different
- trident branch.
- test_branch_idx (int): In inference, all 3 branches will be used
- if `test_branch_idx==-1`, otherwise only branch with index
- `test_branch_idx` will be used.
- concat_output (bool): Whether to concat the output list to a Tensor.
- `True` only in the last Block.
- """
-
- def __init__(self, trident_dilations, test_branch_idx, concat_output,
- **kwargs):
-
- super(TridentBottleneck, self).__init__(**kwargs)
- self.trident_dilations = trident_dilations
- self.num_branch = len(trident_dilations)
- self.concat_output = concat_output
- self.test_branch_idx = test_branch_idx
- self.conv2 = TridentConv(
- self.planes,
- self.planes,
- kernel_size=3,
- stride=self.conv2_stride,
- bias=False,
- trident_dilations=self.trident_dilations,
- test_branch_idx=test_branch_idx)
-
- def forward(self, x):
-
- def _inner_forward(x):
- num_branch = (
- self.num_branch
- if self.training or self.test_branch_idx == -1 else 1)
- identity = x
- if not isinstance(x, list):
- x = (x, ) * num_branch
- identity = x
- if self.downsample is not None:
- identity = [self.downsample(b) for b in x]
-
- out = [self.conv1(b) for b in x]
- out = [self.norm1(b) for b in out]
- out = [self.relu(b) for b in out]
-
- if self.with_plugins:
- for k in range(len(out)):
- out[k] = self.forward_plugin(out[k],
- self.after_conv1_plugin_names)
-
- out = self.conv2(out)
- out = [self.norm2(b) for b in out]
- out = [self.relu(b) for b in out]
- if self.with_plugins:
- for k in range(len(out)):
- out[k] = self.forward_plugin(out[k],
- self.after_conv2_plugin_names)
-
- out = [self.conv3(b) for b in out]
- out = [self.norm3(b) for b in out]
-
- if self.with_plugins:
- for k in range(len(out)):
- out[k] = self.forward_plugin(out[k],
- self.after_conv3_plugin_names)
-
- out = [
- out_b + identity_b for out_b, identity_b in zip(out, identity)
- ]
- return out
-
- if self.with_cp and x.requires_grad:
- out = cp.checkpoint(_inner_forward, x)
- else:
- out = _inner_forward(x)
-
- out = [self.relu(b) for b in out]
- if self.concat_output:
- out = torch.cat(out, dim=0)
- return out
-
-
-def make_trident_res_layer(block,
- inplanes,
- planes,
- num_blocks,
- stride=1,
- trident_dilations=(1, 2, 3),
- style='pytorch',
- with_cp=False,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- dcn=None,
- plugins=None,
- test_branch_idx=-1):
- """Build Trident Res Layers."""
-
- downsample = None
- if stride != 1 or inplanes != planes * block.expansion:
- downsample = []
- conv_stride = stride
- downsample.extend([
- build_conv_layer(
- conv_cfg,
- inplanes,
- planes * block.expansion,
- kernel_size=1,
- stride=conv_stride,
- bias=False),
- build_norm_layer(norm_cfg, planes * block.expansion)[1]
- ])
- downsample = nn.Sequential(*downsample)
-
- layers = []
- for i in range(num_blocks):
- layers.append(
- block(
- inplanes=inplanes,
- planes=planes,
- stride=stride if i == 0 else 1,
- trident_dilations=trident_dilations,
- downsample=downsample if i == 0 else None,
- style=style,
- with_cp=with_cp,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- dcn=dcn,
- plugins=plugins,
- test_branch_idx=test_branch_idx,
- concat_output=True if i == num_blocks - 1 else False))
- inplanes = planes * block.expansion
- return nn.Sequential(*layers)
-
-
-@BACKBONES.register_module()
-class TridentResNet(ResNet):
- """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
- ResNet, while in stage 3, Trident BottleBlock is utilized to replace the
- normal BottleBlock to yield trident output. The branches share the
- convolution weights but use different dilations to achieve multi-scale
- output.
-
- / stage3(b0) \
- x - stem - stage1 - stage2 - stage3(b1) - output
- \ stage3(b2) /
-
- Args:
- depth (int): Depth of resnet, from {50, 101, 152}.
- num_branch (int): Number of branches in TridentNet.
- test_branch_idx (int): In inference, all 3 branches will be used
- if `test_branch_idx==-1`, otherwise only branch with index
- `test_branch_idx` will be used.
- trident_dilations (tuple[int]): Dilations of different trident branch.
- len(trident_dilations) should be equal to num_branch.
- """ # noqa
-
- def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
- **kwargs):
-
- assert num_branch == len(trident_dilations)
- assert depth in (50, 101, 152)
- super(TridentResNet, self).__init__(depth, **kwargs)
- assert self.num_stages == 3
- self.test_branch_idx = test_branch_idx
- self.num_branch = num_branch
-
- last_stage_idx = self.num_stages - 1
- stride = self.strides[last_stage_idx]
- dilation = trident_dilations
- dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
- if self.plugins is not None:
- stage_plugins = self.make_stage_plugins(self.plugins,
- last_stage_idx)
- else:
- stage_plugins = None
- planes = self.base_channels * 2**last_stage_idx
- res_layer = make_trident_res_layer(
- TridentBottleneck,
- inplanes=(self.block.expansion * self.base_channels *
- 2**(last_stage_idx - 1)),
- planes=planes,
- num_blocks=self.stage_blocks[last_stage_idx],
- stride=stride,
- trident_dilations=dilation,
- style=self.style,
- with_cp=self.with_cp,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- dcn=dcn,
- plugins=stage_plugins,
- test_branch_idx=self.test_branch_idx)
-
- layer_name = f'layer{last_stage_idx + 1}'
-
- self.__setattr__(layer_name, res_layer)
- self.res_layers.pop(last_stage_idx)
- self.res_layers.insert(last_stage_idx, layer_name)
-
- self._freeze_stages()
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/arraymisc/quantization.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/arraymisc/quantization.py
deleted file mode 100644
index 8e47a3545780cf071a1ef8195efb0b7b662c8186..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/arraymisc/quantization.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import numpy as np
-
-
-def quantize(arr, min_val, max_val, levels, dtype=np.int64):
- """Quantize an array of (-inf, inf) to [0, levels-1].
-
- Args:
- arr (ndarray): Input array.
- min_val (scalar): Minimum value to be clipped.
- max_val (scalar): Maximum value to be clipped.
- levels (int): Quantization levels.
- dtype (np.type): The type of the quantized array.
-
- Returns:
- ndarray: Quantized array.
- """
- if not (isinstance(levels, int) and levels > 1):
- raise ValueError(
- f'levels must be an integer greater than 1, but got {levels}')
- if min_val >= max_val:
- raise ValueError(
- f'min_val ({min_val}) must be smaller than max_val ({max_val})')
-
- arr = np.clip(arr, min_val, max_val) - min_val
- quantized_arr = np.minimum(
- np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1)
-
- return quantized_arr
-
-
-def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
- """Dequantize an array.
-
- Args:
- arr (ndarray): Input array.
- min_val (scalar): Minimum value to be clipped.
- max_val (scalar): Maximum value to be clipped.
- levels (int): Quantization levels.
- dtype (np.type): The type of the dequantized array.
-
- Returns:
- ndarray: Dequantized array.
- """
- if not (isinstance(levels, int) and levels > 1):
- raise ValueError(
- f'levels must be an integer greater than 1, but got {levels}')
- if min_val >= max_val:
- raise ValueError(
- f'min_val ({min_val}) must be smaller than max_val ({max_val})')
-
- dequantized_arr = (arr + 0.5).astype(dtype) * (max_val -
- min_val) / levels + min_val
-
- return dequantized_arr
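
A quick round-trip sketch of the two functions above: `quantize` clips to `[min_val, max_val]` and buckets values into `levels` integer bins, while `dequantize` maps each bin index back to the bin center. The input values are made up:

```python
import numpy as np

arr = np.array([-0.2, 0.0, 0.31, 0.49, 0.99, 1.4])

q = quantize(arr, min_val=0.0, max_val=1.0, levels=10)
r = dequantize(q, min_val=0.0, max_val=1.0, levels=10)

print(q)  # integer bin indices in [0, 9]; out-of-range inputs were clipped first
print(r)  # bin centers: with these arguments, bin k maps back to (k + 0.5) / 10
```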
diff --git a/spaces/Sakil/essay_generator_app/app.py b/spaces/Sakil/essay_generator_app/app.py
deleted file mode 100644
index 9a7be631df20633140d3b49231f8d21fc4f3ea42..0000000000000000000000000000000000000000
--- a/spaces/Sakil/essay_generator_app/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-generator = pipeline("text-generation", model="distilgpt2")
-def essay_generation(input_text):
- return generator(input_text, max_length=250)[0]["generated_text"]
-iface = gr.Interface(essay_generation, inputs="text", outputs="text", title='Essay Generator App', theme="dark-peach", examples=["What is data science?", "What is Natural Language Processing?", "What is time series?"], description="This app generates up to 250 tokens of essay-style text from a prompt.")
-iface.launch(inline=False)
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/tests/test_oinspect.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/tests/test_oinspect.py
deleted file mode 100644
index fa0c69414fd802f88ad4f4b95940a27b9388e548..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/tests/test_oinspect.py
+++ /dev/null
@@ -1,579 +0,0 @@
-"""Tests for the object inspection functionality.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-
-from contextlib import contextmanager
-from inspect import signature, Signature, Parameter
-import inspect
-import os
-import pytest
-import re
-import sys
-
-from .. import oinspect
-
-from decorator import decorator
-
-from IPython.testing.tools import AssertPrints, AssertNotPrints
-from IPython.utils.path import compress_user
-
-
-#-----------------------------------------------------------------------------
-# Globals and constants
-#-----------------------------------------------------------------------------
-
-inspector = None
-
-def setup_module():
- global inspector
- inspector = oinspect.Inspector()
-
-
-class SourceModuleMainTest:
- __module__ = "__main__"
-
-
-#-----------------------------------------------------------------------------
-# Local utilities
-#-----------------------------------------------------------------------------
-
-# WARNING: since this test checks the line number where a function is
-# defined, if any code is inserted above, the following line will need to be
-# updated. Do NOT insert any whitespace between the next line and the function
-# definition below.
-THIS_LINE_NUMBER = 47 # Put here the actual number of this line
-
-
-def test_find_source_lines():
- assert oinspect.find_source_lines(test_find_source_lines) == THIS_LINE_NUMBER + 3
- assert oinspect.find_source_lines(type) is None
- assert oinspect.find_source_lines(SourceModuleMainTest) is None
- assert oinspect.find_source_lines(SourceModuleMainTest()) is None
-
-
-def test_getsource():
- assert oinspect.getsource(type) is None
- assert oinspect.getsource(SourceModuleMainTest) is None
- assert oinspect.getsource(SourceModuleMainTest()) is None
-
-
-def test_inspect_getfile_raises_exception():
- """Check oinspect.find_file/getsource/find_source_lines expectations"""
- with pytest.raises(TypeError):
- inspect.getfile(type)
- with pytest.raises(OSError if sys.version_info >= (3, 10) else TypeError):
- inspect.getfile(SourceModuleMainTest)
-
-
-# A couple of utilities to ensure these tests work the same from a source or a
-# binary install
-def pyfile(fname):
- return os.path.normcase(re.sub('.py[co]$', '.py', fname))
-
-
-def match_pyfiles(f1, f2):
- assert pyfile(f1) == pyfile(f2)
-
-
-def test_find_file():
- match_pyfiles(oinspect.find_file(test_find_file), os.path.abspath(__file__))
- assert oinspect.find_file(type) is None
- assert oinspect.find_file(SourceModuleMainTest) is None
- assert oinspect.find_file(SourceModuleMainTest()) is None
-
-
-def test_find_file_decorated1():
-
- @decorator
- def noop1(f):
- def wrapper(*a, **kw):
- return f(*a, **kw)
- return wrapper
-
- @noop1
- def f(x):
- "My docstring"
-
- match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
- assert f.__doc__ == "My docstring"
-
-
-def test_find_file_decorated2():
-
- @decorator
- def noop2(f, *a, **kw):
- return f(*a, **kw)
-
- @noop2
- @noop2
- @noop2
- def f(x):
- "My docstring 2"
-
- match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
- assert f.__doc__ == "My docstring 2"
-
-
-def test_find_file_magic():
- run = ip.find_line_magic('run')
- assert oinspect.find_file(run) is not None
-
-
-# A few generic objects we can then inspect in the tests below
-
-class Call(object):
- """This is the class docstring."""
-
- def __init__(self, x, y=1):
- """This is the constructor docstring."""
-
- def __call__(self, *a, **kw):
- """This is the call docstring."""
-
- def method(self, x, z=2):
- """Some method's docstring"""
-
-class HasSignature(object):
- """This is the class docstring."""
- __signature__ = Signature([Parameter('test', Parameter.POSITIONAL_OR_KEYWORD)])
-
- def __init__(self, *args):
- """This is the init docstring"""
-
-
-class SimpleClass(object):
- def method(self, x, z=2):
- """Some method's docstring"""
-
-
-class Awkward(object):
- def __getattr__(self, name):
- raise Exception(name)
-
-class NoBoolCall:
- """
- callable with `__bool__` raising should still be inspect-able.
- """
-
- def __call__(self):
- """does nothing"""
- pass
-
- def __bool__(self):
- """just raise NotImplemented"""
- raise NotImplementedError('Must be implemented')
-
-
-class SerialLiar(object):
- """Attribute accesses always get another copy of the same class.
-
- unittest.mock.call does something similar, but it's not ideal for testing
- as the failure mode is to eat all your RAM. This gives up after 10k levels.
- """
- def __init__(self, max_fibbing_twig, lies_told=0):
- if lies_told > 10000:
- raise RuntimeError('Nose too long, honesty is the best policy')
- self.max_fibbing_twig = max_fibbing_twig
- self.lies_told = lies_told
- max_fibbing_twig[0] = max(max_fibbing_twig[0], lies_told)
-
- def __getattr__(self, item):
- return SerialLiar(self.max_fibbing_twig, self.lies_told + 1)
-
-#-----------------------------------------------------------------------------
-# Tests
-#-----------------------------------------------------------------------------
-
-def test_info():
- "Check that Inspector.info fills out various fields as expected."
- i = inspector.info(Call, oname="Call")
- assert i["type_name"] == "type"
- expected_class = str(type(type)) # <class 'type'> (Python 3) or <type 'type'>
- assert i["base_class"] == expected_class
- assert re.search(
- "<class 'IPython.core.tests.test_oinspect.Call'>",
- i["string_form"],
- )
- fname = __file__
- if fname.endswith(".pyc"):
- fname = fname[:-1]
- # case-insensitive comparison needed on some filesystems
- # e.g. Windows:
- assert i["file"].lower() == compress_user(fname).lower()
- assert i["definition"] == None
- assert i["docstring"] == Call.__doc__
- assert i["source"] == None
- assert i["isclass"] is True
- assert i["init_definition"] == "Call(x, y=1)"
- assert i["init_docstring"] == Call.__init__.__doc__
-
- i = inspector.info(Call, detail_level=1)
- assert i["source"] is not None
- assert i["docstring"] == None
-
- c = Call(1)
- c.__doc__ = "Modified instance docstring"
- i = inspector.info(c)
- assert i["type_name"] == "Call"
- assert i["docstring"] == "Modified instance docstring"
- assert i["class_docstring"] == Call.__doc__
- assert i["init_docstring"] == Call.__init__.__doc__
- assert i["call_docstring"] == Call.__call__.__doc__
-
-
-def test_class_signature():
- info = inspector.info(HasSignature, "HasSignature")
- assert info["init_definition"] == "HasSignature(test)"
- assert info["init_docstring"] == HasSignature.__init__.__doc__
-
-
-def test_info_awkward():
- # Just test that this doesn't throw an error.
- inspector.info(Awkward())
-
-def test_bool_raise():
- inspector.info(NoBoolCall())
-
-def test_info_serialliar():
- fib_tracker = [0]
- inspector.info(SerialLiar(fib_tracker))
-
- # Nested attribute access should be cut off at 100 levels deep to avoid
- # infinite loops: https://github.com/ipython/ipython/issues/9122
- assert fib_tracker[0] < 9000
-
-def support_function_one(x, y=2, *a, **kw):
- """A simple function."""
-
-def test_calldef_none():
- # We should ignore __call__ for all of these.
- for obj in [support_function_one, SimpleClass().method, any, str.upper]:
- i = inspector.info(obj)
- assert i["call_def"] is None
-
-
-def f_kwarg(pos, *, kwonly):
- pass
-
-def test_definition_kwonlyargs():
- i = inspector.info(f_kwarg, oname="f_kwarg") # analysis:ignore
- assert i["definition"] == "f_kwarg(pos, *, kwonly)"
-
-
-def test_getdoc():
- class A(object):
- """standard docstring"""
- pass
-
- class B(object):
- """standard docstring"""
- def getdoc(self):
- return "custom docstring"
-
- class C(object):
- """standard docstring"""
- def getdoc(self):
- return None
-
- a = A()
- b = B()
- c = C()
-
- assert oinspect.getdoc(a) == "standard docstring"
- assert oinspect.getdoc(b) == "custom docstring"
- assert oinspect.getdoc(c) == "standard docstring"
-
-
-def test_empty_property_has_no_source():
- i = inspector.info(property(), detail_level=1)
- assert i["source"] is None
-
-
-def test_property_sources():
- # A simple adder whose source and signature stays
- # the same across Python distributions
- def simple_add(a, b):
- "Adds two numbers"
- return a + b
-
- class A(object):
- @property
- def foo(self):
- return 'bar'
-
- foo = foo.setter(lambda self, v: setattr(self, 'bar', v))
-
- dname = property(oinspect.getdoc)
- adder = property(simple_add)
-
- i = inspector.info(A.foo, detail_level=1)
- assert "def foo(self):" in i["source"]
- assert "lambda self, v:" in i["source"]
-
- i = inspector.info(A.dname, detail_level=1)
- assert "def getdoc(obj)" in i["source"]
-
- i = inspector.info(A.adder, detail_level=1)
- assert "def simple_add(a, b)" in i["source"]
-
-
-def test_property_docstring_is_in_info_for_detail_level_0():
- class A(object):
- @property
- def foobar(self):
- """This is `foobar` property."""
- pass
-
- ip.user_ns["a_obj"] = A()
- assert (
- "This is `foobar` property."
- == ip.object_inspect("a_obj.foobar", detail_level=0)["docstring"]
- )
-
- ip.user_ns["a_cls"] = A
- assert (
- "This is `foobar` property."
- == ip.object_inspect("a_cls.foobar", detail_level=0)["docstring"]
- )
-
-
-def test_pdef():
- # See gh-1914
- def foo(): pass
- inspector.pdef(foo, 'foo')
-
-
-@contextmanager
-def cleanup_user_ns(**kwargs):
- """
- On exit delete all the keys that were not in user_ns before entering.
-
- It does not restore old values!
-
- Parameters
- ----------
-
- **kwargs
- used to update ip.user_ns
-
- """
- try:
- known = set(ip.user_ns.keys())
- ip.user_ns.update(kwargs)
- yield
- finally:
- added = set(ip.user_ns.keys()) - known
- for k in added:
- del ip.user_ns[k]
-
-
-def test_pinfo_bool_raise():
- """
- Test that bool method is not called on parent.
- """
-
- class RaiseBool:
- attr = None
-
- def __bool__(self):
- raise ValueError("pinfo should not access this method")
-
- raise_bool = RaiseBool()
-
- with cleanup_user_ns(raise_bool=raise_bool):
- ip._inspect("pinfo", "raise_bool.attr", detail_level=0)
-
-
-def test_pinfo_getindex():
- def dummy():
- """
- MARKER
- """
-
- container = [dummy]
- with cleanup_user_ns(container=container):
- with AssertPrints("MARKER"):
- ip._inspect("pinfo", "container[0]", detail_level=0)
- assert "container" not in ip.user_ns.keys()
-
-
-def test_qmark_getindex():
- def dummy():
- """
- MARKER 2
- """
-
- container = [dummy]
- with cleanup_user_ns(container=container):
- with AssertPrints("MARKER 2"):
- ip.run_cell("container[0]?")
- assert "container" not in ip.user_ns.keys()
-
-
-def test_qmark_getindex_negatif():
- def dummy():
- """
- MARKER 3
- """
-
- container = [dummy]
- with cleanup_user_ns(container=container):
- with AssertPrints("MARKER 3"):
- ip.run_cell("container[-1]?")
- assert "container" not in ip.user_ns.keys()
-
-
-
-def test_pinfo_nonascii():
- # See gh-1177
- from . import nonascii2
- ip.user_ns['nonascii2'] = nonascii2
- ip._inspect('pinfo', 'nonascii2', detail_level=1)
-
-def test_pinfo_type():
- """
- type can fail in various edge cases, for example `type.__subclass__()`
- """
- ip._inspect('pinfo', 'type')
-
-
-def test_pinfo_docstring_no_source():
- """Docstring should be included with detail_level=1 if there is no source"""
- with AssertPrints('Docstring:'):
- ip._inspect('pinfo', 'str.format', detail_level=0)
- with AssertPrints('Docstring:'):
- ip._inspect('pinfo', 'str.format', detail_level=1)
-
-
-def test_pinfo_no_docstring_if_source():
- """Docstring should not be included with detail_level=1 if source is found"""
- def foo():
- """foo has a docstring"""
-
- ip.user_ns['foo'] = foo
-
- with AssertPrints('Docstring:'):
- ip._inspect('pinfo', 'foo', detail_level=0)
- with AssertPrints('Source:'):
- ip._inspect('pinfo', 'foo', detail_level=1)
- with AssertNotPrints('Docstring:'):
- ip._inspect('pinfo', 'foo', detail_level=1)
-
-
-def test_pinfo_docstring_if_detail_and_no_source():
- """ Docstring should be displayed if source info not available """
- obj_def = '''class Foo(object):
- """ This is a docstring for Foo """
- def bar(self):
- """ This is a docstring for Foo.bar """
- pass
- '''
-
- ip.run_cell(obj_def)
- ip.run_cell('foo = Foo()')
-
- with AssertNotPrints("Source:"):
- with AssertPrints('Docstring:'):
- ip._inspect('pinfo', 'foo', detail_level=0)
- with AssertPrints('Docstring:'):
- ip._inspect('pinfo', 'foo', detail_level=1)
- with AssertPrints('Docstring:'):
- ip._inspect('pinfo', 'foo.bar', detail_level=0)
-
- with AssertNotPrints('Docstring:'):
- with AssertPrints('Source:'):
- ip._inspect('pinfo', 'foo.bar', detail_level=1)
-
-
-def test_pinfo_docstring_dynamic():
- obj_def = """class Bar:
- __custom_documentations__ = {
- "prop" : "cdoc for prop",
- "non_exist" : "cdoc for non_exist",
- }
- @property
- def prop(self):
- '''
- Docstring for prop
- '''
- return self._prop
-
- @prop.setter
- def prop(self, v):
- self._prop = v
- """
- ip.run_cell(obj_def)
-
- ip.run_cell("b = Bar()")
-
- with AssertPrints("Docstring: cdoc for prop"):
- ip.run_line_magic("pinfo", "b.prop")
-
- with AssertPrints("Docstring: cdoc for non_exist"):
- ip.run_line_magic("pinfo", "b.non_exist")
-
- with AssertPrints("Docstring: cdoc for prop"):
- ip.run_cell("b.prop?")
-
- with AssertPrints("Docstring: cdoc for non_exist"):
- ip.run_cell("b.non_exist?")
-
- with AssertPrints("Docstring: "):
- ip.run_cell("b.undefined?")
-
-
-def test_pinfo_magic():
- with AssertPrints("Docstring:"):
- ip._inspect("pinfo", "lsmagic", detail_level=0)
-
- with AssertPrints("Source:"):
- ip._inspect("pinfo", "lsmagic", detail_level=1)
-
-
-def test_init_colors():
- # ensure colors are not present in signature info
- info = inspector.info(HasSignature)
- init_def = info["init_definition"]
- assert "[0m" not in init_def
-
-
-def test_builtin_init():
- info = inspector.info(list)
- init_def = info['init_definition']
- assert init_def is not None
-
-
-def test_render_signature_short():
- def short_fun(a=1): pass
- sig = oinspect._render_signature(
- signature(short_fun),
- short_fun.__name__,
- )
- assert sig == "short_fun(a=1)"
-
-
-def test_render_signature_long():
- from typing import Optional
-
- def long_function(
- a_really_long_parameter: int,
- and_another_long_one: bool = False,
- let_us_make_sure_this_is_looong: Optional[str] = None,
- ) -> bool: pass
-
- sig = oinspect._render_signature(
- signature(long_function),
- long_function.__name__,
- )
- expected = """\
-long_function(
- a_really_long_parameter: int,
- and_another_long_one: bool = False,
- let_us_make_sure_this_is_looong: Optional[str] = None,
-) -> bool\
-"""
-
- assert sig == expected
diff --git a/spaces/Suniilkumaar/SwapMukham/upscaler/RealESRGAN/arch_utils.py b/spaces/Suniilkumaar/SwapMukham/upscaler/RealESRGAN/arch_utils.py
deleted file mode 100644
index 90e18463b983f645e0bd189d55ade4b627c5418e..0000000000000000000000000000000000000000
--- a/spaces/Suniilkumaar/SwapMukham/upscaler/RealESRGAN/arch_utils.py
+++ /dev/null
@@ -1,197 +0,0 @@
-import math
-import torch
-from torch import nn as nn
-from torch.nn import functional as F
-from torch.nn import init as init
-from torch.nn.modules.batchnorm import _BatchNorm
-
-@torch.no_grad()
-def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
- """Initialize network weights.
-
- Args:
- module_list (list[nn.Module] | nn.Module): Modules to be initialized.
- scale (float): Scale initialized weights, especially for residual
- blocks. Default: 1.
- bias_fill (float): The value to fill bias. Default: 0
- kwargs (dict): Other arguments for initialization function.
- """
- if not isinstance(module_list, list):
- module_list = [module_list]
- for module in module_list:
- for m in module.modules():
- if isinstance(m, nn.Conv2d):
- init.kaiming_normal_(m.weight, **kwargs)
- m.weight.data *= scale
- if m.bias is not None:
- m.bias.data.fill_(bias_fill)
- elif isinstance(m, nn.Linear):
- init.kaiming_normal_(m.weight, **kwargs)
- m.weight.data *= scale
- if m.bias is not None:
- m.bias.data.fill_(bias_fill)
- elif isinstance(m, _BatchNorm):
- init.constant_(m.weight, 1)
- if m.bias is not None:
- m.bias.data.fill_(bias_fill)
-
-
-def make_layer(basic_block, num_basic_block, **kwarg):
- """Make layers by stacking the same blocks.
-
- Args:
- basic_block (nn.module): nn.module class for basic block.
- num_basic_block (int): number of blocks.
-
- Returns:
- nn.Sequential: Stacked blocks in nn.Sequential.
- """
- layers = []
- for _ in range(num_basic_block):
- layers.append(basic_block(**kwarg))
- return nn.Sequential(*layers)
-
-
-class ResidualBlockNoBN(nn.Module):
- """Residual block without BN.
-
- It has a style of:
- ---Conv-ReLU-Conv-+-
- |________________|
-
- Args:
- num_feat (int): Channel number of intermediate features.
- Default: 64.
- res_scale (float): Residual scale. Default: 1.
- pytorch_init (bool): If set to True, use pytorch default init,
- otherwise, use default_init_weights. Default: False.
- """
-
- def __init__(self, num_feat=64, res_scale=1, pytorch_init=False):
- super(ResidualBlockNoBN, self).__init__()
- self.res_scale = res_scale
- self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
- self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
- self.relu = nn.ReLU(inplace=True)
-
- if not pytorch_init:
- default_init_weights([self.conv1, self.conv2], 0.1)
-
- def forward(self, x):
- identity = x
- out = self.conv2(self.relu(self.conv1(x)))
- return identity + out * self.res_scale
-
-
-class Upsample(nn.Sequential):
- """Upsample module.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
- """
-
- def __init__(self, scale, num_feat):
- m = []
- if (scale & (scale - 1)) == 0: # scale = 2^n
- for _ in range(int(math.log(scale, 2))):
- m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(2))
- elif scale == 3:
- m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(3))
- else:
- raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
- super(Upsample, self).__init__(*m)
-
-
-def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True):
- """Warp an image or feature map with optical flow.
-
- Args:
- x (Tensor): Tensor with size (n, c, h, w).
- flow (Tensor): Tensor with size (n, h, w, 2), normal value.
- interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'.
- padding_mode (str): 'zeros' or 'border' or 'reflection'.
- Default: 'zeros'.
- align_corners (bool): Before pytorch 1.3, the default value is
- align_corners=True. After pytorch 1.3, the default value is
- align_corners=False. Here, we use True as the default.
-
- Returns:
- Tensor: Warped image or feature map.
- """
- assert x.size()[-2:] == flow.size()[1:3]
- _, _, h, w = x.size()
- # create mesh grid
- grid_y, grid_x = torch.meshgrid(torch.arange(0, h).type_as(x), torch.arange(0, w).type_as(x))
- grid = torch.stack((grid_x, grid_y), 2).float() # W(x), H(y), 2
- grid.requires_grad = False
-
- vgrid = grid + flow
- # scale grid to [-1,1]
- vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0
- vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0
- vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
- output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode, align_corners=align_corners)
-
- # TODO, what if align_corners=False
- return output
-
-
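
A small sanity check for `flow_warp` above: an all-zero flow leaves the sampling grid untouched, so with the default bilinear interpolation and `align_corners=True` the output should match the input up to floating-point error. Shapes are arbitrary:

```python
import torch

x = torch.rand(1, 3, 8, 8)           # (n, c, h, w)
zero_flow = torch.zeros(1, 8, 8, 2)  # (n, h, w, 2): no displacement anywhere

warped = flow_warp(x, zero_flow)     # defaults: bilinear, zeros padding, align_corners=True
print(torch.allclose(warped, x, atol=1e-5))  # expected: True
```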
-def resize_flow(flow, size_type, sizes, interp_mode='bilinear', align_corners=False):
- """Resize a flow according to ratio or shape.
-
- Args:
- flow (Tensor): Precomputed flow. shape [N, 2, H, W].
- size_type (str): 'ratio' or 'shape'.
- sizes (list[int | float]): the ratio for resizing or the final output
- shape.
- 1) The order of ratio should be [ratio_h, ratio_w]. For
- downsampling, the ratio should be smaller than 1.0 (i.e., ratio
- < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e.,
- ratio > 1.0).
- 2) The order of output_size should be [out_h, out_w].
- interp_mode (str): The mode of interpolation for resizing.
- Default: 'bilinear'.
- align_corners (bool): Whether align corners. Default: False.
-
- Returns:
- Tensor: Resized flow.
- """
- _, _, flow_h, flow_w = flow.size()
- if size_type == 'ratio':
- output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1])
- elif size_type == 'shape':
- output_h, output_w = sizes[0], sizes[1]
- else:
- raise ValueError(f'Size type should be ratio or shape, but got type {size_type}.')
-
- input_flow = flow.clone()
- ratio_h = output_h / flow_h
- ratio_w = output_w / flow_w
- input_flow[:, 0, :, :] *= ratio_w
- input_flow[:, 1, :, :] *= ratio_h
- resized_flow = F.interpolate(
- input=input_flow, size=(output_h, output_w), mode=interp_mode, align_corners=align_corners)
- return resized_flow
-
-
-# TODO: may write a cpp file
-def pixel_unshuffle(x, scale):
- """ Pixel unshuffle.
-
- Args:
- x (Tensor): Input feature with shape (b, c, hh, hw).
- scale (int): Downsample ratio.
-
- Returns:
- Tensor: the pixel unshuffled feature.
- """
- b, c, hh, hw = x.size()
- out_channel = c * (scale**2)
- assert hh % scale == 0 and hw % scale == 0
- h = hh // scale
- w = hw // scale
- x_view = x.view(b, c, h, scale, w, scale)
- return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
\ No newline at end of file
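
`pixel_unshuffle` above trades spatial resolution for channels: an input of shape `(b, c, h*s, w*s)` becomes `(b, c*s**2, h, w)`. A shape-only sketch with illustrative sizes:

```python
import torch

x = torch.randn(2, 3, 64, 64)
y = pixel_unshuffle(x, scale=2)
print(tuple(y.shape))  # (2, 12, 32, 32): channels grow by scale**2, spatial dims shrink by scale
```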
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/parallel/distributed_deprecated.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/parallel/distributed_deprecated.py
deleted file mode 100644
index 676937a2085d4da20fa87923041a200fca6214eb..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/parallel/distributed_deprecated.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.distributed as dist
-import torch.nn as nn
-from torch._utils import (_flatten_dense_tensors, _take_tensors,
- _unflatten_dense_tensors)
-
-from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
-from .registry import MODULE_WRAPPERS
-from .scatter_gather import scatter_kwargs
-
-
-@MODULE_WRAPPERS.register_module()
-class MMDistributedDataParallel(nn.Module):
-
- def __init__(self,
- module,
- dim=0,
- broadcast_buffers=True,
- bucket_cap_mb=25):
- super(MMDistributedDataParallel, self).__init__()
- self.module = module
- self.dim = dim
- self.broadcast_buffers = broadcast_buffers
-
- self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024
- self._sync_params()
-
- def _dist_broadcast_coalesced(self, tensors, buffer_size):
- for tensors in _take_tensors(tensors, buffer_size):
- flat_tensors = _flatten_dense_tensors(tensors)
- dist.broadcast(flat_tensors, 0)
- for tensor, synced in zip(
- tensors, _unflatten_dense_tensors(flat_tensors, tensors)):
- tensor.copy_(synced)
-
- def _sync_params(self):
- module_states = list(self.module.state_dict().values())
- if len(module_states) > 0:
- self._dist_broadcast_coalesced(module_states,
- self.broadcast_bucket_size)
- if self.broadcast_buffers:
- if (TORCH_VERSION != 'parrots'
- and digit_version(TORCH_VERSION) < digit_version('1.0')):
- buffers = [b.data for b in self.module._all_buffers()]
- else:
- buffers = [b.data for b in self.module.buffers()]
- if len(buffers) > 0:
- self._dist_broadcast_coalesced(buffers,
- self.broadcast_bucket_size)
-
- def scatter(self, inputs, kwargs, device_ids):
- return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
-
- def forward(self, *inputs, **kwargs):
- inputs, kwargs = self.scatter(inputs, kwargs,
- [torch.cuda.current_device()])
- return self.module(*inputs[0], **kwargs[0])
-
- def train_step(self, *inputs, **kwargs):
- inputs, kwargs = self.scatter(inputs, kwargs,
- [torch.cuda.current_device()])
- output = self.module.train_step(*inputs[0], **kwargs[0])
- return output
-
- def val_step(self, *inputs, **kwargs):
- inputs, kwargs = self.scatter(inputs, kwargs,
- [torch.cuda.current_device()])
- output = self.module.val_step(*inputs[0], **kwargs[0])
- return output
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/segmentors/__init__.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/segmentors/__init__.py
deleted file mode 100644
index dca2f09405330743c476e190896bee39c45498ea..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/segmentors/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .base import BaseSegmentor
-from .cascade_encoder_decoder import CascadeEncoderDecoder
-from .encoder_decoder import EncoderDecoder
-
-__all__ = ['BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder']
diff --git a/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/callbacks.py b/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/callbacks.py
deleted file mode 100644
index 3dedeb4003f37fb8794a2c80f78908e5f4e57161..0000000000000000000000000000000000000000
--- a/spaces/TRI-ML/risk_biased_prediction/risk_biased/utils/callbacks.py
+++ /dev/null
@@ -1,595 +0,0 @@
-import copy
-from dataclasses import dataclass
-
-from mmcv import Config
-import matplotlib.pyplot as plt
-import numpy as np
-from pydantic import NoneBytes
-import pytorch_lightning as pl
-import torch
-import wandb
-
-from risk_biased.scene_dataset.loaders import SceneDataLoaders
-from risk_biased.scene_dataset.scene import RandomScene, RandomSceneParams
-from risk_biased.scene_dataset.scene_plotter import ScenePlotter
-from risk_biased.utils.cost import (
- DistanceCostNumpy,
- DistanceCostParams,
- TTCCostNumpy,
- TTCCostParams,
-)
-from risk_biased.utils.risk import get_risk_level_sampler
-
-
-class SwitchTrainingModeCallback(pl.Callback):
- """
- This callback switches between CVAE training and bias training for the biased_latent_cvae_model.
- Args:
- switch_at_epoch: The epoch at which to make the switch. The CVAE is no longer trained after that point.
- """
-
- def __init__(self, switch_at_epoch: int) -> None:
- super().__init__()
- self._switch_at_epoch = switch_at_epoch
- self._train_has_started = False
-
- def on_train_start(
- self, trainer: pl.Trainer, pl_module: pl.LightningModule
- ) -> None:
- """Store the optimizer list and set the trainer to the first optimizer."""
- self._optimizers = trainer.optimizers
- trainer.optimizers = [self._optimizers[0]]
- self._train_has_started = True
-
- def on_epoch_start(
- self, trainer: pl.Trainer, pl_module: pl.LightningModule
- ) -> None:
- """
- Check if the switch should be made and if so,
- set the trainer on the second optimizer.
- """
- if trainer.current_epoch == self._switch_at_epoch and self._train_has_started:
- print("Switching to bias training.")
- pl_module.set_training_mode("bias")
- trainer.optimizers = [self._optimizers[1]]
-
-
-def get_fast_slow_scenes(params: RandomSceneParams, n_samples: int):
- """Define and return two RandomScene objects, one initialized such that slow
- pedestrians are safer and the other such that fast pedestrians are safer.
-
- Args:
- params: dataclass containing the necessary parameters for a RandomScene object
- n_samples: number of samples to draw in each scene
- """
- params = copy.deepcopy(params)
- params.batch_size = n_samples
- scene_safe_slow = RandomScene(
- params,
- is_torch=False,
- )
- percent_right = 0.8
- percent_top = 1.1
- angle = 5 * np.pi / 4
- positions = np.array([[[percent_right, percent_top]]] * n_samples)
- angles = np.array([[angle]] * n_samples)
- scene_safe_slow.set_pedestrians_states(positions, angles)
-
- scene_safe_fast = RandomScene(
- params,
- is_torch=False,
- )
- percent_right = 0.8
- percent_top = 0.6
- angle = 5 * np.pi / 4
- positions = np.array([[[percent_right, percent_top]]] * n_samples)
- angles = np.array([[angle]] * n_samples)
- scene_safe_fast.set_pedestrians_states(positions, angles)
- return scene_safe_fast, scene_safe_slow
-
-
-@dataclass
-class DrawCallbackParams:
- """
- Args:
- scene_params: dataclass parameters for the RandomScene
- dist_cost_params: dataclass parameters for the DistanceCost
- ttc_cost_params: dataclass parameters for the TTCCost
- plot_interval_epoch: number of epochs between each plot drawing
- histogram_interval_epoch: number of epochs between each histogram drawing
- num_steps: number of time steps as defined in the config
- num_steps_future: number of time steps in the future as defined in the config
- risk_distribution: dict object describing a risk distribution
- dt: time step size as defined in the config
- """
-
- scene_params: RandomSceneParams
- dist_cost_params: DistanceCostParams
- ttc_cost_params: TTCCostParams
- plot_interval_epoch: int
- histogram_interval_epoch: int
- num_steps: int
- num_steps_future: int
- risk_distribution: dict
- dt: float
-
- @staticmethod
- def from_config(cfg: Config):
- return DrawCallbackParams(
- scene_params=RandomSceneParams.from_config(cfg),
- dist_cost_params=DistanceCostParams.from_config(cfg),
- ttc_cost_params=TTCCostParams.from_config(cfg),
- plot_interval_epoch=cfg.plot_interval_epoch,
- histogram_interval_epoch=cfg.histogram_interval_epoch,
- num_steps=cfg.num_steps,
- num_steps_future=cfg.num_steps_future,
- risk_distribution=cfg.risk_distribution,
- dt=cfg.dt,
- )
-
-
-class HistogramCallback(pl.Callback):
- """Logs histograms of distances, distance cost, and TTC cost for the data, the predictions at risk_level=0, and the predictions at risk_level=1.
- Args:
- params: dataclass defining the necessary parameters
- n_samples: Number of samples to use for the histogram plot
- """
-
- def __init__(
- self,
- params: DrawCallbackParams,
- n_samples=1000,
- ):
- super().__init__()
- self.scene_safe_fast, self.scene_safe_slow = get_fast_slow_scenes(
- params.scene_params, n_samples
- )
- self.num_steps = params.num_steps
- self.n_scenes = n_samples
- self.sample_times = params.scene_params.sample_times
- self.dist_cost_func = DistanceCostNumpy(params.dist_cost_params)
- self.ttc_cost_func = TTCCostNumpy(params.ttc_cost_params)
- self.histogram_interval_epoch = params.histogram_interval_epoch
-
- self.ego_traj = self.scene_safe_fast.get_ego_ref_trajectory(self.sample_times)
-
- self._risk_sampler = get_risk_level_sampler(params.risk_distribution)
-
- def _log_scene(self, pl_module: pl.LightningModule, scene: RandomScene, name: str):
- """
- Log three histograms in WandB for the given scene: one for the data, one for the predictions at risk_level=0, and one for the predictions at risk_level=1.
- Args:
- pl_module: LightningModule object
- scene: RandomScene object
- name: name of the given scene
- """
- ped_trajs = scene.get_pedestrians_trajectories()
- device = pl_module.device
- n_agents = ped_trajs.shape[1]
-
- input_traj = ped_trajs[..., : self.num_steps, :]
-
- normalized_input, offset = SceneDataLoaders.normalize_trajectory(
- torch.from_numpy(input_traj.astype("float32")).contiguous().to(device)
- )
- mask_input = torch.ones_like(normalized_input[..., 0])
- ego_history = (
- torch.from_numpy(self.ego_traj[..., : self.num_steps, :].astype("float32"))
- .expand_as(normalized_input)
- .contiguous()
- .to(device)
- )
- ego_future = (
- torch.from_numpy(self.ego_traj[..., self.num_steps :, :].astype("float32"))
- .expand(normalized_input.shape[0], n_agents, -1, -1)
- .contiguous()
- .to(device)
- )
- map = torch.empty(ego_history.shape[0], 0, 0, 2, device=mask_input.device)
- mask_map = torch.empty(ego_history.shape[0], 0, 0, device=mask_input.device)
-
- pred_riskier = (
- pl_module.predict_step(
- (
- normalized_input,
- mask_input,
- map,
- mask_map,
- offset,
- ego_history,
- ego_future,
- ),
- 0,
- risk_level=self._risk_sampler.get_highest_risk(
- batch_size=self.n_scenes, device=device
- )
- .unsqueeze(1)
- .repeat(1, n_agents),
- )
- .cpu()
- .detach()
- .numpy()
- )
-
- pred = (
- pl_module.predict_step(
- (
- normalized_input,
- mask_input,
- map,
- mask_map,
- offset,
- ego_history,
- ego_future,
- ),
- 0,
- risk_level=None,
- )
- .cpu()
- .detach()
- .numpy()
- )
-
- ped_trajs_pred = np.concatenate((input_traj, pred), axis=-2)
- ped_trajs_pred_riskier = np.concatenate((input_traj, pred_riskier), axis=-2)
-
- travel_distances = np.sqrt(
- np.square(ped_trajs[..., -1, :] - ped_trajs[..., 0, :]).sum(-1)
- )
-
- dist_cost, dist = self.dist_cost_func(
- self.ego_traj[..., self.num_steps :, :],
- ped_trajs[..., self.num_steps :, :],
- )
-
- ttc_cost, (ttc, dist) = self.ttc_cost_func(
- self.ego_traj[..., self.num_steps :, :],
- ped_trajs[..., self.num_steps :, :],
- scene.get_ego_ref_velocity(),
- scene.get_pedestrians_velocities(),
- )
-
- travel_distances_pred = np.sqrt(
- np.square(ped_trajs_pred[..., -1, :] - ped_trajs_pred[..., 0, :]).sum(-1)
- )
- dist_cost_pred, dist_pred = self.dist_cost_func(
- self.ego_traj[..., self.num_steps :, :],
- ped_trajs_pred[..., self.num_steps :, :],
- )
- sample_times = np.array(self.sample_times)
- ped_velocities_pred = (
- ped_trajs_pred[..., 1:, :] - ped_trajs_pred[..., :-1, :]
- ) / ((sample_times[1:] - sample_times[:-1])[None, None, :, None])
- ped_velocities_pred = np.concatenate(
- (ped_velocities_pred[..., 0:1, :], ped_velocities_pred), -2
- )
- ttc_cost_pred, (ttc_pred, dist_pred) = self.ttc_cost_func(
- self.ego_traj[..., self.num_steps :, :],
- ped_trajs_pred[..., self.num_steps :, :],
- scene.get_ego_ref_velocity(),
- ped_velocities_pred[..., self.num_steps :, :],
- )
-
- travel_distances_pred_riskier = np.sqrt(
- np.square(
- ped_trajs_pred_riskier[..., -1, :] - ped_trajs_pred_riskier[..., 0, :]
- ).sum(-1)
- )
-
- dist_cost_pred_riskier, dist_pred_riskier = self.dist_cost_func(
- self.ego_traj[..., self.num_steps :, :],
- ped_trajs_pred_riskier[..., self.num_steps :, :],
- )
- sample_times = np.array(self.sample_times)
- ped_velocities_pred_riskier = (
- ped_trajs_pred_riskier[..., 1:, :] - ped_trajs_pred_riskier[..., :-1, :]
- ) / ((sample_times[1:] - sample_times[:-1])[None, None, :, None])
- ped_velocities_pred_riskier = np.concatenate(
- (ped_velocities_pred_riskier[..., 0:1, :], ped_velocities_pred_riskier), -2
- )
- ttc_cost_pred_riskier, (ttc_pred, dist_pred_riskier) = self.ttc_cost_func(
- self.ego_traj[..., self.num_steps :, :],
- ped_trajs_pred_riskier[..., self.num_steps :, :],
- scene.get_ego_ref_velocity(),
- ped_velocities_pred_riskier[..., self.num_steps :, :],
- )
- data = [
- [dist, dist_pred, dist_risk]
- for (dist, dist_pred, dist_risk) in zip(
- travel_distances.flatten(),
- travel_distances_pred.flatten(),
- travel_distances_pred_riskier.flatten(),
- )
- ]
- table_travel_distance = wandb.Table(
- data=data,
- columns=[
- "Travel distance data " + name,
- "Travel distance prediction " + name,
- "Travel distance riskier " + name,
- ],
- )
- data = [
- [cost, cost_pred, cost_risk]
- for (cost, cost_pred, cost_risk) in zip(
- dist_cost.flatten(),
- dist_cost_pred.flatten(),
- dist_cost_pred_riskier.flatten(),
- )
- ]
- table_distance_cost = wandb.Table(
- data=data,
- columns=[
- "Distance cost data " + name,
- "Distance cost prediction " + name,
- "Distance cost riskier " + name,
- ],
- )
- data = [
- [ttc, ttc_pred, ttc_risk]
- for (ttc, ttc_pred, ttc_risk) in zip(
- ttc_cost.flatten(),
- ttc_cost_pred.flatten(),
- ttc_cost_pred_riskier.flatten(),
- )
- ]
- table_ttc_cost = wandb.Table(
- data=data,
- columns=[
- "TTC cost data " + name,
- "TTC cost prediction " + name,
- "TTC cost riskier " + name,
- ],
- )
- wandb.log(
- {
- "Travel distance data "
- + name: wandb.plot_table(
- vega_spec_name="jmercat/histogram_01_bins",
- data_table=table_travel_distance,
- fields={
- "value": "Travel distance data " + name,
- "title": "Travel distance data " + name,
- },
- ),
- "Travel distance prediction "
- + name: wandb.plot_table(
- vega_spec_name="jmercat/histogram_01_bins",
- data_table=table_travel_distance,
- fields={
- "value": "Travel distance prediction " + name,
- "title": "Travel distance prediction " + name,
- },
- ),
- "Travel distance riskier "
- + name: wandb.plot_table(
- vega_spec_name="jmercat/histogram_01_bins",
- data_table=table_travel_distance,
- fields={
- "value": "Travel distance riskier " + name,
- "title": "Travel distance riskier " + name,
- },
- ),
- "Distance cost data "
- + name: wandb.plot_table(
- vega_spec_name="jmercat/histogram_0025_bins",
- data_table=table_distance_cost,
- fields={
- "value": "Distance cost data " + name,
- "title": "Distance cost data " + name,
- },
- ),
- "Distance cost prediction "
- + name: wandb.plot_table(
- vega_spec_name="jmercat/histogram_0025_bins",
- data_table=table_distance_cost,
- fields={
- "value": "Distance cost prediction " + name,
- "title": "Distance cost prediction " + name,
- },
- ),
- "Distance cost riskier "
- + name: wandb.plot_table(
- vega_spec_name="jmercat/histogram_0025_bins",
- data_table=table_distance_cost,
- fields={
- "value": "Distance cost riskier " + name,
- "title": "Distance cost riskier " + name,
- },
- ),
- "TTC cost data "
- + name: wandb.plot_table(
- vega_spec_name="jmercat/histogram_005_bins",
- data_table=table_ttc_cost,
- fields={
- "value": "TTC cost data " + name,
- "title": "TTC cost data " + name,
- },
- ),
- "TTC cost prediction "
- + name: wandb.plot_table(
- vega_spec_name="jmercat/histogram_005_bins",
- data_table=table_ttc_cost,
- fields={
- "value": "TTC cost prediction " + name,
- "title": "TTC cost prediction " + name,
- },
- ),
- "TTC cost riskier "
- + name: wandb.plot_table(
- vega_spec_name="jmercat/histogram_005_bins",
- data_table=table_ttc_cost,
- fields={
- "value": "TTC cost riskier " + name,
- "title": "TTC cost riskier " + name,
- },
- ),
- }
- )
-
- def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
- """After a validation at the end of every histogram_interval_epoch,
- log the histograms for two scenes: the safer fast scene and the safer slow scene.
- """
- if (
- trainer.current_epoch % self.histogram_interval_epoch
- == self.histogram_interval_epoch - 1
- ):
- self._log_scene(pl_module, self.scene_safe_fast, name="Safer fast")
- self._log_scene(pl_module, self.scene_safe_slow, name="Safer slow")
-
-
-class PlotTrajCallback(pl.Callback):
- """Plot trajectory samples for two scenes:
- One that is safer for the slow pedestrians
- One that is safer for the fast pedestrians
- Samples of ground truth, prediction, and biased predictions are superposed.
- Last positions are marked to visualize the clusters.
-
- Args:
- params: dataclass containing the necessary parameters (a DrawCallbackParams instance)
- n_samples: number of sample trajectories to draw
- """
-
- def __init__(
- self,
- params: DrawCallbackParams,
- n_samples: int = 1,
- ):
- super().__init__()
- self.n_samples = n_samples
- self.num_steps = params.num_steps
- self.dt = params.scene_params.dt
- self.scene_params = params.scene_params
- self.plot_interval_epoch = params.plot_interval_epoch
- self.scene_safe_fast, self.scene_safe_slow = get_fast_slow_scenes(
- params.scene_params, n_samples
- )
- self.ego_traj = self.scene_safe_fast.get_ego_ref_trajectory(
- params.scene_params.sample_times
- )
- self._risk_sampler = get_risk_level_sampler(params.risk_distribution)
-
- def _log_scene(self, epoch: int, pl_module, scene: RandomScene, name: str) -> None:
- """Add drawing of samples of prediction, biased prediction and ground truth in the scene.
-
- Args:
- epoch: current epoch calling the log
- pl_module: pytorch lightning module being trained
- scene: scene to draw
- name: name of the scene
- """
- ped_trajs = scene.get_pedestrians_trajectories()
- device = pl_module.device
- n_agents = ped_trajs.shape[1]
-
- input_traj = ped_trajs[..., : self.num_steps, :]
-
- normalized_input, offset = SceneDataLoaders.normalize_trajectory(
- torch.from_numpy(input_traj.astype("float32")).contiguous().to(device)
- )
- mask_input = torch.ones_like(normalized_input[..., 0])
- ego_history = (
- torch.from_numpy(self.ego_traj[..., : self.num_steps, :].astype("float32"))
- .expand_as(normalized_input)
- .contiguous()
- .to(device)
- )
- ego_future = (
- torch.from_numpy(self.ego_traj[..., self.num_steps :, :].astype("float32"))
- .expand(normalized_input.shape[0], n_agents, -1, -1)
- .contiguous()
- .to(device)
- )
- map = torch.empty(ego_history.shape[0], 0, 0, 2, device=mask_input.device)
- mask_map = torch.empty(ego_history.shape[0], 0, 0, device=mask_input.device)
-
- pred_riskier = (
- pl_module.predict_step(
- (
- normalized_input,
- mask_input,
- map,
- mask_map,
- offset,
- ego_history,
- ego_future,
- ),
- 0,
- risk_level=self._risk_sampler.get_highest_risk(
- batch_size=self.n_samples, device=device
- )
- .unsqueeze(1)
- .repeat(1, n_agents),
- )
- .cpu()
- .detach()
- .numpy()
- )
-
- pred = (
- pl_module.predict_step(
- (
- normalized_input,
- mask_input,
- map,
- mask_map,
- offset,
- ego_history,
- ego_future,
- ),
- 0,
- risk_level=None,
- )
- .cpu()
- .detach()
- .numpy()
- )
-
- fig, ax = plt.subplots()
- plotter = ScenePlotter(scene, ax=ax)
- fig.set_size_inches(h=scene.road_width / 3 + 1, w=scene.road_length / 3)
-
- time = self.dt * self.num_steps
- plotter.draw_scene(0, time=time)
- alpha = 0.5 / np.log(self.n_samples)
- plotter.draw_all_trajectories(
- ped_trajs[..., self.num_steps :, :],
- color="g",
- alpha=alpha,
- label="Future ground truth",
- )
- plotter.draw_all_trajectories(
- input_traj, color="b", alpha=alpha, label="Past input"
- )
- plotter.draw_all_trajectories(
- pred, color="orange", alpha=alpha, label="Prediction"
- )
- plotter.draw_all_trajectories(
- pred_riskier, color="r", alpha=alpha, label="Prediction risk-seeking"
- )
- plotter.draw_legend()
- plt.tight_layout()
- wandb.log({"Road scene " + name: wandb.Image(fig), "epoch": epoch})
- plt.close()
-
- def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
- """After a validation at the end of every plot_interval_epoch,
- log the prediction samples for two scenes: the safer fast scene and the safer slow scene.
- """
- if (
- trainer.current_epoch % self.plot_interval_epoch
- == self.plot_interval_epoch - 1
- ):
- self.scene_safe_fast, self.scene_safe_slow = get_fast_slow_scenes(
- self.scene_params, self.n_samples
- )
- self._log_scene(
- trainer.current_epoch, pl_module, self.scene_safe_slow, "Safer slow"
- )
- self._log_scene(
- trainer.current_epoch, pl_module, self.scene_safe_fast, "Safer fast"
- )
-
-
-# TODO: make the same kind of logs for the Waymo dataset
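
All three classes above are ordinary PyTorch Lightning callbacks, so wiring them up amounts to passing instances to the trainer. A hedged sketch, where `cfg`, `model`, and `datamodule` are placeholders for the project's own config and Lightning objects:

```python
import pytorch_lightning as pl

# cfg, model and datamodule are placeholders, not defined in this file.
params = DrawCallbackParams.from_config(cfg)

trainer = pl.Trainer(
    max_epochs=100,
    callbacks=[
        SwitchTrainingModeCallback(switch_at_epoch=50),  # CVAE phase first, then bias training
        HistogramCallback(params, n_samples=1000),       # wandb cost histograms
        PlotTrajCallback(params, n_samples=20),          # wandb scene plots
    ],
)
trainer.fit(model, datamodule=datamodule)
```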
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/network/session.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/network/session.py
deleted file mode 100644
index 887dc14e796cad0257e5ccfd51ed3a21b7908821..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/network/session.py
+++ /dev/null
@@ -1,519 +0,0 @@
-"""PipSession and supporting code, containing all pip-specific
-network request configuration and behavior.
-"""
-
-import email.utils
-import io
-import ipaddress
-import json
-import logging
-import mimetypes
-import os
-import platform
-import shutil
-import subprocess
-import sys
-import urllib.parse
-import warnings
-from typing import (
- TYPE_CHECKING,
- Any,
- Dict,
- Generator,
- List,
- Mapping,
- Optional,
- Sequence,
- Tuple,
- Union,
-)
-
-from pip._vendor import requests, urllib3
-from pip._vendor.cachecontrol import CacheControlAdapter as _BaseCacheControlAdapter
-from pip._vendor.requests.adapters import DEFAULT_POOLBLOCK, BaseAdapter
-from pip._vendor.requests.adapters import HTTPAdapter as _BaseHTTPAdapter
-from pip._vendor.requests.models import PreparedRequest, Response
-from pip._vendor.requests.structures import CaseInsensitiveDict
-from pip._vendor.urllib3.connectionpool import ConnectionPool
-from pip._vendor.urllib3.exceptions import InsecureRequestWarning
-
-from pip import __version__
-from pip._internal.metadata import get_default_environment
-from pip._internal.models.link import Link
-from pip._internal.network.auth import MultiDomainBasicAuth
-from pip._internal.network.cache import SafeFileCache
-
-# Import ssl from compat so the initial import occurs in only one place.
-from pip._internal.utils.compat import has_tls
-from pip._internal.utils.glibc import libc_ver
-from pip._internal.utils.misc import build_url_from_netloc, parse_netloc
-from pip._internal.utils.urls import url_to_path
-
-if TYPE_CHECKING:
- from ssl import SSLContext
-
- from pip._vendor.urllib3.poolmanager import PoolManager
-
-
-logger = logging.getLogger(__name__)
-
-SecureOrigin = Tuple[str, str, Optional[Union[int, str]]]
-
-
-# Ignore warning raised when using --trusted-host.
-warnings.filterwarnings("ignore", category=InsecureRequestWarning)
-
-
-SECURE_ORIGINS: List[SecureOrigin] = [
- # protocol, hostname, port
- # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
- ("https", "*", "*"),
- ("*", "localhost", "*"),
- ("*", "127.0.0.0/8", "*"),
- ("*", "::1/128", "*"),
- ("file", "*", None),
- # ssh is always secure.
- ("ssh", "*", "*"),
-]
-
-
-# These are environment variables present when running under various
-# CI systems. For each variable, some CI systems that use the variable
-# are indicated. The collection was chosen so that for each of a number
-# of popular systems, at least one of the environment variables is used.
-# This list is used to provide some indication of and lower bound for
-# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive.
-# For more background, see: https://github.com/pypa/pip/issues/5499
-CI_ENVIRONMENT_VARIABLES = (
- # Azure Pipelines
- "BUILD_BUILDID",
- # Jenkins
- "BUILD_ID",
- # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI
- "CI",
- # Explicit environment variable.
- "PIP_IS_CI",
-)
-
-
-def looks_like_ci() -> bool:
- """
- Return whether it looks like pip is running under CI.
- """
- # We don't use the method of checking for a tty (e.g. using isatty())
- # because some CI systems mimic a tty (e.g. Travis CI). Thus that
- # method doesn't provide definitive information in either direction.
- return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES)
-
-
-def user_agent() -> str:
- """
- Return a string representing the user agent.
- """
- data: Dict[str, Any] = {
- "installer": {"name": "pip", "version": __version__},
- "python": platform.python_version(),
- "implementation": {
- "name": platform.python_implementation(),
- },
- }
-
- if data["implementation"]["name"] == "CPython":
- data["implementation"]["version"] = platform.python_version()
- elif data["implementation"]["name"] == "PyPy":
- pypy_version_info = sys.pypy_version_info # type: ignore
- if pypy_version_info.releaselevel == "final":
- pypy_version_info = pypy_version_info[:3]
- data["implementation"]["version"] = ".".join(
- [str(x) for x in pypy_version_info]
- )
- elif data["implementation"]["name"] == "Jython":
- # Complete Guess
- data["implementation"]["version"] = platform.python_version()
- elif data["implementation"]["name"] == "IronPython":
- # Complete Guess
- data["implementation"]["version"] = platform.python_version()
-
- if sys.platform.startswith("linux"):
- from pip._vendor import distro
-
- linux_distribution = distro.name(), distro.version(), distro.codename()
- distro_infos: Dict[str, Any] = dict(
- filter(
- lambda x: x[1],
- zip(["name", "version", "id"], linux_distribution),
- )
- )
- libc = dict(
- filter(
- lambda x: x[1],
- zip(["lib", "version"], libc_ver()),
- )
- )
- if libc:
- distro_infos["libc"] = libc
- if distro_infos:
- data["distro"] = distro_infos
-
- if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
- data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
-
- if platform.system():
- data.setdefault("system", {})["name"] = platform.system()
-
- if platform.release():
- data.setdefault("system", {})["release"] = platform.release()
-
- if platform.machine():
- data["cpu"] = platform.machine()
-
- if has_tls():
- import _ssl as ssl
-
- data["openssl_version"] = ssl.OPENSSL_VERSION
-
- setuptools_dist = get_default_environment().get_distribution("setuptools")
- if setuptools_dist is not None:
- data["setuptools_version"] = str(setuptools_dist.version)
-
- if shutil.which("rustc") is not None:
- # If for any reason `rustc --version` fails, silently ignore it
- try:
- rustc_output = subprocess.check_output(
- ["rustc", "--version"], stderr=subprocess.STDOUT, timeout=0.5
- )
- except Exception:
- pass
- else:
- if rustc_output.startswith(b"rustc "):
- # The format of `rustc --version` is:
- # `b'rustc 1.52.1 (9bc8c42bb 2021-05-09)\n'`
- # We extract just the middle (1.52.1) part
- data["rustc_version"] = rustc_output.split(b" ")[1].decode()
-
- # Use None rather than False so as not to give the impression that
- # pip knows it is not being run under CI. Rather, it is a null or
- # inconclusive result. Also, we include some value rather than no
- # value to make it easier to know that the check has been run.
- data["ci"] = True if looks_like_ci() else None
-
- user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")
- if user_data is not None:
- data["user_data"] = user_data
-
- return "{data[installer][name]}/{data[installer][version]} {json}".format(
- data=data,
- json=json.dumps(data, separators=(",", ":"), sort_keys=True),
- )
-
-
-class LocalFSAdapter(BaseAdapter):
- def send(
- self,
- request: PreparedRequest,
- stream: bool = False,
- timeout: Optional[Union[float, Tuple[float, float]]] = None,
- verify: Union[bool, str] = True,
- cert: Optional[Union[str, Tuple[str, str]]] = None,
- proxies: Optional[Mapping[str, str]] = None,
- ) -> Response:
- pathname = url_to_path(request.url)
-
- resp = Response()
- resp.status_code = 200
- resp.url = request.url
-
- try:
- stats = os.stat(pathname)
- except OSError as exc:
- # format the exception raised as an io.BytesIO object,
- # to return a better error message:
- resp.status_code = 404
- resp.reason = type(exc).__name__
- resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode("utf8"))
- else:
- modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
- content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
- resp.headers = CaseInsensitiveDict(
- {
- "Content-Type": content_type,
- "Content-Length": stats.st_size,
- "Last-Modified": modified,
- }
- )
-
- resp.raw = open(pathname, "rb")
- resp.close = resp.raw.close
-
- return resp
-
- def close(self) -> None:
- pass
-
-
-class _SSLContextAdapterMixin:
- """Mixin to add the ``ssl_context`` constructor argument to HTTP adapters.
-
- The additional argument is forwarded directly to the pool manager. This allows us
- to dynamically decide what SSL store to use at runtime, which is used to implement
- the optional ``truststore`` backend.
- """
-
- def __init__(
- self,
- *,
- ssl_context: Optional["SSLContext"] = None,
- **kwargs: Any,
- ) -> None:
- self._ssl_context = ssl_context
- super().__init__(**kwargs)
-
- def init_poolmanager(
- self,
- connections: int,
- maxsize: int,
- block: bool = DEFAULT_POOLBLOCK,
- **pool_kwargs: Any,
- ) -> "PoolManager":
- if self._ssl_context is not None:
- pool_kwargs.setdefault("ssl_context", self._ssl_context)
- return super().init_poolmanager( # type: ignore[misc]
- connections=connections,
- maxsize=maxsize,
- block=block,
- **pool_kwargs,
- )
-
-
-class HTTPAdapter(_SSLContextAdapterMixin, _BaseHTTPAdapter):
- pass
-
-
-class CacheControlAdapter(_SSLContextAdapterMixin, _BaseCacheControlAdapter):
- pass
-
-
-class InsecureHTTPAdapter(HTTPAdapter):
- def cert_verify(
- self,
- conn: ConnectionPool,
- url: str,
- verify: Union[bool, str],
- cert: Optional[Union[str, Tuple[str, str]]],
- ) -> None:
- super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
-
-
-class InsecureCacheControlAdapter(CacheControlAdapter):
- def cert_verify(
- self,
- conn: ConnectionPool,
- url: str,
- verify: Union[bool, str],
- cert: Optional[Union[str, Tuple[str, str]]],
- ) -> None:
- super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
-
-
-class PipSession(requests.Session):
- timeout: Optional[int] = None
-
- def __init__(
- self,
- *args: Any,
- retries: int = 0,
- cache: Optional[str] = None,
- trusted_hosts: Sequence[str] = (),
- index_urls: Optional[List[str]] = None,
- ssl_context: Optional["SSLContext"] = None,
- **kwargs: Any,
- ) -> None:
- """
- :param trusted_hosts: Domains not to emit warnings for when not using
- HTTPS.
- """
- super().__init__(*args, **kwargs)
-
- # Namespace the attribute with "pip_" just in case to prevent
- # possible conflicts with the base class.
- self.pip_trusted_origins: List[Tuple[str, Optional[int]]] = []
-
- # Attach our User Agent to the request
- self.headers["User-Agent"] = user_agent()
-
- # Attach our Authentication handler to the session
- self.auth = MultiDomainBasicAuth(index_urls=index_urls)
-
- # Create our urllib3.Retry instance which will allow us to customize
- # how we handle retries.
- retries = urllib3.Retry(
- # Set the total number of retries that a particular request can
- # have.
- total=retries,
- # A 503 error from PyPI typically means that the Fastly -> Origin
- # connection got interrupted in some way. A 503 error in general
- # is typically considered a transient error so we'll go ahead and
- # retry it.
- # A 500 may indicate transient error in Amazon S3
- # A 520 or 527 - may indicate transient error in CloudFlare
- status_forcelist=[500, 503, 520, 527],
- # Add a small amount of back off between failed requests in
- # order to prevent hammering the service.
- backoff_factor=0.25,
- ) # type: ignore
-
- # Our Insecure HTTPAdapter disables HTTPS validation. It does not
- # support caching so we'll use it for all http:// URLs.
- # If caching is disabled, we will also use it for
- # https:// hosts that we've marked as ignoring
- # TLS errors for (trusted-hosts).
- insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
-
- # We want to _only_ cache responses on securely fetched origins or when
- # the host is specified as trusted. We do this because
- # we can't validate the response of an insecurely/untrusted fetched
- # origin, and we don't want someone to be able to poison the cache and
- # require manual eviction from the cache to fix it.
- if cache:
- secure_adapter = CacheControlAdapter(
- cache=SafeFileCache(cache),
- max_retries=retries,
- ssl_context=ssl_context,
- )
- self._trusted_host_adapter = InsecureCacheControlAdapter(
- cache=SafeFileCache(cache),
- max_retries=retries,
- )
- else:
- secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context)
- self._trusted_host_adapter = insecure_adapter
-
- self.mount("https://", secure_adapter)
- self.mount("http://", insecure_adapter)
-
- # Enable file:// urls
- self.mount("file://", LocalFSAdapter())
-
- for host in trusted_hosts:
- self.add_trusted_host(host, suppress_logging=True)
-
- def update_index_urls(self, new_index_urls: List[str]) -> None:
- """
- :param new_index_urls: New index urls to update the authentication
- handler with.
- """
- self.auth.index_urls = new_index_urls
-
- def add_trusted_host(
- self, host: str, source: Optional[str] = None, suppress_logging: bool = False
- ) -> None:
- """
- :param host: It is okay to provide a host that has previously been
- added.
- :param source: An optional source string, for logging where the host
- string came from.
- """
- if not suppress_logging:
- msg = f"adding trusted host: {host!r}"
- if source is not None:
- msg += f" (from {source})"
- logger.info(msg)
-
- parsed_host, parsed_port = parse_netloc(host)
- if parsed_host is None:
- raise ValueError(f"Trusted host URL must include a host part: {host!r}")
- if (parsed_host, parsed_port) not in self.pip_trusted_origins:
- self.pip_trusted_origins.append((parsed_host, parsed_port))
-
- self.mount(
- build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter
- )
- self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter)
- if not parsed_port:
- self.mount(
- build_url_from_netloc(host, scheme="http") + ":",
- self._trusted_host_adapter,
- )
- # Mount wildcard ports for the same host.
- self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter)
-
- def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]:
- yield from SECURE_ORIGINS
- for host, port in self.pip_trusted_origins:
- yield ("*", host, "*" if port is None else port)
-
- def is_secure_origin(self, location: Link) -> bool:
- # Determine if this url used a secure transport mechanism
- parsed = urllib.parse.urlparse(str(location))
- origin_protocol, origin_host, origin_port = (
- parsed.scheme,
- parsed.hostname,
- parsed.port,
- )
-
- # The protocol to use to see if the protocol matches.
- # Don't count the repository type as part of the protocol: in
- # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
- # the last scheme.)
- origin_protocol = origin_protocol.rsplit("+", 1)[-1]
-
- # Determine if our origin is a secure origin by looking through our
- # hardcoded list of secure origins, as well as any additional ones
- # configured on this PackageFinder instance.
- for secure_origin in self.iter_secure_origins():
- secure_protocol, secure_host, secure_port = secure_origin
- if origin_protocol != secure_protocol and secure_protocol != "*":
- continue
-
- try:
- addr = ipaddress.ip_address(origin_host or "")
- network = ipaddress.ip_network(secure_host)
- except ValueError:
- # We don't have both a valid address or a valid network, so
- # we'll check this origin against hostnames.
- if (
- origin_host
- and origin_host.lower() != secure_host.lower()
- and secure_host != "*"
- ):
- continue
- else:
- # We have a valid address and network, so see if the address
- # is contained within the network.
- if addr not in network:
- continue
-
- # Check to see if the port matches.
- if (
- origin_port != secure_port
- and secure_port != "*"
- and secure_port is not None
- ):
- continue
-
- # If we've gotten here, then this origin matches the current
- # secure origin and we should return True
- return True
-
- # If we've gotten to this point, then the origin isn't secure and we
- # will not accept it as a valid location to search. We will however
- # log a warning that we are ignoring it.
- logger.warning(
- "The repository located at %s is not a trusted or secure host and "
- "is being ignored. If this repository is available via HTTPS we "
- "recommend you use HTTPS instead, otherwise you may silence "
- "this warning and allow it anyway with '--trusted-host %s'.",
- origin_host,
- origin_host,
- )
-
- return False
-
- def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response:
- # Allow setting a default timeout on a session
- kwargs.setdefault("timeout", self.timeout)
- # Allow setting a default proxies on a session
- kwargs.setdefault("proxies", self.proxies)
-
- # Dispatch the actual request
- return super().request(method, url, *args, **kwargs)
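The `PipSession` defined above is the hub of pip's networking: it sets a detailed User-Agent, attaches multi-domain basic auth, configures urllib3 retries, and mounts per-scheme adapters (cached HTTPS, an insecure adapter for plain HTTP and trusted hosts, and a `LocalFSAdapter` for `file://` URLs). A minimal usage sketch, assuming pip's internal API exactly as shown in the deleted module (internal APIs may change between pip releases; the cache path and host name below are invented for illustration):

```python
from pip._internal.network.session import PipSession

# Build a session roughly the way pip's CLI does: retries, an HTTP cache
# directory, and a host that may be fetched without TLS verification.
session = PipSession(
    retries=3,
    cache="/tmp/pip-http-cache",              # hypothetical cache location
    trusted_hosts=["pypi.internal.example"],  # hypothetical private index host
)
session.timeout = 15  # applied in request() via kwargs.setdefault("timeout", ...)

resp = session.get("https://pypi.org/simple/pip/")
print(resp.status_code, resp.headers.get("Content-Type"))
```

Because `trusted_hosts` routes matching URLs through `InsecureCacheControlAdapter` (or the plain insecure adapter when no cache is given), responses from that host are accepted even without certificate verification, mirroring pip's `--trusted-host` option.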
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/util.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/util.py
deleted file mode 100644
index dd01849d997e5ae9dc9809295e29ceb871b14216..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/util.py
+++ /dev/null
@@ -1,1932 +0,0 @@
-#
-# Copyright (C) 2012-2021 The Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-import codecs
-from collections import deque
-import contextlib
-import csv
-from glob import iglob as std_iglob
-import io
-import json
-import logging
-import os
-import py_compile
-import re
-import socket
-try:
- import ssl
-except ImportError: # pragma: no cover
- ssl = None
-import subprocess
-import sys
-import tarfile
-import tempfile
-import textwrap
-
-try:
- import threading
-except ImportError: # pragma: no cover
- import dummy_threading as threading
-import time
-
-from . import DistlibException
-from .compat import (string_types, text_type, shutil, raw_input, StringIO,
- cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
- splittype, HTTPHandler, BaseConfigurator, valid_ident,
- Container, configparser, URLError, ZipFile, fsdecode,
- unquote, urlparse)
-
-logger = logging.getLogger(__name__)
-
-#
-# Requirement parsing code as per PEP 508
-#
-
-IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
-VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
-COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
-MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
-OR = re.compile(r'^or\b\s*')
-AND = re.compile(r'^and\b\s*')
-NON_SPACE = re.compile(r'(\S+)\s*')
-STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
-
-
-def parse_marker(marker_string):
- """
- Parse a marker string and return a dictionary containing a marker expression.
-
- The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
- the expression grammar, or strings. A string contained in quotes is to be
- interpreted as a literal string, and a string not contained in quotes is a
- variable (such as os_name).
- """
- def marker_var(remaining):
- # either identifier, or literal string
- m = IDENTIFIER.match(remaining)
- if m:
- result = m.groups()[0]
- remaining = remaining[m.end():]
- elif not remaining:
- raise SyntaxError('unexpected end of input')
- else:
- q = remaining[0]
- if q not in '\'"':
- raise SyntaxError('invalid expression: %s' % remaining)
- oq = '\'"'.replace(q, '')
- remaining = remaining[1:]
- parts = [q]
- while remaining:
- # either a string chunk, or oq, or q to terminate
- if remaining[0] == q:
- break
- elif remaining[0] == oq:
- parts.append(oq)
- remaining = remaining[1:]
- else:
- m = STRING_CHUNK.match(remaining)
- if not m:
- raise SyntaxError('error in string literal: %s' % remaining)
- parts.append(m.groups()[0])
- remaining = remaining[m.end():]
- else:
- s = ''.join(parts)
- raise SyntaxError('unterminated string: %s' % s)
- parts.append(q)
- result = ''.join(parts)
- remaining = remaining[1:].lstrip() # skip past closing quote
- return result, remaining
-
- def marker_expr(remaining):
- if remaining and remaining[0] == '(':
- result, remaining = marker(remaining[1:].lstrip())
- if remaining[0] != ')':
- raise SyntaxError('unterminated parenthesis: %s' % remaining)
- remaining = remaining[1:].lstrip()
- else:
- lhs, remaining = marker_var(remaining)
- while remaining:
- m = MARKER_OP.match(remaining)
- if not m:
- break
- op = m.groups()[0]
- remaining = remaining[m.end():]
- rhs, remaining = marker_var(remaining)
- lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
- result = lhs
- return result, remaining
-
- def marker_and(remaining):
- lhs, remaining = marker_expr(remaining)
- while remaining:
- m = AND.match(remaining)
- if not m:
- break
- remaining = remaining[m.end():]
- rhs, remaining = marker_expr(remaining)
- lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
- return lhs, remaining
-
- def marker(remaining):
- lhs, remaining = marker_and(remaining)
- while remaining:
- m = OR.match(remaining)
- if not m:
- break
- remaining = remaining[m.end():]
- rhs, remaining = marker_and(remaining)
- lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
- return lhs, remaining
-
- return marker(marker_string)
-
-
-def parse_requirement(req):
- """
- Parse a requirement passed in as a string. Return a Container
- whose attributes contain the various parts of the requirement.
- """
- remaining = req.strip()
- if not remaining or remaining.startswith('#'):
- return None
- m = IDENTIFIER.match(remaining)
- if not m:
- raise SyntaxError('name expected: %s' % remaining)
- distname = m.groups()[0]
- remaining = remaining[m.end():]
- extras = mark_expr = versions = uri = None
- if remaining and remaining[0] == '[':
- i = remaining.find(']', 1)
- if i < 0:
- raise SyntaxError('unterminated extra: %s' % remaining)
- s = remaining[1:i]
- remaining = remaining[i + 1:].lstrip()
- extras = []
- while s:
- m = IDENTIFIER.match(s)
- if not m:
- raise SyntaxError('malformed extra: %s' % s)
- extras.append(m.groups()[0])
- s = s[m.end():]
- if not s:
- break
- if s[0] != ',':
- raise SyntaxError('comma expected in extras: %s' % s)
- s = s[1:].lstrip()
- if not extras:
- extras = None
- if remaining:
- if remaining[0] == '@':
- # it's a URI
- remaining = remaining[1:].lstrip()
- m = NON_SPACE.match(remaining)
- if not m:
- raise SyntaxError('invalid URI: %s' % remaining)
- uri = m.groups()[0]
- t = urlparse(uri)
- # there are issues with Python and URL parsing, so this test
- # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
- # always parse invalid URLs correctly - it should raise
- # exceptions for malformed URLs
- if not (t.scheme and t.netloc):
- raise SyntaxError('Invalid URL: %s' % uri)
- remaining = remaining[m.end():].lstrip()
- else:
-
- def get_versions(ver_remaining):
- """
- Return a list of operator, version tuples if any are
- specified, else None.
- """
- m = COMPARE_OP.match(ver_remaining)
- versions = None
- if m:
- versions = []
- while True:
- op = m.groups()[0]
- ver_remaining = ver_remaining[m.end():]
- m = VERSION_IDENTIFIER.match(ver_remaining)
- if not m:
- raise SyntaxError('invalid version: %s' % ver_remaining)
- v = m.groups()[0]
- versions.append((op, v))
- ver_remaining = ver_remaining[m.end():]
- if not ver_remaining or ver_remaining[0] != ',':
- break
- ver_remaining = ver_remaining[1:].lstrip()
- # Some packages have a trailing comma which would break things
- # See issue #148
- if not ver_remaining:
- break
- m = COMPARE_OP.match(ver_remaining)
- if not m:
- raise SyntaxError('invalid constraint: %s' % ver_remaining)
- if not versions:
- versions = None
- return versions, ver_remaining
-
- if remaining[0] != '(':
- versions, remaining = get_versions(remaining)
- else:
- i = remaining.find(')', 1)
- if i < 0:
- raise SyntaxError('unterminated parenthesis: %s' % remaining)
- s = remaining[1:i]
- remaining = remaining[i + 1:].lstrip()
- # As a special diversion from PEP 508, allow a version number
- # a.b.c in parentheses as a synonym for ~= a.b.c (because this
- # is allowed in earlier PEPs)
- if COMPARE_OP.match(s):
- versions, _ = get_versions(s)
- else:
- m = VERSION_IDENTIFIER.match(s)
- if not m:
- raise SyntaxError('invalid constraint: %s' % s)
- v = m.groups()[0]
- s = s[m.end():].lstrip()
- if s:
- raise SyntaxError('invalid constraint: %s' % s)
- versions = [('~=', v)]
-
- if remaining:
- if remaining[0] != ';':
- raise SyntaxError('invalid requirement: %s' % remaining)
- remaining = remaining[1:].lstrip()
-
- mark_expr, remaining = parse_marker(remaining)
-
- if remaining and remaining[0] != '#':
- raise SyntaxError('unexpected trailing data: %s' % remaining)
-
- if not versions:
- rs = distname
- else:
- rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
- return Container(name=distname, extras=extras, constraints=versions,
- marker=mark_expr, url=uri, requirement=rs)
-
-
-def get_resources_dests(resources_root, rules):
- """Find destinations for resources files"""
-
- def get_rel_path(root, path):
- # normalizes and returns a lstripped-/-separated path
- root = root.replace(os.path.sep, '/')
- path = path.replace(os.path.sep, '/')
- assert path.startswith(root)
- return path[len(root):].lstrip('/')
-
- destinations = {}
- for base, suffix, dest in rules:
- prefix = os.path.join(resources_root, base)
- for abs_base in iglob(prefix):
- abs_glob = os.path.join(abs_base, suffix)
- for abs_path in iglob(abs_glob):
- resource_file = get_rel_path(resources_root, abs_path)
- if dest is None: # remove the entry if it was here
- destinations.pop(resource_file, None)
- else:
- rel_path = get_rel_path(abs_base, abs_path)
- rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
- destinations[resource_file] = rel_dest + '/' + rel_path
- return destinations
-
-
-def in_venv():
- if hasattr(sys, 'real_prefix'):
- # virtualenv venvs
- result = True
- else:
- # PEP 405 venvs
- result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
- return result
-
-
-def get_executable():
-# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
-# changes to the stub launcher mean that sys.executable always points
-# to the stub on OS X
-# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
-# in os.environ):
-# result = os.environ['__PYVENV_LAUNCHER__']
-# else:
-# result = sys.executable
-# return result
- # Avoid normcasing: see issue #143
- # result = os.path.normcase(sys.executable)
- result = sys.executable
- if not isinstance(result, text_type):
- result = fsdecode(result)
- return result
-
-
-def proceed(prompt, allowed_chars, error_prompt=None, default=None):
- p = prompt
- while True:
- s = raw_input(p)
- p = prompt
- if not s and default:
- s = default
- if s:
- c = s[0].lower()
- if c in allowed_chars:
- break
- if error_prompt:
- p = '%c: %s\n%s' % (c, error_prompt, prompt)
- return c
-
-
-def extract_by_key(d, keys):
- if isinstance(keys, string_types):
- keys = keys.split()
- result = {}
- for key in keys:
- if key in d:
- result[key] = d[key]
- return result
-
-def read_exports(stream):
- if sys.version_info[0] >= 3:
- # needs to be a text stream
- stream = codecs.getreader('utf-8')(stream)
- # Try to load as JSON, falling back on legacy format
- data = stream.read()
- stream = StringIO(data)
- try:
- jdata = json.load(stream)
- result = jdata['extensions']['python.exports']['exports']
- for group, entries in result.items():
- for k, v in entries.items():
- s = '%s = %s' % (k, v)
- entry = get_export_entry(s)
- assert entry is not None
- entries[k] = entry
- return result
- except Exception:
- stream.seek(0, 0)
-
- def read_stream(cp, stream):
- if hasattr(cp, 'read_file'):
- cp.read_file(stream)
- else:
- cp.readfp(stream)
-
- cp = configparser.ConfigParser()
- try:
- read_stream(cp, stream)
- except configparser.MissingSectionHeaderError:
- stream.close()
- data = textwrap.dedent(data)
- stream = StringIO(data)
- read_stream(cp, stream)
-
- result = {}
- for key in cp.sections():
- result[key] = entries = {}
- for name, value in cp.items(key):
- s = '%s = %s' % (name, value)
- entry = get_export_entry(s)
- assert entry is not None
- #entry.dist = self
- entries[name] = entry
- return result
-
-
-def write_exports(exports, stream):
- if sys.version_info[0] >= 3:
- # needs to be a text stream
- stream = codecs.getwriter('utf-8')(stream)
- cp = configparser.ConfigParser()
- for k, v in exports.items():
- # TODO check k, v for valid values
- cp.add_section(k)
- for entry in v.values():
- if entry.suffix is None:
- s = entry.prefix
- else:
- s = '%s:%s' % (entry.prefix, entry.suffix)
- if entry.flags:
- s = '%s [%s]' % (s, ', '.join(entry.flags))
- cp.set(k, entry.name, s)
- cp.write(stream)
-
-
-@contextlib.contextmanager
-def tempdir():
- td = tempfile.mkdtemp()
- try:
- yield td
- finally:
- shutil.rmtree(td)
-
-@contextlib.contextmanager
-def chdir(d):
- cwd = os.getcwd()
- try:
- os.chdir(d)
- yield
- finally:
- os.chdir(cwd)
-
-
-@contextlib.contextmanager
-def socket_timeout(seconds=15):
- cto = socket.getdefaulttimeout()
- try:
- socket.setdefaulttimeout(seconds)
- yield
- finally:
- socket.setdefaulttimeout(cto)
-
-
-class cached_property(object):
- def __init__(self, func):
- self.func = func
- #for attr in ('__name__', '__module__', '__doc__'):
- # setattr(self, attr, getattr(func, attr, None))
-
- def __get__(self, obj, cls=None):
- if obj is None:
- return self
- value = self.func(obj)
- object.__setattr__(obj, self.func.__name__, value)
- #obj.__dict__[self.func.__name__] = value = self.func(obj)
- return value
-
-def convert_path(pathname):
- """Return 'pathname' as a name that will work on the native filesystem.
-
- The path is split on '/' and put back together again using the current
- directory separator. Needed because filenames in the setup script are
- always supplied in Unix style, and have to be converted to the local
- convention before we can actually use them in the filesystem. Raises
- ValueError on non-Unix-ish systems if 'pathname' either starts or
- ends with a slash.
- """
- if os.sep == '/':
- return pathname
- if not pathname:
- return pathname
- if pathname[0] == '/':
- raise ValueError("path '%s' cannot be absolute" % pathname)
- if pathname[-1] == '/':
- raise ValueError("path '%s' cannot end with '/'" % pathname)
-
- paths = pathname.split('/')
- while os.curdir in paths:
- paths.remove(os.curdir)
- if not paths:
- return os.curdir
- return os.path.join(*paths)
-
-
-class FileOperator(object):
- def __init__(self, dry_run=False):
- self.dry_run = dry_run
- self.ensured = set()
- self._init_record()
-
- def _init_record(self):
- self.record = False
- self.files_written = set()
- self.dirs_created = set()
-
- def record_as_written(self, path):
- if self.record:
- self.files_written.add(path)
-
- def newer(self, source, target):
- """Tell if the target is newer than the source.
-
- Returns true if 'source' exists and is more recently modified than
- 'target', or if 'source' exists and 'target' doesn't.
-
- Returns false if both exist and 'target' is the same age or younger
- than 'source'. Raise PackagingFileError if 'source' does not exist.
-
- Note that this test is not very accurate: files created in the same
- second will have the same "age".
- """
- if not os.path.exists(source):
- raise DistlibException("file '%r' does not exist" %
- os.path.abspath(source))
- if not os.path.exists(target):
- return True
-
- return os.stat(source).st_mtime > os.stat(target).st_mtime
-
- def copy_file(self, infile, outfile, check=True):
- """Copy a file respecting dry-run and force flags.
- """
- self.ensure_dir(os.path.dirname(outfile))
- logger.info('Copying %s to %s', infile, outfile)
- if not self.dry_run:
- msg = None
- if check:
- if os.path.islink(outfile):
- msg = '%s is a symlink' % outfile
- elif os.path.exists(outfile) and not os.path.isfile(outfile):
- msg = '%s is a non-regular file' % outfile
- if msg:
- raise ValueError(msg + ' which would be overwritten')
- shutil.copyfile(infile, outfile)
- self.record_as_written(outfile)
-
- def copy_stream(self, instream, outfile, encoding=None):
- assert not os.path.isdir(outfile)
- self.ensure_dir(os.path.dirname(outfile))
- logger.info('Copying stream %s to %s', instream, outfile)
- if not self.dry_run:
- if encoding is None:
- outstream = open(outfile, 'wb')
- else:
- outstream = codecs.open(outfile, 'w', encoding=encoding)
- try:
- shutil.copyfileobj(instream, outstream)
- finally:
- outstream.close()
- self.record_as_written(outfile)
-
- def write_binary_file(self, path, data):
- self.ensure_dir(os.path.dirname(path))
- if not self.dry_run:
- if os.path.exists(path):
- os.remove(path)
- with open(path, 'wb') as f:
- f.write(data)
- self.record_as_written(path)
-
- def write_text_file(self, path, data, encoding):
- self.write_binary_file(path, data.encode(encoding))
-
- def set_mode(self, bits, mask, files):
- if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
- # Set the executable bits (owner, group, and world) on
- # all the files specified.
- for f in files:
- if self.dry_run:
- logger.info("changing mode of %s", f)
- else:
- mode = (os.stat(f).st_mode | bits) & mask
- logger.info("changing mode of %s to %o", f, mode)
- os.chmod(f, mode)
-
- set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
-
- def ensure_dir(self, path):
- path = os.path.abspath(path)
- if path not in self.ensured and not os.path.exists(path):
- self.ensured.add(path)
- d, f = os.path.split(path)
- self.ensure_dir(d)
- logger.info('Creating %s' % path)
- if not self.dry_run:
- os.mkdir(path)
- if self.record:
- self.dirs_created.add(path)
-
- def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
- dpath = cache_from_source(path, not optimize)
- logger.info('Byte-compiling %s to %s', path, dpath)
- if not self.dry_run:
- if force or self.newer(path, dpath):
- if not prefix:
- diagpath = None
- else:
- assert path.startswith(prefix)
- diagpath = path[len(prefix):]
- compile_kwargs = {}
- if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
- compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
- py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error
- self.record_as_written(dpath)
- return dpath
-
- def ensure_removed(self, path):
- if os.path.exists(path):
- if os.path.isdir(path) and not os.path.islink(path):
- logger.debug('Removing directory tree at %s', path)
- if not self.dry_run:
- shutil.rmtree(path)
- if self.record:
- if path in self.dirs_created:
- self.dirs_created.remove(path)
- else:
- if os.path.islink(path):
- s = 'link'
- else:
- s = 'file'
- logger.debug('Removing %s %s', s, path)
- if not self.dry_run:
- os.remove(path)
- if self.record:
- if path in self.files_written:
- self.files_written.remove(path)
-
- def is_writable(self, path):
- result = False
- while not result:
- if os.path.exists(path):
- result = os.access(path, os.W_OK)
- break
- parent = os.path.dirname(path)
- if parent == path:
- break
- path = parent
- return result
-
- def commit(self):
- """
- Commit recorded changes, turn off recording, return
- changes.
- """
- assert self.record
- result = self.files_written, self.dirs_created
- self._init_record()
- return result
-
- def rollback(self):
- if not self.dry_run:
- for f in list(self.files_written):
- if os.path.exists(f):
- os.remove(f)
- # dirs should all be empty now, except perhaps for
- # __pycache__ subdirs
- # reverse so that subdirs appear before their parents
- dirs = sorted(self.dirs_created, reverse=True)
- for d in dirs:
- flist = os.listdir(d)
- if flist:
- assert flist == ['__pycache__']
- sd = os.path.join(d, flist[0])
- os.rmdir(sd)
- os.rmdir(d) # should fail if non-empty
- self._init_record()
-
-def resolve(module_name, dotted_path):
- if module_name in sys.modules:
- mod = sys.modules[module_name]
- else:
- mod = __import__(module_name)
- if dotted_path is None:
- result = mod
- else:
- parts = dotted_path.split('.')
- result = getattr(mod, parts.pop(0))
- for p in parts:
- result = getattr(result, p)
- return result
-
-
-class ExportEntry(object):
- def __init__(self, name, prefix, suffix, flags):
- self.name = name
- self.prefix = prefix
- self.suffix = suffix
- self.flags = flags
-
- @cached_property
- def value(self):
- return resolve(self.prefix, self.suffix)
-
- def __repr__(self): # pragma: no cover
- return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
- self.suffix, self.flags)
-
- def __eq__(self, other):
- if not isinstance(other, ExportEntry):
- result = False
- else:
- result = (self.name == other.name and
- self.prefix == other.prefix and
- self.suffix == other.suffix and
- self.flags == other.flags)
- return result
-
- __hash__ = object.__hash__
-
-
-ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
- \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
- \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
- ''', re.VERBOSE)
-
-def get_export_entry(specification):
- m = ENTRY_RE.search(specification)
- if not m:
- result = None
- if '[' in specification or ']' in specification:
- raise DistlibException("Invalid specification "
- "'%s'" % specification)
- else:
- d = m.groupdict()
- name = d['name']
- path = d['callable']
- colons = path.count(':')
- if colons == 0:
- prefix, suffix = path, None
- else:
- if colons != 1:
- raise DistlibException("Invalid specification "
- "'%s'" % specification)
- prefix, suffix = path.split(':')
- flags = d['flags']
- if flags is None:
- if '[' in specification or ']' in specification:
- raise DistlibException("Invalid specification "
- "'%s'" % specification)
- flags = []
- else:
- flags = [f.strip() for f in flags.split(',')]
- result = ExportEntry(name, prefix, suffix, flags)
- return result
-
-
-def get_cache_base(suffix=None):
- """
- Return the default base location for distlib caches. If the directory does
- not exist, it is created. Use the suffix provided for the base directory,
- and default to '.distlib' if it isn't provided.
-
- On Windows, if LOCALAPPDATA is defined in the environment, then it is
- assumed to be a directory, and will be the parent directory of the result.
- On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
- directory - using os.expanduser('~') - will be the parent directory of
- the result.
-
- The result is just the directory '.distlib' in the parent directory as
- determined above, or with the name specified with ``suffix``.
- """
- if suffix is None:
- suffix = '.distlib'
- if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
- result = os.path.expandvars('$localappdata')
- else:
- # Assume posix, or old Windows
- result = os.path.expanduser('~')
- # we use 'isdir' instead of 'exists', because we want to
- # fail if there's a file with that name
- if os.path.isdir(result):
- usable = os.access(result, os.W_OK)
- if not usable:
- logger.warning('Directory exists but is not writable: %s', result)
- else:
- try:
- os.makedirs(result)
- usable = True
- except OSError:
- logger.warning('Unable to create %s', result, exc_info=True)
- usable = False
- if not usable:
- result = tempfile.mkdtemp()
- logger.warning('Default location unusable, using %s', result)
- return os.path.join(result, suffix)
-
-
-def path_to_cache_dir(path):
- """
- Convert an absolute path to a directory name for use in a cache.
-
- The algorithm used is:
-
- #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
- #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
- #. ``'.cache'`` is appended.
- """
- d, p = os.path.splitdrive(os.path.abspath(path))
- if d:
- d = d.replace(':', '---')
- p = p.replace(os.sep, '--')
- return d + p + '.cache'
-
-
-def ensure_slash(s):
- if not s.endswith('/'):
- return s + '/'
- return s
-
-
-def parse_credentials(netloc):
- username = password = None
- if '@' in netloc:
- prefix, netloc = netloc.rsplit('@', 1)
- if ':' not in prefix:
- username = prefix
- else:
- username, password = prefix.split(':', 1)
- if username:
- username = unquote(username)
- if password:
- password = unquote(password)
- return username, password, netloc
-
-
-def get_process_umask():
- result = os.umask(0o22)
- os.umask(result)
- return result
-
-def is_string_sequence(seq):
- result = True
- i = None
- for i, s in enumerate(seq):
- if not isinstance(s, string_types):
- result = False
- break
- assert i is not None
- return result
-
-PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
- '([a-z0-9_.+-]+)', re.I)
-PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
-
-
-def split_filename(filename, project_name=None):
- """
- Extract name, version, python version from a filename (no extension)
-
- Return name, version, pyver or None
- """
- result = None
- pyver = None
- filename = unquote(filename).replace(' ', '-')
- m = PYTHON_VERSION.search(filename)
- if m:
- pyver = m.group(1)
- filename = filename[:m.start()]
- if project_name and len(filename) > len(project_name) + 1:
- m = re.match(re.escape(project_name) + r'\b', filename)
- if m:
- n = m.end()
- result = filename[:n], filename[n + 1:], pyver
- if result is None:
- m = PROJECT_NAME_AND_VERSION.match(filename)
- if m:
- result = m.group(1), m.group(3), pyver
- return result
-
-# Allow spaces in name because of legacy dists like "Twisted Core"
-NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
- r'\(\s*(?P<ver>[^\s)]+)\)$')
-
-def parse_name_and_version(p):
- """
- A utility method used to get name and version from a string.
-
- From e.g. a Provides-Dist value.
-
- :param p: A value in a form 'foo (1.0)'
- :return: The name and version as a tuple.
- """
- m = NAME_VERSION_RE.match(p)
- if not m:
- raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
- d = m.groupdict()
- return d['name'].strip().lower(), d['ver']
-
-def get_extras(requested, available):
- result = set()
- requested = set(requested or [])
- available = set(available or [])
- if '*' in requested:
- requested.remove('*')
- result |= available
- for r in requested:
- if r == '-':
- result.add(r)
- elif r.startswith('-'):
- unwanted = r[1:]
- if unwanted not in available:
- logger.warning('undeclared extra: %s' % unwanted)
- if unwanted in result:
- result.remove(unwanted)
- else:
- if r not in available:
- logger.warning('undeclared extra: %s' % r)
- result.add(r)
- return result
-#
-# Extended metadata functionality
-#
-
-def _get_external_data(url):
- result = {}
- try:
- # urlopen might fail if it runs into redirections,
- # because of Python issue #13696. Fixed in locators
- # using a custom redirect handler.
- resp = urlopen(url)
- headers = resp.info()
- ct = headers.get('Content-Type')
- if not ct.startswith('application/json'):
- logger.debug('Unexpected response for JSON request: %s', ct)
- else:
- reader = codecs.getreader('utf-8')(resp)
- #data = reader.read().decode('utf-8')
- #result = json.loads(data)
- result = json.load(reader)
- except Exception as e:
- logger.exception('Failed to get external data for %s: %s', url, e)
- return result
-
-_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
-
-def get_project_data(name):
- url = '%s/%s/project.json' % (name[0].upper(), name)
- url = urljoin(_external_data_base_url, url)
- result = _get_external_data(url)
- return result
-
-def get_package_data(name, version):
- url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
- url = urljoin(_external_data_base_url, url)
- return _get_external_data(url)
-
-
-class Cache(object):
- """
- A class implementing a cache for resources that need to live in the file system
- e.g. shared libraries. This class was moved from resources to here because it
- could be used by other modules, e.g. the wheel module.
- """
-
- def __init__(self, base):
- """
- Initialise an instance.
-
- :param base: The base directory where the cache should be located.
- """
- # we use 'isdir' instead of 'exists', because we want to
- # fail if there's a file with that name
- if not os.path.isdir(base): # pragma: no cover
- os.makedirs(base)
- if (os.stat(base).st_mode & 0o77) != 0:
- logger.warning('Directory \'%s\' is not private', base)
- self.base = os.path.abspath(os.path.normpath(base))
-
- def prefix_to_dir(self, prefix):
- """
- Converts a resource prefix to a directory name in the cache.
- """
- return path_to_cache_dir(prefix)
-
- def clear(self):
- """
- Clear the cache.
- """
- not_removed = []
- for fn in os.listdir(self.base):
- fn = os.path.join(self.base, fn)
- try:
- if os.path.islink(fn) or os.path.isfile(fn):
- os.remove(fn)
- elif os.path.isdir(fn):
- shutil.rmtree(fn)
- except Exception:
- not_removed.append(fn)
- return not_removed
-
-
-class EventMixin(object):
- """
- A very simple publish/subscribe system.
- """
- def __init__(self):
- self._subscribers = {}
-
- def add(self, event, subscriber, append=True):
- """
- Add a subscriber for an event.
-
- :param event: The name of an event.
- :param subscriber: The subscriber to be added (and called when the
- event is published).
- :param append: Whether to append or prepend the subscriber to an
- existing subscriber list for the event.
- """
- subs = self._subscribers
- if event not in subs:
- subs[event] = deque([subscriber])
- else:
- sq = subs[event]
- if append:
- sq.append(subscriber)
- else:
- sq.appendleft(subscriber)
-
- def remove(self, event, subscriber):
- """
- Remove a subscriber for an event.
-
- :param event: The name of an event.
- :param subscriber: The subscriber to be removed.
- """
- subs = self._subscribers
- if event not in subs:
- raise ValueError('No subscribers: %r' % event)
- subs[event].remove(subscriber)
-
- def get_subscribers(self, event):
- """
- Return an iterator for the subscribers for an event.
- :param event: The event to return subscribers for.
- """
- return iter(self._subscribers.get(event, ()))
-
- def publish(self, event, *args, **kwargs):
- """
- Publish a event and return a list of values returned by its
- subscribers.
-
- :param event: The event to publish.
- :param args: The positional arguments to pass to the event's
- subscribers.
- :param kwargs: The keyword arguments to pass to the event's
- subscribers.
- """
- result = []
- for subscriber in self.get_subscribers(event):
- try:
- value = subscriber(event, *args, **kwargs)
- except Exception:
- logger.exception('Exception during event publication')
- value = None
- result.append(value)
- logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
- event, args, kwargs, result)
- return result
-
-#
-# Simple sequencing
-#
-class Sequencer(object):
- def __init__(self):
- self._preds = {}
- self._succs = {}
- self._nodes = set() # nodes with no preds/succs
-
- def add_node(self, node):
- self._nodes.add(node)
-
- def remove_node(self, node, edges=False):
- if node in self._nodes:
- self._nodes.remove(node)
- if edges:
- for p in set(self._preds.get(node, ())):
- self.remove(p, node)
- for s in set(self._succs.get(node, ())):
- self.remove(node, s)
- # Remove empties
- for k, v in list(self._preds.items()):
- if not v:
- del self._preds[k]
- for k, v in list(self._succs.items()):
- if not v:
- del self._succs[k]
-
- def add(self, pred, succ):
- assert pred != succ
- self._preds.setdefault(succ, set()).add(pred)
- self._succs.setdefault(pred, set()).add(succ)
-
- def remove(self, pred, succ):
- assert pred != succ
- try:
- preds = self._preds[succ]
- succs = self._succs[pred]
- except KeyError: # pragma: no cover
- raise ValueError('%r not a successor of anything' % succ)
- try:
- preds.remove(pred)
- succs.remove(succ)
- except KeyError: # pragma: no cover
- raise ValueError('%r not a successor of %r' % (succ, pred))
-
- def is_step(self, step):
- return (step in self._preds or step in self._succs or
- step in self._nodes)
-
- def get_steps(self, final):
- if not self.is_step(final):
- raise ValueError('Unknown: %r' % final)
- result = []
- todo = []
- seen = set()
- todo.append(final)
- while todo:
- step = todo.pop(0)
- if step in seen:
- # if a step was already seen,
- # move it to the end (so it will appear earlier
- # when reversed on return) ... but not for the
- # final step, as that would be confusing for
- # users
- if step != final:
- result.remove(step)
- result.append(step)
- else:
- seen.add(step)
- result.append(step)
- preds = self._preds.get(step, ())
- todo.extend(preds)
- return reversed(result)
-
- @property
- def strong_connections(self):
- #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
- index_counter = [0]
- stack = []
- lowlinks = {}
- index = {}
- result = []
-
- graph = self._succs
-
- def strongconnect(node):
- # set the depth index for this node to the smallest unused index
- index[node] = index_counter[0]
- lowlinks[node] = index_counter[0]
- index_counter[0] += 1
- stack.append(node)
-
- # Consider successors
- try:
- successors = graph[node]
- except Exception:
- successors = []
- for successor in successors:
- if successor not in lowlinks:
- # Successor has not yet been visited
- strongconnect(successor)
- lowlinks[node] = min(lowlinks[node],lowlinks[successor])
- elif successor in stack:
- # the successor is in the stack and hence in the current
- # strongly connected component (SCC)
- lowlinks[node] = min(lowlinks[node],index[successor])
-
- # If `node` is a root node, pop the stack and generate an SCC
- if lowlinks[node] == index[node]:
- connected_component = []
-
- while True:
- successor = stack.pop()
- connected_component.append(successor)
- if successor == node: break
- component = tuple(connected_component)
- # storing the result
- result.append(component)
-
- for node in graph:
- if node not in lowlinks:
- strongconnect(node)
-
- return result
-
- @property
- def dot(self):
- result = ['digraph G {']
- for succ in self._preds:
- preds = self._preds[succ]
- for pred in preds:
- result.append(' %s -> %s;' % (pred, succ))
- for node in self._nodes:
- result.append(' %s;' % node)
- result.append('}')
- return '\n'.join(result)
-
-#
-# Unarchiving functionality for zip, tar, tgz, tbz, whl
-#
-
-ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
- '.tgz', '.tbz', '.whl')
-
-def unarchive(archive_filename, dest_dir, format=None, check=True):
-
- def check_path(path):
- if not isinstance(path, text_type):
- path = path.decode('utf-8')
- p = os.path.abspath(os.path.join(dest_dir, path))
- if not p.startswith(dest_dir) or p[plen] != os.sep:
- raise ValueError('path outside destination: %r' % p)
-
- dest_dir = os.path.abspath(dest_dir)
- plen = len(dest_dir)
- archive = None
- if format is None:
- if archive_filename.endswith(('.zip', '.whl')):
- format = 'zip'
- elif archive_filename.endswith(('.tar.gz', '.tgz')):
- format = 'tgz'
- mode = 'r:gz'
- elif archive_filename.endswith(('.tar.bz2', '.tbz')):
- format = 'tbz'
- mode = 'r:bz2'
- elif archive_filename.endswith('.tar'):
- format = 'tar'
- mode = 'r'
- else: # pragma: no cover
- raise ValueError('Unknown format for %r' % archive_filename)
- try:
- if format == 'zip':
- archive = ZipFile(archive_filename, 'r')
- if check:
- names = archive.namelist()
- for name in names:
- check_path(name)
- else:
- archive = tarfile.open(archive_filename, mode)
- if check:
- names = archive.getnames()
- for name in names:
- check_path(name)
- if format != 'zip' and sys.version_info[0] < 3:
- # See Python issue 17153. If the dest path contains Unicode,
- # tarfile extraction fails on Python 2.x if a member path name
- # contains non-ASCII characters - it leads to an implicit
- # bytes -> unicode conversion using ASCII to decode.
- for tarinfo in archive.getmembers():
- if not isinstance(tarinfo.name, text_type):
- tarinfo.name = tarinfo.name.decode('utf-8')
- archive.extractall(dest_dir)
-
- finally:
- if archive:
- archive.close()
-
-
-def zip_dir(directory):
- """zip a directory tree into a BytesIO object"""
- result = io.BytesIO()
- dlen = len(directory)
- with ZipFile(result, "w") as zf:
- for root, dirs, files in os.walk(directory):
- for name in files:
- full = os.path.join(root, name)
- rel = root[dlen:]
- dest = os.path.join(rel, name)
- zf.write(full, dest)
- return result
-
-#
-# Simple progress bar
-#
-
-UNITS = ('', 'K', 'M', 'G','T','P')
-
-
-class Progress(object):
- unknown = 'UNKNOWN'
-
- def __init__(self, minval=0, maxval=100):
- assert maxval is None or maxval >= minval
- self.min = self.cur = minval
- self.max = maxval
- self.started = None
- self.elapsed = 0
- self.done = False
-
- def update(self, curval):
- assert self.min <= curval
- assert self.max is None or curval <= self.max
- self.cur = curval
- now = time.time()
- if self.started is None:
- self.started = now
- else:
- self.elapsed = now - self.started
-
- def increment(self, incr):
- assert incr >= 0
- self.update(self.cur + incr)
-
- def start(self):
- self.update(self.min)
- return self
-
- def stop(self):
- if self.max is not None:
- self.update(self.max)
- self.done = True
-
- @property
- def maximum(self):
- return self.unknown if self.max is None else self.max
-
- @property
- def percentage(self):
- if self.done:
- result = '100 %'
- elif self.max is None:
- result = ' ?? %'
- else:
- v = 100.0 * (self.cur - self.min) / (self.max - self.min)
- result = '%3d %%' % v
- return result
-
- def format_duration(self, duration):
- if (duration <= 0) and self.max is None or self.cur == self.min:
- result = '??:??:??'
- #elif duration < 1:
- # result = '--:--:--'
- else:
- result = time.strftime('%H:%M:%S', time.gmtime(duration))
- return result
-
- @property
- def ETA(self):
- if self.done:
- prefix = 'Done'
- t = self.elapsed
- #import pdb; pdb.set_trace()
- else:
- prefix = 'ETA '
- if self.max is None:
- t = -1
- elif self.elapsed == 0 or (self.cur == self.min):
- t = 0
- else:
- #import pdb; pdb.set_trace()
- t = float(self.max - self.min)
- t /= self.cur - self.min
- t = (t - 1) * self.elapsed
- return '%s: %s' % (prefix, self.format_duration(t))
-
- @property
- def speed(self):
- if self.elapsed == 0:
- result = 0.0
- else:
- result = (self.cur - self.min) / self.elapsed
- for unit in UNITS:
- if result < 1000:
- break
- result /= 1000.0
- return '%d %sB/s' % (result, unit)
-
-#
-# Glob functionality
-#
-
-RICH_GLOB = re.compile(r'\{([^}]*)\}')
-_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
-_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
-
-
-def iglob(path_glob):
- """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
- if _CHECK_RECURSIVE_GLOB.search(path_glob):
- msg = """invalid glob %r: recursive glob "**" must be used alone"""
- raise ValueError(msg % path_glob)
- if _CHECK_MISMATCH_SET.search(path_glob):
- msg = """invalid glob %r: mismatching set marker '{' or '}'"""
- raise ValueError(msg % path_glob)
- return _iglob(path_glob)
-
-
-def _iglob(path_glob):
- rich_path_glob = RICH_GLOB.split(path_glob, 1)
- if len(rich_path_glob) > 1:
- assert len(rich_path_glob) == 3, rich_path_glob
- prefix, set, suffix = rich_path_glob
- for item in set.split(','):
- for path in _iglob(''.join((prefix, item, suffix))):
- yield path
- else:
- if '**' not in path_glob:
- for item in std_iglob(path_glob):
- yield item
- else:
- prefix, radical = path_glob.split('**', 1)
- if prefix == '':
- prefix = '.'
- if radical == '':
- radical = '*'
- else:
- # we support both
- radical = radical.lstrip('/')
- radical = radical.lstrip('\\')
- for path, dir, files in os.walk(prefix):
- path = os.path.normpath(path)
- for fn in _iglob(os.path.join(path, radical)):
- yield fn
-
-if ssl:
- from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
- CertificateError)
-
-
-#
-# HTTPSConnection which verifies certificates/matches domains
-#
-
- class HTTPSConnection(httplib.HTTPSConnection):
- ca_certs = None # set this to the path to the certs file (.pem)
- check_domain = True # only used if ca_certs is not None
-
- # noinspection PyPropertyAccess
- def connect(self):
- sock = socket.create_connection((self.host, self.port), self.timeout)
- if getattr(self, '_tunnel_host', False):
- self.sock = sock
- self._tunnel()
-
- context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- if hasattr(ssl, 'OP_NO_SSLv2'):
- context.options |= ssl.OP_NO_SSLv2
- if self.cert_file:
- context.load_cert_chain(self.cert_file, self.key_file)
- kwargs = {}
- if self.ca_certs:
- context.verify_mode = ssl.CERT_REQUIRED
- context.load_verify_locations(cafile=self.ca_certs)
- if getattr(ssl, 'HAS_SNI', False):
- kwargs['server_hostname'] = self.host
-
- self.sock = context.wrap_socket(sock, **kwargs)
- if self.ca_certs and self.check_domain:
- try:
- match_hostname(self.sock.getpeercert(), self.host)
- logger.debug('Host verified: %s', self.host)
- except CertificateError: # pragma: no cover
- self.sock.shutdown(socket.SHUT_RDWR)
- self.sock.close()
- raise
-
- class HTTPSHandler(BaseHTTPSHandler):
- def __init__(self, ca_certs, check_domain=True):
- BaseHTTPSHandler.__init__(self)
- self.ca_certs = ca_certs
- self.check_domain = check_domain
-
- def _conn_maker(self, *args, **kwargs):
- """
- This is called to create a connection instance. Normally you'd
- pass a connection class to do_open, but it doesn't actually check for
- a class, and just expects a callable. As long as we behave just as a
- constructor would have, we should be OK. If it ever changes so that
- we *must* pass a class, we'll create an UnsafeHTTPSConnection class
- which just sets check_domain to False in the class definition, and
- choose which one to pass to do_open.
- """
- result = HTTPSConnection(*args, **kwargs)
- if self.ca_certs:
- result.ca_certs = self.ca_certs
- result.check_domain = self.check_domain
- return result
-
- def https_open(self, req):
- try:
- return self.do_open(self._conn_maker, req)
- except URLError as e:
- if 'certificate verify failed' in str(e.reason):
- raise CertificateError('Unable to verify server certificate '
- 'for %s' % req.host)
- else:
- raise
-
- #
- # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
- # Middle proxy using HTTP listens on port 443, or an index mistakenly serves
- # HTML containing a http://xyz link when it should be https://xyz),
- # you can use the following handler class, which does not allow HTTP traffic.
- #
- # It works by inheriting from HTTPHandler - so build_opener won't add a
- # handler for HTTP itself.
- #
- class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
- def http_open(self, req):
- raise URLError('Unexpected HTTP request on what should be a secure '
- 'connection: %s' % req)
-
-#
-# XML-RPC with timeouts
-#
-class Transport(xmlrpclib.Transport):
- def __init__(self, timeout, use_datetime=0):
- self.timeout = timeout
- xmlrpclib.Transport.__init__(self, use_datetime)
-
- def make_connection(self, host):
- h, eh, x509 = self.get_host_info(host)
- if not self._connection or host != self._connection[0]:
- self._extra_headers = eh
- self._connection = host, httplib.HTTPConnection(h)
- return self._connection[1]
-
-if ssl:
- class SafeTransport(xmlrpclib.SafeTransport):
- def __init__(self, timeout, use_datetime=0):
- self.timeout = timeout
- xmlrpclib.SafeTransport.__init__(self, use_datetime)
-
- def make_connection(self, host):
- h, eh, kwargs = self.get_host_info(host)
- if not kwargs:
- kwargs = {}
- kwargs['timeout'] = self.timeout
- if not self._connection or host != self._connection[0]:
- self._extra_headers = eh
- self._connection = host, httplib.HTTPSConnection(h, None,
- **kwargs)
- return self._connection[1]
-
-
-class ServerProxy(xmlrpclib.ServerProxy):
- def __init__(self, uri, **kwargs):
- self.timeout = timeout = kwargs.pop('timeout', None)
- # The above classes only come into play if a timeout
- # is specified
- if timeout is not None:
- # scheme = splittype(uri) # deprecated as of Python 3.8
- scheme = urlparse(uri)[0]
- use_datetime = kwargs.get('use_datetime', 0)
- if scheme == 'https':
- tcls = SafeTransport
- else:
- tcls = Transport
- kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
- self.transport = t
- xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
-
-#
-# CSV functionality. This is provided because on 2.x, the csv module can't
-# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
-#
-
-def _csv_open(fn, mode, **kwargs):
- if sys.version_info[0] < 3:
- mode += 'b'
- else:
- kwargs['newline'] = ''
- # Python 3 determines encoding from locale. Force 'utf-8'
- # file encoding to match other forced utf-8 encoding
- kwargs['encoding'] = 'utf-8'
- return open(fn, mode, **kwargs)
-
-
-class CSVBase(object):
- defaults = {
- 'delimiter': str(','), # The strs are used because we need native
- 'quotechar': str('"'), # str in the csv API (2.x won't take
- 'lineterminator': str('\n') # Unicode)
- }
-
- def __enter__(self):
- return self
-
- def __exit__(self, *exc_info):
- self.stream.close()
-
-
-class CSVReader(CSVBase):
- def __init__(self, **kwargs):
- if 'stream' in kwargs:
- stream = kwargs['stream']
- if sys.version_info[0] >= 3:
- # needs to be a text stream
- stream = codecs.getreader('utf-8')(stream)
- self.stream = stream
- else:
- self.stream = _csv_open(kwargs['path'], 'r')
- self.reader = csv.reader(self.stream, **self.defaults)
-
- def __iter__(self):
- return self
-
- def next(self):
- result = next(self.reader)
- if sys.version_info[0] < 3:
- for i, item in enumerate(result):
- if not isinstance(item, text_type):
- result[i] = item.decode('utf-8')
- return result
-
- __next__ = next
-
-class CSVWriter(CSVBase):
- def __init__(self, fn, **kwargs):
- self.stream = _csv_open(fn, 'w')
- self.writer = csv.writer(self.stream, **self.defaults)
-
- def writerow(self, row):
- if sys.version_info[0] < 3:
- r = []
- for item in row:
- if isinstance(item, text_type):
- item = item.encode('utf-8')
- r.append(item)
- row = r
- self.writer.writerow(row)
-
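
A hedged usage sketch for the two CSV helpers above; the file name and row contents are only illustrative. On Python 3 both classes reduce to plain UTF-8 text I/O with a fixed dialect.

```python
rows = [
    ['distlib/util.py', 'sha256=0123abcd', '12345'],
    ['distlib/__init__.py', 'sha256=4567ef00', '678'],
]

with CSVWriter('RECORD.example') as writer:
    for row in rows:
        writer.writerow(row)

with CSVReader(path='RECORD.example') as reader:
    for row in reader:
        print(row)
```
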
-#
-# Configurator functionality
-#
-
-class Configurator(BaseConfigurator):
-
- value_converters = dict(BaseConfigurator.value_converters)
- value_converters['inc'] = 'inc_convert'
-
- def __init__(self, config, base=None):
- super(Configurator, self).__init__(config)
- self.base = base or os.getcwd()
-
- def configure_custom(self, config):
- def convert(o):
- if isinstance(o, (list, tuple)):
- result = type(o)([convert(i) for i in o])
- elif isinstance(o, dict):
- if '()' in o:
- result = self.configure_custom(o)
- else:
- result = {}
- for k in o:
- result[k] = convert(o[k])
- else:
- result = self.convert(o)
- return result
-
- c = config.pop('()')
- if not callable(c):
- c = self.resolve(c)
- props = config.pop('.', None)
- # Check for valid identifiers
- args = config.pop('[]', ())
- if args:
- args = tuple([convert(o) for o in args])
- items = [(k, convert(config[k])) for k in config if valid_ident(k)]
- kwargs = dict(items)
- result = c(*args, **kwargs)
- if props:
- for n, v in props.items():
- setattr(result, n, convert(v))
- return result
-
- def __getitem__(self, key):
- result = self.config[key]
- if isinstance(result, dict) and '()' in result:
- self.config[key] = result = self.configure_custom(result)
- return result
-
- def inc_convert(self, value):
- """Default converter for the inc:// protocol."""
- if not os.path.isabs(value):
- value = os.path.join(self.base, value)
- with codecs.open(value, 'r', encoding='utf-8') as f:
- result = json.load(f)
- return result
-
-
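
A hedged sketch of the dictionary shape that `configure_custom()` above understands, assuming the surrounding module (with its `BaseConfigurator`) is importable: `'()'` names a callable to instantiate, `'[]'` carries positional arguments, `'.'` carries attributes to set on the result, and the remaining keys become keyword arguments. The logging handler used here is only an illustrative callable.

```python
config = {
    'handler': {
        '()': 'logging.handlers.RotatingFileHandler',   # resolved, then called
        '[]': ['app.log'],                               # positional args
        'maxBytes': 1048576,                             # keyword args
        'backupCount': 3,
        '.': {'name': 'rotating'},                       # attributes set afterwards
    }
}

cfg = Configurator(config)
handler = cfg['handler']   # instantiated lazily on first access via __getitem__
```
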
-class SubprocessMixin(object):
- """
- Mixin for running subprocesses and capturing their output
- """
- def __init__(self, verbose=False, progress=None):
- self.verbose = verbose
- self.progress = progress
-
- def reader(self, stream, context):
- """
- Read lines from a subprocess' output stream and either pass to a progress
- callable (if specified) or write progress information to sys.stderr.
- """
- progress = self.progress
- verbose = self.verbose
- while True:
- s = stream.readline()
- if not s:
- break
- if progress is not None:
- progress(s, context)
- else:
- if not verbose:
- sys.stderr.write('.')
- else:
- sys.stderr.write(s.decode('utf-8'))
- sys.stderr.flush()
- stream.close()
-
- def run_command(self, cmd, **kwargs):
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, **kwargs)
- t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
- t1.start()
- t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
- t2.start()
- p.wait()
- t1.join()
- t2.join()
- if self.progress is not None:
- self.progress('done.', 'main')
- elif self.verbose:
- sys.stderr.write('done.\n')
- return p
-
-
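
A hedged usage sketch for the mixin above, assuming the surrounding module's `subprocess` and `threading` imports are in place; the command run is only illustrative.

```python
import sys


class Builder(SubprocessMixin):
    """Trivial consumer of the mixin; any class can mix it in the same way."""


b = Builder(verbose=True)
p = b.run_command([sys.executable, '-c', 'print("hello from a subprocess")'])
print('exit code:', p.returncode)
```
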
-def normalize_name(name):
-    """Normalize a Python package name a la PEP 503"""
- # https://www.python.org/dev/peps/pep-0503/#normalized-names
- return re.sub('[-_.]+', '-', name).lower()
-
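
A few worked examples of the normalization rule above: runs of hyphens, underscores and dots collapse to a single hyphen, and the result is lower-cased.

```python
assert normalize_name('Pillow') == 'pillow'
assert normalize_name('zope.interface') == 'zope-interface'
assert normalize_name('ruamel_yaml--clib') == 'ruamel-yaml-clib'
```
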
-# def _get_pypirc_command():
- # """
- # Get the distutils command for interacting with PyPI configurations.
- # :return: the command.
- # """
- # from distutils.core import Distribution
- # from distutils.config import PyPIRCCommand
- # d = Distribution()
- # return PyPIRCCommand(d)
-
-class PyPIRCFile(object):
-
- DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
- DEFAULT_REALM = 'pypi'
-
- def __init__(self, fn=None, url=None):
- if fn is None:
- fn = os.path.join(os.path.expanduser('~'), '.pypirc')
- self.filename = fn
- self.url = url
-
- def read(self):
- result = {}
-
- if os.path.exists(self.filename):
- repository = self.url or self.DEFAULT_REPOSITORY
-
- config = configparser.RawConfigParser()
- config.read(self.filename)
- sections = config.sections()
- if 'distutils' in sections:
- # let's get the list of servers
- index_servers = config.get('distutils', 'index-servers')
- _servers = [server.strip() for server in
- index_servers.split('\n')
- if server.strip() != '']
- if _servers == []:
- # nothing set, let's try to get the default pypi
- if 'pypi' in sections:
- _servers = ['pypi']
- else:
- for server in _servers:
- result = {'server': server}
- result['username'] = config.get(server, 'username')
-
- # optional params
- for key, default in (('repository', self.DEFAULT_REPOSITORY),
- ('realm', self.DEFAULT_REALM),
- ('password', None)):
- if config.has_option(server, key):
- result[key] = config.get(server, key)
- else:
- result[key] = default
-
- # work around people having "repository" for the "pypi"
- # section of their config set to the HTTP (rather than
- # HTTPS) URL
- if (server == 'pypi' and
- repository in (self.DEFAULT_REPOSITORY, 'pypi')):
- result['repository'] = self.DEFAULT_REPOSITORY
- elif (result['server'] != repository and
- result['repository'] != repository):
- result = {}
- elif 'server-login' in sections:
- # old format
- server = 'server-login'
- if config.has_option(server, 'repository'):
- repository = config.get(server, 'repository')
- else:
- repository = self.DEFAULT_REPOSITORY
- result = {
- 'username': config.get(server, 'username'),
- 'password': config.get(server, 'password'),
- 'repository': repository,
- 'server': server,
- 'realm': self.DEFAULT_REALM
- }
- return result
-
- def update(self, username, password):
- # import pdb; pdb.set_trace()
- config = configparser.RawConfigParser()
- fn = self.filename
- config.read(fn)
- if not config.has_section('pypi'):
- config.add_section('pypi')
- config.set('pypi', 'username', username)
- config.set('pypi', 'password', password)
- with open(fn, 'w') as f:
- config.write(f)
-
-def _load_pypirc(index):
- """
- Read the PyPI access configuration as supported by distutils.
- """
- return PyPIRCFile(url=index.url).read()
-
-def _store_pypirc(index):
- PyPIRCFile().update(index.username, index.password)
-
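
For reference, a hedged sketch of the `~/.pypirc` layout that `PyPIRCFile.read()` above parses; the credentials are placeholders, not values taken from the module.

```python
PYPIRC_EXAMPLE = """\
[distutils]
index-servers =
    pypi

[pypi]
repository = https://upload.pypi.org/legacy/
username = __token__
password = pypi-placeholder-token
"""

# Given such a file, PyPIRCFile().read() returns a dict along the lines of:
# {'server': 'pypi', 'username': '__token__', 'password': 'pypi-placeholder-token',
#  'repository': 'https://upload.pypi.org/legacy/', 'realm': 'pypi'}
```
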
-#
-# get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor
-# tweaks
-#
-
-def get_host_platform():
- """Return a string that identifies the current platform. This is used mainly to
- distinguish platform-specific build directories and platform-specific built
- distributions. Typically includes the OS name and version and the
- architecture (as supplied by 'os.uname()'), although the exact information
-    included depends on the OS; e.g. on Linux, the kernel version isn't
- particularly important.
-
- Examples of returned values:
- linux-i586
- linux-alpha (?)
- solaris-2.6-sun4u
-
- Windows will return one of:
- win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
- win32 (all others - specifically, sys.platform is returned)
-
- For other non-POSIX platforms, currently just returns 'sys.platform'.
-
- """
- if os.name == 'nt':
- if 'amd64' in sys.version.lower():
- return 'win-amd64'
- if '(arm)' in sys.version.lower():
- return 'win-arm32'
- if '(arm64)' in sys.version.lower():
- return 'win-arm64'
- return sys.platform
-
- # Set for cross builds explicitly
- if "_PYTHON_HOST_PLATFORM" in os.environ:
- return os.environ["_PYTHON_HOST_PLATFORM"]
-
- if os.name != 'posix' or not hasattr(os, 'uname'):
- # XXX what about the architecture? NT is Intel or Alpha,
- # Mac OS is M68k or PPC, etc.
- return sys.platform
-
- # Try to distinguish various flavours of Unix
-
- (osname, host, release, version, machine) = os.uname()
-
- # Convert the OS name to lowercase, remove '/' characters, and translate
- # spaces (for "Power Macintosh")
- osname = osname.lower().replace('/', '')
- machine = machine.replace(' ', '_').replace('/', '-')
-
- if osname[:5] == 'linux':
- # At least on Linux/Intel, 'machine' is the processor --
- # i386, etc.
- # XXX what about Alpha, SPARC, etc?
- return "%s-%s" % (osname, machine)
-
- elif osname[:5] == 'sunos':
- if release[0] >= '5': # SunOS 5 == Solaris 2
- osname = 'solaris'
- release = '%d.%s' % (int(release[0]) - 3, release[2:])
-            # We can't use 'platform.architecture()[0]' because of a
-            # bootstrap problem. We use a dict to get an error
-            # if something suspicious happens.
- bitness = {2147483647:'32bit', 9223372036854775807:'64bit'}
- machine += '.%s' % bitness[sys.maxsize]
- # fall through to standard osname-release-machine representation
- elif osname[:3] == 'aix':
- from _aix_support import aix_platform
- return aix_platform()
- elif osname[:6] == 'cygwin':
- osname = 'cygwin'
- rel_re = re.compile (r'[\d.]+', re.ASCII)
- m = rel_re.match(release)
- if m:
- release = m.group()
- elif osname[:6] == 'darwin':
- import _osx_support, distutils.sysconfig
- osname, release, machine = _osx_support.get_platform_osx(
- distutils.sysconfig.get_config_vars(),
- osname, release, machine)
-
- return '%s-%s-%s' % (osname, release, machine)
-
-
-_TARGET_TO_PLAT = {
- 'x86' : 'win32',
- 'x64' : 'win-amd64',
- 'arm' : 'win-arm32',
-}
-
-
-def get_platform():
- if os.name != 'nt':
- return get_host_platform()
- cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH')
- if cross_compilation_target not in _TARGET_TO_PLAT:
- return get_host_platform()
- return _TARGET_TO_PLAT[cross_compilation_target]
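
A hedged illustration of the values these two functions produce; the examples echo the docstring rather than covering every platform.

```python
print(get_host_platform())   # e.g. 'linux-x86_64' on 64-bit Linux, 'win-amd64' on 64-bit Windows
print(get_platform())        # identical, except for Windows cross-builds, where
                             # VSCMD_ARG_TGT_ARCH=arm maps to 'win-arm32'
```
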
diff --git a/spaces/TechnoByte/wd-v1-4-tags/README.md b/spaces/TechnoByte/wd-v1-4-tags/README.md
deleted file mode 100644
index f24ff6910ec48ab44d796cdd7d785e73314c916d..0000000000000000000000000000000000000000
--- a/spaces/TechnoByte/wd-v1-4-tags/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: WaifuDiffusion v1.4 Tags
-emoji: 👀
-colorFrom: blue
-colorTo: green
-sdk: gradio
-sdk_version: 3.41.0
-app_file: app.py
-pinned: true
-duplicated_from: NoCrypt/DeepDanbooru_string
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
\ No newline at end of file
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/utils/file_io.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/utils/file_io.py
deleted file mode 100644
index 46ee4ec31d04eee77976ff3edbbf84762a3409ed..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/utils/file_io.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathHandler
-from iopath.common.file_io import PathManager as PathManagerBase
-
-__all__ = ["PathManager", "PathHandler"]
-
-
-PathManager = PathManagerBase()
-"""
-This is a detectron2 project-specific PathManager.
-We try to stay away from global PathManager in fvcore as it
-introduces potential conflicts among other libraries.
-"""
-
-
-class Detectron2Handler(PathHandler):
- """
- Resolve anything that's hosted under detectron2's namespace.
- """
-
- PREFIX = "detectron2://"
- S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
-
- def _get_supported_prefixes(self):
- return [self.PREFIX]
-
- def _get_local_path(self, path, **kwargs):
- name = path[len(self.PREFIX) :]
- return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name, **kwargs)
-
- def _open(self, path, mode="r", **kwargs):
- return PathManager.open(self._get_local_path(path), mode, **kwargs)
-
-
-PathManager.register_handler(HTTPURLHandler())
-PathManager.register_handler(OneDrivePathHandler())
-PathManager.register_handler(Detectron2Handler())
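
A hedged usage sketch: once the handler is registered, a `detectron2://` path is rewritten to the public `dl.fbaipublicfiles.com` URL and fetched through the generic HTTP handler. The checkpoint path below is illustrative.

```python
# Downloads (and caches) the file behind the detectron2:// prefix, returning a
# local filesystem path.
local_path = PathManager.get_local_path("detectron2://ImageNetPretrained/MSRA/R-50.pkl")

# Or stream it directly:
with PathManager.open("detectron2://ImageNetPretrained/MSRA/R-50.pkl", "rb") as f:
    header = f.read(16)
```
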
diff --git a/spaces/TeraTTS/TTS/tokenizer/gruut/__init__.py b/spaces/TeraTTS/TTS/tokenizer/gruut/__init__.py
deleted file mode 100644
index cda8600e9b44c22748d7d71c12c345440c087332..0000000000000000000000000000000000000000
--- a/spaces/TeraTTS/TTS/tokenizer/gruut/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .tokenizer import Tokenizer
\ No newline at end of file
diff --git a/spaces/ThomasSimonini/ML-Agents-SnowballTarget/TemplateData/style.css b/spaces/ThomasSimonini/ML-Agents-SnowballTarget/TemplateData/style.css
deleted file mode 100644
index cdc3477fb8c1c824db96f451631bca7cde305923..0000000000000000000000000000000000000000
--- a/spaces/ThomasSimonini/ML-Agents-SnowballTarget/TemplateData/style.css
+++ /dev/null
@@ -1,105 +0,0 @@
-html {
- box-sizing: border-box;
-}
-*, *:before, *:after {
- box-sizing: inherit;
-}
-html, body {
- height: 100%;
-}
-canvas {
- display: block;
-}
-body {
- margin: 0;
-}
-#unity-container {
- width: 100%;
- height: 100%;
-}
-#unity-canvas {
- width: 100%;
- height: 100%;
- background: #231F20;
-}
-#loading-cover {
- position: absolute;
- top: 0;
- left: 0;
- width: 100%;
- height: 100%;
- display: flex;
- justify-content: center;
- align-items: center;
-}
-#unity-loading-bar {
- flex: 1 1 auto;
- display: flex;
- flex-direction: column;
- justify-content: center;
- align-items: center;
-}
-#unity-logo {
- text-align: center;
-}
-#unity-logo img {
- max-width: 80%;
-}
-#unity-progress-bar-empty {
- width: 80%;
- height: 24px;
- margin: 10px 20px 20px 10px;
- text-align: left;
- border: 1px solid white;
- padding: 2px;
-}
-#unity-progress-bar-full {
- width: 0%;
- height: 100%;
- background: #ffd21e;
-}
-.light #unity-progress-bar-empty {
- border-color: black;
-}
-.light #unity-progress-bar-full {
- background: black;
-}
-
-#unity-fullscreen-button {
- position: absolute;
- right: 10px;
- bottom: 10px;
- width: 38px;
- height: 38px;
- background: url('fullscreen-button.png') no-repeat center;
- background-size: contain;
-}
-
-.spinner,
-.spinner:after {
- border-radius: 50%;
- width: 5em;
- height: 5em;
-}
-.spinner {
- margin: 10px;
- font-size: 10px;
- position: relative;
- text-indent: -9999em;
- border-top: 1.1em solid rgba(255, 255, 255, 0.2);
- border-right: 1.1em solid rgba(255, 255, 255, 0.2);
- border-bottom: 1.1em solid rgba(255, 255, 255, 0.2);
- border-left: 1.1em solid #ffffff;
- transform: translateZ(0);
- animation: spinner-spin 1.1s infinite linear;
-}
-@keyframes spinner-spin {
- 0% {
- transform: rotate(0deg);
- }
- 100% {
- transform: rotate(360deg);
- }
-}
-
-
diff --git a/spaces/ViktorTsoi13/GPT4/README.md b/spaces/ViktorTsoi13/GPT4/README.md
deleted file mode 100644
index c3962478afe22f02f9abb61f9aa518f064d82640..0000000000000000000000000000000000000000
--- a/spaces/ViktorTsoi13/GPT4/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: GPT4
-emoji: 😻
-colorFrom: green
-colorTo: yellow
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py b/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py
deleted file mode 100644
index 9158d5f6260ec74bded95377d382387430d7cd70..0000000000000000000000000000000000000000
--- a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py
+++ /dev/null
@@ -1,43 +0,0 @@
-batch_size = 1
-modelname = "groundingdino"
-backbone = "swin_T_224_1k"
-position_embedding = "sine"
-pe_temperatureH = 20
-pe_temperatureW = 20
-return_interm_indices = [1, 2, 3]
-backbone_freeze_keywords = None
-enc_layers = 6
-dec_layers = 6
-pre_norm = False
-dim_feedforward = 2048
-hidden_dim = 256
-dropout = 0.0
-nheads = 8
-num_queries = 900
-query_dim = 4
-num_patterns = 0
-num_feature_levels = 4
-enc_n_points = 4
-dec_n_points = 4
-two_stage_type = "standard"
-two_stage_bbox_embed_share = False
-two_stage_class_embed_share = False
-transformer_activation = "relu"
-dec_pred_bbox_embed_share = True
-dn_box_noise_scale = 1.0
-dn_label_noise_ratio = 0.5
-dn_label_coef = 1.0
-dn_bbox_coef = 1.0
-embed_init_tgt = True
-dn_labelbook_size = 2000
-max_text_len = 256
-text_encoder_type = "bert-base-uncased"
-use_text_enhancer = True
-use_fusion_layer = True
-use_checkpoint = True
-use_transformer_ckpt = True
-use_text_cross_attention = True
-text_dropout = 0.0
-fusion_dropout = 0.0
-fusion_droppath = 0.1
-sub_sentence_present = True
diff --git a/spaces/Wauplin/gradio-user-history/src/gradio_user_history/_user_history.py b/spaces/Wauplin/gradio-user-history/src/gradio_user_history/_user_history.py
deleted file mode 100644
index b14ddb6828a7c23f0b665c64611a4938b3457bce..0000000000000000000000000000000000000000
--- a/spaces/Wauplin/gradio-user-history/src/gradio_user_history/_user_history.py
+++ /dev/null
@@ -1,406 +0,0 @@
-import json
-import os
-import shutil
-import warnings
-from datetime import datetime
-from functools import cache
-from pathlib import Path
-from typing import Callable, Dict, List, Tuple
-from uuid import uuid4
-
-import gradio as gr
-import numpy as np
-import requests
-from filelock import FileLock
-from PIL.Image import Image
-
-
-def setup(folder_path: str | Path | None = None) -> None:
- user_history = _UserHistory()
- user_history.folder_path = _resolve_folder_path(folder_path)
- user_history.initialized = True
-
-
-def render() -> None:
- user_history = _UserHistory()
-
- # initialize with default config
- if not user_history.initialized:
- print("Initializing user history with default config. Use `user_history.setup(...)` to customize folder_path.")
- setup()
-
- # Render user history tab
- gr.Markdown(
- "## Your past generations\n\nLog in to keep a gallery of your previous generations. Your history will be saved"
- " and available on your next visit. Make sure to export your images from time to time as this gallery may be"
- " deleted in the future."
- )
-
- if os.getenv("SYSTEM") == "spaces" and not os.path.exists("/data"):
- gr.Markdown(
- "**⚠️ Persistent storage is disabled, meaning your history will be lost if the Space gets restarted."
-            " Only the Space owner can set up a Persistent Storage. If you are not the Space owner, consider"
- " duplicating this Space to set your own storage.⚠️**"
- )
-
- with gr.Row():
- gr.LoginButton(min_width=250)
- gr.LogoutButton(min_width=250)
- refresh_button = gr.Button(
- "Refresh",
- icon="https://huggingface.co/spaces/Wauplin/gradio-user-history/resolve/main/assets/icon_refresh.png",
- )
- export_button = gr.Button(
- "Export",
- icon="https://huggingface.co/spaces/Wauplin/gradio-user-history/resolve/main/assets/icon_download.png",
- )
- delete_button = gr.Button(
- "Delete history",
- icon="https://huggingface.co/spaces/Wauplin/gradio-user-history/resolve/main/assets/icon_delete.png",
- )
-
- # "Export zip" row (hidden by default)
- with gr.Row():
- export_file = gr.File(file_count="single", file_types=[".zip"], label="Exported history", visible=False)
-
- # "Config deletion" row (hidden by default)
- with gr.Row():
- confirm_button = gr.Button("Confirm delete all history", variant="stop", visible=False)
- cancel_button = gr.Button("Cancel", visible=False)
-
- # Gallery
- gallery = gr.Gallery(
- label="Past images",
- show_label=True,
- elem_id="gradio_user_history_gallery",
- object_fit="contain",
- columns=5,
- height=600,
- preview=False,
- show_share_button=False,
- show_download_button=False,
- )
- gr.Markdown(
- "User history is powered by"
- " [Wauplin/gradio-user-history](https://huggingface.co/spaces/Wauplin/gradio-user-history). Integrate it to"
- " your own Space in just a few lines of code!"
- )
- gallery.attach_load_event(_fetch_user_history, every=None)
-
- # Interactions
- refresh_button.click(fn=_fetch_user_history, inputs=[], outputs=[gallery], queue=False)
- export_button.click(fn=_export_user_history, inputs=[], outputs=[export_file], queue=False)
-
- # Taken from https://github.com/gradio-app/gradio/issues/3324#issuecomment-1446382045
- delete_button.click(
- lambda: [gr.update(visible=True), gr.update(visible=True)],
- outputs=[confirm_button, cancel_button],
- queue=False,
- )
- cancel_button.click(
- lambda: [gr.update(visible=False), gr.update(visible=False)],
- outputs=[confirm_button, cancel_button],
- queue=False,
- )
- confirm_button.click(_delete_user_history).then(
- lambda: [gr.update(visible=False), gr.update(visible=False)],
- outputs=[confirm_button, cancel_button],
- queue=False,
- )
-
- # Admin section (only shown locally or when logged in as Space owner)
- _admin_section()
-
-
-def save_image(
- profile: gr.OAuthProfile | None,
- image: Image | np.ndarray | str | Path,
- label: str | None = None,
- metadata: Dict | None = None,
-):
- # Ignore images from logged out users
- if profile is None:
- return
- username = profile["preferred_username"]
-
- # Ignore images if user history not used
- user_history = _UserHistory()
- if not user_history.initialized:
- warnings.warn(
- "User history is not set in Gradio demo. Saving image is ignored. You must use `user_history.render(...)`"
- " first."
- )
- return
-
- # Copy image to storage
- image_path = _copy_image(image, dst_folder=user_history._user_images_path(username))
-
- # Save new image + metadata
- if metadata is None:
- metadata = {}
- if "datetime" not in metadata:
- metadata["datetime"] = str(datetime.now())
- data = {"path": str(image_path), "label": label, "metadata": metadata}
- with user_history._user_lock(username):
- with user_history._user_jsonl_path(username).open("a") as f:
- f.write(json.dumps(data) + "\n")
-
-
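
A hedged integration sketch (the `generate` function and its dummy model are illustrative, and the `gradio_user_history` import assumes the package name matches this module's directory): the public surface above is `render()`, drawn inside a tab of a Blocks app, plus `save_image()` called after each generation with the logged-in user's profile.

```python
import gradio as gr
from PIL import Image as PILImage

import gradio_user_history as gr_user_history


def generate(prompt: str, profile: gr.OAuthProfile | None):
    # Placeholder "model": a blank image stands in for a real pipeline.
    image = PILImage.new("RGB", (64, 64))
    gr_user_history.save_image(label=prompt, image=image, profile=profile)
    return image


with gr.Blocks() as demo:
    with gr.Tab("Generate"):
        prompt = gr.Textbox(label="Prompt")
        output = gr.Image()
        prompt.submit(generate, inputs=[prompt], outputs=[output])
    with gr.Tab("Past generations"):
        gr_user_history.render()

demo.launch()
```
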
-#############
-# Internals #
-#############
-
-
-class _UserHistory(object):
- _instance = None
- initialized: bool = False
- folder_path: Path
-
- def __new__(cls):
- # Using singleton pattern => we don't want to expose an object (more complex to use) but still want to keep
- # state between `render` and `save_image` calls.
- if cls._instance is None:
- cls._instance = super(_UserHistory, cls).__new__(cls)
- return cls._instance
-
- def _user_path(self, username: str) -> Path:
- path = self.folder_path / username
- path.mkdir(parents=True, exist_ok=True)
- return path
-
- def _user_lock(self, username: str) -> FileLock:
- """Ensure history is not corrupted if concurrent calls."""
- return FileLock(self.folder_path / f"{username}.lock") # lock outside of folder => better when exporting ZIP
-
- def _user_jsonl_path(self, username: str) -> Path:
- return self._user_path(username) / "history.jsonl"
-
- def _user_images_path(self, username: str) -> Path:
- path = self._user_path(username) / "images"
- path.mkdir(parents=True, exist_ok=True)
- return path
-
-
-def _fetch_user_history(profile: gr.OAuthProfile | None) -> List[Tuple[str, str]]:
- """Return saved history for that user, if it exists."""
- # Cannot load history for logged out users
- if profile is None:
- return []
- username = profile["preferred_username"]
-
- user_history = _UserHistory()
- if not user_history.initialized:
- warnings.warn("User history is not set in Gradio demo. You must use `user_history.render(...)` first.")
- return []
-
- with user_history._user_lock(username):
- # No file => no history saved yet
- jsonl_path = user_history._user_jsonl_path(username)
- if not jsonl_path.is_file():
- return []
-
- # Read history
- images = []
- for line in jsonl_path.read_text().splitlines():
- data = json.loads(line)
- images.append((data["path"], data["label"] or ""))
- return list(reversed(images))
-
-
-def _export_user_history(profile: gr.OAuthProfile | None) -> Dict | None:
- """Zip all history for that user, if it exists and return it as a downloadable file."""
- # Cannot load history for logged out users
- if profile is None:
- return None
- username = profile["preferred_username"]
-
- user_history = _UserHistory()
- if not user_history.initialized:
- warnings.warn("User history is not set in Gradio demo. You must use `user_history.render(...)` first.")
- return None
-
- # Zip history
- with user_history._user_lock(username):
- path = shutil.make_archive(
- str(_archives_path() / f"history_{username}"), "zip", user_history._user_path(username)
- )
-
- return gr.update(visible=True, value=path)
-
-
-def _delete_user_history(profile: gr.OAuthProfile | None) -> None:
- """Delete all history for that user."""
- # Cannot load history for logged out users
- if profile is None:
- return
- username = profile["preferred_username"]
-
- user_history = _UserHistory()
- if not user_history.initialized:
- warnings.warn("User history is not set in Gradio demo. You must use `user_history.render(...)` first.")
- return
-
- with user_history._user_lock(username):
- shutil.rmtree(user_history._user_path(username))
-
-
-####################
-# Internal helpers #
-####################
-
-
-def _copy_image(image: Image | np.ndarray | str | Path, dst_folder: Path) -> Path:
- """Copy image to the images folder."""
- # Already a path => copy it
- if isinstance(image, str):
- image = Path(image)
- if isinstance(image, Path):
- dst = dst_folder / f"{uuid4().hex}_{Path(image).name}" # keep file ext
- shutil.copyfile(image, dst)
- return dst
-
- # Still a Python object => serialize it
-    if isinstance(image, np.ndarray):
-        from PIL import Image as _pil_image  # `Image` imported above is the class, which has no `fromarray`
-        image = _pil_image.fromarray(image)
-    if isinstance(image, Image):
- dst = dst_folder / f"{uuid4().hex}.png"
- image.save(dst)
- return dst
-
- raise ValueError(f"Unsupported image type: {type(image)}")
-
-
-def _resolve_folder_path(folder_path: str | Path | None) -> Path:
- if folder_path is not None:
- return Path(folder_path).expanduser().resolve()
-
- if os.getenv("SYSTEM") == "spaces" and os.path.exists("/data"): # Persistent storage is enabled!
- return Path("/data") / "_user_history"
-
- # Not in a Space or Persistent storage not enabled => local folder
- return Path("_user_history").resolve()
-
-
-def _archives_path() -> Path:
- # Doesn't have to be on persistent storage as it's only used for download
- path = Path(__file__).parent / "_user_history_exports"
- path.mkdir(parents=True, exist_ok=True)
- return path
-
-
-#################
-# Admin section #
-#################
-
-
-def _admin_section() -> None:
- title = gr.Markdown()
- title.attach_load_event(_display_if_admin(), every=None)
-
-
-def _display_if_admin() -> Callable:
- def _inner(profile: gr.OAuthProfile | None) -> str:
- if profile is None:
- return ""
- if profile["preferred_username"] in _fetch_admins():
- return _admin_content()
- return ""
-
- return _inner
-
-
-def _admin_content() -> str:
- return f"""
-## Admin section
-
-Running on **{os.getenv("SYSTEM", "local")}** (id: {os.getenv("SPACE_ID")}). {_get_msg_is_persistent_storage_enabled()}
-
-Admins: {', '.join(_fetch_admins())}
-
-{_get_nb_users()} user(s), {_get_nb_images()} image(s)
-
-### Configuration
-
-History folder: *{_UserHistory().folder_path}*
-
-Exports folder: *{_archives_path()}*
-
-### Disk usage
-
-{_disk_space_warning_message()}
-"""
-
-
-def _get_nb_users() -> int:
- user_history = _UserHistory()
- if not user_history.initialized:
- return 0
- if user_history.folder_path is not None and user_history.folder_path.exists():
- return len([path for path in user_history.folder_path.iterdir() if path.is_dir()])
- return 0
-
-
-def _get_nb_images() -> int:
- user_history = _UserHistory()
- if not user_history.initialized:
- return 0
- if user_history.folder_path is not None and user_history.folder_path.exists():
- return len([path for path in user_history.folder_path.glob("*/images/*")])
- return 0
-
-
-def _get_msg_is_persistent_storage_enabled() -> str:
- if os.getenv("SYSTEM") == "spaces":
- if os.path.exists("/data"):
- return "Persistent storage is enabled."
- else:
- return (
- "Persistent storage is not enabled. This means that user histories will be deleted when the Space is"
- " restarted. Consider adding a Persistent Storage in your Space settings."
- )
- return ""
-
-
-def _disk_space_warning_message() -> str:
- user_history = _UserHistory()
- if not user_history.initialized:
- return ""
-
- message = ""
- if user_history.folder_path is not None:
- total, used, _ = _get_disk_usage(user_history.folder_path)
- message += f"History folder: **{used / 1e9 :.0f}/{total / 1e9 :.0f}GB** used ({100*used/total :.0f}%)."
-
- total, used, _ = _get_disk_usage(_archives_path())
- message += f"\n\nExports folder: **{used / 1e9 :.0f}/{total / 1e9 :.0f}GB** used ({100*used/total :.0f}%)."
-
- return f"{message.strip()}"
-
-
-def _get_disk_usage(path: Path) -> Tuple[int, int, int]:
- for path in [path] + list(path.parents): # first check target_dir, then each parents one by one
- try:
- return shutil.disk_usage(path)
- except OSError: # if doesn't exist or can't read => fail silently and try parent one
- pass
- return 0, 0, 0
-
-
-@cache
-def _fetch_admins() -> List[str]:
- # Running locally => fake user is admin
- if os.getenv("SYSTEM") != "spaces":
- return ["FakeGradioUser"]
-
- # Running in Space but no space_id => ???
- space_id = os.getenv("SPACE_ID")
- if space_id is None:
- return ["Unknown"]
-
- # Running in Space => try to fetch organization members
- # Otherwise, it's not an organization => namespace is the user
- namespace = space_id.split("/")[0]
- response = requests.get(f"https://huggingface.co/api/organizations/{namespace}/members")
- if response.status_code == 200:
- return sorted((member["user"] for member in response.json()), key=lambda x: x.lower())
- return [namespace]
diff --git a/spaces/Xenova/next-example-app/_next/static/chunks/b2db8554.501a8fbaf2ca19ba.js b/spaces/Xenova/next-example-app/_next/static/chunks/b2db8554.501a8fbaf2ca19ba.js
deleted file mode 100644
index 23247e60e0aeae7687578980ee84da1c07329d98..0000000000000000000000000000000000000000
--- a/spaces/Xenova/next-example-app/_next/static/chunks/b2db8554.501a8fbaf2ca19ba.js
+++ /dev/null
@@ -1,1679 +0,0 @@
-(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[15],{2018:function(module,__unused_webpack_exports,__webpack_require__){var process=__webpack_require__(2601);/*!
-* ONNX Runtime Web v1.14.0
-* Copyright (c) Microsoft Corporation. All rights reserved.
-* Licensed under the MIT License.
-*/!function(tr,tn){module.exports=tn(__webpack_require__(7731))}(self,__WEBPACK_EXTERNAL_MODULE__1670__=>(()=>{var __webpack_modules__={3474:(tr,tn,ti)=>{var to,ta=(to=(to="undefined"!=typeof document&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(tr){function tn(){return tF.buffer!=tL&&tX(tF.buffer),tR}function ta(){return tF.buffer!=tL&&tX(tF.buffer),tj}function ts(){return tF.buffer!=tL&&tX(tF.buffer),tM}function tu(){return tF.buffer!=tL&&tX(tF.buffer),tU}function tl(){return tF.buffer!=tL&&tX(tF.buffer),tV}tr=tr||{},tc||(tc=void 0!==tr?tr:{}),tc.ready=new Promise(function(tr,tn){tp=tr,tf=tn});var tc,tp,tf,td,th,tg,tb,tm,ty,t_=Object.assign({},tc),tv="./this.program",tx=(tr,tn)=>{throw tn},tw="object"==typeof window,tT="function"==typeof importScripts,tS="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof process.versions.node,tO=tc.ENVIRONMENT_IS_PTHREAD||!1,tA="";function tE(tr){return tc.locateFile?tc.locateFile(tr,tA):tA+tr}if(tS){let tr;tA=tT?ti(908).dirname(tA)+"/":"//",ty=()=>{tm||(tb=ti(1384),tm=ti(908))},td=function(tr,tn){return ty(),tr=tm.normalize(tr),tb.readFileSync(tr,tn?void 0:"utf8")},tg=tr=>((tr=td(tr,!0)).buffer||(tr=new Uint8Array(tr)),tr),th=(tr,tn,ti)=>{ty(),tr=tm.normalize(tr),tb.readFile(tr,function(tr,to){tr?ti(tr):tn(to.buffer)})},1{if(t1())throw process.exitCode=tr,tn;tn instanceof en||tk("exiting due to exception: "+tn),process.exit(tr)},tc.inspect=function(){return"[Emscripten Module object]"};try{tr=ti(9925)}catch(tr){throw console.error('The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?'),tr}ti.g.Worker=tr.Worker}else(tw||tT)&&(tT?tA=self.location.href:"undefined"!=typeof document&&document.currentScript&&(tA=document.currentScript.src),to&&(tA=to),tA=0!==tA.indexOf("blob:")?tA.substr(0,tA.replace(/[?#].*/,"").lastIndexOf("/")+1):"",tS||(td=tr=>{var tn=new XMLHttpRequest;return tn.open("GET",tr,!1),tn.send(null),tn.responseText},tT&&(tg=tr=>{var tn=new XMLHttpRequest;return tn.open("GET",tr,!1),tn.responseType="arraybuffer",tn.send(null),new Uint8Array(tn.response)}),th=(tr,tn,ti)=>{var to=new XMLHttpRequest;to.open("GET",tr,!0),to.responseType="arraybuffer",to.onload=()=>{200==to.status||0==to.status&&to.response?tn(to.response):ti()},to.onerror=ti,to.send(null)}));tS&&"undefined"==typeof performance&&(ti.g.performance=ti(6953).performance);var tI=console.log.bind(console),tP=console.warn.bind(console);tS&&(ty(),tI=tr=>tb.writeSync(1,tr+"\n"),tP=tr=>tb.writeSync(2,tr+"\n"));var tD,t$=tc.print||tI,tk=tc.printErr||tP;Object.assign(tc,t_),t_=null,tc.thisProgram&&(tv=tc.thisProgram),tc.quit&&(tx=tc.quit),tc.wasmBinary&&(tD=tc.wasmBinary);var tC=tc.noExitRuntime||!1;"object"!=typeof WebAssembly&&t5("no native wasm support detected");var tF,tN,tL,tR,tj,tM,tU,tV,tB=!1,tz="undefined"!=typeof TextDecoder?new TextDecoder("utf8"):void 0;function tG(tr,tn,ti){var to=(tn>>>=0)+ti;for(ti=tn;tr[ti]&&!(ti>=to);)++ti;if(16(ta=224==(240&ta)?(15&ta)<<12|ts<<6|tu:(7&ta)<<18|ts<<12|tu<<6|63&tr[tn++])?to+=String.fromCharCode(ta):(ta-=65536,to+=String.fromCharCode(55296|ta>>10,56320|1023&ta))}}else to+=String.fromCharCode(ta)}return to}function tH(tr,tn){return(tr>>>=0)?tG(ta(),tr,tn):""}function tW(tr,tn,ti,to){if(!(0>>=0;to=ti+to-1;for(var 
ts=0;ts=tu&&(tu=65536+((1023&tu)<<10)|1023&tr.charCodeAt(++ts)),127>=tu){if(ti>=to)break;tn[ti++>>>0]=tu}else{if(2047>=tu){if(ti+1>=to)break;tn[ti++>>>0]=192|tu>>6}else{if(65535>=tu){if(ti+2>=to)break;tn[ti++>>>0]=224|tu>>12}else{if(ti+3>=to)break;tn[ti++>>>0]=240|tu>>18,tn[ti++>>>0]=128|tu>>12&63}tn[ti++>>>0]=128|tu>>6&63}tn[ti++>>>0]=128|63&tu}}return tn[ti>>>0]=0,ti-ta}function tq(tr){for(var tn=0,ti=0;ti=to?tn++:2047>=to?tn+=2:55296<=to&&57343>=to?(tn+=4,++ti):tn+=3}return tn}function tX(tr){tL=tr,tc.HEAP8=tR=new Int8Array(tr),tc.HEAP16=new Int16Array(tr),tc.HEAP32=tM=new Int32Array(tr),tc.HEAPU8=tj=new Uint8Array(tr),tc.HEAPU16=new Uint16Array(tr),tc.HEAPU32=tU=new Uint32Array(tr),tc.HEAPF32=new Float32Array(tr),tc.HEAPF64=tV=new Float64Array(tr)}tO&&(tL=tc.buffer);var tY=tc.INITIAL_MEMORY||16777216;if(tO)tF=tc.wasmMemory,tL=tc.buffer;else if(tc.wasmMemory)tF=tc.wasmMemory;else if(!((tF=new WebAssembly.Memory({initial:tY/65536,maximum:65536,shared:!0})).buffer instanceof SharedArrayBuffer))throw tk("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"),tS&&console.log("(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)"),Error("bad memory");tF&&(tL=tF.buffer),tY=tL.byteLength,tX(tL);var tK,tZ=[],tJ=[],tQ=[],t0=[];function t1(){return tC||!1}function t2(){var tr=tc.preRun.shift();tZ.unshift(tr)}var t3,t4=0,t6=null,t8=null;function t5(tr){throw tO?postMessage({cmd:"onAbort",arg:tr}):tc.onAbort&&tc.onAbort(tr),tk(tr="Aborted("+tr+")"),tB=!0,tf(tr=new WebAssembly.RuntimeError(tr+". Build with -sASSERTIONS for more info.")),tr}function t7(){return t3.startsWith("data:application/octet-stream;base64,")}function t9(){var tr=t3;try{if(tr==t3&&tD)return new Uint8Array(tD);if(tg)return tg(tr);throw"both async and sync fetching of the wasm failed"}catch(tr){t5(tr)}}t3="ort-wasm-threaded.wasm",t7()||(t3=tE(t3));var er={};function en(tr){this.name="ExitStatus",this.message="Program terminated with exit("+tr+")",this.status=tr}function ei(tr){(tr=eu.Vb[tr])||t5(),eu.mc(tr)}function eo(tr){var tn=eu.Cc();if(!tn)return 6;eu.ac.push(tn),eu.Vb[tr.Ub]=tn,tn.Ub=tr.Ub;var ti={cmd:"run",start_routine:tr.Ic,arg:tr.zc,pthread_ptr:tr.Ub};return tn.$b=()=>{ti.time=performance.now(),tn.postMessage(ti,tr.Nc)},tn.loaded&&(tn.$b(),delete tn.$b),0}function ea(tr){if(tO)return eB(1,1,tr);t1()||(eu.oc(),tc.onExit&&tc.onExit(tr),tB=!0),tx(tr,new en(tr))}function es(tr,tn){if(!tn&&tO)throw ep(tr),"unwind";t1()||tO||(ri(),el(tQ),rn(0),eJ[1].length&&eQ(1,10),eJ[2].length&&eQ(2,10),eu.oc()),ea(tr)}var eu={Yb:[],ac:[],qc:[],Vb:{},fc:function(){tO&&eu.Ec()},Pc:function(){},Ec:function(){eu.receiveObjectTransfer=eu.Gc,eu.threadInitTLS=eu.pc,eu.setExitStatus=eu.nc,tC=!1},nc:function(){},oc:function(){for(var tr of Object.values(eu.Vb))eu.mc(tr);for(tr of eu.Yb)tr.terminate();eu.Yb=[]},mc:function(tr){var tn=tr.Ub;delete eu.Vb[tn],eu.Yb.push(tr),eu.ac.splice(eu.ac.indexOf(tr),1),tr.Ub=0,rl(tn)},Gc:function(){},pc:function(){eu.qc.forEach(tr=>tr())},Fc:function(tr,tn){tr.onmessage=ti=>{var to=(ti=ti.data).cmd;if(tr.Ub&&(eu.Bc=tr.Ub),ti.targetThread&&ti.targetThread!=e7()){var ta=eu.Vb[ti.Qc];ta?ta.postMessage(ti,ti.transferList):tk('Internal error! 
Worker sent a message "'+to+'" to target pthread '+ti.targetThread+", but that thread no longer exists!")}else"processProxyingQueue"===to?eL(ti.queue):"spawnThread"===to?eo(ti):"cleanupThread"===to?ei(ti.thread):"killThread"===to?(ti=ti.thread,to=eu.Vb[ti],delete eu.Vb[ti],to.terminate(),rl(ti),eu.ac.splice(eu.ac.indexOf(to),1),to.Ub=0):"cancelThread"===to?eu.Vb[ti.thread].postMessage({cmd:"cancel"}):"loaded"===to?(tr.loaded=!0,tn&&tn(tr),tr.$b&&(tr.$b(),delete tr.$b)):"print"===to?t$("Thread "+ti.threadId+": "+ti.text):"printErr"===to?tk("Thread "+ti.threadId+": "+ti.text):"alert"===to?alert("Thread "+ti.threadId+": "+ti.text):"setimmediate"===ti.target?tr.postMessage(ti):"onAbort"===to?tc.onAbort&&tc.onAbort(ti.arg):to&&tk("worker sent an unknown command "+to);eu.Bc=void 0},tr.onerror=tr=>{throw tk("worker sent an error! "+tr.filename+":"+tr.lineno+": "+tr.message),tr},tS&&(tr.on("message",function(tn){tr.onmessage({data:tn})}),tr.on("error",function(tn){tr.onerror(tn)}),tr.on("detachedExit",function(){})),tr.postMessage({cmd:"load",urlOrBlob:tc.mainScriptUrlOrBlob||to,wasmMemory:tF,wasmModule:tN})},yc:function(){var tr=tE("ort-wasm-threaded.worker.js");eu.Yb.push(new Worker(tr))},Cc:function(){return 0==eu.Yb.length&&(eu.yc(),eu.Fc(eu.Yb[0])),eu.Yb.pop()}};function el(tr){for(;0>2>>>0];rf(tn,tn-(tr=ts()[tr+48>>2>>>0])),rh(tn)};var ef=[];function ed(tr){var tn=ef[tr];return tn||(tr>=ef.length&&(ef.length=tr+1),ef[tr]=tn=tK.get(tr)),tn}tc.invokeEntryPoint=function(tr,tn){tr=ed(tr)(tn),t1()?eu.nc(tr):rc(tr)};var eh,eg,eb=[],em=0,ey=0;function e_(tr){this.Zb=tr,this.Sb=tr-24,this.xc=function(tr){tu()[this.Sb+4>>2>>>0]=tr},this.bc=function(){return tu()[this.Sb+4>>2>>>0]},this.wc=function(tr){tu()[this.Sb+8>>2>>>0]=tr},this.Dc=function(){return tu()[this.Sb+8>>2>>>0]},this.rc=function(){ts()[this.Sb>>2>>>0]=0},this.hc=function(tr){tr=tr?1:0,tn()[this.Sb+12>>0>>>0]=tr},this.uc=function(){return 0!=tn()[this.Sb+12>>0>>>0]},this.ic=function(tr){tr=tr?1:0,tn()[this.Sb+13>>0>>>0]=tr},this.kc=function(){return 0!=tn()[this.Sb+13>>0>>>0]},this.fc=function(tr,tn){this.cc(0),this.xc(tr),this.wc(tn),this.rc(),this.hc(!1),this.ic(!1)},this.sc=function(){Atomics.add(ts(),this.Sb>>2,1)},this.Hc=function(){return 1===Atomics.sub(ts(),this.Sb>>2,1)},this.cc=function(tr){tu()[this.Sb+16>>2>>>0]=tr},this.tc=function(){return tu()[this.Sb+16>>2>>>0]},this.vc=function(){if(rm(this.bc()))return tu()[this.Zb>>2>>>0];var tr=this.tc();return 0!==tr?tr:this.Zb}}function ev(tr){return rr(new e_(tr).Sb)}function ex(tr,tn,ti,to){return tO?eB(3,1,tr,tn,ti,to):ew(tr,tn,ti,to)}function ew(tr,tn,ti,to){if("undefined"==typeof SharedArrayBuffer)return tk("Current environment does not support SharedArrayBuffer, pthreads are not available!"),6;var ta=[];return tO&&0===ta.length?ex(tr,tn,ti,to):(tr={Ic:ti,Ub:tr,zc:to,Nc:ta},tO?(tr.Oc="spawnThread",postMessage(tr,ta),0):eo(tr))}function eT(tr,tn,ti){return tO?eB(4,1,tr,tn,ti):0}function eS(tr,tn){if(tO)return eB(5,1,tr,tn)}function eO(tr,tn){if(tO)return eB(6,1,tr,tn)}function eA(tr,tn,ti){if(tO)return eB(7,1,tr,tn,ti)}function eE(tr,tn,ti){return tO?eB(8,1,tr,tn,ti):0}function eI(tr,tn){if(tO)return eB(9,1,tr,tn)}function eP(tr,tn,ti){if(tO)return eB(10,1,tr,tn,ti)}function eD(tr,tn,ti,to){if(tO)return eB(11,1,tr,tn,ti,to)}function e$(tr,tn,ti,to){if(tO)return eB(12,1,tr,tn,ti,to)}function ek(tr,tn,ti,to){if(tO)return eB(13,1,tr,tn,ti,to)}function eC(tr){if(tO)return eB(14,1,tr)}function eF(tr,tn){if(tO)return eB(15,1,tr,tn)}function eN(tr,tn,ti){if(tO)return 
eB(16,1,tr,tn,ti)}function eL(tr){Atomics.store(ts(),tr>>2,1),e7()&&ru(tr),Atomics.compareExchange(ts(),tr>>2,1,0)}function eR(tr){return tu()[tr>>>2]+4294967296*ts()[tr+4>>>2]}function ej(tr,tn,ti,to,ta,ts){return tO?eB(17,1,tr,tn,ti,to,ta,ts):-52}function eM(tr,tn,ti,to,ta,ts){if(tO)return eB(18,1,tr,tn,ti,to,ta,ts)}function eU(tr){var ti=tq(tr)+1,to=e9(ti);return to&&tW(tr,tn(),to,ti),to}function eV(tr,tn,ti){function to(tr){return(tr=tr.toTimeString().match(/\(([A-Za-z ]+)\)$/))?tr[1]:"GMT"}if(tO)return eB(19,1,tr,tn,ti);var ta=(new Date).getFullYear(),tl=new Date(ta,0,1),tc=new Date(ta,6,1);ta=tl.getTimezoneOffset();var tp=tc.getTimezoneOffset(),tf=Math.max(ta,tp);ts()[tr>>2>>>0]=60*tf,ts()[tn>>2>>>0]=Number(ta!=tp),tr=to(tl),tn=to(tc),tr=eU(tr),tn=eU(tn),tp>2>>>0]=tr,tu()[ti+4>>2>>>0]=tn):(tu()[ti>>2>>>0]=tn,tu()[ti+4>>2>>>0]=tr)}function eB(tr,tn){var ti=arguments.length-2,to=arguments;return ec(()=>{for(var ta=rg(8*ti),ts=ta>>3,tu=0;tu>>0]=tc}return rs(tr,ti,ta,tn)})}tc.executeNotifiedProxyingQueue=eL,eg=tS?()=>{var tr=process.hrtime();return 1e3*tr[0]+tr[1]/1e6}:tO?()=>performance.now()-tc.__performance_now_clock_drift:()=>performance.now();var ez,eG=[],eH={};function eW(){if(!ez){var tr,tn={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:("object"==typeof navigator&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:tv||"./this.program"};for(tr in eH)void 0===eH[tr]?delete tn[tr]:tn[tr]=eH[tr];var ti=[];for(tr in tn)ti.push(tr+"="+tn[tr]);ez=ti}return ez}function eq(tr,ti){if(tO)return eB(20,1,tr,ti);var to=0;return eW().forEach(function(ta,ts){var tl=ti+to;for(ts=tu()[tr+4*ts>>2>>>0]=tl,tl=0;tl>0>>>0]=ta.charCodeAt(tl);tn()[ts>>0>>>0]=0,to+=ta.length+1}),0}function eX(tr,tn){if(tO)return eB(21,1,tr,tn);var ti=eW();tu()[tr>>2>>>0]=ti.length;var to=0;return ti.forEach(function(tr){to+=tr.length+1}),tu()[tn>>2>>>0]=to,0}function eY(tr){return tO?eB(22,1,tr):52}function eK(tr,tn,ti,to){return tO?eB(23,1,tr,tn,ti,to):52}function eZ(tr,tn,ti,to,ta){return tO?eB(24,1,tr,tn,ti,to,ta):70}var eJ=[null,[],[]];function eQ(tr,tn){var ti=eJ[tr];0===tn||10===tn?((1===tr?t$:tk)(tG(ti,0)),ti.length=0):ti.push(tn)}function e0(tr,tn,ti,to){if(tO)return eB(25,1,tr,tn,ti,to);for(var ts=0,tl=0;tl