diff --git a/spaces/17TheWord/RealESRGAN/FAQ.md b/spaces/17TheWord/RealESRGAN/FAQ.md deleted file mode 100644 index caa8c08cfe4302eb8812c823569e8a0be30fa49c..0000000000000000000000000000000000000000 --- a/spaces/17TheWord/RealESRGAN/FAQ.md +++ /dev/null @@ -1,9 +0,0 @@ -# FAQ - -1. **What is the difference of `--netscale` and `outscale`?** - -A: TODO. - -1. **How to select models?** - -A: TODO. diff --git a/spaces/17TheWord/RealESRGAN/inference_realesrgan.py b/spaces/17TheWord/RealESRGAN/inference_realesrgan.py deleted file mode 100644 index 6d5ff4d188faaa16c0131be69a08fd22fb608f80..0000000000000000000000000000000000000000 --- a/spaces/17TheWord/RealESRGAN/inference_realesrgan.py +++ /dev/null @@ -1,128 +0,0 @@ -import argparse -import cv2 -import glob -import os -from basicsr.archs.rrdbnet_arch import RRDBNet - -from realesrgan import RealESRGANer -from realesrgan.archs.srvgg_arch import SRVGGNetCompact - - -def main(): - """Inference demo for Real-ESRGAN. - """ - parser = argparse.ArgumentParser() - parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder') - parser.add_argument( - '-n', - '--model_name', - type=str, - default='RealESRGAN_x4plus', - help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus' - 'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2' - 'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4')) - parser.add_argument('-o', '--output', type=str, default='results', help='Output folder') - parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image') - parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image') - parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing') - parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding') - parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border') - parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face') - parser.add_argument('--half', action='store_true', help='Use half precision during inference') - parser.add_argument( - '--alpha_upsampler', - type=str, - default='realesrgan', - help='The upsampler for the alpha channels. Options: realesrgan | bicubic') - parser.add_argument( - '--ext', - type=str, - default='auto', - help='Image extension. 
Options: auto | jpg | png, auto means using the same extension as inputs') - args = parser.parse_args() - - # determine models according to model names - args.model_name = args.model_name.split('.')[0] - if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - netscale = 4 - elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) - netscale = 4 - elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) - netscale = 2 - elif args.model_name in [ - 'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2' - ]: # x2 VGG-style model (XS size) - model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu') - netscale = 2 - elif args.model_name in [ - 'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4' - ]: # x4 VGG-style model (XS size) - model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu') - netscale = 4 - - # determine model paths - model_path = os.path.join('.', args.model_name + '.pth') - if not os.path.isfile(model_path): - model_path = os.path.join('.', args.model_name + '.pth') - if not os.path.isfile(model_path): - raise ValueError(f'Model {args.model_name} does not exist.') - - # restorer - upsampler = RealESRGANer( - scale=netscale, - model_path=model_path, - model=model, - tile=args.tile, - tile_pad=args.tile_pad, - pre_pad=args.pre_pad, - half=args.half) - - if args.face_enhance: # Use GFPGAN for face enhancement - from gfpgan import GFPGANer - face_enhancer = GFPGANer( - model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth', - upscale=args.outscale, - arch='clean', - channel_multiplier=2, - bg_upsampler=upsampler) - os.makedirs(args.output, exist_ok=True) - - if os.path.isfile(args.input): - paths = [args.input] - else: - paths = sorted(glob.glob(os.path.join(args.input, '*'))) - - for idx, path in enumerate(paths): - imgname, extension = os.path.splitext(os.path.basename(path)) - print('Testing', idx, imgname) - - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) - if len(img.shape) == 3 and img.shape[2] == 4: - img_mode = 'RGBA' - else: - img_mode = None - - try: - if args.face_enhance: - _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) - else: - output, _ = upsampler.enhance(img, outscale=args.outscale) - except RuntimeError as error: - print('Error', error) - print('If you encounter CUDA out of memory, try to set --tile with a smaller number.') - else: - if args.ext == 'auto': - extension = extension[1:] - else: - extension = args.ext - if img_mode == 'RGBA': # RGBA images should be saved in png format - extension = 'png' - save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}') - cv2.imwrite(save_path, output) - - -if __name__ == '__main__': - main() diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 22 on Mac Two Ways to Experience the Game of Football.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 22 on Mac Two Ways to Experience the Game of Football.md deleted file mode 100644 index 
cc2e488c4f1fe7059bd0d725e4c573ab0d59f0f8..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 22 on Mac Two Ways to Experience the Game of Football.md +++ /dev/null @@ -1,50 +0,0 @@ - -

How to Play FIFA 22 on Mac: A Guide for Football Fans

-

FIFA 22 is the latest installment of the popular football simulation game series developed by EA Sports. It features new gameplay innovations, improved graphics, and more realistic animations powered by HyperMotion technology. However, if you are a Mac user, you might be wondering how to play FIFA 22 on your device, since the game is not officially supported by macOS. In this article, we will show you two ways to play FIFA 22 on Mac: using cloud gaming services or installing Windows 10 on your Mac.

-

What is cloud gaming?

-

Cloud gaming is a technology that allows you to stream games from remote servers to your device via the internet. You don't need to download or install the games on your device, and you don't need to worry about compatibility or hardware requirements. All you need is a stable internet connection and a compatible device, such as a laptop, tablet, smartphone, or smart TV.

-

fifa 22 mac


DOWNLOAD ►►►►► https://byltly.com/2uKzLJ



-

How to play FIFA 22 on Mac using cloud gaming services?

-

There are several cloud gaming services that offer FIFA 22 as part of their library. Two of the most popular ones are Boosteroid and Google Stadia. Here are the steps to play FIFA 22 on Mac using these services:

- -

What are the advantages and disadvantages of cloud gaming?

-

Cloud gaming has some advantages and disadvantages that you should consider before choosing this option. Here are some of them:

-

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Cadence Allegro Extracta Exe Downloa.md b/spaces/1gistliPinn/ChatGPT4/Examples/Cadence Allegro Extracta Exe Downloa.md deleted file mode 100644 index d476010b63525eb743c101671bdfc27ad28df90e..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Cadence Allegro Extracta Exe Downloa.md +++ /dev/null @@ -1,6 +0,0 @@ -

cadence allegro extracta exe downloa


Download ··· https://imgfil.com/2uy1iG



- -There are 2 possibilities for extracting data from Cadence Allegro (and also latest ... for ODB++ which means that you have to download a utility script from Valor. ... All they need to do is to run the executable CDC2FAB in this file structure. 4d29de3e1b
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/FaceRig Pro V1.312 (Inclu ALL DLC) Cheats Tool Download Free.md b/spaces/1gistliPinn/ChatGPT4/Examples/FaceRig Pro V1.312 (Inclu ALL DLC) Cheats Tool Download Free.md deleted file mode 100644 index 2613c980d46be2a5baf153a2ead2dd969602603c..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/FaceRig Pro V1.312 (Inclu ALL DLC) Cheats Tool Download Free.md +++ /dev/null @@ -1,6 +0,0 @@ -

FaceRig Pro v1.312 (Inclu ALL DLC) cheats tool download


Download 🔗 https://imgfil.com/2uxXK0



-
-Multimedia tools downloads - PluralEyes for Edius by Singular ... As for functionality, If all MS did was update the map data annually, I'd ... Acura Navigation Hack or Torrent DVD Downloads As an ... to dig FaceRig Pro v1.312 (Inclu Live2D Module & DLCs) TORRENT Cracked Free Download in magnet. 1fdad05405
-
-
-

diff --git a/spaces/1line/AutoGPT/tests/test_config.py b/spaces/1line/AutoGPT/tests/test_config.py deleted file mode 100644 index b472a24c78edd1f931a76c68e08ed544bbe61d98..0000000000000000000000000000000000000000 --- a/spaces/1line/AutoGPT/tests/test_config.py +++ /dev/null @@ -1,84 +0,0 @@ -from unittest import TestCase - -from autogpt.config import Config - - -class TestConfig(TestCase): - """ - Test cases for the Config class, which handles the configuration settings - for the AI and ensures it behaves as a singleton. - """ - - def setUp(self): - """ - Set up the test environment by creating an instance of the Config class. - """ - self.config = Config() - - def test_singleton(self): - """ - Test if the Config class behaves as a singleton by ensuring that two instances are the same. - """ - config2 = Config() - self.assertIs(self.config, config2) - - def test_initial_values(self): - """ - Test if the initial values of the Config class attributes are set correctly. - """ - self.assertFalse(self.config.debug_mode) - self.assertFalse(self.config.continuous_mode) - self.assertFalse(self.config.speak_mode) - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo") - self.assertEqual(self.config.smart_llm_model, "gpt-4") - self.assertEqual(self.config.fast_token_limit, 4000) - self.assertEqual(self.config.smart_token_limit, 8000) - - def test_set_continuous_mode(self): - """ - Test if the set_continuous_mode() method updates the continuous_mode attribute. - """ - self.config.set_continuous_mode(True) - self.assertTrue(self.config.continuous_mode) - - def test_set_speak_mode(self): - """ - Test if the set_speak_mode() method updates the speak_mode attribute. - """ - self.config.set_speak_mode(True) - self.assertTrue(self.config.speak_mode) - - def test_set_fast_llm_model(self): - """ - Test if the set_fast_llm_model() method updates the fast_llm_model attribute. - """ - self.config.set_fast_llm_model("gpt-3.5-turbo-test") - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo-test") - - def test_set_smart_llm_model(self): - """ - Test if the set_smart_llm_model() method updates the smart_llm_model attribute. - """ - self.config.set_smart_llm_model("gpt-4-test") - self.assertEqual(self.config.smart_llm_model, "gpt-4-test") - - def test_set_fast_token_limit(self): - """ - Test if the set_fast_token_limit() method updates the fast_token_limit attribute. - """ - self.config.set_fast_token_limit(5000) - self.assertEqual(self.config.fast_token_limit, 5000) - - def test_set_smart_token_limit(self): - """ - Test if the set_smart_token_limit() method updates the smart_token_limit attribute. - """ - self.config.set_smart_token_limit(9000) - self.assertEqual(self.config.smart_token_limit, 9000) - - def test_set_debug_mode(self): - """ - Test if the set_debug_mode() method updates the debug_mode attribute. - """ - self.config.set_debug_mode(True) - self.assertTrue(self.config.debug_mode) diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Arena of Valor Global and Enjoy Fast and Fun Matches in 15 Minutes or Less.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Arena of Valor Global and Enjoy Fast and Fun Matches in 15 Minutes or Less.md deleted file mode 100644 index eb49421396472f0e64ea818bd7e56c5b5263e98c..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Arena of Valor Global and Enjoy Fast and Fun Matches in 15 Minutes or Less.md +++ /dev/null @@ -1,155 +0,0 @@ - -

How to Download Arena of Valor Global: A Guide for MOBA Fans

-

If you are a fan of multiplayer online battle arena (MOBA) games, you might have heard of Arena of Valor, an epic 5v5 MOBA game developed by TiMi Studio Group and brought to you by Level Infinite. In this game, you can choose from over 100 unique heroes, team up with your friends, and compete in various modes and maps. Whether you prefer classic 5v5 combat, fast-paced 3v3 action, or solo adventure, there is something for everyone in Arena of Valor.

-

But did you know that there is a global version of this game that is available in more than 140 countries and regions? That's right, Arena of Valor Global is the ultimate version of this game that lets you play with players from all over the world, enjoy exclusive content and events, and experience the best performance and graphics. If you want to join the millions of players who are already enjoying this game, you might be wondering how to download it. Don't worry, we've got you covered. In this article, we will show you how to download Arena of Valor Global for your Android, iOS, or PC device. We will also share some tips and tricks for playing this game like a pro. So, without further ado, let's get started!

-

download arena of valor global


Download Zip https://urlin.us/2uSRVn



-

What is Arena of Valor Global?

-

Arena of Valor Global is a real-time 5v5 MOBA game that offers a variety of features, modes, and heroes for you to enjoy. Here are some of the highlights of this game:

- -

As you can see, Arena of Valor Global is a game that has something for everyone. Whether you are a casual player or a hardcore gamer, you will find yourself hooked on this game in no time. But before you can start playing, you need to download the game first. Let's see how you can do that for your device.

-

How to Download Arena of Valor Global for Android Devices

-

If you have an Android device, such as a smartphone or a tablet, you can download Arena of Valor Global from the Google Play Store. Here are the steps you need to follow:

-
    -
1. Open the Google Play Store app on your device.
2. Search for "Arena of Valor Global" in the search bar.
3. Tap on the game icon that appears in the results.
4. Tap on the "Install" button and wait for the game to download and install on your device.
5. Once the installation is complete, tap on the "Open" button to launch the game and start playing.
-

That's it! You have successfully downloaded Arena of Valor Global for your Android device. You can now enjoy the game and join millions of players from around the world. But what if you don't have access to the Google Play Store or you want to download the game from another source? Don't worry, there is another way to download the game using an APK file.

-

How to Download Arena of Valor Global APK File

-

An APK file is a file format that contains all the data and code needed to install an Android app on your device. You can download an APK file from various websites that offer them, such as APKPure, APKMirror, or Uptodown. However, you need to be careful when downloading APK files from unknown sources, as they may contain malware or viruses that can harm your device. To avoid this, you should always scan the APK file with an antivirus app before installing it. Here are the steps you need to follow to download Arena of Valor Global APK file:

-
    -
1. Go to a website that offers Arena of Valor Global APK file, such as APKPure.
2. Search for "Arena of Valor Global" in the search bar.
3. Select the game icon that appears in the results.
4. Tap on the "Download APK" button and wait for the file to download on your device.
5. Once the download is complete, locate the file in your device's storage and tap on it to install it. (If you want to double-check the download first, see the checksum sketch after these steps.)
6. If you see a warning message that says "For your security, your phone is not allowed to install unknown apps from this source", tap on "Settings" and enable the option that says "Allow from this source".
7. Go back to the installation screen and tap on "Install" to proceed with the installation.
8. Once the installation is complete, tap on "Open" to launch the game and start playing.
-
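As an optional extra check on top of the antivirus scan mentioned above, you can also compare the downloaded file's SHA-256 hash with a checksum published by a source you trust, when one is available. The short Python sketch below is only an illustration: the file name and the expected hash are placeholders, not values taken from any official page.

```python
import hashlib

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder values -- substitute the real file name and a checksum
# published by a source you trust.
apk_path = "arena-of-valor-global.apk"
expected = "0000000000000000000000000000000000000000000000000000000000000000"

actual = sha256_of_file(apk_path)
print("checksum matches" if actual == expected else f"checksum mismatch: {actual}")
```

A matching hash only tells you the file was not corrupted or swapped in transit; it is not a replacement for scanning the file with an antivirus app.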

Congratulations! You have successfully downloaded Arena of Valor Global APK file and installed it on your device. You can now enjoy the game and join millions of players from around the world. But what if you have an iOS device instead of an Android device? Don't worry, we have a solution for that too. Let's see how you can download Arena of Valor Global for your iOS device.

-

How to Download Arena of Valor Global for iOS Devices

-

If you have an iOS device, such as an iPhone or an iPad, you can download Arena of Valor Global from the App Store. Here are the steps you need to follow:

-

How to download arena of valor global on android
-Arena of valor global apk download latest version
-Download arena of valor global for pc windows 10
-Arena of valor global tier list 2023
-Best heroes in arena of valor global
-Arena of valor global update patch notes
-Arena of valor global vs mobile legends
-Arena of valor global server status
-Arena of valor global discord server
-Arena of valor global reddit community
-Arena of valor global gameplay tips and tricks
-Arena of valor global review and rating
-Arena of valor global download size and requirements
-Arena of valor global support and customer service
-Arena of valor global free redeem codes 2023
-Arena of valor global events and rewards
-Arena of valor global skins and costumes
-Arena of valor global characters and abilities
-Arena of valor global guides and tutorials
-Arena of valor global news and updates
-Arena of valor global esports and tournaments
-Arena of valor global live stream and videos
-Arena of valor global memes and jokes
-Arena of valor global fan art and wallpapers
-Arena of valor global merchandise and products
-How to download arena of valor global on ios
-Arena of valor global ios app store link
-Download arena of valor global for mac os x
-Arena of valor global crossplay and cross platform
-How to play arena of valor global with friends
-How to join a guild in arena of valor global
-How to rank up in arena of valor global
-How to get more gold and gems in arena of valor global
-How to unlock more heroes in arena of valor global
-How to master a hero in arena of valor global
-How to counter a hero in arena of valor global
-How to build a hero in arena of valor global
-How to report a player in arena of valor global
-How to change your name in arena of valor global
-How to change your region in arena of valor global
-How to contact level infinite in arena of valor global
-How to delete your account in arena of valor global
-How to reinstall arena of valor global without losing data
-How to fix lag and connection issues in arena of valor global
-How to enable voice chat in arena of valor global
-How to mute a player in arena of valor global
-How to customize your controls in arena of valor global
-How to switch between modes in arena of valor global
-How to watch replays in arena of valor global

-
    -
1. Open the App Store app on your device.
2. Search for "Arena of Valor Global" in the search bar.
3. Tap on the game icon that appears in the results.
4. Tap on the "Get" button and wait for the game to download and install on your device.
5. Once the installation is complete, tap on the game icon on your home screen to launch the game and start playing.
-

That's it! You have successfully downloaded Arena of Valor Global for your iOS device. You can now enjoy the game and join millions of players from around the world. But what if you don't have access to the App Store or you want to download the game from another source? Don't worry, there is another way to download the game using an IPA file.

-

How to Download Arena of Valor Global IPA File

-

An IPA file is a file format that contains all the data and code needed to install an iOS app on your device. You can download an IPA file from various websites that offer them, such as Panda Helper, AppValley, or TweakBox. However, you need to be careful when downloading IPA files from unknown sources, as they may contain malware or viruses that can harm your device. To avoid this, you should always scan the IPA file with an antivirus app before installing it. Here are the steps you need to follow to download Arena of Valor Global IPA file:

-
    -
1. Go to a website that offers Arena of Valor Global IPA file, such as Panda Helper.
2. Search for "Arena of Valor Global" in the search bar.
3. Select the game icon that appears in the results.
4. Tap on the "Download" button and wait for the file to download on your device.
5. Once the download is complete, locate the file in your device's storage and tap on it to install it.
6. If you see a warning message that says "Untrusted Enterprise Developer", tap on "Cancel" and go to your device's settings.
7. Go to General > Profiles & Device Management and find the profile that belongs to the app you just installed.
8. Tap on the profile and then tap on "Trust" to allow the app to run on your device.
9. Go back to your home screen and tap on the game icon to launch the game and start playing.
-

Congratulations! You have successfully downloaded Arena of Valor Global IPA file and installed it on your device. You can now enjoy the game and join millions of players from around the world. But what if you want to play the game on a bigger screen and with better controls? Don't worry, we have a solution for that too. Let's see how you can download Arena of Valor Global for your PC.

-

How to Download Arena of Valor Global for PC

-

If you want to play Arena of Valor Global on your PC, you will need an emulator. An emulator is a software that allows you to run mobile apps on your computer. There are many emulators available for playing mobile games on PC, but one of the best ones is BlueStacks. BlueStacks is a free and powerful emulator that offers high-quality performance and graphics, easy controls, and a wide range of features. Here are the steps you need to follow to download Arena of Valor Global for PC using BlueStacks emulator:

-

How to Download and Install BlueStacks Emulator

-
    -
1. Go to BlueStacks official website and click on the "Download BlueStacks" button.
2. Wait for the file to download on your PC and then double-click on it to run it.
3. Follow the instructions on the screen to install BlueStacks emulator on your PC.
4. Once the installation is complete, launch BlueStacks emulator from your desktop or start menu.
-

How to Download and Install Arena of Valor Global on BlueStacks Emulator

-
    -
1. In BlueStacks emulator, go to Google Play Store and sign in with your Google account.
2. Search for "Arena of Valor Global" in the search bar.
3. Select the game icon that appears in the results.
4. Click on the "Install" button and wait for the game to download and install on BlueStacks emulator.
5. Once the installation is complete, click on the game icon on the home screen of BlueStacks emulator to launch the game and start playing.
-

Congratulations! You have successfully downloaded Arena of Valor Global for PC using BlueStacks emulator. You can now enjoy the game on a bigger screen and with better controls. You can also customize your keyboard and mouse settings, record your gameplay, and stream your matches to your friends and fans. But before you jump into the game, you might want to learn some tips and tricks for playing Arena of Valor Global like a pro. Let's see what they are.

-

Tips and Tricks for Playing Arena of Valor Global

-

Arena of Valor Global is a game that requires skill, strategy, and teamwork. If you want to improve your gameplay and win more matches, you need to master some tips and tricks that will give you an edge over your opponents. Here are some of them:

-

Choose Your Role and Hero Wisely

-

In Arena of Valor Global, there are five main roles that you can choose from: Tank, Warrior, Assassin, Mage, and Support. Each role has its own strengths, weaknesses, and responsibilities in the game. You should choose a role that suits your playstyle and preference, and then select a hero that fits that role. For example, if you like to initiate fights and protect your teammates, you should choose a Tank role and a hero like Maloch or Thane. If you like to deal massive damage and eliminate enemies quickly, you should choose an Assassin role and a hero like Quillen or Butterfly. You should also consider your team composition and the enemy team composition when choosing your role and hero. You should try to balance your team with different roles and heroes that can complement each other and counter the enemy team.

-

Communicate and Coordinate with Your Teammates

-

Arena of Valor Global is a team-based game that requires communication and coordination with your teammates. You should use the chat and ping system to communicate with your teammates effectively. You can use the chat to type messages or use voice chat to talk to your teammates. You can also use the ping system to send signals to your teammates, such as "Attack", "Retreat", "Gather", or "Enemy Missing". You should communicate with your teammates about your strategy, objectives, enemy movements, item builds, cooldowns, and other important information. You should also listen to your teammates' suggestions and feedback, and cooperate with them in fights and objectives. By communicating and coordinating with your teammates, you can increase your chances of winning the game.

-

Learn from the Pros and Watch Live Streams

-

Arena of Valor Global is a game that has a competitive scene with professional players and teams from around the world. If you want to learn from the pros and watch live streams of their matches, you can do so in the game itself. You can go to the "Watch" tab in the game menu and select from various live streams of professional players and teams. You can also watch replays of previous matches or highlights of epic moments. By watching live streams of pros, you can learn from their strategies, techniques, item builds, hero choices, map awareness, positioning, teamwork, and more. You can also interact with them through chat or send them gifts to show your support. By learning from the pros and watching live streams, you can improve your gameplay and skills in Arena of Valor Global.

-

Conclusion

-

Arena of Valor Global is an epic 5v5 MOBA game that offers a variety of features, modes, and heroes for you to enjoy. Whether you have an Android device, an iOS device, or a PC device, you can download this game easily using our guide above. You can also use our tips and tricks to play this game like a pro and win more matches. Arena of Valor Global is a game that will keep you entertained and challenged for hours. So, what are you waiting for? Download Arena of Valor Global today and join the global community of MOBA fans. You won't regret it!

-

FAQs

-

Here are some frequently asked questions and answers about Arena of Valor Global:

-

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Chess Board Offline 2 Player - A Simple and Fun Chess App for Everyone.md b/spaces/1phancelerku/anime-remove-background/Chess Board Offline 2 Player - A Simple and Fun Chess App for Everyone.md deleted file mode 100644 index e98b396d4f990512a3ca0cc227a5a142d0ad0001..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Chess Board Offline 2 Player - A Simple and Fun Chess App for Everyone.md +++ /dev/null @@ -1,110 +0,0 @@ -
-

Chess Board Offline 2 Player APK: A Free and Fun Way to Play Chess with a Friend

-

Chess is one of the oldest and most popular board games in the world. It is a game of strategy, logic, and skill that can challenge your mind and entertain you for hours. But what if you want to play chess with a friend without internet connection or creating an account? What if you want to play chess on your phone or tablet without installing multiple apps? What if you want to save and share your games with other chess enthusiasts?

-

If you are looking for a simple and convenient way to play chess with a friend on one screen and completely offline, then you should try Chess Board Offline 2 Player APK. This is a free app that lets you play chess on a virtual board with a friend or by yourself. You can also use a chess clock, create custom setups, save unlimited games, and export them in PGN format. In this article, we will tell you more about this app and how to download and install it on your device.

-

chess board offline 2 player apk


Download Zip https://jinyurl.com/2uNO1X



-

What is Chess Board Offline 2 Player APK?

-

A virtual chess board for two players

-

Chess Board Offline 2 Player APK is an app that simulates a real chess board on your screen. You can play chess with a friend by taking turns on the same device. You can also play by yourself against an imaginary opponent or practice different moves and scenarios. The app has a standard 8x8 board with all the pieces and rules of chess. You can move the pieces by dragging them or tapping them.

-

A free and offline app

-

One of the best features of Chess Board Offline 2 Player APK is that it is completely free and offline. You don't need to pay anything to download or use the app. You also don't need to be online or create an account to play chess. You can play anytime and anywhere without worrying about internet connection or data usage. You can also enjoy the app without any ads or in-app purchases.

-

A simple and user-friendly interface

-

Another great feature of Chess Board Offline 2 Player APK is that it has a simple and user-friendly interface. The app has a minimalist design that focuses on the chess board and the pieces. The app also has easy-to-use controls and settings that let you customize your game. You can choose between different board colors, piece styles, sound effects, and languages. You can also enable or disable hints, undo moves, flip board, rotate screen, and more.

-

Why Should You Download Chess Board Offline 2 Player APK?

-

To enjoy chess without internet or accounts

-

If you love chess but don't have access to internet or don't want to create an account on other apps, then Chess Board Offline 2 Player APK is perfect for you. You can play chess with a friend on one screen without any hassle or interruption. You can also play by yourself without any pressure or competition. You can have fun and relax with this app.

-

To practice chess openings and strategies

-

If you want to improve your chess skills or learn new chess openings and strategies, then Chess Board Offline 2 Player APK can help you. You can use the app to practice different moves and scenarios on the board. You can also create custom setups and test your skills. The app has a hint feature that can suggest the best move for you. You can also undo your moves and try different options. The app can help you learn from your mistakes and improve your chess game.

-

To save and export your games in PGN format

-

If you want to save and share your chess games with other chess enthusiasts, then Chess Board Offline 2 Player APK can help you. The app allows you to save unlimited games on your device. You can also export your games in PGN format, which is a standard format for chess games. You can use PGN files to view, analyze, or replay your games on other apps or websites. You can also share your PGN files with your friends or online communities.
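For example, a PGN file exported from the app can be opened by any PGN-aware tool. The short Python sketch below uses the third-party python-chess library (our own choice of tool, not something bundled with the app) to read the first game from an exported file and replay its moves; the file name my_game.pgn is a placeholder.

```python
import chess.pgn  # third-party library: pip install python-chess

# Placeholder path to a PGN file exported from the app.
with open("my_game.pgn", encoding="utf-8") as pgn_file:
    game = chess.pgn.read_game(pgn_file)

print(game.headers.get("White", "?"), "vs", game.headers.get("Black", "?"))

board = game.board()
for move in game.mainline_moves():
    board.push(move)  # replay the game move by move

print(board)  # print the final position as text
```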

-

How to Download and Install Chess Board Offline 2 Player APK?

-

Step 1: Go to the official website or Google Play Store

-

The easiest way to download Chess Board Offline 2 Player APK is to go to the official website of the app or the Google Play Store. You can use the following links to access them:

-

chess board offline 2 player apk download
-chess board offline 2 player apk free
-chess board offline 2 player apk mod
-chess board offline 2 player apk android
-chess board offline 2 player apk latest version
-chess board offline 2 player apk for pc
-chess board offline 2 player apk no ads
-chess board offline 2 player apk full
-chess board offline 2 player apk premium
-chess board offline 2 player apk pro
-chess board offline 2 player apk best
-chess board offline 2 player apk review
-chess board offline 2 player apk online
-chess board offline 2 player apk multiplayer
-chess board offline 2 player apk with friends
-chess board offline 2 player apk without internet
-chess board offline 2 player apk unlimited coins
-chess board offline 2 player apk hack
-chess board offline 2 player apk cheat
-chess board offline 2 player apk cracked
-chess board offline 2 player apk unlocked
-chess board offline 2 player apk update
-chess board offline 2 player apk new features
-chess board offline 2 player apk bug fixes
-chess board offline 2 player apk improvements
-chess board offline 2 player apk tips and tricks
-chess board offline 2 player apk tutorial
-chess board offline 2 player apk guide
-chess board offline 2 player apk how to play
-chess board offline 2 player apk rules and regulations
-chess board offline 2 player apk game modes
-chess board offline 2 player apk difficulty levels
-chess board offline 2 player apk themes and skins
-chess board offline 2 player apk sound and music
-chess board offline 2 player apk graphics and animation
-chess board offline 2 player apk performance and optimization
-chess board offline 2 player apk compatibility and requirements
-chess board offline 2 player apk installation and setup
-chess board offline 2 player apk feedback and support
-chess board offline 2 player apk rating and reviews

- - - - - - - - - -
Official website | Google Play Store
Chess Board Offline 2 Player APK | Chess Board Offline 2 Player - Apps on Google Play
-

You can also scan the QR codes below to download the app:

- - - - - - - - - -
Official website | Google Play Store
QR code for official website | QR code for Google Play Store
-

Step 2: Click on the download button or install button

-

Once you are on the official website or the Google Play Store, you will see a download button or an install button. Click on it to start downloading the app. The app is about 5 MB in size, so it should not take long to download.

-

Step 3: Allow unknown sources if prompted

-

If you are downloading the app from the official website, you may need to allow unknown sources on your device. This is because the app is not from the Google Play Store and may not be verified by Google. To allow unknown sources, follow these steps:

- -

If you are downloading the app from the Google Play Store, you don't need to do this step.

-

Step 4: Open the app and start playing

-

Once the app is downloaded and installed, you can open it and start playing chess with a friend or by yourself. You will see a welcome screen that shows you how to use the app and its features. You can also access the settings menu to customize your game. Enjoy playing chess with Chess Board Offline 2 Player APK!

-

Conclusion

-

Chess Board Offline 2 Player APK is a free and fun way to play chess with a friend on one screen and completely offline. You can also play by yourself and practice different moves and scenarios. The app has a simple and user-friendly interface that lets you customize your game. You can also save and export your games in PGN format and share them with other chess enthusiasts. If you love chess and want to play it anytime and anywhere without internet or accounts, then you should download Chess Board Offline 2 Player APK today!

-

FAQs

-

Q: Is Chess Board Offline 2 Player APK safe?

-

A: Yes, Chess Board Offline 2 Player APK is safe to download and use. The app does not require any permissions or access to your device data. The app also does not contain any ads or in-app purchases that may harm your device or privacy.

-

Q: Can I play chess online with Chess Board Offline 2 Player APK?

-

A: No, Chess Board Offline 2 Player APK is an offline app that does not support online play. You can only play chess with a friend on one screen or by yourself against an imaginary opponent. If you want to play chess online with other players, you will need to use a different app that supports online play.

-

Q: Can I play chess with different difficulty levels with Chess Board Offline 2 Player APK?

-

A: No, Chess Board Offline 2 Player APK does not have different difficulty levels or artificial intelligence. The app is designed for playing chess with a friend or by yourself. You can adjust the level of challenge by choosing your opponent or creating custom setups. If you want to play chess with different difficulty levels or artificial intelligence, you will need to use a different app that has these features.

-

Q: Can I play chess with different variants or rules with Chess Board Offline 2 Player APK?

-

A: No, Chess Board Offline 2 Player APK only supports the standard chess rules and variants. The app does not have options for changing the board size, the number of pieces, the movement of pieces, or the game objectives. The app follows the official rules of chess as defined by the World Chess Federation (FIDE). If you want to play chess with different variants or rules, you will need to use a different app that has these options.

-

Q: Can I play chess with other apps or devices with Chess Board Offline 2 Player APK?

-

A: Yes, you can play chess with other apps or devices with Chess Board Offline 2 Player APK. The app allows you to export your games in PGN format, which is a standard format for chess games. You can use PGN files to view, analyze, or replay your games on other apps or devices that support PGN files. You can also share your PGN files with your friends or online communities that use PGN files.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Incredibox Now and Join the Merry Crew of Beatboxers on Your Android Device.md b/spaces/1phancelerku/anime-remove-background/Download Incredibox Now and Join the Merry Crew of Beatboxers on Your Android Device.md deleted file mode 100644 index a297684bb1c976f3daf7d3265bbe697eadb566fb..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Incredibox Now and Join the Merry Crew of Beatboxers on Your Android Device.md +++ /dev/null @@ -1,131 +0,0 @@ - -

Incredibox Free Download Android: How to Create Your Own Music with a Merry Crew of Beatboxers

-

Do you love music and want to create your own songs with a simple and fun app? Do you want to explore different musical genres and mix them together to create unique sounds? Do you want to share your creations with the world and get feedback from other users? If you answered yes to any of these questions, then you should try Incredibox, a music app that lets you create your own music with the help of a merry crew of beatboxers.

-

In this article, we will tell you everything you need to know about Incredibox, how to download it for free on your Android device, how to play it, and why you should play it. Let's get started!

-

incredibox free download android


DOWNLOAD » https://jinyurl.com/2uNLA5



-

What is Incredibox?

-

Incredibox is a music app that was created in 2009 by the French company So Far So Good. It is a combination of a game, a tool, and an educational resource that introduces kids and adults to notions of rhythm and melody in a fun and entertaining way.

-

A fun, interactive music experience

-

Incredibox is a music app that lets you create your own music with the help of a merry crew of beatboxers. You can choose from 9 musical styles among 8 impressive atmospheres and start to lay down, record, and share your mix. You can also find the right sound combos to unlock animated choruses that will enhance your tune. You can save your mix and get a link to share it with anybody so they can listen and vote for it. If your mix gets enough votes from other users, you may join the Top 50 chart and become a legend.

-

A music app with 9 musical styles and 8 characters

-

Incredibox features 9 musical styles that cover a wide range of genres, such as hip-hop, rock, funk, jazz, techno, electro-pop, samba, trap, and Bollywood. Each style has its own atmosphere, graphics, animation, and sound samples. You can switch between styles anytime you want and create your own combinations.

-

Incredibox also features 8 characters that represent different types of sounds, such as beats, effects, melodies, voices, choruses, percussions, basses, and bonuses. Each character has its own personality and appearance. You can drag and drop icons onto the characters to make them sing and start to compose your own music. You can also customize the characters by changing their outfits and accessories.

-

A game, a tool, and an educational resource

-

Incredibox is not only a music app but also a game, a tool, and an educational resource. As a game, Incredibox challenges you to create the best mix possible by finding the right sound combos and unlocking animated choruses. You can also compete with other users by sharing your mix and getting votes from them. As a tool, Incredibox allows you to express your creativity and musical talent by creating your own songs with simple drag-and-drop actions. You can also download your mixes as MP3 files and listen to them anytime you want. As an educational resource, Incredibox introduces you to the basics of musical creation by teaching you about rhythm and melody in an interactive way. You can also learn about different musical genres and cultures by exploring the different styles and atmospheres.

How to play Incredibox on Android devices? -

Now that you have downloaded Incredibox on your Android device, you may wonder how to play it and have fun with it. Incredibox is a very easy and intuitive app that anyone can use, regardless of their age or musical skills. Here are some steps and tips to help you play Incredibox on your Android device.

-

The basic gameplay

-

The basic gameplay of Incredibox is very simple and straightforward. You just need to follow these steps:

-
    -
1. Open the app and choose a musical style from the 9 available ones. You can swipe left or right to see all the options.
2. Tap on the play button to start the music and see the 8 characters on the screen. Each character represents a type of sound, such as beats, effects, melodies, voices, choruses, percussions, basses, and bonuses.
3. Drag and drop icons from the bottom of the screen onto the characters to make them sing and create your own mix. You can use up to 20 icons at a time, and you can change them anytime you want.
4. Find the right sound combos to unlock animated choruses that will enhance your mix. You can see the progress of the combos on the top of the screen.
5. Tap on the record button to record your mix and save it on your device. You can also share it with other users by tapping on the share button.
-

The advanced features

-

Incredibox also has some advanced features that you can use to make your mix more interesting and unique. Here are some of them:

-

incredibox app free download for android
-incredibox apk free download latest version
-incredibox mod apk free download android
-incredibox v8 dystopia free download android
-incredibox v9 wekiddy free download android
-incredibox v7 jeevan free download android
-incredibox v6 alive free download android
-incredibox v5 brazil free download android
-incredibox v4 the love free download android
-incredibox v3 sunrise free download android
-incredibox v2 little miss free download android
-incredibox v1 alpha free download android
-incredibox 0.5.1 apk free download android
-incredibox 0.4.9 apk free download android
-incredibox 0.4.8 apk free download android
-incredibox 0.4.7 apk free download android
-incredibox 0.4.6 apk free download android
-incredibox 0.4.5 apk free download android
-incredibox 0.4.4 apk free download android
-incredibox 0.4.3 apk free download android
-how to get incredibox for free on android
-how to play incredibox offline on android
-how to install incredibox on android
-how to update incredibox on android
-how to record incredibox on android
-how to share incredibox on android
-how to delete incredibox on android
-how to use dark mode in incredibox on android
-how to disable sharing in incredibox on android for kids
-how to access all versions of incredibox on android app
-best music creation app for android like incredibox
-best alternatives to incredibox for android users
-best tips and tricks for playing incredibox on android devices
-best reviews and ratings for incredibox app on android store
-best songs and mixes made with incredibox app on android phone
-best features and benefits of downloading incredibox app on android tablet
-best deals and discounts for buying incredibox app on android market
-best ways and methods to learn music with incredibox app on android online
-best resources and guides for using incredibox app on android offline
-best fun and entertainment with incredibox app on android for adults and kids alike

- -

The tips and tricks

-

If you want to improve your skills and enjoy Incredibox more, here are some tips and tricks that you can follow:

-

Why should you play Incredibox on Android devices?

-

By now, you may have a clear idea of what Incredibox is and how to play it on your Android device. But you may still wonder why you should play it and what benefits it can bring you. Here are some reasons why you should play Incredibox on your Android device.

-

The benefits of playing Incredibox

-

Incredibox is not just a music app, but also a game, a tool, and an educational resource that can offer you many benefits, such as:

- -

The reviews and ratings of Incredibox

-

If you are still not convinced by the benefits of playing Incredibox, you may want to check out the reviews and ratings of Incredibox from other users who have tried it. Incredibox has received overwhelmingly positive feedback from its users, who have praised its originality, simplicity, quality, and fun factor. Here are some examples of what users have said about Incredibox:

-
-

"This is the best app ever! I love making music with this app. It's so easy and fun. The graphics are amazing and the sounds are awesome. I recommend this app to everyone who loves music."

-
-
-

"Incredibox is a masterpiece. It's not just a game, it's an art. It's a way to express yourself through music. It's a way to learn about different musical styles and cultures. It's a way to have fun and relax."

-
-
-

"I'm addicted to this app. I can't stop playing it. It's so cool and creative. I love how I can mix different sounds and create my own songs. I also love how I can share my mixes with other people and listen to theirs."

-
-

Incredibox has also received high ratings from its users, who have given it an average of 4.8 out of 5 stars on the Google Play Store. This shows that Incredibox is a highly rated and popular app that many users enjoy and appreciate.

-

The alternatives to Incredibox

-

If you are looking for some alternatives to Incredibox, you may want to try some other music apps that are similar or related to Incredibox. Here are some of them:

- -

Conclusion

-

In conclusion, Incredibox is a music app that lets you create your own music with the help of a merry crew of beatboxers. You can choose from 9 musical styles among 8 impressive atmospheres and start to lay down, record, and share your mix. You can also find the right sound combos to unlock animated choruses that will enhance your tune.

-

In this article, we have told you everything you need to know about Incredibox, how to download it for free on your Android device, how to play it, and why you should play it. We hope that this article has been helpful and informative for you, and that you have enjoyed reading it as much as we have enjoyed writing it.

Now that you have reached the end of the article, you may have some questions or doubts about Incredibox or anything related to it. To help you with that, we have prepared a list of 5 frequently asked questions (FAQs) that may answer some of your queries. Here they are:

-

FAQs

-
    -
1. Is Incredibox safe for kids?

   Yes, Incredibox is safe for kids, as it does not contain any inappropriate or harmful content. It is also suitable for kids, as it is easy to use, fun to play, and educational to learn. In fact, Incredibox is often used by teachers and parents as a way to introduce kids to music and creativity.

2. Is Incredibox available for other devices?

   Yes, Incredibox is available for other devices besides Android devices. You can also play Incredibox on iOS devices, such as iPhones and iPads, by downloading it from the App Store. You can also play Incredibox on your web browser, such as Chrome, Firefox, or Safari, by visiting the official website of Incredibox.

3. How can I contact the developer of Incredibox?

   If you want to contact the developer of Incredibox, you can do so by visiting their website and filling out the contact form. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, and YouTube. You can also send them an email at contact@incredibox.com.

4. How can I support the developer of Incredibox?

   If you want to support the developer of Incredibox, you can do so by buying the app from the app store, leaving a positive review and rating for the app, sharing your mixes with other users and friends, and following their social media accounts. You can also donate to them via PayPal or Patreon.

5. How can I learn more about Incredibox?

   If you want to learn more about Incredibox, you can do so by visiting their website and reading their blog posts and news articles. You can also watch their videos and tutorials on their YouTube channel. You can also join their community and forum on their website and interact with other users and fans.
-

We hope that these FAQs have been useful and informative for you. If you have any other questions or comments about Incredibox or this article, please feel free to leave them below. We would love to hear from you and help you out.

-

Thank you for reading this article and playing Incredibox. We hope that you have enjoyed it as much as we have enjoyed writing it and creating music with it. Have a great day and keep on making music!

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Experience Nintendo GameCube and Wii Games on Xbox with Dolphin Emulator A Step-by-Step Guide.md b/spaces/1phancelerku/anime-remove-background/Experience Nintendo GameCube and Wii Games on Xbox with Dolphin Emulator A Step-by-Step Guide.md deleted file mode 100644 index bfbeeb6ce59954e5188aa38d3825570398fe4352..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Experience Nintendo GameCube and Wii Games on Xbox with Dolphin Emulator A Step-by-Step Guide.md +++ /dev/null @@ -1,119 +0,0 @@ -
-

How to Download Dolphin Emulator on Xbox

-

If you are a fan of Nintendo GameCube and Wii games, you might be wondering if there is a way to play them on your Xbox console. The answer is yes, thanks to a powerful emulator called Dolphin. Dolphin is a software that can run GameCube and Wii games on various platforms, including Windows, Linux, Android, and even Xbox. In this article, we will show you how to download and install Dolphin Emulator on your Xbox Series X/S or Xbox One, and how to configure it for the best performance and compatibility. We will also share some tips and tricks to enhance your gaming experience with Dolphin Emulator on Xbox.

-

Requirements for Dolphin Emulator on Xbox

-

Before you start, you will need the following things:

-

how to download dolphin emulator on xbox


Download ››››› https://jinyurl.com/2uNOB2



- -

How to Enable Developer Mode on Xbox

-

The first step is to enable developer mode on your Xbox console. This will allow you to install apps that are not available on the Microsoft Store, such as Dolphin Emulator. Developer mode is free for anyone to use, but it has some limitations and risks. For example, you will not be able to play online multiplayer games or use some features like achievements or game DVR while in developer mode. You will also need to switch back to retail mode if you want to use those features again. To enable developer mode, follow these steps:

-
    -
1. Go to https://developer.microsoft.com/en-us/xboxactivate on your PC and sign in with your Microsoft account.
2. Select Activate Console and follow the instructions to register your console as a developer device.
3. On your console, go to Settings > System > Console info and select Reset console.
4. Select Reset and keep my games & apps.
5. Wait for the reset process to complete and sign in with your Microsoft account again.
6. Go to Settings > System > Developer settings and select Enable developer mode.
7. Wait for the console to reboot into developer mode.
-

How to Download and Install Dolphin Emulator on Xbox

-

Now that you have enabled developer mode, you can download and install Dolphin Emulator on your console. To do this, follow these steps:

-
    -
1. Copy the Dolphin Emulator app file (DolphinUWP_.appx) from your PC to your USB drive.
2. Plug the USB drive into your console.
3. On your console, go to Settings > System > Developer settings and select Remote Access Settings.
4. Enable Remote Access and set a username and password for authentication.
5. Note down the IP address of your console shown under Remote Access Settings.
6. On your PC, open a web browser and enter the IP address of your console followed by :11443 in the address bar. For example, https://192.168.1.100:11443. (A short script for double-checking this connection appears after these steps.)
7. You will see a security warning about an untrusted certificate. Click on Advanced and proceed to the website.
8. Enter the username and password that you set for your console and click on Log in.
9. Click on Add and browse to the location of the Dolphin Emulator app file on your USB drive.
10. Select the app file and click on Next.
11. Wait for the app to be uploaded and installed on your console.
12. Once the installation is complete, you will see Dolphin Emulator listed under Installed Apps on the web page.
13. On your console, go to My games & apps > See all > Apps and launch Dolphin Emulator.
-
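If you would rather sanity-check the connection from step 6 with a script instead of a browser, the minimal Python sketch below just confirms that something is answering on port 11443 and prints the HTTP status code. The IP address, username, and password are placeholders, and whether the portal accepts these credentials this way is an assumption; the upload itself is still easiest to do through the web page as described above.

```python
import requests  # third-party library: pip install requests

# Placeholders -- use the IP address and the credentials you set in steps 4-5.
CONSOLE = "https://192.168.1.100:11443"
AUTH = ("your-username", "your-password")

# verify=False mirrors clicking through the self-signed certificate
# warning from step 7; requests will print an InsecureRequestWarning.
response = requests.get(CONSOLE, auth=AUTH, verify=False, timeout=10)

# Any status code means the Device Portal is reachable; 401/403 usually
# means the credentials were not accepted.
print("Device Portal responded with HTTP", response.status_code)
```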

How to Configure Dolphin Emulator Settings on Xbox

-

Before you start playing games, you will need to configure some settings in Dolphin Emulator to optimize its performance and compatibility. To do this, follow these steps:

-
    -
1. On the main menu of Dolphin Emulator, select Config.
2. Under the General tab, you can adjust some basic settings such as language, theme, and interface options.
3. Under the Graphics tab, you can change some settings related to video output, such as resolution, aspect ratio, vsync, and enhancements. For the best performance, we recommend using the native resolution of your console (1080p for Xbox One and 4K for Xbox Series X/S) and disabling any unnecessary enhancements such as anti-aliasing or anisotropic filtering.
4. Under the Audio tab, you can change some settings related to sound output, such as volume, backend, and latency. For the best compatibility, we recommend using the XAudio2 backend and lowering the latency to 20 ms or less.
5. Under the GameCube tab, you can change some settings related to GameCube emulation, such as system language, memory card size, and controller type. For the best compatibility, we recommend using a standard controller for port 1 and leaving the other ports empty.
6. Under the Wii tab, you can change some settings related to Wii emulation, such as system language, aspect ratio, sensor bar position, and speaker volume. For the best compatibility, we recommend using a horizontal aspect ratio and placing the sensor bar above or below your TV screen.
7. Under the Paths tab, you can add or remove folders where Dolphin Emulator will look for game files. By default, it will scan the internal storage of your console and any USB drives connected to it. You can also add network paths if you have game files stored on a PC or a NAS device.
8. Under the Advanced tab, you can change some settings related to advanced features such as CPU overclocking, dual core mode, cheats, and debug options. For the best stability, we recommend leaving these settings at their default values unless you know what you are doing.
-
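
On desktop builds, Dolphin stores most of these options in plain INI files (for example Dolphin.ini for core and audio settings), and the Xbox port keeps an equivalent set in the app's local state folder. If you prefer to pre-seed a few of the options above rather than click through the menus, a configparser sketch like the one below could do it. The file path and the section/key names (DSP/Backend, DSP/Latency, Core/CPUThread) are assumptions for illustration only; open the INI your build actually writes and mirror its real names.

```python
# Minimal sketch: pre-seed a Dolphin-style INI file with Python's configparser.
# The path and the section/key names are assumptions for illustration only;
# mirror the names in the INI file your Dolphin build actually writes.
import configparser

INI_PATH = "Dolphin.ini"  # hypothetical location of the emulator's main config file

config = configparser.ConfigParser()
config.optionxform = str  # keep mixed-case key names as written
config.read(INI_PATH)     # silently continues if the file does not exist yet

# Make sure the sections exist before assigning keys to them.
for section in ("DSP", "Core"):
    if not config.has_section(section):
        config.add_section(section)

config["DSP"]["Backend"] = "XAudio2"  # audio backend recommended above (assumed key name)
config["DSP"]["Latency"] = "20"       # target latency in ms (assumed key name)
config["Core"]["CPUThread"] = "True"  # dual-core mode left enabled (assumed key name)

with open(INI_PATH, "w") as f:
    config.write(f)
```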

How to Play GameCube and Wii Games on Xbox with Dolphin Emulator


Now that you have configured Dolphin Emulator settings on your console, you are ready to play some games. To do this, follow these steps:

  1. Make sure that you have some GameCube or Wii game files (ROMs or ISOs) stored on your console's internal storage or a USB drive. You can also use network paths if you have game files stored on a PC or a NAS device.
  2. On the main menu of Dolphin Emulator, select Browse.
  3. Navigate to the folder where your game files are located and select one of them.
  4. The game will start loading and you will see some information about it on the screen. You can press the Menu button on your controller to access some options such as save states, screenshots, cheats, and more.
  5. You can use your controller to play the game as if it was a native Xbox game. You can also use a keyboard and mouse if you prefer. You can customize the controller mappings in Dolphin Emulator by going to Controllers > Configure Controller in the main menu.
  6. To exit the game, press the View button on your controller and select Exit from the menu that appears.

Tips and Tricks for Dolphin Emulator on Xbox


To make the most out of Dolphin Emulator on Xbox, here are some tips and tricks that you might find useful:


Conclusion


Dolphin Emulator is a great way to enjoy GameCube and Wii games on your Xbox console. It is easy to download and install, and it offers a lot of customization and optimization options. You can play hundreds of games with high compatibility and performance, and even use some features that are not available on the original consoles, such as online multiplayer, motion controls, and graphical enhancements. Dolphin Emulator is a must-have for any Nintendo fan who owns an Xbox console.


FAQs


Here are some frequently asked questions about Dolphin Emulator on Xbox:

  1. Is Dolphin Emulator legal?
    Dolphin Emulator itself is legal, as it is software that emulates the hardware and software of the GameCube and Wii consoles. However, downloading or distributing game files (ROMs or ISOs) that you do not own is illegal, as it infringes the copyright of the game developers and publishers. You should only use game files that you have legally obtained from your own discs or digital purchases.
  2. Is Dolphin Emulator safe?
    Dolphin Emulator is safe, as long as you download it from its official website or GitHub repository. It does not contain any viruses, malware, or spyware that could harm your console or PC. However, you should be careful when downloading any add-ons or game files from other sources online, as they might contain harmful or malicious content.
  3. Does Dolphin Emulator work on Xbox One S or Xbox One X?
    Yes, Dolphin Emulator works on any Xbox One model, including Xbox One S and Xbox One X. However, you might notice some differences in performance and compatibility depending on the model of your console. For example, Xbox One X has more power and memory than Xbox One S, which means it can run some games faster and more smoothly.
  4. Can I use an external hard drive instead of a USB drive for Dolphin Emulator?
    Yes, you can use an external hard drive instead of a USB drive for Dolphin Emulator, as long as it is formatted as NTFS and has enough space for your game files. However, you might experience some issues with loading times or compatibility depending on the speed and quality of your external hard drive.
  5. Can I use a wireless controller instead of a wired controller for Dolphin Emulator?
    Yes, you can use a wireless controller instead of a wired controller for Dolphin Emulator, as long as it is compatible with your console and has enough battery life. However, you might experience some issues with input lag or responsiveness depending on the quality and signal strength of your wireless controller.

\ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/experimental/rl/value_guided_sampling.py b/spaces/1toTree/lora_test/ppdiffusers/experimental/rl/value_guided_sampling.py deleted file mode 100644 index 4184c0ab362dd23eff61c72997291eaa1a95feee..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/experimental/rl/value_guided_sampling.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import paddle - -from ...models.unet_1d import UNet1DModel -from ...pipeline_utils import DiffusionPipeline -from ...utils.dummy_paddle_objects import DDPMScheduler - - -class ValueGuidedRLPipeline(DiffusionPipeline): - r""" - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - Pipeline for sampling actions from a diffusion model trained to predict sequences of states. - Original implementation inspired by this repository: https://github.com/jannerm/diffuser. - - Parameters: - value_function ([`UNet1DModel`]): A specialized UNet for fine-tuning trajectories base on reward. - unet ([`UNet1DModel`]): U-Net architecture to denoise the encoded trajectories. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this - application is [`DDPMScheduler`]. - env: An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models. 
- """ - - def __init__( - self, - value_function: UNet1DModel, - unet: UNet1DModel, - scheduler: DDPMScheduler, - env, - ): - super().__init__() - self.value_function = value_function - self.unet = unet - self.scheduler = scheduler - self.env = env - self.data = env.get_dataset() - self.means = dict() - for key in self.data.keys(): - try: - self.means[key] = self.data[key].mean() - except Exception: - pass - self.stds = dict() - for key in self.data.keys(): - try: - self.stds[key] = self.data[key].std() - except Exception: - pass - self.state_dim = env.observation_space.shape[0] - self.action_dim = env.action_space.shape[0] - - def normalize(self, x_in, key): - return (x_in - self.means[key]) / self.stds[key] - - def de_normalize(self, x_in, key): - return x_in * self.stds[key] + self.means[key] - - def to_paddle(self, x_in): - if type(x_in) is dict: - return {k: self.to_paddle(v) for k, v in x_in.items()} - elif paddle.is_tensor(x_in): - return x_in - return paddle.to_tensor(x_in) - - def reset_x0(self, x_in, cond, act_dim): - for key, val in cond.items(): - x_in[:, key, act_dim:] = val.clone() - return x_in - - def run_diffusion(self, x, conditions, n_guide_steps, scale): - batch_size = x.shape[0] - y = None - for i in self.progress_bar(self.scheduler.timesteps): - # create batch of timesteps to pass into model - timesteps = paddle.full((batch_size,), i, dtype="int64") - for _ in range(n_guide_steps): - with paddle.set_grad_enabled(True): - x.stop_gradient = False - # permute to match dimension for pre-trained models - y = self.value_function(x.transpose([0, 2, 1]), timesteps).sample - grad = paddle.autograd.grad([y.sum()], [x])[0] - - posterior_variance = self.scheduler._get_variance(i) - model_std = paddle.exp(0.5 * posterior_variance) - grad = model_std * grad - - grad[timesteps < 2] = 0 - x = x.detach() - x = x + scale * grad - x = self.reset_x0(x, conditions, self.action_dim) - prev_x = self.unet(x.transpose([0, 2, 1]), timesteps).sample.transpose([0, 2, 1]) - # TODO: verify deprecation of this kwarg - x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"] - - # apply conditions to the trajectory (set the initial state) - x = self.reset_x0(x, conditions, self.action_dim) - x = self.to_paddle(x) - return x, y - - def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): - # normalize the observations and create batch dimension - obs = self.normalize(obs, "observations") - obs = obs[None].repeat(batch_size, axis=0) - - conditions = {0: self.to_paddle(obs)} - shape = [batch_size, planning_horizon, self.state_dim + self.action_dim] - - # generate initial noise and apply our conditions (to make the trajectories start at current state) - x1 = paddle.randn(shape) - x = self.reset_x0(x1, conditions, self.action_dim) - x = self.to_paddle(x) - - # run the diffusion process - x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) - - # sort output trajectories by value - sorted_idx = paddle.argsort(y, 0, descending=True).squeeze() - sorted_values = x[sorted_idx] - actions = sorted_values[:, :, : self.action_dim] - actions = actions.detach().numpy() - denorm_actions = self.de_normalize(actions, key="actions") - - # select the action with the highest value - if y is not None: - selected_index = 0 - else: - # if we didn't run value guiding, select a random action - selected_index = np.random.randint(0, batch_size) - denorm_actions = denorm_actions[selected_index, 0] - return denorm_actions diff --git 
a/spaces/A00001/bingothoo/src/pages/api/healthz.ts b/spaces/A00001/bingothoo/src/pages/api/healthz.ts deleted file mode 100644 index f6ae44ff0fd66ccd3f7feaa550025fbf2a83bf77..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/pages/api/healthz.ts +++ /dev/null @@ -1,7 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - res.status(200).end('ok') -} diff --git a/spaces/AI-ZTH-03-23/README/README.md b/spaces/AI-ZTH-03-23/README/README.md deleted file mode 100644 index 02b96483a48dd036113efcd9b2d52664091b0523..0000000000000000000000000000000000000000 --- a/spaces/AI-ZTH-03-23/README/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: README -emoji: 🐠 -colorFrom: gray -colorTo: purple -sdk: static -pinned: false ---- - -# 03-23-2023 Code Examples: -1. Classroom: https://huggingface.co/AI-ZTH-03-23 -2. Dynamic Architecture Modeling: https://huggingface.co/spaces/awacke1/Streamlit.GraphViz.Dynamic.Architecture.Diagram -3. Aframe VR IOT Motion Sensor WASD: https://huggingface.co/spaces/awacke1/HTML5-Aframe-3dMap-Flight -4. MediaPipe: https://huggingface.co/spaces/awacke1/RealTime-MediaPipe-AI-From-Video-On-Any-Device -5. Wikipedia Fact Check Chat: https://huggingface.co/spaces/awacke1/StreamlitWikipediaChat -6. Dashboard - Tweet, Wiki, Memory: https://huggingface.co/spaces/awacke1/AI.Dashboard.Wiki.Chat.Cognitive.HTML5 -7. Dashboard - Chat, Download, Image Search, OCR, StoryGen, Q, Mermaid HTML5: https://huggingface.co/spaces/awacke1/AI.Dashboard.Gradio.Streamlit.HTML5 -8. Datasets - Biomed NER: https://huggingface.co/spaces/DataScienceEngineering/7-NER-Biomed-ClinicalTerms -9. MN Hospitals Comparative Maps: https://huggingface.co/spaces/awacke1/MN.Map.Hospitals.Top.Five -10. 
Find Mental Health Providers, Maps, Location: https://huggingface.co/spaces/awacke1/Gradio-Maps-Latitude-Longitude \ No newline at end of file diff --git a/spaces/AI-Zero-to-Hero/01-H5-Play-Canvas-Sim-Physics/style.css b/spaces/AI-Zero-to-Hero/01-H5-Play-Canvas-Sim-Physics/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/AI-Zero-to-Hero/01-H5-Play-Canvas-Sim-Physics/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/__init__.py b/spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/res_flow.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/res_flow.py deleted file mode 100644 index a237b05b7fc0fd8626d3da95ad3a39171e7129fc..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/res_flow.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch import nn -from text_to_speech.modules.commons.conv import ConditionalConvBlocks -from text_to_speech.modules.commons.wavenet import WN - - -class FlipLayer(nn.Module): - def forward(self, x, nonpadding, cond=None, reverse=False): - x = torch.flip(x, [1]) - return x - - -class CouplingLayer(nn.Module): - def __init__(self, c_in, hidden_size, kernel_size, n_layers, p_dropout=0, c_in_g=0, nn_type='wn'): - super().__init__() - self.channels = c_in - self.hidden_size = hidden_size - self.kernel_size = kernel_size - self.n_layers = n_layers - self.c_half = c_in // 2 - - self.pre = nn.Conv1d(self.c_half, hidden_size, 1) - if nn_type == 'wn': - self.enc = WN(hidden_size, kernel_size, 1, n_layers, p_dropout=p_dropout, - c_cond=c_in_g) - elif nn_type == 'conv': - self.enc = ConditionalConvBlocks( - hidden_size, c_in_g, hidden_size, None, kernel_size, - layers_in_block=1, is_BTC=False, num_layers=n_layers) - self.post = nn.Conv1d(hidden_size, self.c_half, 1) - - def forward(self, x, nonpadding, cond=None, reverse=False): - x0, x1 = x[:, :self.c_half], x[:, self.c_half:] - x_ = self.pre(x0) * nonpadding - x_ = self.enc(x_, nonpadding=nonpadding, cond=cond) - m = self.post(x_) - x1 = m + x1 if not reverse else x1 - m - x = torch.cat([x0, x1], 1) - return x * nonpadding - - -class ResFlow(nn.Module): - def __init__(self, - c_in, - hidden_size, - kernel_size, - n_flow_layers, - n_flow_steps=4, - c_cond=0, - nn_type='wn'): - super().__init__() - self.flows = nn.ModuleList() - for i in range(n_flow_steps): - self.flows.append( - CouplingLayer(c_in, hidden_size, kernel_size, n_flow_layers, c_in_g=c_cond, nn_type=nn_type)) - self.flows.append(FlipLayer()) - - def forward(self, x, nonpadding, cond=None, reverse=False): - for flow in (self.flows if not reverse else reversed(self.flows)): - x = flow(x, nonpadding, cond=cond, reverse=reverse) - return x diff --git 
a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/utils.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/utils.py deleted file mode 100644 index 7eb56ec514bff822ba1a19a6474207ed82492410..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/utils.py +++ /dev/null @@ -1,29 +0,0 @@ -import torch - - -def squeeze(x, x_mask=None, n_sqz=2): - b, c, t = x.size() - - t = (t // n_sqz) * n_sqz - x = x[:, :, :t] - x_sqz = x.view(b, c, t // n_sqz, n_sqz) - x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz) - - if x_mask is not None: - x_mask = x_mask[:, :, n_sqz - 1::n_sqz] - else: - x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype) - return x_sqz * x_mask, x_mask - - -def unsqueeze(x, x_mask=None, n_sqz=2): - b, c, t = x.size() - - x_unsqz = x.view(b, n_sqz, c // n_sqz, t) - x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz) - - if x_mask is not None: - x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz) - else: - x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype) - return x_unsqz * x_mask, x_mask diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps_flow.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps_flow.py deleted file mode 100644 index a52fcd7bc3887f479d02e9ffbf03cb6e717a89d5..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps_flow.py +++ /dev/null @@ -1,135 +0,0 @@ -import torch -from text_to_speech.modules.tts.portaspeech.portaspeech_flow import PortaSpeechFlow -from tasks.tts.fs import FastSpeechTask -from tasks.tts.ps import PortaSpeechTask -from text_to_speech.utils.audio.pitch.utils import denorm_f0 -from text_to_speech.utils.commons.hparams import hparams - - -class PortaSpeechFlowTask(PortaSpeechTask): - def __init__(self): - super().__init__() - self.training_post_glow = False - - def build_tts_model(self): - ph_dict_size = len(self.token_encoder) - word_dict_size = len(self.word_encoder) - self.model = PortaSpeechFlow(ph_dict_size, word_dict_size, hparams) - - def _training_step(self, sample, batch_idx, opt_idx): - self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \ - and hparams['use_post_flow'] - if hparams['two_stage'] and \ - ((opt_idx == 0 and self.training_post_glow) or (opt_idx == 1 and not self.training_post_glow)): - return None - loss_output, _ = self.run_model(sample) - total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad]) - loss_output['batch_size'] = sample['txt_tokens'].size()[0] - if 'postflow' in loss_output and loss_output['postflow'] is None: - return None - return total_loss, loss_output - - def run_model(self, sample, infer=False, *args, **kwargs): - if not infer: - training_post_glow = self.training_post_glow - spk_embed = sample.get('spk_embed') - spk_id = sample.get('spk_ids') - output = self.model(sample['txt_tokens'], - sample['word_tokens'], - ph2word=sample['ph2word'], - mel2word=sample['mel2word'], - mel2ph=sample['mel2ph'], - word_len=sample['word_lengths'].max(), - tgt_mels=sample['mels'], - pitch=sample.get('pitch'), - spk_embed=spk_embed, - spk_id=spk_id, - infer=False, - forward_post_glow=training_post_glow, - two_stage=hparams['two_stage'], - global_step=self.global_step, - bert_feats=sample.get('bert_feats')) - losses = {} - 
self.add_mel_loss(output['mel_out'], sample['mels'], losses) - if (training_post_glow or not hparams['two_stage']) and hparams['use_post_flow']: - losses['postflow'] = output['postflow'] - losses['l1'] = losses['l1'].detach() - losses['ssim'] = losses['ssim'].detach() - if not training_post_glow or not hparams['two_stage'] or not self.training: - losses['kl'] = output['kl'] - if self.global_step < hparams['kl_start_steps']: - losses['kl'] = losses['kl'].detach() - else: - losses['kl'] = torch.clamp(losses['kl'], min=hparams['kl_min']) - losses['kl'] = losses['kl'] * hparams['lambda_kl'] - if hparams['dur_level'] == 'word': - self.add_dur_loss( - output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses) - self.get_attn_stats(output['attn'], sample, losses) - else: - super().add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses) - return losses, output - else: - use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) - forward_post_glow = self.global_step >= hparams['post_glow_training_start'] + 1000 \ - and hparams['use_post_flow'] - spk_embed = sample.get('spk_embed') - spk_id = sample.get('spk_ids') - output = self.model( - sample['txt_tokens'], - sample['word_tokens'], - ph2word=sample['ph2word'], - word_len=sample['word_lengths'].max(), - pitch=sample.get('pitch'), - mel2ph=sample['mel2ph'] if use_gt_dur else None, - mel2word=sample['mel2word'] if hparams['profile_infer'] or hparams['use_gt_dur'] else None, - infer=True, - forward_post_glow=forward_post_glow, - spk_embed=spk_embed, - spk_id=spk_id, - two_stage=hparams['two_stage'], - bert_feats=sample.get('bert_feats')) - return output - - def validation_step(self, sample, batch_idx): - self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \ - and hparams['use_post_flow'] - return super().validation_step(sample, batch_idx) - - def save_valid_result(self, sample, batch_idx, model_out): - super(PortaSpeechFlowTask, self).save_valid_result(sample, batch_idx, model_out) - sr = hparams['audio_sample_rate'] - f0_gt = None - if sample.get('f0') is not None: - f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) - if self.global_step > 0: - # save FVAE result - if hparams['use_post_flow']: - wav_pred = self.vocoder.spec2wav(model_out['mel_out_fvae'][0].cpu(), f0=f0_gt) - self.logger.add_audio(f'wav_fvae_{batch_idx}', wav_pred, self.global_step, sr) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out_fvae'][0], - f'mel_fvae_{batch_idx}', f0s=f0_gt) - - def build_optimizer(self, model): - if hparams['two_stage'] and hparams['use_post_flow']: - self.optimizer = torch.optim.AdamW( - [p for name, p in self.model.named_parameters() if 'post_flow' not in name], - lr=hparams['lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - weight_decay=hparams['weight_decay']) - self.post_flow_optimizer = torch.optim.AdamW( - self.model.post_flow.parameters(), - lr=hparams['post_flow_lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - weight_decay=hparams['weight_decay']) - return [self.optimizer, self.post_flow_optimizer] - else: - self.optimizer = torch.optim.AdamW( - self.model.parameters(), - lr=hparams['lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - weight_decay=hparams['weight_decay']) - return [self.optimizer] - - def build_scheduler(self, optimizer): - return FastSpeechTask.build_scheduler(self, optimizer[0]) \ No newline at end of file diff --git 
a/spaces/AIWaves/SOP_Generation-single/Agent/__init__.py b/spaces/AIWaves/SOP_Generation-single/Agent/__init__.py deleted file mode 100644 index 5919811a5cec1b9d44051cdb1e9ac26a21ee3064..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/SOP_Generation-single/Agent/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .Agent import Agent \ No newline at end of file diff --git a/spaces/AIZeroToHero/04-Image2OCR/README.md b/spaces/AIZeroToHero/04-Image2OCR/README.md deleted file mode 100644 index 3adf279a22a8ec0c46cfbb8247925692edfdcd9e..0000000000000000000000000000000000000000 --- a/spaces/AIZeroToHero/04-Image2OCR/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 04 Image2OCR -emoji: 🚀 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.1.5 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Dfehub.py b/spaces/ASJMO/freegpt/g4f/Provider/Providers/Dfehub.py deleted file mode 100644 index 2f66f19b50b6b4ab79c012f123c47241141942eb..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Dfehub.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import requests -from ...typing import sha256, Dict, get_type_hints - -url = "https://chat.dfehub.com" -model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4'] -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - headers = { - 'Authority': 'chat.dfehub.com', - 'Content-Type': 'application/json', - 'Method': 'POST', - 'Path': '/api/openai/v1/chat/completions', - 'Scheme': 'https', - 'Accept': 'text/event-stream', - 'Accept-Language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5', - 'Content-Type': 'application/json', - 'Origin': 'https://chat.dfehub.com', - 'Referer': 'https://chat.dfehub.com/', - 'Sec-Ch-Ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'Sec-Ch-Ua-Mobile': '?0', - 'Sec-Ch-Ua-Platform': '"Windows"', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-origin', - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - 'X-Requested-With': 'XMLHttpRequest', - } - - data = { - 'model': model, - 'temperature': 0.7, - 'max_tokens': '8000', - 'presence_penalty': 0, - 'messages': messages, - } - - response = requests.post(url + '/api/openai/v1/chat/completions', - headers=headers, json=data, stream=stream) - - yield response.json()['choices'][0]['message']['content'] - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/Aashir01/Live_Transcription/app.py b/spaces/Aashir01/Live_Transcription/app.py deleted file mode 100644 index ed71e3237ed01aaa15397b34d58651e299e7e605..0000000000000000000000000000000000000000 --- a/spaces/Aashir01/Live_Transcription/app.py +++ /dev/null @@ -1,236 +0,0 @@ -import base64 -import math -import os -import time -from functools import partial -from multiprocessing import Pool - -import gradio as gr -import numpy as np -import pytube -import requests -from processing_whisper import WhisperPrePostProcessor -from transformers.models.whisper.tokenization_whisper import 
TO_LANGUAGE_CODE -from transformers.pipelines.audio_utils import ffmpeg_read - - -title = "Whisper JAX: The Fastest Whisper API ⚡️" - -description = """Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over [**70x faster**](https://github.com/sanchit-gandhi/whisper-jax#benchmarks), making it the fastest Whisper API available. - -Note that at peak times, you may find yourself in the queue for this demo. When you submit a request, your queue position will be shown in the top right-hand side of the demo pane. Once you reach the front of the queue, your audio file will be sent to the TPU and then transcribed, with the progress displayed through a progress bar. - -To skip the queue, you may wish to create your own inference endpoint, details for which can be found in the [Whisper JAX repository](https://github.com/sanchit-gandhi/whisper-jax#creating-an-endpoint). -""" - -article = "Whisper large-v2 model by OpenAI. Backend running JAX on a TPU v4-8 through the generous support of the [TRC](https://sites.research.google/trc/about/) programme. Whisper JAX [code](https://github.com/sanchit-gandhi/whisper-jax) and Gradio demo by 🤗 Hugging Face." - -API_SEND_URL = os.getenv("API_SEND_URL") -API_FORWARD_URL = os.getenv("API_FORWARD_URL") - -language_names = sorted(TO_LANGUAGE_CODE.keys()) -CHUNK_LENGTH_S = 30 -BATCH_SIZE = 16 -NUM_PROC = 16 -FILE_LIMIT_MB = 1000 - - -def query(url, payload): - response = requests.post(url, json=payload) - return response.json(), response.status_code - - -def inference(batch_id, idx, task=None, return_timestamps=False): - payload = {"batch_id": batch_id, "idx": idx, "task": task, "return_timestamps": return_timestamps} - - data, status_code = query(API_FORWARD_URL, payload) - - if status_code == 200: - tokens = {"tokens": np.asarray(data["tokens"])} - return tokens - else: - gr.Error(data["detail"]) - - -def send_chunks(batch, batch_id): - feature_shape = batch["input_features"].shape - batch["input_features"] = base64.b64encode(batch["input_features"].tobytes()).decode() - query(API_SEND_URL, {"batch": batch, "feature_shape": feature_shape, "batch_id": batch_id}) - - -def forward(batch_id, idx, task=None, return_timestamps=False): - outputs = inference(batch_id, idx, task, return_timestamps) - return outputs - - -# Copied from https://github.com/openai/whisper/blob/c09a7ae299c4c34c5839a76380ae407e7d785914/whisper/utils.py#L50 -def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = "."): - if seconds is not None: - milliseconds = round(seconds * 1000.0) - - hours = milliseconds // 3_600_000 - milliseconds -= hours * 3_600_000 - - minutes = milliseconds // 60_000 - milliseconds -= minutes * 60_000 - - seconds = milliseconds // 1_000 - milliseconds -= seconds * 1_000 - - hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else "" - return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}" - else: - # we have a malformed timestamp so just return it as is - return seconds - - -if __name__ == "__main__": - processor = WhisperPrePostProcessor.from_pretrained("openai/whisper-large-v2") - stride_length_s = CHUNK_LENGTH_S / 6 - chunk_len = round(CHUNK_LENGTH_S * processor.feature_extractor.sampling_rate) - stride_left = stride_right = round(stride_length_s * processor.feature_extractor.sampling_rate) - step = chunk_len - stride_left 
- stride_right - pool = Pool(NUM_PROC) - - def tqdm_generate(inputs: dict, task: str, return_timestamps: bool, progress: gr.Progress): - inputs_len = inputs["array"].shape[0] - all_chunk_start_batch_id = np.arange(0, inputs_len, step) - num_samples = len(all_chunk_start_batch_id) - num_batches = math.ceil(num_samples / BATCH_SIZE) - dummy_batches = list(range(num_batches)) - - dataloader = processor.preprocess_batch(inputs, chunk_length_s=CHUNK_LENGTH_S, batch_size=BATCH_SIZE) - progress(0, desc="Sending audio to TPU...") - batch_id = np.random.randint( - 1000000 - ) # TODO(SG): swap to an iterator - currently taking our 1 in a million chances - pool.map(partial(send_chunks, batch_id=batch_id), dataloader) - - model_outputs = [] - start_time = time.time() - # iterate over our chunked audio samples - for idx in progress.tqdm(dummy_batches, desc="Transcribing..."): - model_outputs.append(forward(batch_id, idx, task=task, return_timestamps=return_timestamps)) - runtime = time.time() - start_time - - post_processed = processor.postprocess(model_outputs, return_timestamps=return_timestamps) - text = post_processed["text"] - timestamps = post_processed.get("chunks") - if timestamps is not None: - timestamps = [ - f"[{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}" - for chunk in timestamps - ] - text = "\n".join(str(feature) for feature in timestamps) - return text, runtime - - def transcribe_chunked_audio(inputs, task, return_timestamps, progress=gr.Progress()): - progress(0, desc="Loading audio file...") - if inputs is None: - raise gr.Error("No audio file submitted! Please upload an audio file before submitting your request.") - file_size_mb = os.stat(inputs).st_size / (1024 * 1024) - if file_size_mb > FILE_LIMIT_MB: - raise gr.Error( - f"File size exceeds file size limit. Got file of size {file_size_mb:.2f}MB for a limit of {FILE_LIMIT_MB}MB." - ) - - with open(inputs, "rb") as f: - inputs = f.read() - - inputs = ffmpeg_read(inputs, processor.feature_extractor.sampling_rate) - inputs = {"array": inputs, "sampling_rate": processor.feature_extractor.sampling_rate} - text, runtime = tqdm_generate(inputs, task=task, return_timestamps=return_timestamps, progress=progress) - return text, runtime - - def _return_yt_html_embed(yt_url): - video_id = yt_url.split("?v=")[-1] - HTML_str = ( - f'
' - "
" - ) - return HTML_str - - def transcribe_youtube(yt_url, task, return_timestamps, progress=gr.Progress(), max_filesize=75.0): - progress(0, desc="Loading audio file...") - html_embed_str = _return_yt_html_embed(yt_url) - try: - yt = pytube.YouTube(yt_url) - stream = yt.streams.filter(only_audio=True)[0] - except: - raise gr.Error("An error occurred while loading the YouTube video. Please try again.") - - if stream.filesize_mb > max_filesize: - raise gr.Error(f"Maximum YouTube file size is {max_filesize}MB, got {stream.filesize_mb:.2f}MB.") - - stream.download(filename="audio.mp3") - - with open("audio.mp3", "rb") as f: - inputs = f.read() - - inputs = ffmpeg_read(inputs, processor.feature_extractor.sampling_rate) - inputs = {"array": inputs, "sampling_rate": processor.feature_extractor.sampling_rate} - text, runtime = tqdm_generate(inputs, task=task, return_timestamps=return_timestamps, progress=progress) - return html_embed_str, text, runtime - - microphone_chunked = gr.Interface( - fn=transcribe_chunked_audio, - inputs=[ - gr.inputs.Audio(source="microphone", optional=True, type="filepath"), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"), - gr.inputs.Checkbox(default=False, label="Return timestamps"), - ], - outputs=[ - gr.outputs.Textbox(label="Transcription").style(show_copy_button=True), - gr.outputs.Textbox(label="Transcription Time (s)"), - ], - allow_flagging="never", - title=title, - description=description, - article=article, - ) - - audio_chunked = gr.Interface( - fn=transcribe_chunked_audio, - inputs=[ - gr.inputs.Audio(source="upload", optional=True, label="Audio file", type="filepath"), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"), - gr.inputs.Checkbox(default=False, label="Return timestamps"), - ], - outputs=[ - gr.outputs.Textbox(label="Transcription").style(show_copy_button=True), - gr.outputs.Textbox(label="Transcription Time (s)"), - ], - allow_flagging="never", - title=title, - description=description, - article=article, - ) - - youtube = gr.Interface( - fn=transcribe_youtube, - inputs=[ - gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"), - gr.inputs.Checkbox(default=False, label="Return timestamps"), - ], - outputs=[ - gr.outputs.HTML(label="Video"), - gr.outputs.Textbox(label="Transcription").style(show_copy_button=True), - gr.outputs.Textbox(label="Transcription Time (s)"), - ], - allow_flagging="never", - title=title, - examples=[["https://www.youtube.com/watch?v=m8u-18Q0s7I", "transcribe", False]], - cache_examples=False, - description=description, - article=article, - ) - - demo = gr.Blocks() - - with demo: - gr.TabbedInterface([microphone_chunked, audio_chunked, youtube], ["Microphone", "Audio File", "YouTube"]) - - demo.queue(max_size=10) - demo.launch(show_api=False, max_threads=10) - diff --git a/spaces/Abhaykoul/Wizard-AI/README.md b/spaces/Abhaykoul/Wizard-AI/README.md deleted file mode 100644 index 8042a6baca2655771bc84753e281a8887699ca1b..0000000000000000000000000000000000000000 --- a/spaces/Abhaykoul/Wizard-AI/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Wizard AI -emoji: 🏃 -colorFrom: gray -colorTo: indigo -sdk: streamlit -sdk_version: 1.28.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/layermanager.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/layermanager.d.ts deleted file mode 100644 index 5bfa0d86b952307c67973124a38a0101c8bf1a6c..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/layermanager.d.ts +++ /dev/null @@ -1,2 +0,0 @@ -import LayerManager from './gameobjects/layer/layermanager/LayerManager'; -export default LayerManager; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/Factory.js deleted file mode 100644 index a0df36403ee53a70343ee4199af9f1141e228f3e..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/Factory.js +++ /dev/null @@ -1,13 +0,0 @@ -import AlphaMaskImage from './AlphaMaskImage.js'; -import ObjectFactory from '../ObjectFactory.js'; -import SetValue from '../../../plugins/utils/object/SetValue.js'; - -ObjectFactory.register('alphaMaskImage', function (x, y, key, frame, config) { - var gameObject = new AlphaMaskImage(this.scene, x, y, key, frame, config); - this.scene.add.existing(gameObject); - return gameObject; -}); - -SetValue(window, 'RexPlugins.UI.AlphaMaskImage', AlphaMaskImage); - -export default AlphaMaskImage; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/UpdateChart.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/UpdateChart.js deleted file mode 100644 index 84d6d49322e149b8d58c819b9136826813132096..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/UpdateChart.js +++ /dev/null @@ -1,8 +0,0 @@ -var UpdateChart = function () { - if (this.chart === undefined) { - return this; - } - this.chart.update(); - return this; -} -export default UpdateChart; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateImage.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateImage.js deleted file mode 100644 index e8f2506cbea728a94fdd351536e651516b74bdc0..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateImage.js +++ /dev/null @@ -1,9 +0,0 @@ -import CreateAnyImage from './utils/CreateAnyImage.js'; - -const PhaserImage = Phaser.GameObjects.Image; - -var CreateImage = function (scene, data, view, styles, customBuilders) { - return CreateAnyImage(scene, data, view, styles, customBuilders, PhaserImage); -} - -export default CreateImage; \ No newline at end of file diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/options/__init__.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/options/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/unconditional_image_generation.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/unconditional_image_generation.md deleted file mode 100644 index 
c0888f94c6c135e429feb42d2026962d3a257f5f..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/unconditional_image_generation.md +++ /dev/null @@ -1,69 +0,0 @@ - - -# Unconditional image generation - -[[open-in-colab]] - -Unconditional image generation is a relatively straightforward task. The model only generates images - without any additional context like text or an image - resembling the training data it was trained on. - -The [`DiffusionPipeline`] is the easiest way to use a pre-trained diffusion system for inference. - -Start by creating an instance of [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download. -You can use any of the 🧨 Diffusers [checkpoints](https://huggingface.co/models?library=diffusers&sort=downloads) from the Hub (the checkpoint you'll use generates images of butterflies). - - - -💡 Want to train your own unconditional image generation model? Take a look at the training [guide](training/unconditional_training) to learn how to generate your own images. - - - -In this guide, you'll use [`DiffusionPipeline`] for unconditional image generation with [DDPM](https://arxiv.org/abs/2006.11239): - -```python ->>> from diffusers import DiffusionPipeline - ->>> generator = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128") -``` - -The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. -Because the model consists of roughly 1.4 billion parameters, we strongly recommend running it on a GPU. -You can move the generator object to a GPU, just like you would in PyTorch: - -```python ->>> generator.to("cuda") -``` - -Now you can use the `generator` to generate an image: - -```python ->>> image = generator().images[0] -``` - -The output is by default wrapped into a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object. - -You can save the image by calling: - -```python ->>> image.save("generated_image.png") -``` - -Try out the Spaces below, and feel free to play around with the inference steps parameter to see how it affects the image quality! - - - - diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_inpaint.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_inpaint.py deleted file mode 100644 index 44f3bf5049b892cdf48098f14297e9425c5f0773..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_inpaint.py +++ /dev/null @@ -1,1088 +0,0 @@ -# -# Copyright 2023 The HuggingFace Inc. team. -# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import os -from collections import OrderedDict -from copy import copy -from typing import List, Optional, Union - -import numpy as np -import onnx -import onnx_graphsurgeon as gs -import PIL -import tensorrt as trt -import torch -from huggingface_hub import snapshot_download -from onnx import shape_inference -from polygraphy import cuda -from polygraphy.backend.common import bytes_from_path -from polygraphy.backend.onnx.loader import fold_constants -from polygraphy.backend.trt import ( - CreateConfig, - Profile, - engine_from_bytes, - engine_from_network, - network_from_onnx_path, - save_engine, -) -from polygraphy.backend.trt import util as trt_util -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer - -from diffusers.models import AutoencoderKL, UNet2DConditionModel -from diffusers.pipelines.stable_diffusion import ( - StableDiffusionInpaintPipeline, - StableDiffusionPipelineOutput, - StableDiffusionSafetyChecker, -) -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image -from diffusers.schedulers import DDIMScheduler -from diffusers.utils import DIFFUSERS_CACHE, logging - - -""" -Installation instructions -python3 -m pip install --upgrade transformers diffusers>=0.16.0 -python3 -m pip install --upgrade tensorrt>=8.6.1 -python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com -python3 -m pip install onnxruntime -""" - -TRT_LOGGER = trt.Logger(trt.Logger.ERROR) -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -# Map of numpy dtype -> torch dtype -numpy_to_torch_dtype_dict = { - np.uint8: torch.uint8, - np.int8: torch.int8, - np.int16: torch.int16, - np.int32: torch.int32, - np.int64: torch.int64, - np.float16: torch.float16, - np.float32: torch.float32, - np.float64: torch.float64, - np.complex64: torch.complex64, - np.complex128: torch.complex128, -} -if np.version.full_version >= "1.24.0": - numpy_to_torch_dtype_dict[np.bool_] = torch.bool -else: - numpy_to_torch_dtype_dict[np.bool] = torch.bool - -# Map of torch dtype -> numpy dtype -torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()} - - -def device_view(t): - return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype]) - - -def preprocess_image(image): - """ - image: torch.Tensor - """ - w, h = image.size - w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 - image = image.resize((w, h)) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image).contiguous() - return 2.0 * image - 1.0 - - -class Engine: - def __init__(self, engine_path): - self.engine_path = engine_path - self.engine = None - self.context = None - self.buffers = OrderedDict() - self.tensors = OrderedDict() - - def __del__(self): - [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)] - del self.engine - del self.context - del self.buffers - del self.tensors - - def build( - self, - onnx_path, - fp16, - input_profile=None, - enable_preview=False, - enable_all_tactics=False, - timing_cache=None, - workspace_size=0, - ): - logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}") - p = Profile() - if input_profile: - for name, dims in input_profile.items(): - assert len(dims) == 3 - p.add(name, min=dims[0], opt=dims[1], max=dims[2]) - - config_kwargs = {} - - 
config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805] - if enable_preview: - # Faster dynamic shapes made optional since it increases engine build time. - config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805) - if workspace_size > 0: - config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size} - if not enable_all_tactics: - config_kwargs["tactic_sources"] = [] - - engine = engine_from_network( - network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]), - config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs), - save_timing_cache=timing_cache, - ) - save_engine(engine, path=self.engine_path) - - def load(self): - logger.warning(f"Loading TensorRT engine: {self.engine_path}") - self.engine = engine_from_bytes(bytes_from_path(self.engine_path)) - - def activate(self): - self.context = self.engine.create_execution_context() - - def allocate_buffers(self, shape_dict=None, device="cuda"): - for idx in range(trt_util.get_bindings_per_profile(self.engine)): - binding = self.engine[idx] - if shape_dict and binding in shape_dict: - shape = shape_dict[binding] - else: - shape = self.engine.get_binding_shape(binding) - dtype = trt.nptype(self.engine.get_binding_dtype(binding)) - if self.engine.binding_is_input(binding): - self.context.set_binding_shape(idx, shape) - tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device) - self.tensors[binding] = tensor - self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype) - - def infer(self, feed_dict, stream): - start_binding, end_binding = trt_util.get_active_profile_bindings(self.context) - # shallow copy of ordered dict - device_buffers = copy(self.buffers) - for name, buf in feed_dict.items(): - assert isinstance(buf, cuda.DeviceView) - device_buffers[name] = buf - bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()] - noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr) - if not noerror: - raise ValueError("ERROR: inference failed.") - - return self.tensors - - -class Optimizer: - def __init__(self, onnx_graph): - self.graph = gs.import_onnx(onnx_graph) - - def cleanup(self, return_onnx=False): - self.graph.cleanup().toposort() - if return_onnx: - return gs.export_onnx(self.graph) - - def select_outputs(self, keep, names=None): - self.graph.outputs = [self.graph.outputs[o] for o in keep] - if names: - for i, name in enumerate(names): - self.graph.outputs[i].name = name - - def fold_constants(self, return_onnx=False): - onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True) - self.graph = gs.import_onnx(onnx_graph) - if return_onnx: - return onnx_graph - - def infer_shapes(self, return_onnx=False): - onnx_graph = gs.export_onnx(self.graph) - if onnx_graph.ByteSize() > 2147483648: - raise TypeError("ERROR: model size exceeds supported 2GB limit") - else: - onnx_graph = shape_inference.infer_shapes(onnx_graph) - - self.graph = gs.import_onnx(onnx_graph) - if return_onnx: - return onnx_graph - - -class BaseModel: - def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77): - self.model = model - self.name = "SD Model" - self.fp16 = fp16 - self.device = device - - self.min_batch = 1 - self.max_batch = max_batch_size - self.min_image_shape = 256 # min image resolution: 256x256 - 
self.max_image_shape = 1024 # max image resolution: 1024x1024 - self.min_latent_shape = self.min_image_shape // 8 - self.max_latent_shape = self.max_image_shape // 8 - - self.embedding_dim = embedding_dim - self.text_maxlen = text_maxlen - - def get_model(self): - return self.model - - def get_input_names(self): - pass - - def get_output_names(self): - pass - - def get_dynamic_axes(self): - return None - - def get_sample_input(self, batch_size, image_height, image_width): - pass - - def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): - return None - - def get_shape_dict(self, batch_size, image_height, image_width): - return None - - def optimize(self, onnx_graph): - opt = Optimizer(onnx_graph) - opt.cleanup() - opt.fold_constants() - opt.infer_shapes() - onnx_opt_graph = opt.cleanup(return_onnx=True) - return onnx_opt_graph - - def check_dims(self, batch_size, image_height, image_width): - assert batch_size >= self.min_batch and batch_size <= self.max_batch - assert image_height % 8 == 0 or image_width % 8 == 0 - latent_height = image_height // 8 - latent_width = image_width // 8 - assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape - assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape - return (latent_height, latent_width) - - def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape): - min_batch = batch_size if static_batch else self.min_batch - max_batch = batch_size if static_batch else self.max_batch - latent_height = image_height // 8 - latent_width = image_width // 8 - min_image_height = image_height if static_shape else self.min_image_shape - max_image_height = image_height if static_shape else self.max_image_shape - min_image_width = image_width if static_shape else self.min_image_shape - max_image_width = image_width if static_shape else self.max_image_shape - min_latent_height = latent_height if static_shape else self.min_latent_shape - max_latent_height = latent_height if static_shape else self.max_latent_shape - min_latent_width = latent_width if static_shape else self.min_latent_shape - max_latent_width = latent_width if static_shape else self.max_latent_shape - return ( - min_batch, - max_batch, - min_image_height, - max_image_height, - min_image_width, - max_image_width, - min_latent_height, - max_latent_height, - min_latent_width, - max_latent_width, - ) - - -def getOnnxPath(model_name, onnx_dir, opt=True): - return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx") - - -def getEnginePath(model_name, engine_dir): - return os.path.join(engine_dir, model_name + ".plan") - - -def build_engines( - models: dict, - engine_dir, - onnx_dir, - onnx_opset, - opt_image_height, - opt_image_width, - opt_batch_size=1, - force_engine_rebuild=False, - static_batch=False, - static_shape=True, - enable_preview=False, - enable_all_tactics=False, - timing_cache=None, - max_workspace_size=0, -): - built_engines = {} - if not os.path.isdir(onnx_dir): - os.makedirs(onnx_dir) - if not os.path.isdir(engine_dir): - os.makedirs(engine_dir) - - # Export models to ONNX - for model_name, model_obj in models.items(): - engine_path = getEnginePath(model_name, engine_dir) - if force_engine_rebuild or not os.path.exists(engine_path): - logger.warning("Building Engines...") - logger.warning("Engine build can take a while to complete") - onnx_path = getOnnxPath(model_name, onnx_dir, opt=False) - onnx_opt_path = getOnnxPath(model_name, 
onnx_dir) - if force_engine_rebuild or not os.path.exists(onnx_opt_path): - if force_engine_rebuild or not os.path.exists(onnx_path): - logger.warning(f"Exporting model: {onnx_path}") - model = model_obj.get_model() - with torch.inference_mode(), torch.autocast("cuda"): - inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width) - torch.onnx.export( - model, - inputs, - onnx_path, - export_params=True, - opset_version=onnx_opset, - do_constant_folding=True, - input_names=model_obj.get_input_names(), - output_names=model_obj.get_output_names(), - dynamic_axes=model_obj.get_dynamic_axes(), - ) - del model - torch.cuda.empty_cache() - gc.collect() - else: - logger.warning(f"Found cached model: {onnx_path}") - - # Optimize onnx - if force_engine_rebuild or not os.path.exists(onnx_opt_path): - logger.warning(f"Generating optimizing model: {onnx_opt_path}") - onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path)) - onnx.save(onnx_opt_graph, onnx_opt_path) - else: - logger.warning(f"Found cached optimized model: {onnx_opt_path} ") - - # Build TensorRT engines - for model_name, model_obj in models.items(): - engine_path = getEnginePath(model_name, engine_dir) - engine = Engine(engine_path) - onnx_path = getOnnxPath(model_name, onnx_dir, opt=False) - onnx_opt_path = getOnnxPath(model_name, onnx_dir) - - if force_engine_rebuild or not os.path.exists(engine.engine_path): - engine.build( - onnx_opt_path, - fp16=True, - input_profile=model_obj.get_input_profile( - opt_batch_size, - opt_image_height, - opt_image_width, - static_batch=static_batch, - static_shape=static_shape, - ), - enable_preview=enable_preview, - timing_cache=timing_cache, - workspace_size=max_workspace_size, - ) - built_engines[model_name] = engine - - # Load and activate TensorRT engines - for model_name, model_obj in models.items(): - engine = built_engines[model_name] - engine.load() - engine.activate() - - return built_engines - - -def runEngine(engine, feed_dict, stream): - return engine.infer(feed_dict, stream) - - -class CLIP(BaseModel): - def __init__(self, model, device, max_batch_size, embedding_dim): - super(CLIP, self).__init__( - model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim - ) - self.name = "CLIP" - - def get_input_names(self): - return ["input_ids"] - - def get_output_names(self): - return ["text_embeddings", "pooler_output"] - - def get_dynamic_axes(self): - return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}} - - def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): - self.check_dims(batch_size, image_height, image_width) - min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims( - batch_size, image_height, image_width, static_batch, static_shape - ) - return { - "input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)] - } - - def get_shape_dict(self, batch_size, image_height, image_width): - self.check_dims(batch_size, image_height, image_width) - return { - "input_ids": (batch_size, self.text_maxlen), - "text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim), - } - - def get_sample_input(self, batch_size, image_height, image_width): - self.check_dims(batch_size, image_height, image_width) - return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device) - - def optimize(self, onnx_graph): - opt = Optimizer(onnx_graph) - opt.select_outputs([0]) # delete graph output#1 - opt.cleanup() - 
opt.fold_constants() - opt.infer_shapes() - opt.select_outputs([0], names=["text_embeddings"]) # rename network output - opt_onnx_graph = opt.cleanup(return_onnx=True) - return opt_onnx_graph - - -def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False): - return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim) - - -class UNet(BaseModel): - def __init__( - self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4 - ): - super(UNet, self).__init__( - model=model, - fp16=fp16, - device=device, - max_batch_size=max_batch_size, - embedding_dim=embedding_dim, - text_maxlen=text_maxlen, - ) - self.unet_dim = unet_dim - self.name = "UNet" - - def get_input_names(self): - return ["sample", "timestep", "encoder_hidden_states"] - - def get_output_names(self): - return ["latent"] - - def get_dynamic_axes(self): - return { - "sample": {0: "2B", 2: "H", 3: "W"}, - "encoder_hidden_states": {0: "2B"}, - "latent": {0: "2B", 2: "H", 3: "W"}, - } - - def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): - latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) - ( - min_batch, - max_batch, - _, - _, - _, - _, - min_latent_height, - max_latent_height, - min_latent_width, - max_latent_width, - ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape) - return { - "sample": [ - (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width), - (2 * batch_size, self.unet_dim, latent_height, latent_width), - (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width), - ], - "encoder_hidden_states": [ - (2 * min_batch, self.text_maxlen, self.embedding_dim), - (2 * batch_size, self.text_maxlen, self.embedding_dim), - (2 * max_batch, self.text_maxlen, self.embedding_dim), - ], - } - - def get_shape_dict(self, batch_size, image_height, image_width): - latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) - return { - "sample": (2 * batch_size, self.unet_dim, latent_height, latent_width), - "encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim), - "latent": (2 * batch_size, 4, latent_height, latent_width), - } - - def get_sample_input(self, batch_size, image_height, image_width): - latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) - dtype = torch.float16 if self.fp16 else torch.float32 - return ( - torch.randn( - 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device - ), - torch.tensor([1.0], dtype=torch.float32, device=self.device), - torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device), - ) - - -def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False, unet_dim=4): - return UNet( - model, - fp16=True, - device=device, - max_batch_size=max_batch_size, - embedding_dim=embedding_dim, - unet_dim=unet_dim, - ) - - -class VAE(BaseModel): - def __init__(self, model, device, max_batch_size, embedding_dim): - super(VAE, self).__init__( - model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim - ) - self.name = "VAE decoder" - - def get_input_names(self): - return ["latent"] - - def get_output_names(self): - return ["images"] - - def get_dynamic_axes(self): - return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}} - - def get_input_profile(self, 
batch_size, image_height, image_width, static_batch, static_shape): - latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) - ( - min_batch, - max_batch, - _, - _, - _, - _, - min_latent_height, - max_latent_height, - min_latent_width, - max_latent_width, - ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape) - return { - "latent": [ - (min_batch, 4, min_latent_height, min_latent_width), - (batch_size, 4, latent_height, latent_width), - (max_batch, 4, max_latent_height, max_latent_width), - ] - } - - def get_shape_dict(self, batch_size, image_height, image_width): - latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) - return { - "latent": (batch_size, 4, latent_height, latent_width), - "images": (batch_size, 3, image_height, image_width), - } - - def get_sample_input(self, batch_size, image_height, image_width): - latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) - return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device) - - -def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False): - return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim) - - -class TorchVAEEncoder(torch.nn.Module): - def __init__(self, model): - super().__init__() - self.vae_encoder = model - - def forward(self, x): - return self.vae_encoder.encode(x).latent_dist.sample() - - -class VAEEncoder(BaseModel): - def __init__(self, model, device, max_batch_size, embedding_dim): - super(VAEEncoder, self).__init__( - model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim - ) - self.name = "VAE encoder" - - def get_model(self): - vae_encoder = TorchVAEEncoder(self.model) - return vae_encoder - - def get_input_names(self): - return ["images"] - - def get_output_names(self): - return ["latent"] - - def get_dynamic_axes(self): - return {"images": {0: "B", 2: "8H", 3: "8W"}, "latent": {0: "B", 2: "H", 3: "W"}} - - def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape): - assert batch_size >= self.min_batch and batch_size <= self.max_batch - min_batch = batch_size if static_batch else self.min_batch - max_batch = batch_size if static_batch else self.max_batch - self.check_dims(batch_size, image_height, image_width) - ( - min_batch, - max_batch, - min_image_height, - max_image_height, - min_image_width, - max_image_width, - _, - _, - _, - _, - ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape) - - return { - "images": [ - (min_batch, 3, min_image_height, min_image_width), - (batch_size, 3, image_height, image_width), - (max_batch, 3, max_image_height, max_image_width), - ] - } - - def get_shape_dict(self, batch_size, image_height, image_width): - latent_height, latent_width = self.check_dims(batch_size, image_height, image_width) - return { - "images": (batch_size, 3, image_height, image_width), - "latent": (batch_size, 4, latent_height, latent_width), - } - - def get_sample_input(self, batch_size, image_height, image_width): - self.check_dims(batch_size, image_height, image_width) - return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device) - - -def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False): - return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim) - - -class 
TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline): - r""" - Pipeline for inpainting using TensorRT accelerated Stable Diffusion. - - This model inherits from [`StableDiffusionInpaintPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: DDIMScheduler, - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPFeatureExtractor, - requires_safety_checker: bool = True, - stages=["clip", "unet", "vae", "vae_encoder"], - image_height: int = 512, - image_width: int = 512, - max_batch_size: int = 16, - # ONNX export parameters - onnx_opset: int = 17, - onnx_dir: str = "onnx", - # TensorRT engine build parameters - engine_dir: str = "engine", - build_preview_features: bool = True, - force_engine_rebuild: bool = False, - timing_cache: str = "timing_cache", - ): - super().__init__( - vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker - ) - - self.vae.forward = self.vae.decode - - self.stages = stages - self.image_height, self.image_width = image_height, image_width - self.inpaint = True - self.onnx_opset = onnx_opset - self.onnx_dir = onnx_dir - self.engine_dir = engine_dir - self.force_engine_rebuild = force_engine_rebuild - self.timing_cache = timing_cache - self.build_static_batch = False - self.build_dynamic_shape = False - self.build_preview_features = build_preview_features - - self.max_batch_size = max_batch_size - # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation. 
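        # With dynamic shapes enabled, or an output resolution above 512x512, the check below caps
        # max_batch_size at 4 as that TensorRT workaround.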
- if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512: - self.max_batch_size = 4 - - self.stream = None # loaded in loadResources() - self.models = {} # loaded in __loadModels() - self.engine = {} # loaded in build_engines() - - def __loadModels(self): - # Load pipeline models - self.embedding_dim = self.text_encoder.config.hidden_size - models_args = { - "device": self.torch_device, - "max_batch_size": self.max_batch_size, - "embedding_dim": self.embedding_dim, - "inpaint": self.inpaint, - } - if "clip" in self.stages: - self.models["clip"] = make_CLIP(self.text_encoder, **models_args) - if "unet" in self.stages: - self.models["unet"] = make_UNet(self.unet, **models_args, unet_dim=self.unet.config.in_channels) - if "vae" in self.stages: - self.models["vae"] = make_VAE(self.vae, **models_args) - if "vae_encoder" in self.stages: - self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args) - - @classmethod - def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): - cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) - resume_download = kwargs.pop("resume_download", False) - proxies = kwargs.pop("proxies", None) - local_files_only = kwargs.pop("local_files_only", False) - use_auth_token = kwargs.pop("use_auth_token", None) - revision = kwargs.pop("revision", None) - - cls.cached_folder = ( - pretrained_model_name_or_path - if os.path.isdir(pretrained_model_name_or_path) - else snapshot_download( - pretrained_model_name_or_path, - cache_dir=cache_dir, - resume_download=resume_download, - proxies=proxies, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - revision=revision, - ) - ) - - def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False): - super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings) - - self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir) - self.engine_dir = os.path.join(self.cached_folder, self.engine_dir) - self.timing_cache = os.path.join(self.cached_folder, self.timing_cache) - - # set device - self.torch_device = self._execution_device - logger.warning(f"Running inference on device: {self.torch_device}") - - # load models - self.__loadModels() - - # build engines - self.engine = build_engines( - self.models, - self.engine_dir, - self.onnx_dir, - self.onnx_opset, - opt_image_height=self.image_height, - opt_image_width=self.image_width, - force_engine_rebuild=self.force_engine_rebuild, - static_batch=self.build_static_batch, - static_shape=not self.build_dynamic_shape, - enable_preview=self.build_preview_features, - timing_cache=self.timing_cache, - ) - - return self - - def __initialize_timesteps(self, timesteps, strength): - self.scheduler.set_timesteps(timesteps) - offset = self.scheduler.steps_offset if hasattr(self.scheduler, "steps_offset") else 0 - init_timestep = int(timesteps * strength) + offset - init_timestep = min(init_timestep, timesteps) - t_start = max(timesteps - init_timestep + offset, 0) - timesteps = self.scheduler.timesteps[t_start:].to(self.torch_device) - return timesteps, t_start - - def __preprocess_images(self, batch_size, images=()): - init_images = [] - for image in images: - image = image.to(self.torch_device).float() - image = image.repeat(batch_size, 1, 1, 1) - init_images.append(image) - return tuple(init_images) - - def __encode_image(self, init_image): - init_latents = runEngine(self.engine["vae_encoder"], {"images": device_view(init_image)}, self.stream)[ 
- "latent" - ] - init_latents = 0.18215 * init_latents - return init_latents - - def __encode_prompt(self, prompt, negative_prompt): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. - Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). - """ - # Tokenize prompt - text_input_ids = ( - self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - .input_ids.type(torch.int32) - .to(self.torch_device) - ) - - text_input_ids_inp = device_view(text_input_ids) - # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt - text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids_inp}, self.stream)[ - "text_embeddings" - ].clone() - - # Tokenize negative prompt - uncond_input_ids = ( - self.tokenizer( - negative_prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - .input_ids.type(torch.int32) - .to(self.torch_device) - ) - uncond_input_ids_inp = device_view(uncond_input_ids) - uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[ - "text_embeddings" - ] - - # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16) - - return text_embeddings - - def __denoise_latent( - self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None - ): - if not isinstance(timesteps, torch.Tensor): - timesteps = self.scheduler.timesteps - for step_index, timestep in enumerate(timesteps): - # Expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) - latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep) - if isinstance(mask, torch.Tensor): - latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) - - # Predict the noise residual - timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep - - sample_inp = device_view(latent_model_input) - timestep_inp = device_view(timestep_float) - embeddings_inp = device_view(text_embeddings) - noise_pred = runEngine( - self.engine["unet"], - {"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp}, - self.stream, - )["latent"] - - # Perform guidance - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) - - latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample - - latents = 1.0 / 0.18215 * latents - return latents - - def __decode_latent(self, latents): - images = runEngine(self.engine["vae"], {"latent": device_view(latents)}, self.stream)["images"] - images = (images / 2 + 0.5).clamp(0, 1) - return images.cpu().permute(0, 2, 3, 1).float().numpy() - - def __loadResources(self, image_height, image_width, batch_size): - self.stream = cuda.Stream() - - # 
Allocate buffers for TensorRT engine bindings - for model_name, obj in self.models.items(): - self.engine[model_name].allocate_buffers( - shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device - ) - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]] = None, - image: Union[torch.FloatTensor, PIL.Image.Image] = None, - mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, - strength: float = 0.75, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. - instead. - image (`PIL.Image.Image`): - `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will - be masked out with `mask_image` and repainted according to `prompt`. - mask_image (`PIL.Image.Image`): - `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be - repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted - to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) - instead of 3, so the expected shape would be `(B, H, W, 1)`. - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` - will be used as a starting point, adding more noise to it the larger the `strength`. The number of - denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will - be maximum and the denoising process will run for the full number of iterations specified in - `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead. - Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. 
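                Example (an illustrative sketch, not part of the original pipeline; the checkpoint id
                `stabilityai/stable-diffusion-2-inpainting` and the local image file names are assumptions):

                    import PIL.Image

                    pipe = TensorRTStableDiffusionInpaintPipeline.from_pretrained(
                        "stabilityai/stable-diffusion-2-inpainting"
                    )
                    # Resolve the snapshot folder so the ONNX / engine directories land next to the
                    # weights, then `to("cuda")` exports the models and builds (or loads) the engines.
                    pipe.set_cached_folder("stabilityai/stable-diffusion-2-inpainting")
                    pipe = pipe.to("cuda")

                    init_image = PIL.Image.open("bench.png").resize((512, 512))
                    mask_image = PIL.Image.open("bench_mask.png").resize((512, 512))
                    result = pipe(
                        prompt="a mecha robot sitting on a bench",
                        image=init_image,
                        mask_image=mask_image,
                        strength=0.75,
                        num_inference_steps=50,
                    ).images[0]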
- - """ - self.generator = generator - self.denoising_steps = num_inference_steps - self.guidance_scale = guidance_scale - - # Pre-compute latent input scales and linear multistep coefficients - self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device) - - # Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - prompt = [prompt] - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}") - - if negative_prompt is None: - negative_prompt = [""] * batch_size - - if negative_prompt is not None and isinstance(negative_prompt, str): - negative_prompt = [negative_prompt] - - assert len(prompt) == len(negative_prompt) - - if batch_size > self.max_batch_size: - raise ValueError( - f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4" - ) - - # Validate image dimensions - mask_width, mask_height = mask_image.size - if mask_height != self.image_height or mask_width != self.image_width: - raise ValueError( - f"Input image height and width {self.image_height} and {self.image_width} are not equal to " - f"the respective dimensions of the mask image {mask_height} and {mask_width}" - ) - - # load resources - self.__loadResources(self.image_height, self.image_width, batch_size) - - with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER): - # Spatial dimensions of latent tensor - latent_height = self.image_height // 8 - latent_width = self.image_width // 8 - - # Pre-initialize latents - num_channels_latents = self.vae.config.latent_channels - latents = self.prepare_latents( - batch_size, - num_channels_latents, - self.image_height, - self.image_width, - torch.float32, - self.torch_device, - generator, - ) - - # Pre-process input images - mask, masked_image = self.__preprocess_images(batch_size, prepare_mask_and_masked_image(image, mask_image)) - # print(mask) - mask = torch.nn.functional.interpolate(mask, size=(latent_height, latent_width)) - mask = torch.cat([mask] * 2) - - # Initialize timesteps - timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength) - - # VAE encode masked image - masked_latents = self.__encode_image(masked_image) - masked_latents = torch.cat([masked_latents] * 2) - - # CLIP text encoder - text_embeddings = self.__encode_prompt(prompt, negative_prompt) - - # UNet denoiser - latents = self.__denoise_latent( - latents, - text_embeddings, - timesteps=timesteps, - step_offset=t_start, - mask=mask, - masked_image_latents=masked_latents, - ) - - # VAE decode latent - images = self.__decode_latent(latents) - - images = self.numpy_to_pil(images) - return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=None) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py deleted file mode 100644 index 56836f0b6d77b8daa25e956101694863e418339f..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file is autogenerated by the command `make fix-copies`, do not edit. 
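# The StableDiffusionKDiffusionPipeline defined below is an importable placeholder: its metaclass
# (DummyObject) and the explicit classmethods route every instantiation, `from_config`, and
# `from_pretrained` call through `requires_backends`, which raises an ImportError telling the user
# to install the missing `torch`, `transformers`, and `k_diffusion` backends.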
-from ..utils import DummyObject, requires_backends - - -class StableDiffusionKDiffusionPipeline(metaclass=DummyObject): - _backends = ["torch", "transformers", "k_diffusion"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch", "transformers", "k_diffusion"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers", "k_diffusion"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["torch", "transformers", "k_diffusion"]) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py deleted file mode 100644 index 1905af6c695fa6e4e87bac2e6c462a8b9439528c..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py +++ /dev/null @@ -1,424 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import random -import unittest - -import numpy as np -import torch -from PIL import Image -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer - -from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel -from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu - - -enable_full_determinism() - - -class StableDiffusionUpscalePipelineFastTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - @property - def dummy_image(self): - batch_size = 1 - num_channels = 3 - sizes = (32, 32) - - image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) - return image - - @property - def dummy_cond_unet_upscale(self): - torch.manual_seed(0) - model = UNet2DConditionModel( - block_out_channels=(32, 32, 64), - layers_per_block=2, - sample_size=32, - in_channels=7, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - # SD2-specific config below - attention_head_dim=8, - use_linear_projection=True, - only_cross_attention=(True, True, False), - num_class_embeds=100, - ) - return model - - @property - def dummy_vae(self): - torch.manual_seed(0) - model = AutoencoderKL( - block_out_channels=[32, 32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - return model - - @property - def 
dummy_text_encoder(self): - torch.manual_seed(0) - config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - # SD2-specific config below - hidden_act="gelu", - projection_dim=512, - ) - return CLIPTextModel(config) - - def test_stable_diffusion_upscale(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet_upscale - low_res_scheduler = DDPMScheduler() - scheduler = DDIMScheduler(prediction_type="v_prediction") - vae = self.dummy_vae - text_encoder = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionUpscalePipeline( - unet=unet, - low_res_scheduler=low_res_scheduler, - scheduler=scheduler, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - max_noise_level=350, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - image=low_res_image, - generator=generator, - guidance_scale=6.0, - noise_level=20, - num_inference_steps=2, - output_type="np", - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = sd_pipe( - [prompt], - image=low_res_image, - generator=generator, - guidance_scale=6.0, - noise_level=20, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - expected_height_width = low_res_image.size[0] * 4 - assert image.shape == (1, expected_height_width, expected_height_width, 3) - expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - def test_stable_diffusion_upscale_batch(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet_upscale - low_res_scheduler = DDPMScheduler() - scheduler = DDIMScheduler(prediction_type="v_prediction") - vae = self.dummy_vae - text_encoder = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionUpscalePipeline( - unet=unet, - low_res_scheduler=low_res_scheduler, - scheduler=scheduler, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - max_noise_level=350, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - output = sd_pipe( - 2 * [prompt], - image=2 * [low_res_image], - guidance_scale=6.0, - noise_level=20, - num_inference_steps=2, - output_type="np", - ) - image = output.images - assert image.shape[0] == 2 - - generator = torch.Generator(device=device).manual_seed(0) - output = 
sd_pipe( - [prompt], - image=low_res_image, - generator=generator, - num_images_per_prompt=2, - guidance_scale=6.0, - noise_level=20, - num_inference_steps=2, - output_type="np", - ) - image = output.images - assert image.shape[0] == 2 - - def test_stable_diffusion_upscale_prompt_embeds(self): - device = "cpu" # ensure determinism for the device-dependent torch.Generator - unet = self.dummy_cond_unet_upscale - low_res_scheduler = DDPMScheduler() - scheduler = DDIMScheduler(prediction_type="v_prediction") - vae = self.dummy_vae - text_encoder = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionUpscalePipeline( - unet=unet, - low_res_scheduler=low_res_scheduler, - scheduler=scheduler, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - max_noise_level=350, - ) - sd_pipe = sd_pipe.to(device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.Generator(device=device).manual_seed(0) - output = sd_pipe( - [prompt], - image=low_res_image, - generator=generator, - guidance_scale=6.0, - noise_level=20, - num_inference_steps=2, - output_type="np", - ) - - image = output.images - - generator = torch.Generator(device=device).manual_seed(0) - prompt_embeds = sd_pipe._encode_prompt(prompt, device, 1, False) - image_from_prompt_embeds = sd_pipe( - prompt_embeds=prompt_embeds, - image=[low_res_image], - generator=generator, - guidance_scale=6.0, - noise_level=20, - num_inference_steps=2, - output_type="np", - return_dict=False, - )[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_prompt_embeds_slice = image_from_prompt_embeds[0, -3:, -3:, -1] - - expected_height_width = low_res_image.size[0] * 4 - assert image.shape == (1, expected_height_width, expected_height_width, 3) - expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_prompt_embeds_slice.flatten() - expected_slice).max() < 1e-2 - - @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") - def test_stable_diffusion_upscale_fp16(self): - """Test that stable diffusion upscale works with fp16""" - unet = self.dummy_cond_unet_upscale - low_res_scheduler = DDPMScheduler() - scheduler = DDIMScheduler(prediction_type="v_prediction") - vae = self.dummy_vae - text_encoder = self.dummy_text_encoder - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] - low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) - - # put models in fp16, except vae as it overflows in fp16 - unet = unet.half() - text_encoder = text_encoder.half() - - # make sure here that pndm scheduler skips prk - sd_pipe = StableDiffusionUpscalePipeline( - unet=unet, - low_res_scheduler=low_res_scheduler, - scheduler=scheduler, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - max_noise_level=350, - ) - sd_pipe = sd_pipe.to(torch_device) - sd_pipe.set_progress_bar_config(disable=None) - - prompt = "A painting of a squirrel eating a burger" - generator = torch.manual_seed(0) - image = sd_pipe( - [prompt], - image=low_res_image, - generator=generator, - 
num_inference_steps=2, - output_type="np", - ).images - - expected_height_width = low_res_image.size[0] * 4 - assert image.shape == (1, expected_height_width, expected_height_width, 3) - - -@slow -@require_torch_gpu -class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_stable_diffusion_upscale_pipeline(self): - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/sd2-upscale/low_res_cat.png" - ) - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" - "/upsampled_cat.npy" - ) - - model_id = "stabilityai/stable-diffusion-x4-upscaler" - pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - prompt = "a cat sitting on a park bench" - - generator = torch.manual_seed(0) - output = pipe( - prompt=prompt, - image=image, - generator=generator, - output_type="np", - ) - image = output.images[0] - - assert image.shape == (512, 512, 3) - assert np.abs(expected_image - image).max() < 1e-3 - - def test_stable_diffusion_upscale_pipeline_fp16(self): - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/sd2-upscale/low_res_cat.png" - ) - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" - "/upsampled_cat_fp16.npy" - ) - - model_id = "stabilityai/stable-diffusion-x4-upscaler" - pipe = StableDiffusionUpscalePipeline.from_pretrained( - model_id, - torch_dtype=torch.float16, - ) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing() - - prompt = "a cat sitting on a park bench" - - generator = torch.manual_seed(0) - output = pipe( - prompt=prompt, - image=image, - generator=generator, - output_type="np", - ) - image = output.images[0] - - assert image.shape == (512, 512, 3) - assert np.abs(expected_image - image).max() < 5e-1 - - def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): - torch.cuda.empty_cache() - torch.cuda.reset_max_memory_allocated() - torch.cuda.reset_peak_memory_stats() - - image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/sd2-upscale/low_res_cat.png" - ) - - model_id = "stabilityai/stable-diffusion-x4-upscaler" - pipe = StableDiffusionUpscalePipeline.from_pretrained( - model_id, - torch_dtype=torch.float16, - ) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - pipe.enable_attention_slicing(1) - pipe.enable_sequential_cpu_offload() - - prompt = "a cat sitting on a park bench" - - generator = torch.manual_seed(0) - _ = pipe( - prompt=prompt, - image=image, - generator=generator, - num_inference_steps=5, - output_type="np", - ) - - mem_bytes = torch.cuda.max_memory_allocated() - # make sure that less than 2.9 GB is allocated - assert mem_bytes < 2.9 * 10**9 diff --git a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py deleted file mode 100644 index 4b87b2ce58b2efc2461046df897038fdd5128cee..0000000000000000000000000000000000000000 --- 
a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py +++ /dev/null @@ -1,42 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://detectron2/resnet50_caffe', - backbone=dict( - norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe')) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/atss_assigner.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/atss_assigner.py deleted file mode 100644 index d4fe9d0e3c8704bd780d493eff20a5505dbe9580..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/atss_assigner.py +++ /dev/null @@ -1,178 +0,0 @@ -import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class ATSSAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with `0` or a positive integer - indicating the ground truth index. - - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - Args: - topk (float): number of bbox selected in each level - """ - - def __init__(self, - topk, - iou_calculator=dict(type='BboxOverlaps2D'), - ignore_iof_thr=-1): - self.topk = topk - self.iou_calculator = build_iou_calculator(iou_calculator) - self.ignore_iof_thr = ignore_iof_thr - - # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py - - def assign(self, - bboxes, - num_level_bboxes, - gt_bboxes, - gt_bboxes_ignore=None, - gt_labels=None): - """Assign gt to bboxes. - - The assignment is done in following steps - - 1. compute iou between all bbox (bbox of all pyramid levels) and gt - 2. compute center distance between all bbox and gt - 3. on each pyramid level, for each gt, select k bbox whose center - are closest to the gt center, so we total select k*l bbox as - candidates for each gt - 4. get corresponding iou for the these candidates, and compute the - mean and std, set mean + std as the iou threshold - 5. select these candidates whose iou are greater than or equal to - the threshold as positive - 6. 
limit the positive sample's center in gt - - - Args: - bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). - num_level_bboxes (List): num of bboxes in each level - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. - """ - INF = 100000000 - bboxes = bboxes[:, :4] - num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0) - - # compute iou between all bbox and gt - overlaps = self.iou_calculator(bboxes, gt_bboxes) - - # assign 0 by default - assigned_gt_inds = overlaps.new_full((num_bboxes, ), - 0, - dtype=torch.long) - - if num_gt == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = overlaps.new_zeros((num_bboxes, )) - if num_gt == 0: - # No truth, assign everything to background - assigned_gt_inds[:] = 0 - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - - # compute center distance between all bbox and gt - gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 - gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 - gt_points = torch.stack((gt_cx, gt_cy), dim=1) - - bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 - bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 - bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1) - - distances = (bboxes_points[:, None, :] - - gt_points[None, :, :]).pow(2).sum(-1).sqrt() - - if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None - and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): - ignore_overlaps = self.iou_calculator( - bboxes, gt_bboxes_ignore, mode='iof') - ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) - ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr - distances[ignore_idxs, :] = INF - assigned_gt_inds[ignore_idxs] = -1 - - # Selecting candidates based on the center distance - candidate_idxs = [] - start_idx = 0 - for level, bboxes_per_level in enumerate(num_level_bboxes): - # on each pyramid level, for each gt, - # select k bbox whose center are closest to the gt center - end_idx = start_idx + bboxes_per_level - distances_per_level = distances[start_idx:end_idx, :] - selectable_k = min(self.topk, bboxes_per_level) - _, topk_idxs_per_level = distances_per_level.topk( - selectable_k, dim=0, largest=False) - candidate_idxs.append(topk_idxs_per_level + start_idx) - start_idx = end_idx - candidate_idxs = torch.cat(candidate_idxs, dim=0) - - # get corresponding iou for the these candidates, and compute the - # mean and std, set mean + std as the iou threshold - candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] - overlaps_mean_per_gt = candidate_overlaps.mean(0) - overlaps_std_per_gt = candidate_overlaps.std(0) - overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt - - is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] - - # limit the positive sample's center in gt - for gt_idx in range(num_gt): - candidate_idxs[:, gt_idx] += gt_idx * num_bboxes - ep_bboxes_cx = bboxes_cx.view(1, -1).expand( - num_gt, num_bboxes).contiguous().view(-1) - ep_bboxes_cy = bboxes_cy.view(1, -1).expand( - num_gt, num_bboxes).contiguous().view(-1) - candidate_idxs = candidate_idxs.view(-1) - - # calculate the left, top, right, bottom distance between 
positive - # bbox center and gt side - l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] - t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] - r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 - is_pos = is_pos & is_in_gts - - # if an anchor box is assigned to multiple gts, - # the one with the highest IoU will be selected. - overlaps_inf = torch.full_like(overlaps, - -INF).t().contiguous().view(-1) - index = candidate_idxs.view(-1)[is_pos.view(-1)] - overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] - overlaps_inf = overlaps_inf.view(num_gt, -1).t() - - max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) - assigned_gt_inds[ - max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/roi_extractors/__init__.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/roi_extractors/__init__.py deleted file mode 100644 index a6ec0ecc3063cd23c2463f2f53f1c2a83b04d43b..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/roi_extractors/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .generic_roi_extractor import GenericRoIExtractor -from .single_level_roi_extractor import SingleRoIExtractor - -__all__ = [ - 'SingleRoIExtractor', - 'GenericRoIExtractor', -] diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/utils/collect_env.py b/spaces/Andy1621/uniformer_image_detection/mmdet/utils/collect_env.py deleted file mode 100644 index 89c064accdb10abec4a03de04f601d27aab2da70..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/utils/collect_env.py +++ /dev/null @@ -1,16 +0,0 @@ -from mmcv.utils import collect_env as collect_base_env -from mmcv.utils import get_git_hash - -import mmdet - - -def collect_env(): - """Collect the information of the running environments.""" - env_info = collect_base_env() - env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7] - return env_info - - -if __name__ == '__main__': - for name, val in collect_env().items(): - print(f'{name}: {val}') diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index d2bac38ca6760af6441ede5a04409ed495ef87f3..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_512x1024_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context.py 
b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context.py deleted file mode 100644 index 71a0fda48aa2538e4d913e73e94a71564377ea50..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', - '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=60), - auxiliary_head=dict(num_classes=60), - test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) -optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index ab8a3d3e3fcc12dd41223af190e2ae04f14d1cb8..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' -model = dict( - pretrained='torchvision://resnet101', - backbone=dict(type='ResNet', depth=101)) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/__init__.py deleted file mode 100644 index 7a17b7b3b6ad49157ee41f3da304fec3d32342d3..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""Index interaction code -""" diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/_jaraco_text.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/_jaraco_text.py deleted file mode 100644 index e06947c051a7d2273260343eab37d9437f91e781..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/_jaraco_text.py +++ /dev/null @@ -1,109 +0,0 @@ -"""Functions brought over from jaraco.text. - -These functions are not supposed to be used within `pip._internal`. These are -helper functions brought over from `jaraco.text` to enable vendoring newer -copies of `pkg_resources` without having to vendor `jaraco.text` and its entire -dependency cone; something that our vendoring setup is not currently capable of -handling. - -License reproduced from original source below: - -Copyright Jason R. Coombs - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - -import functools -import itertools - - -def _nonblank(str): - return str and not str.startswith("#") - - -@functools.singledispatch -def yield_lines(iterable): - r""" - Yield valid lines of a string or iterable. - - >>> list(yield_lines('')) - [] - >>> list(yield_lines(['foo', 'bar'])) - ['foo', 'bar'] - >>> list(yield_lines('foo\nbar')) - ['foo', 'bar'] - >>> list(yield_lines('\nfoo\n#bar\nbaz #comment')) - ['foo', 'baz #comment'] - >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n'])) - ['foo', 'bar', 'baz', 'bing'] - """ - return itertools.chain.from_iterable(map(yield_lines, iterable)) - - -@yield_lines.register(str) -def _(text): - return filter(_nonblank, map(str.strip, text.splitlines())) - - -def drop_comment(line): - """ - Drop comments. - - >>> drop_comment('foo # bar') - 'foo' - - A hash without a space may be in a URL. - - >>> drop_comment('http://example.com/foo#bar') - 'http://example.com/foo#bar' - """ - return line.partition(" #")[0] - - -def join_continuation(lines): - r""" - Join lines continued by a trailing backslash. - - >>> list(join_continuation(['foo \\', 'bar', 'baz'])) - ['foobar', 'baz'] - >>> list(join_continuation(['foo \\', 'bar', 'baz'])) - ['foobar', 'baz'] - >>> list(join_continuation(['foo \\', 'bar \\', 'baz'])) - ['foobarbaz'] - - Not sure why, but... - The character preceeding the backslash is also elided. - - >>> list(join_continuation(['goo\\', 'dly'])) - ['godly'] - - A terrible idea, but... - If no line is available to continue, suppress the lines. 
- - >>> list(join_continuation(['foo', 'bar\\', 'baz\\'])) - ['foo'] - """ - lines = iter(lines) - for item in lines: - while item.endswith("\\"): - try: - item = item[:-2].strip() + next(lines) - except StopIteration: - return - yield item diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/_version.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/_version.py deleted file mode 100644 index e12dd0e78530cc37bfa6599d3b9121bba90d77cb..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/_version.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file is protected via CODEOWNERS -__version__ = "1.26.15" diff --git a/spaces/Atualli/yoloxTeste/app1.py b/spaces/Atualli/yoloxTeste/app1.py deleted file mode 100644 index 5360eaa34d2fa1ff13d820aa838c94a58182c367..0000000000000000000000000000000000000000 --- a/spaces/Atualli/yoloxTeste/app1.py +++ /dev/null @@ -1,105 +0,0 @@ -import gradio as gr -import os -#os.system("pip -qq install yoloxdetect==0.0.7") -os.system("pip -qq install yoloxdetect") -import torch -import json -import yoloxdetect2.helpers as yoloxdetect -#from yoloxdetect import YoloxDetector - - -# Images -torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg') -torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg') -torch.hub.download_url_to_file('https://raw.githubusercontent.com/Megvii-BaseDetection/YOLOX/main/assets/dog.jpg', 'dog.jpg') - -model = yoloxdetect.YoloxDetector2('kadirnar/yolox_s-v0.1.1', 'configs.yolox_s', device="cuda", hf_model=True) - -def yolox_inference( - image_path: gr.inputs.Image = None, - model_path: gr.inputs.Dropdown = 'kadirnar/yolox_s-v0.1.1', - config_path: gr.inputs.Textbox = 'configs.yolox_s', - image_size: gr.inputs.Slider = 640 -): - """ - YOLOX inference function - Args: - image: Input image - model_path: Path to the model - config_path: Path to the config file - image_size: Image size - Returns: - Rendered image - """ - - #model = YoloxDetector(model_path, config_path=config_path, device="cpu", hf_model=True) - #pred = model.predict(image_path=image_path, image_size=image_size) - pred2 = [] - if model : - print (image_path) - model.torchyolo = True - pred2 = model.predict(image_path=image_path, image_size=image_size) - #text = "Ola" - #print (vars(model)) - #print (pred2[0]) - #print (pred2[1]) - #print (pred2[2]) - #os.remove(image_path) - - - tensor = { - "tensorflow": [ - ] - } - - if pred2 is not None: - #print (pred2[3]) - for i, element in enumerate(pred2[0]): - object = {} - itemclass = round(pred2[2][i].item()) - object["classe"] = itemclass - object["nome"] = pred2[3][itemclass] - object["score"] = pred2[1][i].item() - object["x"] = element[0].item() - object["y"] = element[1].item() - object["w"] = element[2].item() - object["h"] = element[3].item() - tensor["tensorflow"].append(object) - - #print(tensor) - - text = json.dumps(tensor) - return text - - -inputs = [ - gr.inputs.Image(type="filepath", label="Input Image"), - gr.inputs.Textbox(lines=1, label="Model Path", default="kadirnar/yolox_s-v0.1.1"), - gr.inputs.Textbox(lines=1, label="Config Path", default="configs.yolox_s"), - gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"), -] - -outputs = gr.outputs.Image(type="filepath", 
label="Output Image") -title = "SIMULADOR PARA RECONHECIMENTO DE IMAGEM" - -examples = [ - ["small-vehicles1.jpeg", "kadirnar/yolox_m-v0.1.1", "configs.yolox_m", 640], - ["zidane.jpg", "kadirnar/yolox_s-v0.1.1", "configs.yolox_s", 640], - ["dog.jpg", "kadirnar/yolox_tiny-v0.1.1", "configs.yolox_tiny", 640], -] - -demo_app = gr.Interface( - fn=yolox_inference, - inputs=inputs, - outputs=["text"], - title=title, - examples=examples, - cache_examples=True, - live=True, - theme='huggingface', -) -try: - demo_app.launch(debug=True, server_name="192.168.0.153", server_port=8081, enable_queue=True) -except: - demo_app.close() - diff --git a/spaces/AzinZ/vitscn/README.md b/spaces/AzinZ/vitscn/README.md deleted file mode 100644 index 4273d7fc9f250f8f53f6cee12b2b6229f1b7eb43..0000000000000000000000000000000000000000 --- a/spaces/AzinZ/vitscn/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Vitscn -emoji: 🌖 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.44.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AzulaFire/SparkDebate/utils/API.py b/spaces/AzulaFire/SparkDebate/utils/API.py deleted file mode 100644 index cbbb280d9538d67a425cf3e5ee937c13645556d8..0000000000000000000000000000000000000000 --- a/spaces/AzulaFire/SparkDebate/utils/API.py +++ /dev/null @@ -1,244 +0,0 @@ - -import base64 -import hmac -import json -from datetime import datetime, timezone -from urllib.parse import urlencode, urlparse -from websocket import create_connection, WebSocketConnectionClosedException -from utils.tools import get_prompt, process_response, init_script, create_script - - -class SparkAPI: - __api_url = 'wss://spark-api.xf-yun.com/v1.1/chat' - __max_token = 4096 - - def __init__(self, app_id, api_key, api_secret): - self.__app_id = app_id - self.__api_key = api_key - self.__api_secret = api_secret - - def __set_max_tokens(self, token): - if isinstance(token, int) is False or token < 0: - print("set_max_tokens() error: tokens should be a positive integer!") - return - self.__max_token = token - - def __get_authorization_url(self): - authorize_url = urlparse(self.__api_url) - # 1. generate data - date = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S %Z') - - """ - Generation rule of Authorization parameters - 1) Obtain the APIKey and APISecret parameters from the console. - 2) Use the aforementioned date to dynamically concatenate a string tmp. Here we take Huobi's URL as an example, - the actual usage requires replacing the host and path with the specific request URL. - """ - signature_origin = "host: {}\ndate: {}\nGET {} HTTP/1.1".format( - authorize_url.netloc, date, authorize_url.path - ) - signature = base64.b64encode( - hmac.new( - self.__api_secret.encode(), - signature_origin.encode(), - digestmod='sha256' - ).digest() - ).decode() - authorization_origin = \ - 'api_key="{}",algorithm="{}",headers="{}",signature="{}"'.format( - self.__api_key, "hmac-sha256", "host date request-line", signature - ) - authorization = base64.b64encode( - authorization_origin.encode()).decode() - params = { - "authorization": authorization, - "date": date, - "host": authorize_url.netloc - } - - ws_url = self.__api_url + "?" 
+ urlencode(params) - return ws_url - - def __build_inputs( - self, - message: dict, - user_id: str = "001", - domain: str = "general", - temperature: float = 0.5, - max_tokens: int = 4096 - ): - input_dict = { - "header": { - "app_id": self.__app_id, - "uid": user_id, - }, - "parameter": { - "chat": { - "domain": domain, - "temperature": temperature, - "max_tokens": max_tokens, - } - }, - "payload": { - "message": message - } - } - return json.dumps(input_dict) - - def chat( - self, - query: str, - history: list = None, # store the conversation history - user_id: str = "001", - domain: str = "general", - max_tokens: int = 4096, - temperature: float = 0.5, - ): - if history is None: - history = [] - - # the max of max_length is 4096 - max_tokens = min(max_tokens, 4096) - url = self.__get_authorization_url() - ws = create_connection(url) - message = get_prompt(query, history) - input_str = self.__build_inputs( - message=message, - user_id=user_id, - domain=domain, - temperature=temperature, - max_tokens=max_tokens, - ) - ws.send(input_str) - response_str = ws.recv() - try: - while True: - response, history, status = process_response( - response_str, history) - """ - The final return result, which means a complete conversation. - doc url: https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E - """ - if len(response) == 0 or status == 2: - break - response_str = ws.recv() - return response - - except WebSocketConnectionClosedException: - print("Connection closed") - finally: - ws.close() - # Stream output statement, used for terminal chat. - - def streaming_output( - self, - query: str, - history: list = None, # store the conversation history - user_id: str = "001", - domain: str = "general", - max_tokens: int = 4096, - temperature: float = 0.5, - ): - if history is None: - history = [] - # the max of max_length is 4096 - max_tokens = min(max_tokens, 4096) - url = self.__get_authorization_url() - ws = create_connection(url) - - message = get_prompt(query, history) - input_str = self.__build_inputs( - message=message, - user_id=user_id, - domain=domain, - temperature=temperature, - max_tokens=max_tokens, - ) - # print(input_str) - # send question or prompt to url, and receive the answer - ws.send(input_str) - response_str = ws.recv() - - # Continuous conversation - try: - while True: - response, history, status = process_response( - response_str, history) - yield response, history - if len(response) == 0 or status == 2: - break - response_str = ws.recv() - - except WebSocketConnectionClosedException: - print("Connection closed") - finally: - ws.close() - - def chat_stream(self): - history = [] - try: - print("输入init来初始化剧本,输入create来创作剧本,输入exit或stop来终止对话\n") - while True: - query = input("Ask: ") - if query == 'init': - jsonfile = input("请输入剧本文件路径:") - script_data = init_script(history, jsonfile) - print( - f"正在导入剧本{script_data['name']},角色信息:{script_data['characters']},剧情介绍:{script_data['summary']}") - query = f"我希望你能够扮演这个剧本杀游戏的主持人,我希望你能够逐步引导玩家到达最终结局,同时希望你在游戏中设定一些随机事件,需要玩家依靠自身的能力解决,当玩家做出偏离主线的行为或者与剧本无关的行为时,你需要委婉地将玩家引导至正常游玩路线中,对于玩家需要决策的事件,你需要提供一些行动推荐,下面是剧本介绍:{script_data}" - if query == 'create': - name = input('请输入剧本名称:') - characters = input('请输入角色信息:') - summary = input('请输入剧情介绍:') - details = input('请输入剧本细节') - create_script(name, characters, summary, details) - print('剧本创建成功!') - continue - if query == "exit" or query == "stop": - break - for response, _ in self.streaming_output(query, history): - print("\r" + response, end="") - print("\n") - finally: - 
print("\nThank you for using the SparkDesk AI. Welcome to use it again!") - - -from langchain.llms.base import LLM -from typing import Any, List, Mapping, Optional -class Spark_forlangchain(LLM): - - # 类的成员变量,类型为整型 - n: int - app_id: str - api_key: str - api_secret: str - # 用于指定该子类对象的类型 - - @property - def _llm_type(self) -> str: - return "Spark" - - # 重写基类方法,根据用户输入的prompt来响应用户,返回字符串 - def _call( - self, - query: str, - history: list = None, # store the conversation history - user_id: str = "001", - domain: str = "general", - max_tokens: int = 4096, - temperature: float = 0.7, - stop: Optional[List[str]] = None, - ) -> str: - if stop is not None: - raise ValueError("stop kwargs are not permitted.") - bot = SparkAPI(app_id=self.app_id, api_key=self.api_key, - api_secret=self.api_secret) - response = bot.chat(query, history, user_id, - domain, max_tokens, temperature) - return response - - # 返回一个字典类型,包含LLM的唯一标识 - @property - def _identifying_params(self) -> Mapping[str, Any]: - """Get the identifying parameters.""" - return {"n": self.n} \ No newline at end of file diff --git a/spaces/BAAI/vid2vid-zero/vid2vid_zero/pipelines/pipeline_vid2vid_zero.py b/spaces/BAAI/vid2vid-zero/vid2vid_zero/pipelines/pipeline_vid2vid_zero.py deleted file mode 100644 index 13337c70bd6c24dba39b7c9d43577bbfe2de4bad..0000000000000000000000000000000000000000 --- a/spaces/BAAI/vid2vid-zero/vid2vid_zero/pipelines/pipeline_vid2vid_zero.py +++ /dev/null @@ -1,541 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import Callable, List, Optional, Union -from dataclasses import dataclass - -import numpy as np -import torch - -from diffusers.utils import is_accelerate_available -from packaging import version -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer - -from diffusers.configuration_utils import FrozenDict -from diffusers.models import AutoencoderKL # UNet2DConditionModel -from diffusers.pipeline_utils import DiffusionPipeline -from diffusers.schedulers import ( - DDIMScheduler, - DPMSolverMultistepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - LMSDiscreteScheduler, - PNDMScheduler, -) -from diffusers.utils import deprecate, logging, BaseOutput -from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker - -from einops import rearrange - -from ..models.unet_2d_condition import UNet2DConditionModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -@dataclass -class Vid2VidZeroPipelineOutput(BaseOutput): - images: Union[torch.Tensor, np.ndarray] - - -class Vid2VidZeroPipeline(DiffusionPipeline): - r""" - Pipeline for text-to-image generation using Stable Diffusion. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
- - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: Union[ - DDIMScheduler, - PNDMScheduler, - LMSDiscreteScheduler, - EulerDiscreteScheduler, - EulerAncestralDiscreteScheduler, - DPMSolverMultistepScheduler, - ], - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPFeatureExtractor, - requires_safety_checker: bool = False, - ): - super().__init__() - - if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" - f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " - "to update the config accordingly as leaving `steps_offset` might led to incorrect results" - " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," - " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" - " file" - ) - deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["steps_offset"] = 1 - scheduler._internal_dict = FrozenDict(new_config) - - if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." - " `clip_sample` should be set to False in the configuration file. Please make sure to update the" - " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" - " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" - " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" - ) - deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["clip_sample"] = False - scheduler._internal_dict = FrozenDict(new_config) - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( - version.parse(unet.config._diffusers_version).base_version - ) < version.parse("0.9.0.dev0") - is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 - if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: - deprecation_message = ( - "The configuration file of the unet has set the default `sample_size` to smaller than" - " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" - " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" - " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" - " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" - " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" - " in the config might lead to incorrect results in future versions. If you have downloaded this" - " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" - " the `unet/config.json` file" - ) - deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(unet.config) - new_config["sample_size"] = 64 - unet._internal_dict = FrozenDict(new_config) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - def enable_vae_slicing(self): - r""" - Enable sliced VAE decoding. - - When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several - steps. This is useful to save some memory and allow larger batch sizes. - """ - self.vae.enable_slicing() - - def disable_vae_slicing(self): - r""" - Disable sliced VAE decoding. 
If `enable_vae_slicing` was previously invoked, this method will go back to - computing decoding in one step. - """ - self.vae.disable_slicing() - - def enable_sequential_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, - text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a - `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. - """ - if is_accelerate_available(): - from accelerate import cpu_offload - else: - raise ImportError("Please install accelerate via `pip install accelerate`") - - device = torch.device(f"cuda:{gpu_id}") - - for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: - if cpu_offloaded_model is not None: - cpu_offload(cpu_offloaded_model, device) - - if self.safety_checker is not None: - # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate - # fix by only offloading self.safety_checker for now - cpu_offload(self.safety_checker.vision_model, device) - - @property - def _execution_device(self): - r""" - Returns the device on which the pipeline's models will be executed. After calling - `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module - hooks. - """ - if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): - return self.device - for module in self.unet.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - return self.device - - def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt, uncond_embeddings=None): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `list(int)`): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). 
- """ - batch_size = len(prompt) if isinstance(prompt, list) else 1 - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - text_embeddings = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - text_embeddings = text_embeddings[0] - - # duplicate text embeddings for each generation per prompt, using mps friendly method - # num_videos_per_prompt = 1, thus nothing happens here - bs_embed, seq_len, _ = text_embeddings.shape - text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) - text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - max_length = text_input_ids.shape[-1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - uncond_embeddings = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - uncond_embeddings = uncond_embeddings[0] - - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = uncond_embeddings.shape[1] - uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) - uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. 
- # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - - return text_embeddings - - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - else: - has_nsfw_concept = None - return image, has_nsfw_concept - - def decode_latents(self, latents): - video_length = latents.shape[2] - latents = 1 / 0.18215 * latents - latents = rearrange(latents, "b c f h w -> (b f) c h w") - video = self.vae.decode(latents).sample - video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) - video = (video / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 - video = video.cpu().float().numpy() - return video - - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs(self, prompt, height, width, callback_steps): - if not isinstance(prompt, str) and not isinstance(prompt, list): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - rand_device = "cpu" if device.type == "mps" else device - - if isinstance(generator, list): - shape = (1,) + shape[1:] - latents = [ - torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) - for i in range(batch_size) - ] - latents = torch.cat(latents, dim=0).to(device) - else: - latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) - else: - if latents.shape != shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - video_length: Optional[int], - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_videos_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "tensor", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: Optional[int] = 1, - uncond_embeddings: torch.Tensor = None, - null_uncond_ratio: float = 1.0, - **kwargs, - ): - # Default height and width to unet - height = height or self.unet.config.sample_size * self.vae_scale_factor - width = width or self.unet.config.sample_size * self.vae_scale_factor - - # Check inputs. Raise error if not correct - self.check_inputs(prompt, height, width, callback_steps) - - # Define call parameters - batch_size = 1 if isinstance(prompt, str) else len(prompt) - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # Encode input prompt - with_uncond_embedding = do_classifier_free_guidance if uncond_embeddings is None else False - text_embeddings = self._encode_prompt( - prompt, device, num_videos_per_prompt, with_uncond_embedding, negative_prompt, - ) - - # Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # Prepare latent variables - num_channels_latents = self.unet.in_channels - latents = self.prepare_latents( - batch_size * num_videos_per_prompt, - num_channels_latents, - video_length, - height, - width, - text_embeddings.dtype, - device, - generator, - latents, - ) - latents_dtype = latents.dtype - - # Prepare extra step kwargs. 
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - if uncond_embeddings is not None: - start_time = 50 - assert (timesteps[-start_time:] == timesteps).all() - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - if uncond_embeddings is not None: - use_uncond_this_step = True - if null_uncond_ratio > 0: - if i > len(timesteps) * null_uncond_ratio: - use_uncond_this_step = False - else: - if i < len(timesteps) * (1 + null_uncond_ratio): - use_uncond_this_step = False - if use_uncond_this_step: - text_embeddings_input = torch.cat([uncond_embeddings[i].expand(*text_embeddings.shape), text_embeddings]) - else: - uncond_embeddings_ = self._encode_prompt('', device, num_videos_per_prompt, False, negative_prompt) - text_embeddings_input = torch.cat([uncond_embeddings_.expand(*text_embeddings.shape), text_embeddings]) - else: - text_embeddings_input = text_embeddings - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings_input).sample.to(dtype=latents_dtype) - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # Post-processing - images = self.decode_latents(latents) - - # Convert to tensor - if output_type == "tensor": - images = torch.from_numpy(images) - - if not return_dict: - return images - - return Vid2VidZeroPipelineOutput(images=images) diff --git a/spaces/Benson/text-generation/Examples/2023 Songs Download.md b/spaces/Benson/text-generation/Examples/2023 Songs Download.md deleted file mode 100644 index d3634fba765f2796e71f53a0fe537d1a0a7c0955..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/2023 Songs Download.md +++ /dev/null @@ -1,91 +0,0 @@ -
-# How to Download Songs in 2023: A Guide for Music Lovers
-
-Music is one of the most universal forms of entertainment and expression. Whether you want to relax, dance, study, work out, or simply enjoy a few tunes, music can lift your mood and improve your experience. But how do you get access to your favorite songs in 2023? Do you stream them online or download them to your device?
-
-In this article, we explore the pros and cons of downloading music versus streaming it. We also compare the best music streaming services in 2023 and show you how to download music legally and safely. By the end of this article, you will have a better idea of how to enjoy music in 2023.
-
-2023 songs download
-
-Download ✯✯✯ https://bltlly.com/2v6IEv
-
-## Why download music instead of streaming?
-
-Streaming music is convenient and popular. You can listen to millions of songs on demand without having to buy or store them, and you can discover new music based on your preferences and recommendations. However, streaming also has some drawbacks.
-
-First, streaming requires an internet connection. If your connection is slow or unstable, you may run into buffering or interruptions, and if you have a limited data plan you may incur extra charges. Second, streaming depends on the availability and quality of the service: if the service changes its terms, prices, features, or catalog, you can lose access to some songs or playlists. Third, streaming does not give you ownership of the music; you are only renting it for as long as you pay the subscription.
-
-## What are the best music streaming services in 2023?
-
-If you prefer to stream music rather than download it, you have plenty of options. There are many music streaming services in 2023 that cater to different tastes and needs. These are some of the most popular:
-
-| Service | Features | Price | Audio quality | Catalog size |
-| --- | --- | --- | --- | --- |
-| Spotify | Personalized playlists and recommendations; podcasts and videos; social features and integrations; offline mode and cross-device sync; Spotify Connect and Spotify Kids | Free (with ads and limited skips); Premium: $9.99/month (individual), $14.99/month (family), $4.99/month (student), $12.99/month (duo); HiFi: $19.99/month (coming soon) | Free: 160 kbps; Premium: 320 kbps; HiFi: lossless CD quality | Over 70 million songs; over 2.2 million podcasts |
-| Apple Music | Playlists and radio stations; live and on-demand shows; lyrics and music videos; offline mode and cross-device sync; Apple Music 1, Hits, and Country | Free 3-month trial; Individual: $9.99/month; Family: $14.99/month; Student: $4.99/month; Apple One bundle: from $14.95/month | 256 kbps AAC; Spatial Audio with Dolby Atmos | Over 75 million songs; over 1 million podcasts |
-| Tidal | Curated playlists and editorial content; exclusive releases and concerts; offline mode and cross-device sync; Tidal X and Tidal Connect | Free 30-day trial; Premium: $9.99/month (individual), $14.99/month (family), $4.99/month (student), $5.99/month (military), $5.99/month (first responder); HiFi: $19.99/month (individual), $29.99/month (family), $9.99/month (student), $11.99/month (military), $11.99/month (first responder) | | Over 70 million songs; over 250,000 music videos |
-| Amazon Music | Personalized playlists and stations; podcasts and live streams; lyrics and music videos; offline mode and cross-device sync; Alexa voice control | Free (with ads and limited skips); Prime Music: included with a Prime membership ($12.99/month or $119/year); Unlimited: $9.99/month ($7.99/month for Prime members) or $79/year ($69/year for Prime members); HD: $14.99/month ($12.99/month for Prime members) or $149/year ($129/year for Prime members) | Free: up to 256 kbps; Prime Music: up to 256 kbps; Unlimited: up to 256 kbps; HD: up to 850 kbps, lossless CD quality, and Ultra HD | Free and Prime Music: over 2 million songs; Unlimited: over 75 million songs; HD: over 75 million songs in HD and Ultra HD, plus over 7 million songs in 3D Audio |
-| YouTube Music | Personalized playlists and mixes; music videos and live performances; offline mode and cross-device sync; YouTube Premium benefits | Free (with ads and no background play); Premium: $9.99/month (individual), $14.99/month (family), $4.99/month (student) | Free: up to 128 kbps AAC; Premium: up to 256 kbps AAC | Over 70 million songs; over 2 billion music videos |
-
-## How to download music legally and safely?
-
-Downloading music can be a great way to enjoy your favorite songs offline, but you have to be careful about the sources you use. Not every website that offers music downloads is legal or safe. Some may violate the copyrights of the artists or labels, or may contain malware or viruses that can harm your device.
-
-To avoid illegal or unsafe downloads, you should follow these tips:
-
-If you want to download music legally and safely, you can use some of the websites that offer free or paid music downloads with the permission of the artists or labels. Here are some examples:
-
-## Conclusion
-
-Mods are a great way to enhance your gaming experience in TABS. They can add new content, features, and challenges to the game, making it more fun and varied. You can find and download mods for TABS from Nexus Mods and Steam Workshop, which are two of the most popular and reliable sources of mods for many games. You can also install and uninstall mods easily, either manually or with software such as Vortex. However, you should always be careful and responsible when using mods, since they can cause problems or conflicts with the game or with other mods. Always read the mod description, reviews, and instructions carefully, back up your game files, test your mods, and respect the mod creators and other players.
-
-We hope this article has helped you learn how to download mods in TABS. If you have any questions or suggestions, feel free to leave a comment below. Happy modding!
-
-## Frequently asked questions
-
-Here are some of the most frequently asked questions and answers about downloading mods in TABS:
-
-### Q: What are the best mods for TABS?
-
-A: This is a subjective question, since different players have different preferences and tastes when it comes to mods. However, some of the most popular and highest-rated mods for TABS are:
-
-### Q: How do I update my mods?
-
-A: If you downloaded your mods from Nexus Mods, you can check for updates by going to the Mods section in Vortex and clicking the Check for Updates button. If an update is available, you can download and install it by clicking the Install Update button. If you downloaded your mods from Steam Workshop, you don't need to do anything: Steam automatically updates your subscribed mods when new versions become available.
-
-### Q: How can I create my own mods?
-
-A: If you want to create your own mods for TABS, you will need some skills and tools. You will need to know how to use Unity, the game engine TABS is built on, and you will need to download the TABS Modding Kit, a collection of files and scripts that helps you create and test your mods. You can find tutorials and guides on how to use these tools on YouTube or Reddit, and you can join the TABS Modding Discord to chat with other modders and get help or feedback.
-
-### Q: How do I share my mods with others?
-
-A: If you want to share your mods with others, you can upload them to Nexus Mods or Steam Workshop. To upload your mods to Nexus Mods, you will need to create an account on their website and follow their guidelines for uploading files. To upload your mods to Steam Workshop, you will need a Steam account and will have to follow their instructions for publishing items.
-
-### Q: How do I report a problem with a mod?
-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/tz/_common.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/tz/_common.py deleted file mode 100644 index e6ac11831522b266114d5b68ee1da298e3aeb14a..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/tz/_common.py +++ /dev/null @@ -1,419 +0,0 @@ -from six import PY2 - -from functools import wraps - -from datetime import datetime, timedelta, tzinfo - - -ZERO = timedelta(0) - -__all__ = ['tzname_in_python2', 'enfold'] - - -def tzname_in_python2(namefunc): - """Change unicode output into bytestrings in Python 2 - - tzname() API changed in Python 3. It used to return bytes, but was changed - to unicode strings - """ - if PY2: - @wraps(namefunc) - def adjust_encoding(*args, **kwargs): - name = namefunc(*args, **kwargs) - if name is not None: - name = name.encode() - - return name - - return adjust_encoding - else: - return namefunc - - -# The following is adapted from Alexander Belopolsky's tz library -# https://github.com/abalkin/tz -if hasattr(datetime, 'fold'): - # This is the pre-python 3.6 fold situation - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - return dt.replace(fold=fold) - -else: - class _DatetimeWithFold(datetime): - """ - This is a class designed to provide a PEP 495-compliant interface for - Python versions before 3.6. It is used only for dates in a fold, so - the ``fold`` attribute is fixed at ``1``. - - .. versionadded:: 2.6.0 - """ - __slots__ = () - - def replace(self, *args, **kwargs): - """ - Return a datetime with the same attributes, except for those - attributes given new values by whichever keyword arguments are - specified. Note that tzinfo=None can be specified to create a naive - datetime from an aware datetime with no conversion of date and time - data. - - This is reimplemented in ``_DatetimeWithFold`` because pypy3 will - return a ``datetime.datetime`` even if ``fold`` is unchanged. - """ - argnames = ( - 'year', 'month', 'day', 'hour', 'minute', 'second', - 'microsecond', 'tzinfo' - ) - - for arg, argname in zip(args, argnames): - if argname in kwargs: - raise TypeError('Duplicate argument: {}'.format(argname)) - - kwargs[argname] = arg - - for argname in argnames: - if argname not in kwargs: - kwargs[argname] = getattr(self, argname) - - dt_class = self.__class__ if kwargs.get('fold', 1) else datetime - - return dt_class(**kwargs) - - @property - def fold(self): - return 1 - - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. 
In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - if getattr(dt, 'fold', 0) == fold: - return dt - - args = dt.timetuple()[:6] - args += (dt.microsecond, dt.tzinfo) - - if fold: - return _DatetimeWithFold(*args) - else: - return datetime(*args) - - -def _validate_fromutc_inputs(f): - """ - The CPython version of ``fromutc`` checks that the input is a ``datetime`` - object and that ``self`` is attached as its ``tzinfo``. - """ - @wraps(f) - def fromutc(self, dt): - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - return f(self, dt) - - return fromutc - - -class _tzinfo(tzinfo): - """ - Base class for all ``dateutil`` ``tzinfo`` objects. - """ - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - - dt = dt.replace(tzinfo=self) - - wall_0 = enfold(dt, fold=0) - wall_1 = enfold(dt, fold=1) - - same_offset = wall_0.utcoffset() == wall_1.utcoffset() - same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) - - return same_dt and not same_offset - - def _fold_status(self, dt_utc, dt_wall): - """ - Determine the fold status of a "wall" datetime, given a representation - of the same datetime as a (naive) UTC datetime. This is calculated based - on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all - datetimes, and that this offset is the actual number of hours separating - ``dt_utc`` and ``dt_wall``. - - :param dt_utc: - Representation of the datetime as UTC - - :param dt_wall: - Representation of the datetime as "wall time". This parameter must - either have a `fold` attribute or have a fold-naive - :class:`datetime.tzinfo` attached, otherwise the calculation may - fail. - """ - if self.is_ambiguous(dt_wall): - delta_wall = dt_wall - dt_utc - _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) - else: - _fold = 0 - - return _fold - - def _fold(self, dt): - return getattr(dt, 'fold', 0) - - def _fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurrence, chronologically, of the ambiguous datetime). - - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - - # Re-implement the algorithm from Python's datetime.py - dtoff = dt.utcoffset() - if dtoff is None: - raise ValueError("fromutc() requires a non-None utcoffset() " - "result") - - # The original datetime.py code assumes that `dst()` defaults to - # zero during ambiguous times. PEP 495 inverts this presumption, so - # for pre-PEP 495 versions of python, we need to tweak the algorithm. - dtdst = dt.dst() - if dtdst is None: - raise ValueError("fromutc() requires a non-None dst() result") - delta = dtoff - dtdst - - dt += delta - # Set fold=1 so we can default to being in the fold for - # ambiguous dates. 
- dtdst = enfold(dt, fold=1).dst() - if dtdst is None: - raise ValueError("fromutc(): dt.dst gave inconsistent " - "results; cannot convert") - return dt + dtdst - - @_validate_fromutc_inputs - def fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurrence, chronologically, of the ambiguous datetime). - - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - dt_wall = self._fromutc(dt) - - # Calculate the fold status given the two datetimes. - _fold = self._fold_status(dt, dt_wall) - - # Set the default fold value for ambiguous dates - return enfold(dt_wall, fold=_fold) - - -class tzrangebase(_tzinfo): - """ - This is an abstract base class for time zones represented by an annual - transition into and out of DST. Child classes should implement the following - methods: - - * ``__init__(self, *args, **kwargs)`` - * ``transitions(self, year)`` - this is expected to return a tuple of - datetimes representing the DST on and off transitions in standard - time. - - A fully initialized ``tzrangebase`` subclass should also provide the - following attributes: - * ``hasdst``: Boolean whether or not the zone uses DST. - * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects - representing the respective UTC offsets. - * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short - abbreviations in DST and STD, respectively. - * ``_hasdst``: Whether or not the zone has DST. - - .. versionadded:: 2.6.0 - """ - def __init__(self): - raise NotImplementedError('tzrangebase is an abstract base class') - - def utcoffset(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_base_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - if self._isdst(dt): - return self._dst_abbr - else: - return self._std_abbr - - def fromutc(self, dt): - """ Given a datetime in UTC, return local time """ - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - # Get transitions - if there are none, fixed offset - transitions = self.transitions(dt.year) - if transitions is None: - return dt + self.utcoffset(dt) - - # Get the transition times in UTC - dston, dstoff = transitions - - dston -= self._std_offset - dstoff -= self._std_offset - - utc_transitions = (dston, dstoff) - dt_utc = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt_utc, utc_transitions) - - if isdst: - dt_wall = dt + self._dst_offset - else: - dt_wall = dt + self._std_offset - - _fold = int(not isdst and self.is_ambiguous(dt_wall)) - - return enfold(dt_wall, fold=_fold) - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. 
versionadded:: 2.6.0 - """ - if not self.hasdst: - return False - - start, end = self.transitions(dt.year) - - dt = dt.replace(tzinfo=None) - return (end <= dt < end + self._dst_base_offset) - - def _isdst(self, dt): - if not self.hasdst: - return False - elif dt is None: - return None - - transitions = self.transitions(dt.year) - - if transitions is None: - return False - - dt = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt, transitions) - - # Handle ambiguous dates - if not isdst and self.is_ambiguous(dt): - return not self._fold(dt) - else: - return isdst - - def _naive_isdst(self, dt, transitions): - dston, dstoff = transitions - - dt = dt.replace(tzinfo=None) - - if dston < dstoff: - isdst = dston <= dt < dstoff - else: - isdst = not dstoff <= dt < dston - - return isdst - - @property - def _dst_base_offset(self): - return self._dst_offset - self._std_offset - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(...)" % self.__class__.__name__ - - __reduce__ = object.__reduce__ diff --git a/spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip.py b/spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip.py deleted file mode 100644 index 9d865181787bab4639fdb88334555dbcc25c983d..0000000000000000000000000000000000000000 --- a/spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip.py +++ /dev/null @@ -1,213 +0,0 @@ -import hashlib -import os -import urllib -import warnings -from typing import Union, List - -import torch -from PIL import Image -from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize -from tqdm import tqdm - -from .model import build_model -from .simple_tokenizer import SimpleTokenizer as _Tokenizer - -try: - from torchvision.transforms import InterpolationMode - BICUBIC = InterpolationMode.BICUBIC -except ImportError: - BICUBIC = Image.BICUBIC - - -if torch.__version__.split(".") < ["1", "7", "1"]: - warnings.warn("PyTorch version 1.7.1 or higher is recommended") - - -__all__ = ["available_models", "load", "tokenize"] -_tokenizer = _Tokenizer() - -_MODELS = { - "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", - "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", - "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", - "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", - "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", - "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", -} - - -def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")): - os.makedirs(root, exist_ok=True) - filename = os.path.basename(url) - - expected_sha256 = url.split("/")[-2] - download_target = os.path.join(root, filename) - - if os.path.exists(download_target) and not os.path.isfile(download_target): - raise RuntimeError(f"{download_target} exists and is not a regular file") - - if os.path.isfile(download_target): - if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: - return download_target - else: - warnings.warn(f"{download_target} exists, but the SHA256 checksum 
does not match; re-downloading the file") - - with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: - with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop: - while True: - buffer = source.read(8192) - if not buffer: - break - - output.write(buffer) - loop.update(len(buffer)) - - if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: - raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") - - return download_target - - -def _transform(n_px): - return Compose([ - Resize(n_px, interpolation=BICUBIC), - CenterCrop(n_px), - lambda image: image.convert("RGB"), - ToTensor(), - Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), - ]) - - -def available_models() -> List[str]: - """Returns the names of available CLIP models""" - return list(_MODELS.keys()) - - -def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=False): - """Load a CLIP model - Parameters - ---------- - name : str - A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict - device : Union[str, torch.device] - The device to put the loaded model - jit : bool - Whether to load the optimized JIT model or more hackable non-JIT model (default). - Returns - ------- - model : torch.nn.Module - The CLIP model - preprocess : Callable[[PIL.Image], torch.Tensor] - A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input - """ - if name in _MODELS: - model_path = _download(_MODELS[name]) - elif os.path.isfile(name): - model_path = name - else: - raise RuntimeError(f"Model {name} not found; available models = {available_models()}") - - try: - # loading JIT archive - model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() - state_dict = None - except RuntimeError: - # loading saved state dict - if jit: - warnings.warn(f"File {model_path} is not a JIT archive. 
Loading as a state dict instead") - jit = False - state_dict = torch.load(model_path, map_location="cpu") - - if not jit: - # print("Heree.....") - model = build_model(state_dict or model.state_dict()).to(device) - if str(device) == "cpu": - model.float() - return model, _transform(model.visual.input_resolution) - - # patch the device names - device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) - device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] - - def patch_device(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("prim::Constant"): - if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): - node.copyAttributes(device_node) - - model.apply(patch_device) - patch_device(model.encode_image) - patch_device(model.encode_text) - - # patch dtype to float32 on CPU - if str(device) == "cpu": - float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) - float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] - float_node = float_input.node() - - def patch_float(module): - try: - graphs = [module.graph] if hasattr(module, "graph") else [] - except RuntimeError: - graphs = [] - - if hasattr(module, "forward1"): - graphs.append(module.forward1.graph) - - for graph in graphs: - for node in graph.findAllNodes("aten::to"): - inputs = list(node.inputs()) - for i in [1, 2]: # dtype can be the second or third argument to aten::to() - if inputs[i].node()["value"] == 5: - inputs[i].node().copyAttributes(float_node) - - model.apply(patch_float) - patch_float(model.encode_image) - patch_float(model.encode_text) - - model.float() - - return model, _transform(model.input_resolution.item()) - - -def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor: - """ - Returns the tokenized representation of given input string(s) - Parameters - ---------- - texts : Union[str, List[str]] - An input string or a list of input strings to tokenize - context_length : int - The context length to use; all CLIP models use 77 as the context length - truncate: bool - Whether to truncate the text in case its encoding is longer than the context length - Returns - ------- - A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] - """ - if isinstance(texts, str): - texts = [texts] - - sot_token = _tokenizer.encoder["<|startoftext|>"] - eot_token = _tokenizer.encoder["<|endoftext|>"] - all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - - for i, tokens in enumerate(all_tokens): - if len(tokens) > context_length: - if truncate: - tokens = tokens[:context_length] - tokens[-1] = eot_token - else: - raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") - result[i, :len(tokens)] = torch.tensor(tokens) - - return result \ No newline at end of file diff --git a/spaces/BradSegal/Literature-Rating/app.py b/spaces/BradSegal/Literature-Rating/app.py deleted file mode 100644 index 7cf1ceb0627470a97564839e50060e34a57e7411..0000000000000000000000000000000000000000 --- a/spaces/BradSegal/Literature-Rating/app.py +++ /dev/null @@ -1,163 +0,0 @@ 
-import os -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from transformers import AutoModel, AutoConfig, AutoTokenizer -import gradio as gr - -os.system("gdown https://drive.google.com/uc?id=1whDb0yL_Kqoyx-sIw0sS5xTfb6r_9nlJ") -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - -def init_params(module_lst): - for module in module_lst: - for param in module.parameters(): - if param.dim() > 1: - torch.nn.init.xavier_uniform_(param) - return - - -class Custom_bert(nn.Module): - def __init__(self, model_dir): - super().__init__() - - # load base model - config = AutoConfig.from_pretrained(model_dir) - config.update({"output_hidden_states": True, - "hidden_dropout_prob": 0.0, - "layer_norm_eps": 1e-7}) - - self.base = AutoModel.from_pretrained(model_dir, config=config) - - dim = self.base.encoder.layer[0].output.dense.bias.shape[0] - - self.dropout = nn.Dropout(p=0.2) - self.high_dropout = nn.Dropout(p=0.5) - - # weights for weighted layer average - n_weights = 24 - weights_init = torch.zeros(n_weights).float() - weights_init.data[:-1] = -3 - self.layer_weights = torch.nn.Parameter(weights_init) - - # attention head - self.attention = nn.Sequential( - nn.Linear(1024, 1024), - nn.Tanh(), - nn.Linear(1024, 1), - nn.Softmax(dim=1) - ) - self.cls = nn.Sequential( - nn.Linear(dim, 1) - ) - init_params([self.cls, self.attention]) - - def reini_head(self): - init_params([self.cls, self.attention]) - return - - def forward(self, input_ids, attention_mask): - base_output = self.base(input_ids=input_ids, - attention_mask=attention_mask) - - # weighted average of all encoder outputs - cls_outputs = torch.stack( - [self.dropout(layer) for layer in base_output['hidden_states'][-24:]], dim=0 - ) - cls_output = ( - torch.softmax(self.layer_weights, dim=0).unsqueeze(1).unsqueeze(1).unsqueeze(1) * cls_outputs).sum( - 0) - - # multisample dropout - logits = torch.mean( - torch.stack( - [torch.sum(self.attention(self.high_dropout(cls_output)) * cls_output, dim=1) for _ in range(5)], - dim=0, - ), - dim=0, - ) - return self.cls(logits) - - -def get_batches(input, tokenizer, batch_size=128, max_length=256, device='cpu'): - out = tokenizer(input, return_tensors='pt', max_length=max_length, padding='max_length') - out['input_ids'], out['attention_mask'] = out['input_ids'].to(device), out['attention_mask'].to(device) - input_id_split = torch.split(out['input_ids'], max_length, dim=1) - attention_split = torch.split(out['attention_mask'], max_length, dim=1) - - input_id_batches = [] - attention_batches = [] - - i = 0 - input_length = len(input_id_split) - - while i * batch_size < input_length: - if i * batch_size + batch_size <= input_length: - input_id_batches.append(list(input_id_split[i * batch_size:(i + 1) * batch_size])) - attention_batches.append(list(attention_split[i * batch_size:(i + 1) * batch_size])) - else: - input_id_batches.append(list(input_id_split[i * batch_size:input_length])) - attention_batches.append(list(attention_split[i * batch_size:input_length])) - i += 1 - - if input_id_batches[-1][-1].shape[1] < max_length: - input_id_batches[-1][-1] = F.pad(input_id_batches[-1][-1], - (1, max_length - input_id_batches[-1][-1].shape[1] - 1), - value=0) - attention_batches[-1][-1] = F.pad(attention_batches[-1][-1], - (1, max_length - attention_batches[-1][-1].shape[1] - 1), - value=1) - - input_id_batches = [torch.cat(batch, dim=0) for batch in input_id_batches] - attention_batches = [torch.cat(batch, dim=0) for batch in attention_batches] - 
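    # At this point each element of input_id_batches / attention_batches is a single
    # (num_chunks_in_batch, max_length) tensor; the zip below pairs each token-id tensor
    # with its attention-mask tensor so the prediction loop can consume them batch by batch.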
- return tuple(zip(input_id_batches, attention_batches)) - - -def predict(input, tokenizer, model, batch_size=128, max_length=256, max_val=-4, min_val=3, score=100): - device = model.base.device - batches = get_batches(input, tokenizer, batch_size, max_length, device) - - predictions = [] - - with torch.no_grad(): - for input_ids, attention_mask in batches: - pred = model(input_ids, attention_mask) - pred = score * (pred - min_val) / (max_val - min_val) - predictions.append(pred) - - predictions = torch.cat(predictions, dim=0) - mean, std = predictions.mean().cpu().item(), predictions.std().cpu().item() - mean, std = round(mean, 2), round(std, 2) - if np.isnan(std): - return f"The reading difficulty score is {mean}." - else: - return f"""The reading difficulty score is {mean} with a standard deviation of {std}. - \nThe 95% confidence interval of the score is {mean - 2 * std} to {mean + 2 * std}.""" - - -if __name__ == "__main__": - deberta_loc = "deberta_large_0.pt" - deberta_tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-large", model_max_length=256) - - model = Custom_bert("microsoft/deberta-large") - model.load_state_dict(torch.load(deberta_loc, map_location=torch.device(device))) - model.eval().to(device) - - - description = """ - This tool attempts to estimate how difficult a piece of text is to read by a school child. - The underlying model has been developed based on expert ranking of text difficulty for students from grade 3 to 12. - The score has been scaled to range from zero (very easy) to one hundred (very difficult). - Very long passages will be broken up and reported with the average as well as the standard deviation of the difficulty score. - """ - - interface = gr.Interface(fn=lambda x: predict(x, deberta_tokenizer, model, batch_size=4), - inputs=gr.inputs.Textbox(lines = 7, label = "Text:", - placeholder = "Insert text to be scored here."), - outputs='text', - title = "Reading Difficulty Analyser", - description = description) - interface.launch() - diff --git a/spaces/BraydenMoore/MARCI-NFL-Betting/Source/Test/xgboost_OU.py b/spaces/BraydenMoore/MARCI-NFL-Betting/Source/Test/xgboost_OU.py deleted file mode 100644 index e666282db845d5b54fd670371f445386c810550e..0000000000000000000000000000000000000000 --- a/spaces/BraydenMoore/MARCI-NFL-Betting/Source/Test/xgboost_OU.py +++ /dev/null @@ -1,59 +0,0 @@ -import xgboost as xgb -import pandas as pd -import pickle as pkl -import numpy as np -import os - -model = 'xgboost_OU_no_odds_60.8%' - -current_directory = os.path.dirname(os.path.abspath(__file__)) -parent_directory = os.path.dirname(current_directory) -data_directory = os.path.join(parent_directory, 'Data') -model_directory = os.path.join(parent_directory, 'Models') -pickle_directory = os.path.join(parent_directory, 'Pickles') - -file_path = os.path.join(model_directory, f'{model}.json') -xgb_ou = xgb.Booster() -xgb_ou.load_model(file_path) - -file_path = os.path.join(pickle_directory, 'test_games_OU_no_odds.pkl') -with open(file_path,'rb') as f: - test_games = pkl.load(f).tolist() - -file_path = os.path.join(data_directory, 'gbg_and_odds.csv') -gbg_and_odds = pd.read_csv(file_path) -test_data = gbg_and_odds.loc[gbg_and_odds['game_id'].isin(test_games)] -test_data_matrix = xgb.DMatrix(test_data.drop(columns=['game_id','Over','Home-Team-Win','Season','home_team','away_team','game_date','Key','Home Score','Away Score','Home Odds Close','Away Odds Close','Home Winnings','Away Winnings','Away Odds','Home Odds']).astype(float).values) - -predicted_probas = 
xgb_ou.predict(test_data_matrix) -predictions = np.argmax(predicted_probas, axis=1) -test_data['predicted_proba'] = [i[1] for i in predicted_probas] -test_data['prediction'] = (test_data['predicted_proba']>0.5).astype(int) -test_data['correct'] = test_data['Over']==test_data['prediction'] - -bets = test_data.loc[(test_data['predicted_proba']>0.6) | (test_data['predicted_proba']<0.4)] -bets['winnings'] = [0.91 if c else -1 for c in bets[['correct']].values] - -import matplotlib.pyplot as plt -fig = plt.figure(facecolor='black') -ax = fig.add_subplot(1, 1, 1, facecolor='black') - -# Plot data with line color as RGB(0, 128, 0) -ax.plot(bets['winnings'].cumsum().values*100, linewidth=3, color=(0/255, 128/255, 0/255)) - -# Set title and labels -ax.set_title('MARCI 3.0 - Over/Under', color='white') -ax.set_xlabel('Games Bet On', color='white') -ax.set_ylabel('Return (%)', color='white') - -# Change tick colors to white -ax.tick_params(axis='x', colors='white') -ax.tick_params(axis='y', colors='white') - -# Change axis edge colors -ax.spines['bottom'].set_color('white') -ax.spines['top'].set_color('white') -ax.spines['left'].set_color('white') -ax.spines['right'].set_color('white') - -plt.savefig(f'{model}_dark.png', facecolor='black') \ No newline at end of file diff --git a/spaces/CVPR/MonoScene/app.py b/spaces/CVPR/MonoScene/app.py deleted file mode 100644 index 01225a388b656d95f22d55a02c47d4d62772c3c3..0000000000000000000000000000000000000000 --- a/spaces/CVPR/MonoScene/app.py +++ /dev/null @@ -1,126 +0,0 @@ -import gradio as gr -import numpy as np -from torchvision import transforms -import torch -from helpers import * -import sys -import csv -from monoscene.monoscene import MonoScene - -csv.field_size_limit(sys.maxsize) -torch.set_grad_enabled(False) - -# pipeline = pipeline(model="anhquancao/monoscene_kitti") -# model = AutoModel.from_pretrained( -# "anhquancao/monoscene_kitti", trust_remote_code=True, revision='bf033f87c2a86b60903ab811b790a1532c1ae313' -# )#.cuda() -model = MonoScene.load_from_checkpoint( - "monoscene_kitti.ckpt", - dataset="kitti", - n_classes=20, - feature = 64, - project_scale = 2, - full_scene_size = (256, 256, 32), - ) - -img_W, img_H = 1220, 370 - - -def predict(img): - img = np.array(img, dtype=np.float32, copy=False) / 255.0 - - normalize_rgb = transforms.Compose( - [ - transforms.ToTensor(), - transforms.Normalize( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ), - ] - ) - img = normalize_rgb(img) - - batch = get_projections(img_W, img_H) - batch["img"] = img - for k in batch: - batch[k] = batch[k].unsqueeze(0)#.cuda() - - pred = model(batch).squeeze() - # print(pred.shape) - pred = majority_pooling(pred, k_size=2) - fig = draw(pred, batch['fov_mask_2']) - - - return fig - - -description = """ -MonoScene Demo on SemanticKITTI Validation Set (Sequence 08), which uses the camera parameters of Sequence 08. -Due to the CPU-only inference, it might take up to 20s to predict a scene. \n -The output is downsampled by 2 for faster rendering. Darker colors represent the scenery outside the Field of View, i.e. not visible on the image. -
- Project page -
-""" -title = "MonoScene: Monocular 3D Semantic Scene Completion" -article=""" -
-We also released a smaller MonoScene model (Half resolution - w/o 3D CRP) at: https://huggingface.co/spaces/CVPR/monoscene_lite -
-""" - -examples = [ - 'images/08/001385.jpg', - 'images/08/000295.jpg', - 'images/08/002505.jpg', - 'images/08/000085.jpg', - 'images/08/000290.jpg', - 'images/08/000465.jpg', - 'images/08/000790.jpg', - 'images/08/001005.jpg', - 'images/08/001380.jpg', - 'images/08/001530.jpg', - 'images/08/002360.jpg', - 'images/08/004059.jpg', - 'images/08/003149.jpg', - 'images/08/001446.jpg', - 'images/08/000010.jpg', - 'images/08/001122.jpg', - 'images/08/003533.jpg', - 'images/08/003365.jpg', - 'images/08/002944.jpg', - 'images/08/000822.jpg', - 'images/08/000103.jpg', - 'images/08/002716.jpg', - 'images/08/000187.jpg', - 'images/08/002128.jpg', - 'images/08/000511.jpg', - 'images/08/000618.jpg', - 'images/08/002010.jpg', - 'images/08/000234.jpg', - 'images/08/001842.jpg', - 'images/08/001687.jpg', - 'images/08/003929.jpg', - 'images/08/002272.jpg', -] - - - -demo = gr.Interface( - predict, - gr.Image(shape=(1220, 370)), - gr.Plot(), - article=article, - title=title, - enable_queue=True, - cache_examples=False, - live=False, - examples=examples, - description=description) - - -demo.launch(enable_queue=True, debug=False) \ No newline at end of file diff --git a/spaces/CVPR/Text2Human/Text2Human/models/hierarchy_inference_model.py b/spaces/CVPR/Text2Human/Text2Human/models/hierarchy_inference_model.py deleted file mode 100644 index 3116307caa051cec1a2d0e3793f459f92b44fd80..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Text2Human/Text2Human/models/hierarchy_inference_model.py +++ /dev/null @@ -1,363 +0,0 @@ -import logging -import math -from collections import OrderedDict - -import torch -import torch.nn.functional as F -from torchvision.utils import save_image - -from models.archs.fcn_arch import MultiHeadFCNHead -from models.archs.unet_arch import UNet -from models.archs.vqgan_arch import (Decoder, DecoderRes, Encoder, - VectorQuantizerSpatialTextureAware, - VectorQuantizerTexture) -from models.losses.accuracy import accuracy -from models.losses.cross_entropy_loss import CrossEntropyLoss - -logger = logging.getLogger('base') - - -class VQGANTextureAwareSpatialHierarchyInferenceModel(): - - def __init__(self, opt): - self.opt = opt - self.device = torch.device('cuda') - self.is_train = opt['is_train'] - - self.top_encoder = Encoder( - ch=opt['top_ch'], - num_res_blocks=opt['top_num_res_blocks'], - attn_resolutions=opt['top_attn_resolutions'], - ch_mult=opt['top_ch_mult'], - in_channels=opt['top_in_channels'], - resolution=opt['top_resolution'], - z_channels=opt['top_z_channels'], - double_z=opt['top_double_z'], - dropout=opt['top_dropout']).to(self.device) - self.decoder = Decoder( - in_channels=opt['top_in_channels'], - resolution=opt['top_resolution'], - z_channels=opt['top_z_channels'], - ch=opt['top_ch'], - out_ch=opt['top_out_ch'], - num_res_blocks=opt['top_num_res_blocks'], - attn_resolutions=opt['top_attn_resolutions'], - ch_mult=opt['top_ch_mult'], - dropout=opt['top_dropout'], - resamp_with_conv=True, - give_pre_end=False).to(self.device) - self.top_quantize = VectorQuantizerTexture( - 1024, opt['embed_dim'], beta=0.25).to(self.device) - self.top_quant_conv = torch.nn.Conv2d(opt["top_z_channels"], - opt['embed_dim'], - 1).to(self.device) - self.top_post_quant_conv = torch.nn.Conv2d(opt['embed_dim'], - opt["top_z_channels"], - 1).to(self.device) - self.load_top_pretrain_models() - - self.bot_encoder = Encoder( - ch=opt['bot_ch'], - num_res_blocks=opt['bot_num_res_blocks'], - attn_resolutions=opt['bot_attn_resolutions'], - ch_mult=opt['bot_ch_mult'], - 
in_channels=opt['bot_in_channels'], - resolution=opt['bot_resolution'], - z_channels=opt['bot_z_channels'], - double_z=opt['bot_double_z'], - dropout=opt['bot_dropout']).to(self.device) - self.bot_decoder_res = DecoderRes( - in_channels=opt['bot_in_channels'], - resolution=opt['bot_resolution'], - z_channels=opt['bot_z_channels'], - ch=opt['bot_ch'], - num_res_blocks=opt['bot_num_res_blocks'], - ch_mult=opt['bot_ch_mult'], - dropout=opt['bot_dropout'], - give_pre_end=False).to(self.device) - self.bot_quantize = VectorQuantizerSpatialTextureAware( - opt['bot_n_embed'], - opt['embed_dim'], - beta=0.25, - spatial_size=opt['codebook_spatial_size']).to(self.device) - self.bot_quant_conv = torch.nn.Conv2d(opt["bot_z_channels"], - opt['embed_dim'], - 1).to(self.device) - self.bot_post_quant_conv = torch.nn.Conv2d(opt['embed_dim'], - opt["bot_z_channels"], - 1).to(self.device) - - self.load_bot_pretrain_network() - - self.guidance_encoder = UNet( - in_channels=opt['encoder_in_channels']).to(self.device) - self.index_decoder = MultiHeadFCNHead( - in_channels=opt['fc_in_channels'], - in_index=opt['fc_in_index'], - channels=opt['fc_channels'], - num_convs=opt['fc_num_convs'], - concat_input=opt['fc_concat_input'], - dropout_ratio=opt['fc_dropout_ratio'], - num_classes=opt['fc_num_classes'], - align_corners=opt['fc_align_corners'], - num_head=18).to(self.device) - - self.init_training_settings() - - def init_training_settings(self): - optim_params = [] - for v in self.guidance_encoder.parameters(): - if v.requires_grad: - optim_params.append(v) - for v in self.index_decoder.parameters(): - if v.requires_grad: - optim_params.append(v) - # set up optimizers - if self.opt['optimizer'] == 'Adam': - self.optimizer = torch.optim.Adam( - optim_params, - self.opt['lr'], - weight_decay=self.opt['weight_decay']) - elif self.opt['optimizer'] == 'SGD': - self.optimizer = torch.optim.SGD( - optim_params, - self.opt['lr'], - momentum=self.opt['momentum'], - weight_decay=self.opt['weight_decay']) - self.log_dict = OrderedDict() - if self.opt['loss_function'] == 'cross_entropy': - self.loss_func = CrossEntropyLoss().to(self.device) - - def load_top_pretrain_models(self): - # load pretrained vqgan for segmentation mask - top_vae_checkpoint = torch.load(self.opt['top_vae_path']) - self.top_encoder.load_state_dict( - top_vae_checkpoint['encoder'], strict=True) - self.decoder.load_state_dict( - top_vae_checkpoint['decoder'], strict=True) - self.top_quantize.load_state_dict( - top_vae_checkpoint['quantize'], strict=True) - self.top_quant_conv.load_state_dict( - top_vae_checkpoint['quant_conv'], strict=True) - self.top_post_quant_conv.load_state_dict( - top_vae_checkpoint['post_quant_conv'], strict=True) - self.top_encoder.eval() - self.top_quantize.eval() - self.top_quant_conv.eval() - self.top_post_quant_conv.eval() - - def load_bot_pretrain_network(self): - checkpoint = torch.load(self.opt['bot_vae_path']) - self.bot_encoder.load_state_dict( - checkpoint['bot_encoder'], strict=True) - self.bot_decoder_res.load_state_dict( - checkpoint['bot_decoder_res'], strict=True) - self.decoder.load_state_dict(checkpoint['decoder'], strict=True) - self.bot_quantize.load_state_dict( - checkpoint['bot_quantize'], strict=True) - self.bot_quant_conv.load_state_dict( - checkpoint['bot_quant_conv'], strict=True) - self.bot_post_quant_conv.load_state_dict( - checkpoint['bot_post_quant_conv'], strict=True) - - self.bot_encoder.eval() - self.bot_decoder_res.eval() - self.decoder.eval() - self.bot_quantize.eval() - self.bot_quant_conv.eval() 
- self.bot_post_quant_conv.eval() - - def top_encode(self, x, mask): - h = self.top_encoder(x) - h = self.top_quant_conv(h) - quant, _, _ = self.top_quantize(h, mask) - quant = self.top_post_quant_conv(quant) - - return quant, quant - - def feed_data(self, data): - self.image = data['image'].to(self.device) - self.texture_mask = data['texture_mask'].float().to(self.device) - self.get_gt_indices() - - self.texture_tokens = F.interpolate( - self.texture_mask, size=(32, 16), - mode='nearest').view(self.image.size(0), -1).long() - - def bot_encode(self, x, mask): - h = self.bot_encoder(x) - h = self.bot_quant_conv(h) - _, _, (_, _, indices_list) = self.bot_quantize(h, mask) - - return indices_list - - def get_gt_indices(self): - self.quant_t, self.feature_t = self.top_encode(self.image, - self.texture_mask) - self.gt_indices_list = self.bot_encode(self.image, self.texture_mask) - - def index_to_image(self, index_bottom_list, texture_mask): - quant_b = self.bot_quantize.get_codebook_entry( - index_bottom_list, texture_mask, - (index_bottom_list[0].size(0), index_bottom_list[0].size(1), - index_bottom_list[0].size(2), - self.opt["bot_z_channels"])) #.permute(0, 3, 1, 2) - quant_b = self.bot_post_quant_conv(quant_b) - bot_dec_res = self.bot_decoder_res(quant_b) - - dec = self.decoder(self.quant_t, bot_h=bot_dec_res) - - return dec - - def get_vis(self, pred_img_index, rec_img_index, texture_mask, save_path): - rec_img = self.index_to_image(rec_img_index, texture_mask) - pred_img = self.index_to_image(pred_img_index, texture_mask) - - base_img = self.decoder(self.quant_t) - img_cat = torch.cat([ - self.image, - rec_img, - base_img, - pred_img, - ], dim=3).detach() - img_cat = ((img_cat + 1) / 2) - img_cat = img_cat.clamp_(0, 1) - save_image(img_cat, save_path, nrow=1, padding=4) - - def optimize_parameters(self): - self.guidance_encoder.train() - self.index_decoder.train() - - self.feature_enc = self.guidance_encoder(self.feature_t) - self.memory_logits_list = self.index_decoder(self.feature_enc) - - loss = 0 - for i in range(18): - loss += self.loss_func( - self.memory_logits_list[i], - self.gt_indices_list[i], - ignore_index=-1) - - self.optimizer.zero_grad() - loss.backward() - self.optimizer.step() - - self.log_dict['loss_total'] = loss - - def inference(self, data_loader, save_dir): - self.guidance_encoder.eval() - self.index_decoder.eval() - - acc = 0 - num = 0 - - for _, data in enumerate(data_loader): - self.feed_data(data) - img_name = data['img_name'] - - num += self.image.size(0) - - texture_mask_flatten = self.texture_tokens.view(-1) - min_encodings_indices_list = [ - torch.full( - texture_mask_flatten.size(), - fill_value=-1, - dtype=torch.long, - device=texture_mask_flatten.device) for _ in range(18) - ] - with torch.no_grad(): - self.feature_enc = self.guidance_encoder(self.feature_t) - memory_logits_list = self.index_decoder(self.feature_enc) - # memory_indices_pred = memory_logits.argmax(dim=1) - batch_acc = 0 - for codebook_idx, memory_logits in enumerate(memory_logits_list): - region_of_interest = texture_mask_flatten == codebook_idx - if torch.sum(region_of_interest) > 0: - memory_indices_pred = memory_logits.argmax(dim=1).view(-1) - batch_acc += torch.sum( - memory_indices_pred[region_of_interest] == - self.gt_indices_list[codebook_idx].view( - -1)[region_of_interest]) - memory_indices_pred = memory_indices_pred - min_encodings_indices_list[codebook_idx][ - region_of_interest] = memory_indices_pred[ - region_of_interest] - min_encodings_indices_return_list = [ - 
min_encodings_indices.view(self.gt_indices_list[0].size()) - for min_encodings_indices in min_encodings_indices_list - ] - batch_acc = batch_acc / self.gt_indices_list[codebook_idx].numel( - ) * self.image.size(0) - acc += batch_acc - self.get_vis(min_encodings_indices_return_list, - self.gt_indices_list, self.texture_mask, - f'{save_dir}/{img_name[0]}') - - self.guidance_encoder.train() - self.index_decoder.train() - return (acc / num).item() - - def load_network(self): - checkpoint = torch.load(self.opt['pretrained_models']) - self.guidance_encoder.load_state_dict( - checkpoint['guidance_encoder'], strict=True) - self.guidance_encoder.eval() - - self.index_decoder.load_state_dict( - checkpoint['index_decoder'], strict=True) - self.index_decoder.eval() - - def save_network(self, save_path): - """Save networks. - - Args: - net (nn.Module): Network to be saved. - net_label (str): Network label. - current_iter (int): Current iter number. - """ - - save_dict = {} - save_dict['guidance_encoder'] = self.guidance_encoder.state_dict() - save_dict['index_decoder'] = self.index_decoder.state_dict() - - torch.save(save_dict, save_path) - - def update_learning_rate(self, epoch): - """Update learning rate. - - Args: - current_iter (int): Current iteration. - warmup_iter (int): Warmup iter numbers. -1 for no warmup. - Default: -1. - """ - lr = self.optimizer.param_groups[0]['lr'] - - if self.opt['lr_decay'] == 'step': - lr = self.opt['lr'] * ( - self.opt['gamma']**(epoch // self.opt['step'])) - elif self.opt['lr_decay'] == 'cos': - lr = self.opt['lr'] * ( - 1 + math.cos(math.pi * epoch / self.opt['num_epochs'])) / 2 - elif self.opt['lr_decay'] == 'linear': - lr = self.opt['lr'] * (1 - epoch / self.opt['num_epochs']) - elif self.opt['lr_decay'] == 'linear2exp': - if epoch < self.opt['turning_point'] + 1: - # learning rate decay as 95% - # at the turning point (1 / 95% = 1.0526) - lr = self.opt['lr'] * ( - 1 - epoch / int(self.opt['turning_point'] * 1.0526)) - else: - lr *= self.opt['gamma'] - elif self.opt['lr_decay'] == 'schedule': - if epoch in self.opt['schedule']: - lr *= self.opt['gamma'] - else: - raise ValueError('Unknown lr mode {}'.format(self.opt['lr_decay'])) - # set learning rate - for param_group in self.optimizer.param_groups: - param_group['lr'] = lr - - return lr - - def get_current_log(self): - return self.log_dict diff --git a/spaces/CVPR/lama-example/bin/extract_masks.py b/spaces/CVPR/lama-example/bin/extract_masks.py deleted file mode 100644 index d114e0fe470595f1d2aaeeeb84b36352f65b121e..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/bin/extract_masks.py +++ /dev/null @@ -1,63 +0,0 @@ -import PIL.Image as Image -import numpy as np -import os - - -def main(args): - if not args.indir.endswith('/'): - args.indir += '/' - os.makedirs(args.outdir, exist_ok=True) - - src_images = [ - args.indir+fname for fname in os.listdir(args.indir)] - - tgt_masks = [ - args.outdir+fname[:-4] + f'_mask000.png' - for fname in os.listdir(args.indir)] - - for img_name, msk_name in zip(src_images, tgt_masks): - #print(img) - #print(msk) - - image = Image.open(img_name).convert('RGB') - image = np.transpose(np.array(image), (2, 0, 1)) - - mask = (image == 255).astype(int) - - print(mask.dtype, mask.shape) - - - Image.fromarray( - np.clip(mask[0,:,:] * 255, 0, 255).astype('uint8'),mode='L' - ).save(msk_name) - - - - - ''' - for infile in src_images: - try: - file_relpath = infile[len(indir):] - img_outpath = os.path.join(outdir, file_relpath) - 
os.makedirs(os.path.dirname(img_outpath), exist_ok=True) - - image = Image.open(infile).convert('RGB') - - mask = - - Image.fromarray( - np.clip( - cur_mask * 255, 0, 255).astype('uint8'), - mode='L' - ).save(cur_basename + f'_mask{i:03d}.png') - ''' - - - -if __name__ == '__main__': - import argparse - aparser = argparse.ArgumentParser() - aparser.add_argument('--indir', type=str, help='Path to folder with images') - aparser.add_argument('--outdir', type=str, help='Path to folder to store aligned images and masks to') - - main(aparser.parse_args()) diff --git a/spaces/CVPR/monoscene_lite/fusion.py b/spaces/CVPR/monoscene_lite/fusion.py deleted file mode 100644 index aecd5cba3b1e3dd1e0534cda347eca8956657926..0000000000000000000000000000000000000000 --- a/spaces/CVPR/monoscene_lite/fusion.py +++ /dev/null @@ -1,507 +0,0 @@ -""" -Most of the code is taken from https://github.com/andyzeng/tsdf-fusion-python/blob/master/fusion.py - -@inproceedings{zeng20163dmatch, - title={3DMatch: Learning Local Geometric Descriptors from RGB-D Reconstructions}, - author={Zeng, Andy and Song, Shuran and Nie{\ss}ner, Matthias and Fisher, Matthew and Xiao, Jianxiong and Funkhouser, Thomas}, - booktitle={CVPR}, - year={2017} -} -""" - -import numpy as np - -from numba import njit, prange -from skimage import measure - -FUSION_GPU_MODE = 0 - - -class TSDFVolume: - """Volumetric TSDF Fusion of RGB-D Images.""" - - def __init__(self, vol_bnds, voxel_size, use_gpu=True): - """Constructor. - - Args: - vol_bnds (ndarray): An ndarray of shape (3, 2). Specifies the - xyz bounds (min/max) in meters. - voxel_size (float): The volume discretization in meters. - """ - vol_bnds = np.asarray(vol_bnds) - assert vol_bnds.shape == (3, 2), "[!] `vol_bnds` should be of shape (3, 2)." - - # Define voxel volume parameters - self._vol_bnds = vol_bnds - self._voxel_size = float(voxel_size) - self._trunc_margin = 5 * self._voxel_size # truncation on SDF - # self._trunc_margin = 10 # truncation on SDF - self._color_const = 256 * 256 - - # Adjust volume bounds and ensure C-order contiguous - self._vol_dim = ( - np.ceil((self._vol_bnds[:, 1] - self._vol_bnds[:, 0]) / self._voxel_size) - .copy(order="C") - .astype(int) - ) - self._vol_bnds[:, 1] = self._vol_bnds[:, 0] + self._vol_dim * self._voxel_size - self._vol_origin = self._vol_bnds[:, 0].copy(order="C").astype(np.float32) - - print( - "Voxel volume size: {} x {} x {} - # points: {:,}".format( - self._vol_dim[0], - self._vol_dim[1], - self._vol_dim[2], - self._vol_dim[0] * self._vol_dim[1] * self._vol_dim[2], - ) - ) - - # Initialize pointers to voxel volume in CPU memory - self._tsdf_vol_cpu = np.zeros(self._vol_dim).astype(np.float32) - # for computing the cumulative moving average of observations per voxel - self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32) - self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32) - - self.gpu_mode = use_gpu and FUSION_GPU_MODE - - # Copy voxel volumes to GPU - if self.gpu_mode: - self._tsdf_vol_gpu = cuda.mem_alloc(self._tsdf_vol_cpu.nbytes) - cuda.memcpy_htod(self._tsdf_vol_gpu, self._tsdf_vol_cpu) - self._weight_vol_gpu = cuda.mem_alloc(self._weight_vol_cpu.nbytes) - cuda.memcpy_htod(self._weight_vol_gpu, self._weight_vol_cpu) - self._color_vol_gpu = cuda.mem_alloc(self._color_vol_cpu.nbytes) - cuda.memcpy_htod(self._color_vol_gpu, self._color_vol_cpu) - - # Cuda kernel function (C++) - self._cuda_src_mod = SourceModule( - """ - __global__ void integrate(float * tsdf_vol, - float * weight_vol, - float * color_vol, - 
float * vol_dim, - float * vol_origin, - float * cam_intr, - float * cam_pose, - float * other_params, - float * color_im, - float * depth_im) { - // Get voxel index - int gpu_loop_idx = (int) other_params[0]; - int max_threads_per_block = blockDim.x; - int block_idx = blockIdx.z*gridDim.y*gridDim.x+blockIdx.y*gridDim.x+blockIdx.x; - int voxel_idx = gpu_loop_idx*gridDim.x*gridDim.y*gridDim.z*max_threads_per_block+block_idx*max_threads_per_block+threadIdx.x; - int vol_dim_x = (int) vol_dim[0]; - int vol_dim_y = (int) vol_dim[1]; - int vol_dim_z = (int) vol_dim[2]; - if (voxel_idx > vol_dim_x*vol_dim_y*vol_dim_z) - return; - // Get voxel grid coordinates (note: be careful when casting) - float voxel_x = floorf(((float)voxel_idx)/((float)(vol_dim_y*vol_dim_z))); - float voxel_y = floorf(((float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z))/((float)vol_dim_z)); - float voxel_z = (float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z-((int)voxel_y)*vol_dim_z); - // Voxel grid coordinates to world coordinates - float voxel_size = other_params[1]; - float pt_x = vol_origin[0]+voxel_x*voxel_size; - float pt_y = vol_origin[1]+voxel_y*voxel_size; - float pt_z = vol_origin[2]+voxel_z*voxel_size; - // World coordinates to camera coordinates - float tmp_pt_x = pt_x-cam_pose[0*4+3]; - float tmp_pt_y = pt_y-cam_pose[1*4+3]; - float tmp_pt_z = pt_z-cam_pose[2*4+3]; - float cam_pt_x = cam_pose[0*4+0]*tmp_pt_x+cam_pose[1*4+0]*tmp_pt_y+cam_pose[2*4+0]*tmp_pt_z; - float cam_pt_y = cam_pose[0*4+1]*tmp_pt_x+cam_pose[1*4+1]*tmp_pt_y+cam_pose[2*4+1]*tmp_pt_z; - float cam_pt_z = cam_pose[0*4+2]*tmp_pt_x+cam_pose[1*4+2]*tmp_pt_y+cam_pose[2*4+2]*tmp_pt_z; - // Camera coordinates to image pixels - int pixel_x = (int) roundf(cam_intr[0*3+0]*(cam_pt_x/cam_pt_z)+cam_intr[0*3+2]); - int pixel_y = (int) roundf(cam_intr[1*3+1]*(cam_pt_y/cam_pt_z)+cam_intr[1*3+2]); - // Skip if outside view frustum - int im_h = (int) other_params[2]; - int im_w = (int) other_params[3]; - if (pixel_x < 0 || pixel_x >= im_w || pixel_y < 0 || pixel_y >= im_h || cam_pt_z<0) - return; - // Skip invalid depth - float depth_value = depth_im[pixel_y*im_w+pixel_x]; - if (depth_value == 0) - return; - // Integrate TSDF - float trunc_margin = other_params[4]; - float depth_diff = depth_value-cam_pt_z; - if (depth_diff < -trunc_margin) - return; - float dist = fmin(1.0f,depth_diff/trunc_margin); - float w_old = weight_vol[voxel_idx]; - float obs_weight = other_params[5]; - float w_new = w_old + obs_weight; - weight_vol[voxel_idx] = w_new; - tsdf_vol[voxel_idx] = (tsdf_vol[voxel_idx]*w_old+obs_weight*dist)/w_new; - // Integrate color - float old_color = color_vol[voxel_idx]; - float old_b = floorf(old_color/(256*256)); - float old_g = floorf((old_color-old_b*256*256)/256); - float old_r = old_color-old_b*256*256-old_g*256; - float new_color = color_im[pixel_y*im_w+pixel_x]; - float new_b = floorf(new_color/(256*256)); - float new_g = floorf((new_color-new_b*256*256)/256); - float new_r = new_color-new_b*256*256-new_g*256; - new_b = fmin(roundf((old_b*w_old+obs_weight*new_b)/w_new),255.0f); - new_g = fmin(roundf((old_g*w_old+obs_weight*new_g)/w_new),255.0f); - new_r = fmin(roundf((old_r*w_old+obs_weight*new_r)/w_new),255.0f); - color_vol[voxel_idx] = new_b*256*256+new_g*256+new_r; - }""" - ) - - self._cuda_integrate = self._cuda_src_mod.get_function("integrate") - - # Determine block/grid size on GPU - gpu_dev = cuda.Device(0) - self._max_gpu_threads_per_block = gpu_dev.MAX_THREADS_PER_BLOCK - n_blocks = int( - np.ceil( - float(np.prod(self._vol_dim)) - 
/ float(self._max_gpu_threads_per_block) - ) - ) - grid_dim_x = min(gpu_dev.MAX_GRID_DIM_X, int(np.floor(np.cbrt(n_blocks)))) - grid_dim_y = min( - gpu_dev.MAX_GRID_DIM_Y, int(np.floor(np.sqrt(n_blocks / grid_dim_x))) - ) - grid_dim_z = min( - gpu_dev.MAX_GRID_DIM_Z, - int(np.ceil(float(n_blocks) / float(grid_dim_x * grid_dim_y))), - ) - self._max_gpu_grid_dim = np.array( - [grid_dim_x, grid_dim_y, grid_dim_z] - ).astype(int) - self._n_gpu_loops = int( - np.ceil( - float(np.prod(self._vol_dim)) - / float( - np.prod(self._max_gpu_grid_dim) - * self._max_gpu_threads_per_block - ) - ) - ) - - else: - # Get voxel grid coordinates - xv, yv, zv = np.meshgrid( - range(self._vol_dim[0]), - range(self._vol_dim[1]), - range(self._vol_dim[2]), - indexing="ij", - ) - self.vox_coords = ( - np.concatenate( - [xv.reshape(1, -1), yv.reshape(1, -1), zv.reshape(1, -1)], axis=0 - ) - .astype(int) - .T - ) - - @staticmethod - @njit(parallel=True) - def vox2world(vol_origin, vox_coords, vox_size, offsets=(0.5, 0.5, 0.5)): - """Convert voxel grid coordinates to world coordinates.""" - vol_origin = vol_origin.astype(np.float32) - vox_coords = vox_coords.astype(np.float32) - # print(np.min(vox_coords)) - cam_pts = np.empty_like(vox_coords, dtype=np.float32) - - for i in prange(vox_coords.shape[0]): - for j in range(3): - cam_pts[i, j] = ( - vol_origin[j] - + (vox_size * vox_coords[i, j]) - + vox_size * offsets[j] - ) - return cam_pts - - @staticmethod - @njit(parallel=True) - def cam2pix(cam_pts, intr): - """Convert camera coordinates to pixel coordinates.""" - intr = intr.astype(np.float32) - fx, fy = intr[0, 0], intr[1, 1] - cx, cy = intr[0, 2], intr[1, 2] - pix = np.empty((cam_pts.shape[0], 2), dtype=np.int64) - for i in prange(cam_pts.shape[0]): - pix[i, 0] = int(np.round((cam_pts[i, 0] * fx / cam_pts[i, 2]) + cx)) - pix[i, 1] = int(np.round((cam_pts[i, 1] * fy / cam_pts[i, 2]) + cy)) - return pix - - @staticmethod - @njit(parallel=True) - def integrate_tsdf(tsdf_vol, dist, w_old, obs_weight): - """Integrate the TSDF volume.""" - tsdf_vol_int = np.empty_like(tsdf_vol, dtype=np.float32) - # print(tsdf_vol.shape) - w_new = np.empty_like(w_old, dtype=np.float32) - for i in prange(len(tsdf_vol)): - w_new[i] = w_old[i] + obs_weight - tsdf_vol_int[i] = (w_old[i] * tsdf_vol[i] + obs_weight * dist[i]) / w_new[i] - return tsdf_vol_int, w_new - - def integrate(self, color_im, depth_im, cam_intr, cam_pose, obs_weight=1.0): - """Integrate an RGB-D frame into the TSDF volume. - - Args: - color_im (ndarray): An RGB image of shape (H, W, 3). - depth_im (ndarray): A depth image of shape (H, W). - cam_intr (ndarray): The camera intrinsics matrix of shape (3, 3). - cam_pose (ndarray): The camera pose (i.e. extrinsics) of shape (4, 4). - obs_weight (float): The weight to assign for the current observation. 
A higher - value - """ - im_h, im_w = depth_im.shape - - # Fold RGB color image into a single channel image - color_im = color_im.astype(np.float32) - color_im = np.floor( - color_im[..., 2] * self._color_const - + color_im[..., 1] * 256 - + color_im[..., 0] - ) - - if self.gpu_mode: # GPU mode: integrate voxel volume (calls CUDA kernel) - for gpu_loop_idx in range(self._n_gpu_loops): - self._cuda_integrate( - self._tsdf_vol_gpu, - self._weight_vol_gpu, - self._color_vol_gpu, - cuda.InOut(self._vol_dim.astype(np.float32)), - cuda.InOut(self._vol_origin.astype(np.float32)), - cuda.InOut(cam_intr.reshape(-1).astype(np.float32)), - cuda.InOut(cam_pose.reshape(-1).astype(np.float32)), - cuda.InOut( - np.asarray( - [ - gpu_loop_idx, - self._voxel_size, - im_h, - im_w, - self._trunc_margin, - obs_weight, - ], - np.float32, - ) - ), - cuda.InOut(color_im.reshape(-1).astype(np.float32)), - cuda.InOut(depth_im.reshape(-1).astype(np.float32)), - block=(self._max_gpu_threads_per_block, 1, 1), - grid=( - int(self._max_gpu_grid_dim[0]), - int(self._max_gpu_grid_dim[1]), - int(self._max_gpu_grid_dim[2]), - ), - ) - else: # CPU mode: integrate voxel volume (vectorized implementation) - # Convert voxel grid coordinates to pixel coordinates - cam_pts = self.vox2world( - self._vol_origin, self.vox_coords, self._voxel_size - ) - cam_pts = rigid_transform(cam_pts, np.linalg.inv(cam_pose)) - pix_z = cam_pts[:, 2] - pix = self.cam2pix(cam_pts, cam_intr) - pix_x, pix_y = pix[:, 0], pix[:, 1] - - # Eliminate pixels outside view frustum - valid_pix = np.logical_and( - pix_x >= 0, - np.logical_and( - pix_x < im_w, - np.logical_and(pix_y >= 0, np.logical_and(pix_y < im_h, pix_z > 0)), - ), - ) - depth_val = np.zeros(pix_x.shape) - depth_val[valid_pix] = depth_im[pix_y[valid_pix], pix_x[valid_pix]] - - # Integrate TSDF - depth_diff = depth_val - pix_z - - valid_pts = np.logical_and(depth_val > 0, depth_diff >= -10) - dist = depth_diff - - valid_vox_x = self.vox_coords[valid_pts, 0] - valid_vox_y = self.vox_coords[valid_pts, 1] - valid_vox_z = self.vox_coords[valid_pts, 2] - w_old = self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] - tsdf_vals = self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] - valid_dist = dist[valid_pts] - tsdf_vol_new, w_new = self.integrate_tsdf( - tsdf_vals, valid_dist, w_old, obs_weight - ) - self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = w_new - self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = tsdf_vol_new - - # Integrate color - old_color = self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] - old_b = np.floor(old_color / self._color_const) - old_g = np.floor((old_color - old_b * self._color_const) / 256) - old_r = old_color - old_b * self._color_const - old_g * 256 - new_color = color_im[pix_y[valid_pts], pix_x[valid_pts]] - new_b = np.floor(new_color / self._color_const) - new_g = np.floor((new_color - new_b * self._color_const) / 256) - new_r = new_color - new_b * self._color_const - new_g * 256 - new_b = np.minimum( - 255.0, np.round((w_old * old_b + obs_weight * new_b) / w_new) - ) - new_g = np.minimum( - 255.0, np.round((w_old * old_g + obs_weight * new_g) / w_new) - ) - new_r = np.minimum( - 255.0, np.round((w_old * old_r + obs_weight * new_r) / w_new) - ) - self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = ( - new_b * self._color_const + new_g * 256 + new_r - ) - - def get_volume(self): - if self.gpu_mode: - cuda.memcpy_dtoh(self._tsdf_vol_cpu, self._tsdf_vol_gpu) - cuda.memcpy_dtoh(self._color_vol_cpu, 
self._color_vol_gpu) - return self._tsdf_vol_cpu, self._color_vol_cpu - - def get_point_cloud(self): - """Extract a point cloud from the voxel volume.""" - tsdf_vol, color_vol = self.get_volume() - - # Marching cubes - verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0] - verts_ind = np.round(verts).astype(int) - verts = verts * self._voxel_size + self._vol_origin - - # Get vertex colors - rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]] - colors_b = np.floor(rgb_vals / self._color_const) - colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256) - colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256 - colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T - colors = colors.astype(np.uint8) - - pc = np.hstack([verts, colors]) - return pc - - def get_mesh(self): - """Compute a mesh from the voxel volume using marching cubes.""" - tsdf_vol, color_vol = self.get_volume() - - # Marching cubes - verts, faces, norms, vals = measure.marching_cubes_lewiner(tsdf_vol, level=0) - verts_ind = np.round(verts).astype(int) - verts = ( - verts * self._voxel_size + self._vol_origin - ) # voxel grid coordinates to world coordinates - - # Get vertex colors - rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]] - colors_b = np.floor(rgb_vals / self._color_const) - colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256) - colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256 - colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T - colors = colors.astype(np.uint8) - return verts, faces, norms, colors - - -def rigid_transform(xyz, transform): - """Applies a rigid transform to an (N, 3) pointcloud.""" - xyz_h = np.hstack([xyz, np.ones((len(xyz), 1), dtype=np.float32)]) - xyz_t_h = np.dot(transform, xyz_h.T).T - return xyz_t_h[:, :3] - - -def get_view_frustum(depth_im, cam_intr, cam_pose): - """Get corners of 3D camera view frustum of depth image""" - im_h = depth_im.shape[0] - im_w = depth_im.shape[1] - max_depth = np.max(depth_im) - view_frust_pts = np.array( - [ - (np.array([0, 0, 0, im_w, im_w]) - cam_intr[0, 2]) - * np.array([0, max_depth, max_depth, max_depth, max_depth]) - / cam_intr[0, 0], - (np.array([0, 0, im_h, 0, im_h]) - cam_intr[1, 2]) - * np.array([0, max_depth, max_depth, max_depth, max_depth]) - / cam_intr[1, 1], - np.array([0, max_depth, max_depth, max_depth, max_depth]), - ] - ) - view_frust_pts = rigid_transform(view_frust_pts.T, cam_pose).T - return view_frust_pts - - -def meshwrite(filename, verts, faces, norms, colors): - """Save a 3D mesh to a polygon .ply file.""" - # Write header - ply_file = open(filename, "w") - ply_file.write("ply\n") - ply_file.write("format ascii 1.0\n") - ply_file.write("element vertex %d\n" % (verts.shape[0])) - ply_file.write("property float x\n") - ply_file.write("property float y\n") - ply_file.write("property float z\n") - ply_file.write("property float nx\n") - ply_file.write("property float ny\n") - ply_file.write("property float nz\n") - ply_file.write("property uchar red\n") - ply_file.write("property uchar green\n") - ply_file.write("property uchar blue\n") - ply_file.write("element face %d\n" % (faces.shape[0])) - ply_file.write("property list uchar int vertex_index\n") - ply_file.write("end_header\n") - - # Write vertex list - for i in range(verts.shape[0]): - ply_file.write( - "%f %f %f %f %f %f %d %d %d\n" - % ( - verts[i, 0], - verts[i, 1], - verts[i, 2], - norms[i, 0], - norms[i, 1], - norms[i, 2], - colors[i, 0], - colors[i, 
1], - colors[i, 2], - ) - ) - - # Write face list - for i in range(faces.shape[0]): - ply_file.write("3 %d %d %d\n" % (faces[i, 0], faces[i, 1], faces[i, 2])) - - ply_file.close() - - -def pcwrite(filename, xyzrgb): - """Save a point cloud to a polygon .ply file.""" - xyz = xyzrgb[:, :3] - rgb = xyzrgb[:, 3:].astype(np.uint8) - - # Write header - ply_file = open(filename, "w") - ply_file.write("ply\n") - ply_file.write("format ascii 1.0\n") - ply_file.write("element vertex %d\n" % (xyz.shape[0])) - ply_file.write("property float x\n") - ply_file.write("property float y\n") - ply_file.write("property float z\n") - ply_file.write("property uchar red\n") - ply_file.write("property uchar green\n") - ply_file.write("property uchar blue\n") - ply_file.write("end_header\n") - - # Write vertex list - for i in range(xyz.shape[0]): - ply_file.write( - "%f %f %f %d %d %d\n" - % ( - xyz[i, 0], - xyz[i, 1], - xyz[i, 2], - rgb[i, 0], - rgb[i, 1], - rgb[i, 2], - ) - ) diff --git a/spaces/CassBunny/anything-v3.0/utils.py b/spaces/CassBunny/anything-v3.0/utils.py deleted file mode 100644 index ff1c065d186347ca51b47d010a697dbe1814695c..0000000000000000000000000000000000000000 --- a/spaces/CassBunny/anything-v3.0/utils.py +++ /dev/null @@ -1,6 +0,0 @@ -def is_google_colab(): - try: - import google.colab - return True - except: - return False \ No newline at end of file diff --git a/spaces/ChandraMohanNayal/AutoGPT/tests/context.py b/spaces/ChandraMohanNayal/AutoGPT/tests/context.py deleted file mode 100644 index cef969db69ab189109b935bba9ed06696cf5337a..0000000000000000000000000000000000000000 --- a/spaces/ChandraMohanNayal/AutoGPT/tests/context.py +++ /dev/null @@ -1,6 +0,0 @@ -import os -import sys - -sys.path.insert( - 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../scripts")) -) diff --git a/spaces/Cletrason/cloudqi-cqi_text_to_image_pt_v0/app.py b/spaces/Cletrason/cloudqi-cqi_text_to_image_pt_v0/app.py deleted file mode 100644 index e917a619dd584c32f6866a2a3bc9d550760af235..0000000000000000000000000000000000000000 --- a/spaces/Cletrason/cloudqi-cqi_text_to_image_pt_v0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/cloudqi/cqi_text_to_image_pt_v0").launch() \ No newline at end of file diff --git a/spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_result_data_processing.py b/spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_result_data_processing.py deleted file mode 100644 index fdd50925ceb8b15c9b0b814629b3f419dc47c428..0000000000000000000000000000000000000000 --- a/spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_result_data_processing.py +++ /dev/null @@ -1,52 +0,0 @@ -import unittest -from result_data_processor import ResultDataProcessor -import pandas as pd - -class TestResultDataProcessor(unittest.TestCase): - - def setUp(self): - self.processor = ResultDataProcessor() - - # check that the result is a pandas dataframe - def test_process_data(self): - data = self.processor.data - self.assertIsInstance(data, pd.DataFrame) - - # check that pandas dataframe has the right columns - def test_columns(self): - data = self.processor.data - self.assertIn('Parameters', data.columns) - self.assertIn('MMLU_average', data.columns) - # check number of columns - self.assertEqual(len(data.columns), 64) - - # check that the number of rows is correct - def test_rows(self): - data = self.processor.data - self.assertEqual(len(data), 998) - - # check that mc1 column exists - def test_mc1(self): - data = self.processor.data - self.assertIn('harness|truthfulqa:mc1', 
data.columns) - - # test that a column that contains truthfulqa:mc does not exist - def test_truthfulqa_mc(self): - data = self.processor.data - self.assertNotIn('truthfulqa:mc', data.columns) - - # check for extreme outliers in mc1 column - def test_mc1_outliers(self): - data = self.processor.data - mc1 = data['harness|truthfulqa:mc1'] - self.assertLess(mc1.max(), 1.0) - self.assertGreater(mc1.min(), 0.0) - - - # test that a column named organization exists - def test_organization(self): - data = self.processor.data - self.assertIn('organization', data.columns) - -if __name__ == '__main__': - unittest.main() \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/subset/cff.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/subset/cff.py deleted file mode 100644 index dd79f6db37a482891b6f151159ef4c9b89475b8e..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/subset/cff.py +++ /dev/null @@ -1,536 +0,0 @@ -from fontTools.misc import psCharStrings -from fontTools import ttLib -from fontTools.pens.basePen import NullPen -from fontTools.misc.roundTools import otRound -from fontTools.misc.loggingTools import deprecateFunction -from fontTools.subset.util import _add_method, _uniq_sort - - -class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler): - def __init__(self, components, localSubrs, globalSubrs): - psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) - self.components = components - - def op_endchar(self, index): - args = self.popall() - if len(args) >= 4: - from fontTools.encodings.StandardEncoding import StandardEncoding - - # endchar can do seac accent bulding; The T2 spec says it's deprecated, - # but recent software that shall remain nameless does output it. 
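            # For example (hypothetical glyph, real StandardEncoding codes): a seac-built
            # "Aacute" leaves [adx, ady, 65, 194] on the stack, and StandardEncoding maps
            # 65 -> "A" and 194 -> "acute", so both component glyphs join the closure.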
- adx, ady, bchar, achar = args[-4:] - baseGlyph = StandardEncoding[bchar] - accentGlyph = StandardEncoding[achar] - self.components.add(baseGlyph) - self.components.add(accentGlyph) - - -@_add_method(ttLib.getTableClass("CFF ")) -def closure_glyphs(self, s): - cff = self.cff - assert len(cff) == 1 - font = cff[cff.keys()[0]] - glyphSet = font.CharStrings - - decompose = s.glyphs - while decompose: - components = set() - for g in decompose: - if g not in glyphSet: - continue - gl = glyphSet[g] - - subrs = getattr(gl.private, "Subrs", []) - decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs) - decompiler.execute(gl) - components -= s.glyphs - s.glyphs.update(components) - decompose = components - - -def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False): - c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName) - if isCFF2 or ignoreWidth: - # CFF2 charstrings have no widths nor 'endchar' operators - c.setProgram([] if isCFF2 else ["endchar"]) - else: - if hasattr(font, "FDArray") and font.FDArray is not None: - private = font.FDArray[fdSelectIndex].Private - else: - private = font.Private - dfltWdX = private.defaultWidthX - nmnlWdX = private.nominalWidthX - pen = NullPen() - c.draw(pen) # this will set the charstring's width - if c.width != dfltWdX: - c.program = [c.width - nmnlWdX, "endchar"] - else: - c.program = ["endchar"] - - -@_add_method(ttLib.getTableClass("CFF ")) -def prune_pre_subset(self, font, options): - cff = self.cff - # CFF table must have one font only - cff.fontNames = cff.fontNames[:1] - - if options.notdef_glyph and not options.notdef_outline: - isCFF2 = cff.major > 1 - for fontname in cff.keys(): - font = cff[fontname] - _empty_charstring(font, ".notdef", isCFF2=isCFF2) - - # Clear useless Encoding - for fontname in cff.keys(): - font = cff[fontname] - # https://github.com/fonttools/fonttools/issues/620 - font.Encoding = "StandardEncoding" - - return True # bool(cff.fontNames) - - -@_add_method(ttLib.getTableClass("CFF ")) -def subset_glyphs(self, s): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - glyphs = s.glyphs.union(s.glyphs_emptied) - - # Load all glyphs - for g in font.charset: - if g not in glyphs: - continue - c, _ = cs.getItemAndSelector(g) - - if cs.charStringsAreIndexed: - indices = [i for i, g in enumerate(font.charset) if g in glyphs] - csi = cs.charStringsIndex - csi.items = [csi.items[i] for i in indices] - del csi.file, csi.offsets - if hasattr(font, "FDSelect"): - sel = font.FDSelect - sel.format = None - sel.gidArray = [sel.gidArray[i] for i in indices] - newCharStrings = {} - for indicesIdx, charsetIdx in enumerate(indices): - g = font.charset[charsetIdx] - if g in cs.charStrings: - newCharStrings[g] = indicesIdx - cs.charStrings = newCharStrings - else: - cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs} - font.charset = [g for g in font.charset if g in glyphs] - font.numGlyphs = len(font.charset) - - if s.options.retain_gids: - isCFF2 = cff.major > 1 - for g in s.glyphs_emptied: - _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True) - - return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) - - -@_add_method(psCharStrings.T2CharString) -def subset_subroutines(self, subrs, gsubrs): - p = self.program - for i in range(1, len(p)): - if p[i] == "callsubr": - assert isinstance(p[i - 1], int) - p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias - elif p[i] == "callgsubr": - assert 
isinstance(p[i - 1], int) - p[i - 1] = ( - gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias - ) - - -@_add_method(psCharStrings.T2CharString) -def drop_hints(self): - hints = self._hints - - if hints.deletions: - p = self.program - for idx in reversed(hints.deletions): - del p[idx - 2 : idx] - - if hints.has_hint: - assert not hints.deletions or hints.last_hint <= hints.deletions[0] - self.program = self.program[hints.last_hint :] - if not self.program: - # TODO CFF2 no need for endchar. - self.program.append("endchar") - if hasattr(self, "width"): - # Insert width back if needed - if self.width != self.private.defaultWidthX: - # For CFF2 charstrings, this should never happen - assert ( - self.private.defaultWidthX is not None - ), "CFF2 CharStrings must not have an initial width value" - self.program.insert(0, self.width - self.private.nominalWidthX) - - if hints.has_hintmask: - i = 0 - p = self.program - while i < len(p): - if p[i] in ["hintmask", "cntrmask"]: - assert i + 1 <= len(p) - del p[i : i + 2] - continue - i += 1 - - assert len(self.program) - - del self._hints - - -class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): - def __init__(self, localSubrs, globalSubrs, private): - psCharStrings.SimpleT2Decompiler.__init__( - self, localSubrs, globalSubrs, private - ) - for subrs in [localSubrs, globalSubrs]: - if subrs and not hasattr(subrs, "_used"): - subrs._used = set() - - def op_callsubr(self, index): - self.localSubrs._used.add(self.operandStack[-1] + self.localBias) - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - - def op_callgsubr(self, index): - self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias) - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - - -class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor): - class Hints(object): - def __init__(self): - # Whether calling this charstring produces any hint stems - # Note that if a charstring starts with hintmask, it will - # have has_hint set to True, because it *might* produce an - # implicit vstem if called under certain conditions. - self.has_hint = False - # Index to start at to drop all hints - self.last_hint = 0 - # Index up to which we know more hints are possible. - # Only relevant if status is 0 or 1. - self.last_checked = 0 - # The status means: - # 0: after dropping hints, this charstring is empty - # 1: after dropping hints, there may be more hints - # continuing after this, or there might be - # other things. Not clear yet. - # 2: no more hints possible after this charstring - self.status = 0 - # Has hintmask instructions; not recursive - self.has_hintmask = False - # List of indices of calls to empty subroutines to remove. - self.deletions = [] - - pass - - def __init__( - self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None - ): - self._css = css - psCharStrings.T2WidthExtractor.__init__( - self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX - ) - self.private = private - - def execute(self, charString): - old_hints = charString._hints if hasattr(charString, "_hints") else None - charString._hints = self.Hints() - - psCharStrings.T2WidthExtractor.execute(self, charString) - - hints = charString._hints - - if hints.has_hint or hints.has_hintmask: - self._css.add(charString) - - if hints.status != 2: - # Check from last_check, make sure we didn't have any operators. 
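            # (in a decompiled charstring program, operators are str entries and operands
            # are numbers, hence the isinstance(..., str) test in the loop below)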
- for i in range(hints.last_checked, len(charString.program) - 1): - if isinstance(charString.program[i], str): - hints.status = 2 - break - else: - hints.status = 1 # There's *something* here - hints.last_checked = len(charString.program) - - if old_hints: - assert hints.__dict__ == old_hints.__dict__ - - def op_callsubr(self, index): - subr = self.localSubrs[self.operandStack[-1] + self.localBias] - psCharStrings.T2WidthExtractor.op_callsubr(self, index) - self.processSubr(index, subr) - - def op_callgsubr(self, index): - subr = self.globalSubrs[self.operandStack[-1] + self.globalBias] - psCharStrings.T2WidthExtractor.op_callgsubr(self, index) - self.processSubr(index, subr) - - def op_hstem(self, index): - psCharStrings.T2WidthExtractor.op_hstem(self, index) - self.processHint(index) - - def op_vstem(self, index): - psCharStrings.T2WidthExtractor.op_vstem(self, index) - self.processHint(index) - - def op_hstemhm(self, index): - psCharStrings.T2WidthExtractor.op_hstemhm(self, index) - self.processHint(index) - - def op_vstemhm(self, index): - psCharStrings.T2WidthExtractor.op_vstemhm(self, index) - self.processHint(index) - - def op_hintmask(self, index): - rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index) - self.processHintmask(index) - return rv - - def op_cntrmask(self, index): - rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index) - self.processHintmask(index) - return rv - - def processHintmask(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hintmask = True - if hints.status != 2: - # Check from last_check, see if we may be an implicit vstem - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - else: - # We are an implicit vstem - hints.has_hint = True - hints.last_hint = index + 1 - hints.status = 0 - hints.last_checked = index + 1 - - def processHint(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hint = True - hints.last_hint = index - hints.last_checked = index - - def processSubr(self, index, subr): - cs = self.callingStack[-1] - hints = cs._hints - subr_hints = subr._hints - - # Check from last_check, make sure we didn't have - # any operators. 
- if hints.status != 2: - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - hints.last_checked = index - - if hints.status != 2: - if subr_hints.has_hint: - hints.has_hint = True - - # Decide where to chop off from - if subr_hints.status == 0: - hints.last_hint = index - else: - hints.last_hint = index - 2 # Leave the subr call in - - elif subr_hints.status == 0: - hints.deletions.append(index) - - hints.status = max(hints.status, subr_hints.status) - - -@_add_method(ttLib.getTableClass("CFF ")) -def prune_post_subset(self, ttfFont, options): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - # Drop unused FontDictionaries - if hasattr(font, "FDSelect"): - sel = font.FDSelect - indices = _uniq_sort(sel.gidArray) - sel.gidArray = [indices.index(ss) for ss in sel.gidArray] - arr = font.FDArray - arr.items = [arr[i] for i in indices] - del arr.file, arr.offsets - - # Desubroutinize if asked for - if options.desubroutinize: - cff.desubroutinize() - - # Drop hints if not needed - if not options.hinting: - self.remove_hints() - elif not options.desubroutinize: - self.remove_unused_subroutines() - return True - - -def _delete_empty_subrs(private_dict): - if hasattr(private_dict, "Subrs") and not private_dict.Subrs: - if "Subrs" in private_dict.rawDict: - del private_dict.rawDict["Subrs"] - del private_dict.Subrs - - -@deprecateFunction( - "use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning -) -@_add_method(ttLib.getTableClass("CFF ")) -def desubroutinize(self): - self.cff.desubroutinize() - - -@_add_method(ttLib.getTableClass("CFF ")) -def remove_hints(self): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - # This can be tricky, but doesn't have to. What we do is: - # - # - Run all used glyph charstrings and recurse into subroutines, - # - For each charstring (including subroutines), if it has any - # of the hint stem operators, we mark it as such. - # Upon returning, for each charstring we note all the - # subroutine calls it makes that (recursively) contain a stem, - # - Dropping hinting then consists of the following two ops: - # * Drop the piece of the program in each charstring before the - # last call to a stem op or a stem-calling subroutine, - # * Drop all hintmask operations. - # - It's trickier... A hintmask right after hints and a few numbers - # will act as an implicit vstemhm. As such, we track whether - # we have seen any non-hint operators so far and do the right - # thing, recursively... 
Good luck understanding that :( - css = set() - for g in font.charset: - c, _ = cs.getItemAndSelector(g) - c.decompile() - subrs = getattr(c.private, "Subrs", []) - decompiler = _DehintingT2Decompiler( - css, - subrs, - c.globalSubrs, - c.private.nominalWidthX, - c.private.defaultWidthX, - c.private, - ) - decompiler.execute(c) - c.width = decompiler.width - for charstring in css: - charstring.drop_hints() - del css - - # Drop font-wide hinting values - all_privs = [] - if hasattr(font, "FDArray"): - all_privs.extend(fd.Private for fd in font.FDArray) - else: - all_privs.append(font.Private) - for priv in all_privs: - for k in [ - "BlueValues", - "OtherBlues", - "FamilyBlues", - "FamilyOtherBlues", - "BlueScale", - "BlueShift", - "BlueFuzz", - "StemSnapH", - "StemSnapV", - "StdHW", - "StdVW", - "ForceBold", - "LanguageGroup", - "ExpansionFactor", - ]: - if hasattr(priv, k): - setattr(priv, k, None) - self.remove_unused_subroutines() - - -@_add_method(ttLib.getTableClass("CFF ")) -def remove_unused_subroutines(self): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - # Renumber subroutines to remove unused ones - - # Mark all used subroutines - for g in font.charset: - c, _ = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private) - decompiler.execute(c) - - all_subrs = [font.GlobalSubrs] - if hasattr(font, "FDArray"): - all_subrs.extend( - fd.Private.Subrs - for fd in font.FDArray - if hasattr(fd.Private, "Subrs") and fd.Private.Subrs - ) - elif hasattr(font.Private, "Subrs") and font.Private.Subrs: - all_subrs.append(font.Private.Subrs) - - subrs = set(subrs) # Remove duplicates - - # Prepare - for subrs in all_subrs: - if not hasattr(subrs, "_used"): - subrs._used = set() - subrs._used = _uniq_sort(subrs._used) - subrs._old_bias = psCharStrings.calcSubrBias(subrs) - subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) - - # Renumber glyph charstrings - for g in font.charset: - c, _ = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - c.subset_subroutines(subrs, font.GlobalSubrs) - - # Renumber subroutines themselves - for subrs in all_subrs: - if subrs == font.GlobalSubrs: - if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"): - local_subrs = font.Private.Subrs - else: - local_subrs = [] - else: - local_subrs = subrs - - subrs.items = [subrs.items[i] for i in subrs._used] - if hasattr(subrs, "file"): - del subrs.file - if hasattr(subrs, "offsets"): - del subrs.offsets - - for subr in subrs.items: - subr.subset_subroutines(local_subrs, font.GlobalSubrs) - - # Delete local SubrsIndex if empty - if hasattr(font, "FDArray"): - for fd in font.FDArray: - _delete_empty_subrs(fd.Private) - else: - _delete_empty_subrs(font.Private) - - # Cleanup - for subrs in all_subrs: - del subrs._used, subrs._old_bias, subrs._new_bias diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-2908e8a9.css b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-2908e8a9.css deleted file mode 100644 index 78067c2729600b4ee3e7e9c6442a129e8ffe9894..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-2908e8a9.css +++ /dev/null @@ -1 +0,0 @@ 
-.gradio-bokeh.svelte-1fe5ixn.svelte-1fe5ixn{display:flex;justify-content:center}.layout.svelte-1fe5ixn.svelte-1fe5ixn{display:flex;flex-direction:column;justify-content:center;align-items:center;width:var(--size-full);height:var(--size-full);color:var(--body-text-color)}.altair.svelte-1fe5ixn.svelte-1fe5ixn{display:flex;flex-direction:column;justify-content:center;align-items:center;width:var(--size-full);height:var(--size-full)}.caption.svelte-1fe5ixn.svelte-1fe5ixn{font-size:var(--text-sm)}.matplotlib.svelte-1fe5ixn img.svelte-1fe5ixn{object-fit:contain} diff --git a/spaces/DeepDrivePL/PaddleSeg-Matting/matting/dataset/matting_dataset.py b/spaces/DeepDrivePL/PaddleSeg-Matting/matting/dataset/matting_dataset.py deleted file mode 100644 index 37f2a56b30a539716f8ff61cf452d7af9d510960..0000000000000000000000000000000000000000 --- a/spaces/DeepDrivePL/PaddleSeg-Matting/matting/dataset/matting_dataset.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import math - -import cv2 -import numpy as np -import random -import paddle -from paddleseg.cvlibs import manager - -import matting.transforms as T - - -@manager.DATASETS.add_component -class MattingDataset(paddle.io.Dataset): - """ - Pass in a dataset that conforms to the format. - matting_dataset/ - |--bg/ - | - |--train/ - | |--fg/ - | |--alpha/ - | - |--val/ - | |--fg/ - | |--alpha/ - | |--trimap/ (if existing) - | - |--train.txt - | - |--val.txt - See README.md for more information about the dataset. - - Args: - dataset_root(str): The root path of the dataset. - transforms(list): Transforms for image. - mode (str, optional): Which part of the dataset to use. It is one of ('train', 'val', 'trainval'). Default: 'train'. - train_file (str|list, optional): File list used for training. It should be `foreground_image.png background_image.png` - or `foreground_image.png`. It should be provided if mode is 'train'. Default: None. - val_file (str|list, optional): File list used for evaluation. It should be `foreground_image.png background_image.png` - or `foreground_image.png` or `foreground_image.png background_image.png trimap_image.png`. - It should be provided if mode is 'val'. Default: None. - get_trimap (bool, optional): Whether to get a trimap. Default: True. - separator (str, optional): The separator of train_file or val_file. If a file name contains ' ', '|' may be a better choice. Default: ' '. - """ - - def __init__(self, - dataset_root, - transforms, - mode='train', - train_file=None, - val_file=None, - get_trimap=True, - separator=' '): - super().__init__() - self.dataset_root = dataset_root - self.transforms = T.Compose(transforms) - self.mode = mode - self.get_trimap = get_trimap - self.separator = separator - - # check file - if mode == 'train' or mode == 'trainval': - if train_file is None: - raise ValueError( - "When `mode` is 'train' or 'trainval', `train_file` must be provided!"
- ) - if isinstance(train_file, str): - train_file = [train_file] - file_list = train_file - - if mode == 'val' or mode == 'trainval': - if val_file is None: - raise ValueError( - "When `mode` is 'val' or 'trainval', `val_file must be provided!" - ) - if isinstance(val_file, str): - val_file = [val_file] - file_list = val_file - - if mode == 'trainval': - file_list = train_file + val_file - - # read file - self.fg_bg_list = [] - for file in file_list: - file = os.path.join(dataset_root, file) - with open(file, 'r') as f: - lines = f.readlines() - for line in lines: - line = line.strip() - self.fg_bg_list.append(line) - - def __getitem__(self, idx): - data = {} - fg_bg_file = self.fg_bg_list[idx] - fg_bg_file = fg_bg_file.split(self.separator) - data['img_name'] = fg_bg_file[0] # using in save prediction results - fg_file = os.path.join(self.dataset_root, fg_bg_file[0]) - alpha_file = fg_file.replace('/fg', '/alpha') - fg = cv2.imread(fg_file) - alpha = cv2.imread(alpha_file, 0) - data['alpha'] = alpha - data['gt_fields'] = [] - - # line is: fg [bg] [trimap] - if len(fg_bg_file) >= 2: - bg_file = os.path.join(self.dataset_root, fg_bg_file[1]) - bg = cv2.imread(bg_file) - data['img'], data['bg'] = self.composite(fg, alpha, bg) - data['fg'] = fg - if self.mode in ['train', 'trainval']: - data['gt_fields'].append('fg') - data['gt_fields'].append('bg') - data['gt_fields'].append('alpha') - if len(fg_bg_file) == 3 and self.get_trimap: - if self.mode == 'val': - trimap_path = os.path.join(self.dataset_root, fg_bg_file[2]) - if os.path.exists(trimap_path): - data['trimap'] = trimap_path - data['gt_fields'].append('trimap') - data['ori_trimap'] = cv2.imread(trimap_path, 0) - else: - raise FileNotFoundError( - 'trimap is not Found: {}'.format(fg_bg_file[2])) - else: - data['img'] = fg - if self.mode in ['train', 'trainval']: - data['fg'] = fg.copy() - data['bg'] = fg.copy() - data['gt_fields'].append('fg') - data['gt_fields'].append('bg') - data['gt_fields'].append('alpha') - - data['trans_info'] = [] # Record shape change information - - # Generate trimap from alpha if no trimap file provided - if self.get_trimap: - if 'trimap' not in data: - data['trimap'] = self.gen_trimap( - data['alpha'], mode=self.mode).astype('float32') - data['gt_fields'].append('trimap') - if self.mode == 'val': - data['ori_trimap'] = data['trimap'].copy() - - data = self.transforms(data) - - # When evaluation, gt should not be transforms. - if self.mode == 'val': - data['gt_fields'].append('alpha') - - data['img'] = data['img'].astype('float32') - for key in data.get('gt_fields', []): - data[key] = data[key].astype('float32') - - if 'trimap' in data: - data['trimap'] = data['trimap'][np.newaxis, :, :] - if 'ori_trimap' in data: - data['ori_trimap'] = data['ori_trimap'][np.newaxis, :, :] - - data['alpha'] = data['alpha'][np.newaxis, :, :] / 255. - - return data - - def __len__(self): - return len(self.fg_bg_list) - - def composite(self, fg, alpha, ori_bg): - fg_h, fg_w = fg.shape[:2] - ori_bg_h, ori_bg_w = ori_bg.shape[:2] - - wratio = fg_w / ori_bg_w - hratio = fg_h / ori_bg_h - ratio = wratio if wratio > hratio else hratio - - # Resize ori_bg if it is smaller than fg. 
- if ratio > 1: - resize_h = math.ceil(ori_bg_h * ratio) - resize_w = math.ceil(ori_bg_w * ratio) - bg = cv2.resize( - ori_bg, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR) - else: - bg = ori_bg - - bg = bg[0:fg_h, 0:fg_w, :] - alpha = alpha / 255 - alpha = np.expand_dims(alpha, axis=2) - image = alpha * fg + (1 - alpha) * bg - image = image.astype(np.uint8) - return image, bg - - @staticmethod - def gen_trimap(alpha, mode='train', eval_kernel=7): - if mode == 'train': - k_size = random.choice(range(2, 5)) - iterations = np.random.randint(5, 15) - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, - (k_size, k_size)) - dilated = cv2.dilate(alpha, kernel, iterations=iterations) - eroded = cv2.erode(alpha, kernel, iterations=iterations) - trimap = np.zeros(alpha.shape) - trimap.fill(128) - trimap[eroded > 254.5] = 255 - trimap[dilated < 0.5] = 0 - else: - k_size = eval_kernel - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, - (k_size, k_size)) - dilated = cv2.dilate(alpha, kernel) - trimap = np.zeros(alpha.shape) - trimap.fill(128) - trimap[alpha >= 250] = 255 - trimap[dilated <= 5] = 0 - - return trimap diff --git a/spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/resnet.py b/spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/resnet.py deleted file mode 100644 index ea5fdf82fafa3058c5f00074d55fbb1e584d5865..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/resnet.py +++ /dev/null @@ -1,235 +0,0 @@ -import os -import sys -import torch -import torch.nn as nn -import math -try: - from lib.nn import SynchronizedBatchNorm2d -except ImportError: - from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d - -try: - from urllib import urlretrieve -except ImportError: - from urllib.request import urlretrieve - - -__all__ = ['ResNet', 'resnet50', 'resnet101'] # resnet101 is coming soon! 
- - -model_urls = { - 'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth', - 'resnet101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet101-imagenet.pth' -} - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = SynchronizedBatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = SynchronizedBatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = SynchronizedBatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, - padding=1, bias=False) - self.bn2 = SynchronizedBatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = SynchronizedBatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__(self, block, layers, num_classes=1000): - self.inplanes = 128 - super(ResNet, self).__init__() - self.conv1 = conv3x3(3, 64, stride=2) - self.bn1 = SynchronizedBatchNorm2d(64) - self.relu1 = nn.ReLU(inplace=True) - self.conv2 = conv3x3(64, 64) - self.bn2 = SynchronizedBatchNorm2d(64) - self.relu2 = nn.ReLU(inplace=True) - self.conv3 = conv3x3(64, 128) - self.bn3 = SynchronizedBatchNorm2d(128) - self.relu3 = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2) - self.avgpool = nn.AvgPool2d(7, stride=1) - self.fc = nn.Linear(512 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. 
/ n)) - elif isinstance(m, SynchronizedBatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, - kernel_size=1, stride=stride, bias=False), - SynchronizedBatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.relu1(self.bn1(self.conv1(x))) - x = self.relu2(self.bn2(self.conv2(x))) - x = self.relu3(self.bn3(self.conv3(x))) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - return x - -''' -def resnet18(pretrained=False, **kwargs): - """Constructs a ResNet-18 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnet18'])) - return model - - -def resnet34(pretrained=False, **kwargs): - """Constructs a ResNet-34 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnet34'])) - return model -''' - -def resnet50(pretrained=False, **kwargs): - """Constructs a ResNet-50 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnet50']), strict=False) - return model - - -def resnet101(pretrained=False, **kwargs): - """Constructs a ResNet-101 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on Places - """ - model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) - if pretrained: - model.load_state_dict(load_url(model_urls['resnet101']), strict=False) - return model - -# def resnet152(pretrained=False, **kwargs): -# """Constructs a ResNet-152 model. 
-# -# Args: -# pretrained (bool): If True, returns a model pre-trained on Places -# """ -# model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) -# if pretrained: -# model.load_state_dict(load_url(model_urls['resnet152'])) -# return model - -def load_url(url, model_dir='./pretrained', map_location=None): - if not os.path.exists(model_dir): - os.makedirs(model_dir) - filename = url.split('/')[-1] - cached_file = os.path.join(model_dir, filename) - if not os.path.exists(cached_file): - sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) - urlretrieve(url, cached_file) - return torch.load(cached_file, map_location=map_location) diff --git a/spaces/DragGan/DragGan-Inversion/gradio_utils/__init__.py b/spaces/DragGan/DragGan-Inversion/gradio_utils/__init__.py deleted file mode 100644 index 6a54920c53b4373690fd0ca59ee59159d33d1f92..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/gradio_utils/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .utils import (ImageMask, draw_mask_on_image, draw_points_on_image, - get_latest_points_pair, get_valid_mask, - on_change_single_global_state) - -__all__ = [ - 'draw_mask_on_image', 'draw_points_on_image', - 'on_change_single_global_state', 'get_latest_points_pair', - 'get_valid_mask', 'ImageMask' -] diff --git a/spaces/DragGan/DragGan/stylegan_human/dnnlib/tflib/ops/__init__.py b/spaces/DragGan/DragGan/stylegan_human/dnnlib/tflib/ops/__init__.py deleted file mode 100644 index 4ee9b7dd51bb69639724580f167b2eac39666266..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/dnnlib/tflib/ops/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2019, NVIDIA Corporation. All rights reserved. -# -# This work is made available under the Nvidia Source Code License-NC. 
-# To view a copy of this license, visit -# https://nvlabs.github.io/stylegan2/license.html - -# empty diff --git a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/STrack.h b/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/STrack.h deleted file mode 100644 index 752cbefa8f7f7f4f0aff08e0e28ff036afe7d61a..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/STrack.h +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include <opencv2/opencv.hpp> -#include "kalmanFilter.h" - -using namespace cv; -using namespace std; - -enum TrackState { New = 0, Tracked, Lost, Removed }; - -class STrack -{ -public: - STrack(vector<float> tlwh_, float score); - ~STrack(); - - vector<float> static tlbr_to_tlwh(vector<float> &tlbr); - void static multi_predict(vector<STrack*> &stracks, byte_kalman::KalmanFilter &kalman_filter); - void static_tlwh(); - void static_tlbr(); - vector<float> tlwh_to_xyah(vector<float> tlwh_tmp); - vector<float> to_xyah(); - void mark_lost(); - void mark_removed(); - int next_id(); - int end_frame(); - - void activate(byte_kalman::KalmanFilter &kalman_filter, int frame_id); - void re_activate(STrack &new_track, int frame_id, bool new_id = false); - void update(STrack &new_track, int frame_id); - -public: - bool is_activated; - int track_id; - int state; - - vector<float> _tlwh; - vector<float> tlwh; - vector<float> tlbr; - int frame_id; - int tracklet_len; - int start_frame; - - KAL_MEAN mean; - KAL_COVA covariance; - float score; - -private: - byte_kalman::KalmanFilter kalman_filter; -}; \ No newline at end of file diff --git a/spaces/ECCV2022/bytetrack/tools/track_motdt.py b/spaces/ECCV2022/bytetrack/tools/track_motdt.py deleted file mode 100644 index 303815dca938c66147ac0cfd301bb7bb11e240ae..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tools/track_motdt.py +++ /dev/null @@ -1,293 +0,0 @@ -from loguru import logger - -import torch -import torch.backends.cudnn as cudnn -from torch.nn.parallel import DistributedDataParallel as DDP - -from yolox.core import launch -from yolox.exp import get_exp -from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger -from yolox.evaluators import MOTEvaluator - -import argparse -import os -import random -import warnings -import glob -import motmetrics as mm -from collections import OrderedDict -from pathlib import Path - - -def make_parser(): - parser = argparse.ArgumentParser("YOLOX Eval") - parser.add_argument("-expn", "--experiment-name", type=str, default=None) - parser.add_argument("-n", "--name", type=str, default=None, help="model name") - - # distributed - parser.add_argument( - "--dist-backend", default="nccl", type=str, help="distributed backend" - ) - parser.add_argument( - "--dist-url", - default=None, - type=str, - help="url used to set up distributed training", - ) - parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size") - parser.add_argument( - "-d", "--devices", default=None, type=int, help="device for training" - ) - parser.add_argument( - "--local_rank", default=0, type=int, help="local rank for dist training" - ) - parser.add_argument( - "--num_machines", default=1, type=int, help="num of node for training" - ) - parser.add_argument( - "--machine_rank", default=0, type=int, help="node rank for multi-node training" - ) - parser.add_argument( - "-f", - "--exp_file", - default=None, - type=str, - help="please input your experiment description file", - ) - parser.add_argument( - "--fp16", - dest="fp16", - default=False, - action="store_true", - help="Adopting mix precision
evaluating.", - ) - parser.add_argument( - "--fuse", - dest="fuse", - default=False, - action="store_true", - help="Fuse conv and bn for testing.", - ) - parser.add_argument( - "--trt", - dest="trt", - default=False, - action="store_true", - help="Using TensorRT model for testing.", - ) - parser.add_argument( - "--test", - dest="test", - default=False, - action="store_true", - help="Evaluating on test-dev set.", - ) - parser.add_argument( - "--speed", - dest="speed", - default=False, - action="store_true", - help="speed test only.", - ) - parser.add_argument( - "opts", - help="Modify config options using the command-line", - default=None, - nargs=argparse.REMAINDER, - ) - # det args - parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval") - parser.add_argument("--conf", default=0.1, type=float, help="test conf") - parser.add_argument("--nms", default=0.7, type=float, help="test nms threshold") - parser.add_argument("--tsize", default=None, type=int, help="test img size") - parser.add_argument("--seed", default=None, type=int, help="eval seed") - # tracking args - parser.add_argument("--track_thresh", type=float, default=0.6, help="tracking confidence threshold") - parser.add_argument("--track_buffer", type=int, default=30, help="the frames for keep lost tracks") - parser.add_argument("--match_thresh", type=int, default=0.9, help="matching threshold for tracking") - parser.add_argument('--min-box-area', type=float, default=100, help='filter out tiny boxes') - # deepsort args - parser.add_argument("--model_folder", type=str, default='pretrained/googlenet_part8_all_xavier_ckpt_56.h5', help="reid model folder") - return parser - - -def compare_dataframes(gts, ts): - accs = [] - names = [] - for k, tsacc in ts.items(): - if k in gts: - logger.info('Comparing {}...'.format(k)) - accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5)) - names.append(k) - else: - logger.warning('No ground truth for {}, skipping.'.format(k)) - - return accs, names - - -@logger.catch -def main(exp, args, num_gpu): - if args.seed is not None: - random.seed(args.seed) - torch.manual_seed(args.seed) - cudnn.deterministic = True - warnings.warn( - "You have chosen to seed testing. 
This will turn on the CUDNN deterministic setting, " - ) - - is_distributed = num_gpu > 1 - - # set environment variables for distributed training - cudnn.benchmark = True - - rank = args.local_rank - # rank = get_local_rank() - - file_name = os.path.join(exp.output_dir, args.experiment_name) - - if rank == 0: - os.makedirs(file_name, exist_ok=True) - - results_folder = os.path.join(file_name, "track_results_motdt") - os.makedirs(results_folder, exist_ok=True) - model_folder = args.model_folder - - setup_logger(file_name, distributed_rank=rank, filename="val_log.txt", mode="a") - logger.info("Args: {}".format(args)) - - if args.conf is not None: - exp.test_conf = args.conf - if args.nms is not None: - exp.nmsthre = args.nms - if args.tsize is not None: - exp.test_size = (args.tsize, args.tsize) - - model = exp.get_model() - logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size))) - #logger.info("Model Structure:\n{}".format(str(model))) - - #evaluator = exp.get_evaluator(args.batch_size, is_distributed, args.test) - - val_loader = exp.get_eval_loader(args.batch_size, is_distributed, args.test) - evaluator = MOTEvaluator( - args=args, - dataloader=val_loader, - img_size=exp.test_size, - confthre=exp.test_conf, - nmsthre=exp.nmsthre, - num_classes=exp.num_classes, - ) - - torch.cuda.set_device(rank) - model.cuda(rank) - model.eval() - - if not args.speed and not args.trt: - if args.ckpt is None: - ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar") - else: - ckpt_file = args.ckpt - logger.info("loading checkpoint") - loc = "cuda:{}".format(rank) - ckpt = torch.load(ckpt_file, map_location=loc) - # load the model state dict - model.load_state_dict(ckpt["model"]) - logger.info("loaded checkpoint done.") - - if is_distributed: - model = DDP(model, device_ids=[rank]) - - if args.fuse: - logger.info("\tFusing model...") - model = fuse_model(model) - - if args.trt: - assert ( - not args.fuse and not is_distributed and args.batch_size == 1 - ), "TensorRT model is not support model fusing and distributed inferencing!" - trt_file = os.path.join(file_name, "model_trt.pth") - assert os.path.exists( - trt_file - ), "TensorRT model is not found!\n Run tools/trt.py first!" 
- model.head.decode_in_inference = False - decoder = model.head.decode_outputs - else: - trt_file = None - decoder = None - - # start evaluate - *_, summary = evaluator.evaluate_motdt( - model, is_distributed, args.fp16, trt_file, decoder, exp.test_size, results_folder, model_folder - ) - logger.info("\n" + summary) - - # evaluate MOTA - mm.lap.default_solver = 'lap' - - gt_type = '_val_half' - #gt_type = '' - print('gt_type', gt_type) - gtfiles = glob.glob( - os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type))) - print('gt_files', gtfiles) - tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if not os.path.basename(f).startswith('eval')] - - logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles))) - logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers)) - logger.info('Default LAP solver \'{}\''.format(mm.lap.default_solver)) - logger.info('Loading files.') - - gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles]) - ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1)) for f in tsfiles]) - - mh = mm.metrics.create() - accs, names = compare_dataframes(gt, ts) - - logger.info('Running metrics') - metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked', - 'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses', - 'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects'] - summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) - # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True) - # print(mm.io.render_summary( - # summary, formatters=mh.formatters, - # namemap=mm.io.motchallenge_metric_names)) - div_dict = { - 'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'], - 'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']} - for divisor in div_dict: - for divided in div_dict[divisor]: - summary[divided] = (summary[divided] / summary[divisor]) - fmt = mh.formatters - change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked', - 'partially_tracked', 'mostly_lost'] - for k in change_fmt_list: - fmt[k] = fmt['mota'] - print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names)) - - metrics = mm.metrics.motchallenge_metrics + ['num_objects'] - summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) - print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names)) - logger.info('Completed') - - -if __name__ == "__main__": - args = make_parser().parse_args() - exp = get_exp(args.exp_file, args.name) - exp.merge(args.opts) - - if not args.experiment_name: - args.experiment_name = exp.exp_name - - num_gpu = torch.cuda.device_count() if args.devices is None else args.devices - assert num_gpu <= torch.cuda.device_count() - - launch( - main, - num_gpu, - args.num_machines, - args.machine_rank, - backend=args.dist_backend, - dist_url=args.dist_url, - args=(exp, args, num_gpu), - ) diff --git a/spaces/Eddycrack864/Applio-Inference/i18n.py b/spaces/Eddycrack864/Applio-Inference/i18n.py deleted file mode 100644 index b958c6f7244c4b920e097a9a9e67e81990d03f59..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/i18n.py +++ /dev/null @@ -1,43 
+0,0 @@ -import json - -def load_language_list(language): - try: - with open(f"./i18n/locale/{language}.json", "r", encoding="utf-8") as f: - return json.load(f) - except FileNotFoundError: - raise FileNotFoundError( - f"Failed to load language file for {language}. Check if the correct .json file exists." - ) - - -class I18nAuto: - """ - A class used for internationalization using JSON language files. - - Examples - -------- - >>> i18n = I18nAuto('en_US') - >>> i18n.print() - Using Language: en_US - """ - def __init__(self, language=None): - from locale import getdefaultlocale - language = language or getdefaultlocale()[0] - if not self._language_exists(language): - language = "en_US" - - self.language_map = load_language_list(language) - self.language = language - - @staticmethod - def _language_exists(language): - from os.path import exists - return exists(f"./i18n/locale/{language}.json") - - def __call__(self, key): - """Returns the translation of the given key if it exists, else returns the key itself.""" - return self.language_map.get(key, key) - - def print(self): - """Prints the language currently in use.""" - print(f"Using Language: {self.language}") \ No newline at end of file diff --git a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_537238KB.py b/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_537238KB.py deleted file mode 100644 index a1bb530e006482704f234c2e739a695174142941..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_537238KB.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -import numpy as np -from torch import nn -import torch.nn.functional as F - -from . import layers_537238KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 64) - self.stg1_high_band_net = BaseASPPNet(2, 64) - - self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(32, 64) - - self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(64, 128) - - self.out = nn.Conv2d(128, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(64, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(64, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = 
torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_new.py b/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_new.py deleted file mode 100644 index bfaf72e48b31cc1130f2892b0973c9aa06f195a3..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_new.py +++ /dev/null @@ -1,132 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F -from . import layers_new - - -class BaseNet(nn.Module): - def __init__( - self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6)) - ): - super(BaseNet, self).__init__() - self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1) - self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1) - self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1) - self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1) - self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1) - - self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True) - - self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1) - self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1) - self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1) - self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm) - self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1) - - def __call__(self, x): - e1 = self.enc1(x) - e2 = self.enc2(e1) - e3 = self.enc3(e2) - e4 = self.enc4(e3) - e5 = self.enc5(e4) - - h = self.aspp(e5) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = torch.cat([h, self.lstm_dec2(h)], dim=1) - h = self.dec1(h, e1) - - return h - - -class CascadedNet(nn.Module): - def __init__(self, n_fft, nout=32, nout_lstm=128): - super(CascadedNet, self).__init__() - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - self.nin_lstm = self.max_bin // 2 - self.offset = 64 - - self.stg1_low_band_net = nn.Sequential( - BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0), - ) - - self.stg1_high_band_net = BaseNet( - 2, nout // 4, self.nin_lstm // 2, nout_lstm // 2 - ) - - self.stg2_low_band_net = nn.Sequential( - BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm), - layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0), - 
) - self.stg2_high_band_net = BaseNet( - nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2 - ) - - self.stg3_full_band_net = BaseNet( - 3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm - ) - - self.out = nn.Conv2d(nout, 2, 1, bias=False) - self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False) - - def forward(self, x): - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - l1_in = x[:, :, :bandw] - h1_in = x[:, :, bandw:] - l1 = self.stg1_low_band_net(l1_in) - h1 = self.stg1_high_band_net(h1_in) - aux1 = torch.cat([l1, h1], dim=2) - - l2_in = torch.cat([l1_in, l1], dim=1) - h2_in = torch.cat([h1_in, h1], dim=1) - l2 = self.stg2_low_band_net(l2_in) - h2 = self.stg2_high_band_net(h2_in) - aux2 = torch.cat([l2, h2], dim=2) - - f3_in = torch.cat([x, aux1, aux2], dim=1) - f3 = self.stg3_full_band_net(f3_in) - - mask = torch.sigmoid(self.out(f3)) - mask = F.pad( - input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux = torch.cat([aux1, aux2], dim=1) - aux = torch.sigmoid(self.aux_out(aux)) - aux = F.pad( - input=aux, - pad=(0, 0, 0, self.output_bin - aux.size()[2]), - mode="replicate", - ) - return mask, aux - else: - return mask - - def predict_mask(self, x): - mask = self.forward(x) - - if self.offset > 0: - mask = mask[:, :, :, self.offset : -self.offset] - assert mask.size()[3] > 0 - - return mask - - def predict(self, x, aggressiveness=None): - mask = self.forward(x) - pred_mag = x * mask - - if self.offset > 0: - pred_mag = pred_mag[:, :, :, self.offset : -self.offset] - assert pred_mag.size()[3] > 0 - - return pred_mag diff --git a/spaces/EuroPython2022/BayesCap/networks_SRGAN.py b/spaces/EuroPython2022/BayesCap/networks_SRGAN.py deleted file mode 100644 index cd8a30dd8deecde53f527fb81c91b78409abc390..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/BayesCap/networks_SRGAN.py +++ /dev/null @@ -1,347 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision.models as models -from torch import Tensor - -# __all__ = [ -# "ResidualConvBlock", -# "Discriminator", "Generator", -# ] - - -class ResidualConvBlock(nn.Module): - """Implements residual conv function. - - Args: - channels (int): Number of channels in the input image. - """ - - def __init__(self, channels: int) -> None: - super(ResidualConvBlock, self).__init__() - self.rcb = nn.Sequential( - nn.Conv2d(channels, channels, (3, 3), (1, 1), (1, 1), bias=False), - nn.BatchNorm2d(channels), - nn.PReLU(), - nn.Conv2d(channels, channels, (3, 3), (1, 1), (1, 1), bias=False), - nn.BatchNorm2d(channels), - ) - - def forward(self, x: Tensor) -> Tensor: - identity = x - - out = self.rcb(x) - out = torch.add(out, identity) - - return out - - -class Discriminator(nn.Module): - def __init__(self) -> None: - super(Discriminator, self).__init__() - self.features = nn.Sequential( - # input size. (3) x 96 x 96 - nn.Conv2d(3, 64, (3, 3), (1, 1), (1, 1), bias=False), - nn.LeakyReLU(0.2, True), - # state size. (64) x 48 x 48 - nn.Conv2d(64, 64, (3, 3), (2, 2), (1, 1), bias=False), - nn.BatchNorm2d(64), - nn.LeakyReLU(0.2, True), - nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1), bias=False), - nn.BatchNorm2d(128), - nn.LeakyReLU(0.2, True), - # state size. (128) x 24 x 24 - nn.Conv2d(128, 128, (3, 3), (2, 2), (1, 1), bias=False), - nn.BatchNorm2d(128), - nn.LeakyReLU(0.2, True), - nn.Conv2d(128, 256, (3, 3), (1, 1), (1, 1), bias=False), - nn.BatchNorm2d(256), - nn.LeakyReLU(0.2, True), - # state size. 
(256) x 12 x 12 - nn.Conv2d(256, 256, (3, 3), (2, 2), (1, 1), bias=False), - nn.BatchNorm2d(256), - nn.LeakyReLU(0.2, True), - nn.Conv2d(256, 512, (3, 3), (1, 1), (1, 1), bias=False), - nn.BatchNorm2d(512), - nn.LeakyReLU(0.2, True), - # state size. (512) x 6 x 6 - nn.Conv2d(512, 512, (3, 3), (2, 2), (1, 1), bias=False), - nn.BatchNorm2d(512), - nn.LeakyReLU(0.2, True), - ) - - self.classifier = nn.Sequential( - nn.Linear(512 * 6 * 6, 1024), - nn.LeakyReLU(0.2, True), - nn.Linear(1024, 1), - ) - - def forward(self, x: Tensor) -> Tensor: - out = self.features(x) - out = torch.flatten(out, 1) - out = self.classifier(out) - - return out - - -class Generator(nn.Module): - def __init__(self) -> None: - super(Generator, self).__init__() - # First conv layer. - self.conv_block1 = nn.Sequential( - nn.Conv2d(3, 64, (9, 9), (1, 1), (4, 4)), - nn.PReLU(), - ) - - # Features trunk blocks. - trunk = [] - for _ in range(16): - trunk.append(ResidualConvBlock(64)) - self.trunk = nn.Sequential(*trunk) - - # Second conv layer. - self.conv_block2 = nn.Sequential( - nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1), bias=False), - nn.BatchNorm2d(64), - ) - - # Upscale conv block. - self.upsampling = nn.Sequential( - nn.Conv2d(64, 256, (3, 3), (1, 1), (1, 1)), - nn.PixelShuffle(2), - nn.PReLU(), - nn.Conv2d(64, 256, (3, 3), (1, 1), (1, 1)), - nn.PixelShuffle(2), - nn.PReLU(), - ) - - # Output layer. - self.conv_block3 = nn.Conv2d(64, 3, (9, 9), (1, 1), (4, 4)) - - # Initialize neural network weights. - self._initialize_weights() - - def forward(self, x: Tensor, dop=None) -> Tensor: - if not dop: - return self._forward_impl(x) - else: - return self._forward_w_dop_impl(x, dop) - - # Support torch.script function. - def _forward_impl(self, x: Tensor) -> Tensor: - out1 = self.conv_block1(x) - out = self.trunk(out1) - out2 = self.conv_block2(out) - out = torch.add(out1, out2) - out = self.upsampling(out) - out = self.conv_block3(out) - - return out - - def _forward_w_dop_impl(self, x: Tensor, dop) -> Tensor: - out1 = self.conv_block1(x) - out = self.trunk(out1) - out2 = F.dropout2d(self.conv_block2(out), p=dop) - out = torch.add(out1, out2) - out = self.upsampling(out) - out = self.conv_block3(out) - - return out - - def _initialize_weights(self) -> None: - for module in self.modules(): - if isinstance(module, nn.Conv2d): - nn.init.kaiming_normal_(module.weight) - if module.bias is not None: - nn.init.constant_(module.bias, 0) - elif isinstance(module, nn.BatchNorm2d): - nn.init.constant_(module.weight, 1) - - -#### BayesCap -class BayesCap(nn.Module): - def __init__(self, in_channels=3, out_channels=3) -> None: - super(BayesCap, self).__init__() - # First conv layer. - self.conv_block1 = nn.Sequential( - nn.Conv2d( - in_channels, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - ) - - # Features trunk blocks. - trunk = [] - for _ in range(16): - trunk.append(ResidualConvBlock(64)) - self.trunk = nn.Sequential(*trunk) - - # Second conv layer. - self.conv_block2 = nn.Sequential( - nn.Conv2d( - 64, 64, - kernel_size=3, stride=1, padding=1, bias=False - ), - nn.BatchNorm2d(64), - ) - - # Output layer. 
- self.conv_block3_mu = nn.Conv2d( - 64, out_channels=out_channels, - kernel_size=9, stride=1, padding=4 - ) - self.conv_block3_alpha = nn.Sequential( - nn.Conv2d( - 64, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - nn.Conv2d( - 64, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - nn.Conv2d( - 64, 1, - kernel_size=9, stride=1, padding=4 - ), - nn.ReLU(), - ) - self.conv_block3_beta = nn.Sequential( - nn.Conv2d( - 64, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - nn.Conv2d( - 64, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - nn.Conv2d( - 64, 1, - kernel_size=9, stride=1, padding=4 - ), - nn.ReLU(), - ) - - # Initialize neural network weights. - self._initialize_weights() - - def forward(self, x: Tensor) -> Tensor: - return self._forward_impl(x) - - # Support torch.script function. - def _forward_impl(self, x: Tensor) -> Tensor: - out1 = self.conv_block1(x) - out = self.trunk(out1) - out2 = self.conv_block2(out) - out = out1 + out2 - out_mu = self.conv_block3_mu(out) - out_alpha = self.conv_block3_alpha(out) - out_beta = self.conv_block3_beta(out) - return out_mu, out_alpha, out_beta - - def _initialize_weights(self) -> None: - for module in self.modules(): - if isinstance(module, nn.Conv2d): - nn.init.kaiming_normal_(module.weight) - if module.bias is not None: - nn.init.constant_(module.bias, 0) - elif isinstance(module, nn.BatchNorm2d): - nn.init.constant_(module.weight, 1) - - -class BayesCap_noID(nn.Module): - def __init__(self, in_channels=3, out_channels=3) -> None: - super(BayesCap_noID, self).__init__() - # First conv layer. - self.conv_block1 = nn.Sequential( - nn.Conv2d( - in_channels, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - ) - - # Features trunk blocks. - trunk = [] - for _ in range(16): - trunk.append(ResidualConvBlock(64)) - self.trunk = nn.Sequential(*trunk) - - # Second conv layer. - self.conv_block2 = nn.Sequential( - nn.Conv2d( - 64, 64, - kernel_size=3, stride=1, padding=1, bias=False - ), - nn.BatchNorm2d(64), - ) - - # Output layer. - # self.conv_block3_mu = nn.Conv2d( - # 64, out_channels=out_channels, - # kernel_size=9, stride=1, padding=4 - # ) - self.conv_block3_alpha = nn.Sequential( - nn.Conv2d( - 64, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - nn.Conv2d( - 64, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - nn.Conv2d( - 64, 1, - kernel_size=9, stride=1, padding=4 - ), - nn.ReLU(), - ) - self.conv_block3_beta = nn.Sequential( - nn.Conv2d( - 64, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - nn.Conv2d( - 64, 64, - kernel_size=9, stride=1, padding=4 - ), - nn.PReLU(), - nn.Conv2d( - 64, 1, - kernel_size=9, stride=1, padding=4 - ), - nn.ReLU(), - ) - - # Initialize neural network weights. - self._initialize_weights() - - def forward(self, x: Tensor) -> Tensor: - return self._forward_impl(x) - - # Support torch.script function. 
- def _forward_impl(self, x: Tensor) -> Tensor: - out1 = self.conv_block1(x) - out = self.trunk(out1) - out2 = self.conv_block2(out) - out = out1 + out2 - # out_mu = self.conv_block3_mu(out) - out_alpha = self.conv_block3_alpha(out) - out_beta = self.conv_block3_beta(out) - return out_alpha, out_beta - - def _initialize_weights(self) -> None: - for module in self.modules(): - if isinstance(module, nn.Conv2d): - nn.init.kaiming_normal_(module.weight) - if module.bias is not None: - nn.init.constant_(module.bias, 0) - elif isinstance(module, nn.BatchNorm2d): - nn.init.constant_(module.weight, 1) \ No newline at end of file diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/file_client.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/file_client.py deleted file mode 100644 index 7f38d9796da3899048924f2f803d1088927966b0..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/file_client.py +++ /dev/null @@ -1,167 +0,0 @@ -# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py # noqa: E501 -from abc import ABCMeta, abstractmethod - - -class BaseStorageBackend(metaclass=ABCMeta): - """Abstract class of storage backends. - - All backends need to implement two apis: ``get()`` and ``get_text()``. - ``get()`` reads the file as a byte stream and ``get_text()`` reads the file - as texts. - """ - - @abstractmethod - def get(self, filepath): - pass - - @abstractmethod - def get_text(self, filepath): - pass - - -class MemcachedBackend(BaseStorageBackend): - """Memcached storage backend. - - Attributes: - server_list_cfg (str): Config file for memcached server list. - client_cfg (str): Config file for memcached client. - sys_path (str | None): Additional path to be appended to `sys.path`. - Default: None. - """ - - def __init__(self, server_list_cfg, client_cfg, sys_path=None): - if sys_path is not None: - import sys - sys.path.append(sys_path) - try: - import mc - except ImportError: - raise ImportError('Please install memcached to enable MemcachedBackend.') - - self.server_list_cfg = server_list_cfg - self.client_cfg = client_cfg - self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg) - # mc.pyvector servers as a point which points to a memory cache - self._mc_buffer = mc.pyvector() - - def get(self, filepath): - filepath = str(filepath) - import mc - self._client.Get(filepath, self._mc_buffer) - value_buf = mc.ConvertBuffer(self._mc_buffer) - return value_buf - - def get_text(self, filepath): - raise NotImplementedError - - -class HardDiskBackend(BaseStorageBackend): - """Raw hard disks storage backend.""" - - def get(self, filepath): - filepath = str(filepath) - with open(filepath, 'rb') as f: - value_buf = f.read() - return value_buf - - def get_text(self, filepath): - filepath = str(filepath) - with open(filepath, 'r') as f: - value_buf = f.read() - return value_buf - - -class LmdbBackend(BaseStorageBackend): - """Lmdb storage backend. - - Args: - db_paths (str | list[str]): Lmdb database paths. - client_keys (str | list[str]): Lmdb client keys. Default: 'default'. - readonly (bool, optional): Lmdb environment parameter. If True, - disallow any write operations. Default: True. - lock (bool, optional): Lmdb environment parameter. If False, when - concurrent access occurs, do not lock the database. Default: False. - readahead (bool, optional): Lmdb environment parameter. 
If False, - disable the OS filesystem readahead mechanism, which may improve - random read performance when a database is larger than RAM. - Default: False. - - Attributes: - db_paths (list): Lmdb database path. - _client (list): A list of several lmdb envs. - """ - - def __init__(self, db_paths, client_keys='default', readonly=True, lock=False, readahead=False, **kwargs): - try: - import lmdb - except ImportError: - raise ImportError('Please install lmdb to enable LmdbBackend.') - - if isinstance(client_keys, str): - client_keys = [client_keys] - - if isinstance(db_paths, list): - self.db_paths = [str(v) for v in db_paths] - elif isinstance(db_paths, str): - self.db_paths = [str(db_paths)] - assert len(client_keys) == len(self.db_paths), ('client_keys and db_paths should have the same length, ' - f'but received {len(client_keys)} and {len(self.db_paths)}.') - - self._client = {} - for client, path in zip(client_keys, self.db_paths): - self._client[client] = lmdb.open(path, readonly=readonly, lock=lock, readahead=readahead, **kwargs) - - def get(self, filepath, client_key): - """Get values according to the filepath from one lmdb named client_key. - - Args: - filepath (str | obj:`Path`): Here, filepath is the lmdb key. - client_key (str): Used for distinguishing differnet lmdb envs. - """ - filepath = str(filepath) - assert client_key in self._client, (f'client_key {client_key} is not ' 'in lmdb clients.') - client = self._client[client_key] - with client.begin(write=False) as txn: - value_buf = txn.get(filepath.encode('ascii')) - return value_buf - - def get_text(self, filepath): - raise NotImplementedError - - -class FileClient(object): - """A general file client to access files in different backend. - - The client loads a file or text in a specified backend from its path - and return it as a binary file. it can also register other backend - accessor with a given name and backend class. - - Attributes: - backend (str): The storage backend type. Options are "disk", - "memcached" and "lmdb". - client (:obj:`BaseStorageBackend`): The backend object. - """ - - _backends = { - 'disk': HardDiskBackend, - 'memcached': MemcachedBackend, - 'lmdb': LmdbBackend, - } - - def __init__(self, backend='disk', **kwargs): - if backend not in self._backends: - raise ValueError(f'Backend {backend} is not supported. Currently supported ones' - f' are {list(self._backends.keys())}') - self.backend = backend - self.client = self._backends[backend](**kwargs) - - def get(self, filepath, client_key='default'): - # client_key is used only for lmdb, where different fileclients have - # different lmdb environments. 
- if self.backend == 'lmdb': - return self.client.get(filepath, client_key) - else: - return self.client.get(filepath) - - def get_text(self, filepath): - return self.client.get_text(filepath) diff --git a/spaces/FroggyQc/ehartford-WizardLM-7B-Uncensored/README.md b/spaces/FroggyQc/ehartford-WizardLM-7B-Uncensored/README.md deleted file mode 100644 index 51aec6935dc9019a17abc3f59aad63092673a934..0000000000000000000000000000000000000000 --- a/spaces/FroggyQc/ehartford-WizardLM-7B-Uncensored/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Ehartford WizardLM 7B Uncensored -emoji: 🐠 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GastonMazzei/escher-inpaint-project/glide_text2im/fp16_util.py b/spaces/GastonMazzei/escher-inpaint-project/glide_text2im/fp16_util.py deleted file mode 100644 index b69341c706f17ccf9ac9b08e966d10c630c72129..0000000000000000000000000000000000000000 --- a/spaces/GastonMazzei/escher-inpaint-project/glide_text2im/fp16_util.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -Helpers to inference with 16-bit precision. -""" - -import torch.nn as nn - - -def convert_module_to_f16(l): - """ - Convert primitive modules to float16. - """ - if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): - l.weight.data = l.weight.data.half() - if l.bias is not None: - l.bias.data = l.bias.data.half() - - -def convert_module_to_f32(l): - """ - Convert primitive modules to float32, undoing convert_module_to_f16(). - """ - if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): - l.weight.data = l.weight.data.float() - if l.bias is not None: - l.bias.data = l.bias.data.float() diff --git a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/common/protein.py b/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/common/protein.py deleted file mode 100644 index 2848f5bbc52d646ddc22a8f2e1c6b4d98ae1ffce..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/common/protein.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2021 DeepMind Technologies Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Protein data type.""" -import dataclasses -import io -from typing import Any, Mapping, Optional -from alphafold.common import residue_constants -from Bio.PDB import PDBParser -import numpy as np - -FeatureDict = Mapping[str, np.ndarray] -ModelOutput = Mapping[str, Any] # Is a nested dict. - - -@dataclasses.dataclass(frozen=True) -class Protein: - """Protein structure representation.""" - - # Cartesian coordinates of atoms in angstroms. The atom types correspond to - # residue_constants.atom_types, i.e. the first three are N, CA, CB. - atom_positions: np.ndarray # [num_res, num_atom_type, 3] - - # Amino-acid type for each residue represented as an integer between 0 and - # 20, where 20 is 'X'. 
- aatype: np.ndarray # [num_res] - - # Binary float mask to indicate presence of a particular atom. 1.0 if an atom - # is present and 0.0 if not. This should be used for loss masking. - atom_mask: np.ndarray # [num_res, num_atom_type] - - # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. - residue_index: np.ndarray # [num_res] - - # B-factors, or temperature factors, of each residue (in sq. angstroms units), - # representing the displacement of the residue from its ground truth mean - # value. - b_factors: np.ndarray # [num_res, num_atom_type] - - -def from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein: - """Takes a PDB string and constructs a Protein object. - - WARNING: All non-standard residue types will be converted into UNK. All - non-standard atoms will be ignored. - - Args: - pdb_str: The contents of the pdb file - chain_id: If None, then the pdb file must contain a single chain (which - will be parsed). If chain_id is specified (e.g. A), then only that chain - is parsed. - - Returns: - A new `Protein` parsed from the pdb contents. - """ - pdb_fh = io.StringIO(pdb_str) - parser = PDBParser(QUIET=True) - structure = parser.get_structure('none', pdb_fh) - models = list(structure.get_models()) - if len(models) != 1: - raise ValueError( - f'Only single model PDBs are supported. Found {len(models)} models.') - model = models[0] - - if chain_id is not None: - chain = model[chain_id] - else: - chains = list(model.get_chains()) - if len(chains) != 1: - raise ValueError( - 'Only single chain PDBs are supported when chain_id not specified. ' - f'Found {len(chains)} chains.') - else: - chain = chains[0] - - atom_positions = [] - aatype = [] - atom_mask = [] - residue_index = [] - b_factors = [] - - for res in chain: - if res.id[2] != ' ': - raise ValueError( - f'PDB contains an insertion code at chain {chain.id} and residue ' - f'index {res.id[1]}. These are not supported.') - res_shortname = residue_constants.restype_3to1.get(res.resname, 'X') - restype_idx = residue_constants.restype_order.get( - res_shortname, residue_constants.restype_num) - pos = np.zeros((residue_constants.atom_type_num, 3)) - mask = np.zeros((residue_constants.atom_type_num,)) - res_b_factors = np.zeros((residue_constants.atom_type_num,)) - for atom in res: - if atom.name not in residue_constants.atom_types: - continue - pos[residue_constants.atom_order[atom.name]] = atom.coord - mask[residue_constants.atom_order[atom.name]] = 1. - res_b_factors[residue_constants.atom_order[atom.name]] = atom.bfactor - if np.sum(mask) < 0.5: - # If no known atom positions are reported for the residue then skip it. - continue - aatype.append(restype_idx) - atom_positions.append(pos) - atom_mask.append(mask) - residue_index.append(res.id[1]) - b_factors.append(res_b_factors) - - return Protein( - atom_positions=np.array(atom_positions), - atom_mask=np.array(atom_mask), - aatype=np.array(aatype), - residue_index=np.array(residue_index), - b_factors=np.array(b_factors)) - - -def to_pdb(prot: Protein) -> str: - """Converts a `Protein` instance to a PDB string. - - Args: - prot: The protein to convert to PDB. - - Returns: - PDB string. 
- """ - restypes = residue_constants.restypes + ['X'] - res_1to3 = lambda r: residue_constants.restype_1to3.get(restypes[r], 'UNK') - atom_types = residue_constants.atom_types - - pdb_lines = [] - - atom_mask = prot.atom_mask - aatype = prot.aatype - atom_positions = prot.atom_positions - residue_index = prot.residue_index.astype(np.int32) - b_factors = prot.b_factors - - if np.any(aatype > residue_constants.restype_num): - raise ValueError('Invalid aatypes.') - - pdb_lines.append('MODEL 1') - atom_index = 1 - chain_id = 'A' - # Add all atom sites. - for i in range(aatype.shape[0]): - res_name_3 = res_1to3(aatype[i]) - for atom_name, pos, mask, b_factor in zip( - atom_types, atom_positions[i], atom_mask[i], b_factors[i]): - if mask < 0.5: - continue - - record_type = 'ATOM' - name = atom_name if len(atom_name) == 4 else f' {atom_name}' - alt_loc = '' - insertion_code = '' - occupancy = 1.00 - element = atom_name[0] # Protein supports only C, N, O, S, this works. - charge = '' - # PDB is a columnar format, every space matters here! - atom_line = (f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}' - f'{res_name_3:>3} {chain_id:>1}' - f'{residue_index[i]:>4}{insertion_code:>1} ' - f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}' - f'{occupancy:>6.2f}{b_factor:>6.2f} ' - f'{element:>2}{charge:>2}') - pdb_lines.append(atom_line) - atom_index += 1 - - # Close the chain. - chain_end = 'TER' - chain_termination_line = ( - f'{chain_end:<6}{atom_index:>5} {res_1to3(aatype[-1]):>3} ' - f'{chain_id:>1}{residue_index[-1]:>4}') - pdb_lines.append(chain_termination_line) - pdb_lines.append('ENDMDL') - - pdb_lines.append('END') - pdb_lines.append('') - return '\n'.join(pdb_lines) - - -def ideal_atom_mask(prot: Protein) -> np.ndarray: - """Computes an ideal atom mask. - - `Protein.atom_mask` typically is defined according to the atoms that are - reported in the PDB. This function computes a mask according to heavy atoms - that should be present in the given sequence of amino acids. - - Args: - prot: `Protein` whose fields are `numpy.ndarray` objects. - - Returns: - An ideal atom mask. - """ - return residue_constants.STANDARD_ATOM_MASK[prot.aatype] - - -def from_prediction(features: FeatureDict, result: ModelOutput, - b_factors: Optional[np.ndarray] = None) -> Protein: - """Assembles a protein from a prediction. - - Args: - features: Dictionary holding model inputs. - result: Dictionary holding model outputs. - b_factors: (Optional) B-factors to use for the protein. - - Returns: - A protein instance. 
- """ - fold_output = result['structure_module'] - if b_factors is None: - b_factors = np.zeros_like(fold_output['final_atom_mask']) - - return Protein( - aatype=features['aatype'][0], - atom_positions=fold_output['final_atom_positions'], - atom_mask=fold_output['final_atom_mask'], - residue_index=features['residue_index'][0] + 1, - b_factors=b_factors) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/coder/bucketing_bbox_coder.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/coder/bucketing_bbox_coder.py deleted file mode 100644 index 92d24b4519edece7a4af8f5cfa9af025b25f2dad..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/coder/bucketing_bbox_coder.py +++ /dev/null @@ -1,350 +0,0 @@ -import mmcv -import numpy as np -import torch -import torch.nn.functional as F - -from ..builder import BBOX_CODERS -from ..transforms import bbox_rescale -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class BucketingBBoxCoder(BaseBBoxCoder): - """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL). - - Boundary Localization with Bucketing and Bucketing Guided Rescoring - are implemented here. - - Please refer to https://arxiv.org/abs/1912.04260 for more details. - - Args: - num_buckets (int): Number of buckets. - scale_factor (int): Scale factor of proposals to generate buckets. - offset_topk (int): Topk buckets are used to generate - bucket fine regression targets. Defaults to 2. - offset_upperbound (float): Offset upperbound to generate - bucket fine regression targets. - To avoid too large offset displacements. Defaults to 1.0. - cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. - Defaults to True. - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Defaults to True. - """ - - def __init__(self, - num_buckets, - scale_factor, - offset_topk=2, - offset_upperbound=1.0, - cls_ignore_neighbor=True, - clip_border=True): - super(BucketingBBoxCoder, self).__init__() - self.num_buckets = num_buckets - self.scale_factor = scale_factor - self.offset_topk = offset_topk - self.offset_upperbound = offset_upperbound - self.cls_ignore_neighbor = cls_ignore_neighbor - self.clip_border = clip_border - - def encode(self, bboxes, gt_bboxes): - """Get bucketing estimation and fine regression targets during - training. - - Args: - bboxes (torch.Tensor): source boxes, e.g., object proposals. - gt_bboxes (torch.Tensor): target of the transformation, e.g., - ground truth boxes. - - Returns: - encoded_bboxes(tuple[Tensor]): bucketing estimation - and fine regression targets and weights - """ - - assert bboxes.size(0) == gt_bboxes.size(0) - assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 - encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets, - self.scale_factor, self.offset_topk, - self.offset_upperbound, - self.cls_ignore_neighbor) - return encoded_bboxes - - def decode(self, bboxes, pred_bboxes, max_shape=None): - """Apply transformation `pred_bboxes` to `boxes`. - Args: - boxes (torch.Tensor): Basic boxes. - pred_bboxes (torch.Tensor): Predictions for bucketing estimation - and fine regression - max_shape (tuple[int], optional): Maximum shape of boxes. - Defaults to None. - - Returns: - torch.Tensor: Decoded boxes. 
- """ - assert len(pred_bboxes) == 2 - cls_preds, offset_preds = pred_bboxes - assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size( - 0) == bboxes.size(0) - decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds, - self.num_buckets, self.scale_factor, - max_shape, self.clip_border) - - return decoded_bboxes - - -@mmcv.jit(coderize=True) -def generat_buckets(proposals, num_buckets, scale_factor=1.0): - """Generate buckets w.r.t bucket number and scale factor of proposals. - - Args: - proposals (Tensor): Shape (n, 4) - num_buckets (int): Number of buckets. - scale_factor (float): Scale factor to rescale proposals. - - Returns: - tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets, - t_buckets, d_buckets) - - - bucket_w: Width of buckets on x-axis. Shape (n, ). - - bucket_h: Height of buckets on y-axis. Shape (n, ). - - l_buckets: Left buckets. Shape (n, ceil(side_num/2)). - - r_buckets: Right buckets. Shape (n, ceil(side_num/2)). - - t_buckets: Top buckets. Shape (n, ceil(side_num/2)). - - d_buckets: Down buckets. Shape (n, ceil(side_num/2)). - """ - proposals = bbox_rescale(proposals, scale_factor) - - # number of buckets in each side - side_num = int(np.ceil(num_buckets / 2.0)) - pw = proposals[..., 2] - proposals[..., 0] - ph = proposals[..., 3] - proposals[..., 1] - px1 = proposals[..., 0] - py1 = proposals[..., 1] - px2 = proposals[..., 2] - py2 = proposals[..., 3] - - bucket_w = pw / num_buckets - bucket_h = ph / num_buckets - - # left buckets - l_buckets = px1[:, None] + (0.5 + torch.arange( - 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] - # right buckets - r_buckets = px2[:, None] - (0.5 + torch.arange( - 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] - # top buckets - t_buckets = py1[:, None] + (0.5 + torch.arange( - 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] - # down buckets - d_buckets = py2[:, None] - (0.5 + torch.arange( - 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] - return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets - - -@mmcv.jit(coderize=True) -def bbox2bucket(proposals, - gt, - num_buckets, - scale_factor, - offset_topk=2, - offset_upperbound=1.0, - cls_ignore_neighbor=True): - """Generate buckets estimation and fine regression targets. - - Args: - proposals (Tensor): Shape (n, 4) - gt (Tensor): Shape (n, 4) - num_buckets (int): Number of buckets. - scale_factor (float): Scale factor to rescale proposals. - offset_topk (int): Topk buckets are used to generate - bucket fine regression targets. Defaults to 2. - offset_upperbound (float): Offset allowance to generate - bucket fine regression targets. - To avoid too large offset displacements. Defaults to 1.0. - cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. - Defaults to True. - - Returns: - tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights). - - - offsets: Fine regression targets. \ - Shape (n, num_buckets*2). - - offsets_weights: Fine regression weights. \ - Shape (n, num_buckets*2). - - bucket_labels: Bucketing estimation labels. \ - Shape (n, num_buckets*2). - - cls_weights: Bucketing estimation weights. \ - Shape (n, num_buckets*2). 
- """ - assert proposals.size() == gt.size() - - # generate buckets - proposals = proposals.float() - gt = gt.float() - (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, - d_buckets) = generat_buckets(proposals, num_buckets, scale_factor) - - gx1 = gt[..., 0] - gy1 = gt[..., 1] - gx2 = gt[..., 2] - gy2 = gt[..., 3] - - # generate offset targets and weights - # offsets from buckets to gts - l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None] - r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None] - t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None] - d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None] - - # select top-k nearset buckets - l_topk, l_label = l_offsets.abs().topk( - offset_topk, dim=1, largest=False, sorted=True) - r_topk, r_label = r_offsets.abs().topk( - offset_topk, dim=1, largest=False, sorted=True) - t_topk, t_label = t_offsets.abs().topk( - offset_topk, dim=1, largest=False, sorted=True) - d_topk, d_label = d_offsets.abs().topk( - offset_topk, dim=1, largest=False, sorted=True) - - offset_l_weights = l_offsets.new_zeros(l_offsets.size()) - offset_r_weights = r_offsets.new_zeros(r_offsets.size()) - offset_t_weights = t_offsets.new_zeros(t_offsets.size()) - offset_d_weights = d_offsets.new_zeros(d_offsets.size()) - inds = torch.arange(0, proposals.size(0)).to(proposals).long() - - # generate offset weights of top-k nearset buckets - for k in range(offset_topk): - if k >= 1: - offset_l_weights[inds, l_label[:, - k]] = (l_topk[:, k] < - offset_upperbound).float() - offset_r_weights[inds, r_label[:, - k]] = (r_topk[:, k] < - offset_upperbound).float() - offset_t_weights[inds, t_label[:, - k]] = (t_topk[:, k] < - offset_upperbound).float() - offset_d_weights[inds, d_label[:, - k]] = (d_topk[:, k] < - offset_upperbound).float() - else: - offset_l_weights[inds, l_label[:, k]] = 1.0 - offset_r_weights[inds, r_label[:, k]] = 1.0 - offset_t_weights[inds, t_label[:, k]] = 1.0 - offset_d_weights[inds, d_label[:, k]] = 1.0 - - offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1) - offsets_weights = torch.cat([ - offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights - ], - dim=-1) - - # generate bucket labels and weight - side_num = int(np.ceil(num_buckets / 2.0)) - labels = torch.stack( - [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1) - - batch_size = labels.size(0) - bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size, - -1).float() - bucket_cls_l_weights = (l_offsets.abs() < 1).float() - bucket_cls_r_weights = (r_offsets.abs() < 1).float() - bucket_cls_t_weights = (t_offsets.abs() < 1).float() - bucket_cls_d_weights = (d_offsets.abs() < 1).float() - bucket_cls_weights = torch.cat([ - bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights, - bucket_cls_d_weights - ], - dim=-1) - # ignore second nearest buckets for cls if necessary - if cls_ignore_neighbor: - bucket_cls_weights = (~((bucket_cls_weights == 1) & - (bucket_labels == 0))).float() - else: - bucket_cls_weights[:] = 1.0 - return offsets, offsets_weights, bucket_labels, bucket_cls_weights - - -@mmcv.jit(coderize=True) -def bucket2bbox(proposals, - cls_preds, - offset_preds, - num_buckets, - scale_factor=1.0, - max_shape=None, - clip_border=True): - """Apply bucketing estimation (cls preds) and fine regression (offset - preds) to generate det bboxes. - - Args: - proposals (Tensor): Boxes to be transformed. Shape (n, 4) - cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2). 
- offset_preds (Tensor): fine regression. Shape (n, num_buckets*2). - num_buckets (int): Number of buckets. - scale_factor (float): Scale factor to rescale proposals. - max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Defaults to True. - - Returns: - tuple[Tensor]: (bboxes, loc_confidence). - - - bboxes: predicted bboxes. Shape (n, 4) - - loc_confidence: localization confidence of predicted bboxes. - Shape (n,). - """ - - side_num = int(np.ceil(num_buckets / 2.0)) - cls_preds = cls_preds.view(-1, side_num) - offset_preds = offset_preds.view(-1, side_num) - - scores = F.softmax(cls_preds, dim=1) - score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True) - - rescaled_proposals = bbox_rescale(proposals, scale_factor) - - pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0] - ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1] - px1 = rescaled_proposals[..., 0] - py1 = rescaled_proposals[..., 1] - px2 = rescaled_proposals[..., 2] - py2 = rescaled_proposals[..., 3] - - bucket_w = pw / num_buckets - bucket_h = ph / num_buckets - - score_inds_l = score_label[0::4, 0] - score_inds_r = score_label[1::4, 0] - score_inds_t = score_label[2::4, 0] - score_inds_d = score_label[3::4, 0] - l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w - r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w - t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h - d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h - - offsets = offset_preds.view(-1, 4, side_num) - inds = torch.arange(proposals.size(0)).to(proposals).long() - l_offsets = offsets[:, 0, :][inds, score_inds_l] - r_offsets = offsets[:, 1, :][inds, score_inds_r] - t_offsets = offsets[:, 2, :][inds, score_inds_t] - d_offsets = offsets[:, 3, :][inds, score_inds_d] - - x1 = l_buckets - l_offsets * bucket_w - x2 = r_buckets - r_offsets * bucket_w - y1 = t_buckets - t_offsets * bucket_h - y2 = d_buckets - d_offsets * bucket_h - - if clip_border and max_shape is not None: - x1 = x1.clamp(min=0, max=max_shape[1] - 1) - y1 = y1.clamp(min=0, max=max_shape[0] - 1) - x2 = x2.clamp(min=0, max=max_shape[1] - 1) - y2 = y2.clamp(min=0, max=max_shape[0] - 1) - bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]], - dim=-1) - - # bucketing guided rescoring - loc_confidence = score_topk[:, 0] - top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1 - loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float() - loc_confidence = loc_confidence.view(-1, 4).mean(dim=1) - - return bboxes, loc_confidence diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/pipelines/compose.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/pipelines/compose.py deleted file mode 100644 index ca48f1c935755c486edc2744e1713e2b5ba3cdc8..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/pipelines/compose.py +++ /dev/null @@ -1,51 +0,0 @@ -import collections - -from mmcv.utils import build_from_cfg - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class Compose(object): - """Compose multiple transforms sequentially. - - Args: - transforms (Sequence[dict | callable]): Sequence of transform object or - config dict to be composed. 
- """ - - def __init__(self, transforms): - assert isinstance(transforms, collections.abc.Sequence) - self.transforms = [] - for transform in transforms: - if isinstance(transform, dict): - transform = build_from_cfg(transform, PIPELINES) - self.transforms.append(transform) - elif callable(transform): - self.transforms.append(transform) - else: - raise TypeError('transform must be callable or a dict') - - def __call__(self, data): - """Call function to apply transforms sequentially. - - Args: - data (dict): A result dict contains the data to transform. - - Returns: - dict: Transformed data. - """ - - for t in self.transforms: - data = t(data) - if data is None: - return None - return data - - def __repr__(self): - format_string = self.__class__.__name__ + '(' - for t in self.transforms: - format_string += '\n' - format_string += f' {t}' - format_string += '\n)' - return format_string diff --git a/spaces/GrandaddyShmax/MusicGen_Plus/tests/modules/test_codebooks_patterns.py b/spaces/GrandaddyShmax/MusicGen_Plus/tests/modules/test_codebooks_patterns.py deleted file mode 100644 index b658f4779a369f9ec8dde692a61b7f0fe3485724..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus/tests/modules/test_codebooks_patterns.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.modules.codebooks_patterns import ( - DelayedPatternProvider, - ParallelPatternProvider, - Pattern, - UnrolledPatternProvider, -) - - -class TestParallelPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == s - 1 # account for the 1st empty step - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_max_delay(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == 0 - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestDelayedPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - delays = [ - list(range(n_q)), - [0] + [1] * (n_q - 1), - [0] + [4] * (n_q - 1), - ] - for delay in delays: - provider = DelayedPatternProvider(n_q, delay) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + max(delay) + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = DelayedPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v 
in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == max(0, s - code.q - 1) - - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - @pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]]) - def test_pattern_max_delay(self, timesteps: int, delay: list): - provider = DelayedPatternProvider(len(delay), delay) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max(delay) - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestUnrolledPatternProvider: - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_get_pattern(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max_delay - - -class TestPattern: - - def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to build the sequence from the pattern without using fancy scatter.""" - bs, n_q, T = z.shape - z = z.cpu().numpy() - assert n_q == pattern.n_q - assert T <= pattern.timesteps - inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < T: - inp[:, q, s] = z[:, q, t] - return torch.from_numpy(inp) - - def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to revert the sequence from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, n_q, S = z.shape - assert pattern.n_q == n_q - inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < pattern.timesteps: - inp[:, q, t] = z[:, q, s] - return torch.from_numpy(inp) - - def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float): - """Reference method to revert the logits from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, card, n_q, S = z.shape - assert pattern.n_q == n_q - ref_layout = pattern.layout - inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy() - inp[:] = special_token - for s, v in enumerate(ref_layout[1:]): - if s < S: - for (t, q) in v: - if t < pattern.timesteps: - inp[:, :, q, t] = z[:, :, q, s] - return torch.from_numpy(inp) - - def _get_pattern_providers(self, n_q: int): - pattern_provider_1 = ParallelPatternProvider(n_q) - pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q))) - pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1)) - pattern_provider_4 = UnrolledPatternProvider( - n_q, 
flattening=list(range(n_q)), delays=[0] * n_q - ) - pattern_provider_5 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q - ) - pattern_provider_6 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1) - ) - return [ - pattern_provider_1, - pattern_provider_2, - pattern_provider_3, - pattern_provider_4, - pattern_provider_5, - pattern_provider_6, - ] - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_build_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # we can correctly build the sequence from the pattern - z = torch.randint(0, card, (bs, n_q, timesteps)) - ref_res = self.ref_build_pattern_sequence(z, pattern, special_token) - res, indexes, mask = pattern.build_pattern_sequence(z, special_token) - assert (res == ref_res).float().mean() == 1.0 - - # expected assertion fails on the number of timesteps - invalid_timesteps = [timesteps + 1] - if pattern.num_sequence_steps != pattern.timesteps: - invalid_timesteps.append(pattern.num_sequence_steps) - for i_timesteps in invalid_timesteps: - z2 = torch.randint(0, card, (bs, n_q, i_timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z2, special_token) - - # expected assertion fails on the number of codebooks - invalid_qs = [0, n_q - 1, n_q + 1] - for i_q in invalid_qs: - z3 = torch.randint(0, card, (bs, i_q, timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z3, special_token) - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_revert_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token) - # ensure our reference script retrieve the original sequence - assert z.shape == ref_out.shape - assert (z == ref_out).float().mean() == 1.0 - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_sequence(s, special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - @pytest.mark.parametrize("card", [1, 2, 256, 1024]) - def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int): - bs = 2 - special_token = card - logits_special_token = float('nan') - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - logits = torch.randn((bs, card, n_q, s.shape[-1])) - ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token) - # ensure our reference script retrieve the original sequence - assert ref_out.shape == torch.Size([bs, 
card, n_q, timesteps]) - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 diff --git a/spaces/HarryLee/eCommerceImageCaptioning/models/search.py b/spaces/HarryLee/eCommerceImageCaptioning/models/search.py deleted file mode 100644 index 568612212bdbbe787c7ab64017f8170ec67619f8..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/models/search.py +++ /dev/null @@ -1,814 +0,0 @@ -# Copyright 2022 The OFA-Sys Team. -# All rights reserved. -# This source code is licensed under the Apache 2.0 license -# found in the LICENSE file in the root directory. - -import math -from typing import List, Optional - -import torch -import torch.nn as nn -from fairseq.token_generation_constraints import ( - ConstraintState, - OrderedConstraintState, - UnorderedConstraintState, -) -from torch import Tensor - - -class Search(nn.Module): - def __init__(self, tgt_dict): - super().__init__() - self.pad = tgt_dict.pad() - self.unk = tgt_dict.unk() - self.eos = tgt_dict.eos() - self.vocab_size = len(tgt_dict) - self.src_lengths = torch.tensor(-1) - self.supports_constraints = False - self.stop_on_max_len = False - - def step( - self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None - ): - """Take a single search step. - - Args: - step: the current search step, starting at 0 - lprobs: (bsz x input_beam_size x vocab_size) - the model's log-probabilities over the vocabulary at the current step - scores: (bsz x input_beam_size x step) - the historical model scores of each hypothesis up to this point - prev_output_tokens: (bsz x step) - the previously generated oputput tokens - original_batch_idxs: (bsz) - the tensor with the batch indices, in the range [0, bsz) - this is useful in case there has been applied a re-ordering - and we need to know the orignal indices - - Return: A tuple of (scores, indices, beams) where: - scores: (bsz x output_beam_size) - the scores of the chosen elements; output_beam_size can be - larger than input_beam_size, e.g., we may return - 2*input_beam_size to account for EOS - indices: (bsz x output_beam_size) - the indices of the chosen elements - beams: (bsz x output_beam_size) - the hypothesis ids of the chosen elements, in the range [0, input_beam_size) - """ - raise NotImplementedError - - @torch.jit.export - def set_src_lengths(self, src_lengths): - self.src_lengths = src_lengths - - @torch.jit.export - def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): - """Initialize constraint states for constrained decoding (if supported). - - Args: - batch_constraints: (torch.Tensor, optional) - the list of constraints, in packed form - beam_size: (int) - the beam size - Returns: - *encoder_out* rearranged according to *new_order* - """ - pass - - def prune_sentences(self, batch_idxs: Tensor): - """ - Removes constraint states for completed sentences (if supported). - This is called from sequence_generator._generate() when sentences are - deleted from the batch. - - Args: - batch_idxs: Indices of *sentences* whose constraint state should be *kept*. - """ - pass - - def update_constraints(self, active_hypos: Tensor): - """ - Updates the constraint states by selecting the beam items that are retained. - This is called at each time step of sequence_generator._generate() when - the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size. 
- - Args: - active_hypos: (batch size, beam size) - list of integers denoting, for each sentence, which beam candidate items - should be kept. - """ - pass - - -class BeamSearch(Search): - def __init__(self, tgt_dict): - super().__init__(tgt_dict) - self.constraint_states = None - - @torch.jit.export - def step( - self, - step: int, - lprobs, - scores: Optional[Tensor], - prev_output_tokens: Optional[Tensor] = None, - original_batch_idxs: Optional[Tensor] = None, - ): - bsz, beam_size, vocab_size = lprobs.size() - - if step == 0: - # at the first step all hypotheses are equally likely, so use - # only the first beam - lprobs = lprobs[:, ::beam_size, :].contiguous() - else: - # make probs contain cumulative scores for each hypothesis - assert scores is not None - lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) - - top_prediction = torch.topk( - lprobs.view(bsz, -1), - k=min( - # Take the best 2 x beam_size predictions. We'll choose the first - # beam_size of these which don't predict eos to continue with. - beam_size * 2, - lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad - ), - ) - scores_buf = top_prediction[0] - indices_buf = top_prediction[1] - # Project back into relative indices and beams - beams_buf = indices_buf // vocab_size - indices_buf = indices_buf.fmod(vocab_size) - - # At this point, beams_buf and indices_buf are single-dim and contain relative indices - return scores_buf, indices_buf, beams_buf - - -class PrefixConstrainedBeamSearch(Search): - def __init__(self, tgt_dict, prefix_allowed_tokens_fn): - super().__init__(tgt_dict) - self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn - self.stop_on_max_len = True - - @torch.jit.export - def apply_mask(self, x, prev_output_tokens, original_batch_idxs): - beam_size = x.shape[0] // original_batch_idxs.shape[0] - original_batch_idxs = ( - original_batch_idxs.unsqueeze(-1).repeat((1, beam_size)).flatten().tolist() - ) - - mask = torch.full_like(x, -math.inf) - for sent_i, (sent, batch_i) in enumerate( - zip(prev_output_tokens, original_batch_idxs) - ): - mask[sent_i, :, self.prefix_allowed_tokens_fn(batch_i, sent)] = 0 - - return mask - - @torch.jit.export - def step( - self, - step: int, - lprobs: Tensor, - scores: Tensor, - prev_output_tokens: Tensor, - original_batch_idxs: Tensor, - ): - bsz, beam_size, vocab_size = lprobs.size() - - lprobs += self.apply_mask( - lprobs.view(bsz * beam_size, 1, vocab_size), - prev_output_tokens, - original_batch_idxs, - ).view(bsz, beam_size, vocab_size) - - if step == 0: - # at the first step all hypotheses are equally likely, so use - # only the first beam - lprobs = lprobs[:, ::beam_size, :].contiguous() - else: - # make probs contain cumulative scores for each hypothesis - assert scores is not None - lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) - - top_prediction = torch.topk( - lprobs.view(bsz, -1), - k=min( - # Take the best beam_size predictions. We'll choose the first - # beam_size of these which don't predict eos to continue with. - beam_size, - lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad - ), - ) - scores_buf = top_prediction[0] - indices_buf = top_prediction[1] - beams_buf = indices_buf // vocab_size - indices_buf = indices_buf.fmod(vocab_size) - return scores_buf, indices_buf, beams_buf - - -class LexicallyConstrainedBeamSearch(Search): - """Implements lexically constrained beam search as described in - - Fast Lexically Constrained Decoding with Dynamic Beam - Allocation for Neural Machine Translation. Post & Vilar, - NAACL 2018. 
https://www.aclweb.org/anthology/N18-1119/ - - and - - Improved Lexically Constrained Decoding for Translation and - Monolingual Rewriting. Hu et al, NAACL - 2019. https://www.aclweb.org/anthology/N19-1090/ - - This is accomplished by maintaining, for each beam hypothesis, a - ConstraintState object (see constraints.py) that tracks which - constraints have been generated and using this information to - shape the beam for each input sentence. - """ - - def __init__(self, tgt_dict, representation): - super().__init__(tgt_dict) - self.representation = representation - self.vocab_size = len(tgt_dict) - self.num_cands = 0 - self.supports_constraints = True - - @torch.jit.export - def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int): - self.constraint_states = [] - for constraint_tensor in batch_constraints: - if self.representation == "ordered": - constraint_state = OrderedConstraintState.create(constraint_tensor) - elif self.representation == "unordered": - constraint_state = UnorderedConstraintState.create(constraint_tensor) - - self.constraint_states.append([constraint_state for i in range(beam_size)]) - - @torch.jit.export - def prune_sentences(self, batch_idxs: Tensor): - self.constraint_states = [ - self.constraint_states[i] for i in batch_idxs.tolist() - ] - - @torch.jit.export - def update_constraints(self, active_hypos: Tensor): - if self.constraint_states: - batch_size = active_hypos.size(0) - for sentid in range(batch_size): - self.constraint_states[sentid] = [ - self.constraint_states[sentid][i] for i in active_hypos[sentid] - ] - - @torch.jit.export - def step( - self, - step: int, - lprobs: Tensor, - scores: Optional[Tensor], - prev_output_tokens: Optional[Tensor] = None, - original_batch_idxs: Optional[Tensor] = None, - ): - """ - A constrained step builds a large candidates list from the following: - - the top 2 * {beam_size} items over the whole beam - - for each item in the beam - - the top {each_k} (default 1) - - all next constraints - We then compute the constrained state of each beam item, and assign - stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so - on. We then sort by (stripe, score), and truncate the list at - 2 * beam size. - - Args: - step: the decoder step - lprobs: (batch size, beam size, target vocab) - the target-vocab distributions for each item in the beam. - Retrun: A tuple of (scores, indices, beams, constraints) where: - scores: (batch, output beam size) - the scores of the chosen elements - indices: (batch, output beam size) - the target vocab indices of the chosen elements - beams: (batch, output beam size) - the 0-indexed hypothesis ids of the chosen elements - constraints: (batch, output beam size) - the new constraint states - """ - each_k = 1 - device = lprobs.device - - batch_size, beam_size, vocab_size = lprobs.size() - - self.num_cands = min( - # Just take the k-best. We'll get another k from the 1-best from each - # row, plus more from the constraints - beam_size * 2, - lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad - ) - - # STEP 0: Preliminary. 
Prevent EOS for unfinished hyps across all batch items - constraint_states = self.constraint_states - if constraint_states and step > 0: - not_finished_indices = [] - for sentno, sent_constraints in enumerate(constraint_states): - for beamno, state in enumerate(sent_constraints): - index = sentno * beam_size + beamno - if not state.finished: - not_finished_indices.append(index) - not_finished_indices = torch.tensor(not_finished_indices) - if not_finished_indices.numel() > 0: - lprobs.view(batch_size * beam_size, -1)[ - not_finished_indices, self.eos - ] = -math.inf - - if step == 0: - # at the first step all hypotheses are equally likely, so use - # only the first beam entry for each batch item - lprobs = lprobs[:, ::beam_size, :].contiguous() - else: - # make probs contain cumulative scores for each hypothesis - assert scores is not None - lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1) - - top_prediction = torch.topk( - lprobs.view(batch_size, -1), - self.num_cands, - ) - scores_buf, indices_buf = top_prediction - # Project back into relative indices and beams - beams_buf = indices_buf // vocab_size - indices_buf = indices_buf.fmod(vocab_size) - - # Short circuit if there are no constraints in this batch - if not constraint_states: - return scores_buf, indices_buf, beams_buf - - # STEP 1: get top-1 from each hypothesis across all sentences in the batch - if step > 0: - top_scores, top_indices = torch.topk( - lprobs.view(batch_size * beam_size, -1), - k=each_k, - dim=1, - ) - top_scores = top_scores.view(batch_size, -1) - top_indices = top_indices.view(batch_size, -1) - scores_buf = torch.cat((scores_buf, top_scores), dim=1) - indices_buf = torch.cat((indices_buf, top_indices), dim=1) - new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1) - beams_buf = torch.cat((beams_buf, new_beams), dim=1) - - # Now, process sentences in the batch one by one. - new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device) - new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() - new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long() - for sentno, states in enumerate(constraint_states): - scores, indices, beams, new_states = self.step_sentence( - step, - sentno, - lprobs[sentno], - constraint_states[sentno], - beams_buf[sentno].clone(), - indices_buf[sentno].clone(), - scores_buf[sentno].clone(), - ) - new_scores_buf[sentno] = scores - new_indices_buf[sentno] = indices - new_beams_buf[sentno] = beams - self.constraint_states[sentno] = new_states - - return new_scores_buf, new_indices_buf, new_beams_buf - - @torch.jit.export - def step_sentence( - self, - step: int, - sentno: int, - lprobs: Tensor, - constraint_states: List[List[ConstraintState]], - beams_buf: Tensor, - indices_buf: Tensor, - scores_buf: Tensor, - ): - """Does per-sentence processing. Adds all constraints for each - hypothesis to the list of candidates; then removes duplicates, - sorts, and dynamically stripes across the banks. All tensor inputs - are collapsed to those pertaining to a single input sentence. 
- """ - device = lprobs.device - - # STEP 2: Add all constraints for each beam item - for beamno, state in enumerate(constraint_states): - next_tokens = torch.tensor(list(state.next_tokens()), device=device).long() - if next_tokens.numel() != 0: - indices_buf = torch.cat((indices_buf, next_tokens)) - next_beams = ( - torch.tensor(beamno, device=device) - .repeat(next_tokens.size(0)) - .long() - ) - beams_buf = torch.cat((beams_buf, next_beams)) - next_values = lprobs[beamno].take(next_tokens.view(-1)) - scores_buf = torch.cat((scores_buf, next_values)) - - # At the 0th time step, there is just one beam item - if step == 0: - break - - # STEP 3: Compute the "bank" for each candidate. This is the - # number of constraints it's generated. We need this so that - # we can do round-robin allocation of the beam across these - # banks. If C is the number of constraints, we select the best - # item in bank C, then the best in bank C-1, etc, followed by - # the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so - # on, until the maximum beam size. We accomplish this by - # creating a sort key and striping across the banks. - - # Compute the new states for all candidates - cands_size = indices_buf.size(0) - constraint_states = [ - constraint_states[beams_buf[i]].advance(indices_buf[i]) - for i in range(cands_size) - ] - - banks = torch.tensor([state.bank for state in constraint_states], device=device) - - # STEP 4: Sort - num_constraint_tokens = len(state.tokens) - - # Sort by keys (bank, score) (i.e., sort banks together, and scores - # within banks). AFAIK pytorch doesn't support either stable sort or - # multi-key sorting, so we have to hack this. - MAX_SCORE = -100 - sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf - sort_values, sort_indices = sort_key.sort(dim=0, descending=True) - scores_buf = scores_buf[sort_indices] - indices_buf = indices_buf[sort_indices] - beams_buf = beams_buf[sort_indices] - banks = banks[sort_indices] - - # Sort the constraints to follow suit - constraint_states = [constraint_states[i] for i in sort_indices] - - # STEP 5: Remove duplicates. The topk calls (overall and - # per-row) plus the per-row generation of constraints will - # produce duplicates. Here we remove them. - - def roll(t): - """Rolls a 1d tensor left by 1. - - [0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3] - """ - return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0) - - # We map candidates (beam, token_id) to a single dimension. - # This is then shifted by 1. We can then easily identify - # duplicates and create a mask that identifies unique - # extensions. - uniques_mask = beams_buf * (self.vocab_size + 1) + indices_buf - uniques_mask = roll(uniques_mask) != uniques_mask - - # Use the mask to pare down the data structures - scores_buf = torch.masked_select(scores_buf, uniques_mask) - indices_buf = torch.masked_select(indices_buf, uniques_mask) - beams_buf = torch.masked_select(beams_buf, uniques_mask) - banks = torch.masked_select(banks, uniques_mask) - i = 1 - for mask in uniques_mask[1:]: - if not mask: - constraint_states.pop(i) - i += mask - - # STEP 6: Assign IDs round-robin across banks, sort, and - # truncate. Now that the candidates are sorted by (bank, - # score) and uniqed, we dynamically allocate the {beam_size} - # beam by striping across the candidates. These stripes will - # be used as sort keys to do round-robin selection. This is - # accomplished in a single pass with offsets. 
Sorting by - # highest-banks (furthest-along hypotheses) first ensures - # progress through the constraints. - # - # e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0 - # OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1 - # NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7 - # = 0 5 10 1 6 11 13 2 7 12 3 8 - # - # Sorting by this then gives the following banks: - # - # 3 2 1 0 3 2 1 0 3 2 1 2 - # - # We'll take the top {beam_size} of these. - stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)] - stripes = torch.zeros_like(banks) - cur_bank_count = -1 - cur_bank = banks[0] - for i, bank in enumerate(banks): - if bank != cur_bank: - cur_bank_count = 0 - cur_bank = bank - else: - cur_bank_count += 1 - stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count] - - # STEP 7: Sort by the stripes values - sort_values, sort_indices = stripes.sort(dim=0) - scores_buf = scores_buf[sort_indices] - indices_buf = indices_buf[sort_indices] - beams_buf = beams_buf[sort_indices] - constraint_states = [constraint_states[i] for i in sort_indices] - - # STEP 8: Truncate to the candidates size! - scores_buf = scores_buf[: self.num_cands] - indices_buf = indices_buf[: self.num_cands] - beams_buf = beams_buf[: self.num_cands] - - return scores_buf, indices_buf, beams_buf, constraint_states - - -class LengthConstrainedBeamSearch(Search): - def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b): - super().__init__(tgt_dict) - self.min_len_a = min_len_a - self.min_len_b = min_len_b - self.max_len_a = max_len_a - self.max_len_b = max_len_b - self.beam = BeamSearch(tgt_dict) - self.needs_src_lengths = True - - def step( - self, - step: int, - lprobs, - scores, - prev_output_tokens: Optional[Tensor] = None, - original_batch_idxs: Optional[Tensor] = None, - ): - min_lens = self.min_len_a * self.src_lengths + self.min_len_b - max_lens = self.max_len_a * self.src_lengths + self.max_len_b - lprobs[step < min_lens, :, self.eos] = -math.inf - lprobs[step >= max_lens, :, self.eos] = 0 - return self.beam.step(step, lprobs, scores) - - -class DiverseBeamSearch(Search): - """Diverse Beam Search. - - See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence - Models" for details. - - We only implement the Hamming Diversity penalty here, which performed best - in the original paper. 
- """ - - def __init__(self, tgt_dict, num_groups, diversity_strength): - super().__init__(tgt_dict) - self.num_groups = num_groups - self.diversity_strength = -diversity_strength - self.beam = BeamSearch(tgt_dict) - - @torch.jit.export - def step( - self, - step: int, - lprobs, - scores, - prev_output_tokens: Optional[Tensor] = None, - original_batch_idxs: Optional[Tensor] = None, - ): - bsz, beam_size, vocab_size = lprobs.size() - if beam_size % self.num_groups != 0: - raise ValueError( - "DiverseBeamSearch requires --beam to be divisible by the number of groups" - ) - - # initialize diversity penalty - diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs) - - scores_G, indices_G, beams_G = [], [], [] - for g in range(self.num_groups): - lprobs_g = lprobs[:, g :: self.num_groups, :] - scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None - - # apply diversity penalty - if g > 0: - lprobs_g = torch.add( - lprobs_g, - other=diversity_buf.unsqueeze(1), - alpha=self.diversity_strength, - ) - else: - lprobs_g = lprobs_g.contiguous() - - scores_buf, indices_buf, beams_buf = self.beam.step( - step, lprobs_g, scores_g - ) - beams_buf.mul_(self.num_groups).add_(g) - - scores_G.append(scores_buf.clone()) - indices_G.append(indices_buf.clone()) - beams_G.append(beams_buf.clone()) - - # update diversity penalty - diversity_buf.scatter_add_( - 1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf) - ) - - # interleave results from different groups - scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1) - indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1) - beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1) - return scores_buf, indices_buf, beams_buf - - -class Sampling(Search): - sampling_topk: int - sampling_topp: float - - def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0): - super().__init__(tgt_dict) - self.sampling_topk = sampling_topk - self.sampling_topp = sampling_topp - - def _sample_topp(self, lprobs): - """Sample among the smallest set of elements whose cumulative probability mass exceeds p. - - See `"The Curious Case of Neural Text Degeneration" - (Holtzman et al., 2019) `_. - - Args: - lprobs: (bsz x input_beam_size x vocab_size) - the model's log-probabilities over the vocabulary at the current step - - Return: A tuple of (trimed_probs, truncated_indices) where: - trimed_probs: (bsz x input_beam_size x ?) - the model's probabilities over the elements selected to sample from. The - width of the third dimension is determined by top-P. - truncated_indices: (bsz x input_beam_size x ?) - the indices of the chosen elements. - """ - probs = lprobs.exp_() - - # sort the last dimension (vocab dimension) in descending order - sorted_probs, sorted_indices = probs.sort(descending=True) - - # compute a mask to indicate the words to be included in the top-P set. - cumsum_probs = sorted_probs.cumsum(dim=2) - mask = cumsum_probs.lt(self.sampling_topp) - - # note that mask was computed by 'lt'. One more word needs to be included - # so that the cumulative probability mass can exceed p. - cumsum_mask = mask.cumsum(dim=2) - last_included = cumsum_mask[:, :, -1:] - last_included.clamp_(0, mask.size()[2] - 1) - mask = mask.scatter_(2, last_included, 1) - - # truncate unnecessary dims. 
- max_dim = last_included.max() - truncated_mask = mask[:, :, : max_dim + 1] - truncated_probs = sorted_probs[:, :, : max_dim + 1] - truncated_indices = sorted_indices[:, :, : max_dim + 1] - - # trim the words that are not in top-P by setting their probabilities - # to 0, so that they would not be sampled later. - trim_mask = ~truncated_mask - trimed_probs = truncated_probs.masked_fill_(trim_mask, 0) - return trimed_probs, truncated_indices - - @torch.jit.export - def step( - self, - step: int, - lprobs, - scores, - prev_output_tokens: Optional[Tensor] = None, - original_batch_idxs: Optional[Tensor] = None, - ): - bsz, beam_size, vocab_size = lprobs.size() - - if step == 0: - # at the first step all hypotheses are equally likely, so use - # only the first beam - lprobs = lprobs[:, ::beam_size, :].contiguous() - - if self.sampling_topp > 0: - # only sample from the smallest set of words whose cumulative probability mass exceeds p - probs, top_indices = self._sample_topp(lprobs) - elif self.sampling_topk > 0: - # only sample from top-k candidates - lprobs, top_indices = lprobs.topk(self.sampling_topk) - probs = lprobs.exp_() - else: - probs = lprobs.exp_() - - # dummy data to be consistent with true branch for type check - top_indices = torch.empty(0).to(probs) - # sample - if step == 0: - indices_buf = torch.multinomial( - probs.view(bsz, -1), - beam_size, - replacement=True, - ).view(bsz, beam_size) - else: - indices_buf = torch.multinomial( - probs.view(bsz * beam_size, -1), - 1, - replacement=True, - ).view(bsz, beam_size) - - if step == 0: - # expand to beam size - probs = probs.expand(bsz, beam_size, -1) - - # gather scores - scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1)) - scores_buf = scores_buf.log_().view(bsz, -1) - - # remap indices if using top-k or top-P sampling - if self.sampling_topk > 0 or self.sampling_topp > 0: - indices_buf = torch.gather( - top_indices.expand(bsz, beam_size, -1), - dim=2, - index=indices_buf.unsqueeze(-1), - ).squeeze(2) - - if step == 0: - beams_buf = indices_buf.new_zeros(bsz, beam_size) - else: - beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1) - # make scores cumulative - scores_buf.add_( - torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf) - ) - - return scores_buf, indices_buf, beams_buf - - -class DiverseSiblingsSearch(Search): - """ - Beam search with diverse siblings. - - See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details. - https://arxiv.org/abs/1611.08562 - - 1/ Calculate hypotheses for each beam - 2/ Intra-sibling ordering - 3/ Rewrite scores - 4/ Choose top K hypotheses - - if diversity_rate == 0 is equivalent to BeamSearch - """ - - def __init__(self, tgt_dict, diversity_rate): - super().__init__(tgt_dict) - self.diversity_rate = diversity_rate - self.beam = BeamSearch(tgt_dict) - - def step( - self, - step: int, - lprobs, - scores, - prev_output_tokens: Optional[Tensor] = None, - original_batch_idxs: Optional[Tensor] = None, - ): - bsz, beam_size, vocab_size = lprobs.size() - k = min( - # Take the best 2 x beam_size predictions. We'll choose the first - # beam_size of these which don't predict eos to continue with. 
- beam_size * 2, - lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad - ) - s_list: List[Tensor] - i_list: List[Tensor] - s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)] - i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)] - sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate - - if step == 0: - return self.beam.step(step, lprobs, scores) - lprobs.add_(scores[:, :, step - 1].unsqueeze(-1)) - - # 1/ Calculate hypotheses for each beam - for i in range(beam_size): - torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i])) - i_list[i].fmod_(vocab_size) - - # 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores - s_list[i].sub_(sibling_score) - - # 4/ Choose top K hypotheses - indices = torch.stack(i_list, dim=1).view(bsz, -1) - - final_scores = torch.empty(0).to(lprobs) - final_indices = torch.LongTensor().to(device=lprobs.device) - final_beams = torch.LongTensor().to(device=lprobs.device) - (final_scores, final_indices) = torch.topk( - torch.stack(s_list, dim=1).view(bsz, -1), - k, - ) - - final_beams = final_indices // k - - for i in range(bsz): - final_indices[i] = indices[i][final_indices[i]] - - return final_scores, final_indices, final_beams diff --git a/spaces/Hoodady/3DFuse/my/utils/seed.py b/spaces/Hoodady/3DFuse/my/utils/seed.py deleted file mode 100644 index e3e81fad6c7610d11ec8d847f9a61a4e6675ecc4..0000000000000000000000000000000000000000 --- a/spaces/Hoodady/3DFuse/my/utils/seed.py +++ /dev/null @@ -1,21 +0,0 @@ -# from pytorch lightning -import random -import numpy as np -import torch - -max_seed_value = np.iinfo(np.uint32).max -min_seed_value = np.iinfo(np.uint32).min - - -def seed_everything(seed=None): - seed = int(seed) - - if not (min_seed_value <= seed <= max_seed_value): - raise ValueError(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}") - - print(f"seed set to {seed}") - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - return seed diff --git a/spaces/Intel/intel-xai-tools-cam-demo/app.py b/spaces/Intel/intel-xai-tools-cam-demo/app.py deleted file mode 100644 index 97f40cbdc4eafff12ed9c0711d7026d4c4ee259d..0000000000000000000000000000000000000000 --- a/spaces/Intel/intel-xai-tools-cam-demo/app.py +++ /dev/null @@ -1,309 +0,0 @@ -import gradio as gr -import torch -import cv2 - -### CAM explainer code from Intel XAI tools (https://github.com/IntelAI/intel-xai-tools) ### -class XGradCAM: - def __init__(self, model, targetLayer, targetClass, image, dims, device): - - # set any frozen layers to trainable - # gradcam cannot be calculated without it - for param in model.parameters(): - if not param.requires_grad: - param.requires_grad = True - - self.model = model - self.targetLayer = targetLayer - self.targetClass = targetClass - self.image = image - self.dims = dims - self.device = device - - def visualize(self): - from pytorch_grad_cam import XGradCAM, GuidedBackpropReLUModel - from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget - from pytorch_grad_cam.utils.image import show_cam_on_image, deprocess_image, preprocess_image - import torch - import cv2 - import numpy as np - import matplotlib.pyplot as plt - - self.model.eval().to(self.device) - - image = cv2.resize(self.image, self.dims) - # convert to rgb if image is grayscale - converted = False - if len(image.shape) == 2: - converted = True - image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) - - rgb_img = 
np.float32(image) / 255 - input_tensor = preprocess_image(rgb_img, - mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225]) - input_tensor = input_tensor.to(self.device) - - self.targetLayer = [self.targetLayer] - - if self.targetClass is None: - targets = None - else: - targets = [ClassifierOutputTarget(self.targetClass)] - - cam = XGradCAM(self.model, self.targetLayer, use_cuda=torch.cuda.is_available()) - - # convert back to grayscale if that is the initial dim - if converted: - input_tensor = input_tensor[:, 0:1, :, :] - - grayscale_cam = cam(input_tensor=input_tensor, targets=targets, aug_smooth=False, - eigen_smooth=False) - grayscale_cam = grayscale_cam[0, :] - cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True) - cam_image = cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR) - - gb_model = GuidedBackpropReLUModel(model=self.model, use_cuda=torch.cuda.is_available()) - gb = gb_model(input_tensor, target_category=None) - cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam]) - cam_gb = deprocess_image(cam_mask * gb) - gb = deprocess_image(gb) - - print("XGradCAM, Guided backpropagation, and Guided XGradCAM are generated. ") - - return cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR) - -class EigenCAM: - def __init__(self, model, targetLayer, boxes, classes, colors, reshape, image, device): - self.model = model - self.targetLayer = targetLayer - self.boxes = boxes - self.classes = classes - self.colors = colors - self.reshape = reshape - self.image = image - self.device = device - - def visualize(self): - from pytorch_grad_cam import EigenCAM - from pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image, scale_cam_image - import torchvision - import torch - import cv2 - import numpy as np - - self.model.eval().to(self.device) - - rgb_img = np.float32(self.image) / 255 - transform = torchvision.transforms.ToTensor() - input_tensor = transform(rgb_img) - input_tensor = input_tensor.unsqueeze(0) - input_tensor = input_tensor.to(self.device) - - self.targetLayer = [self.targetLayer] - - if self.reshape is None: - cam = EigenCAM(self.model, self.targetLayer, use_cuda=torch.cuda.is_available()) - else: - cam = EigenCAM(self.model, self.targetLayer, use_cuda=torch.cuda.is_available(), - reshape_transform=self.reshape) - targets = [] - grayscale_cam = cam(input_tensor=input_tensor, targets=targets, aug_smooth=False, - eigen_smooth=False) - grayscale_cam = grayscale_cam[0, :] - cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True) - - renormalized_cam = np.zeros(grayscale_cam.shape, dtype=np.float32) - for x1, y1, x2, y2 in self.boxes: - renormalized_cam[y1:y2, x1:x2] = scale_cam_image(grayscale_cam[y1:y2, x1:x2].copy()) - renormalized_cam = scale_cam_image(renormalized_cam) - eigencam_image_renormalized = show_cam_on_image(rgb_img, renormalized_cam, use_rgb=True) - for i, box in enumerate(self.boxes): - color = self.colors[i] - cv2.rectangle( - eigencam_image_renormalized, - (box[0], box[1]), - (box[2], box[3]), - color, 2 - ) - cv2.putText(eigencam_image_renormalized, self.classes[i], (box[0], box[1] - 5), - cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2, - lineType=cv2.LINE_AA) - - print("EigenCAM is generated. 
") - - return eigencam_image_renormalized - -### For Gradio Demo ### -def xgradcam(image, model_code, target_class): - global model, target_layer - exec(model_code, globals()) - if target_class == "": - target_class = None - else: - target_class = int(target_class) - image_dims = (224, 224) - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - xgradcam = XGradCAM(model, target_layer, target_class, image, image_dims, device) - - return xgradcam.visualize() - -def eigencam(image, model_code, class_code, process_code, reshape_code): - global input_image, model, target_layer, bounding_box_coordinates, class_names, box_colors, reshape - input_image = cv2.resize(image, (640, 640)) - exec(model_code, globals()) - exec(class_code, globals()) - exec(process_code, globals()) - exec(reshape_code, globals()) - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - eigencam = EigenCAM(model, target_layer, bounding_box_coordinates, class_names, box_colors, reshape, input_image, device) - - return eigencam.visualize() - -with gr.Blocks() as demo: - gr.Markdown( - """ - # Class Activation Mapping (CAM) Explainer Demo - This is a demo for CAM explainer from Intel XAI tools (https://github.com/IntelAI/intel-xai-tools). \ - CAM is an approach which localizes regions in the image responsible for a class prediction. \ - The demo shows visualization of XGradCAM for object classification model and EigenCAM for object detection model. - """ - ) - - with gr.Tab("XGradCAM"): - with gr.Row(): - with gr.Column(): - xgradcam_image = gr.Image(label="Input Image") - gr.Markdown( - """ - Load the pretrained model to the variable model depending on how it was saved. Then, specify target_layer (normally the last convolutional layer) to compute CAM for. \ - Here are some common choices: - - FasterRCNN: model.backbone - - ResNet18 and 50: model.layer4 - - VGG and DenseNet161: model.features - - Please don't change the variable names in the following code. - """ - ) - xgradcam_model = gr.Code(label="Model and Target Layer", value= - """ - from torchvision.models import resnet50, ResNet50_Weights - - model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2) - target_layer = model.layer4 - """, language="python") - gr.Markdown( - """ - Enter the target category as an integer to compute CAM for. It is the category index in the range [0, NUM_OF_CLASSES-1] based on the training dataset. \ - If it is left blank, the highest scoring category will be used. - """ - ) - xgradcam_targetClass = gr.Textbox(label="Target Category") - xgradcam_output = gr.Image() - xgradcam_button = gr.Button("Submit") - - with gr.Tab("EigenCAM"): - with gr.Row(): - with gr.Column(): - eigencam_image = gr.Image(label="Input Image") - gr.Markdown( - """ - Load the pretrained model to the variable model depending on how it was saved. Then, specify target_layer (normally the last convolutional layer) to compute CAM for. \ - Here are some common choices: - - FasterRCNN: model.backbone - - ResNet18 and 50: model.layer4 - - VGG and DenseNet161: model.features - - Please don't change the variable names in the following code. 
- """ - ) - eigencam_model = gr.Code(label="Model and Target Layer", value= - """ - from torchvision.models.detection import fasterrcnn_resnet50_fpn - - model = fasterrcnn_resnet50_fpn(pretrained=True).eval() - target_layer = model.backbone - """, language="python") - gr.Markdown( - """ - In the case there is no class name in the output from the model, specify class_labels as a list to print them with corresponding bounding box in the image. \ - Depending on the model, the class name might not be needed (e.g. YOLO). Then, create color as a list with a size of the number of classes. - """ - ) - eigencam_class = gr.Code(label="Class Name", value= - """ - import numpy as np - - class_labels = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', - 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', - 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', - 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', - 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', - 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', - 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass', 'cup', 'fork', - 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', - 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'N/A', 'dining table', 'N/A', 'N/A', 'toilet', - 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', 'clock', 'vase', - 'scissors', 'teddy bear', 'hair drier', 'toothbrush'] - color = np.random.uniform(0, 255, size=(len(class_labels), 3)) - """, language="python") - gr.Markdown( - """ - Get output of the model (in the case of FasterRCNN, convert input_image to a tensor first). Then, write a custom process_output function to process the outputs from the model. \ - You should get bounding_box_coordinates, class_names, and box_colors of the detected objects with a higher detection score than detection_threshold value. \ - If you use other models than FasterRCNN, you need to make your own custom process function to match the structure of the outputs from this function. - """ - ) - eigencam_process = gr.Code(label="Output Processing", value= - """ - import torchvision - - transform = torchvision.transforms.ToTensor() - input_tensor = transform(np.float32(input_image) / 255).unsqueeze(0) - output = model(input_tensor)[0] - - def process_output(output, class_labels, color, detection_threshold): - boxes, classes, labels, colors = [], [], [], [] - box = output['boxes'].tolist() - name = [class_labels[i] for i in output['labels'].detach().numpy()] - label = output['labels'].detach().numpy() - for i in range(len(name)): - score = output['scores'].detach().numpy()[i] - if score < detection_threshold: - continue - boxes.append([int(b) for b in box[i]]) - classes.append(name[i]) - colors.append(color[label[i]]) - - return boxes, classes, colors - - detection_threshold = 0.9 - bounding_box_coordinates, class_names, box_colors = process_output(output, class_labels, color, detection_threshold) - """, language="python") - gr.Markdown( - """ - Write a custom reshape function to get the activations from the model and process them into 2D format. 
\ - For example, the backbone of FasterRCNN outputs 5 different tenors with different spatial size as an Ordered Dict, \ - thus, we need a custom function which aggregates these image tensors, resizes them to a common shape, and concatenates them. \ - If you use other models than FasterRCNN, you need to write your own custom reshape function. - """ - ) - eigencam_reshape = gr.Code(label="Reshape", value= - """ - def reshape(x): - target_size = x['pool'].size()[-2 : ] - activations = [] - for key, value in x.items(): - activations.append(torch.nn.functional.interpolate(torch.abs(value), target_size, mode='bilinear')) - activations = torch.cat(activations, axis=1) - - return activations - """, language="python") - eigencam_output = gr.Image() - eigencam_button = gr.Button("Submit") - - xgradcam_button.click(xgradcam, inputs=[xgradcam_image, xgradcam_model, xgradcam_targetClass], outputs=xgradcam_output) - eigencam_button.click(eigencam, inputs=[eigencam_image, eigencam_model, eigencam_class, eigencam_process, eigencam_reshape], outputs=eigencam_output) - -demo.launch() diff --git a/spaces/Jackflack09/diffuse-custom/README_HG.md b/spaces/Jackflack09/diffuse-custom/README_HG.md deleted file mode 100644 index 99a0776d1a4669fa8387cc77e162c60084100a92..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/README_HG.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Anything V3.0 -emoji: 🏃 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.10.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_lms_discrete.py b/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_lms_discrete.py deleted file mode 100644 index 28bc9bd0c608650ba67982b4eb408bab9c215ba1..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_lms_discrete.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright 2022 Katherine Crowson and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import warnings -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch - -from scipy import integrate - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS, BaseOutput -from .scheduling_utils import SchedulerMixin - - -@dataclass -# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete -class LMSDiscreteSchedulerOutput(BaseOutput): - """ - Output class for the scheduler's step function output. - - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. 
- pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - The predicted denoised sample (x_{0}) based on the model output from the current timestep. - `pred_original_sample` can be used to preview progress or for guidance. - """ - - prev_sample: torch.FloatTensor - pred_original_sample: Optional[torch.FloatTensor] = None - - -class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin): - """ - Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by - Katherine Crowson: - https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear` or `scaled_linear`. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - prediction_type (`str`, default `epsilon`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 - https://imagen.research.google/video/paper.pdf) - """ - - _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy() - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[Union[np.ndarray, List[float]]] = None, - prediction_type: str = "epsilon", - ): - if trained_betas is not None: - self.betas = torch.tensor(trained_betas, dtype=torch.float32) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. 
- self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) - self.sigmas = torch.from_numpy(sigmas) - - # standard deviation of the initial noise distribution - self.init_noise_sigma = self.sigmas.max() - - # setable values - self.num_inference_steps = None - timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy() - self.timesteps = torch.from_numpy(timesteps) - self.derivatives = [] - self.is_scale_input_called = False - - def scale_model_input( - self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] - ) -> torch.FloatTensor: - """ - Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm. - - Args: - sample (`torch.FloatTensor`): input sample - timestep (`float` or `torch.FloatTensor`): the current timestep in the diffusion chain - - Returns: - `torch.FloatTensor`: scaled input sample - """ - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - step_index = (self.timesteps == timestep).nonzero().item() - sigma = self.sigmas[step_index] - sample = sample / ((sigma**2 + 1) ** 0.5) - self.is_scale_input_called = True - return sample - - def get_lms_coefficient(self, order, t, current_order): - """ - Compute a linear multistep coefficient. - - Args: - order (TODO): - t (TODO): - current_order (TODO): - """ - - def lms_derivative(tau): - prod = 1.0 - for k in range(order): - if current_order == k: - continue - prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k]) - return prod - - integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0] - - return integrated_coeff - - def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): - """ - Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - device (`str` or `torch.device`, optional): - the device to which the timesteps should be moved to. If `None`, the timesteps are not moved. 
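
Because the `get_lms_coefficient` docstring above still has TODO argument descriptions, a hedged standalone sketch may help: it rebuilds the training sigma grid from the scheduler's default betas, interpolates it down to an arbitrary 50 inference steps exactly as `set_timesteps` does below, and evaluates one linear-multistep coefficient the same way as the method above. Nothing here is new behaviour; the constants are the scheduler defaults.

import numpy as np
import torch
from scipy import integrate

num_train_timesteps, num_inference_steps = 1000, 50
betas = torch.linspace(0.0001, 0.02, num_train_timesteps)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

train_sigmas = np.array(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5)
timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps)[::-1].copy()
sigmas = np.interp(timesteps, np.arange(len(train_sigmas)), train_sigmas)
sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)  # length 51, ends at 0

def lms_coefficient(order, t, current_order):
    # Integrate the Lagrange basis polynomial for the `current_order`-th
    # previous derivative over [sigmas[t], sigmas[t + 1]].
    def lms_derivative(tau):
        prod = 1.0
        for k in range(order):
            if current_order == k:
                continue
            prod *= (tau - sigmas[t - k]) / (sigmas[t - current_order] - sigmas[t - k])
        return prod
    return integrate.quad(lms_derivative, sigmas[t], sigmas[t + 1], epsrel=1e-4)[0]

coeffs = [lms_coefficient(4, 10, c) for c in range(4)]  # 4th-order step at t=10

`step` then combines these coefficients with the stored derivatives to form the previous sample.
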
- """ - self.num_inference_steps = num_inference_steps - - timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy() - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) - sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) - - self.sigmas = torch.from_numpy(sigmas).to(device=device) - if str(device).startswith("mps"): - # mps does not support float64 - self.timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) - else: - self.timesteps = torch.from_numpy(timesteps).to(device=device) - - self.derivatives = [] - - def step( - self, - model_output: torch.FloatTensor, - timestep: Union[float, torch.FloatTensor], - sample: torch.FloatTensor, - order: int = 4, - return_dict: bool = True, - ) -> Union[LMSDiscreteSchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`float`): current timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - order: coefficient for multi-step inference. - return_dict (`bool`): option for returning tuple rather than LMSDiscreteSchedulerOutput class - - Returns: - [`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. - When returning a tuple, the first element is the sample tensor. - - """ - if not self.is_scale_input_called: - warnings.warn( - "The `scale_model_input` function should be called before `step` to ensure correct denoising. " - "See `StableDiffusionPipeline` for a usage example." - ) - - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - step_index = (self.timesteps == timestep).nonzero().item() - sigma = self.sigmas[step_index] - - # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise - if self.config.prediction_type == "epsilon": - pred_original_sample = sample - sigma * model_output - elif self.config.prediction_type == "v_prediction": - # * c_out + input * c_skip - pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" - ) - - # 2. Convert to an ODE derivative - derivative = (sample - pred_original_sample) / sigma - self.derivatives.append(derivative) - if len(self.derivatives) > order: - self.derivatives.pop(0) - - # 3. Compute linear multistep coefficients - order = min(step_index + 1, order) - lms_coeffs = [self.get_lms_coefficient(order, step_index, curr_order) for curr_order in range(order)] - - # 4. 
Compute previous sample based on the derivatives path - prev_sample = sample + sum( - coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives)) - ) - - if not return_dict: - return (prev_sample,) - - return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.FloatTensor, - ) -> torch.FloatTensor: - # Make sure sigmas and timesteps have the same device and dtype as original_samples - sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) - if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): - # mps does not support float64 - schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) - timesteps = timesteps.to(original_samples.device, dtype=torch.float32) - else: - schedule_timesteps = self.timesteps.to(original_samples.device) - timesteps = timesteps.to(original_samples.device) - - step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] - - sigma = sigmas[step_indices].flatten() - while len(sigma.shape) < len(original_samples.shape): - sigma = sigma.unsqueeze(-1) - - noisy_samples = original_samples + noise * sigma - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/JavierIA/gccopen/models/common.py b/spaces/JavierIA/gccopen/models/common.py deleted file mode 100644 index 111af708dea55cb11c8da3bb22d69e659ee78925..0000000000000000000000000000000000000000 --- a/spaces/JavierIA/gccopen/models/common.py +++ /dev/null @@ -1,2019 +0,0 @@ -import math -from copy import copy -from pathlib import Path - -import numpy as np -import pandas as pd -import requests -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchvision.ops import DeformConv2d -from PIL import Image -from torch.cuda import amp - -from utils.datasets import letterbox -from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh -from utils.plots import color_list, plot_one_box -from utils.torch_utils import time_synchronized - - -##### basic #### - -def autopad(k, p=None): # kernel, padding - # Pad to 'same' - if p is None: - p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad - return p - - -class MP(nn.Module): - def __init__(self, k=2): - super(MP, self).__init__() - self.m = nn.MaxPool2d(kernel_size=k, stride=k) - - def forward(self, x): - return self.m(x) - - -class SP(nn.Module): - def __init__(self, k=3, s=1): - super(SP, self).__init__() - self.m = nn.MaxPool2d(kernel_size=k, stride=s, padding=k // 2) - - def forward(self, x): - return self.m(x) - - -class ReOrg(nn.Module): - def __init__(self): - super(ReOrg, self).__init__() - - def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1) - - -class Concat(nn.Module): - def __init__(self, dimension=1): - super(Concat, self).__init__() - self.d = dimension - - def forward(self, x): - return torch.cat(x, self.d) - - -class Chuncat(nn.Module): - def __init__(self, dimension=1): - super(Chuncat, self).__init__() - self.d = dimension - - def forward(self, x): - x1 = [] - x2 = [] - for xi in x: - xi1, xi2 = xi.chunk(2, self.d) - x1.append(xi1) - x2.append(xi2) - return torch.cat(x1+x2, self.d) - - -class Shortcut(nn.Module): - def __init__(self, dimension=0): - 
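
The `ReOrg` module above (and the `Focus` module later in this file) implements a space-to-depth rearrangement: each 2x2 spatial block is moved into the channel dimension, halving H and W and quadrupling C. The shape check below, together with the equivalent `pixel_unshuffle` call (same shape, different channel ordering), is illustrative only and not part of the deleted file.

import torch

x = torch.randn(1, 3, 8, 8)
y = torch.cat([x[..., ::2, ::2],     # even rows, even cols
               x[..., 1::2, ::2],    # odd rows, even cols
               x[..., ::2, 1::2],    # even rows, odd cols
               x[..., 1::2, 1::2]],  # odd rows, odd cols
              dim=1)
assert y.shape == (1, 12, 4, 4)

# The same rearrangement via pixel_unshuffle (channel ordering differs).
z = torch.nn.functional.pixel_unshuffle(x, downscale_factor=2)
assert z.shape == (1, 12, 4, 4)
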
super(Shortcut, self).__init__() - self.d = dimension - - def forward(self, x): - return x[0]+x[1] - - -class Foldcut(nn.Module): - def __init__(self, dimension=0): - super(Foldcut, self).__init__() - self.d = dimension - - def forward(self, x): - x1, x2 = x.chunk(2, self.d) - return x1+x2 - - -class Conv(nn.Module): - # Standard convolution - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Conv, self).__init__() - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) - - def forward(self, x): - return self.act(self.bn(self.conv(x))) - - def fuseforward(self, x): - return self.act(self.conv(x)) - - -class RobustConv(nn.Module): - # Robust convolution (use high kernel size 7-11 for: downsampling and other layers). Train for 300 - 450 epochs. - def __init__(self, c1, c2, k=7, s=1, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups - super(RobustConv, self).__init__() - self.conv_dw = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act) - self.conv1x1 = nn.Conv2d(c1, c2, 1, 1, 0, groups=1, bias=True) - self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None - - def forward(self, x): - x = x.to(memory_format=torch.channels_last) - x = self.conv1x1(self.conv_dw(x)) - if self.gamma is not None: - x = x.mul(self.gamma.reshape(1, -1, 1, 1)) - return x - - -class RobustConv2(nn.Module): - # Robust convolution 2 (use [32, 5, 2] or [32, 7, 4] or [32, 11, 8] for one of the paths in CSP). - def __init__(self, c1, c2, k=7, s=4, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups - super(RobustConv2, self).__init__() - self.conv_strided = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act) - self.conv_deconv = nn.ConvTranspose2d(in_channels=c1, out_channels=c2, kernel_size=s, stride=s, - padding=0, bias=True, dilation=1, groups=1 - ) - self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None - - def forward(self, x): - x = self.conv_deconv(self.conv_strided(x)) - if self.gamma is not None: - x = x.mul(self.gamma.reshape(1, -1, 1, 1)) - return x - - -def DWConv(c1, c2, k=1, s=1, act=True): - # Depthwise convolution - return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) - - -class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super(GhostConv, self).__init__() - c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) - - def forward(self, x): - y = self.cv1(x) - return torch.cat([y, self.cv2(y)], 1) - - -class Stem(nn.Module): - # Stem - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Stem, self).__init__() - c_ = int(c2/2) # hidden channels - self.cv1 = Conv(c1, c_, 3, 2) - self.cv2 = Conv(c_, c_, 1, 1) - self.cv3 = Conv(c_, c_, 3, 2) - self.pool = torch.nn.MaxPool2d(2, stride=2) - self.cv4 = Conv(2 * c_, c2, 1, 1) - - def forward(self, x): - x = self.cv1(x) - return self.cv4(torch.cat((self.cv3(self.cv2(x)), self.pool(x)), dim=1)) - - -class DownC(nn.Module): - # Spatial pyramid pooling layer used in YOLOv3-SPP - def __init__(self, c1, c2, 
n=1, k=2): - super(DownC, self).__init__() - c_ = int(c1) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c2//2, 3, k) - self.cv3 = Conv(c1, c2//2, 1, 1) - self.mp = nn.MaxPool2d(kernel_size=k, stride=k) - - def forward(self, x): - return torch.cat((self.cv2(self.cv1(x)), self.cv3(self.mp(x))), dim=1) - - -class SPP(nn.Module): - # Spatial pyramid pooling layer used in YOLOv3-SPP - def __init__(self, c1, c2, k=(5, 9, 13)): - super(SPP, self).__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) - self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) - - def forward(self, x): - x = self.cv1(x) - return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) - - -class Bottleneck(nn.Module): - # Darknet bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super(Bottleneck, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c2, 3, 1, g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class Res(nn.Module): - # ResNet bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super(Res, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c_, 3, 1, g=g) - self.cv3 = Conv(c_, c2, 1, 1) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv3(self.cv2(self.cv1(x))) if self.add else self.cv3(self.cv2(self.cv1(x))) - - -class ResX(Res): - # ResNet bottleneck - def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__(c1, c2, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - - -class Ghost(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super(Ghost, self).__init__() - c_ = c2 // 2 - self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), - Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() - - def forward(self, x): - return self.conv(x) + self.shortcut(x) - -##### end of basic ##### - - -##### cspnet ##### - -class SPPCSPC(nn.Module): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)): - super(SPPCSPC, self).__init__() - c_ = int(2 * c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(c_, c_, 3, 1) - self.cv4 = Conv(c_, c_, 1, 1) - self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) - self.cv5 = Conv(4 * c_, c_, 1, 1) - self.cv6 = Conv(c_, c_, 3, 1) - self.cv7 = Conv(2 * c_, c2, 1, 1) - - def forward(self, x): - x1 = self.cv4(self.cv3(self.cv1(x))) - y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1))) - y2 = self.cv2(x) - return self.cv7(torch.cat((y1, y2), dim=1)) - -class GhostSPPCSPC(SPPCSPC): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)): - super().__init__(c1, c2, n, shortcut, g, e, 
k) - c_ = int(2 * c2 * e) # hidden channels - self.cv1 = GhostConv(c1, c_, 1, 1) - self.cv2 = GhostConv(c1, c_, 1, 1) - self.cv3 = GhostConv(c_, c_, 3, 1) - self.cv4 = GhostConv(c_, c_, 1, 1) - self.cv5 = GhostConv(4 * c_, c_, 1, 1) - self.cv6 = GhostConv(c_, c_, 3, 1) - self.cv7 = GhostConv(2 * c_, c2, 1, 1) - - -class GhostStem(Stem): - # Stem - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, p, g, act) - c_ = int(c2/2) # hidden channels - self.cv1 = GhostConv(c1, c_, 3, 2) - self.cv2 = GhostConv(c_, c_, 1, 1) - self.cv3 = GhostConv(c_, c_, 3, 2) - self.cv4 = GhostConv(2 * c_, c2, 1, 1) - - -class BottleneckCSPA(nn.Module): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(BottleneckCSPA, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.m(self.cv1(x)) - y2 = self.cv2(x) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class BottleneckCSPB(nn.Module): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(BottleneckCSPB, self).__init__() - c_ = int(c2) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - x1 = self.cv1(x) - y1 = self.m(x1) - y2 = self.cv2(x1) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class BottleneckCSPC(nn.Module): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(BottleneckCSPC, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(c_, c_, 1, 1) - self.cv4 = Conv(2 * c_, c2, 1, 1) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(torch.cat((y1, y2), dim=1)) - - -class ResCSPA(BottleneckCSPA): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class ResCSPB(BottleneckCSPB): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class ResCSPC(BottleneckCSPC): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ 
= int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class ResXCSPA(ResCSPA): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class ResXCSPB(ResCSPB): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class ResXCSPC(ResCSPC): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class GhostCSPA(BottleneckCSPA): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) - - -class GhostCSPB(BottleneckCSPB): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) - - -class GhostCSPC(BottleneckCSPC): - # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) - -##### end of cspnet ##### - - -##### yolor ##### - -class ImplicitA(nn.Module): - def __init__(self, channel, mean=0., std=.02): - super(ImplicitA, self).__init__() - self.channel = channel - self.mean = mean - self.std = std - self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1)) - nn.init.normal_(self.implicit, mean=self.mean, std=self.std) - - def forward(self, x): - return self.implicit + x - - -class ImplicitM(nn.Module): - def __init__(self, channel, mean=0., std=.02): - super(ImplicitM, self).__init__() - self.channel = channel - self.mean = mean - self.std = std - self.implicit = nn.Parameter(torch.ones(1, channel, 1, 1)) - nn.init.normal_(self.implicit, mean=self.mean, std=self.std) - - def forward(self, x): - return self.implicit * x - -##### end of yolor ##### - - -##### repvgg ##### - -class RepConv(nn.Module): - # Represented convolution - # https://arxiv.org/abs/2101.03697 - - def __init__(self, c1, c2, k=3, s=1, p=None, g=1, act=True, deploy=False): - super(RepConv, self).__init__() - - self.deploy = deploy - self.groups = g - self.in_channels = c1 - self.out_channels = c2 - - assert k == 3 - assert autopad(k, p) == 1 - - padding_11 = autopad(k, p) - k // 2 - - self.act = nn.SiLU() if act is True else (act if 
isinstance(act, nn.Module) else nn.Identity()) - - if deploy: - self.rbr_reparam = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=True) - - else: - self.rbr_identity = (nn.BatchNorm2d(num_features=c1) if c2 == c1 and s == 1 else None) - - self.rbr_dense = nn.Sequential( - nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False), - nn.BatchNorm2d(num_features=c2), - ) - - self.rbr_1x1 = nn.Sequential( - nn.Conv2d( c1, c2, 1, s, padding_11, groups=g, bias=False), - nn.BatchNorm2d(num_features=c2), - ) - - def forward(self, inputs): - if hasattr(self, "rbr_reparam"): - return self.act(self.rbr_reparam(inputs)) - - if self.rbr_identity is None: - id_out = 0 - else: - id_out = self.rbr_identity(inputs) - - return self.act(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out) - - def get_equivalent_kernel_bias(self): - kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) - kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) - kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) - return ( - kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, - bias3x3 + bias1x1 + biasid, - ) - - def _pad_1x1_to_3x3_tensor(self, kernel1x1): - if kernel1x1 is None: - return 0 - else: - return nn.functional.pad(kernel1x1, [1, 1, 1, 1]) - - def _fuse_bn_tensor(self, branch): - if branch is None: - return 0, 0 - if isinstance(branch, nn.Sequential): - kernel = branch[0].weight - running_mean = branch[1].running_mean - running_var = branch[1].running_var - gamma = branch[1].weight - beta = branch[1].bias - eps = branch[1].eps - else: - assert isinstance(branch, nn.BatchNorm2d) - if not hasattr(self, "id_tensor"): - input_dim = self.in_channels // self.groups - kernel_value = np.zeros( - (self.in_channels, input_dim, 3, 3), dtype=np.float32 - ) - for i in range(self.in_channels): - kernel_value[i, i % input_dim, 1, 1] = 1 - self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device) - kernel = self.id_tensor - running_mean = branch.running_mean - running_var = branch.running_var - gamma = branch.weight - beta = branch.bias - eps = branch.eps - std = (running_var + eps).sqrt() - t = (gamma / std).reshape(-1, 1, 1, 1) - return kernel * t, beta - running_mean * gamma / std - - def repvgg_convert(self): - kernel, bias = self.get_equivalent_kernel_bias() - return ( - kernel.detach().cpu().numpy(), - bias.detach().cpu().numpy(), - ) - - def fuse_conv_bn(self, conv, bn): - - std = (bn.running_var + bn.eps).sqrt() - bias = bn.bias - bn.running_mean * bn.weight / std - - t = (bn.weight / std).reshape(-1, 1, 1, 1) - weights = conv.weight * t - - bn = nn.Identity() - conv = nn.Conv2d(in_channels = conv.in_channels, - out_channels = conv.out_channels, - kernel_size = conv.kernel_size, - stride=conv.stride, - padding = conv.padding, - dilation = conv.dilation, - groups = conv.groups, - bias = True, - padding_mode = conv.padding_mode) - - conv.weight = torch.nn.Parameter(weights) - conv.bias = torch.nn.Parameter(bias) - return conv - - def fuse_repvgg_block(self): - if self.deploy: - return - print(f"RepConv.fuse_repvgg_block") - - self.rbr_dense = self.fuse_conv_bn(self.rbr_dense[0], self.rbr_dense[1]) - - self.rbr_1x1 = self.fuse_conv_bn(self.rbr_1x1[0], self.rbr_1x1[1]) - rbr_1x1_bias = self.rbr_1x1.bias - weight_1x1_expanded = torch.nn.functional.pad(self.rbr_1x1.weight, [1, 1, 1, 1]) - - # Fuse self.rbr_identity - if (isinstance(self.rbr_identity, nn.BatchNorm2d) or isinstance(self.rbr_identity, nn.modules.batchnorm.SyncBatchNorm)): - # print(f"fuse: rbr_identity == 
BatchNorm2d or SyncBatchNorm") - identity_conv_1x1 = nn.Conv2d( - in_channels=self.in_channels, - out_channels=self.out_channels, - kernel_size=1, - stride=1, - padding=0, - groups=self.groups, - bias=False) - identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.to(self.rbr_1x1.weight.data.device) - identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.squeeze().squeeze() - # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}") - identity_conv_1x1.weight.data.fill_(0.0) - identity_conv_1x1.weight.data.fill_diagonal_(1.0) - identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.unsqueeze(2).unsqueeze(3) - # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}") - - identity_conv_1x1 = self.fuse_conv_bn(identity_conv_1x1, self.rbr_identity) - bias_identity_expanded = identity_conv_1x1.bias - weight_identity_expanded = torch.nn.functional.pad(identity_conv_1x1.weight, [1, 1, 1, 1]) - else: - # print(f"fuse: rbr_identity != BatchNorm2d, rbr_identity = {self.rbr_identity}") - bias_identity_expanded = torch.nn.Parameter( torch.zeros_like(rbr_1x1_bias) ) - weight_identity_expanded = torch.nn.Parameter( torch.zeros_like(weight_1x1_expanded) ) - - - #print(f"self.rbr_1x1.weight = {self.rbr_1x1.weight.shape}, ") - #print(f"weight_1x1_expanded = {weight_1x1_expanded.shape}, ") - #print(f"self.rbr_dense.weight = {self.rbr_dense.weight.shape}, ") - - self.rbr_dense.weight = torch.nn.Parameter(self.rbr_dense.weight + weight_1x1_expanded + weight_identity_expanded) - self.rbr_dense.bias = torch.nn.Parameter(self.rbr_dense.bias + rbr_1x1_bias + bias_identity_expanded) - - self.rbr_reparam = self.rbr_dense - self.deploy = True - - if self.rbr_identity is not None: - del self.rbr_identity - self.rbr_identity = None - - if self.rbr_1x1 is not None: - del self.rbr_1x1 - self.rbr_1x1 = None - - if self.rbr_dense is not None: - del self.rbr_dense - self.rbr_dense = None - - -class RepBottleneck(Bottleneck): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__(c1, c2, shortcut=True, g=1, e=0.5) - c_ = int(c2 * e) # hidden channels - self.cv2 = RepConv(c_, c2, 3, 1, g=g) - - -class RepBottleneckCSPA(BottleneckCSPA): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class RepBottleneckCSPB(BottleneckCSPB): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class RepBottleneckCSPC(BottleneckCSPC): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - -class RepRes(Res): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, 
e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__(c1, c2, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.cv2 = RepConv(c_, c_, 3, 1, g=g) - - -class RepResCSPA(ResCSPA): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResCSPB(ResCSPB): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResCSPC(ResCSPC): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResX(ResX): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__(c1, c2, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.cv2 = RepConv(c_, c_, 3, 1, g=g) - - -class RepResXCSPA(ResXCSPA): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResXCSPB(ResXCSPB): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2) # hidden channels - self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - - -class RepResXCSPC(ResXCSPC): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) - -##### end of repvgg ##### - - -##### transformer ##### - -class TransformerLayer(nn.Module): - # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) - def __init__(self, c, num_heads): - super().__init__() - self.q = nn.Linear(c, c, bias=False) - self.k = nn.Linear(c, c, bias=False) - self.v = nn.Linear(c, c, bias=False) - self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) - self.fc1 = nn.Linear(c, c, bias=False) - self.fc2 = nn.Linear(c, c, bias=False) - - def forward(self, x): - x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x - x = self.fc2(self.fc1(x)) + x - return x - - -class TransformerBlock(nn.Module): - # Vision Transformer https://arxiv.org/abs/2010.11929 - def __init__(self, c1, c2, 
num_heads, num_layers): - super().__init__() - self.conv = None - if c1 != c2: - self.conv = Conv(c1, c2) - self.linear = nn.Linear(c2, c2) # learnable position embedding - self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) - self.c2 = c2 - - def forward(self, x): - if self.conv is not None: - x = self.conv(x) - b, _, w, h = x.shape - p = x.flatten(2) - p = p.unsqueeze(0) - p = p.transpose(0, 3) - p = p.squeeze(3) - e = self.linear(p) - x = p + e - - x = self.tr(x) - x = x.unsqueeze(3) - x = x.transpose(0, 3) - x = x.reshape(b, self.c2, w, h) - return x - -##### end of transformer ##### - - -##### yolov5 ##### - -class Focus(nn.Module): - # Focus wh information into c-space - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Focus, self).__init__() - self.conv = Conv(c1 * 4, c2, k, s, p, g, act) - # self.contract = Contract(gain=2) - - def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) - # return self.conv(self.contract(x)) - - -class SPPF(nn.Module): - # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher - def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * 4, c2, 1, 1) - self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) - - def forward(self, x): - x = self.cv1(x) - y1 = self.m(x) - y2 = self.m(y1) - return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) - - -class Contract(nn.Module): - # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' - s = self.gain - x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) - x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) - return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) - - -class Expand(nn.Module): - # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' - s = self.gain - x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) - x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) - - -class NMS(nn.Module): - # Non-Maximum Suppression (NMS) module - conf = 0.25 # confidence threshold - iou = 0.45 # IoU threshold - classes = None # (optional list) filter by class - - def __init__(self): - super(NMS, self).__init__() - - def forward(self, x): - return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) - - -class autoShape(nn.Module): - # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS - conf = 0.25 # NMS confidence threshold - iou = 0.45 # NMS IoU threshold - classes = None # (optional list) filter by class - - def __init__(self, model): - super(autoShape, self).__init__() - self.model = model.eval() - - def autoshape(self): - print('autoShape already enabled, skipping... 
') # model already converted to model.autoshape() - return self - - @torch.no_grad() - def forward(self, imgs, size=640, augment=False, profile=False): - # Inference from various sources. For height=640, width=1280, RGB images example inputs are: - # filename: imgs = 'data/samples/zidane.jpg' - # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' - # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) - # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) - # numpy: = np.zeros((640,1280,3)) # HWC - # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) - # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images - - t = [time_synchronized()] - p = next(self.model.parameters()) # for device and type - if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(enabled=p.device.type != 'cpu'): - return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference - - # Pre-process - n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images - shape0, shape1, files = [], [], [] # image and inference shapes, filenames - for i, im in enumerate(imgs): - f = f'image{i}' # filename - if isinstance(im, str): # filename or uri - im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im - elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(im), getattr(im, 'filename', f) or f - files.append(Path(f).with_suffix('.jpg').name) - if im.shape[0] < 5: # image in CHW - im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input - s = im.shape[:2] # HWC - shape0.append(s) # image shape - g = (size / max(s)) # gain - shape1.append([y * g for y in s]) - imgs[i] = im # update - shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape - x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad - x = np.stack(x, 0) if n > 1 else x[0][None] # stack - x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW - x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32 - t.append(time_synchronized()) - - with amp.autocast(enabled=p.device.type != 'cpu'): - # Inference - y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) - - # Post-process - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) - - t.append(time_synchronized()) - return Detections(imgs, y, files, t, self.names, x.shape) - - -class Detections: - # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, files, times=None, names=None, shape=None): - super(Detections, self).__init__() - d = pred[0].device # device - gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations - self.imgs = imgs # list of images as numpy arrays - self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) - self.names = names # class names - self.files = files # image filenames - self.xyxy = pred # xyxy pixels - self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels - self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized - self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized - self.n = len(self.pred) # number of images (batch size) - self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) - self.s = shape # inference BCHW shape - - def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): - colors = color_list() - for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): - str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' - if pred is not None: - for c in pred[:, -1].unique(): - n = (pred[:, -1] == c).sum() # detections per class - str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string - if show or save or render: - for *box, conf, cls in pred: # xyxy, confidence, class - label = f'{self.names[int(cls)]} {conf:.2f}' - plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) - img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np - if pprint: - print(str.rstrip(', ')) - if show: - img.show(self.files[i]) # show - if save: - f = self.files[i] - img.save(Path(save_dir) / f) # save - print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') - if render: - self.imgs[i] = np.asarray(img) - - def print(self): - self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) - - def show(self): - self.display(show=True) # show results - - def save(self, save_dir='runs/hub/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir - Path(save_dir).mkdir(parents=True, exist_ok=True) - self.display(save=True, save_dir=save_dir) # save results - - def render(self): - self.display(render=True) # render results - return self.imgs - - def pandas(self): - # return detections as pandas DataFrames, i.e. 
print(results.pandas().xyxy[0]) - new = copy(self) # return copy - ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns - cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns - for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): - a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update - setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) - return new - - def tolist(self): - # return a list of Detections objects, i.e. 'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] - for d in x: - for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: - setattr(d, k, getattr(d, k)[0]) # pop out of list - return x - - def __len__(self): - return self.n - - -class Classify(nn.Module): - # Classification head, i.e. x(b,c1,20,20) to x(b,c2) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups - super(Classify, self).__init__() - self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) - self.flat = nn.Flatten() - - def forward(self, x): - z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list - return self.flat(self.conv(z)) # flatten to x(b,c2) - -##### end of yolov5 ###### - - -##### orepa ##### - -def transI_fusebn(kernel, bn): - gamma = bn.weight - std = (bn.running_var + bn.eps).sqrt() - return kernel * ((gamma / std).reshape(-1, 1, 1, 1)), bn.bias - bn.running_mean * gamma / std - - -class ConvBN(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, - stride=1, padding=0, dilation=1, groups=1, deploy=False, nonlinear=None): - super().__init__() - if nonlinear is None: - self.nonlinear = nn.Identity() - else: - self.nonlinear = nonlinear - if deploy: - self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, - stride=stride, padding=padding, dilation=dilation, groups=groups, bias=True) - else: - self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, - stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False) - self.bn = nn.BatchNorm2d(num_features=out_channels) - - def forward(self, x): - if hasattr(self, 'bn'): - return self.nonlinear(self.bn(self.conv(x))) - else: - return self.nonlinear(self.conv(x)) - - def switch_to_deploy(self): - kernel, bias = transI_fusebn(self.conv.weight, self.bn) - conv = nn.Conv2d(in_channels=self.conv.in_channels, out_channels=self.conv.out_channels, kernel_size=self.conv.kernel_size, - stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups, bias=True) - conv.weight.data = kernel - conv.bias.data = bias - for para in self.parameters(): - para.detach_() - self.__delattr__('conv') - self.__delattr__('bn') - self.conv = conv - -class OREPA_3x3_RepConv(nn.Module): - - def __init__(self, in_channels, out_channels, kernel_size, - stride=1, padding=0, dilation=1, groups=1, - internal_channels_1x1_3x3=None, - deploy=False, nonlinear=None, single_init=False): - super(OREPA_3x3_RepConv, self).__init__() - self.deploy = deploy - - if nonlinear is None: - self.nonlinear = nn.Identity() - else: - self.nonlinear = nonlinear - - self.kernel_size = kernel_size - self.in_channels = in_channels - self.out_channels = out_channels - 
self.groups = groups - assert padding == kernel_size // 2 - - self.stride = stride - self.padding = padding - self.dilation = dilation - - self.branch_counter = 0 - - self.weight_rbr_origin = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), kernel_size, kernel_size)) - nn.init.kaiming_uniform_(self.weight_rbr_origin, a=math.sqrt(1.0)) - self.branch_counter += 1 - - - if groups < out_channels: - self.weight_rbr_avg_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1)) - self.weight_rbr_pfir_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1)) - nn.init.kaiming_uniform_(self.weight_rbr_avg_conv, a=1.0) - nn.init.kaiming_uniform_(self.weight_rbr_pfir_conv, a=1.0) - self.weight_rbr_avg_conv.data - self.weight_rbr_pfir_conv.data - self.register_buffer('weight_rbr_avg_avg', torch.ones(kernel_size, kernel_size).mul(1.0/kernel_size/kernel_size)) - self.branch_counter += 1 - - else: - raise NotImplementedError - self.branch_counter += 1 - - if internal_channels_1x1_3x3 is None: - internal_channels_1x1_3x3 = in_channels if groups < out_channels else 2 * in_channels # For mobilenet, it is better to have 2X internal channels - - if internal_channels_1x1_3x3 == in_channels: - self.weight_rbr_1x1_kxk_idconv1 = nn.Parameter(torch.zeros(in_channels, int(in_channels/self.groups), 1, 1)) - id_value = np.zeros((in_channels, int(in_channels/self.groups), 1, 1)) - for i in range(in_channels): - id_value[i, i % int(in_channels/self.groups), 0, 0] = 1 - id_tensor = torch.from_numpy(id_value).type_as(self.weight_rbr_1x1_kxk_idconv1) - self.register_buffer('id_tensor', id_tensor) - - else: - self.weight_rbr_1x1_kxk_conv1 = nn.Parameter(torch.Tensor(internal_channels_1x1_3x3, int(in_channels/self.groups), 1, 1)) - nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv1, a=math.sqrt(1.0)) - self.weight_rbr_1x1_kxk_conv2 = nn.Parameter(torch.Tensor(out_channels, int(internal_channels_1x1_3x3/self.groups), kernel_size, kernel_size)) - nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv2, a=math.sqrt(1.0)) - self.branch_counter += 1 - - expand_ratio = 8 - self.weight_rbr_gconv_dw = nn.Parameter(torch.Tensor(in_channels*expand_ratio, 1, kernel_size, kernel_size)) - self.weight_rbr_gconv_pw = nn.Parameter(torch.Tensor(out_channels, in_channels*expand_ratio, 1, 1)) - nn.init.kaiming_uniform_(self.weight_rbr_gconv_dw, a=math.sqrt(1.0)) - nn.init.kaiming_uniform_(self.weight_rbr_gconv_pw, a=math.sqrt(1.0)) - self.branch_counter += 1 - - if out_channels == in_channels and stride == 1: - self.branch_counter += 1 - - self.vector = nn.Parameter(torch.Tensor(self.branch_counter, self.out_channels)) - self.bn = nn.BatchNorm2d(out_channels) - - self.fre_init() - - nn.init.constant_(self.vector[0, :], 0.25) #origin - nn.init.constant_(self.vector[1, :], 0.25) #avg - nn.init.constant_(self.vector[2, :], 0.0) #prior - nn.init.constant_(self.vector[3, :], 0.5) #1x1_kxk - nn.init.constant_(self.vector[4, :], 0.5) #dws_conv - - - def fre_init(self): - prior_tensor = torch.Tensor(self.out_channels, self.kernel_size, self.kernel_size) - half_fg = self.out_channels/2 - for i in range(self.out_channels): - for h in range(3): - for w in range(3): - if i < half_fg: - prior_tensor[i, h, w] = math.cos(math.pi*(h+0.5)*(i+1)/3) - else: - prior_tensor[i, h, w] = math.cos(math.pi*(w+0.5)*(i+1-half_fg)/3) - - self.register_buffer('weight_rbr_prior', prior_tensor) - - def weight_gen(self): - - weight_rbr_origin = torch.einsum('oihw,o->oihw', self.weight_rbr_origin, 
self.vector[0, :]) - - weight_rbr_avg = torch.einsum('oihw,o->oihw', torch.einsum('oihw,hw->oihw', self.weight_rbr_avg_conv, self.weight_rbr_avg_avg), self.vector[1, :]) - - weight_rbr_pfir = torch.einsum('oihw,o->oihw', torch.einsum('oihw,ohw->oihw', self.weight_rbr_pfir_conv, self.weight_rbr_prior), self.vector[2, :]) - - weight_rbr_1x1_kxk_conv1 = None - if hasattr(self, 'weight_rbr_1x1_kxk_idconv1'): - weight_rbr_1x1_kxk_conv1 = (self.weight_rbr_1x1_kxk_idconv1 + self.id_tensor).squeeze() - elif hasattr(self, 'weight_rbr_1x1_kxk_conv1'): - weight_rbr_1x1_kxk_conv1 = self.weight_rbr_1x1_kxk_conv1.squeeze() - else: - raise NotImplementedError - weight_rbr_1x1_kxk_conv2 = self.weight_rbr_1x1_kxk_conv2 - - if self.groups > 1: - g = self.groups - t, ig = weight_rbr_1x1_kxk_conv1.size() - o, tg, h, w = weight_rbr_1x1_kxk_conv2.size() - weight_rbr_1x1_kxk_conv1 = weight_rbr_1x1_kxk_conv1.view(g, int(t/g), ig) - weight_rbr_1x1_kxk_conv2 = weight_rbr_1x1_kxk_conv2.view(g, int(o/g), tg, h, w) - weight_rbr_1x1_kxk = torch.einsum('gti,gothw->goihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2).view(o, ig, h, w) - else: - weight_rbr_1x1_kxk = torch.einsum('ti,othw->oihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2) - - weight_rbr_1x1_kxk = torch.einsum('oihw,o->oihw', weight_rbr_1x1_kxk, self.vector[3, :]) - - weight_rbr_gconv = self.dwsc2full(self.weight_rbr_gconv_dw, self.weight_rbr_gconv_pw, self.in_channels) - weight_rbr_gconv = torch.einsum('oihw,o->oihw', weight_rbr_gconv, self.vector[4, :]) - - weight = weight_rbr_origin + weight_rbr_avg + weight_rbr_1x1_kxk + weight_rbr_pfir + weight_rbr_gconv - - return weight - - def dwsc2full(self, weight_dw, weight_pw, groups): - - t, ig, h, w = weight_dw.size() - o, _, _, _ = weight_pw.size() - tg = int(t/groups) - i = int(ig*groups) - weight_dw = weight_dw.view(groups, tg, ig, h, w) - weight_pw = weight_pw.squeeze().view(o, groups, tg) - - weight_dsc = torch.einsum('gtihw,ogt->ogihw', weight_dw, weight_pw) - return weight_dsc.view(o, i, h, w) - - def forward(self, inputs): - weight = self.weight_gen() - out = F.conv2d(inputs, weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) - - return self.nonlinear(self.bn(out)) - -class RepConv_OREPA(nn.Module): - - def __init__(self, c1, c2, k=3, s=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False, nonlinear=nn.SiLU()): - super(RepConv_OREPA, self).__init__() - self.deploy = deploy - self.groups = groups - self.in_channels = c1 - self.out_channels = c2 - - self.padding = padding - self.dilation = dilation - self.groups = groups - - assert k == 3 - assert padding == 1 - - padding_11 = padding - k // 2 - - if nonlinear is None: - self.nonlinearity = nn.Identity() - else: - self.nonlinearity = nonlinear - - if use_se: - self.se = SEBlock(self.out_channels, internal_neurons=self.out_channels // 16) - else: - self.se = nn.Identity() - - if deploy: - self.rbr_reparam = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s, - padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode) - - else: - self.rbr_identity = nn.BatchNorm2d(num_features=self.in_channels) if self.out_channels == self.in_channels and s == 1 else None - self.rbr_dense = OREPA_3x3_RepConv(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s, padding=padding, groups=groups, dilation=1) - self.rbr_1x1 = ConvBN(in_channels=self.in_channels, 
out_channels=self.out_channels, kernel_size=1, stride=s, padding=padding_11, groups=groups, dilation=1) - print('RepVGG Block, identity = ', self.rbr_identity) - - - def forward(self, inputs): - if hasattr(self, 'rbr_reparam'): - return self.nonlinearity(self.se(self.rbr_reparam(inputs))) - - if self.rbr_identity is None: - id_out = 0 - else: - id_out = self.rbr_identity(inputs) - - out1 = self.rbr_dense(inputs) - out2 = self.rbr_1x1(inputs) - out3 = id_out - out = out1 + out2 + out3 - - return self.nonlinearity(self.se(out)) - - - # Optional. This improves the accuracy and facilitates quantization. - # 1. Cancel the original weight decay on rbr_dense.conv.weight and rbr_1x1.conv.weight. - # 2. Use like this. - # loss = criterion(....) - # for every RepVGGBlock blk: - # loss += weight_decay_coefficient * 0.5 * blk.get_cust_L2() - # optimizer.zero_grad() - # loss.backward() - - # Not used for OREPA - def get_custom_L2(self): - K3 = self.rbr_dense.weight_gen() - K1 = self.rbr_1x1.conv.weight - t3 = (self.rbr_dense.bn.weight / ((self.rbr_dense.bn.running_var + self.rbr_dense.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach() - t1 = (self.rbr_1x1.bn.weight / ((self.rbr_1x1.bn.running_var + self.rbr_1x1.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach() - - l2_loss_circle = (K3 ** 2).sum() - (K3[:, :, 1:2, 1:2] ** 2).sum() # The L2 loss of the "circle" of weights in 3x3 kernel. Use regular L2 on them. - eq_kernel = K3[:, :, 1:2, 1:2] * t3 + K1 * t1 # The equivalent resultant central point of 3x3 kernel. - l2_loss_eq_kernel = (eq_kernel ** 2 / (t3 ** 2 + t1 ** 2)).sum() # Normalize for an L2 coefficient comparable to regular L2. - return l2_loss_eq_kernel + l2_loss_circle - - def get_equivalent_kernel_bias(self): - kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) - kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) - kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) - return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid - - def _pad_1x1_to_3x3_tensor(self, kernel1x1): - if kernel1x1 is None: - return 0 - else: - return torch.nn.functional.pad(kernel1x1, [1,1,1,1]) - - def _fuse_bn_tensor(self, branch): - if branch is None: - return 0, 0 - if not isinstance(branch, nn.BatchNorm2d): - if isinstance(branch, OREPA_3x3_RepConv): - kernel = branch.weight_gen() - elif isinstance(branch, ConvBN): - kernel = branch.conv.weight - else: - raise NotImplementedError - running_mean = branch.bn.running_mean - running_var = branch.bn.running_var - gamma = branch.bn.weight - beta = branch.bn.bias - eps = branch.bn.eps - else: - if not hasattr(self, 'id_tensor'): - input_dim = self.in_channels // self.groups - kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32) - for i in range(self.in_channels): - kernel_value[i, i % input_dim, 1, 1] = 1 - self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device) - kernel = self.id_tensor - running_mean = branch.running_mean - running_var = branch.running_var - gamma = branch.weight - beta = branch.bias - eps = branch.eps - std = (running_var + eps).sqrt() - t = (gamma / std).reshape(-1, 1, 1, 1) - return kernel * t, beta - running_mean * gamma / std - - def switch_to_deploy(self): - if hasattr(self, 'rbr_reparam'): - return - print(f"RepConv_OREPA.switch_to_deploy") - kernel, bias = self.get_equivalent_kernel_bias() - self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.in_channels, out_channels=self.rbr_dense.out_channels, - kernel_size=self.rbr_dense.kernel_size, 
stride=self.rbr_dense.stride, - padding=self.rbr_dense.padding, dilation=self.rbr_dense.dilation, groups=self.rbr_dense.groups, bias=True) - self.rbr_reparam.weight.data = kernel - self.rbr_reparam.bias.data = bias - for para in self.parameters(): - para.detach_() - self.__delattr__('rbr_dense') - self.__delattr__('rbr_1x1') - if hasattr(self, 'rbr_identity'): - self.__delattr__('rbr_identity') - -##### end of orepa ##### - - -##### swin transformer ##### - -class WindowAttention(nn.Module): - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - nn.init.normal_(self.relative_position_bias_table, std=.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - - B_, N, C = x.shape - qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - # print(attn.dtype, v.dtype) - try: - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - except: - #print(attn.dtype, v.dtype) - x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - -class Mlp(nn.Module): - - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = 
nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - -def window_partition(x, window_size): - - B, H, W, C = x.shape - assert H % window_size == 0, 'feature map h and w can not divide by window size' - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - -def window_reverse(windows, window_size, H, W): - - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class SwinTransformerLayer(nn.Module): - - def __init__(self, dim, num_heads, window_size=8, shift_size=0, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.SiLU, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - # if min(self.input_resolution) <= self.window_size: - # # if window size is larger than input resolution, we don't partition windows - # self.shift_size = 0 - # self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def create_mask(self, H, W): - # calculate attention mask for SW-MSA - img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - return attn_mask - - def forward(self, x): - # reshape x[b c h w] to x[b l c] - _, _, H_, W_ = x.shape - - Padding = False - if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0: - Padding = True - # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.') - pad_r = (self.window_size - W_ % self.window_size) % self.window_size - pad_b = (self.window_size - H_ % self.window_size) % self.window_size - x = F.pad(x, (0, pad_r, 0, pad_b)) - - # print('2', x.shape) - B, C, H, W = x.shape - L = H * W - x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c - - # create mask from init to forward - if self.shift_size > 0: - attn_mask = self.create_mask(H, W).to(x.device) - else: - attn_mask = None - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w - - if Padding: - x = x[:, :, :H_, :W_] # reverse padding - - return x - - -class SwinTransformerBlock(nn.Module): - def __init__(self, c1, c2, num_heads, num_layers, window_size=8): - super().__init__() - self.conv = None - if c1 != c2: - self.conv = Conv(c1, c2) - - # remove input_resolution - self.blocks = nn.Sequential(*[SwinTransformerLayer(dim=c2, num_heads=num_heads, window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)]) - - def forward(self, x): - if self.conv is not None: - x = self.conv(x) - x = self.blocks(x) - return x - - -class STCSPA(nn.Module): - # CSP Bottleneck 
https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(STCSPA, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformerBlock(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.m(self.cv1(x)) - y2 = self.cv2(x) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class STCSPB(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(STCSPB, self).__init__() - c_ = int(c2) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformerBlock(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - x1 = self.cv1(x) - y1 = self.m(x1) - y2 = self.cv2(x1) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class STCSPC(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(STCSPC, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(c_, c_, 1, 1) - self.cv4 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformerBlock(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(torch.cat((y1, y2), dim=1)) - -##### end of swin transformer ##### - - -##### swin transformer v2 ##### - -class WindowAttention_v2(nn.Module): - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0., - pretrained_window_size=[0, 0]): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.pretrained_window_size = pretrained_window_size - self.num_heads = num_heads - - self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True) - - # mlp to generate continuous relative position bias - self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True), - nn.ReLU(inplace=True), - nn.Linear(512, num_heads, bias=False)) - - # get relative_coords_table - relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32) - relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32) - relative_coords_table = torch.stack( - torch.meshgrid([relative_coords_h, - relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 - if pretrained_window_size[0] > 0: - relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1) - relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1) - else: - relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) - relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) - relative_coords_table *= 8 # normalize to -8, 8 - relative_coords_table = torch.sign(relative_coords_table) * 
torch.log2( - torch.abs(relative_coords_table) + 1.0) / np.log2(8) - - self.register_buffer("relative_coords_table", relative_coords_table) - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=False) - if qkv_bias: - self.q_bias = nn.Parameter(torch.zeros(dim)) - self.v_bias = nn.Parameter(torch.zeros(dim)) - else: - self.q_bias = None - self.v_bias = None - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - - B_, N, C = x.shape - qkv_bias = None - if self.q_bias is not None: - qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) - qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) - qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - # cosine attention - attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) - logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. 
/ 0.01))).exp() - attn = attn * logit_scale - - relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) - relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - relative_position_bias = 16 * torch.sigmoid(relative_position_bias) - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - try: - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - except: - x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C) - - x = self.proj(x) - x = self.proj_drop(x) - return x - - def extra_repr(self) -> str: - return f'dim={self.dim}, window_size={self.window_size}, ' \ - f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}' - - def flops(self, N): - # calculate flops for 1 window with token length of N - flops = 0 - # qkv = self.qkv(x) - flops += N * self.dim * 3 * self.dim - # attn = (q @ k.transpose(-2, -1)) - flops += self.num_heads * N * (self.dim // self.num_heads) * N - # x = (attn @ v) - flops += self.num_heads * N * N * (self.dim // self.num_heads) - # x = self.proj(x) - flops += N * self.dim * self.dim - return flops - -class Mlp_v2(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition_v2(x, window_size): - - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse_v2(windows, window_size, H, W): - - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class SwinTransformerLayer_v2(nn.Module): - - def __init__(self, dim, num_heads, window_size=7, shift_size=0, - mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.SiLU, norm_layer=nn.LayerNorm, pretrained_window_size=0): - super().__init__() - self.dim = dim - #self.input_resolution = input_resolution - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - #if min(self.input_resolution) <= self.window_size: - # # if window size is larger than input resolution, we don't partition windows - # self.shift_size = 0 - # self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention_v2( - dim, 
window_size=(self.window_size, self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, - pretrained_window_size=(pretrained_window_size, pretrained_window_size)) - - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp_v2(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def create_mask(self, H, W): - # calculate attention mask for SW-MSA - img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - return attn_mask - - def forward(self, x): - # reshape x[b c h w] to x[b l c] - _, _, H_, W_ = x.shape - - Padding = False - if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0: - Padding = True - # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.') - pad_r = (self.window_size - W_ % self.window_size) % self.window_size - pad_b = (self.window_size - H_ % self.window_size) % self.window_size - x = F.pad(x, (0, pad_r, 0, pad_b)) - - # print('2', x.shape) - B, C, H, W = x.shape - L = H * W - x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c - - # create mask from init to forward - if self.shift_size > 0: - attn_mask = self.create_mask(H, W).to(x.device) - else: - attn_mask = None - - shortcut = x - x = x.view(B, H, W, C) - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition_v2(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse_v2(attn_windows, self.window_size, H, W) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - x = x.view(B, H * W, C) - x = shortcut + self.drop_path(self.norm1(x)) - - # FFN - x = x + self.drop_path(self.norm2(self.mlp(x))) - x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w - - if Padding: - x = x[:, :, :H_, :W_] # reverse padding - - return x - - def extra_repr(self) -> str: - return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ - f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" - - def flops(self): - flops = 0 - H, W = self.input_resolution - # norm1 - flops += self.dim * H * W - # W-MSA/SW-MSA - 
nW = H * W / self.window_size / self.window_size - flops += nW * self.attn.flops(self.window_size * self.window_size) - # mlp - flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio - # norm2 - flops += self.dim * H * W - return flops - - -class SwinTransformer2Block(nn.Module): - def __init__(self, c1, c2, num_heads, num_layers, window_size=7): - super().__init__() - self.conv = None - if c1 != c2: - self.conv = Conv(c1, c2) - - # remove input_resolution - self.blocks = nn.Sequential(*[SwinTransformerLayer_v2(dim=c2, num_heads=num_heads, window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)]) - - def forward(self, x): - if self.conv is not None: - x = self.conv(x) - x = self.blocks(x) - return x - - -class ST2CSPA(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(ST2CSPA, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformer2Block(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.m(self.cv1(x)) - y2 = self.cv2(x) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class ST2CSPB(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(ST2CSPB, self).__init__() - c_ = int(c2) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformer2Block(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - x1 = self.cv1(x) - y1 = self.m(x1) - y2 = self.cv2(x1) - return self.cv3(torch.cat((y1, y2), dim=1)) - - -class ST2CSPC(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(ST2CSPC, self).__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(c_, c_, 1, 1) - self.cv4 = Conv(2 * c_, c2, 1, 1) - num_heads = c_ // 32 - self.m = SwinTransformer2Block(c_, c_, num_heads, n) - #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(torch.cat((y1, y2), dim=1)) - -##### end of swin transformer v2 ##### diff --git a/spaces/JeffJing/ZookChatBot/steamship/data/tags/tag_constants.py b/spaces/JeffJing/ZookChatBot/steamship/data/tags/tag_constants.py deleted file mode 100644 index 6baf3f62d3bdc060bae163bec99459d1ac122f47..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/data/tags/tag_constants.py +++ /dev/null @@ -1,246 +0,0 @@ -from enum import Enum -from typing import Optional - - -class TagKind(str, Enum): - """A set of `kind` constants for Tags. - - These define broad categories of tags. Suggested `name` values for each category are found in - separate enums. 
For example: kind=TagKind.DOCUMENT, name=DocTag.H1 - """ - - PART_OF_SPEECH = "part-of-speech" - DEPENDENCY = "dependency" - SENTIMENT = "sentiment" - EMOTION = "emotion" - ENTITY = "entity" - DOCUMENT = "document" - TOKEN = "token" # noqa: S105 - INTENT = "intent" - EMBEDDING = "embedding" - GENERATION = "generation" - PROVENANCE = "provenance" - TOPIC = "topic" - TOKENIZATION = "tokenization" - KIND = "summary" - TIMESTAMP = "timestamp" - SUMMARY = "summary" - SEARCH_RESULT = "search-result" - ROLE = "role" - - -class DocTag(str, Enum): - """A set of `name` constants for Tags with a `kind` of `TagKind.doc`; appropriate for HTML and Markdown ideas.""" - - DOCUMENT = "document" - PAGE = "page" # E.g. in a PDF - REGION = "region" # E.g., abstract catchall region in a document - HEADER = "header" - H1 = "h1" - H2 = "h2" - H3 = "h3" - H4 = "h4" - H5 = "h5" - LINE = "line" - TITLE = "title" - SUBTITLE = "subtitle" - FOOTER = "footer" - PARAGRAPH = "paragraph" - ORDERED_LIST = "ordered-list" - UNORDERED_LIST = "unordered-list" - LIST_ITEM = "list-item" - LINK = "link" - CAPTION = "caption" - IMAGE = "image" - BLOCK_QUOTE = "block-quote" - BLOCK_CODE = "block-code" - UNKNOWN = "unknown" - SENTENCE = "sentence" - TOKEN = "token" # noqa: S105 - SPAN = "span" - DIV = "div" - PRE = "pre" - STRONG = "strong" - EMPHASIZED = "emphasized" - UNDERLINED = "underlined" - TELETYPE = "teletype" - ARTICLE = "article" - MAIN = "main" - CHAPTER = "chapter" - TEXT = "text" - - @staticmethod - def from_html_tag(tagname: Optional[str]) -> Optional["DocTag"]: # noqa: C901 - if tagname is None: - return None - - name = tagname.lower().strip() - - if name == "p": - return DocTag.PARAGRAPH - elif name == "h1": - return DocTag.H1 - elif name == "h2": - return DocTag.H2 - elif name == "h3": - return DocTag.H3 - elif name == "h4": - return DocTag.H4 - elif name == "h5": - return DocTag.H5 - elif name == "ul": - return DocTag.UNORDERED_LIST - elif name == "ol": - return DocTag.ORDERED_LIST - elif name == "li": - return DocTag.LIST_ITEM - elif name == "a": - return DocTag.LINK - elif name == "div": - return DocTag.DIV - elif name == "img": - return DocTag.IMAGE - elif name == "span": - return DocTag.SPAN - elif name == "pre": - return DocTag.PRE - elif name == "code": - return DocTag.BLOCK_CODE - elif name == "blockquote": - return DocTag.BLOCK_QUOTE - elif name == "strong": - return DocTag.STRONG - elif name == "b": - return DocTag.STRONG - elif name == "emph": - return DocTag.EMPHASIZED - elif name == "i": - return DocTag.EMPHASIZED - elif name == "u": - return DocTag.UNDERLINED - elif name == "tt": - return DocTag.TELETYPE - elif name == "article": - return DocTag.ARTICLE - elif name == "header": - return DocTag.HEADER - elif name == "footer": - return DocTag.FOOTER - elif name == "main": - return DocTag.MAIN - - return None - - -class TokenTag(str, Enum): - """A set of `name` constants for Tags with a `kind` of `TagKind.token`; appropriate for parsing-level ideas.""" - - TEXT_WITH_WHITESPACE = "text-with-whitespace" - TEXT = "text" - WHITESPACE = "whitespace" - HEAD = "head" - LEFT_EDGE = "left-edge" - RIGHT_EDGE = "right-edge" - ENTITY_TYPE = "entity-type" - ENTITY_IOB = "entity-iob" - LEMMA = "lemma" - NORMALIZED = "normalized" - SHAPE = "shape" - PREFIX = "prefix" - SUFFIX = "suffix" - IS_ALPHA = "is-alpha" - IS_ASCII = "is-ascii" - IS_DIGIT = "is-digit" - IS_TITLE = "is-title" - IS_PUNCT = "is-punct" - IS_LEFT_PUNCT = "is-left-punct" - IS_RIGHT_PUNCT = "is-right-punct" - IS_SPACE = "is-space" - IS_BRACKET = 
"is-bracket" - IS_QUOTE = "is-quote" - IS_CURRENCY = "is-currency" - LIKE_URL = "like-url" - LIKE_NUM = "like-num" - LIKE_EMAIL = "like-email" - IS_OUT_OF_VOCABULARY = "is-out-of-vocabulary" - IS_STOPWORD = "is-stopword" - LANGUAGE = "language" - - -class TagValueKey(str, Enum): - """A set of key constants for the `value` object within a tag.""" - - # Catch-all for confidence, score, ranking - SCORE = "score" - - # Catch-all for values of different types such as integers, floats, booleans, and strings - VALUE = "value" - - # An array of floats or integers - VECTOR_VALUE = "vector-value" - - # A float or integer - NUMBER_VALUE = "number-value" - - # A bool - BOOL_VALUE = "bool-value" - - # A string - STRING_VALUE = "string-value" - - # Whether some annotation is direct ("Susan said 'Hi'") - DIRECT = "direct" - - # Start time of a region of a document, in some other medium (seconds) - START_TIME_S = "start-time-s" - - # End time of a region of a document, in some other medium (seconds) - END_TIME_S = "end-time-s" - - # The normalized name of an entity - ENTITY_NAME = "entity_name" - - # Timestamp. Can be used to provide a time-based sort-ordering for tags. - TIMESTAMP_VALUE = "timestamp-value" - - -class GenerationTag(str, Enum): - """A set of `name` constants for Tags with a `kind` of `TagKind.generation`.""" - - # A generated summary of some region of a document - SUMMARY = "summary" - - # A generated headline for some region of a document - HEADLINE = "headline" - - # A generated "micro summary" of some region of a document - GIST = "gist" - - # A generated completion using some region of the document as input - PROMPT_COMPLETION = "prompt-completion" - - -class ProvenanceTag(str, Enum): - """A set of `name` constants for Tags with a `kind` of `TagKind.provenance`.""" - - # The speaker of a section of a document - SPEAKER = "speaker" - - # The URL from which some section of a document was sourced - URL = "url" - - # The File from which some section of a document was sourced - FILE = "file" - - -class RoleTag(str, Enum): - """A set of `name` constants for Tags with a `kind` of `TagKind.ROLE`.""" - - # This block's content was created by the System; likely instructional text on how to respond - SYSTEM = "system" - - # This block's content was created by an end user - USER = "user" - - # This block's content was created by the generative AI assistant - ASSISTANT = "assistant" diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT/web_assets/stylesheet/chatbot.css b/spaces/JohnSmith9982/ChuanhuChatGPT/web_assets/stylesheet/chatbot.css deleted file mode 100644 index d99584282c052861e5e401add62c3b94eb48ec65..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT/web_assets/stylesheet/chatbot.css +++ /dev/null @@ -1,278 +0,0 @@ - -hr.append-display { - margin: 8px 0; - border: none; - height: 1px; - border-top-width: 0; - background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1)); -} -.source-a { - font-size: 0.8em; - max-width: 100%; - margin: 0; - display: flex; - flex-direction: row; - flex-wrap: wrap; - align-items: center; - /* background-color: #dddddd88; */ - border-radius: 1.5rem; - padding: 0.2em; -} -.source-a a { - display: inline-block; - background-color: #aaaaaa50; - border-radius: 1rem; - padding: 0.5em; - text-align: center; - text-overflow: ellipsis; - overflow: hidden; - min-width: 20%; - white-space: nowrap; - margin: 0.2rem 0.1rem; - text-decoration: none !important; - flex: 1; - transition: flex 0.5s; 
-} -.source-a a:hover { - background-color: #aaaaaa20; - flex: 2; -} - -/* 川虎助理 */ -.agent-prefix { - font-size: smaller; - opacity: 0.6; - padding: 6px 0 4px; -} -.agent-prefix::before { - content: '🐯'; - filter: grayscale(); - padding: 0 4px; -} - -/* 亮色(默认) */ -#chuanhu-chatbot { - background-color: var(--chatbot-background-color-light) !important; - color: var(--chatbot-color-light) !important; -} -[data-testid = "bot"] { - background-color: var(--message-bot-background-color-light) !important; -} -[data-testid = "user"] { - background-color: var(--message-user-background-color-light) !important; -} -/* 暗色 */ -.dark #chuanhu-chatbot { - background-color: var(--chatbot-background-color-dark) !important; - color: var(--chatbot-color-dark) !important; -} -.dark [data-testid = "bot"] { - background-color: var(--message-bot-background-color-dark) !important; -} -.dark [data-testid = "user"] { - background-color: var(--message-user-background-color-dark) !important; -} - -/* 对话气泡 */ -.message { - border-radius: var(--radius-xl) !important; - border: none; - padding: var(--spacing-xl) !important; - font-size: var(--text-md) !important; - line-height: var(--line-md) !important; - min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); - min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); -} -[data-testid = "bot"] { - max-width: calc(85% - 38px); - border-top-left-radius: 0 !important; -} -[data-testid = "user"] { - max-width: calc(85% - 38px); - width: auto !important; - border-top-right-radius: 0 !important; -} - -/* 屏幕宽度大于等于500px的设备 */ -/* update on 2023.4.8: 高度的细致调整已写入JavaScript */ -@media screen and (min-width: 500px) { - #chuanhu-chatbot { - height: calc(100vh - 200px); - } - #chuanhu-chatbot>.wrapper>.wrap { - max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) ); - } -} -/* 屏幕宽度小于500px的设备 */ -@media screen and (max-width: 499px) { - #chuanhu-chatbot { - height: calc(100vh - 140px); - } - #chuanhu-chatbot>.wrapper>.wrap { - max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) ); - } - [data-testid = "bot"] { - max-width: calc(98% - 20px) !important; - } - .chatbot-avatar { - display: none; - } - #app-title h1{ - letter-spacing: -1px; font-size: 22px; - } -} - -#chuanhu-chatbot>.wrapper>.wrap { - overflow-x: hidden; -} - -.message.user p { - white-space: pre-wrap; -} -.message .user-message { - display: block; - padding: 0 !important; - white-space: pre-wrap; -} - -.message .md-message p { - margin-top: 0.6em !important; - margin-bottom: 0.6em !important; -} -.message .md-message p:first-child { margin-top: 0 !important; } -.message .md-message p:last-of-type { margin-bottom: 0 !important; } - -.message .md-message { - display: block; - padding: 0 !important; -} -.message .raw-message p { - margin:0 !important; -} -.message .raw-message { - display: block; - padding: 0 !important; - white-space: pre-wrap; -} -.message .hideM { - display: none; -} - -/* custom buttons */ -.chuanhu-btn { - border-radius: 5px; - /* background-color: #E6E6E6 !important; */ - color: rgba(120, 120, 120, 0.64) !important; - padding: 4px !important; - position: absolute; - right: -22px; - cursor: pointer !important; - transition: color .2s ease, background-color .2s ease; -} -.chuanhu-btn:hover { - background-color: rgba(167, 167, 167, 0.25) !important; - color: unset !important; -} -.chuanhu-btn:active { - background-color: rgba(167, 167, 167, 0.5) !important; -} -.chuanhu-btn:focus { - outline: none; -} - 
-.copy-bot-btn { - /* top: 18px; */ - bottom: 0; -} -.toggle-md-btn { - /* top: 0; */ - bottom: 20px; -} - -/* note: this is deprecated */ -.copy-code-btn { - position: relative; - float: right; - font-size: 1em; - cursor: pointer; -} -/* note: the button below disabled in chatbot.py */ -.message div.icon-button > button[title="copy"] { - display: none; -} - - -/* history message */ -.wrapper>.wrap>.history-message { - padding-bottom: 10px !important; -} -.history-message { - /* padding: 0 !important; */ - opacity: 80%; - display: flex; - flex-direction: column; -} -.history-message>.history-message { - padding: 0 !important; -} -.history-message>.message-wrap { - padding: 0 !important; - margin-bottom: 16px; -} -.history-message>.message { - margin-bottom: 16px; -} -.wrapper>.wrap>.history-message::after { - content: ""; - display: block; - height: 2px; - background-color: var(--body-text-color-subdued); - margin-bottom: 10px; - margin-top: -10px; - clear: both; -} -.wrapper>.wrap>.history-message>:last-child::after { - content: "仅供查看"; - display: block; - text-align: center; - color: var(--body-text-color-subdued); - font-size: 0.8em; -} - -/* #chuanhu-chatbot { - transition: height 0.3s ease; - note: find it better without transition animation...; -} */ - - -.message-row { - flex-direction: row; - display: flex; - gap: 8px; - width: 100%; -} -.bot-message-row { - justify-content: flex-start; -} -.user-message-row { - justify-content: flex-end; -} -.chatbot-avatar { - width: 32px; - height: 32px; - background-color: transparent; - background-size: cover; - border-radius: 5px !important; -} -.chatbot-avatar.bot-avatar { - margin-left: 5px; -} -.chatbot-avatar.user-avatar { - margin-right: 10px; -} -.chatbot-avatar img { - border-radius: 5px !important; - object-fit: cover; - width: 100%; - height: 100%; -} \ No newline at end of file diff --git a/spaces/Kangarroar/ApplioRVC-Inference/go-applio-manager-recode.bat b/spaces/Kangarroar/ApplioRVC-Inference/go-applio-manager-recode.bat deleted file mode 100644 index 91b8acfc0c69a356fd5b1d77650b2cd728b1072b..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/go-applio-manager-recode.bat +++ /dev/null @@ -1,322 +0,0 @@ -@echo off -title Applio Installer - -::: _ _ _____ _ -::: /\ | (_) | __ \ | | -::: / \ _ __ _ __ | |_ ___ | |__) |___ ___ ___ __| | ___ -::: / /\ \ | '_ \| '_ \| | |/ _ \ | _ // _ \/ __/ _ \ / _` |/ _ \ -::: / ____ \| |_) | |_) | | | (_) | | | \ \ __/ (_| (_) | (_| | __/ -::: /_/ \_\ .__/| .__/|_|_|\___/ |_| \_\___|\___\___/ \__,_|\___| -::: | | | | -::: |_| |_| -::: -::: - -setlocal -set "branch=applio-recode" -set "runtime=runtime-recode" -set "repoUrl=https://github.com/IAHispano/Applio-RVC-Fork/archive/refs/heads/%branch%.zip" -set "fixesFolder=fixes" -set "localFixesPy=local_fixes.py" -set "principal=%cd%" -set "URL_BASE=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main" -set "URL_EXTRA=https://huggingface.co/IAHispano/applio/resolve/main" - -:menu -for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A - -echo [1] Reinstall Applio -echo [2] Update Applio -echo [3] Update Applio + Runtime -echo. - -set /p choice=Select an option: -set choice=%choice: =% - -if "%choice%"=="1" ( - cls - echo Starting Applio Reinstaller... - echo. - goto reinstaller - pause - cls - goto menu - -) - -if "%choice%"=="2" ( - cls - echo Starting Applio Updater... - echo. - goto updater - pause - cls - goto menu -) - -if "%choice%"=="3" ( - cls - echo Updating Applio + Runtime... 
- echo. - goto updaterRuntime - pause - cls - goto menu - -) - -cls -echo Invalid option. Please enter a number from 1 to 3. -echo. -echo Press 'Enter' to access the main menu... -pause>nul -cls -goto menu - -:reinstaller - -echo WARNING: Remember to install Microsoft C++ Build Tools, Redistributable, Python, and Git before continuing. -echo. -echo Step-by-step guide: https://rentry.org/appliolocal -echo Build Tools: https://aka.ms/vs/17/release/vs_BuildTools.exe -echo Redistributable: https://aka.ms/vs/17/release/vc_redist.x64.exe -echo Git: https://github.com/git-for-windows/git/releases/download/v2.42.0.windows.2/Git-2.42.0.2-64-bit.exe -echo Python: Add this route to the windows enviroment variables the user path variable: %principal%\runtime\Scripts -echo. -pause -cls - -echo Downloading ZIP file... -powershell -command "& { Invoke-WebRequest -Uri '%repoUrl%' -OutFile '%principal%\repo.zip' }" -echo. - -echo Extracting ZIP file... -powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('%principal%\repo.zip', '%principal%') }" -echo. - -echo Copying folder and file structure from subdirectory to main directory... -robocopy "%principal%\Applio-RVC-Fork-%branch%" "%principal%" /E -echo. - -echo Deleting contents of subdirectory (files and folders)... -rmdir "%principal%\Applio-RVC-Fork-%branch%" /S /Q -echo. - -echo Cleaning up... -del "%principal%\repo.zip" -echo. -cls - -echo Proceeding to download the models... -echo. - -echo WARNING: At this point, it's recommended to disable antivirus or firewall, as errors might occur when downloading pretrained models. -pause -cls - -echo Downloading models in the assets folder... -cd "assets" -echo. -echo Downloading the "pretrained" folder... -cd "pretrained" -curl -LJO "%URL_BASE%/pretrained/D32k.pth" -curl -LJO "%URL_BASE%/pretrained/D40k.pth" -curl -LJO "%URL_BASE%/pretrained/D48k.pth" -curl -LJO "%URL_BASE%/pretrained/G32k.pth" -curl -LJO "%URL_BASE%/pretrained/G40k.pth" -curl -LJO "%URL_BASE%/pretrained/G48k.pth" -curl -LJO "%URL_BASE%/pretrained/f0D32k.pth" -curl -LJO "%URL_BASE%/pretrained/f0D40k.pth" -curl -LJO "%URL_BASE%/pretrained/f0D48k.pth" -curl -LJO "%URL_BASE%/pretrained/f0G32k.pth" -curl -LJO "%URL_BASE%/pretrained/f0G40k.pth" -curl -LJO "%URL_BASE%/pretrained/f0G48k.pth" -cd ".." -echo. -cls - -echo Downloading the "pretrained_v2" folder... -cd "pretrained_v2" -curl -LJO "%URL_BASE%/pretrained_v2/D32k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/D40k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/D48k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/G32k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/G40k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/G48k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0D32k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0D40k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0D48k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0G32k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0G40k.pth" -curl -LJO "%URL_BASE%/pretrained_v2/f0G48k.pth" -cd ".." -echo. -cls - -echo Downloading the hubert_base.pt file... -cd "hubert" -curl -LJO "%URL_BASE%/hubert_base.pt" -cd ".." -echo. -cls - - -echo Downloading the rmvpe.pt file... -cd "rmvpe" -curl -LJO "%URL_BASE%/rmvpe.pt" -echo. -cls - -echo Downloading the rmvpe.onnx file... -curl -LJO "%URL_BASE%/rmvpe.onnx" -cd ".." -cd ".." -echo. -cls - -echo Downloading the rest of the large files - -echo Downloading the "uvr5_weights" folder... 
-cd "uvr5_weights" -curl -LJO "%URL_BASE%/uvr5_weights/HP2_all_vocals.pth" -curl -LJO "%URL_BASE%/uvr5_weights/HP3_all_vocals.pth" -curl -LJO "%URL_BASE%/uvr5_weights/HP5_only_main_vocal.pth" -curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoAggressive.pth" -curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoDeReverb.pth" -curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoNormal.pth" -cd ".." -echo. -cls - -echo Downloading the ffmpeg.exe file... -curl -LJO "%URL_BASE%/ffmpeg.exe" -echo. -cls - -echo Downloading the ffprobe.exe file... -curl -LJO "%URL_BASE%/ffprobe.exe" -echo. -cls - -echo Downloading the runtime.zip file... -curl -LJO "%URL_EXTRA%/%runtime%.zip" -echo. -cls - -echo Extracting the runtime.zip file, this might take a while... -powershell -Command "Expand-Archive -Path '%runtime%.zip' -DestinationPath '.'" -del %runtime%.zip -echo. -cls - -echo Downloads completed! -echo. - -echo Checking if the local_fixes.py file exists in the Fixes folder... -if exist "%fixesFolder%\%localFixesPy%" ( - echo Running the file... - runtime\python.exe "%fixesFolder%\%localFixesPy%" -) else ( - echo The "%localFixesPy%" file was not found in the "Fixes" folder. -) -echo. - -echo Fixes Applied! -echo. - -echo Applio has been reinstalled! -echo. -echo Press 'Enter' to access the main menu... -pause>nul -cls -goto menu - - -:updater - -echo Downloading the ZIP file... -powershell -command "& { Invoke-WebRequest -Uri '%repoUrl%' -OutFile '%principal%\repo.zip' }" -echo. - -echo Extracting ZIP file... -powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('%principal%\repo.zip', '%principal%') }" -echo. - -echo Copying folder and file structure from subdirectory to main directory... -robocopy "%principal%\Applio-RVC-Fork-%branch%" "%principal%" /E -echo. - -echo Deleting contents of the subdirectory (files and folders)... -rmdir "%principal%\Applio-RVC-Fork-%branch%" /S /Q -echo. - -echo Cleaning up... -del "%principal%\repo.zip" -echo. -cls - -echo Verifying if the local_fixes.py file exists in the Fixes folder... -if exist "%fixesFolder%\%localFixesPy%" ( - echo Running the file... - runtime\python.exe "%fixesFolder%\%localFixesPy%" -) else ( - echo The file "%localFixesPy%" was not found in the "Fixes" folder. -) -echo. - -echo Applio has been updated! -echo. -echo Press 'Enter' to access the main menu... -pause>nul -cls -goto menu - - -:updaterRuntime - -echo Downloading the ZIP file... -powershell -command "& { Invoke-WebRequest -Uri '%repoUrl%' -OutFile '%principal%\repo.zip' }" -echo. - -echo Extracting ZIP file... -powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('%principal%\repo.zip', '%principal%') }" -echo. - -echo Copying folder and file structure from subdirectory to main directory... -robocopy "%principal%\Applio-RVC-Fork-%branch%" "%principal%" /E -echo. - -echo Deleting contents of the subdirectory (files and folders)... -rmdir "%principal%\Applio-RVC-Fork-%branch%" /S /Q -echo. - -echo Cleaning up... -del "%principal%\repo.zip" -echo. -cls - -echo Downloading the runtime.zip file... -curl -LJO "%URL_EXTRA%/%runtime%.zip" -echo. -cls -echo Extracting the runtime.zip file, this might take a while... -powershell -Command "Expand-Archive -Path '%runtime%.zip' -DestinationPath '.'" -del runtime.zip -echo. -cls - -echo Verifying if the local_fixes.py file exists in the Fixes folder... 
-if exist "%fixesFolder%\%localFixesPy%" ( - echo Running the file... - runtime\python.exe "%fixesFolder%\%localFixesPy%" -) else ( - echo The file "%localFixesPy%" was not found in the "Fixes" folder. -) -echo. - -echo Applio has been updated! -echo. -echo Press 'Enter' to access the main menu... -pause>nul -cls -goto menu diff --git a/spaces/KenjieDec/GPEN/retinaface/utils/nms/__init__.py b/spaces/KenjieDec/GPEN/retinaface/utils/nms/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/KyanChen/RSPrompter/mmpl/evaluation/metrics/__init__.py b/spaces/KyanChen/RSPrompter/mmpl/evaluation/metrics/__init__.py deleted file mode 100644 index 17edbe4e4d3a1defddeb23deceba504f9058c43e..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/evaluation/metrics/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .builder import PL_METRICS -from .coco_pl_metric import CocoPLMetric -from .mean_ap import PLMeanAveragePrecision diff --git a/spaces/Kyllano/ShrimpClassifier/README.md b/spaces/Kyllano/ShrimpClassifier/README.md deleted file mode 100644 index da80294c4c143c1dad0b0e34bb66cef3693137bf..0000000000000000000000000000000000000000 --- a/spaces/Kyllano/ShrimpClassifier/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ShrimpClassifier -emoji: 🚀 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Lippppxy/AiAnimeVoice/utils.py b/spaces/Lippppxy/AiAnimeVoice/utils.py deleted file mode 100644 index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000 --- a/spaces/Lippppxy/AiAnimeVoice/utils.py +++ /dev/null @@ -1,225 +0,0 @@ -import os -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -import librosa -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, 
ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_audio_to_torch(full_path, target_sampling_rate): - audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True) - return torch.FloatTensor(audio.astype(np.float32)) - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/det_pipelines/dbnet_pipeline.py b/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/det_pipelines/dbnet_pipeline.py deleted file mode 100644 index 40eee02db3b68d5682841532d1122c92bdca2a65..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/det_pipelines/dbnet_pipeline.py +++ /dev/null @@ -1,88 +0,0 @@ -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -train_pipeline_r18 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5), - dict(type='Normalize', **img_norm_cfg), - dict( - type='ImgAug', - args=[['Fliplr', 0.5], - dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]), - dict(type='EastRandomCrop', target_size=(640, 640)), - dict(type='DBNetTargets', shrink_ratio=0.4), - dict(type='Pad', size_divisor=32), - dict( - type='CustomFormatBundle', - keys=['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'], - visualize=dict(flag=False, boundary_key='gt_shrink')), - dict( - type='Collect', - keys=['img', 'gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask']) -] - -test_pipeline_1333_736 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 736), # used by Resize - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# for dbnet_r50dcnv2_fpnc -img_norm_cfg_r50dcnv2 = dict( - mean=[122.67891434, 116.66876762, 104.00698793], - std=[58.395, 57.12, 57.375], - to_rgb=True) - -train_pipeline_r50dcnv2 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='LoadTextAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5), - dict(type='Normalize', **img_norm_cfg_r50dcnv2), - dict( - type='ImgAug', - args=[['Fliplr', 0.5], - dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]), - dict(type='EastRandomCrop', 
target_size=(640, 640)), - dict(type='DBNetTargets', shrink_ratio=0.4), - dict(type='Pad', size_divisor=32), - dict( - type='CustomFormatBundle', - keys=['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'], - visualize=dict(flag=False, boundary_key='gt_shrink')), - dict( - type='Collect', - keys=['img', 'gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask']) -] - -test_pipeline_4068_1024 = [ - dict(type='LoadImageFromFile', color_type='color_ignore_orientation'), - dict( - type='MultiScaleFlipAug', - img_scale=(4068, 1024), # used by Resize - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='Normalize', **img_norm_cfg_r50dcnv2), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] diff --git a/spaces/Madhuri/vqa_audiobot/model/predictor.py b/spaces/Madhuri/vqa_audiobot/model/predictor.py deleted file mode 100644 index 66dda44baadb0a19f2cdf7773fa3155bbb212aec..0000000000000000000000000000000000000000 --- a/spaces/Madhuri/vqa_audiobot/model/predictor.py +++ /dev/null @@ -1,74 +0,0 @@ -from happytransformer import HappyTextToText, TTSettings -from transformers import ViltProcessor -from transformers import ViltForQuestionAnswering -from transformers import AutoTokenizer -from transformers import AutoModelForSeq2SeqLM -from joblib import load - -import os -import re -import string -import torch -import pandas as pd - -''' -Visual Question Answering Model to generate answer statement for -question. -''' - - -class Predictor: - def __init__(self): - auth_token = os.environ.get('TOKEN') or True - self.vqa_processor = ViltProcessor.from_pretrained( - 'dandelin/vilt-b32-finetuned-vqa') - self.vqa_model = ViltForQuestionAnswering.from_pretrained( - 'dandelin/vilt-b32-finetuned-vqa') - self.qa_model = AutoModelForSeq2SeqLM.from_pretrained( - 'Madhuri/t5_small_vqa_fs', use_auth_token=auth_token) - self.qa_tokenizer = AutoTokenizer.from_pretrained( - 'Madhuri/t5_small_vqa_fs', use_auth_token=auth_token) - self.happy_tt = HappyTextToText( - "T5", "vennify/t5-base-grammar-correction") - self.tt_args = TTSettings(num_beams=5, min_length=1) - model_path= os.path.join( os.path.dirname(os.path.abspath(__file__)), 'qa_classifier.joblib') - self.qa_classifier = load(model_path) - - def is_valid_question(self, question): - df=pd.DataFrame() - df['sentence']=[question] - return self.qa_classifier.predict(df['sentence'])[0] == 1 - - def predict_answer_from_text(self, image, input): - if image is None: - return 'Please select an image and ask a question...' - - chars = re.escape(string.punctuation) - question = re.sub(r'['+chars+']', '', input) - if not question or len(question.split()) < 3: - return 'I cannot understand, please ask a valid question...' - - if not self.is_valid_question(question): - return 'I can understand only questions, can you please ask a valid question...' - - # process question using image model - encoding = self.vqa_processor(image, question, return_tensors='pt') - with torch.no_grad(): - outputs = self.vqa_model(**encoding) - short_answer = self.vqa_model.config.id2label[outputs.logits.argmax( - -1).item()] - - # generate statement using sentence generator model - prompt = question + '. 
' + short_answer - input_ids = self.qa_tokenizer(prompt, return_tensors='pt').input_ids - with torch.no_grad(): - output_ids = self.qa_model.generate(input_ids) - answers = self.qa_tokenizer.batch_decode( - output_ids, skip_special_tokens=True) - - # Correct the grammar of the answer - answer = self.happy_tt.generate_text( - 'grammar: ' + answers[0], args=self.tt_args).text - print( - f'question - {question}, answer - {answer}, original_answer - {answers[0]}') - return answer diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/three_interpolate.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/three_interpolate.py deleted file mode 100644 index 203f47f05d58087e034fb3cd8cd6a09233947b4a..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/three_interpolate.py +++ /dev/null @@ -1,68 +0,0 @@ -from typing import Tuple - -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['three_interpolate_forward', 'three_interpolate_backward']) - - -class ThreeInterpolate(Function): - """Performs weighted linear interpolation on 3 features. - - Please refer to `Paper of PointNet++ `_ - for more details. - """ - - @staticmethod - def forward(ctx, features: torch.Tensor, indices: torch.Tensor, - weight: torch.Tensor) -> torch.Tensor: - """ - Args: - features (Tensor): (B, C, M) Features descriptors to be - interpolated - indices (Tensor): (B, n, 3) index three nearest neighbors - of the target features in features - weight (Tensor): (B, n, 3) weights of interpolation - - Returns: - Tensor: (B, C, N) tensor of the interpolated features - """ - assert features.is_contiguous() - assert indices.is_contiguous() - assert weight.is_contiguous() - - B, c, m = features.size() - n = indices.size(1) - ctx.three_interpolate_for_backward = (indices, weight, m) - output = torch.cuda.FloatTensor(B, c, n) - - ext_module.three_interpolate_forward( - features, indices, weight, output, b=B, c=c, m=m, n=n) - return output - - @staticmethod - def backward( - ctx, grad_out: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Args: - grad_out (Tensor): (B, C, N) tensor with gradients of outputs - - Returns: - Tensor: (B, C, M) tensor with gradients of features - """ - idx, weight, m = ctx.three_interpolate_for_backward - B, c, n = grad_out.size() - - grad_features = torch.cuda.FloatTensor(B, c, m).zero_() - grad_out_data = grad_out.data.contiguous() - - ext_module.three_interpolate_backward( - grad_out_data, idx, weight, grad_features.data, b=B, c=c, n=n, m=m) - return grad_features, None, None - - -three_interpolate = ThreeInterpolate.apply diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py deleted file mode 100644 index d02122ca0e68743b1bf7a893afae96042f23838c..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py +++ /dev/null @@ -1,57 +0,0 @@ -from abc import ABCMeta, abstractmethod - -from .decode_head import BaseDecodeHead - - -class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta): - """Base class for cascade decode head used in - :class:`CascadeEncoderDecoder.""" - - def __init__(self, *args, **kwargs): - super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs) - 
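For reference, the ThreeInterpolate op above only wraps a CUDA kernel; its documented forward behaviour ((B, C, M) source features, (B, n, 3) neighbor indices and weights, returning (B, C, n)) can be sketched in plain PyTorch as below. This is an illustrative CPU reference with my own function name, not the mmcv extension itself:

```python
import torch

def three_interpolate_reference(features: torch.Tensor,
                                indices: torch.Tensor,
                                weight: torch.Tensor) -> torch.Tensor:
    # features: (B, C, M) float, indices: (B, n, 3) long, weight: (B, n, 3) float
    B, C, M = features.shape
    n = indices.shape[1]
    idx = indices.unsqueeze(1).expand(B, C, n, 3)          # broadcast indices over channels
    src = features.unsqueeze(-1).expand(B, C, M, 3)
    neighbors = torch.gather(src, 2, idx)                  # (B, C, n, 3) neighbor features
    return (neighbors * weight.unsqueeze(1)).sum(dim=-1)   # weighted sum -> (B, C, n)
```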
- @abstractmethod - def forward(self, inputs, prev_output): - """Placeholder of forward function.""" - pass - - def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, - train_cfg): - """Forward function for training. - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. - train_cfg (dict): The training config. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - seg_logits = self.forward(inputs, prev_output) - losses = self.losses(seg_logits, gt_semantic_seg) - - return losses - - def forward_test(self, inputs, prev_output, img_metas, test_cfg): - """Forward function for testing. - - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - test_cfg (dict): The testing config. - - Returns: - Tensor: Output segmentation map. - """ - return self.forward(inputs, prev_output) diff --git a/spaces/MiloSobral/PortiloopDemo/portiloop/src/demo/phase_demo.py b/spaces/MiloSobral/PortiloopDemo/portiloop/src/demo/phase_demo.py deleted file mode 100644 index 1f9b3c2ed220158673dc6f4e2ebfa54d9bc0950e..0000000000000000000000000000000000000000 --- a/spaces/MiloSobral/PortiloopDemo/portiloop/src/demo/phase_demo.py +++ /dev/null @@ -1,63 +0,0 @@ -import gradio as gr - -from portiloop.src.demo.offline import run_offline - - -def on_upload_file(file): - # Check if file extension is .xdf - if file.name.split(".")[-1] != "xdf": - raise gr.Error("Please upload a .xdf file.") - else: - return file.name - - -def main(): - with gr.Blocks(title="Portiloop") as demo: - gr.Markdown("# Portiloop Demo") - gr.Markdown("This Demo takes as input an XDF file coming from the Portiloop EEG device and allows you to convert it to CSV and perform the following actions:: \n * Filter the data offline \n * Perform offline spindle detection using Wamsley or Lacourse. 
\n * Simulate the Portiloop online filtering and spindle detection with different parameters.") - gr.Markdown("Upload your XDF file and click **Run Inference** to start the processing...") - - with gr.Row(): - xdf_file_button = gr.UploadButton(label="Click to Upload", type="file", file_count="single") - xdf_file_static = gr.File(label="XDF File", type='file', interactive=False) - - xdf_file_button.upload(on_upload_file, xdf_file_button, xdf_file_static) - - # Make a checkbox group for the options - detect_filter = gr.CheckboxGroup(['Offline Filtering', 'Lacourse Detection', 'Wamsley Detection', 'Online Filtering', 'Online Detection'], type='index', label="Filtering/Detection options") - - # Options for phase stimulation - with gr.Row(): - # Dropwdown for phase - phase = gr.Dropdown(choices=["Peak", "Fast", "Valley"], value="Peak", label="Phase", interactive=True) - buffer_time = gr.Slider(0, 1, value=0.3, step=0.01, label="Buffer Time", interactive=True) - - # Threshold value - threshold = gr.Slider(0, 1, value=0.82, step=0.01, label="Threshold", interactive=True) - # Detection Channel - detect_channel = gr.Dropdown(choices=["1", "2", "3", "4", "5", "6", "7", "8"], value="2", label="Detection Channel in XDF recording", interactive=True) - # Frequency - freq = gr.Dropdown(choices=["100", "200", "250", "256", "500", "512", "1000", "1024"], value="250", label="Sampling Frequency (Hz)", interactive=True) - - with gr.Row(): - output_array = gr.File(label="Output CSV File") - output_table = gr.Markdown(label="Output Table") - - run_inference = gr.Button(value="Run Inference") - run_inference.click( - fn=run_offline, - inputs=[ - xdf_file_static, - detect_filter, - threshold, - detect_channel, - freq, - phase, - buffer_time], - outputs=[output_array, output_table]) - - demo.queue() - demo.launch(share=False) - -if __name__ == "__main__": - main() diff --git a/spaces/NCTCMumbai/NCTC/models/official/core/base_task.py b/spaces/NCTCMumbai/NCTC/models/official/core/base_task.py deleted file mode 100644 index 31811cbe6606fac61b664973717f4c75b6b4b37b..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/core/base_task.py +++ /dev/null @@ -1,303 +0,0 @@ -# Lint as: python3 -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Defines the base task abstraction.""" -import abc -import functools -from typing import Any, Callable, Optional - -import six -import tensorflow as tf - -from official.modeling.hyperparams import config_definitions as cfg -from official.utils import registry - - -@six.add_metaclass(abc.ABCMeta) -class Task(tf.Module): - """A single-replica view of training procedure. - - Tasks provide artifacts for training/evalution procedures, including - loading/iterating over Datasets, initializing the model, calculating the loss - and customized metrics with reduction. 
- """ - - # Special keys in train/validate step returned logs. - loss = "loss" - - def __init__(self, params: cfg.TaskConfig): - self._task_config = params - - @property - def task_config(self) -> cfg.TaskConfig: - return self._task_config - - def initialize(self, model: tf.keras.Model): - """A callback function used as CheckpointManager's init_fn. - - This function will be called when no checkpoint found for the model. - If there is a checkpoint, the checkpoint will be loaded and this function - will not be called. You can use this callback function to load a pretrained - checkpoint, saved under a directory other than the model_dir. - - Args: - model: The keras.Model built or used by this task. - """ - pass - - @abc.abstractmethod - def build_model(self) -> tf.keras.Model: - """Creates the model architecture. - - Returns: - A model instance. - """ - - def compile_model(self, - model: tf.keras.Model, - optimizer: tf.keras.optimizers.Optimizer, - loss=None, - train_step: Optional[Callable[..., Any]] = None, - validation_step: Optional[Callable[..., Any]] = None, - **kwargs) -> tf.keras.Model: - """Compiles the model with objects created by the task. - - The method should not be used in any customized training implementation. - - Args: - model: a keras.Model. - optimizer: the keras optimizer. - loss: a callable/list of losses. - train_step: optional train step function defined by the task. - validation_step: optional validation_step step function defined by the - task. - **kwargs: other kwargs consumed by keras.Model compile(). - - Returns: - a compiled keras.Model. - """ - if bool(loss is None) == bool(train_step is None): - raise ValueError("`loss` and `train_step` should be exclusive to " - "each other.") - model.compile(optimizer=optimizer, loss=loss, **kwargs) - - if train_step: - model.train_step = functools.partial( - train_step, model=model, optimizer=model.optimizer) - if validation_step: - model.test_step = functools.partial(validation_step, model=model) - return model - - @abc.abstractmethod - def build_inputs(self, - params: cfg.DataConfig, - input_context: Optional[tf.distribute.InputContext] = None): - """Returns a dataset or a nested structure of dataset functions. - - Dataset functions define per-host datasets with the per-replica batch size. - - Args: - params: hyperparams to create input pipelines. - input_context: optional distribution input pipeline context. - - Returns: - A nested structure of per-replica input functions. - """ - - def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor: - """Standard interface to compute losses. - - Args: - labels: optional label tensors. - model_outputs: a nested structure of output tensors. - aux_losses: auxiliarly loss tensors, i.e. `losses` in keras.Model. - - Returns: - The total loss tensor. - """ - del model_outputs, labels - - if aux_losses is None: - losses = [tf.constant(0.0, dtype=tf.float32)] - else: - losses = aux_losses - total_loss = tf.add_n(losses) - return total_loss - - def build_metrics(self, training: bool = True): - """Gets streaming metrics for training/validation.""" - del training - return [] - - def process_metrics(self, metrics, labels, model_outputs): - """Process and update metrics. Called when using custom training loop API. - - Args: - metrics: a nested structure of metrics objects. - The return of function self.build_metrics. - labels: a tensor or a nested structure of tensors. - model_outputs: a tensor or a nested structure of tensors. 
- For example, output of the keras model built by self.build_model. - """ - for metric in metrics: - metric.update_state(labels, model_outputs) - - def process_compiled_metrics(self, compiled_metrics, labels, model_outputs): - """Process and update compiled_metrics. call when using compile/fit API. - - Args: - compiled_metrics: the compiled metrics (model.compiled_metrics). - labels: a tensor or a nested structure of tensors. - model_outputs: a tensor or a nested structure of tensors. - For example, output of the keras model built by self.build_model. - """ - compiled_metrics.update_state(labels, model_outputs) - - def train_step(self, - inputs, - model: tf.keras.Model, - optimizer: tf.keras.optimizers.Optimizer, - metrics=None): - """Does forward and backward. - - Args: - inputs: a dictionary of input tensors. - model: the model, forward pass definition. - optimizer: the optimizer for this training step. - metrics: a nested structure of metrics objects. - - Returns: - A dictionary of logs. - """ - if isinstance(inputs, tuple) and len(inputs) == 2: - features, labels = inputs - else: - features, labels = inputs, inputs - with tf.GradientTape() as tape: - outputs = model(features, training=True) - # Computes per-replica loss. - loss = self.build_losses( - labels=labels, model_outputs=outputs, aux_losses=model.losses) - # Scales loss as the default gradients allreduce performs sum inside the - # optimizer. - scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync - - # For mixed precision, when a LossScaleOptimizer is used, the loss is - # scaled to avoid numeric underflow. - if isinstance(optimizer, - tf.keras.mixed_precision.experimental.LossScaleOptimizer): - scaled_loss = optimizer.get_scaled_loss(scaled_loss) - - tvars = model.trainable_variables - grads = tape.gradient(scaled_loss, tvars) - - if isinstance(optimizer, - tf.keras.mixed_precision.experimental.LossScaleOptimizer): - grads = optimizer.get_unscaled_gradients(grads) - optimizer.apply_gradients(list(zip(grads, tvars))) - logs = {self.loss: loss} - if metrics: - self.process_metrics(metrics, labels, outputs) - logs.update({m.name: m.result() for m in metrics}) - elif model.compiled_metrics: - self.process_compiled_metrics(model.compiled_metrics, labels, outputs) - logs.update({m.name: m.result() for m in model.metrics}) - return logs - - def validation_step(self, inputs, model: tf.keras.Model, metrics=None): - """Validatation step. - - Args: - inputs: a dictionary of input tensors. - model: the keras.Model. - metrics: a nested structure of metrics objects. - - Returns: - A dictionary of logs. 
- """ - if isinstance(inputs, tuple) and len(inputs) == 2: - features, labels = inputs - else: - features, labels = inputs, inputs - outputs = self.inference_step(features, model) - loss = self.build_losses( - labels=labels, model_outputs=outputs, aux_losses=model.losses) - logs = {self.loss: loss} - if metrics: - self.process_metrics(metrics, labels, outputs) - logs.update({m.name: m.result() for m in metrics}) - elif model.compiled_metrics: - self.process_compiled_metrics(model.compiled_metrics, labels, outputs) - logs.update({m.name: m.result() for m in model.metrics}) - return logs - - def inference_step(self, inputs, model: tf.keras.Model): - """Performs the forward step.""" - return model(inputs, training=False) - - def aggregate_logs(self, state, step_logs): - """Optional aggregation over logs returned from a validation step.""" - pass - - def reduce_aggregated_logs(self, aggregated_logs): - """Optional reduce of aggregated logs over validation steps.""" - return {} - - -_REGISTERED_TASK_CLS = {} - - -# TODO(b/158268740): Move these outside the base class file. -# TODO(b/158741360): Add type annotations once pytype checks across modules. -def register_task_cls(task_config_cls): - """Decorates a factory of Tasks for lookup by a subclass of TaskConfig. - - This decorator supports registration of tasks as follows: - - ``` - @dataclasses.dataclass - class MyTaskConfig(TaskConfig): - # Add fields here. - pass - - @register_task_cls(MyTaskConfig) - class MyTask(Task): - # Inherits def __init__(self, task_config). - pass - - my_task_config = MyTaskConfig() - my_task = get_task(my_task_config) # Returns MyTask(my_task_config). - ``` - - Besisdes a class itself, other callables that create a Task from a TaskConfig - can be decorated by the result of this function, as long as there is at most - one registration for each config class. - - Args: - task_config_cls: a subclass of TaskConfig (*not* an instance of TaskConfig). - Each task_config_cls can only be used for a single registration. - - Returns: - A callable for use as class decorator that registers the decorated class - for creation from an instance of task_config_cls. - """ - return registry.register(_REGISTERED_TASK_CLS, task_config_cls) - - -# The user-visible get_task() is defined after classes have been registered. -# TODO(b/158741360): Add type annotations once pytype checks across modules. -def get_task_cls(task_config_cls): - task_cls = registry.lookup(_REGISTERED_TASK_CLS, task_config_cls) - return task_cls diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/embedding_layer.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/embedding_layer.py deleted file mode 100644 index 6694e2b42af47673ee3ce0b9572ec5867d69cb7d..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/embedding_layer.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2018 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Implementation of embedding layer with shared weights.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - - -class EmbeddingSharedWeights(tf.keras.layers.Layer): - """Calculates input embeddings and pre-softmax linear with shared weights.""" - - def __init__(self, vocab_size, hidden_size): - """Specify characteristic parameters of embedding layer. - - Args: - vocab_size: Number of tokens in the embedding. (Typically ~32,000) - hidden_size: Dimensionality of the embedding. (Typically 512 or 1024) - """ - super(EmbeddingSharedWeights, self).__init__() - self.vocab_size = vocab_size - self.hidden_size = hidden_size - - def build(self, input_shape): - """Build embedding layer.""" - with tf.name_scope("embedding_and_softmax"): - # Create and initialize weights. The random normal initializer was chosen - # arbitrarily, and works well. - self.shared_weights = self.add_weight( - "weights", - shape=[self.vocab_size, self.hidden_size], - initializer=tf.random_normal_initializer( - mean=0., stddev=self.hidden_size**-0.5)) - super(EmbeddingSharedWeights, self).build(input_shape) - - def get_config(self): - return { - "vocab_size": self.vocab_size, - "hidden_size": self.hidden_size, - } - - def call(self, inputs, mode="embedding"): - """Get token embeddings of inputs. - - Args: - inputs: An int64 tensor with shape [batch_size, length] - mode: string, a valid value is one of "embedding" and "linear". - Returns: - outputs: (1) If mode == "embedding", output embedding tensor, float32 with - shape [batch_size, length, embedding_size]; (2) mode == "linear", output - linear tensor, float32 with shape [batch_size, length, vocab_size]. - Raises: - ValueError: if mode is not valid. - """ - if mode == "embedding": - return self._embedding(inputs) - elif mode == "linear": - return self._linear(inputs) - else: - raise ValueError("mode {} is not valid.".format(mode)) - - def _embedding(self, inputs): - """Applies embedding based on inputs tensor.""" - with tf.name_scope("embedding"): - # Create binary mask of size [batch_size, length] - embeddings = tf.gather(self.shared_weights, inputs) - mask = tf.cast(tf.not_equal(inputs, 0), embeddings.dtype) - embeddings *= tf.expand_dims(mask, -1) - # Scale embedding by the sqrt of the hidden size - embeddings *= self.hidden_size ** 0.5 - - return embeddings - - def _linear(self, inputs): - """Computes logits by running inputs through a linear layer. - - Args: - inputs: A float32 tensor with shape [batch_size, length, hidden_size] - Returns: - float32 tensor with shape [batch_size, length, vocab_size]. 
- """ - with tf.name_scope("presoftmax_linear"): - batch_size = tf.shape(inputs)[0] - length = tf.shape(inputs)[1] - - x = tf.reshape(inputs, [-1, self.hidden_size]) - logits = tf.matmul(x, self.shared_weights, transpose_b=True) - - return tf.reshape(logits, [batch_size, length, self.vocab_size]) diff --git a/spaces/Najaf-Zawar/Image-Super-Resolution/app.py b/spaces/Najaf-Zawar/Image-Super-Resolution/app.py deleted file mode 100644 index 9e9f1e13a0d108c7d19c3b6475d34f4fc8fced22..0000000000000000000000000000000000000000 --- a/spaces/Najaf-Zawar/Image-Super-Resolution/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import cv2 -import os -import torch -from basicsr.archs.rrdbnet_arch import RRDBNet -from realesrgan import RealESRGANer - -import gradio as gr -import torchvision.transforms as transforms - - -model_path = "Trained_ESRGAN.pth" -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) -upsampler = RealESRGANer(scale=4, model_path=model_path, model=model) - - - -def esrgan(input_image): - output_img, _ = upsampler.enhance(input_image, outscale=3.5) - filename = "output.jpg" - output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2RGB) - cv2.imwrite(filename, output_img) - return filename - - -# Define the Gradio app interface -inputs = gr.Image(label="Input Image") -outputs = gr.Image(label="Enhanced_Image.") -title = "Image Super-Resolution Using ESR-GAN" -description = "Enhance the Quality of your Low Resolution Images To High Resolution Using Artificial Intelligence" - -iface = gr.Interface(fn=esrgan, inputs=inputs, outputs=outputs, title=title, description=description, allow_flagging="never") - -iface.launch(inline = False) \ No newline at end of file diff --git a/spaces/Navneet574/Heart_Disease_Prediciton/README.md b/spaces/Navneet574/Heart_Disease_Prediciton/README.md deleted file mode 100644 index 0da0926a8a158552f0a87448f2701fea6f778abe..0000000000000000000000000000000000000000 --- a/spaces/Navneet574/Heart_Disease_Prediciton/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Heart Disease Prediciton -emoji: 🚀 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: cc-by-nc-nd-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Nekomaru180/rvc-model/infer_pack/models.py b/spaces/Nekomaru180/rvc-model/infer_pack/models.py deleted file mode 100644 index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000 --- a/spaces/Nekomaru180/rvc-model/infer_pack/models.py +++ /dev/null @@ -1,982 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels 
- self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels 
= out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, 
self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - 
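The SineGen/SourceModuleHnNSF pair being defined here produces the NSF excitation signal: sine harmonics driven by the F0 contour, gated by a voiced/unvoiced mask, with noise added (stronger in unvoiced regions). A heavily simplified NumPy sketch of that idea, which skips the random initial phase and the frame-to-sample upsampling, and replaces the learned harmonic merge with a plain average:

```python
import numpy as np

def simple_nsf_source(f0, sr=40000, harmonic_num=0, sine_amp=0.1, noise_std=0.003):
    # f0: (T,) per-sample F0 in Hz, 0 where unvoiced.
    phase = np.cumsum(f0 / sr)                       # instantaneous phase in cycles
    harmonics = [np.sin(2 * np.pi * (k + 1) * phase) for k in range(harmonic_num + 1)]
    sine = sine_amp * np.mean(harmonics, axis=0)     # stand-in for the learned merge
    uv = (f0 > 0).astype(np.float32)                 # voiced/unvoiced mask
    noise_amp = uv * noise_std + (1 - uv) * sine_amp / 3
    noise = noise_amp * np.random.randn(len(f0))
    return sine * uv + noise
```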
if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - 
self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = 
upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - 
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y_lengths, ds - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - z_slice, ids_slice = commons.rand_slice_segments( - x, y_lengths, self.segment_size - ) - - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice - - def infer( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o, o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 
1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/NimaBoscarino/climategan/figures/human_evaluation.py b/spaces/NimaBoscarino/climategan/figures/human_evaluation.py deleted file mode 100644 index 2889c0a945879830b844259f203612f96f759bef..0000000000000000000000000000000000000000 --- a/spaces/NimaBoscarino/climategan/figures/human_evaluation.py +++ /dev/null @@ -1,208 +0,0 @@ -""" -This script plots the result of the human evaluation on Amazon Mechanical Turk, where -human participants chose between an image from ClimateGAN or from a different method. -""" -print("Imports...", end="") -from argparse import ArgumentParser -import os -import yaml -import numpy as np -import pandas as pd -import seaborn as sns -from pathlib import Path -import matplotlib.pyplot as plt - - -# ----------------------- -# ----- Constants ----- -# ----------------------- - -comparables_dict = { - "munit_flooded": "MUNIT", - "cyclegan": "CycleGAN", - "instagan": "InstaGAN", - "instagan_copypaste": "Mask-InstaGAN", - "painted_ground": "Painted ground", -} - - -# Colors -palette_colorblind = sns.color_palette("colorblind") -color_climategan = palette_colorblind[9] - -palette_colorblind = sns.color_palette("colorblind") -color_munit = palette_colorblind[1] -color_cyclegan = palette_colorblind[2] -color_instagan = palette_colorblind[3] -color_maskinstagan = palette_colorblind[6] -color_paintedground = palette_colorblind[8] -palette_comparables = [ - color_munit, - color_cyclegan, - color_instagan, - color_maskinstagan, - color_paintedground, -] -palette_comparables_light = [ - sns.light_palette(color, n_colors=3)[1] for color in palette_comparables -] - - -def parsed_args(): - """ - Parse and returns command-line args - - Returns: - argparse.Namespace: the parsed arguments - """ - parser = ArgumentParser() - parser.add_argument( - "--input_csv", - default="amt_omni-vs-other.csv", - type=str, - help="CSV containing the results of the human evaluation, pre-processed", - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - help="Output directory", - ) - parser.add_argument( - "--dpi", - default=200, - type=int, - help="DPI for the output images", - ) - parser.add_argument( - "--n_bs", - default=1e6, - type=int, - help="Number of bootrstrap samples", - ) - parser.add_argument( - "--bs_seed", - default=17, - type=int, - help="Bootstrap random seed, for reproducibility", - ) - - return parser.parse_args() - - -if __name__ == "__main__": - # ----------------------------- - # ----- Parse arguments ----- - # ----------------------------- - args = parsed_args() - print("Args:\n" + "\n".join([f" {k:20}: {v}" for k, v in vars(args).items()])) - - # Determine output dir - if args.output_dir is None: - output_dir = Path(os.environ["SLURM_TMPDIR"]) - else: - output_dir = 
Path(args.output_dir) - if not output_dir.exists(): - output_dir.mkdir(parents=True, exist_ok=False) - - # Store args - output_yml = output_dir / "args_human_evaluation.yml" - with open(output_yml, "w") as f: - yaml.dump(vars(args), f) - - # Read CSV - df = pd.read_csv(args.input_csv) - - # Sort Y labels - comparables = df.comparable.unique() - is_climategan_sum = [ - df.loc[df.comparable == c, "climategan"].sum() for c in comparables - ] - comparables = comparables[np.argsort(is_climategan_sum)[::-1]] - - # Plot setup - sns.set(style="whitegrid") - plt.rcParams.update({"font.family": "serif"}) - plt.rcParams.update( - { - "font.serif": [ - "Computer Modern Roman", - "Times New Roman", - "Utopia", - "New Century Schoolbook", - "Century Schoolbook L", - "ITC Bookman", - "Bookman", - "Times", - "Palatino", - "Charter", - "serif" "Bitstream Vera Serif", - "DejaVu Serif", - ] - } - ) - fontsize = "medium" - - # Initialize the matplotlib figure - fig, ax = plt.subplots(figsize=(10.5, 3), dpi=args.dpi) - - # Plot the total (right) - sns.barplot( - data=df.loc[df.is_valid], - x="is_valid", - y="comparable", - order=comparables, - orient="h", - label="comparable", - palette=palette_comparables_light, - ci=None, - ) - - # Plot the left - sns.barplot( - data=df.loc[df.is_valid], - x="climategan", - y="comparable", - order=comparables, - orient="h", - label="climategan", - color=color_climategan, - ci=99, - n_boot=args.n_bs, - seed=args.bs_seed, - errcolor="black", - errwidth=1.5, - capsize=0.1, - ) - - # Draw line at 0.5 - y = np.arange(ax.get_ylim()[1] + 0.1, ax.get_ylim()[0], 0.1) - x = 0.5 * np.ones(y.shape[0]) - ax.plot(x, y, linestyle=":", linewidth=1.5, color="black") - - # Change Y-Tick labels - yticklabels = [comparables_dict[ytick.get_text()] for ytick in ax.get_yticklabels()] - yticklabels_text = ax.set_yticklabels( - yticklabels, fontsize=fontsize, horizontalalignment="right", x=0.96 - ) - for ytl in yticklabels_text: - ax.add_artist(ytl) - - # Remove Y-label - ax.set_ylabel(ylabel="") - - # Change X-Tick labels - xlim = [0.0, 1.1] - xticks = np.arange(xlim[0], xlim[1], 0.1) - ax.set(xticks=xticks) - plt.setp(ax.get_xticklabels(), fontsize=fontsize) - - # Set X-label - ax.set_xlabel(None) - - # Change spines - sns.despine(left=True, bottom=True) - - # Save figure - output_fig = output_dir / "human_evaluation_rate_climategan.png" - fig.savefig(output_fig, dpi=fig.dpi, bbox_inches="tight") diff --git a/spaces/Norod78/Dragness/README.md b/spaces/Norod78/Dragness/README.md deleted file mode 100644 index fd6c1512df2a4ba3346d5a0f1c7db3c4aaa8df69..0000000000000000000000000000000000000000 --- a/spaces/Norod78/Dragness/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Dragness -emoji: 👸 -colorFrom: yellow -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. 
- -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/NotFungibleIO/GFPGAN/README.md b/spaces/NotFungibleIO/GFPGAN/README.md deleted file mode 100644 index 6ea52fba1169fccc56ab3ef7bcdcc322d13da2a8..0000000000000000000000000000000000000000 --- a/spaces/NotFungibleIO/GFPGAN/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GFPGAN -emoji: 😁 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.1.7 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/fairseq_criterion.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/fairseq_criterion.py deleted file mode 100644 index ff4beb02503ea48a6c09596630aad4c710be94b6..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/fairseq_criterion.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import inspect -from typing import Any, Dict, List - -from fairseq import metrics, utils -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.utils import gen_parser_from_dataclass -from torch.nn.modules.loss import _Loss - - -class FairseqCriterion(_Loss): - def __init__(self, task): - super().__init__() - self.task = task - if hasattr(task, "target_dictionary"): - tgt_dict = task.target_dictionary - self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100 - - @classmethod - def add_args(cls, parser): - """Add criterion-specific arguments to the parser.""" - dc = getattr(cls, "__dataclass", None) - if dc is not None: - gen_parser_from_dataclass(parser, dc()) - - @classmethod - def build_criterion(cls, cfg: FairseqDataclass, task): - """Construct a criterion from command-line args.""" - # arguments in the __init__. - init_args = {} - for p in inspect.signature(cls).parameters.values(): - if ( - p.kind == p.POSITIONAL_ONLY - or p.kind == p.VAR_POSITIONAL - or p.kind == p.VAR_KEYWORD - ): - # we haven't implemented inference for these argument types, - # but PRs welcome :) - raise NotImplementedError("{} not supported".format(p.kind)) - - assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY} - - if p.name == "task": - init_args["task"] = task - elif p.name == "cfg": - init_args["cfg"] = cfg - elif hasattr(cfg, p.name): - init_args[p.name] = getattr(cfg, p.name) - elif p.default != p.empty: - pass # we'll use the default value - else: - raise NotImplementedError( - "Unable to infer Criterion arguments, please implement " - "{}.build_criterion".format(cls.__name__) - ) - return cls(**init_args) - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. - - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - raise NotImplementedError - - @staticmethod - def aggregate_logging_outputs( - logging_outputs: List[Dict[str, Any]] - ) -> Dict[str, Any]: - """Aggregate logging outputs from data parallel training.""" - utils.deprecation_warning( - "The aggregate_logging_outputs API is deprecated. " - "Please use the reduce_metrics API instead." 
- ) - raise NotImplementedError - - @classmethod - def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None: - """Aggregate logging outputs from data parallel training.""" - utils.deprecation_warning( - "Criterions should implement the reduce_metrics API. " - "Falling back to deprecated aggregate_logging_outputs API." - ) - agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs) - for k, v in agg_logging_outputs.items(): - if k in {"nsentences", "ntokens", "sample_size"}: - continue - metrics.log_scalar(k, v) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - return False - - -class LegacyFairseqCriterion(FairseqCriterion): - def __init__(self, args, task): - super().__init__(task=task) - self.args = args - - utils.deprecation_warning( - "Criterions should take explicit arguments instead of an " - "argparse.Namespace object, please update your criterion by " - "extending FairseqCriterion instead of LegacyFairseqCriterion." - ) - - @classmethod - def build_criterion(cls, args, task): - """Construct a criterion from command-line args.""" - return cls(args, task) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py deleted file mode 100644 index 5ee9c1be4a59ad3d072412827ab4e9b62dc7434e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass, field -from typing import List - -import torch.optim.lr_scheduler -from omegaconf import II - -from fairseq.dataclass import FairseqDataclass -from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler - - -@dataclass -class ReduceLROnPlateauLRScheduleConfig(FairseqDataclass): - lr_shrink: float = field( - default=0.1, metadata={"help": "shrink factor for annealing"} - ) - lr_threshold: float = field( - default=1e-4, - metadata={ - "help": ( - "threshold for measuring the new optimum, to only focus on " - "significant changes" - ) - }, - ) - lr_patience: int = field( - default=0, - metadata={ - "help": ( - "number of epochs with no improvement after which learning rate will " - "be reduced" - ) - }, - ) - warmup_updates: int = field( - default=0, - metadata={"help": "warmup the learning rate linearly for the first N updates"}, - ) - warmup_init_lr: float = field( - default=-1, - metadata={ - "help": "initial learning rate during warmup phase; default is cfg.lr" - }, - ) - lr: List[float] = II("optimization.lr") - maximize_best_checkpoint_metric: bool = II( - "checkpoint.maximize_best_checkpoint_metric" - ) - - -@register_lr_scheduler( - "reduce_lr_on_plateau", dataclass=ReduceLROnPlateauLRScheduleConfig -) -class ReduceLROnPlateauLRSchedule(FairseqLRScheduler): - """ - Decay the LR by a factor every time the validation loss plateaus. 
- Also comes with optional warmup phase, where we linearly increase - the learning rate from some initial learning rate - (``--warmup-init-lr``) until the configured learning rate - (``--lr``). Thereafter the lr is adjusted according to original - reduce_on_plateau scheme. - - During warmup:: - - lrs = torch.linspace( - cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates - ) - lr = lrs[update_num] - """ - - def __init__(self, cfg: ReduceLROnPlateauLRScheduleConfig, optimizer): - super().__init__(cfg, optimizer) - if len(cfg.lr) > 1: - raise ValueError( - "Cannot use a fixed learning rate schedule with reduce_lr_on_plateau." - " Consider --lr-scheduler=fixed instead." - ) - self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - self.optimizer.optimizer, - patience=cfg.lr_patience, - factor=cfg.lr_shrink, - mode="max" if cfg.maximize_best_checkpoint_metric else "min", - threshold=cfg.lr_threshold, - ) - warmup_end_lr = cfg.lr[0] - # if no warm up, sets initial lr to be cfg.lr[0] - if cfg.warmup_init_lr < 0: - cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr - - # linearly warmup for the first cfg.warmup_updates - if cfg.warmup_updates > 0: - self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates - - # this flag is either set from arg when no warm up, or set by - # step_update() when warmup finishes - self.warmup_end = True if cfg.warmup_updates <= 0 else False - - # initial learning rate - # this self.lr is used only during init and/or warm up period - self.lr = warmup_end_lr if self.warmup_end else cfg.warmup_init_lr - self.optimizer.set_lr(self.lr) - - def state_dict(self): - """Return the LR scheduler state dict.""" - return { - "best": self.lr_scheduler.best, - "last_epoch": self.lr_scheduler.last_epoch, - } - - def load_state_dict(self, state_dict): - """Load an LR scheduler state dict.""" - self.lr_scheduler.best = state_dict["best"] - if "last_epoch" in state_dict: - self.lr_scheduler.last_epoch = state_dict["last_epoch"] - - def step(self, epoch, val_loss=None): - """ - Update the learning rate at the end of the given epoch if warmup - finishes otherwise no update of lr on epoch boundaries - """ - if val_loss is not None and self.warmup_end is True: - self.lr_scheduler.step(val_loss) - else: - self.lr_scheduler.last_epoch = epoch - return self.optimizer.get_lr() - - def step_update(self, num_updates): - """ - Update the learning rate after each update.""" - # if there is warmup - if self.cfg.warmup_updates > 0: - if num_updates <= self.cfg.warmup_updates: - self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step - self.optimizer.set_lr(self.lr) - else: - if self.warmup_end is False: - self.warmup_end = True - # else do nothing - return self.optimizer.get_lr() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py deleted file mode 100644 index 7f30dd98bb19b7bc414790787053efb231855129..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py +++ /dev/null @@ -1,767 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import ( - Embedding, - TransformerDecoderEmbedding, - TransformerDecoderLayer, - TransformerDecoderOutputLayer, - TransformerEncoderEmbedding, - TransformerEncoderLayer, - TransformerEncoderLayerNorm, -) -from fairseq.models import ( - BaseFairseqModel, - FairseqDecoder, - FairseqEncoder, - register_model, - register_model_architecture, -) -from fairseq.models.fairseq_encoder import EncoderOut -from fairseq.models.transformer import ( - base_architecture, - transformer_iwslt_de_en, - transformer_wmt_en_de_big, -) -from fairseq.modules import SinusoidalPositionalEmbedding - - -logger = logging.getLogger(__name__) - - -DEFAULT_MAX_SOURCE_POSITIONS = 1024 -DEFAULT_MAX_TARGET_POSITIONS = 1024 -TORCH_PIPE = False -RPC_INIT = False - -def import_pipe(): - global TORCH_PIPE - global RPC_INIT - try: - from torch.distributed.pipeline.sync import Pipe # noqa - global Pipe - from torch.distributed.pipeline.sync.utils import partition_model - global partition_model - from torch.distributed import rpc - import tempfile - TORCH_PIPE = True - # Initialize single process RPC agent since TORCH_PIPE requires - # RRef. RRef depends on RPC being initialized and as a result we initialize - # RPC with a single node. - tmpfile = tempfile.NamedTemporaryFile() - if not RPC_INIT: - rpc.init_rpc( - name="worker", - rank=0, - world_size=1, - rpc_backend_options=rpc.TensorPipeRpcBackendOptions( - init_method="file://{}".format(tmpfile.name), - ) - ) - RPC_INIT = True - logger.info('Using torch pipe') - except ImportError: - try: - from fairscale.nn import Pipe # noqa - logger.info('Using fairscale pipe') - except ImportError: - raise ImportError("Please install fairscale with: pip install fairscale") - - -@register_model("pipeline_parallel_transformer") -class PipelineParallelTransformerModel(BaseFairseqModel): - def __init__(self, encoder, decoder, balance, devices, chunks, checkpoint): - import_pipe() - super().__init__() - assert isinstance(encoder, FairseqEncoder) - assert isinstance(decoder, FairseqDecoder) - encoder_module_list = ( - [encoder.embedding_layer] - + list(encoder.encoder_layers) - + [encoder.final_layer_norm] - ) - self.num_encoder_modules = len(encoder_module_list) - decoder_module_list = ( - [decoder.embedding_layer] - + list(decoder.decoder_layers) - + [decoder.decoder_output_layer] - ) - self.num_decoder_modules = len(decoder_module_list) - module_list = encoder_module_list + decoder_module_list - self.devices = devices - if TORCH_PIPE: - self.model = Pipe( - partition_model(nn.Sequential(*module_list), balance, devices), - chunks=chunks, - checkpoint=checkpoint, - ) - else: - self.model = Pipe( - nn.Sequential(*module_list), - balance=balance, - devices=devices, - chunks=chunks, - checkpoint=checkpoint, - ) - self.encoder_max_positions = self.max_positions_helper( - encoder.embedding_layer, "max_source_positions" - ) - self.decoder_max_positions = self.max_positions_helper( - decoder.embedding_layer, "max_target_positions" - ) - self.adaptive_softmax = getattr(decoder, "adaptive_softmax", None) - # Note: To be populated during inference - self.encoder = None - self.decoder = None - - def forward(self, src_tokens, src_lengths, prev_output_tokens): - if self.training: - input_lst = [src_tokens, src_lengths, prev_output_tokens] - input = tuple(i.to(self.devices[0], non_blocking=True) for i in input_lst) - if 
TORCH_PIPE: - return self.model(input).local_value() - else: - return self.model(input) - else: - assert self.encoder is not None and self.decoder is not None, ( - "encoder and decoder need to be initialized by " - + "calling the `prepare_for_inference_()` method" - ) - encoder_output_tuple = self.encoder(input) - return self.decoder(encoder_output_tuple) - - def prepare_for_inference_(self, cfg): - if self.encoder is not None and self.decoder is not None: - logger.info("Encoder and Decoder already initialized") - return - encoder_module_list = [] - decoder_module_list = [] - module_count = 0 - for partition in self.model.partitions: - for module in partition: - if module_count < self.num_encoder_modules: - encoder_module_list.append(module) - else: - decoder_module_list.append(module) - module_count += 1 - self.model = None - self.encoder = TransformerEncoder(cfg.distributed_training, None, None, encoder_module_list) - self.decoder = TransformerDecoder( - cfg.distributed_training, None, None, decoder_module_list=decoder_module_list - ) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--activation-fn', - choices=utils.get_available_activation_fns(), - help='activation function to use') - parser.add_argument('--dropout', type=float, metavar='D', - help='dropout probability') - parser.add_argument('--attention-dropout', type=float, metavar='D', - help='dropout probability for attention weights') - parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', - help='dropout probability after activation in FFN.') - parser.add_argument('--encoder-embed-path', type=str, metavar='STR', - help='path to pre-trained encoder embedding') - parser.add_argument('--encoder-embed-dim', type=int, metavar='N', - help='encoder embedding dimension') - parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', - help='encoder embedding dimension for FFN') - parser.add_argument('--encoder-layers', type=int, metavar='N', - help='num encoder layers') - parser.add_argument('--encoder-attention-heads', type=int, metavar='N', - help='num encoder attention heads') - parser.add_argument('--encoder-normalize-before', action='store_true', - help='apply layernorm before each encoder block') - parser.add_argument('--encoder-learned-pos', action='store_true', - help='use learned positional embeddings in the encoder') - parser.add_argument('--decoder-embed-path', type=str, metavar='STR', - help='path to pre-trained decoder embedding') - parser.add_argument('--decoder-embed-dim', type=int, metavar='N', - help='decoder embedding dimension') - parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', - help='decoder embedding dimension for FFN') - parser.add_argument('--decoder-layers', type=int, metavar='N', - help='num decoder layers') - parser.add_argument('--decoder-attention-heads', type=int, metavar='N', - help='num decoder attention heads') - parser.add_argument('--decoder-learned-pos', action='store_true', - help='use learned positional embeddings in the decoder') - parser.add_argument('--decoder-normalize-before', action='store_true', - help='apply layernorm before each decoder block') - parser.add_argument('--share-decoder-input-output-embed', action='store_true', - help='share decoder input and output embeddings') - parser.add_argument('--share-all-embeddings', action='store_true', - help='share encoder, decoder and output embeddings' - ' (requires shared dictionary and embed dim)') - 
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', - help='if set, disables positional embeddings (outside self attention)') - parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', - help='comma separated list of adaptive softmax cutoff points. ' - 'Must be used with adaptive_loss criterion'), - parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', - help='sets adaptive softmax dropout for the tail projections') - parser.add_argument('--num-embedding-chunks', type=int, metavar='N', default=1, - help='Number of embedding layer chunks (enables more even distribution' - 'of optimizer states across data parallel nodes' - 'when using optimizer state sharding and' - 'a big embedding vocabulary)') - # fmt: on - - @classmethod - def build_model_base(cls, args, task): - """Build a new model instance.""" - - # make sure all arguments are present in older models - base_architecture(args) - - if not hasattr(args, "max_source_positions"): - args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS - if not hasattr(args, "max_target_positions"): - args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS - - src_dict, tgt_dict = task.source_dictionary, task.target_dictionary - - def build_embedding(dictionary, embed_dim, path=None, num_embed_chunks=1): - assert embed_dim % num_embed_chunks == 0, ( - f"Number of embedding chunks = {num_embed_chunks} should be " - + f"divisible by the embedding dimension = {embed_dim}" - ) - assert path is None or num_embed_chunks == 1, ( - "Loading embedding from a path with number of embedding chunks > 1" - + " is not yet supported" - ) - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - # if provided, load from preloaded dictionaries - if path: - emb = Embedding(num_embeddings, embed_dim, padding_idx) - embed_dict = utils.parse_embedding(path) - utils.load_embedding(embed_dict, dictionary, emb) - else: - embed_chunk_dim = embed_dim // num_embed_chunks - emb = nn.ModuleList() - for i in range(num_embed_chunks): - emb.append(Embedding(num_embeddings, embed_chunk_dim, padding_idx)) - return emb - - num_embed_chunks = args.num_embedding_chunks - if args.share_all_embeddings: - if src_dict != tgt_dict: - raise ValueError("--share-all-embeddings requires a joined dictionary") - if args.encoder_embed_dim != args.decoder_embed_dim: - raise ValueError( - "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim" - ) - if args.decoder_embed_path and ( - args.decoder_embed_path != args.encoder_embed_path - ): - raise ValueError( - "--share-all-embeddings not compatible with --decoder-embed-path" - ) - encoder_embed_tokens = build_embedding( - src_dict, - args.encoder_embed_dim, - args.encoder_embed_path, - num_embed_chunks, - ) - decoder_embed_tokens = encoder_embed_tokens - args.share_decoder_input_output_embed = True - else: - assert args.share_decoder_input_output_embed or num_embed_chunks == 1, ( - "Not sharing decoder I/O embeddings is not yet supported with number of " - + "embedding chunks > 1" - ) - encoder_embed_tokens = build_embedding( - src_dict, - args.encoder_embed_dim, - args.encoder_embed_path, - num_embed_chunks, - ) - decoder_embed_tokens = build_embedding( - tgt_dict, - args.decoder_embed_dim, - args.decoder_embed_path, - num_embed_chunks, - ) - - encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) - decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) - return (encoder, decoder) - - @classmethod - def build_encoder(cls, 
args, src_dict, embed_tokens): - return TransformerEncoder(args, src_dict, embed_tokens) - - @classmethod - def build_decoder(cls, args, tgt_dict, embed_tokens): - return TransformerDecoder(args, tgt_dict, embed_tokens) - - @classmethod - def build_model(cls, args, task): - encoder, decoder = cls.build_model_base(args, task) - return PipelineParallelTransformerModel( - encoder=encoder, - decoder=decoder, - balance=utils.eval_str_list(args.pipeline_balance, type=int), - devices=utils.eval_str_list(args.pipeline_devices, type=int), - chunks=args.pipeline_chunks, - checkpoint=args.pipeline_checkpoint, - ) - - def output_layer(self, features, **kwargs): - """Project features to the default output size (typically vocabulary size).""" - return self.decoder.output_layer(features, **kwargs) - - def max_positions(self): - """Maximum length supported by the model.""" - return (self.encoder_max_positions, self.decoder_max_positions) - - def max_positions_helper( - self, embedding_layer, max_positions_field="max_source_positions" - ): - """Maximum input length supported by the encoder or decoder.""" - if embedding_layer.embed_positions is None: - return getattr(embedding_layer, max_positions_field) - return min( - getattr(embedding_layer, max_positions_field), - embedding_layer.embed_positions.max_positions, - ) - - def get_normalized_probs(self, net_output, log_probs, sample=None): - """Get normalized probabilities (or log probs) from a net's output.""" - - if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None: - if sample is not None: - assert "target" in sample - target = sample["target"] - else: - target = None - out = self.adaptive_softmax.get_log_prob(net_output, target=target) - return out.exp_() if not log_probs else out - - # A Pipe() module returns a tuple of tensors as the output. - # In this case, the tuple has one element - the output tensor of logits - logits = net_output if isinstance(net_output, torch.Tensor) else net_output[0] - if log_probs: - return utils.log_softmax(logits, dim=-1, onnx_trace=False) - else: - return utils.softmax(logits, dim=-1, onnx_trace=False) - - def max_decoder_positions(self): - """Maximum length supported by the decoder.""" - return self.decoder_max_positions - - def load_state_dict(self, state_dict, strict=True, model_cfg=None): - """Copies parameters and buffers from *state_dict* into this module and - its descendants. - - Overrides the method in :class:`nn.Module`. Compared with that method - this additionally "upgrades" *state_dicts* from old checkpoints. 
- """ - self.upgrade_state_dict(state_dict) - is_regular_transformer = not any("model.partitions" in k for k in state_dict) - if is_regular_transformer: - state_dict = self.convert_to_pipeline_parallel_state_dict(state_dict) - return super().load_state_dict(state_dict, strict) - - def convert_to_pipeline_parallel_state_dict(self, state_dict): - new_state_dict = self.state_dict() - encoder_layer_idx = 0 - decoder_layer_idx = 0 - encoder_key_suffixes = [ - "self_attn.k_proj.weight", - "self_attn.k_proj.bias", - "self_attn.v_proj.weight", - "self_attn.v_proj.bias", - "self_attn.q_proj.weight", - "self_attn.q_proj.bias", - "self_attn.out_proj.weight", - "self_attn.out_proj.bias", - "self_attn_layer_norm.weight", - "self_attn_layer_norm.bias", - "fc1.weight", - "fc1.bias", - "fc2.weight", - "fc2.bias", - "final_layer_norm.weight", - "final_layer_norm.bias", - ] - decoder_key_suffixes = [ - "self_attn.k_proj.weight", - "self_attn.k_proj.bias", - "self_attn.v_proj.weight", - "self_attn.v_proj.bias", - "self_attn.q_proj.weight", - "self_attn.q_proj.bias", - "self_attn.out_proj.weight", - "self_attn.out_proj.bias", - "self_attn_layer_norm.weight", - "self_attn_layer_norm.bias", - "encoder_attn.k_proj.weight", - "encoder_attn.k_proj.bias", - "encoder_attn.v_proj.weight", - "encoder_attn.v_proj.bias", - "encoder_attn.q_proj.weight", - "encoder_attn.q_proj.bias", - "encoder_attn.out_proj.weight", - "encoder_attn.out_proj.bias", - "encoder_attn_layer_norm.weight", - "encoder_attn_layer_norm.bias", - "fc1.weight", - "fc1.bias", - "fc2.weight", - "fc2.bias", - "final_layer_norm.weight", - "final_layer_norm.bias", - ] - for pid, partition in enumerate(self.model.partitions): - logger.info(f"Begin Partition {pid}") - for mid, module in enumerate(partition): - # fmt: off - if isinstance(module, TransformerEncoderEmbedding): - new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['encoder.embed_tokens.weight'] - new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['encoder.embed_positions._float_tensor'] - if isinstance(module, TransformerEncoderLayer): - for suffix in encoder_key_suffixes: - new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'encoder.layers.{encoder_layer_idx}.{suffix}'] - encoder_layer_idx += 1 - if isinstance(module, TransformerDecoderLayer): - for suffix in decoder_key_suffixes: - new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'decoder.layers.{decoder_layer_idx}.{suffix}'] - decoder_layer_idx += 1 - if isinstance(module, TransformerEncoderLayerNorm): - if 'encoder.layer_norm.weight' in state_dict: - new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.weight'] = state_dict['encoder.layer_norm.weight'] - new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.bias'] = state_dict['encoder.layer_norm.bias'] - if isinstance(module, TransformerDecoderEmbedding): - new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['decoder.embed_tokens.weight'] - new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['decoder.embed_positions._float_tensor'] - if isinstance(module, TransformerDecoderOutputLayer): - new_state_dict[f'model.partitions.{pid}.{mid}.output_projection.weight'] = state_dict['decoder.output_projection.weight'] - # fmt: on - return new_state_dict - - -class TransformerEncoder(FairseqEncoder): - """ - Transformer encoder consisting of *args.encoder_layers* layers. 
Each layer - is a :class:`TransformerEncoderLayer`. - - Args: - args (argparse.Namespace): parsed command-line arguments - dictionary (~fairseq.data.Dictionary): encoding dictionary - embed_tokens (torch.nn.Embedding): input embedding - """ - - def __init__(self, args, dictionary, embed_tokens, encoder_module_list=None): - super().__init__(dictionary) - self.register_buffer("version", torch.Tensor([3])) - import_pipe() - self.use_pipeline = encoder_module_list is not None - if not self.use_pipeline: - self.embedding_layer = TransformerEncoderEmbedding(args, embed_tokens) - self.encoder_layers = nn.Sequential(*[TransformerEncoderLayer(args) for i in range(args.encoder_layers)]) - if isinstance(embed_tokens, nn.ModuleList): - emb_dim = sum(e.embedding_dim for e in embed_tokens) - else: - emb_dim = embed_tokens.embedding_dim - self.final_layer_norm = TransformerEncoderLayerNorm(args, emb_dim) - else: - encoder_balance = utils.eval_str_list( - args.pipeline_encoder_balance, type=int - ) - encoder_devices = utils.eval_str_list( - args.pipeline_encoder_devices, type=int - ) - assert sum(encoder_balance) == len(encoder_module_list), ( - f"Sum of encoder_balance={encoder_balance} is not equal " - + f"to num_encoder_modules={len(encoder_module_list)}" - ) - if TORCH_PIPE: - self.model = Pipe( - module=partition_model(nn.Sequential(*encoder_module_list), encoder_balance, encoder_devices), - chunks=args.pipeline_chunks, - checkpoint=args.pipeline_checkpoint, - ) - else: - self.model = Pipe( - module=nn.Sequential(*encoder_module_list), - balance=encoder_balance, - devices=encoder_devices, - chunks=args.pipeline_chunks, - checkpoint=args.pipeline_checkpoint, - ) - - def forward(self, src_tokens, src_lengths): - """ - Args: - input_tuple( - src_tokens (LongTensor): tokens in the source language of shape - `(batch, src_len)` - src_lengths (torch.LongTensor): lengths of each source sentence of - shape `(batch)` - ) - - Returns: - output_tuple( - - **encoder_out** (Tensor): the last encoder layer's output of - shape `(src_len, batch, embed_dim)` - - **encoder_padding_mask** (ByteTensor): the positions of - padding elements of shape `(batch, src_len)` - - prev_output_tokens - - **encoder_states** (List[Tensor]): all intermediate - hidden states of shape `(src_len, batch, embed_dim)`. - Only populated if *return_all_hiddens* is True. - ) - """ - dummy_prev_output_tokens = torch.zeros( - 1, dtype=src_tokens.dtype, device=src_tokens.device - ) - input_tuple = (src_tokens, src_lengths, dummy_prev_output_tokens) - if self.use_pipeline: - input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple) - if TORCH_PIPE: - encoder_out = self.model(input_tuple).local_value() - else: - encoder_out = self.model(input_tuple) - else: - encoder_embed_output_tuple = self.embedding_layer(input_tuple) - encoder_layers_output = self.encoder_layers(encoder_embed_output_tuple) - encoder_out = self.final_layer_norm(encoder_layers_output) - # first element is the encoder output - # second element is the encoder padding mask - # the remaining elements of EncoderOut are not computed by - # the PipelineParallelTransformer - return EncoderOut(encoder_out[0], encoder_out[1], None, None, None, None) - - def reorder_encoder_out(self, encoder_out, new_order): - """ - Reorder encoder output according to *new_order*. 
- - Args: - encoder_out: output from the ``forward()`` method - new_order (LongTensor): desired order - - Returns: - *encoder_out* rearranged according to *new_order* - """ - if encoder_out.encoder_out is not None: - encoder_out = encoder_out._replace( - encoder_out=encoder_out.encoder_out.index_select(1, new_order) - ) - if encoder_out.encoder_padding_mask is not None: - encoder_out = encoder_out._replace( - encoder_padding_mask=encoder_out.encoder_padding_mask.index_select( - 0, new_order - ) - ) - if encoder_out.encoder_embedding is not None: - encoder_out = encoder_out._replace( - encoder_embedding=encoder_out.encoder_embedding.index_select( - 0, new_order - ) - ) - if encoder_out.encoder_states is not None: - for idx, state in enumerate(encoder_out.encoder_states): - encoder_out.encoder_states[idx] = state.index_select(1, new_order) - return encoder_out - - def max_positions(self): - """Maximum input length supported by the encoder.""" - if self.embedding_layer.embed_positions is None: - return self.embedding_layer.max_source_positions - return min( - self.embedding_layer.max_source_positions, - self.embedding_layer.embed_positions.max_positions, - ) - - -class TransformerDecoder(FairseqDecoder): - """ - Transformer decoder consisting of *args.decoder_layers* layers. Each layer - is a :class:`TransformerDecoderLayer`. - - Args: - args (argparse.Namespace): parsed command-line arguments - dictionary (~fairseq.data.Dictionary): decoding dictionary - embed_tokens (torch.nn.Embedding): output embedding - no_encoder_attn (bool, optional): whether to attend to encoder outputs - (default: False). - """ - - def __init__( - self, - args, - dictionary, - embed_tokens, - no_encoder_attn=False, - decoder_module_list=None, - ): - super().__init__(dictionary) - self.register_buffer("version", torch.Tensor([3])) - import_pipe() - self.use_pipeline = decoder_module_list is not None - if not self.use_pipeline: - self.embedding_layer = TransformerDecoderEmbedding(args, embed_tokens) - self.decoder_layers = nn.Sequential(*[ - TransformerDecoderLayer(args, no_encoder_attn) - for _ in range(args.decoder_layers) - ]) - self.decoder_output_layer = TransformerDecoderOutputLayer( - args, embed_tokens, dictionary - ) - else: - decoder_balance = utils.eval_str_list( - args.pipeline_decoder_balance, type=int - ) - decoder_devices = utils.eval_str_list( - args.pipeline_decoder_devices, type=int - ) - assert sum(decoder_balance) == len(decoder_module_list), ( - f"Sum of decoder_balance={decoder_balance} is not equal " - + f"to num_decoder_modules={len(decoder_module_list)}" - ) - if TORCH_PIPE: - self.model = Pipe( - module=partition_model(nn.Sequential(*decoder_module_list), decoder_balance, decoder_devices), - chunks=args.pipeline_chunks, - checkpoint=args.pipeline_checkpoint, - ) - else: - self.model = Pipe( - module=nn.Sequential(*decoder_module_list), - balance=decoder_balance, - devices=decoder_devices, - chunks=args.pipeline_chunks, - checkpoint=args.pipeline_checkpoint, - ) - - def forward( - self, - prev_output_tokens, - encoder_out=None, - ): - """ - Args: - prev_output_tokens (LongTensor): previous decoder outputs of shape - `(batch, tgt_len)`, for teacher forcing - encoder_out (optional): output from the encoder, used for - encoder-side attention - incremental_state (dict): dictionary used for storing state during - :ref:`Incremental decoding` - features_only (bool, optional): only return features without - applying output layer (default: False). 
- - Returns: - tuple: - - the decoder's output of shape `(batch, tgt_len, vocab)` - - a dictionary with any model-specific outputs - """ - input_tuple = ( - encoder_out.encoder_out, - encoder_out.encoder_padding_mask, - prev_output_tokens, - ) - if self.use_pipeline: - input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple) - if TORCH_PIPE: - return (self.model(input_tuple).local_value(),) - else: - return (self.model(input_tuple),) - else: - embed_layer_output = self.embedding_layer(input_tuple) - state = self.decoder_layers(embed_layer_output) - return (self.decoder_output_layer(state),) - - def output_layer(self, features, **kwargs): - """Project features to the vocabulary size.""" - if self.adaptive_softmax is None: - # project back to size of vocabulary - if self.share_input_output_embed: - return F.linear(features, self.embed_tokens.weight) - else: - return F.linear(features, self.embed_out) - else: - return features - - def max_positions(self): - """Maximum output length supported by the decoder.""" - if self.embedding_layer.embed_positions is None: - return self.embedding_layer.max_target_positions - return min( - self.embedding_layer.max_target_positions, - self.embedding_layer.embed_positions.max_positions, - ) - - def buffered_future_mask(self, tensor): - dim = tensor.size(0) - if ( - not hasattr(self, "_future_mask") - or self._future_mask is None - or self._future_mask.device != tensor.device - or self._future_mask.size(0) < dim - ): - self._future_mask = torch.triu( - utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 - ) - return self._future_mask[:dim, :dim] - - def upgrade_state_dict_named(self, state_dict, name): - """Upgrade a (possibly old) state dict for new versions of fairseq.""" - if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): - weights_key = "{}.embed_positions.weights".format(name) - if weights_key in state_dict: - del state_dict[weights_key] - state_dict[ - "{}.embed_positions._float_tensor".format(name) - ] = torch.FloatTensor(1) - - for i in range(len(self.layers)): - # update layer norms - layer_norm_map = { - "0": "self_attn_layer_norm", - "1": "encoder_attn_layer_norm", - "2": "final_layer_norm", - } - for old, new in layer_norm_map.items(): - for m in ("weight", "bias"): - k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m) - if k in state_dict: - state_dict[ - "{}.layers.{}.{}.{}".format(name, i, new, m) - ] = state_dict[k] - del state_dict[k] - - version_key = "{}.version".format(name) - if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2: - # earlier checkpoints did not normalize after the stack of layers - self.layer_norm = None - self.normalize = False - state_dict[version_key] = torch.Tensor([1]) - - return state_dict - - -@register_model_architecture( - "pipeline_parallel_transformer", "transformer_iwslt_de_en_pipeline_parallel" -) -def transformer_iwslt_de_en_dist(args): - transformer_iwslt_de_en(args) - - -@register_model_architecture( - "pipeline_parallel_transformer", "transformer_wmt_en_de_big_pipeline_parallel" -) -def transformer_wmt_en_de_big_dist(args): - transformer_wmt_en_de_big(args) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp deleted file mode 100644 index 744c363e550231b8e0fbb94f998d46039daf5c00..0000000000000000000000000000000000000000 --- 
a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -#include -#include - -std::vector -dynamicconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l); - -std::vector dynamicconv_cuda_backward( - at::Tensor gradOutput, - int padding_l, - at::Tensor input, - at::Tensor filters); - -#define CHECK_CUDA(x) \ - AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) \ - AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -std::vector -dynamicconv_forward(at::Tensor input, at::Tensor filters, int padding_l) { - CHECK_INPUT(input); - CHECK_INPUT(filters); - - return dynamicconv_cuda_forward(input, filters, padding_l); -} - -std::vector dynamicconv_backward( - at::Tensor gradOutput, - int padding_l, - at::Tensor input, - at::Tensor filters) { - CHECK_INPUT(gradOutput); - CHECK_INPUT(input); - CHECK_INPUT(filters); - - return dynamicconv_cuda_backward(gradOutput, padding_l, input, filters); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)"); - m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)"); -} diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/online_backtranslation.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/online_backtranslation.py deleted file mode 100644 index 2e27ca237cde1980b2c3ca497e12f458da230c37..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/online_backtranslation.py +++ /dev/null @@ -1,682 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import contextlib -import json -import logging -import math -import os -from argparse import Namespace -from collections import OrderedDict, defaultdict -from pathlib import Path -from typing import Dict, Sequence, Tuple -from argparse import ArgumentError - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -import fairseq -from fairseq import metrics, options, utils -from fairseq.data import ( - FairseqDataset, - LanguagePairDataset, - NoisingDataset, - PrependTokenDataset, - RoundRobinZipDatasets, - TransformEosLangPairDataset, - data_utils, - encoders, -) -from fairseq.sequence_generator import SequenceGenerator -from fairseq.tasks import register_task -from fairseq.tasks.translation import TranslationTask, load_langpair_dataset - -logger = logging.getLogger(__name__) - - -class PiecewiseLinearFn: - """Piecewise linear function. 
Can be configured with a string.""" - - def __init__(self, pieces: Sequence[Tuple[int, float]]): - assert pieces == sorted( - pieces - ), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}" - - self.pieces = pieces - - def __call__(self, x: int) -> float: - for i, (x_a, y_a) in enumerate(self.pieces[:-1]): - x_b, y_b = self.pieces[i + 1] - if x_a <= x <= x_b: - return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a) - - return self.pieces[-1][1] - - @staticmethod - def from_string(configuration: str) -> "PiecewiseLinearFn": - """ - Parse the configuration of lambda coefficient (for scheduling). - x = "3" # lambda will be a constant equal to x - x = "0:1,1000:0" # lambda will start from 1 and linearly decrease - # to 0 during the first 1000 iterations - x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 - # iterations, then will linearly increase to 1 until iteration 2000 - """ - if isinstance(configuration, float): - return PiecewiseLinearFn([(0, configuration)]) - - try: - parts = configuration.split(",") - if len(parts) == 1: - v = float(configuration) - return PiecewiseLinearFn([(0, v)]) - - split = [s.split(":") for s in parts] - pieces = [(int(t), float(v)) for t, v in split] - return PiecewiseLinearFn(pieces) - except Exception: - raise ValueError( - f"Invalid PiecewiseLinearFn configuration: {configuration!r}" - ) - - @staticmethod - def one() -> "PiecewiseLinearFn": - return PiecewiseLinearFn([(0, 1.0)]) - - -@register_task("online_backtranslation") -class OnlineBackTranslationTask(TranslationTask): - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - # fmt: off - # Generic translation args - parser.add_argument('data', help='colon separated path to data directories list, \ - will be iterated upon during epochs in round-robin manner; \ - however, valid and test data are always in the first directory to \ - avoid the need for repeating them in all directories') - parser.add_argument('--mono-langs', metavar='MONO_LANGS', - help='monolingual languages for training') - parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS', - help='language pairs for validation') - parser.add_argument('--load-alignments', action='store_true', - help='load the binarized alignments') - parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL', - help='pad the source on the left') - parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', - help='pad the target on the left') - parser.add_argument('--upsample-primary', default=1, type=int, - help='amount to upsample primary dataset') - try: - parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', - help='max number of tokens in the source sequence') - parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', - help='max number of tokens in the target sequence') - except ArgumentError: - # this might have already been defined. Once we transition this to hydra it should be fine to add it here. 
- pass - parser.add_argument('--truncate-source', action='store_true', default=False, - help='truncate source to max-source-positions') - parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N', - help='if >0, then bucket source and target lengths into N ' - 'buckets and pad accordingly; this is useful on TPUs ' - 'to minimize the number of compilations') - - # Denoising args - parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N', - help='maximum word shuffle distance for denoising autoencoding data generation') - parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N', - help='word dropout probability for denoising autoencoding data generation') - parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N', - help='word blanking probability for denoising autoencoding data generation') - - # Backtranslation args - parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N', - help='back-translation weight') - parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N', - help='denoising auto-encoder weight') - - # Evaluation args - parser.add_argument('--generate-one-by-one', action='store_true', - help='generate one sentence at a time for backtranslation') - - parser.add_argument('--eval-bleu', action='store_true', - help='evaluation with BLEU scores') - parser.add_argument('--eval-bleu-detok', type=str, default="space", - help='detokenize before computing BLEU (e.g., "moses"); ' - 'required if using --eval-bleu; use "space" to ' - 'disable detokenization; see fairseq.data.encoders ' - 'for other options') - parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON', - help='args for building the tokenizer, if needed') - parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False, - help='compute tokenized BLEU instead of sacrebleu') - parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None, - help='remove BPE before computing BLEU') - parser.add_argument('--eval-bleu-args', type=str, metavar='JSON', - help='generation args for BLUE scoring, ' - 'e.g., \'{"beam": 4, "lenpen": 0.6}\'') - parser.add_argument('--eval-bleu-print-samples', action='store_true', - help='print sample generations during validation') - # fmt: on - - def __init__(self, args, common_dict, mono_langs, valid_lang_pairs): - super().__init__(args, common_dict, common_dict) - self.common_dict = common_dict - self.mono_langs = mono_langs - self.valid_lang_pairs = valid_lang_pairs - - self.SHOW_SAMPLES_INTERVAL = 1000 - # Start by showing samples - self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL - self.SHOW_SAMPLES_NUMBER = 5 - self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt) - self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae) - - self.args = args - self.data = utils.split_paths(self.args.data) - if len(self.data) == 1: - shards = list(Path(self.data[0]).glob("shard*")) - if len(shards) > 0: - # keep this as strings, since it can also be a manifold path - old_data = self.data - self.data = [str(shard) for shard in shards] - logging.warning(f"Expanded data directory {old_data} to {self.data}") - - @classmethod - def setup_task(cls, args, **kwargs): - """Setup the task (e.g., load dictionaries). 
- - Args: - args (argparse.Namespace): parsed command-line arguments - """ - args.left_pad_source = options.eval_bool(args.left_pad_source) - args.left_pad_target = options.eval_bool(args.left_pad_target) - - paths = utils.split_paths(args.data) - assert len(paths) > 0 - assert args.mono_langs is not None - - mono_langs = args.mono_langs.split(",") - valid_lang_pairs = args.valid_lang_pairs.split(",") - - # load dictionary - dict_path = os.path.join(paths[0], "dict.txt") - common_dict = cls.load_dictionary(dict_path) - - return cls(args, common_dict, mono_langs, valid_lang_pairs) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset: - """Load a given dataset split. - - Args: - split (str): name of the split (e.g., train, valid, test) - """ - if split == "train": - data_path = self.data[(epoch - 1) % len(self.data)] - dataset = self.load_train_dataset(data_path) - else: - # valid/test should always be the same. - dataset = self.load_translation_dataset(split, self.data[0]) - - self.datasets[split] = dataset - return dataset - - def load_train_dataset(self, data_path: str) -> FairseqDataset: - """The training dataset is made of backtranslation dataset and denoising dataset.""" - data = [] - for lang in self.mono_langs: - train_path = os.path.join(data_path, lang, "train") - # TODO: could we do the BT using denoise sample ? - # this would half the data loading work - data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang))) - data.append( - (f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang)) - ) - - return RoundRobinZipDatasets(OrderedDict(data)) - - def _langpair_dataset( - self, src: FairseqDataset, tgt: FairseqDataset - ) -> LanguagePairDataset: - return LanguagePairDataset( - src, - src.sizes, - self.dictionary, - tgt=tgt, - tgt_sizes=tgt.sizes, - tgt_dict=self.dictionary, - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - # TODO: should we shuffle ? we are already sorting batch by sizes so ? - # shuffle=True, - ) - - def _prepend_lang_bos_to_target( - self, dataset: LanguagePairDataset, lang: str - ) -> LanguagePairDataset: - bos = _lang_token_index(self.dictionary, lang) - return TransformEosLangPairDataset( - dataset, - src_eos=self.dictionary.eos(), - new_src_eos=self.dictionary.eos(), - tgt_bos=self.dictionary.eos(), - new_tgt_bos=bos, - ) - - def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset: - """The BT dataset is generated with (tgt, tgt) pairs. - The actual translation to a (generated_src, tgt) pair - is done on the fly during training. 
- """ - mono_dataset = data_utils.load_indexed_dataset( - data_path, self.common_dict, self.args.dataset_impl - ) - assert mono_dataset is not None, f"No dataset found for {lang}" - - mono_dataset_src = PrependTokenDataset( - mono_dataset, _lang_token_index(self.dictionary, lang) - ) - - mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset) - logger.info( - f"mono_lang = {lang} " - f"lang token index = {_lang_token_index(self.dictionary, lang)} " - f"lang token = {_lang_token(lang)}" - ) - - mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang) - return mono_dataset_bt - - def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset: - """Classic denoising dataset""" - dataset = data_utils.load_indexed_dataset( - data_path, self.common_dict, self.args.dataset_impl - ) - noisy_dataset = NoisingDataset( - dataset, - self.dictionary, - seed=1, - max_word_shuffle_distance=self.args.max_word_shuffle_distance, - word_dropout_prob=self.args.word_dropout_prob, - word_blanking_prob=self.args.word_blanking_prob, - ) - noisy_dataset = PrependTokenDataset( - noisy_dataset, _lang_token_index(self.dictionary, lang) - ) - - clean_dataset = data_utils.load_indexed_dataset( - data_path, self.common_dict, self.args.dataset_impl - ) - denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset) - denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang) - return denoising_dataset - - def load_translation_dataset( - self, split: str, data_path: str, combine: bool = False - ): - # only judging with one language pair for the moment, - # since ConcatDataset doesn't work as expected - assert len(self.valid_lang_pairs) == 1, "For now..." - valid_lang_pair = self.valid_lang_pairs[0] - src, tgt = valid_lang_pair.split("-") - - # use the same function than TranslationTask - src_tgt_dt = load_langpair_dataset( - data_path, - split, - src, - self.common_dict, - tgt, - self.common_dict, - combine=combine, - dataset_impl=self.args.dataset_impl, - upsample_primary=self.args.upsample_primary, - left_pad_source=self.args.left_pad_source, - left_pad_target=self.args.left_pad_target, - max_source_positions=self.args.max_source_positions, - max_target_positions=self.args.max_target_positions, - load_alignments=self.args.load_alignments, - truncate_source=self.args.truncate_source, - num_buckets=self.args.num_batch_buckets, - shuffle=(split != "test"), - prepend_bos_src=_lang_token_index(self.dictionary, src), - ) - - src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt) - src_tgt_eos_dt.args = self.args - return src_tgt_eos_dt - - def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): - raise NotImplementedError - - def build_model(self, args): - # torch.autograd.set_detect_anomaly(True) - model = super().build_model(args) - - add_secial_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs) - - self.sequence_generators = {} - for mono_lang in self.mono_langs: - self.sequence_generators[mono_lang] = SequenceGenerator( - [model], - tgt_dict=self.dictionary, - beam_size=1, - max_len_a=1.3, - max_len_b=5, - min_len=5, - # keep 1 to be able to prepend bos - max_len=model.max_decoder_positions() - 1, - ) - - if getattr(args, "eval_bleu", False): - assert getattr(args, "eval_bleu_detok", None) is not None, ( - "--eval-bleu-detok is required if using --eval-bleu; " - "try --eval-bleu-detok=moses (or --eval-bleu-detok=space " - "to disable detokenization, e.g., when using sentencepiece)" - ) - 
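# The remainder of this branch builds the optional BLEU-validation tooling:
# the detokenizer is constructed from --eval-bleu-detok / --eval-bleu-detok-args
# (a JSON string), and a separate generator is built from --eval-bleu-args,
# e.g. '{"beam": 4, "lenpen": 0.6}' as suggested in the argparse help above.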
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}") - self.tokenizer = encoders.build_tokenizer( - Namespace( - tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args - ) - ) - - gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}") - self.bleu_sequence_generator = self.build_generator( - [model], Namespace(**gen_args) - ) - - return model - - def max_positions(self): - """Return the max sentence length allowed by the task.""" - return (self.args.max_source_positions, self.args.max_target_positions) - - @property - def dictionary(self): - """Return the source :class:`~fairseq.data.Dictionary`.""" - return self.common_dict - - def display_samples_once_in_a_while(self, smp, mono_lang, other_lang): - self._show_samples_ctr += 1 - if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL: - return - self._show_samples_ctr = 0 - - ln = smp["net_input"]["src_tokens"].shape[0] - - logger.info( - f"(r:{self.args.distributed_rank}) : " - f"{other_lang} ---> {mono_lang} " - f"({other_lang} was generated by back-translation.) {ln} samples" - ) - - for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)): - src_tokens = smp["net_input"]["src_tokens"][i] - tgt_tokens = smp["target"][i] - - src_str = self.dictionary.string(src_tokens, "sentencepiece") - tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece") - logger.info( - f"\n{i}\t\t[{other_lang} generated] {src_str}\n" - f"\t\t[{mono_lang} original ] {tgt_str}\n" - f"\t\t[ src tokens] {src_tokens}\n" - ) - - def backtranslate_sample(self, smp, orig_lang, other_lang) -> None: - """ - * WARNING: smp is modified in place. - * At the start of this function, `smp` has the same input and target: - |--------------------------------------------------------| - | smp['net_input']['src_tokens'] | smp['target'] | - | (from data) __en__ hello world | __en__ hello world | - |--------------------------------------------------------| - - * We call generator.generate(smp, bos_token = token("ro")), - and copy the result as input - * At the end, `smp` has the translation to other language. 
- |--------------------------------------------------------| - | smp['net_input']['src_tokens'] | smp['target'] | - | (generated) __ro__ salut lume | __en__ hello world | - |--------------------------------------------------------| - - """ - bos_token = _lang_token_index(self.dictionary, other_lang) - generated = self.sequence_generators[orig_lang].generate( - models=[], sample=smp, bos_token=bos_token - ) - - max_lngth = max([gn[0]["tokens"].size(0) for gn in generated]) - net_input = smp["net_input"] - n_src_tokens = torch.empty( - size=(len(generated), max_lngth + 1), dtype=net_input["src_tokens"].dtype - ) - n_src_lengths = torch.empty( - len(generated), dtype=net_input["src_lengths"].dtype - ) - - for i, gn in enumerate(generated): - tokens = gn[0]["tokens"] - tokens_size = tokens.size(0) - padding_needed = max_lngth - tokens_size - tokens = torch.cat([tokens.new([bos_token]), tokens]) - tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad()) - n_src_tokens[i] = tokens - n_src_lengths[i] = tokens_size + 1 - - device = net_input["src_tokens"].device - # This seems to be important - del net_input["src_tokens"] - del net_input["src_lengths"] - net_input["src_tokens"] = n_src_tokens.to(device) - net_input["src_lengths"] = n_src_lengths.to(device) - - def generate(self, smp, model): - model.eval() - orig_lang = ( - self.dictionary[smp["net_input"]["src_tokens"][0][0]] - .replace(" ", "") - .replace("_", "") - ) - bos_token = smp["net_input"]["prev_output_tokens"][0][0] - with torch.no_grad(): - generated = self.sequence_generators[orig_lang].generate( - models=[model], sample=smp, bos_token=bos_token - ) - return generated - - def get_other_lang(self, lang): - # TODO: allow more complex mapping - if lang != self.mono_langs[0]: - return self.mono_langs[0] - if len(self.mono_langs) == 2: - return self.mono_langs[1] - return self.mono_langs[np.random.randint(1, len(self.mono_langs))] - - def train_step( - self, sample, model, criterion, optimizer, update_num, ignore_grad=False - ): - - model.train() - model.set_num_updates(update_num) - - agg_loss, agg_sample_size = 0.0, 0.0 - agg_logging_output: Dict[str, float] = defaultdict(float) - - dataset_keys = self.datasets["train"].datasets.keys() - - weights = { - "BT": self.lambda_bt(update_num), - "DENOISE": self.lambda_dae(update_num), - } - log_keys = {"BT": "bt_", "DENOISE": "dae_"} - - for dataset_key in dataset_keys: - smp = sample[dataset_key] - mono_lang, task_subtype = dataset_key.split("-") - if weights[task_subtype] == 0: - continue - - if task_subtype == "BT": - with torch.autograd.profiler.record_function("backtranslation"): - model.eval() - # TODO: Could we translate to several language at once ? - # this would allow to share encoder_out and maximize GPU usage. 
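# Back-translation step: with the model in eval mode, the (tgt, tgt) batch built
# by load_bt_dataset() is translated on the fly into the other monolingual
# language; backtranslate_sample() then overwrites smp['net_input'] with the
# generated source, so the criterion below sees (generated_src, tgt) pairs.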
- other_lang = self.get_other_lang(mono_lang) - self.backtranslate_sample(smp, mono_lang, other_lang) - self.display_samples_once_in_a_while(smp, mono_lang, other_lang) - model.train() - - # Like in FairseqTask.train_step - with torch.autograd.profiler.record_function("forward"): - loss, sample_size, logging_output = criterion(model, smp) - loss *= weights[task_subtype] - if ignore_grad: - loss *= 0 - with torch.autograd.profiler.record_function("backward"): - optimizer.backward(loss) - - agg_loss += loss.item() - agg_sample_size += sample_size - for k in logging_output: - agg_logging_output[log_keys[task_subtype] + k] += logging_output[k] - agg_logging_output[k] += logging_output[k] - - return agg_loss, agg_sample_size, agg_logging_output - - def get_bos_token_from_sample(self, sample): - net_input = sample["net_input"] - source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item() - source_lang_token = self.dictionary[source_lang_token_id].replace("_", "") - target_lang_token_id = _lang_token_index( - self.dictionary, self.get_other_lang(source_lang_token) - ) - - return target_lang_token_id - - def reduce_metrics(self, logging_outputs, criterion): - super().reduce_metrics(logging_outputs, criterion) - bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs) - if bt_sample_size: - bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs) - bt_loss_sum *= 1 / bt_sample_size / math.log(2) - metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3) - - bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs) - bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs) - bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2) - metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3) - metrics.log_derived( - "bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg) - ) - - dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs) - if dae_sample_size: - dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs) - dae_loss_sum *= 1 / dae_sample_size / math.log(2) - metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3) - - dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs) - dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs) - dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2) - metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3) - metrics.log_derived( - "dae_ppl", - lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg), - ) - - -@torch.no_grad() -def extend_embedding( - emb: nn.Module, new_vocab_size: int, copy_from_token_id: int -) -> None: - old_emb_data = emb.weight.data - (old_vocab_size, dim) = old_emb_data.shape - assert new_vocab_size >= old_vocab_size - - if new_vocab_size > old_vocab_size: - emb.weight.data = torch.zeros((new_vocab_size, dim)) - emb.weight.data[:old_vocab_size, :] = old_emb_data - # initialize new embeddings - emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id] - if hasattr(emb, "num_embeddings"): - emb.num_embeddings = new_vocab_size - if hasattr(emb, "out_features"): - emb.out_features = new_vocab_size - - if getattr(emb, "bias", None) is None: - return - - # Fix the bias. - # Bias shape can be different from the previous vocab size - # if the weight matrix was shared and alread extended but not the bias. 
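# (For example, an output_projection whose weight is shared with the decoder
# embeddings and was therefore already grown, while its bias still has the old
# vocabulary size.)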
- (old_vocab_size,) = emb.bias.shape - assert new_vocab_size >= old_vocab_size - if new_vocab_size > old_vocab_size: - old_bias = emb.bias.data - new_bias = torch.zeros( - (new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device - ) - new_bias[:old_vocab_size] = old_bias - emb.bias.data = new_bias - - -def add_secial_tokens_to_dict_and_model( - dictionary: "fairseq.data.Dictionary", - model: nn.Module, - mono_langs: Sequence[str], -) -> None: - embs = model.encoder.embed_tokens - vocab_size, embedding_dim = embs.weight.shape - - # The model may or may not have a '' embedding yet - assert ( - len(dictionary) <= vocab_size <= len(dictionary) + 1 - ), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})" - # TODO: we should reuse the pretrained model dict which already has - dictionary.add_symbol("") - - for lang in mono_langs: - lang_token = _lang_token(lang) - dictionary.add_symbol(lang_token) - logger.info( - f"dictionary: {len(dictionary)} -> {vocab_size} tokens " - f"after adding {len(mono_langs)} lang tokens." - ) - - if len(dictionary) <= vocab_size: - return - - extend_embedding(embs, len(dictionary), dictionary.bos()) - dec_embs = model.decoder.embed_tokens - extend_embedding(dec_embs, len(dictionary), dictionary.bos()) - lm_head = model.decoder.output_projection - extend_embedding(lm_head, len(dictionary), dictionary.bos()) - assert lm_head.weight.shape == (len(dictionary), embedding_dim) - - -def _lang_token(lang: str) -> str: - return f"__{lang}__" - - -def _lang_token_index(dictionary, lang: str) -> int: - return dictionary.index(_lang_token(lang)) - - -@contextlib.contextmanager -def assert_weights_have_changed(model: nn.Module): - def checksum(model: nn.Module) -> float: - return sum(p.sum().item() for p in model.parameters()) - - initial_checksum = checksum(model) - yield model - final_checksum = checksum(model) - logger.info( - f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}" - ) - assert initial_checksum != final_checksum, "Model hasn't changed !" diff --git a/spaces/OFA-Sys/OFA-vqa/criterions/label_smoothed_cross_entropy.py b/spaces/OFA-Sys/OFA-vqa/criterions/label_smoothed_cross_entropy.py deleted file mode 100644 index 73b36e750a0037cad8403e383d790f868b509d24..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/criterions/label_smoothed_cross_entropy.py +++ /dev/null @@ -1,343 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
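# Overview: this module implements OFA's adjusted label-smoothed cross-entropy.
# Beyond standard label smoothing it supports R-Drop regularization (a KL term
# between two stochastic forward passes), dropping the worst-scoring samples
# after a warm-up step, ignoring prefix/eos tokens, and restricting probability
# mass to a vocabulary constraint range.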
- -import math -from dataclasses import dataclass, field -from typing import Optional - -import torch -import torch.nn.functional as F -import numpy as np -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass -from omegaconf import II - - -@dataclass -class AjustLabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass): - label_smoothing: float = field( - default=0.0, - metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"}, - ) - report_accuracy: bool = field( - default=False, - metadata={"help": "report accuracy metric"}, - ) - ignore_prefix_size: int = field( - default=0, - metadata={"help": "Ignore first N tokens"}, - ) - ignore_eos: bool = field( - default=False, - metadata={"help": "Ignore eos token"}, - ) - sentence_avg: bool = II("optimization.sentence_avg") - drop_worst_ratio: float = field( - default=0.0, - metadata={"help": "ratio for discarding bad samples"}, - ) - drop_worst_after: int = field( - default=0, - metadata={"help": "steps for discarding bad samples"}, - ) - use_rdrop: bool = field( - default=False, metadata={"help": "use R-Drop"} - ) - reg_alpha: float = field( - default=1.0, metadata={"help": "weight for R-Drop"} - ) - sample_patch_num: int = field( - default=196, metadata={"help": "sample patchs for v1"} - ) - constraint_range: Optional[str] = field( - default=None, - metadata={"help": "constraint range"} - ) - - -def construct_rdrop_sample(x): - if isinstance(x, dict): - for key in x: - x[key] = construct_rdrop_sample(x[key]) - return x - elif isinstance(x, torch.Tensor): - return x.repeat(2, *([1] * (x.dim()-1))) - elif isinstance(x, int): - return x * 2 - elif isinstance(x, np.ndarray): - return x.repeat(2) - else: - raise NotImplementedError - - -def kl_loss(p, q): - p_loss = F.kl_div(p, torch.exp(q), reduction='sum') - q_loss = F.kl_div(q, torch.exp(p), reduction='sum') - loss = (p_loss + q_loss) / 2 - return loss - - -def label_smoothed_nll_loss( - lprobs, target, epsilon, update_num, reduce=True, - drop_worst_ratio=0.0, drop_worst_after=0, use_rdrop=False, reg_alpha=1.0, - constraint_masks=None, constraint_start=None, constraint_end=None -): - if target.dim() == lprobs.dim() - 1: - target = target.unsqueeze(-1) - nll_loss = -lprobs.gather(dim=-1, index=target).squeeze(-1) - if constraint_masks is not None: - smooth_loss = -lprobs.masked_fill(~constraint_masks, 0).sum(dim=-1, keepdim=True).squeeze(-1) - eps_i = epsilon / (constraint_masks.sum(1) - 1 + 1e-6) - elif constraint_start is not None and constraint_end is not None: - constraint_range = [0, 1, 2, 3] + list(range(constraint_start, constraint_end)) - smooth_loss = -lprobs[:, constraint_range].sum(dim=-1, keepdim=True).squeeze(-1) - eps_i = epsilon / (len(constraint_range) - 1 + 1e-6) - else: - smooth_loss = -lprobs.sum(dim=-1, keepdim=True).squeeze(-1) - eps_i = epsilon / (lprobs.size(-1) - 1) - loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss - if drop_worst_ratio > 0 and update_num > drop_worst_after: - if use_rdrop: - true_batch_size = loss.size(0) // 2 - _, indices = torch.topk(loss[:true_batch_size], k=int(true_batch_size * (1 - drop_worst_ratio)), largest=False) - loss = torch.cat([loss[indices], loss[indices+true_batch_size]]) - nll_loss = torch.cat([nll_loss[indices], nll_loss[indices+true_batch_size]]) - lprobs = torch.cat([lprobs[indices], lprobs[indices+true_batch_size]]) - else: - loss, indices = torch.topk(loss, k=int(loss.shape[0] * (1 - 
drop_worst_ratio)), largest=False) - nll_loss = nll_loss[indices] - lprobs = lprobs[indices] - - ntokens = loss.numel() - nll_loss = nll_loss.sum() - loss = loss.sum() - if use_rdrop: - true_batch_size = lprobs.size(0) // 2 - p = lprobs[:true_batch_size] - q = lprobs[true_batch_size:] - if constraint_start is not None and constraint_end is not None: - constraint_range = [0, 1, 2, 3] + list(range(constraint_start, constraint_end)) - p = p[:, constraint_range] - q = q[:, constraint_range] - loss += kl_loss(p, q) * reg_alpha - - return loss, nll_loss, ntokens - - -@register_criterion( - "ajust_label_smoothed_cross_entropy", dataclass=AjustLabelSmoothedCrossEntropyCriterionConfig -) -class AjustLabelSmoothedCrossEntropyCriterion(FairseqCriterion): - def __init__( - self, - task, - sentence_avg, - label_smoothing, - ignore_prefix_size=0, - ignore_eos=False, - report_accuracy=False, - drop_worst_ratio=0, - drop_worst_after=0, - use_rdrop=False, - reg_alpha=1.0, - sample_patch_num=196, - constraint_range=None - ): - super().__init__(task) - self.sentence_avg = sentence_avg - self.eps = label_smoothing - self.ignore_prefix_size = ignore_prefix_size - self.ignore_eos = ignore_eos - self.report_accuracy = report_accuracy - self.drop_worst_ratio = drop_worst_ratio - self.drop_worst_after = drop_worst_after - self.use_rdrop = use_rdrop - self.reg_alpha = reg_alpha - self.sample_patch_num = sample_patch_num - - self.constraint_start = None - self.constraint_end = None - if constraint_range is not None: - constraint_start, constraint_end = constraint_range.split(',') - self.constraint_start = int(constraint_start) - self.constraint_end = int(constraint_end) - - def forward(self, model, sample, update_num=0, reduce=True): - """Compute the loss for the given sample. 
- - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - if isinstance(sample, list): - if self.sample_patch_num > 0: - sample[0]['net_input']['sample_patch_num'] = self.sample_patch_num - loss_v1, sample_size_v1, logging_output_v1 = self.forward(model, sample[0], update_num, reduce) - loss_v2, sample_size_v2, logging_output_v2 = self.forward(model, sample[1], update_num, reduce) - loss = loss_v1 / sample_size_v1 + loss_v2 / sample_size_v2 - sample_size = 1 - logging_output = { - "loss": loss.data, - "loss_v1": loss_v1.data, - "loss_v2": loss_v2.data, - "nll_loss": logging_output_v1["nll_loss"].data / sample_size_v1 + logging_output_v2["nll_loss"].data / sample_size_v2, - "ntokens": logging_output_v1["ntokens"] + logging_output_v2["ntokens"], - "nsentences": logging_output_v1["nsentences"] + logging_output_v2["nsentences"], - "sample_size": 1, - "sample_size_v1": sample_size_v1, - "sample_size_v2": sample_size_v2, - } - return loss, sample_size, logging_output - - if self.use_rdrop: - construct_rdrop_sample(sample) - - net_output = model(**sample["net_input"]) - loss, nll_loss, ntokens = self.compute_loss(model, net_output, sample, update_num, reduce=reduce) - sample_size = ( - sample["target"].size(0) if self.sentence_avg else ntokens - ) - logging_output = { - "loss": loss.data, - "nll_loss": nll_loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["nsentences"], - "sample_size": sample_size, - } - if self.report_accuracy: - n_correct, total = self.compute_accuracy(model, net_output, sample) - logging_output["n_correct"] = utils.item(n_correct.data) - logging_output["total"] = utils.item(total.data) - return loss, sample_size, logging_output - - def get_lprobs_and_target(self, model, net_output, sample): - conf = sample['conf'][:, None, None] if 'conf' in sample and sample['conf'] is not None else 1 - constraint_masks = None - if "constraint_masks" in sample and sample["constraint_masks"] is not None: - constraint_masks = sample["constraint_masks"] - net_output[0].masked_fill_(~constraint_masks, -math.inf) - if self.constraint_start is not None and self.constraint_end is not None: - net_output[0][:, :, 4:self.constraint_start] = -math.inf - net_output[0][:, :, self.constraint_end:] = -math.inf - lprobs = model.get_normalized_probs(net_output, log_probs=True) * conf - target = model.get_targets(sample, net_output) - if self.ignore_prefix_size > 0: - lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous() - target = target[:, self.ignore_prefix_size :].contiguous() - if constraint_masks is not None: - constraint_masks = constraint_masks[:, self.ignore_prefix_size :, :].contiguous() - if self.ignore_eos: - bsz, seq_len, embed_dim = lprobs.size() - eos_indices = target.eq(self.task.tgt_dict.eos()) - lprobs = lprobs[~eos_indices].reshape(bsz, seq_len-1, embed_dim) - target = target[~eos_indices].reshape(bsz, seq_len-1) - if constraint_masks is not None: - constraint_masks = constraint_masks[~eos_indices].reshape(bsz, seq_len-1, embed_dim) - if constraint_masks is not None: - constraint_masks = constraint_masks.view(-1, constraint_masks.size(-1)) - return lprobs.view(-1, lprobs.size(-1)), target.view(-1), constraint_masks - - def compute_loss(self, model, net_output, sample, update_num, reduce=True): - lprobs, target, constraint_masks = self.get_lprobs_and_target(model, net_output, sample) - if constraint_masks is not None: - constraint_masks = 
constraint_masks[target != self.padding_idx] - lprobs = lprobs[target != self.padding_idx] - target = target[target != self.padding_idx] - loss, nll_loss, ntokens = label_smoothed_nll_loss( - lprobs, - target, - self.eps, - update_num, - reduce=reduce, - drop_worst_ratio=self.drop_worst_ratio, - drop_worst_after=self.drop_worst_after, - use_rdrop=self.use_rdrop, - reg_alpha=self.reg_alpha, - constraint_masks=constraint_masks, - constraint_start=self.constraint_start, - constraint_end=self.constraint_end - ) - return loss, nll_loss, ntokens - - def compute_accuracy(self, model, net_output, sample): - lprobs, target = self.get_lprobs_and_target(model, net_output, sample) - mask = target.ne(self.padding_idx) - n_correct = torch.sum( - lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask)) - ) - total = torch.sum(mask) - return n_correct, total - - @classmethod - def reduce_metrics(cls, logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - loss_sum_v1 = sum(log.get("loss_v1", 0) for log in logging_outputs) - loss_sum_v2 = sum(log.get("loss_v2", 0) for log in logging_outputs) - nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - sample_size_v1 = sum(log.get("sample_size_v1", 0) for log in logging_outputs) - sample_size_v2 = sum(log.get("sample_size_v2", 0) for log in logging_outputs) - - metrics.log_scalar( - "loss", loss_sum / sample_size, sample_size, round=3 - ) - metrics.log_scalar( - "loss_v1", loss_sum_v1 / max(sample_size_v1, 1), max(sample_size_v1, 1), round=3 - ) - metrics.log_scalar( - "loss_v2", loss_sum_v2 / max(sample_size_v2, 1), max(sample_size_v2, 1), round=3 - ) - metrics.log_scalar( - "nll_loss", nll_loss_sum / sample_size, ntokens, round=3 - ) - metrics.log_derived( - "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) - ) - - metrics.log_scalar( - "ntokens", ntokens, 1, round=3 - ) - metrics.log_scalar( - "nsentences", nsentences, 1, round=3 - ) - metrics.log_scalar( - "sample_size", sample_size, 1, round=3 - ) - metrics.log_scalar( - "sample_size_v1", sample_size_v1, 1, round=3 - ) - metrics.log_scalar( - "sample_size_v2", sample_size_v2, 1, round=3 - ) - - total = utils.item(sum(log.get("total", 0) for log in logging_outputs)) - if total > 0: - metrics.log_scalar("total", total) - n_correct = utils.item( - sum(log.get("n_correct", 0) for log in logging_outputs) - ) - metrics.log_scalar("n_correct", n_correct) - metrics.log_derived( - "accuracy", - lambda meters: round( - meters["n_correct"].sum * 100.0 / meters["total"].sum, 3 - ) - if meters["total"].sum > 0 - else float("nan"), - ) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. 
- """ - return True diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/tasks/speech_recognition.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/tasks/speech_recognition.py deleted file mode 100644 index d9f011d55ff4fdfeb4c04ca790c314d685708c3a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/tasks/speech_recognition.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import json -import os -import re -import sys - -import torch -from examples.speech_recognition.data import AsrDataset -from examples.speech_recognition.data.replabels import replabel_symbol -from fairseq.data import Dictionary -from fairseq.tasks import LegacyFairseqTask, register_task - - -def get_asr_dataset_from_json(data_json_path, tgt_dict): - """ - Parse data json and create dataset. - See scripts/asr_prep_json.py which pack json from raw files - - Json example: - { - "utts": { - "4771-29403-0025": { - "input": { - "length_ms": 170, - "path": "/tmp/file1.flac" - }, - "output": { - "text": "HELLO \n", - "token": "HE LLO", - "tokenid": "4815, 861" - } - }, - "1564-142299-0096": { - ... - } - } - """ - if not os.path.isfile(data_json_path): - raise FileNotFoundError("Dataset not found: {}".format(data_json_path)) - with open(data_json_path, "rb") as f: - data_samples = json.load(f)["utts"] - assert len(data_samples) != 0 - sorted_samples = sorted( - data_samples.items(), - key=lambda sample: int(sample[1]["input"]["length_ms"]), - reverse=True, - ) - aud_paths = [s[1]["input"]["path"] for s in sorted_samples] - ids = [s[0] for s in sorted_samples] - speakers = [] - for s in sorted_samples: - m = re.search("(.+?)-(.+?)-(.+?)", s[0]) - speakers.append(m.group(1) + "_" + m.group(2)) - frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples] - tgt = [ - [int(i) for i in s[1]["output"]["tokenid"].split(", ")] - for s in sorted_samples - ] - # append eos - tgt = [[*t, tgt_dict.eos()] for t in tgt] - return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers) - - -@register_task("speech_recognition") -class SpeechRecognitionTask(LegacyFairseqTask): - """ - Task for training speech recognition model. 
- """ - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - parser.add_argument("data", help="path to data directory") - parser.add_argument( - "--silence-token", default="\u2581", help="token for silence (used by w2l)" - ) - parser.add_argument( - "--max-source-positions", - default=sys.maxsize, - type=int, - metavar="N", - help="max number of frames in the source sequence", - ) - parser.add_argument( - "--max-target-positions", - default=1024, - type=int, - metavar="N", - help="max number of tokens in the target sequence", - ) - - def __init__(self, args, tgt_dict): - super().__init__(args) - self.tgt_dict = tgt_dict - - @classmethod - def setup_task(cls, args, **kwargs): - """Setup the task (e.g., load dictionaries).""" - dict_path = os.path.join(args.data, "dict.txt") - if not os.path.isfile(dict_path): - raise FileNotFoundError("Dict not found: {}".format(dict_path)) - tgt_dict = Dictionary.load(dict_path) - - if args.criterion == "ctc_loss": - tgt_dict.add_symbol("") - elif args.criterion == "asg_loss": - for i in range(1, args.max_replabel + 1): - tgt_dict.add_symbol(replabel_symbol(i)) - - print("| dictionary: {} types".format(len(tgt_dict))) - return cls(args, tgt_dict) - - def load_dataset(self, split, combine=False, **kwargs): - """Load a given dataset split. - - Args: - split (str): name of the split (e.g., train, valid, test) - """ - data_json_path = os.path.join(self.args.data, "{}.json".format(split)) - self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict) - - def build_generator(self, models, args, **unused): - w2l_decoder = getattr(args, "w2l_decoder", None) - if w2l_decoder == "viterbi": - from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder - - return W2lViterbiDecoder(args, self.target_dictionary) - elif w2l_decoder == "kenlm": - from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder - - return W2lKenLMDecoder(args, self.target_dictionary) - elif w2l_decoder == "fairseqlm": - from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder - - return W2lFairseqLMDecoder(args, self.target_dictionary) - else: - return super().build_generator(models, args) - - @property - def target_dictionary(self): - """Return the :class:`~fairseq.data.Dictionary` for the language - model.""" - return self.tgt_dict - - @property - def source_dictionary(self): - """Return the source :class:`~fairseq.data.Dictionary` (if applicable - for this task).""" - return None - - def max_positions(self): - """Return the max speech and sentence length allowed by the task.""" - return (self.args.max_source_positions, self.args.max_target_positions) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py deleted file mode 100644 index d6cf06e5872cb86e5c2e726153c7a80c78db9d1e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..ops import emulate_int - - -class IntEmbedding(nn.Module): - """ - Quantized counterpart of the nn.Embedding module that applies QuantNoise during training. 
- - Args: - - num_embeddings: number of tokens - - embedding_dim: embedding dimension - - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - - bits: number of bits - - method: choose among {"tensor", "histogram", "channel"} - - update_step: recompute scale and zero_point every update_steps iterations - - Remarks: - - We use the straight-through estimator so that the gradients - back-propagate nicely in the network, this is implemented with - the detach() trick - - Parameters scale and zero_point are recomputed every update_step - forward pass to reduce the overhead - - At test time, the weights are fully quantized - """ - - def __init__( - self, - num_embeddings, - embedding_dim, - padding_idx=None, - max_norm=None, - norm_type=2.0, - scale_grad_by_freq=False, - sparse=False, - _weight=None, - p=0, - update_step=1000, - bits=8, - method="histogram", - ): - super(IntEmbedding, self).__init__() - self.num_embeddings = num_embeddings - self.embedding_dim = embedding_dim - if padding_idx is not None: - if padding_idx > 0: - assert ( - padding_idx < self.num_embeddings - ), "Padding_idx must be within num_embeddings" - elif padding_idx < 0: - assert ( - padding_idx >= -self.num_embeddings - ), "Padding_idx must be within num_embeddings" - padding_idx = self.num_embeddings + padding_idx - self.padding_idx = padding_idx - self.max_norm = max_norm - self.norm_type = norm_type - self.scale_grad_by_freq = scale_grad_by_freq - if _weight is None: - self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim)) - self.reset_parameters() - else: - assert list(_weight.shape) == [ - num_embeddings, - embedding_dim, - ], "Shape of weight does not match num_embeddings and embedding_dim" - self.weight = nn.Parameter(_weight) - self.sparse = sparse - - # quantization parameters - self.p = p - self.bits = bits - self.method = method - self.update_step = update_step - self.counter = 0 - - def reset_parameters(self): - nn.init.normal_(self.weight) - if self.padding_idx is not None: - with torch.no_grad(): - self.weight[self.padding_idx].fill_(0) - - def forward(self, input): - # train with QuantNoise and evaluate the fully quantized network - p = self.p if self.training else 1 - - # update parameters every 1000 iterations - if self.counter % self.update_step == 0: - self.scale = None - self.zero_point = None - self.counter += 1 - - # quantize weight - weight_quantized, self.scale, self.zero_point = emulate_int( - self.weight.detach(), - bits=self.bits, - method=self.method, - scale=self.scale, - zero_point=self.zero_point, - ) - - # mask to apply noise - mask = torch.zeros_like(self.weight) - mask.bernoulli_(1 - p) - noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0) - - # using straight-through estimator (STE) - clamp_low = -self.scale * self.zero_point - clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point) - weight = ( - torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) - + noise.detach() - ) - - # return output - output = F.embedding( - input, - weight, - self.padding_idx, - self.max_norm, - self.norm_type, - self.scale_grad_by_freq, - self.sparse, - ) - return output - - def extra_repr(self): - s = "{num_embeddings}, {embedding_dim}" - if self.padding_idx is not None: - s += ", padding_idx={padding_idx}" - if self.max_norm is not None: - s += ", max_norm={max_norm}" - if self.norm_type != 2: - s += ", norm_type={norm_type}" - if self.scale_grad_by_freq is not False: - s += ", scale_grad_by_freq={scale_grad_by_freq}" - if 
self.sparse is not False: - s += ", sparse=True" - s += "quant_noise={p}, bits={bits}, method={method}" - return s.format(**self.__dict__) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_transformer_sentence_encoder_layer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_transformer_sentence_encoder_layer.py deleted file mode 100644 index d95da59c2471bfa858fd627605196d7f41f9ec12..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_transformer_sentence_encoder_layer.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.modules import TransformerSentenceEncoderLayer -from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention - - -class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer): - """ - Implements a Sprase Transformer Encoder Layer (see SparseMultiheadAttention) - """ - - def __init__( - self, - embedding_dim: int = 768, - ffn_embedding_dim: int = 3072, - num_attention_heads: int = 8, - dropout: float = 0.1, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - activation_fn: str = "relu", - export: bool = False, - is_bidirectional: bool = True, - stride: int = 32, - expressivity: int = 8, - ) -> None: - - super().__init__( - embedding_dim, - ffn_embedding_dim, - num_attention_heads, - dropout, - attention_dropout, - activation_dropout, - activation_fn, - export, - ) - - self.self_attn = SparseMultiheadAttention( - self.embedding_dim, - num_attention_heads, - dropout=attention_dropout, - add_bias_kv=False, - add_zero_attn=False, - self_attention=True, - is_bidirectional=is_bidirectional, - stride=stride, - expressivity=expressivity, - ) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/__init__.py deleted file mode 100644 index be783be896396ff659c0bd173a7acebb8a2d165d..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
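# Overview: build_optimizer() flattens parameter groups, keeps only parameters
# that require gradients, and dispatches through the registry created by
# registry.setup_registry("--optimizer", ...); the loop at the bottom imports
# every module in this directory so that @register_optimizer decorators run at
# import time.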
-"""isort:skip_file""" - -import importlib -import os - -from fairseq import registry -from fairseq.optim.bmuf import FairseqBMUF # noqa -from fairseq.optim.fairseq_optimizer import ( # noqa - FairseqOptimizer, - LegacyFairseqOptimizer, -) -from fairseq.optim.amp_optimizer import AMPOptimizer -from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer -from fairseq.optim.shard import shard_ -from omegaconf import DictConfig - -__all__ = [ - "AMPOptimizer", - "FairseqOptimizer", - "FP16Optimizer", - "MemoryEfficientFP16Optimizer", - "shard_", -] - -( - _build_optimizer, - register_optimizer, - OPTIMIZER_REGISTRY, - OPTIMIZER_DATACLASS_REGISTRY, -) = registry.setup_registry("--optimizer", base_class=FairseqOptimizer, required=True) - - -def build_optimizer(cfg: DictConfig, params, *extra_args, **extra_kwargs): - if all(isinstance(p, dict) for p in params): - params = [t for p in params for t in p.values()] - params = list(filter(lambda p: p.requires_grad, params)) - return _build_optimizer(cfg, params, *extra_args, **extra_kwargs) - - -# automatically import any Python files in the optim/ directory -for file in sorted(os.listdir(os.path.dirname(__file__))): - if file.endswith(".py") and not file.startswith("_"): - file_name = file[: file.find(".py")] - importlib.import_module("fairseq.optim." + file_name) diff --git a/spaces/OIUGLK/bingo/src/components/turn-counter.tsx b/spaces/OIUGLK/bingo/src/components/turn-counter.tsx deleted file mode 100644 index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000 --- a/spaces/OIUGLK/bingo/src/components/turn-counter.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import React from 'react' -import { Throttling } from '@/lib/bots/bing/types' - -export interface TurnCounterProps { - throttling?: Throttling -} - -export function TurnCounter({ throttling }: TurnCounterProps) { - if (!throttling) { - return null - } - - return ( -
-
- {throttling.numUserMessagesInConversation} - - {throttling.maxNumUserMessagesInConversation} -
-
-
- ) -} diff --git a/spaces/Omnibus/TTS-voice-clone/README.md b/spaces/Omnibus/TTS-voice-clone/README.md deleted file mode 100644 index 970a308912fada668a7f5de7d8379350cd9f72cb..0000000000000000000000000000000000000000 --- a/spaces/Omnibus/TTS-voice-clone/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: TTS Voice Clone -emoji: 🐨 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/evaluation/evaluator.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/evaluation/evaluator.py deleted file mode 100644 index baf996002b2fddc8c1952408d450b5bf69394f0a..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/evaluation/evaluator.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import datetime -import logging -import time -from collections import OrderedDict, abc -from contextlib import ExitStack, contextmanager -from typing import List, Union -import torch -from torch import nn - -from detectron2.utils.comm import get_world_size, is_main_process -from detectron2.utils.logger import log_every_n_seconds - - -class DatasetEvaluator: - """ - Base class for a dataset evaluator. - - The function :func:`inference_on_dataset` runs the model over - all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. - - This class will accumulate information of the inputs/outputs (by :meth:`process`), - and produce evaluation results in the end (by :meth:`evaluate`). - """ - - def reset(self): - """ - Preparation for a new round of evaluation. - Should be called before starting a round of evaluation. - """ - pass - - def process(self, inputs, outputs): - """ - Process the pair of inputs and outputs. - If they contain batches, the pairs can be consumed one-by-one using `zip`: - - .. code-block:: python - - for input_, output in zip(inputs, outputs): - # do evaluation on single input/output pair - ... - - Args: - inputs (list): the inputs that's used to call the model. - outputs (list): the return value of `model(inputs)` - """ - pass - - def evaluate(self): - """ - Evaluate/summarize the performance, after processing all input/output pairs. - - Returns: - dict: - A new evaluator class can return a dict of arbitrary format - as long as the user can process the results. - In our train_net.py, we expect the following format: - - * key: the name of the task (e.g., bbox) - * value: a dict of {metric name: score}, e.g.: {"AP50": 80} - """ - pass - - -class DatasetEvaluators(DatasetEvaluator): - """ - Wrapper class to combine multiple :class:`DatasetEvaluator` instances. - - This class dispatches every evaluation call to - all of its :class:`DatasetEvaluator`. - """ - - def __init__(self, evaluators): - """ - Args: - evaluators (list): the evaluators to combine. 
- """ - super().__init__() - self._evaluators = evaluators - - def reset(self): - for evaluator in self._evaluators: - evaluator.reset() - - def process(self, inputs, outputs): - for evaluator in self._evaluators: - evaluator.process(inputs, outputs) - - def evaluate(self): - results = OrderedDict() - for evaluator in self._evaluators: - result = evaluator.evaluate() - if is_main_process() and result is not None: - for k, v in result.items(): - assert ( - k not in results - ), "Different evaluators produce results with the same key {}".format(k) - results[k] = v - return results - - -def inference_on_dataset( - model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None] -): - """ - Run model on the data_loader and evaluate the metrics with evaluator. - Also benchmark the inference speed of `model.__call__` accurately. - The model will be used in eval mode. - - Args: - model (callable): a callable which takes an object from - `data_loader` and returns some outputs. - - If it's an nn.Module, it will be temporarily set to `eval` mode. - If you wish to evaluate a model in `training` mode instead, you can - wrap the given model and override its behavior of `.eval()` and `.train()`. - data_loader: an iterable object with a length. - The elements it generates will be the inputs to the model. - evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, - but don't want to do any evaluation. - - Returns: - The return value of `evaluator.evaluate()` - """ - num_devices = get_world_size() - logger = logging.getLogger(__name__) - logger.info("Start inference on {} batches".format(len(data_loader))) - - total = len(data_loader) # inference data loader must have a fixed length - if evaluator is None: - # create a no-op evaluator - evaluator = DatasetEvaluators([]) - if isinstance(evaluator, abc.MutableSequence): - evaluator = DatasetEvaluators(evaluator) - evaluator.reset() - - num_warmup = min(5, total - 1) - start_time = time.perf_counter() - total_data_time = 0 - total_compute_time = 0 - total_eval_time = 0 - with ExitStack() as stack: - if isinstance(model, nn.Module): - stack.enter_context(inference_context(model)) - stack.enter_context(torch.no_grad()) - - start_data_time = time.perf_counter() - for idx, inputs in enumerate(data_loader): - total_data_time += time.perf_counter() - start_data_time - if idx == num_warmup: - start_time = time.perf_counter() - total_data_time = 0 - total_compute_time = 0 - total_eval_time = 0 - - start_compute_time = time.perf_counter() - outputs = model(inputs) - if torch.cuda.is_available(): - torch.cuda.synchronize() - total_compute_time += time.perf_counter() - start_compute_time - - start_eval_time = time.perf_counter() - evaluator.process(inputs, outputs) - total_eval_time += time.perf_counter() - start_eval_time - - iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup) - data_seconds_per_iter = total_data_time / iters_after_start - compute_seconds_per_iter = total_compute_time / iters_after_start - eval_seconds_per_iter = total_eval_time / iters_after_start - total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start - if idx >= num_warmup * 2 or compute_seconds_per_iter > 5: - eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1))) - log_every_n_seconds( - logging.INFO, - ( - f"Inference done {idx + 1}/{total}. " - f"Dataloading: {data_seconds_per_iter:.4f} s/iter. " - f"Inference: {compute_seconds_per_iter:.4f} s/iter. 
" - f"Eval: {eval_seconds_per_iter:.4f} s/iter. " - f"Total: {total_seconds_per_iter:.4f} s/iter. " - f"ETA={eta}" - ), - n=5, - ) - start_data_time = time.perf_counter() - - # Measure the time only for this worker (before the synchronization barrier) - total_time = time.perf_counter() - start_time - total_time_str = str(datetime.timedelta(seconds=total_time)) - # NOTE this format is parsed by grep - logger.info( - "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format( - total_time_str, total_time / (total - num_warmup), num_devices - ) - ) - total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time))) - logger.info( - "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format( - total_compute_time_str, total_compute_time / (total - num_warmup), num_devices - ) - ) - - results = evaluator.evaluate() - # An evaluator may return None when not in main process. - # Replace it by an empty dict instead to make it easier for downstream code to handle - if results is None: - results = {} - return results - - -@contextmanager -def inference_context(model): - """ - A context where the model is temporarily changed to eval mode, - and restored to previous mode afterwards. - - Args: - model: a torch Module - """ - training_mode = model.training - model.eval() - yield - model.train(training_mode) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/export/README.md b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/export/README.md deleted file mode 100644 index 9fcd33513fb81ef3aeb4d3c8d9732324dffa2646..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/export/README.md +++ /dev/null @@ -1,13 +0,0 @@ - -This directory contains code to prepare a detectron2 model for deployment. -Currently it supports exporting a detectron2 model to Caffe2 format through ONNX. - -Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage. - - -### Acknowledgements - -Thanks to Mobile Vision team at Facebook for developing the Caffe2 conversion tools. - -Thanks to Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who -help export Detectron2 models to TorchScript. 
diff --git a/spaces/OpenShape/openshape-demo/fetch_sample_images.py b/spaces/OpenShape/openshape-demo/fetch_sample_images.py deleted file mode 100644 index fa13dfe010cfabd9636181737b0becd1f0f68eb9..0000000000000000000000000000000000000000 --- a/spaces/OpenShape/openshape-demo/fetch_sample_images.py +++ /dev/null @@ -1,53 +0,0 @@ -import io -import os -import cv2 -import tqdm -import numpy -import requests - - -def get_bytes(x: str): - return numpy.frombuffer(requests.get(x).content, numpy.uint8) - - -def get_image(x): - return cv2.imdecode(get_bytes(x), cv2.IMREAD_COLOR) - - -os.chdir(os.path.dirname(os.path.abspath(__file__))) -# classification -# uids = """ -# a784af0713a643b19ffcf65194bc0fbf -# 569a71ccf4d94c1585c9573521fb998f -# 4e6d591f6e50493aa5e31355084fc4e8 -# """.split() - -# caption -# uids = """ -# 283c845f2c2c4567971d42dc46831372 -# fc655111af5b49bf84722affc3ddba00 -# fa17099f18804409bc6d9e8e397b4681 -# d3c0e3495b5d40d087a7f82d1690b9cb -# 4b27adcf92f644bdabf8ecc6c5bef399 -# f8c13a19e84343e7b644c19f7b9488d3 -# """.split() - -# sd -uids = """ -b464ff8d732d44fab00b903652c8274e -efae586a477b49cea1a0777487cc2df3 -f8272460c67d476a8af29e1f2e344bc0 -ff2875fb1a5b4771805a5fd35c8fe7bb -b8db8dc5caad4fa5842a9ed6dbd2e9d6 -tpvzmLUXAURQ7ZxccJIBZvcIDlr -""".split() - - -uri_fmt = 'https://objaverse-thumbnail-images.s3.us-west-2.amazonaws.com/{}.jpg' -for u in tqdm.tqdm(uids): - img = get_image(uri_fmt.format(u)) - max_edge = max(img.shape) - if max_edge > 512: - s = 512 / max_edge - img = cv2.resize(img, [0, 0], fx=s, fy=s, interpolation=cv2.INTER_CUBIC) - cv2.imwrite("samples/sd/%s.jpg" % u, img) diff --git a/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/midas/__init__.py b/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/midas/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/PKUWilliamYang/StyleGANEX/models/stylegan2/lpips/networks_basic.py b/spaces/PKUWilliamYang/StyleGANEX/models/stylegan2/lpips/networks_basic.py deleted file mode 100644 index ec3f045f9f22dbf49e18e9edca25d04ccc551da9..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/StyleGANEX/models/stylegan2/lpips/networks_basic.py +++ /dev/null @@ -1,187 +0,0 @@ - -from __future__ import absolute_import - -import sys -import torch -import torch.nn as nn -import torch.nn.init as init -from torch.autograd import Variable -import numpy as np -from pdb import set_trace as st -from skimage import color -from IPython import embed -from models.stylegan2.lpips import pretrained_networks as pn - -import models.stylegan2.lpips as util - -def spatial_average(in_tens, keepdim=True): - return in_tens.mean([2,3],keepdim=keepdim) - -def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W - in_H = in_tens.shape[2] - scale_factor = 1.*out_H/in_H - - return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) - -# Learned perceptual metric -class PNetLin(nn.Module): - def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, version='0.1', lpips=True): - super(PNetLin, self).__init__() - - self.pnet_type = pnet_type - self.pnet_tune = pnet_tune - self.pnet_rand = pnet_rand - self.spatial = spatial - self.lpips = lpips - self.version = version - self.scaling_layer = ScalingLayer() - - if(self.pnet_type in ['vgg','vgg16']): - net_type = pn.vgg16 - self.chns = [64,128,256,512,512] - elif(self.pnet_type=='alex'): - net_type = pn.alexnet - self.chns = 
[64,192,384,256,256] - elif(self.pnet_type=='squeeze'): - net_type = pn.squeezenet - self.chns = [64,128,256,384,384,512,512] - self.L = len(self.chns) - - self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune) - - if(lpips): - self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) - self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) - self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) - self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) - self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) - self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4] - if(self.pnet_type=='squeeze'): # 7 layers for squeezenet - self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout) - self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout) - self.lins+=[self.lin5,self.lin6] - - def forward(self, in0, in1, retPerLayer=False): - # v0.0 - original release had a bug, where input was not scaled - in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1) - outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input) - feats0, feats1, diffs = {}, {}, {} - - for kk in range(self.L): - feats0[kk], feats1[kk] = util.normalize_tensor(outs0[kk]), util.normalize_tensor(outs1[kk]) - diffs[kk] = (feats0[kk]-feats1[kk])**2 - - if(self.lpips): - if(self.spatial): - res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)] - else: - if(self.spatial): - res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)] - - val = res[0] - for l in range(1,self.L): - val += res[l] - - if(retPerLayer): - return (val, res) - else: - return val - -class ScalingLayer(nn.Module): - def __init__(self): - super(ScalingLayer, self).__init__() - self.register_buffer('shift', torch.Tensor([-.030,-.088,-.188])[None,:,None,None]) - self.register_buffer('scale', torch.Tensor([.458,.448,.450])[None,:,None,None]) - - def forward(self, inp): - return (inp - self.shift) / self.scale - - -class NetLinLayer(nn.Module): - ''' A single linear layer which does a 1x1 conv ''' - def __init__(self, chn_in, chn_out=1, use_dropout=False): - super(NetLinLayer, self).__init__() - - layers = [nn.Dropout(),] if(use_dropout) else [] - layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),] - self.model = nn.Sequential(*layers) - - -class Dist2LogitLayer(nn.Module): - ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) ''' - def __init__(self, chn_mid=32, use_sigmoid=True): - super(Dist2LogitLayer, self).__init__() - - layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),] - layers += [nn.LeakyReLU(0.2,True),] - layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),] - layers += [nn.LeakyReLU(0.2,True),] - layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),] - if(use_sigmoid): - layers += [nn.Sigmoid(),] - self.model = nn.Sequential(*layers) - - def forward(self,d0,d1,eps=0.1): - return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1)) - -class BCERankingLoss(nn.Module): - def __init__(self, chn_mid=32): - super(BCERankingLoss, self).__init__() - self.net = 
Dist2LogitLayer(chn_mid=chn_mid) - # self.parameters = list(self.net.parameters()) - self.loss = torch.nn.BCELoss() - - def forward(self, d0, d1, judge): - per = (judge+1.)/2. - self.logit = self.net.forward(d0,d1) - return self.loss(self.logit, per) - -# L2, DSSIM metrics -class FakeNet(nn.Module): - def __init__(self, use_gpu=True, colorspace='Lab'): - super(FakeNet, self).__init__() - self.use_gpu = use_gpu - self.colorspace=colorspace - -class L2(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert(in0.size()[0]==1) # currently only supports batchSize 1 - - if(self.colorspace=='RGB'): - (N,C,X,Y) = in0.size() - value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N) - return value - elif(self.colorspace=='Lab'): - value = util.l2(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)), - util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float') - ret_var = Variable( torch.Tensor((value,) ) ) - if(self.use_gpu): - ret_var = ret_var.cuda() - return ret_var - -class DSSIM(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert(in0.size()[0]==1) # currently only supports batchSize 1 - - if(self.colorspace=='RGB'): - value = util.dssim(1.*util.tensor2im(in0.data), 1.*util.tensor2im(in1.data), range=255.).astype('float') - elif(self.colorspace=='Lab'): - value = util.dssim(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)), - util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float') - ret_var = Variable( torch.Tensor((value,) ) ) - if(self.use_gpu): - ret_var = ret_var.cuda() - return ret_var - -def print_network(net): - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - print('Network',net) - print('Total number of parameters: %d' % num_params) diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/align_all_parallel.py b/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/align_all_parallel.py deleted file mode 100644 index 05b520cd6590dc02ee533d3f0d69e6a364447d9f..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/align_all_parallel.py +++ /dev/null @@ -1,217 +0,0 @@ -""" -brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset) -author: lzhbrian (https://lzhbrian.me) -date: 2020.1.5 -note: code is heavily borrowed from - https://github.com/NVlabs/ffhq-dataset - http://dlib.net/face_landmark_detection.py.html - -requirements: - apt install cmake - conda install Pillow numpy scipy - pip install dlib - # download face landmark model from: - # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 -""" -from argparse import ArgumentParser -import time -import numpy as np -import PIL -import PIL.Image -import os -import scipy -import scipy.ndimage -import dlib -import multiprocessing as mp -import math - -#from configs.paths_config import model_paths -SHAPE_PREDICTOR_PATH = 'shape_predictor_68_face_landmarks.dat'#model_paths["shape_predictor"] - - -def get_landmark(filepath, predictor): - """get landmark with dlib - :return: np.array shape=(68, 2) - """ - detector = dlib.get_frontal_face_detector() - if type(filepath) == str: - img = dlib.load_rgb_image(filepath) - else: - img = filepath - dets = detector(img, 1) - - if len(dets) == 0: - print('Error: no face detected!') - return None - - shape = None - for k, d in enumerate(dets): - shape = predictor(img, d) - - if shape is None: - print('Error: No face 
detected! If you are sure there are faces in your input, you may rerun the code several times until the face is detected. Sometimes the detector is unstable.') - t = list(shape.parts()) - a = [] - for tt in t: - a.append([tt.x, tt.y]) - lm = np.array(a) - return lm - - -def align_face(filepath, predictor): - """ - :param filepath: str - :return: PIL Image - """ - - lm = get_landmark(filepath, predictor) - if lm is None: - return None - - lm_chin = lm[0: 17] # left-right - lm_eyebrow_left = lm[17: 22] # left-right - lm_eyebrow_right = lm[22: 27] # left-right - lm_nose = lm[27: 31] # top-down - lm_nostrils = lm[31: 36] # top-down - lm_eye_left = lm[36: 42] # left-clockwise - lm_eye_right = lm[42: 48] # left-clockwise - lm_mouth_outer = lm[48: 60] # left-clockwise - lm_mouth_inner = lm[60: 68] # left-clockwise - - # Calculate auxiliary vectors. - eye_left = np.mean(lm_eye_left, axis=0) - eye_right = np.mean(lm_eye_right, axis=0) - eye_avg = (eye_left + eye_right) * 0.5 - eye_to_eye = eye_right - eye_left - mouth_left = lm_mouth_outer[0] - mouth_right = lm_mouth_outer[6] - mouth_avg = (mouth_left + mouth_right) * 0.5 - eye_to_mouth = mouth_avg - eye_avg - - # Choose oriented crop rectangle. - x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] - x /= np.hypot(*x) - x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) - y = np.flipud(x) * [-1, 1] - c = eye_avg + eye_to_mouth * 0.1 - quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) - qsize = np.hypot(*x) * 2 - - # read image - if type(filepath) == str: - img = PIL.Image.open(filepath) - else: - img = PIL.Image.fromarray(filepath) - - output_size = 256 - transform_size = 256 - enable_padding = True - - # Shrink. - shrink = int(np.floor(qsize / output_size * 0.5)) - if shrink > 1: - rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink))) - img = img.resize(rsize, PIL.Image.ANTIALIAS) - quad /= shrink - qsize /= shrink - - # Crop. - border = max(int(np.rint(qsize * 0.1)), 3) - crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), - min(crop[3] + border, img.size[1])) - if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: - img = img.crop(crop) - quad -= crop[0:2] - - # Pad. - pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), - max(pad[3] - img.size[1] + border, 0)) - if enable_padding and max(pad) > border - 4: - pad = np.maximum(pad, int(np.rint(qsize * 0.3))) - img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') - h, w, _ = img.shape - y, x, _ = np.ogrid[:h, :w, :1] - mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]), - 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3])) - blur = qsize * 0.02 - img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) - img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0) - img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB') - quad += pad[:2] - - # Transform. 
- img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR) - if output_size < transform_size: - img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS) - - # Save aligned image. - return img - - -def chunks(lst, n): - """Yield successive n-sized chunks from lst.""" - for i in range(0, len(lst), n): - yield lst[i:i + n] - - -def extract_on_paths(file_paths): - predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH) - pid = mp.current_process().name - print('\t{} is starting to extract on #{} images'.format(pid, len(file_paths))) - tot_count = len(file_paths) - count = 0 - for file_path, res_path in file_paths: - count += 1 - if count % 100 == 0: - print('{} done with {}/{}'.format(pid, count, tot_count)) - try: - res = align_face(file_path, predictor) - res = res.convert('RGB') - os.makedirs(os.path.dirname(res_path), exist_ok=True) - res.save(res_path) - except Exception: - continue - print('\tDone!') - - -def parse_args(): - parser = ArgumentParser(add_help=False) - parser.add_argument('--num_threads', type=int, default=1) - parser.add_argument('--root_path', type=str, default='') - args = parser.parse_args() - return args - - -def run(args): - root_path = args.root_path - out_crops_path = root_path + '_crops' - if not os.path.exists(out_crops_path): - os.makedirs(out_crops_path, exist_ok=True) - - file_paths = [] - for root, dirs, files in os.walk(root_path): - for file in files: - file_path = os.path.join(root, file) - fname = os.path.join(out_crops_path, os.path.relpath(file_path, root_path)) - res_path = '{}.jpg'.format(os.path.splitext(fname)[0]) - if os.path.splitext(file_path)[1] == '.txt' or os.path.exists(res_path): - continue - file_paths.append((file_path, res_path)) - - file_chunks = list(chunks(file_paths, int(math.ceil(len(file_paths) / args.num_threads)))) - print(len(file_chunks)) - pool = mp.Pool(args.num_threads) - print('Running on {} paths\nHere we goooo'.format(len(file_paths))) - tic = time.time() - pool.map(extract_on_paths, file_chunks) - toc = time.time() - print('Mischief managed in {}s'.format(toc - tic)) - - -if __name__ == '__main__': - args = parse_args() - run(args) diff --git a/spaces/PascalNotin/Tranception_design/tranception/activations.py b/spaces/PascalNotin/Tranception_design/tranception/activations.py deleted file mode 100644 index 25702efc8ff20c62819d22fb2c2aa440a6210045..0000000000000000000000000000000000000000 --- a/spaces/PascalNotin/Tranception_design/tranception/activations.py +++ /dev/null @@ -1,114 +0,0 @@ -import math - -import torch -from packaging import version -from torch import nn - -from transformers.utils import logging - - -logger = logging.get_logger(__name__) - - -def _gelu_python(x): - """ - Original Implementation of the GELU activation function in Google BERT repo when initially created. For - information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 + - torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional - Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 - """ - return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) - - -def gelu_new(x): - """ - Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). 
Also see - the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 - """ - return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)))) - - -if version.parse(torch.__version__) < version.parse("1.4"): - gelu = _gelu_python -else: - gelu = nn.functional.gelu - - -def gelu_fast(x): - return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x))) - - -def quick_gelu(x): - return x * torch.sigmoid(1.702 * x) - - -def _silu_python(x): - """ - See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear - Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function - Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated - Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with - later. - """ - return x * torch.sigmoid(x) - - -if version.parse(torch.__version__) < version.parse("1.7"): - silu = _silu_python -else: - silu = nn.functional.silu - - -def _mish_python(x): - """ - See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also - visit the official repository for the paper: https://github.com/digantamisra98/Mish - """ - return x * torch.tanh(nn.functional.softplus(x)) - - -if version.parse(torch.__version__) < version.parse("1.9"): - mish = _mish_python -else: - mish = nn.functional.mish - - -def linear_act(x): - return x - -def squared_relu(x): - """ - Squared ReLU variant that is fastest with Pytorch. - """ - x = nn.functional.relu(x) - return x*x - -def squared_relu_xla(x): - """ - Squared ReLU variant that is fastest with JAX. - """ - x = nn.functional.relu(x) - return x**2 - -tranception_ACT2FN = { - "relu": nn.functional.relu, - "silu": silu, - "swish": silu, - "gelu": gelu, - "tanh": torch.tanh, - "gelu_new": gelu_new, - "gelu_fast": gelu_fast, - "quick_gelu": quick_gelu, - "mish": mish, - "linear": linear_act, - "sigmoid": torch.sigmoid, - "squared_relu": squared_relu, - "squared_relu_xla": squared_relu_xla, -} - - -def get_activation(activation_string): - if activation_string in tranception_ACT2FN: - return tranception_ACT2FN[activation_string] - else: - raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(tranception_ACT2FN.keys())}") \ No newline at end of file diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/function.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/function.go deleted file mode 100644 index 52c93bb9c755ed2f7b80baf9a86c056947fbbfef..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/function.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/SDXL-artists-browser/index.html b/spaces/PeepDaSlan9/SDXL-artists-browser/index.html deleted file mode 100644 index b3fb0c4c79ac210f2867054ab61d000b4b7c30df..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/SDXL-artists-browser/index.html +++ /dev/null @@ -1,212 +0,0 @@ - - - - SDXL Artist Style Explorer by Mattthew - - - - - - - - - -
-
-
-
-

How to

-

Clicking an artist's...

-
    -
  • name copies them to clipboard
  • -
  • tags toggles those checkmarks
  • -
  • star marks them as favorite
  • -
  • 🎨, 🧑, and 🏞️ switches the prompt/image
  • -
-

Permissive filter

-
    -
  • checked: artists matching any checked tags
  • -
  • unchecked: artists matching all checked tags (see the sketch after this list)
  • -
  • use unchecked to filter your favorites
  • -
-
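A compact way to read the permissive-filter rule above, as a Python sketch of the any-vs-all logic; this is not the page's actual JavaScript, and all names are made up.

```python
# Sketch of the permissive-filter behaviour described above (hypothetical names).
def artist_matches(artist_tags, checked_tags, permissive):
    if not checked_tags:   # nothing checked: show everyone
        return True
    if permissive:         # permissive checked: match ANY checked tag
        return any(tag in artist_tags for tag in checked_tags)
    return all(tag in artist_tags for tag in checked_tags)  # unchecked: match ALL

# Example: an artist tagged {"oil", "portrait"} passes {"oil", "landscape"}
# only when the permissive box is checked.
print(artist_matches({"oil", "portrait"}, {"oil", "landscape"}, permissive=True))   # True
print(artist_matches({"oil", "portrait"}, {"oil", "landscape"}, permissive=False))  # False
```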

Hide low-use tags

-
    -
  • checked: hides tags that match fewer than 3 artists
  • -
  • note that all hidden tags are unchecked
  • -
-

Hide deprecated artists

-
    -
  • I've hand-verified that SDXL doesn't know these artists' styles
  • -
  • If you prompt with their names, the result...
  • -
  • ...will be generic and unlike their actual style.
  • -
  • You're better off prompting "a painting", etc.
  • -
  • I may eventually remove the artists from the database
  • -
-

When using Stable Diffusion

-
    -
  • - Reproduce these styles by prompting with "by {Artist full name}, ...." -
  • Adding the style tags to the prompt can also help
  • -
  • You can combine the styles of two or more artists; see the example below
  • -
-
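To make the bullet points above concrete, a tiny prompt-building sketch follows; the artist names and style tags are hypothetical.

```python
# Hypothetical example of combining artists and style tags into one prompt.
artists = ["Jane Doe", "John Roe"]
style_tags = ["impressionism", "oil painting"]
prompt = f"by {' and '.join(artists)}, {', '.join(style_tags)}, artwork image"
print(prompt)  # -> by Jane Doe and John Roe, impressionism, oil painting, artwork image
```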
-
-
-
-

About

-

Incomplete beta version!

-

- I'll add thousands more artists to the database soon. I want to work out any major issues first. Please give feedback on Huggingface. -

-

How to support this project

-
    -
  • Please tell a friend or share on your socials
  • -
  • Suggest artists I should add or remove
  • -
  • Suggest features and report bugs
  • -
  • Leave all feedback on Huggingface
  • -
  • I don't need money. Thanks always feels nice!
  • -
-

Image parameters

-
    -
  • - All: Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 10, Seed: 42, Size: 1024x1024, Model hash: 31e35c80fc, Model: sd_xl_base_1.0, Version: v1.5.1 (no refiner pass); a reproduction sketch follows this list -
  • -
  • - 🎨 Prompt: by {Artist full name}, artwork image, Negative prompt: frame, border, text, signature -
  • -
  • - 🧑 Prompt: by {Artist full name}, image of a gently smiling, portrait of a person, head and torso, sitting in their room, Negative prompt: self-portrait, outside, frame, border, text, signature -
  • -
  • - 🏞️ Prompt: by {Artist full name}, image of a landscape, outdoor natural scenery, water, bridge, Negative prompt: people, person, frame, border, text, signature -
  • -
-
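A hedged sketch of reproducing the listed settings outside the webui with the diffusers library. Assumptions: a diffusers version with SDXL support is installed, "DPM++ 2M Karras" is approximated by DPMSolverMultistepScheduler with Karras sigmas, no refiner pass is applied, the artist name is a placeholder, and results will not be bit-identical to the A1111 webui even with the same seed.

```python
# Approximate reproduction of the listed parameters with diffusers (assumption:
# diffusers with SDXL support; not an exact match for the webui's sampler/seed).
import torch
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
# "DPM++ 2M Karras" roughly maps to DPM-Solver++ multistep with Karras sigmas.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True
)

artist = "Artist Full Name"  # placeholder
image = pipe(
    prompt=f"by {artist}, artwork image",
    negative_prompt="frame, border, text, signature",
    num_inference_steps=20,   # Steps: 20
    guidance_scale=10,        # CFG scale: 10
    height=1024, width=1024,  # Size: 1024x1024
    generator=torch.Generator("cuda").manual_seed(42),  # Seed: 42
).images[0]
image.save("sample.png")
```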

Disclaimers

-
    -
  • This tool is just for fun. This information may NOT be correct! The tags come from a mix of sources and manual input. Many tags are AI-generated. I make corrections when I happen to spot them or when they are submitted.
  • -
  • Remember, SDXL is only a crude imitation of these artists. Check out these artists' actual artwork for more inspiration!
  • -
  • This database has more straight white male artists than in the actual population. That's because they've been favored by art-collectors, past and present. Please suggest artists I should add.
  • -
  • My code doesn't use cookies and sends nothing to any server. But it's hosted on Huggingface, and I can't control if they cookie you
  • -
  • Open source. Creative Commons license. Download for free.
  • -
  • I don't get nor do I want compensation for this
  • -
  • I'm not affiliated with Stability AI
  • -
  • Use at your own risk 🧟
  • -
-

What's with the sedan?

-
    -
  • - All images were generated with the same seed (42). For many artists, especially with the 🎨 prompt, that seed produces a car, which is not in the prompt. Other seeds would produce a different recurring visual element across many artists. -
  • -
-

Missing images

-

- If artists are listed in the database file, but their image files are missing, the files are listed below. -

-
    -
    -
    -
    -
    -

    Export/Import favorited artists

    - -
    -
    copy to clipboard
    -
    import
    -
    -
    -
    -
    -
    - documents: - ⁉️  - 📓  - 📤  -
    -
    - show me: - 🎨 - 🧑 - 🏞️ -
    -
    - sort artists by: - 🎰 - 🔠 -
    -
    - sort tags by: - 📶 - 🔠 -
    -
    - -
    - -
    -
    - - - - - -
    - - -
    -
    -
    -
    - these filters hide every image
    - check-mark any tag or “permissive”
    - 👀 -
    - - -
    -
    -
    -
    - SDXL Artist Style Explorer, v1.0, by - Mattthew - -
    -
    -
    -
    - - \ No newline at end of file diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/saconv.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/saconv.py deleted file mode 100644 index b4ee3978e097fca422805db4e31ae481006d7971..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/saconv.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from annotator.uniformer.mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init -from annotator.uniformer.mmcv.ops.deform_conv import deform_conv2d -from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version - - -@CONV_LAYERS.register_module(name='SAC') -class SAConv2d(ConvAWS2d): - """SAC (Switchable Atrous Convolution) - - This is an implementation of SAC in DetectoRS - (https://arxiv.org/pdf/2006.02334.pdf). - - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution. Default: 1 - padding (int or tuple, optional): Zero-padding added to both sides of - the input. Default: 0 - padding_mode (string, optional): ``'zeros'``, ``'reflect'``, - ``'replicate'`` or ``'circular'``. Default: ``'zeros'`` - dilation (int or tuple, optional): Spacing between kernel elements. - Default: 1 - groups (int, optional): Number of blocked connections from input - channels to output channels. Default: 1 - bias (bool, optional): If ``True``, adds a learnable bias to the - output. Default: ``True`` - use_deform: If ``True``, replace convolution with deformable - convolution. Default: ``False``. 
- """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True, - use_deform=False): - super().__init__( - in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias=bias) - self.use_deform = use_deform - self.switch = nn.Conv2d( - self.in_channels, 1, kernel_size=1, stride=stride, bias=True) - self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size())) - self.pre_context = nn.Conv2d( - self.in_channels, self.in_channels, kernel_size=1, bias=True) - self.post_context = nn.Conv2d( - self.out_channels, self.out_channels, kernel_size=1, bias=True) - if self.use_deform: - self.offset_s = nn.Conv2d( - self.in_channels, - 18, - kernel_size=3, - padding=1, - stride=stride, - bias=True) - self.offset_l = nn.Conv2d( - self.in_channels, - 18, - kernel_size=3, - padding=1, - stride=stride, - bias=True) - self.init_weights() - - def init_weights(self): - constant_init(self.switch, 0, bias=1) - self.weight_diff.data.zero_() - constant_init(self.pre_context, 0) - constant_init(self.post_context, 0) - if self.use_deform: - constant_init(self.offset_s, 0) - constant_init(self.offset_l, 0) - - def forward(self, x): - # pre-context - avg_x = F.adaptive_avg_pool2d(x, output_size=1) - avg_x = self.pre_context(avg_x) - avg_x = avg_x.expand_as(x) - x = x + avg_x - # switch - avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect') - avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0) - switch = self.switch(avg_x) - # sac - weight = self._get_weight(self.weight) - zero_bias = torch.zeros( - self.out_channels, device=weight.device, dtype=weight.dtype) - - if self.use_deform: - offset = self.offset_s(avg_x) - out_s = deform_conv2d(x, offset, weight, self.stride, self.padding, - self.dilation, self.groups, 1) - else: - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.5.0')): - out_s = super().conv2d_forward(x, weight) - elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'): - # bias is a required argument of _conv_forward in torch 1.8.0 - out_s = super()._conv_forward(x, weight, zero_bias) - else: - out_s = super()._conv_forward(x, weight) - ori_p = self.padding - ori_d = self.dilation - self.padding = tuple(3 * p for p in self.padding) - self.dilation = tuple(3 * d for d in self.dilation) - weight = weight + self.weight_diff - if self.use_deform: - offset = self.offset_l(avg_x) - out_l = deform_conv2d(x, offset, weight, self.stride, self.padding, - self.dilation, self.groups, 1) - else: - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.5.0')): - out_l = super().conv2d_forward(x, weight) - elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'): - # bias is a required argument of _conv_forward in torch 1.8.0 - out_l = super()._conv_forward(x, weight, zero_bias) - else: - out_l = super()._conv_forward(x, weight) - - out = switch * out_s + (1 - switch) * out_l - self.padding = ori_p - self.dilation = ori_d - # post-context - avg_x = F.adaptive_avg_pool2d(out, output_size=1) - avg_x = self.post_context(avg_x) - avg_x = avg_x.expand_as(out) - out = out + avg_x - return out diff --git a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/examples/submit_example_7.sh b/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/examples/submit_example_7.sh deleted file mode 100644 index bc4501e54285cf726a55907da472532244223e11..0000000000000000000000000000000000000000 --- 
a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/examples/submit_example_7.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -#SBATCH -p gpu -#SBATCH --mem=32g -#SBATCH --gres=gpu:rtx2080:1 -#SBATCH -c 2 -#SBATCH --output=example_7.out - -source activate mlfold - -folder_with_pdbs="../inputs/PDB_monomers/pdbs/" - -output_dir="../outputs/example_7_outputs" -if [ ! -d $output_dir ] -then - mkdir -p $output_dir -fi - -path_for_parsed_chains=$output_dir"/parsed_pdbs.jsonl" - -python ../helper_scripts/parse_multiple_chains.py --input_path=$folder_with_pdbs --output_path=$path_for_parsed_chains - -python ../protein_mpnn_run.py \ - --jsonl_path $path_for_parsed_chains \ - --out_folder $output_dir \ - --num_seq_per_target 1 \ - --sampling_temp "0.1" \ - --unconditional_probs_only 1 \ - --seed 37 \ - --batch_size 1 diff --git a/spaces/PushkarA07/Sanskrit-Text-To-Speech/monotonic_align/core.py b/spaces/PushkarA07/Sanskrit-Text-To-Speech/monotonic_align/core.py deleted file mode 100644 index dddc688d76172b880054e544b7a217acd013f14f..0000000000000000000000000000000000000000 --- a/spaces/PushkarA07/Sanskrit-Text-To-Speech/monotonic_align/core.py +++ /dev/null @@ -1,35 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:,:,::1], numba.float32[:,:,::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val=-1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y-1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y-1, x-1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - index = index - 1 diff --git a/spaces/Raghavan1988/falcon-lablabai-hackathon-brainstorming-buddy-for-researchers/README.md b/spaces/Raghavan1988/falcon-lablabai-hackathon-brainstorming-buddy-for-researchers/README.md deleted file mode 100644 index 59f39baf2f6a633c17695698f582035ae296823d..0000000000000000000000000000000000000000 --- a/spaces/Raghavan1988/falcon-lablabai-hackathon-brainstorming-buddy-for-researchers/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Falcon-180B Demo -emoji: 💬 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: true ---- diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/operations/check.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/operations/check.py deleted file mode 100644 index fb3ac8b9c9ea57ec1bb667cb8e904a8b5b2f9df2..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/operations/check.py +++ /dev/null @@ -1,149 +0,0 @@ -"""Validation of dependencies of packages -""" - -import logging -from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple - -from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.utils import NormalizedName, canonicalize_name - -from pip._internal.distributions import make_distribution_for_install_requirement -from pip._internal.metadata import get_default_environment -from pip._internal.metadata.base import DistributionVersion -from 
pip._internal.req.req_install import InstallRequirement - -logger = logging.getLogger(__name__) - - -class PackageDetails(NamedTuple): - version: DistributionVersion - dependencies: List[Requirement] - - -# Shorthands -PackageSet = Dict[NormalizedName, PackageDetails] -Missing = Tuple[NormalizedName, Requirement] -Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement] - -MissingDict = Dict[NormalizedName, List[Missing]] -ConflictingDict = Dict[NormalizedName, List[Conflicting]] -CheckResult = Tuple[MissingDict, ConflictingDict] -ConflictDetails = Tuple[PackageSet, CheckResult] - - -def create_package_set_from_installed() -> Tuple[PackageSet, bool]: - """Converts a list of distributions into a PackageSet.""" - package_set = {} - problems = False - env = get_default_environment() - for dist in env.iter_installed_distributions(local_only=False, skip=()): - name = dist.canonical_name - try: - dependencies = list(dist.iter_dependencies()) - package_set[name] = PackageDetails(dist.version, dependencies) - except (OSError, ValueError) as e: - # Don't crash on unreadable or broken metadata. - logger.warning("Error parsing requirements for %s: %s", name, e) - problems = True - return package_set, problems - - -def check_package_set( - package_set: PackageSet, should_ignore: Optional[Callable[[str], bool]] = None -) -> CheckResult: - """Check if a package set is consistent - - If should_ignore is passed, it should be a callable that takes a - package name and returns a boolean. - """ - - missing = {} - conflicting = {} - - for package_name, package_detail in package_set.items(): - # Info about dependencies of package_name - missing_deps: Set[Missing] = set() - conflicting_deps: Set[Conflicting] = set() - - if should_ignore and should_ignore(package_name): - continue - - for req in package_detail.dependencies: - name = canonicalize_name(req.name) - - # Check if it's missing - if name not in package_set: - missed = True - if req.marker is not None: - missed = req.marker.evaluate() - if missed: - missing_deps.add((name, req)) - continue - - # Check if there's a conflict - version = package_set[name].version - if not req.specifier.contains(version, prereleases=True): - conflicting_deps.add((name, version, req)) - - if missing_deps: - missing[package_name] = sorted(missing_deps, key=str) - if conflicting_deps: - conflicting[package_name] = sorted(conflicting_deps, key=str) - - return missing, conflicting - - -def check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails: - """For checking if the dependency graph would be consistent after \ - installing given requirements - """ - # Start from the current state - package_set, _ = create_package_set_from_installed() - # Install packages - would_be_installed = _simulate_installation_of(to_install, package_set) - - # Only warn about directly-dependent packages; create a whitelist of them - whitelist = _create_whitelist(would_be_installed, package_set) - - return ( - package_set, - check_package_set( - package_set, should_ignore=lambda name: name not in whitelist - ), - ) - - -def _simulate_installation_of( - to_install: List[InstallRequirement], package_set: PackageSet -) -> Set[NormalizedName]: - """Computes the version of packages after installing to_install.""" - # Keep track of packages that were installed - installed = set() - - # Modify it as installing requirement_set would (assuming no errors) - for inst_req in to_install: - abstract_dist = make_distribution_for_install_requirement(inst_req) - dist = 
abstract_dist.get_metadata_distribution() - name = dist.canonical_name - package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies())) - - installed.add(name) - - return installed - - -def _create_whitelist( - would_be_installed: Set[NormalizedName], package_set: PackageSet -) -> Set[NormalizedName]: - packages_affected = set(would_be_installed) - - for package_name in package_set: - if package_name in packages_affected: - continue - - for req in package_set[package_name].dependencies: - if canonicalize_name(req.name) in packages_affected: - packages_affected.add(package_name) - break - - return packages_affected diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/extension.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/extension.py deleted file mode 100644 index 58c023f6b4479c631f382e5062932793d2bee26b..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/extension.py +++ /dev/null @@ -1,148 +0,0 @@ -import re -import functools -import distutils.core -import distutils.errors -import distutils.extension - -from .monkey import get_unpatched - - -def _have_cython(): - """ - Return True if Cython can be imported. - """ - cython_impl = 'Cython.Distutils.build_ext' - try: - # from (cython_impl) import build_ext - __import__(cython_impl, fromlist=['build_ext']).build_ext - return True - except Exception: - pass - return False - - -# for compatibility -have_pyrex = _have_cython - -_Extension = get_unpatched(distutils.core.Extension) - - -class Extension(_Extension): - """ - Describes a single extension module. - - This means that all source files will be compiled into a single binary file - ``.`` (with ```` derived from ``name`` and - ```` defined by one of the values in - ``importlib.machinery.EXTENSION_SUFFIXES``). - - In the case ``.pyx`` files are passed as ``sources and`` ``Cython`` is **not** - installed in the build environment, ``setuptools`` may also try to look for the - equivalent ``.cpp`` or ``.c`` files. - - :arg str name: - the full name of the extension, including any packages -- ie. - *not* a filename or pathname, but Python dotted name - - :arg list[str] sources: - list of source filenames, relative to the distribution root - (where the setup script lives), in Unix form (slash-separated) - for portability. Source files may be C, C++, SWIG (.i), - platform-specific resource files, or whatever else is recognized - by the "build_ext" command as source for a Python extension. - - :keyword list[str] include_dirs: - list of directories to search for C/C++ header files (in Unix - form for portability) - - :keyword list[tuple[str, str|None]] define_macros: - list of macros to define; each macro is defined using a 2-tuple: - the first item corresponding to the name of the macro and the second - item either a string with its value or None to - define it without a particular value (equivalent of "#define - FOO" in source or -DFOO on Unix C compiler command line) - - :keyword list[str] undef_macros: - list of macros to undefine explicitly - - :keyword list[str] library_dirs: - list of directories to search for C/C++ libraries at link time - - :keyword list[str] libraries: - list of library names (not filenames or paths) to link against - - :keyword list[str] runtime_library_dirs: - list of directories to search for C/C++ libraries at run time - (for shared extensions, this is when the extension is loaded). 
- Setting this will cause an exception during build on Windows - platforms. - - :keyword list[str] extra_objects: - list of extra files to link with (eg. object files not implied - by 'sources', static library that must be explicitly specified, - binary resource files, etc.) - - :keyword list[str] extra_compile_args: - any extra platform- and compiler-specific information to use - when compiling the source files in 'sources'. For platforms and - compilers where "command line" makes sense, this is typically a - list of command-line arguments, but for other platforms it could - be anything. - - :keyword list[str] extra_link_args: - any extra platform- and compiler-specific information to use - when linking object files together to create the extension (or - to create a new static Python interpreter). Similar - interpretation as for 'extra_compile_args'. - - :keyword list[str] export_symbols: - list of symbols to be exported from a shared extension. Not - used on all platforms, and not generally necessary for Python - extensions, which typically export exactly one symbol: "init" + - extension_name. - - :keyword list[str] swig_opts: - any extra options to pass to SWIG if a source file has the .i - extension. - - :keyword list[str] depends: - list of files that the extension depends on - - :keyword str language: - extension language (i.e. "c", "c++", "objc"). Will be detected - from the source extensions if not provided. - - :keyword bool optional: - specifies that a build failure in the extension should not abort the - build process, but simply not install the failing extension. - - :keyword bool py_limited_api: - opt-in flag for the usage of :doc:`Python's limited API `. - - :raises setuptools.errors.PlatformError: if 'runtime_library_dirs' is - specified on Windows. (since v63) - """ - - def __init__(self, name, sources, *args, **kw): - # The *args is needed for compatibility as calls may use positional - # arguments. py_limited_api may be set only via keyword. - self.py_limited_api = kw.pop("py_limited_api", False) - super().__init__(name, sources, *args, **kw) - - def _convert_pyx_sources_to_lang(self): - """ - Replace sources with .pyx extensions to sources with the target - language extension. This mechanism allows language authors to supply - pre-converted sources but to prefer the .pyx sources. 
- """ - if _have_cython(): - # the build has Cython, so allow it to compile the .pyx files - return - lang = self.language or '' - target_ext = '.cpp' if lang.lower() == 'c++' else '.c' - sub = functools.partial(re.sub, '.pyx$', target_ext) - self.sources = list(map(sub, self.sources)) - - -class Library(Extension): - """Just like a regular Extension, but built as a library instead""" diff --git a/spaces/Rbrq/DeticChatGPT/README.md b/spaces/Rbrq/DeticChatGPT/README.md deleted file mode 100644 index 15e4d59222dd185085c1399189f6e5d6f32d579f..0000000000000000000000000000000000000000 --- a/spaces/Rbrq/DeticChatGPT/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Detic+ChatGPT -emoji: 👀 -colorFrom: blue -colorTo: red -sdk: gradio -app_file: app.py -pinned: false -duplicated_from: taesiri/DeticChatGPT ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/Realcat/image-matching-webui/third_party/r2d2/datasets/dataset.py b/spaces/Realcat/image-matching-webui/third_party/r2d2/datasets/dataset.py deleted file mode 100644 index 5f4474e7dc8b81f091cac1e13f431c5c9f1840f3..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/r2d2/datasets/dataset.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2019-present NAVER Corp. -# CC BY-NC-SA 3.0 -# Available only for non-commercial use - -import os -import json -import pdb -import numpy as np - - -class Dataset(object): - """Base class for a dataset. 
To be overloaded.""" - - root = "" - img_dir = "" - nimg = 0 - - def __len__(self): - return self.nimg - - def get_key(self, img_idx): - raise NotImplementedError() - - def get_filename(self, img_idx, root=None): - return os.path.join(root or self.root, self.img_dir, self.get_key(img_idx)) - - def get_image(self, img_idx): - from PIL import Image - - fname = self.get_filename(img_idx) - try: - return Image.open(fname).convert("RGB") - except Exception as e: - raise IOError("Could not load image %s (reason: %s)" % (fname, str(e))) - - def __repr__(self): - res = "Dataset: %s\n" % self.__class__.__name__ - res += " %d images" % self.nimg - res += "\n root: %s...\n" % self.root - return res - - -class CatDataset(Dataset): - """Concatenation of several datasets.""" - - def __init__(self, *datasets): - assert len(datasets) >= 1 - self.datasets = datasets - offsets = [0] - for db in datasets: - offsets.append(db.nimg) - self.offsets = np.cumsum(offsets) - self.nimg = self.offsets[-1] - self.root = None - - def which(self, i): - pos = np.searchsorted(self.offsets, i, side="right") - 1 - assert pos < self.nimg, "Bad image index %d >= %d" % (i, self.nimg) - return pos, i - self.offsets[pos] - - def get_key(self, i): - b, i = self.which(i) - return self.datasets[b].get_key(i) - - def get_filename(self, i): - b, i = self.which(i) - return self.datasets[b].get_filename(i) - - def __repr__(self): - fmt_str = "CatDataset(" - for db in self.datasets: - fmt_str += str(db).replace("\n", " ") + ", " - return fmt_str[:-2] + ")" diff --git a/spaces/Rezuwan/parrot_classifier/app.py b/spaces/Rezuwan/parrot_classifier/app.py deleted file mode 100644 index efbb9811ee721a70c314d5b30d38b780a58fb6a7..0000000000000000000000000000000000000000 --- a/spaces/Rezuwan/parrot_classifier/app.py +++ /dev/null @@ -1,122 +0,0 @@ - -from fastai.vision.all import * -from fastai.vision.all import load_learner -import fastai -import os -import gradio as gr -import pathlib - - -# from google.colab import drive -# drive.mount('/content/drive/') - -temp = pathlib.WindowsPath -pathlib.WindowsPath = pathlib.PosixPath - -model_dir = "models/parrot-recognizer-v10.pkl" - -model = load_learner(model_dir) - -parrot_species = ['african grey parrot', - 'australian king parrot', - 'australian night parrot', - 'bare eyed cockatoo', - 'blue and yellow macaw', - 'blue headed parrot', - 'blue lorikeet', - 'brown hooded parrot', - 'budgerigar', - 'burrowing parrot', - 'caique parrot', - 'catalina macaw', - 'chestnut-fronted macaw', - 'citron cockatoo', - 'cockatiels', - 'crimson rosella', - 'cuban amazon', - 'eclectus parrot', - 'galah cockatoo', - 'gang gang cockatoo', - 'golden parakeet', - 'great green macaw', - 'great hanging parrot', - 'greater vasa parrot', - 'hahn_s macaws', - 'hooded parrot', - 'hyacinth macaw', - 'kea', - 'kākāpō', - 'lovebirds', - 'major mitchell_s cockatoo', - 'monk parakeet', - 'musk lorikeet', - 'palm cockatoo', - 'parrotlet', - 'plum headed parakeet', - 'puerto rican amazon', - 'rainbow lorikeet', - 'red breasted parakeet', - 'red crowned amazon', - 'red crowned parakeet', - 'red fan parrot', - 'red lory', - 'red rumped parrot', - 'red shouldered macaw', - 'red tailed black cockatoos', - 'rose ringed parakeet', - 'saint vincent amazon', - 'salmon crested cockatoo', - 'scarlet macaw', - 'senegal parrot', - 'spixs macaw', - 'sulpher crested cockatoo', - 'sun conure', - 'thick billed parrot', - 'turquoise fronted amazon', - 'umbrella cockatoo', - 'vernal hanging parrot', - 'yellow collared macaws', - 'yellow headed 
amazon'] - -def recognize_image(image): - pred, idx, probs = model.predict(image) - return dict(zip(parrot_species, map(float, probs))) - - -# im = "/content/drive/MyDrive/Learnings/fai/test_images/unknown_12.jpg" -# img = PILImage.create(im) -# img.thumbnail((192,192)) -# img - -# recognize_image(img) - - - -image = gr.inputs.Image(shape=(192,192)) -label = gr.outputs.Label(num_top_classes=5) - -examples = [ - "test_images/unknown_00.jpg", - "test_images/unknown_01.jpg", - "test_images/unknown_02.jpg", - "test_images/unknown_03.jpg", - "test_images/unknown_04.jpg", - "test_images/unknown_05.jpg", - "test_images/unknown_06.jpg", - "test_images/unknown_07.jpg", - "test_images/unknown_08.jpg", - "test_images/unknown_09.jpg", - "test_images/unknown_10.jpg", - "test_images/unknown_11.jpg", - "test_images/unknown_12.jpg", - "test_images/unknown_13.jpg", - "test_images/unknown_14.jpg", - "test_images/unknown_15.jpg", - "test_images/unknown_16.jpg", - "test_images/unknown_17.jpg", - "test_images/unknown_18.jpg", - "test_images/unknown_19.jpg", - ] - -iface = gr.Interface(fn=recognize_image, inputs=image, outputs=label, examples=examples) -iface.launch(inline=False) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/bbox_overlaps.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/bbox_overlaps.py deleted file mode 100644 index 93559ea0f25369d552a5365312fa32b9ffec9226..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/bbox_overlaps.py +++ /dev/null @@ -1,48 +0,0 @@ -import numpy as np - - -def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6): - """Calculate the ious between each bbox of bboxes1 and bboxes2. 
- - Args: - bboxes1(ndarray): shape (n, 4) - bboxes2(ndarray): shape (k, 4) - mode(str): iou (intersection over union) or iof (intersection - over foreground) - - Returns: - ious(ndarray): shape (n, k) - """ - - assert mode in ['iou', 'iof'] - - bboxes1 = bboxes1.astype(np.float32) - bboxes2 = bboxes2.astype(np.float32) - rows = bboxes1.shape[0] - cols = bboxes2.shape[0] - ious = np.zeros((rows, cols), dtype=np.float32) - if rows * cols == 0: - return ious - exchange = False - if bboxes1.shape[0] > bboxes2.shape[0]: - bboxes1, bboxes2 = bboxes2, bboxes1 - ious = np.zeros((cols, rows), dtype=np.float32) - exchange = True - area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1]) - area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1]) - for i in range(bboxes1.shape[0]): - x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) - y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) - x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) - y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) - overlap = np.maximum(x_end - x_start, 0) * np.maximum( - y_end - y_start, 0) - if mode == 'iou': - union = area1[i] + area2 - overlap - else: - union = area1[i] if not exchange else area2 - union = np.maximum(union, eps) - ious[i, :] = overlap / union - if exchange: - ious = ious.T - return ious diff --git a/spaces/SUPERpuper/Text-to-image-AI-3/app.py b/spaces/SUPERpuper/Text-to-image-AI-3/app.py deleted file mode 100644 index 6f9a0f0853d6e44b1882522ac59c3a1031b82744..0000000000000000000000000000000000000000 --- a/spaces/SUPERpuper/Text-to-image-AI-3/app.py +++ /dev/null @@ -1,77 +0,0 @@ -import numpy as np -import tensorflow as tf -from tensorflow.keras.preprocessing import image as kp_image -from tensorflow.keras.applications import vgg19 -import matplotlib.pyplot as plt - -# Load the content and style images -content_path = 'content_image.jpg' -style_path = 'futuristic_style_image.jpg' -content_img = kp_image.load_img(content_path) -style_img = kp_image.load_img(style_path) - -# Convert the images to arrays and preprocess them for use with VGG19 -content_array = kp_image.img_to_array(content_img) -style_array = kp_image.img_to_array(style_img) -content_array = np.expand_dims(content_array, axis=0) -style_array = np.expand_dims(style_array, axis=0) -content_array = vgg19.preprocess_input(content_array) -style_array = vgg19.preprocess_input(style_array) - -# Load the VGG19 model and extract the content and style features -vgg_model = vgg19.VGG19(include_top=False, weights='imagenet') -content_features = vgg_model.predict(content_array)['block5_conv2'] -style_features = vgg_model.predict(style_array)['block5_conv2'] - -# Define a function to compute the style transfer loss -def style_transfer_loss(style_features, generated_features): - style_loss = tf.reduce_mean(tf.square(style_features - generated_features)) - return style_loss - -# Define a function to generate the output image using neural style transfer -def generate_output_image(content_array, style_features, num_iterations=100): - # Initialize the output image with the content image - output_array = content_array.copy() - - # Convert the output array to a tensor and create a variable from it - output_tensor = tf.Variable(output_array) - - # Define a function to compute the content loss - def content_loss(content_features, generated_features): - content_loss = tf.reduce_mean(tf.square(content_features - generated_features)) - return content_loss - - # Define the optimizer and learning rate - optimizer = 
tf.optimizers.Adam(learning_rate=0.01) - - # Generate the output image using neural style transfer - for i in range(num_iterations): - with tf.GradientTape() as tape: - # Compute the content and style losses - generated_features = vgg_model(output_tensor)['block5_conv2'] - content_loss_value = content_loss(content_features, generated_features) - style_loss_value = style_transfer_loss(style_features, generated_features) - - # Compute the total loss - total_loss = content_loss_value + style_loss_value - - # Compute the gradients of the total loss with respect to the output tensor - gradients = tape.gradient(total_loss, output_tensor) - - # Apply the gradients to the output tensor - optimizer.apply_gradients([(gradients, output_tensor)]) - - # Clip the output tensor to the range [0, 255] - output_tensor.assign(tf.clip_by_value(output_tensor, 0.0, 255.0)) - - # Convert the output tensor back to an array - output_array = output_tensor.numpy()[0] - return output_array - -# Generate the output image -output_array = generate_output_image(content_array, style_features) - -# Convert the output array to an image and display it -output_img = kp_image.array_to_img(output_array) -plt.imshow(output_img) -plt.show() \ No newline at end of file diff --git a/spaces/Sadashiv/BERT-NER/utils.py b/spaces/Sadashiv/BERT-NER/utils.py deleted file mode 100644 index 92532d123dd92ea34549165aad3e89667af6d7ee..0000000000000000000000000000000000000000 --- a/spaces/Sadashiv/BERT-NER/utils.py +++ /dev/null @@ -1,85 +0,0 @@ -import requests -from dotenv import load_dotenv -import os - -# load the .env file -load_dotenv() - -API_KEY = os.getenv("API") - -API_URL = "https://api-inference.huggingface.co/models/Sadashiv/BERT-ner" -headers = {"Authorization": f"Bearer {API_KEY}"} - -tag_color_combination = {'O': '#FF5733', - 'PER': '#35B7FF', - 'ORG': '#00FF00', - 'LOC': '#FFA500', - 'MISC': '#BA55D3'} - - -class ner_extraction: - def __init__(self, input_text): - self.input_text = input_text - - def query(self): - response = requests.post(API_URL, headers=headers, json=self.input_text) - return response.json() - - def entity_position_locator(self): - output = self.query() - entity_position = {} - - for i in range(len(output)): - entity_position[i]={} - entity_position[i]["start"]=output[i]['start'] - entity_position[i]["end"]=output[i]['end'] - - return entity_position - - def entity_update(self): - entity_list = [] - output = self.query() - - for i in range(len(output)): - entity_list.append( - ( - output[i]['word'], - output[i]['entity_group'], - tag_color_combination.get(output[i]['entity_group']) - ) - ) - - return entity_list - - def text_list(self): - - input_text = self.input_text - entity_position = self.entity_position_locator() - - split_text = [] - - for i in entity_position: - split_text.append(input_text[entity_position[i]['start']:entity_position[i]['end']]) - - if entity_position[i]['end']!=len(input_text): - - if i+1= float(span[0]) and t<= float(span[1]): - answers.append('yes') - flag = True - break - if not flag: - answers.append('no') - else: - for t in time_stamp: - answers.append('no') # for test - - answers = '_'.join(answers) - - result = True - except Exception as e: - - print(f"Error while read file idx") - print("video is: {}".format(ann['video'])) - index = random.randint(0, len(self.annotation) - 1) - - return { - "video": frms, - "qa_input": qa_prompt, - "loc_input": loc_prompt, - "qa_output": answers, - "question_id": qid, - 'duration': duration - } diff --git 
a/spaces/ShaunWithGPT/ChuanhuChatGPT/llama_func.py b/spaces/ShaunWithGPT/ChuanhuChatGPT/llama_func.py deleted file mode 100644 index c71027dd4e6f99c0c12626cbbf276f407877be04..0000000000000000000000000000000000000000 --- a/spaces/ShaunWithGPT/ChuanhuChatGPT/llama_func.py +++ /dev/null @@ -1,192 +0,0 @@ -import os -import logging - -from llama_index import GPTSimpleVectorIndex -from llama_index import download_loader -from llama_index import ( - Document, - LLMPredictor, - PromptHelper, - QuestionAnswerPrompt, - RefinePrompt, -) -from langchain.llms import OpenAI -import colorama - - -from presets import * -from utils import * - - -def get_documents(file_src): - documents = [] - index_name = "" - logging.debug("Loading documents...") - logging.debug(f"file_src: {file_src}") - for file in file_src: - logging.debug(f"file: {file.name}") - index_name += file.name - if os.path.splitext(file.name)[1] == ".pdf": - logging.debug("Loading PDF...") - CJKPDFReader = download_loader("CJKPDFReader") - loader = CJKPDFReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".docx": - logging.debug("Loading DOCX...") - DocxReader = download_loader("DocxReader") - loader = DocxReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".epub": - logging.debug("Loading EPUB...") - EpubReader = download_loader("EpubReader") - loader = EpubReader() - documents += loader.load_data(file=file.name) - else: - logging.debug("Loading text file...") - with open(file.name, "r", encoding="utf-8") as f: - text = add_space(f.read()) - documents += [Document(text)] - index_name = sha1sum(index_name) - return documents, index_name - - -def construct_index( - api_key, - file_src, - max_input_size=4096, - num_outputs=1, - max_chunk_overlap=20, - chunk_size_limit=600, - embedding_limit=None, - separator=" ", - num_children=10, - max_keywords_per_chunk=10, -): - os.environ["OPENAI_API_KEY"] = api_key - chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit - embedding_limit = None if embedding_limit == 0 else embedding_limit - separator = " " if separator == "" else separator - - llm_predictor = LLMPredictor( - llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key) - ) - prompt_helper = PromptHelper( - max_input_size, - num_outputs, - max_chunk_overlap, - embedding_limit, - chunk_size_limit, - separator=separator, - ) - documents, index_name = get_documents(file_src) - if os.path.exists(f"./index/{index_name}.json"): - logging.info("找到了缓存的索引文件,加载中……") - return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json") - else: - try: - logging.debug("构建索引中……") - index = GPTSimpleVectorIndex( - documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper - ) - os.makedirs("./index", exist_ok=True) - index.save_to_disk(f"./index/{index_name}.json") - return index - except Exception as e: - print(e) - return None - - -def chat_ai( - api_key, - index, - question, - context, - chatbot, -): - os.environ["OPENAI_API_KEY"] = api_key - - logging.info(f"Question: {question}") - - response, chatbot_display, status_text = ask_ai( - api_key, - index, - question, - replace_today(PROMPT_TEMPLATE), - REFINE_TEMPLATE, - SIM_K, - INDEX_QUERY_TEMPRATURE, - context, - ) - if response is None: - status_text = "查询失败,请换个问法试试" - return context, chatbot - response = response - - context.append({"role": "user", "content": question}) - context.append({"role": "assistant", "content": response}) - chatbot.append((question, 
chatbot_display)) - - os.environ["OPENAI_API_KEY"] = "" - return context, chatbot, status_text - - -def ask_ai( - api_key, - index, - question, - prompt_tmpl, - refine_tmpl, - sim_k=1, - temprature=0, - prefix_messages=[], -): - os.environ["OPENAI_API_KEY"] = api_key - - logging.debug("Index file found") - logging.debug("Querying index...") - llm_predictor = LLMPredictor( - llm=OpenAI( - temperature=temprature, - model_name="gpt-3.5-turbo-0301", - prefix_messages=prefix_messages, - ) - ) - - response = None # Initialize response variable to avoid UnboundLocalError - qa_prompt = QuestionAnswerPrompt(prompt_tmpl) - rf_prompt = RefinePrompt(refine_tmpl) - response = index.query( - question, - llm_predictor=llm_predictor, - similarity_top_k=sim_k, - text_qa_template=qa_prompt, - refine_template=rf_prompt, - response_mode="compact", - ) - - if response is not None: - logging.info(f"Response: {response}") - ret_text = response.response - nodes = [] - for index, node in enumerate(response.source_nodes): - brief = node.source_text[:25].replace("\n", "") - nodes.append( - f"
    [{index+1}]\t{brief}... {node.source_text}
    " - ) - new_response = ret_text + "\n----------\n" + "\n\n".join(nodes) - logging.info( - f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}" - ) - os.environ["OPENAI_API_KEY"] = "" - return ret_text, new_response, f"查询消耗了{llm_predictor.last_token_usage} tokens" - else: - logging.warning("No response found, returning None") - os.environ["OPENAI_API_KEY"] = "" - return None - - -def add_space(text): - punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "} - for cn_punc, en_punc in punctuations.items(): - text = text.replace(cn_punc, en_punc) - return text diff --git a/spaces/Slava917/pronunciation-trainer/predict.py b/spaces/Slava917/pronunciation-trainer/predict.py deleted file mode 100644 index cfeba17de83e7c4ea0fd638a619e530d8cdb3283..0000000000000000000000000000000000000000 --- a/spaces/Slava917/pronunciation-trainer/predict.py +++ /dev/null @@ -1,30 +0,0 @@ -import torch -import torchaudio - -#fixes second prediction bug -torch._C._jit_override_can_fuse_on_cpu(False) -torch._C._jit_override_can_fuse_on_gpu(False) -torch._C._jit_set_texpr_fuser_enabled(False) -torch._C._jit_set_nvfuser_enabled(False) - -loader = torch.jit.load("audio_loader.pt") -model = torch.jit.load('QuartzNet_thunderspeech_3.pt') - -vocab = model.text_transform.vocab.itos -vocab[-1] = '' - -def convert_probs(probs): - ids = probs.argmax(1)[0] - s = [] - if vocab[ids[0]]: s.append(vocab[ids[0]]) - for i in range(1,len(ids)): - if ids[i-1] != ids[i]: - new = vocab[ids[i]] - if new: s.append(new) - #return '.'.join(s) - return s - -def predict(path): - audio = loader(path) - probs = model(audio, torch.tensor(audio.shape[0] * [audio.shape[-1]], device=audio.device))[0] - return convert_probs(probs) \ No newline at end of file diff --git a/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/__init__.py b/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/SuYuanS/AudioCraft_Plus/tests/modules/test_transformer.py b/spaces/SuYuanS/AudioCraft_Plus/tests/modules/test_transformer.py deleted file mode 100644 index 2bb79bfd58d535469f9b3c56b8a5fe254db5d8ba..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/tests/modules/test_transformer.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product - -import pytest -import torch - -from audiocraft.modules.transformer import ( - StreamingMultiheadAttention, StreamingTransformer, set_efficient_attention_backend) - - -def test_transformer_causal_streaming(): - torch.manual_seed(1234) - - for context, custom in product([None, 10], [False, True]): - # Test that causality and receptive fields are properly handled. - # looking at the gradients - tr = StreamingTransformer( - 16, 4, 1 if context else 2, - causal=True, past_context=context, custom=custom, - dropout=0.) 
- steps = 20 - for k in [0, 10, 15, 19]: - x = torch.randn(4, steps, 16, requires_grad=True) - y = tr(x) - y[:, k].abs().sum().backward() - if k + 1 < steps: - assert torch.allclose(x.grad[:, k + 1:], torch.tensor(0.)), x.grad[:, k + 1:].norm() - assert not torch.allclose(x.grad[:, :k + 1], torch.tensor(0.)), x.grad[:, :k + 1].norm() - if context is not None and k > context: - limit = k - context - 1 - assert torch.allclose(x.grad[:, :limit], - torch.tensor(0.)), x.grad[:, :limit].norm() - - # Now check that streaming gives the same result at batch eval. - x = torch.randn(4, steps, 16) - y = tr(x) - ys = [] - with tr.streaming(): - for k in range(steps): - chunk = x[:, k:k + 1, :] - ys.append(tr(chunk)) - y_stream = torch.cat(ys, dim=1) - delta = torch.norm(y_stream - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_transformer_vs_pytorch(): - torch.manual_seed(1234) - # Check that in the non causal setting, we get the same result as - # PyTorch Transformer encoder. - for custom in [False, True]: - tr = StreamingTransformer( - 16, 4, 2, - causal=False, custom=custom, dropout=0., positional_scale=0.) - layer = torch.nn.TransformerEncoderLayer(16, 4, dropout=0., batch_first=True) - tr_ref = torch.nn.TransformerEncoder(layer, 2) - tr.load_state_dict(tr_ref.state_dict()) - - x = torch.randn(4, 20, 16) - y = tr(x) - y2 = tr_ref(x) - delta = torch.norm(y2 - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_streaming_api(): - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.) - tr.eval() - steps = 12 - x = torch.randn(1, steps, 16) - - with torch.no_grad(): - with tr.streaming(): - _ = tr(x[:, :1]) - state = {k: v.clone() for k, v in tr.get_streaming_state().items()} - y = tr(x[:, 1:2]) - tr.set_streaming_state(state) - y2 = tr(x[:, 1:2]) - assert torch.allclose(y, y2), (y - y2).norm() - assert tr.flush() is None - - -def test_memory_efficient(): - for backend in ['torch', 'xformers']: - torch.manual_seed(1234) - set_efficient_attention_backend(backend) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1) - tr_mem_efficient.load_state_dict(tr.state_dict()) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_mem_efficient(x) - assert torch.allclose(y, y2), ((y - y2).norm(), backend) - - -def test_attention_as_float32(): - torch.manual_seed(1234) - cases = [ - {'custom': True}, - {'custom': False}, - ] - for case in cases: - tr = StreamingTransformer(16, 4, 2, dropout=0., dtype=torch.bfloat16, **case) - tr_float32 = StreamingTransformer( - 16, 4, 2, dropout=0., attention_as_float32=True, dtype=torch.bfloat16, **case) - if not case['custom']: - # we are not using autocast here because it doesn't really - # work as expected on CPU, so we have to manually cast the weights of the MHA. 
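# For the non-custom path, the plain nn.MultiheadAttention weights are cast to float32
# by hand; the test then checks that attention_as_float32 yields an output that differs
# from the pure bfloat16 model.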
- for layer in tr_float32.layers: - layer.self_attn.mha.to(torch.float32) - tr_float32.load_state_dict(tr.state_dict()) - steps = 12 - x = torch.randn(3, steps, 16, dtype=torch.bfloat16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_float32(x) - assert not torch.allclose(y, y2), (y - y2).norm() - - -@torch.no_grad() -def test_streaming_memory_efficient(): - for backend in ['torch', 'xformers']: - torch.manual_seed(1234) - set_efficient_attention_backend(backend) - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0., custom=True) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, causal=True) - tr.load_state_dict(tr_mem_efficient.state_dict()) - tr.eval() - tr_mem_efficient.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - ref = tr(x) - - with tr_mem_efficient.streaming(): - outs = [] - # frame_sizes = [2] + [1] * (steps - 2) - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr_mem_efficient(frame)) - - out = torch.cat(outs, dim=1) - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -def test_cross_attention(): - torch.manual_seed(1234) - for norm_first in [True, False]: - m = StreamingTransformer( - 16, 4, 2, cross_attention=False, norm_first=norm_first, dropout=0., custom=True) - m_cross = StreamingTransformer( - 16, 4, 2, cross_attention=True, norm_first=norm_first, dropout=0., custom=True) - m_cross.load_state_dict(m.state_dict(), strict=False) - x = torch.randn(2, 5, 16) - cross_x = torch.randn(2, 3, 16) - y_ref = m(x) - y_cross_zero = m_cross(x, cross_attention_src=0 * cross_x) - # With norm_first, the two should be exactly the same, - # but with norm_first=False, we get 2 normalization in a row - # and the epsilon value leads to a tiny change. - atol = 0. if norm_first else 1e-6 - print((y_ref - y_cross_zero).norm() / y_ref.norm()) - assert torch.allclose(y_ref, y_cross_zero, atol=atol) - - # We now expect a difference even with a generous atol of 1e-2. - y_cross = m_cross(x, cross_attention_src=cross_x) - assert not torch.allclose(y_cross, y_cross_zero, atol=1e-2) - - with pytest.raises(AssertionError): - _ = m_cross(x) - _ = m(x, cross_attention_src=cross_x) - - -def test_cross_attention_compat(): - torch.manual_seed(1234) - num_heads = 2 - dim = num_heads * 64 - with pytest.raises(AssertionError): - StreamingMultiheadAttention(dim, num_heads, causal=True, cross_attention=True) - - cross_attn = StreamingMultiheadAttention( - dim, num_heads, dropout=0, cross_attention=True, custom=True) - ref_attn = torch.nn.MultiheadAttention(dim, num_heads, dropout=0, batch_first=True) - - # We can load the regular attention state dict - # so we have compat when loading old checkpoints. - cross_attn.load_state_dict(ref_attn.state_dict()) - - queries = torch.randn(3, 7, dim) - keys = torch.randn(3, 9, dim) - values = torch.randn(3, 9, dim) - - y = cross_attn(queries, keys, values)[0] - y_ref = ref_attn(queries, keys, values)[0] - assert torch.allclose(y, y_ref, atol=1e-7), (y - y_ref).norm() / y_ref.norm() - - # Now let's check that streaming is working properly. 
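# Query one timestep at a time inside the streaming context and check that the
# concatenated per-step outputs match the full-sequence cross-attention result.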
- with cross_attn.streaming(): - ys = [] - for step in range(queries.shape[1]): - ys.append(cross_attn(queries[:, step: step + 1], keys, values)[0]) - y_streaming = torch.cat(ys, dim=1) - assert torch.allclose(y_streaming, y, atol=1e-7) - - -def test_repeat_kv(): - torch.manual_seed(1234) - num_heads = 8 - kv_repeat = 4 - dim = num_heads * 64 - with pytest.raises(AssertionError): - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, cross_attention=True) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, custom=True) - x = torch.randn(4, 18, dim) - y = mha(x, x, x)[0] - assert x.shape == y.shape - - -def test_qk_layer_norm(): - torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, bias_attn=False) - steps = 12 - x = torch.randn(3, steps, 16) - y = tr(x) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, cross_attention=True) - z = torch.randn(3, 21, 16) - y = tr(x, cross_attention_src=z) - assert y.shape == x.shape diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tests/test_dir2.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tests/test_dir2.py deleted file mode 100644 index bf7f5e57ea2219cb320772be79270feb229553a9..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tests/test_dir2.py +++ /dev/null @@ -1,66 +0,0 @@ -from IPython.utils.dir2 import dir2 - -import pytest - - -class Base(object): - x = 1 - z = 23 - - -def test_base(): - res = dir2(Base()) - assert "x" in res - assert "z" in res - assert "y" not in res - assert "__class__" in res - assert res.count("x") == 1 - assert res.count("__class__") == 1 - - -def test_SubClass(): - class SubClass(Base): - y = 2 - - res = dir2(SubClass()) - assert "y" in res - assert res.count("y") == 1 - assert res.count("x") == 1 - - -def test_SubClass_with_trait_names_attr(): - # usecase: trait_names is used in a class describing psychological classification - - class SubClass(Base): - y = 2 - trait_names = 44 - - res = dir2(SubClass()) - assert "trait_names" in res - - -def test_misbehaving_object_without_trait_names(): - # dir2 shouldn't raise even when objects are dumb and raise - # something other than AttribteErrors on bad getattr. - - class MisbehavingGetattr: - def __getattr__(self, attr): - raise KeyError("I should be caught") - - def some_method(self): - return True - - class SillierWithDir(MisbehavingGetattr): - def __dir__(self): - return ["some_method"] - - for bad_klass in (MisbehavingGetattr, SillierWithDir): - obj = bad_klass() - - assert obj.some_method() - - with pytest.raises(KeyError): - obj.other_method() - - res = dir2(obj) - assert "some_method" in res diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/BmpImagePlugin.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/BmpImagePlugin.py deleted file mode 100644 index 5bda0a5b05d8b6a6a0ccaa91da3475e34c9b1cf3..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/BmpImagePlugin.py +++ /dev/null @@ -1,471 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# BMP file handler -# -# Windows (and OS/2) native bitmap storage format. 
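# A minimal usage sketch (an assumption for illustration, not part of the original module):
# with Pillow installed, this plugin is registered automatically and dispatched via the
# "BM" magic bytes:
#
#     from PIL import Image
#     im = Image.open("example.bmp")    # parsed by BmpImageFile below
#     print(im.size, im.mode)
#     im.save("copy.bmp")               # written by the _save() routine below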
-# -# history: -# 1995-09-01 fl Created -# 1996-04-30 fl Added save -# 1997-08-27 fl Fixed save of 1-bit images -# 1998-03-06 fl Load P images as L where possible -# 1998-07-03 fl Load P images as 1 where possible -# 1998-12-29 fl Handle small palettes -# 2002-12-30 fl Fixed load of 1-bit palette images -# 2003-04-21 fl Fixed load of 1-bit monochrome images -# 2003-04-23 fl Added limited support for BI_BITFIELDS compression -# -# Copyright (c) 1997-2003 by Secret Labs AB -# Copyright (c) 1995-2003 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - - -import os - -from . import Image, ImageFile, ImagePalette -from ._binary import i16le as i16 -from ._binary import i32le as i32 -from ._binary import o8 -from ._binary import o16le as o16 -from ._binary import o32le as o32 - -# -# -------------------------------------------------------------------- -# Read BMP file - -BIT2MODE = { - # bits => mode, rawmode - 1: ("P", "P;1"), - 4: ("P", "P;4"), - 8: ("P", "P"), - 16: ("RGB", "BGR;15"), - 24: ("RGB", "BGR"), - 32: ("RGB", "BGRX"), -} - - -def _accept(prefix): - return prefix[:2] == b"BM" - - -def _dib_accept(prefix): - return i32(prefix) in [12, 40, 64, 108, 124] - - -# ============================================================================= -# Image plugin for the Windows BMP format. -# ============================================================================= -class BmpImageFile(ImageFile.ImageFile): - """Image plugin for the Windows Bitmap format (BMP)""" - - # ------------------------------------------------------------- Description - format_description = "Windows Bitmap" - format = "BMP" - - # -------------------------------------------------- BMP Compression values - COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5} - for k, v in COMPRESSIONS.items(): - vars()[k] = v - - def _bitmap(self, header=0, offset=0): - """Read relevant info about the BMP""" - read, seek = self.fp.read, self.fp.seek - if header: - seek(header) - # read bmp header size @offset 14 (this is part of the header size) - file_info = {"header_size": i32(read(4)), "direction": -1} - - # -------------------- If requested, read header at a specific position - # read the rest of the bmp header, without its size - header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4) - - # -------------------------------------------------- IBM OS/2 Bitmap v1 - # ----- This format has different offsets because of width/height types - if file_info["header_size"] == 12: - file_info["width"] = i16(header_data, 0) - file_info["height"] = i16(header_data, 2) - file_info["planes"] = i16(header_data, 4) - file_info["bits"] = i16(header_data, 6) - file_info["compression"] = self.RAW - file_info["palette_padding"] = 3 - - # --------------------------------------------- Windows Bitmap v2 to v5 - # v3, OS/2 v2, v4, v5 - elif file_info["header_size"] in (40, 64, 108, 124): - file_info["y_flip"] = header_data[7] == 0xFF - file_info["direction"] = 1 if file_info["y_flip"] else -1 - file_info["width"] = i32(header_data, 0) - file_info["height"] = ( - i32(header_data, 4) - if not file_info["y_flip"] - else 2**32 - i32(header_data, 4) - ) - file_info["planes"] = i16(header_data, 8) - file_info["bits"] = i16(header_data, 10) - file_info["compression"] = i32(header_data, 12) - # byte size of pixel data - file_info["data_size"] = i32(header_data, 16) - file_info["pixels_per_meter"] = ( - i32(header_data, 20), - i32(header_data, 24), - ) - file_info["colors"] = 
i32(header_data, 28) - file_info["palette_padding"] = 4 - self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"]) - if file_info["compression"] == self.BITFIELDS: - if len(header_data) >= 52: - for idx, mask in enumerate( - ["r_mask", "g_mask", "b_mask", "a_mask"] - ): - file_info[mask] = i32(header_data, 36 + idx * 4) - else: - # 40 byte headers only have the three components in the - # bitfields masks, ref: - # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx - # See also - # https://github.com/python-pillow/Pillow/issues/1293 - # There is a 4th component in the RGBQuad, in the alpha - # location, but it is listed as a reserved component, - # and it is not generally an alpha channel - file_info["a_mask"] = 0x0 - for mask in ["r_mask", "g_mask", "b_mask"]: - file_info[mask] = i32(read(4)) - file_info["rgb_mask"] = ( - file_info["r_mask"], - file_info["g_mask"], - file_info["b_mask"], - ) - file_info["rgba_mask"] = ( - file_info["r_mask"], - file_info["g_mask"], - file_info["b_mask"], - file_info["a_mask"], - ) - else: - msg = f"Unsupported BMP header type ({file_info['header_size']})" - raise OSError(msg) - - # ------------------ Special case : header is reported 40, which - # ---------------------- is shorter than real size for bpp >= 16 - self._size = file_info["width"], file_info["height"] - - # ------- If color count was not found in the header, compute from bits - file_info["colors"] = ( - file_info["colors"] - if file_info.get("colors", 0) - else (1 << file_info["bits"]) - ) - if offset == 14 + file_info["header_size"] and file_info["bits"] <= 8: - offset += 4 * file_info["colors"] - - # ---------------------- Check bit depth for unusual unsupported values - self.mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None)) - if self.mode is None: - msg = f"Unsupported BMP pixel depth ({file_info['bits']})" - raise OSError(msg) - - # ---------------- Process BMP with Bitfields compression (not palette) - decoder_name = "raw" - if file_info["compression"] == self.BITFIELDS: - SUPPORTED = { - 32: [ - (0xFF0000, 0xFF00, 0xFF, 0x0), - (0xFF000000, 0xFF0000, 0xFF00, 0x0), - (0xFF000000, 0xFF0000, 0xFF00, 0xFF), - (0xFF, 0xFF00, 0xFF0000, 0xFF000000), - (0xFF0000, 0xFF00, 0xFF, 0xFF000000), - (0x0, 0x0, 0x0, 0x0), - ], - 24: [(0xFF0000, 0xFF00, 0xFF)], - 16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)], - } - MASK_MODES = { - (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX", - (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR", - (32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): "ABGR", - (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA", - (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA", - (32, (0x0, 0x0, 0x0, 0x0)): "BGRA", - (24, (0xFF0000, 0xFF00, 0xFF)): "BGR", - (16, (0xF800, 0x7E0, 0x1F)): "BGR;16", - (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15", - } - if file_info["bits"] in SUPPORTED: - if ( - file_info["bits"] == 32 - and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]] - ): - raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])] - self.mode = "RGBA" if "A" in raw_mode else self.mode - elif ( - file_info["bits"] in (24, 16) - and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]] - ): - raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])] - else: - msg = "Unsupported BMP bitfields layout" - raise OSError(msg) - else: - msg = "Unsupported BMP bitfields layout" - raise OSError(msg) - elif file_info["compression"] == self.RAW: - if file_info["bits"] == 32 and header == 22: # 32-bit .cur offset - raw_mode, 
self.mode = "BGRA", "RGBA" - elif file_info["compression"] in (self.RLE8, self.RLE4): - decoder_name = "bmp_rle" - else: - msg = f"Unsupported BMP compression ({file_info['compression']})" - raise OSError(msg) - - # --------------- Once the header is processed, process the palette/LUT - if self.mode == "P": # Paletted for 1, 4 and 8 bit images - # ---------------------------------------------------- 1-bit images - if not (0 < file_info["colors"] <= 65536): - msg = f"Unsupported BMP Palette size ({file_info['colors']})" - raise OSError(msg) - else: - padding = file_info["palette_padding"] - palette = read(padding * file_info["colors"]) - greyscale = True - indices = ( - (0, 255) - if file_info["colors"] == 2 - else list(range(file_info["colors"])) - ) - - # ----------------- Check if greyscale and ignore palette if so - for ind, val in enumerate(indices): - rgb = palette[ind * padding : ind * padding + 3] - if rgb != o8(val) * 3: - greyscale = False - - # ------- If all colors are grey, white or black, ditch palette - if greyscale: - self.mode = "1" if file_info["colors"] == 2 else "L" - raw_mode = self.mode - else: - self.mode = "P" - self.palette = ImagePalette.raw( - "BGRX" if padding == 4 else "BGR", palette - ) - - # ---------------------------- Finally set the tile data for the plugin - self.info["compression"] = file_info["compression"] - args = [raw_mode] - if decoder_name == "bmp_rle": - args.append(file_info["compression"] == self.RLE4) - else: - args.append(((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3)) - args.append(file_info["direction"]) - self.tile = [ - ( - decoder_name, - (0, 0, file_info["width"], file_info["height"]), - offset or self.fp.tell(), - tuple(args), - ) - ] - - def _open(self): - """Open file, check magic number and read header""" - # read 14 bytes: magic number, filesize, reserved, header final offset - head_data = self.fp.read(14) - # choke if the file does not have the required magic bytes - if not _accept(head_data): - msg = "Not a BMP file" - raise SyntaxError(msg) - # read the start position of the BMP image data (u32) - offset = i32(head_data, 10) - # load bitmap information (offset=raster info) - self._bitmap(offset=offset) - - -class BmpRleDecoder(ImageFile.PyDecoder): - _pulls_fd = True - - def decode(self, buffer): - rle4 = self.args[1] - data = bytearray() - x = 0 - while len(data) < self.state.xsize * self.state.ysize: - pixels = self.fd.read(1) - byte = self.fd.read(1) - if not pixels or not byte: - break - num_pixels = pixels[0] - if num_pixels: - # encoded mode - if x + num_pixels > self.state.xsize: - # Too much data for row - num_pixels = max(0, self.state.xsize - x) - if rle4: - first_pixel = o8(byte[0] >> 4) - second_pixel = o8(byte[0] & 0x0F) - for index in range(num_pixels): - if index % 2 == 0: - data += first_pixel - else: - data += second_pixel - else: - data += byte * num_pixels - x += num_pixels - else: - if byte[0] == 0: - # end of line - while len(data) % self.state.xsize != 0: - data += b"\x00" - x = 0 - elif byte[0] == 1: - # end of bitmap - break - elif byte[0] == 2: - # delta - bytes_read = self.fd.read(2) - if len(bytes_read) < 2: - break - right, up = self.fd.read(2) - data += b"\x00" * (right + up * self.state.xsize) - x = len(data) % self.state.xsize - else: - # absolute mode - if rle4: - # 2 pixels per byte - byte_count = byte[0] // 2 - bytes_read = self.fd.read(byte_count) - for byte_read in bytes_read: - data += o8(byte_read >> 4) - data += o8(byte_read & 0x0F) - else: - byte_count = byte[0] - bytes_read = 
self.fd.read(byte_count) - data += bytes_read - if len(bytes_read) < byte_count: - break - x += byte[0] - - # align to 16-bit word boundary - if self.fd.tell() % 2 != 0: - self.fd.seek(1, os.SEEK_CUR) - rawmode = "L" if self.mode == "L" else "P" - self.set_as_raw(bytes(data), (rawmode, 0, self.args[-1])) - return -1, 0 - - -# ============================================================================= -# Image plugin for the DIB format (BMP alias) -# ============================================================================= -class DibImageFile(BmpImageFile): - format = "DIB" - format_description = "Windows Bitmap" - - def _open(self): - self._bitmap() - - -# -# -------------------------------------------------------------------- -# Write BMP file - - -SAVE = { - "1": ("1", 1, 2), - "L": ("L", 8, 256), - "P": ("P", 8, 256), - "RGB": ("BGR", 24, 0), - "RGBA": ("BGRA", 32, 0), -} - - -def _dib_save(im, fp, filename): - _save(im, fp, filename, False) - - -def _save(im, fp, filename, bitmap_header=True): - try: - rawmode, bits, colors = SAVE[im.mode] - except KeyError as e: - msg = f"cannot write mode {im.mode} as BMP" - raise OSError(msg) from e - - info = im.encoderinfo - - dpi = info.get("dpi", (96, 96)) - - # 1 meter == 39.3701 inches - ppm = tuple(map(lambda x: int(x * 39.3701 + 0.5), dpi)) - - stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3) - header = 40 # or 64 for OS/2 version 2 - image = stride * im.size[1] - - if im.mode == "1": - palette = b"".join(o8(i) * 4 for i in (0, 255)) - elif im.mode == "L": - palette = b"".join(o8(i) * 4 for i in range(256)) - elif im.mode == "P": - palette = im.im.getpalette("RGB", "BGRX") - colors = len(palette) // 4 - else: - palette = None - - # bitmap header - if bitmap_header: - offset = 14 + header + colors * 4 - file_size = offset + image - if file_size > 2**32 - 1: - msg = "File size is too large for the BMP format" - raise ValueError(msg) - fp.write( - b"BM" # file type (magic) - + o32(file_size) # file size - + o32(0) # reserved - + o32(offset) # image data offset - ) - - # bitmap info header - fp.write( - o32(header) # info header size - + o32(im.size[0]) # width - + o32(im.size[1]) # height - + o16(1) # planes - + o16(bits) # depth - + o32(0) # compression (0=uncompressed) - + o32(image) # size of bitmap - + o32(ppm[0]) # resolution - + o32(ppm[1]) # resolution - + o32(colors) # colors used - + o32(colors) # colors important - ) - - fp.write(b"\0" * (header - 40)) # padding (for OS/2 format) - - if palette: - fp.write(palette) - - ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))]) - - -# -# -------------------------------------------------------------------- -# Registry - - -Image.register_open(BmpImageFile.format, BmpImageFile, _accept) -Image.register_save(BmpImageFile.format, _save) - -Image.register_extension(BmpImageFile.format, ".bmp") - -Image.register_mime(BmpImageFile.format, "image/bmp") - -Image.register_decoder("bmp_rle", BmpRleDecoder) - -Image.register_open(DibImageFile.format, DibImageFile, _dib_accept) -Image.register_save(DibImageFile.format, _dib_save) - -Image.register_extension(DibImageFile.format, ".dib") - -Image.register_mime(DibImageFile.format, "image/bmp") diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/coloredlogs/tests.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/coloredlogs/tests.py deleted file mode 100644 index 650d3281a8584c1e863347643444f55db463edf9..0000000000000000000000000000000000000000 --- 
a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/coloredlogs/tests.py +++ /dev/null @@ -1,673 +0,0 @@ -# Automated tests for the `coloredlogs' package. -# -# Author: Peter Odding -# Last Change: June 11, 2021 -# URL: https://coloredlogs.readthedocs.io - -"""Automated tests for the `coloredlogs` package.""" - -# Standard library modules. -import contextlib -import logging -import logging.handlers -import os -import re -import subprocess -import sys -import tempfile - -# External dependencies. -from humanfriendly.compat import StringIO -from humanfriendly.terminal import ANSI_COLOR_CODES, ANSI_CSI, ansi_style, ansi_wrap -from humanfriendly.testing import PatchedAttribute, PatchedItem, TestCase, retry -from humanfriendly.text import format, random_string - -# The module we're testing. -import coloredlogs -import coloredlogs.cli -from coloredlogs import ( - CHROOT_FILES, - ColoredFormatter, - NameNormalizer, - decrease_verbosity, - find_defined_levels, - find_handler, - find_hostname, - find_program_name, - find_username, - get_level, - increase_verbosity, - install, - is_verbose, - level_to_number, - match_stream_handler, - parse_encoded_styles, - set_level, - walk_propagation_tree, -) -from coloredlogs.demo import demonstrate_colored_logging -from coloredlogs.syslog import SystemLogging, is_syslog_supported, match_syslog_handler -from coloredlogs.converter import ( - ColoredCronMailer, - EIGHT_COLOR_PALETTE, - capture, - convert, -) - -# External test dependencies. -from capturer import CaptureOutput -from verboselogs import VerboseLogger - -# Compiled regular expression that matches a single line of output produced by -# the default log format (does not include matching of ANSI escape sequences). -PLAIN_TEXT_PATTERN = re.compile(r''' - (?P \d{4}-\d{2}-\d{2} ) - \s (?P