diff --git a/spaces/17TheWord/RealESRGAN/FAQ.md b/spaces/17TheWord/RealESRGAN/FAQ.md
deleted file mode 100644
index caa8c08cfe4302eb8812c823569e8a0be30fa49c..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/FAQ.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# FAQ
-
-1. **What is the difference between `--netscale` and `--outscale`?**
-
-A: TODO.
-
-1. **How to select models?**
-
-A: TODO.
diff --git a/spaces/17TheWord/RealESRGAN/inference_realesrgan.py b/spaces/17TheWord/RealESRGAN/inference_realesrgan.py
deleted file mode 100644
index 6d5ff4d188faaa16c0131be69a08fd22fb608f80..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/inference_realesrgan.py
+++ /dev/null
@@ -1,128 +0,0 @@
-import argparse
-import cv2
-import glob
-import os
-from basicsr.archs.rrdbnet_arch import RRDBNet
-
-from realesrgan import RealESRGANer
-from realesrgan.archs.srvgg_arch import SRVGGNetCompact
-
-
-def main():
- """Inference demo for Real-ESRGAN.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
- parser.add_argument(
- '-n',
- '--model_name',
- type=str,
- default='RealESRGAN_x4plus',
-        help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | '
-              'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2 | '
-              'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
- parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
- parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
- parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image')
- parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
- parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
- parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
- parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
- parser.add_argument('--half', action='store_true', help='Use half precision during inference')
- parser.add_argument(
- '--alpha_upsampler',
- type=str,
- default='realesrgan',
- help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
- parser.add_argument(
- '--ext',
- type=str,
- default='auto',
- help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
- args = parser.parse_args()
-
- # determine models according to model names
- args.model_name = args.model_name.split('.')[0]
- if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
- netscale = 4
- elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
- netscale = 4
- elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
- netscale = 2
- elif args.model_name in [
- 'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
- ]: # x2 VGG-style model (XS size)
- model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
- netscale = 2
- elif args.model_name in [
- 'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
- ]: # x4 VGG-style model (XS size)
- model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
- netscale = 4
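-    # note: `netscale` above is the model's native upscaling factor (passed to RealESRGANer as `scale`);
-    # the final output resolution is set separately by `args.outscale` when calling upsampler.enhance()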
-
-    # determine model paths
-    model_path = os.path.join('.', args.model_name + '.pth')
-    if not os.path.isfile(model_path):
-        raise ValueError(f'Model {args.model_name} does not exist.')
-
- # restorer
- upsampler = RealESRGANer(
- scale=netscale,
- model_path=model_path,
- model=model,
- tile=args.tile,
- tile_pad=args.tile_pad,
- pre_pad=args.pre_pad,
- half=args.half)
-
- if args.face_enhance: # Use GFPGAN for face enhancement
- from gfpgan import GFPGANer
- face_enhancer = GFPGANer(
- model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
- upscale=args.outscale,
- arch='clean',
- channel_multiplier=2,
- bg_upsampler=upsampler)
- os.makedirs(args.output, exist_ok=True)
-
- if os.path.isfile(args.input):
- paths = [args.input]
- else:
- paths = sorted(glob.glob(os.path.join(args.input, '*')))
-
- for idx, path in enumerate(paths):
- imgname, extension = os.path.splitext(os.path.basename(path))
- print('Testing', idx, imgname)
-
- img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
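-        # cv2.IMREAD_UNCHANGED keeps any alpha channel, so a 4-channel result is treated as RGBA below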
- if len(img.shape) == 3 and img.shape[2] == 4:
- img_mode = 'RGBA'
- else:
- img_mode = None
-
- try:
- if args.face_enhance:
- _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
- else:
- output, _ = upsampler.enhance(img, outscale=args.outscale)
- except RuntimeError as error:
- print('Error', error)
- print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
- else:
- if args.ext == 'auto':
- extension = extension[1:]
- else:
- extension = args.ext
- if img_mode == 'RGBA': # RGBA images should be saved in png format
- extension = 'png'
- save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}')
- cv2.imwrite(save_path, output)
-
-
-if __name__ == '__main__':
- main()
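-
-# Example invocation (a sketch, assuming RealESRGAN_x4plus.pth has already been downloaded into the
-# working directory, since the script resolves '<model_name>.pth' relative to '.'):
-#   python inference_realesrgan.py -n RealESRGAN_x4plus -i inputs -o results --outscale 4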
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 22 on Mac Two Ways to Experience the Game of Football.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 22 on Mac Two Ways to Experience the Game of Football.md
deleted file mode 100644
index cc2e488c4f1fe7059bd0d725e4c573ab0d59f0f8..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 22 on Mac Two Ways to Experience the Game of Football.md
+++ /dev/null
@@ -1,50 +0,0 @@
-
-
-How to Play FIFA 22 on Mac: A Guide for Football Fans
-FIFA 22 is the latest installment of the popular football simulation game series developed by EA Sports. It features new gameplay innovations, improved graphics, and more realistic animations powered by HyperMotion technology. However, if you are a Mac user, you might be wondering how to play FIFA 22 on your device, since the game is not officially supported by macOS. In this article, we will show you two ways to play FIFA 22 on Mac: using cloud gaming services or installing Windows 10 on your Mac.
-What is cloud gaming?
-Cloud gaming is a technology that allows you to stream games from remote servers to your device via the internet. You don't need to download or install the games on your device, and you don't need to worry about compatibility or hardware requirements. All you need is a stable internet connection and a compatible device, such as a laptop, tablet, smartphone, or smart TV.
-How to play FIFA 22 on Mac using cloud gaming services?
-There are several cloud gaming services that offer FIFA 22 as part of their library. Two of the most popular ones are Boosteroid and Google Stadia. Here are the steps to play FIFA 22 on Mac using these services:
-
-Boosteroid : Boosteroid is a cloud gaming platform that allows you to play PC games on any device with a browser. It supports Windows, Mac OS X, Linux, Android, iOS, and smart TVs. To play FIFA 22 on Mac using Boosteroid, you need to follow these steps:
-
-Create an account on Boosteroid.com and choose a subscription plan.
-Log in to your account and browse the game library.
-Select FIFA 22 and click on the Play button.
-Enjoy playing FIFA 22 on your Mac with high graphics and low latency.
-
-
-Google Stadia : Google Stadia is a cloud gaming service that allows you to play games on various devices using Google Chrome or the Stadia app. It supports Windows, Mac OS X, Linux, Android, iOS, and Chromecast. To play FIFA 22 on Mac using Google Stadia, you need to follow these steps:
-
-Create an account on Stadia.com and choose a subscription plan.
-Log in to your account and browse the game store.
-Purchase FIFA 22 and click on the Play button.
-Enjoy playing FIFA 22 on your Mac with high graphics and low latency.
-
-
-
-What are the advantages and disadvantages of cloud gaming?
-Cloud gaming has some advantages and disadvantages that you should consider before choosing this option. Here are some of them:
-
-Advantages:
-
-You don't need to download or install anything on your device.
-You don't need to worry about compatibility or hardware requirements.
-You can play games on any device with a browser or an app.
-You can access a large library of games with different genres and categories.
-You can enjoy high graphics and low latency with a stable internet connection.
-
-
-Disadvantages:
-
-You need a stable and fast internet connection to play games smoothly.
-You may experience lag or buffering if your internet connection is slow or unstable.
-You may not be able to play games offline or without an internet connection.
-You may not be able to mod or customize your games as much as you want.
-You may need to pay a monthly fee or purchase games separately to access them.
-
-
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Cadence Allegro Extracta Exe Downloa.md b/spaces/1gistliPinn/ChatGPT4/Examples/Cadence Allegro Extracta Exe Downloa.md
deleted file mode 100644
index d476010b63525eb743c101671bdfc27ad28df90e..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Cadence Allegro Extracta Exe Downloa.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-There are 2 possibilities for extracting data from Cadence Allegro (and also latest ... for ODB++ which means that you have to download a utility script from Valor. ... All they need to do is to run the executable CDC2FAB in this file structure.
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/FaceRig Pro V1.312 (Inclu ALL DLC) Cheats Tool Download Free.md b/spaces/1gistliPinn/ChatGPT4/Examples/FaceRig Pro V1.312 (Inclu ALL DLC) Cheats Tool Download Free.md
deleted file mode 100644
index 2613c980d46be2a5baf153a2ead2dd969602603c..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/FaceRig Pro V1.312 (Inclu ALL DLC) Cheats Tool Download Free.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-Multimedia tools downloads - PluralEyes for Edius by Singular ... As for functionality, If all MS did was update the map data annually, I'd ... Acura Navigation Hack or Torrent DVD Downloads As an ... to dig FaceRig Pro v1.312 (Inclu Live2D Module & DLCs) TORRENT Cracked Free Download in magnet.
-
-
-
diff --git a/spaces/1line/AutoGPT/tests/test_config.py b/spaces/1line/AutoGPT/tests/test_config.py
deleted file mode 100644
index b472a24c78edd1f931a76c68e08ed544bbe61d98..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/tests/test_config.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from unittest import TestCase
-
-from autogpt.config import Config
-
-
-class TestConfig(TestCase):
- """
- Test cases for the Config class, which handles the configuration settings
- for the AI and ensures it behaves as a singleton.
- """
-
- def setUp(self):
- """
- Set up the test environment by creating an instance of the Config class.
- """
- self.config = Config()
-
- def test_singleton(self):
- """
- Test if the Config class behaves as a singleton by ensuring that two instances are the same.
- """
- config2 = Config()
- self.assertIs(self.config, config2)
-
- def test_initial_values(self):
- """
- Test if the initial values of the Config class attributes are set correctly.
- """
- self.assertFalse(self.config.debug_mode)
- self.assertFalse(self.config.continuous_mode)
- self.assertFalse(self.config.speak_mode)
- self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo")
- self.assertEqual(self.config.smart_llm_model, "gpt-4")
- self.assertEqual(self.config.fast_token_limit, 4000)
- self.assertEqual(self.config.smart_token_limit, 8000)
-
- def test_set_continuous_mode(self):
- """
- Test if the set_continuous_mode() method updates the continuous_mode attribute.
- """
- self.config.set_continuous_mode(True)
- self.assertTrue(self.config.continuous_mode)
-
- def test_set_speak_mode(self):
- """
- Test if the set_speak_mode() method updates the speak_mode attribute.
- """
- self.config.set_speak_mode(True)
- self.assertTrue(self.config.speak_mode)
-
- def test_set_fast_llm_model(self):
- """
- Test if the set_fast_llm_model() method updates the fast_llm_model attribute.
- """
- self.config.set_fast_llm_model("gpt-3.5-turbo-test")
- self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo-test")
-
- def test_set_smart_llm_model(self):
- """
- Test if the set_smart_llm_model() method updates the smart_llm_model attribute.
- """
- self.config.set_smart_llm_model("gpt-4-test")
- self.assertEqual(self.config.smart_llm_model, "gpt-4-test")
-
- def test_set_fast_token_limit(self):
- """
- Test if the set_fast_token_limit() method updates the fast_token_limit attribute.
- """
- self.config.set_fast_token_limit(5000)
- self.assertEqual(self.config.fast_token_limit, 5000)
-
- def test_set_smart_token_limit(self):
- """
- Test if the set_smart_token_limit() method updates the smart_token_limit attribute.
- """
- self.config.set_smart_token_limit(9000)
- self.assertEqual(self.config.smart_token_limit, 9000)
-
- def test_set_debug_mode(self):
- """
- Test if the set_debug_mode() method updates the debug_mode attribute.
- """
- self.config.set_debug_mode(True)
- self.assertTrue(self.config.debug_mode)
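-
-# To run just this file (a sketch, assuming it is executed from the repository root):
-#   python -m unittest tests.test_config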
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Arena of Valor Global and Enjoy Fast and Fun Matches in 15 Minutes or Less.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Arena of Valor Global and Enjoy Fast and Fun Matches in 15 Minutes or Less.md
deleted file mode 100644
index eb49421396472f0e64ea818bd7e56c5b5263e98c..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Arena of Valor Global and Enjoy Fast and Fun Matches in 15 Minutes or Less.md
+++ /dev/null
@@ -1,155 +0,0 @@
-
-How to Download Arena of Valor Global: A Guide for MOBA Fans
-If you are a fan of multiplayer online battle arena (MOBA) games, you might have heard of Arena of Valor, an epic 5v5 MOBA game developed by TiMi Studio Group and brought to you by Level Infinite. In this game, you can choose from over 100 unique heroes, team up with your friends, and compete in various modes and maps. Whether you prefer classic 5v5 combat, fast-paced 3v3 action, or solo adventure, there is something for everyone in Arena of Valor.
-But did you know that there is a global version of this game that is available in more than 140 countries and regions? That's right, Arena of Valor Global is the ultimate version of this game that lets you play with players from all over the world, enjoy exclusive content and events, and experience the best performance and graphics. If you want to join the millions of players who are already enjoying this game, you might be wondering how to download it. Don't worry, we've got you covered. In this article, we will show you how to download Arena of Valor Global for your Android, iOS, or PC device. We will also share some tips and tricks for playing this game like a pro. So, without further ado, let's get started!
- What is Arena of Valor Global?
-Arena of Valor Global is a real-time 5v5 MOBA game that offers a variety of features, modes, and heroes for you to enjoy. Here are some of the highlights of this game:
-
-Fast & Fun Matches: You can select a game mode, find opponents, and compete in intense battles that can be completed in 15 minutes or less.
-Fight With Your Friends: You can team up with your friends, create a guild, and master over 100 unique heroes from internationally acclaimed franchises.
-Battle For Top Ranking: You can master your heroes, unleash their powers, and defeat your enemies in ranked matches and climb the leaderboards.
-Enjoy Exclusive Content & Events: You can access exclusive heroes, skins, game modes, and events that are only available in Arena of Valor Global.
-Experience High-Quality Performance & Graphics: You can play the game with smooth controls, stunning visuals, and immersive sound effects that will make you feel like you are in the middle of the action.
-
-As you can see, Arena of Valor Global is a game that has something for everyone. Whether you are a casual player or a hardcore gamer, you will find yourself hooked to this game in no time. But before you can start playing, you need to download the game first. Let's see how you can do that for your device.
- How to Download Arena of Valor Global for Android Devices
-If you have an Android device, such as a smartphone or a tablet, you can download Arena of Valor Global from the Google Play Store. Here are the steps you need to follow:
-
-Open the Google Play Store app on your device.
-Search for "Arena of Valor Global" in the search bar.
-Tap on the game icon that appears in the results.
-Tap on the "Install" button and wait for the game to download and install on your device.
-Once the installation is complete, tap on the "Open" button to launch the game and start playing.
-
-That's it! You have successfully downloaded Arena of Valor Global for your Android device. You can now enjoy the game and join millions of players from around the world. But what if you don't have access to the Google Play Store or you want to download the game from another source? Don't worry, there is another way to download the game using an APK file.
- How to Download Arena of Valor Global APK File
-An APK file is a file format that contains all the data and code needed to install an Android app on your device. You can download an APK file from various websites that offer them, such as APKPure, APKMirror, or Uptodown. However, you need to be careful when downloading APK files from unknown sources, as they may contain malware or viruses that can harm your device. To avoid this, you should always scan the APK file with an antivirus app before installing it. Here are the steps you need to follow to download Arena of Valor Global APK file:
-
-Go to a website that offers Arena of Valor Global APK file, such as APKPure .
-Search for "Arena of Valor Global" in the search bar.
-Select the game icon that appears in the results.
-Tap on the "Download APK" button and wait for the file to download on your device.
-Once the download is complete, locate the file in your device's storage and tap on it to install it.
-If you see a warning message that says "For your security, your phone is not allowed to install unknown apps from this source", tap on "Settings" and enable the option that says "Allow from this source".
-Go back to the installation screen and tap on "Install" to proceed with the installation.
-Once the installation is complete, tap on "Open" to launch the game and start playing.
-
-Congratulations! You have successfully downloaded Arena of Valor Global APK file and installed it on your device. You can now enjoy the game and join millions of players from around the world. But what if you have an iOS device instead of an Android device? Don't worry, we have a solution for that too. Let's see how you can download Arena of Valor Global for your iOS device.
- How to Download Arena of Valor Global for iOS Devices
-If you have an iOS device, such as an iPhone or an iPad, you can download Arena of Valor Global from the App Store. Here are the steps you need to follow:
-
-Open the App Store app on your device.
-Search for "Arena of Valor Global" in the search bar.
-Tap on the game icon that appears in the results.
-Tap on the "Get" button and wait for the game to download and install on your device.
-Once the installation is complete, tap on the game icon on your home screen to launch the game and start playing.
-
-That's it! You have successfully downloaded Arena of Valor Global for your iOS device. You can now enjoy the game and join millions of players from around the world. But what if you don't have access to the App Store or you want to download the game from another source? Don't worry, there is another way to download the game using an IPA file.
- How to Download Arena of Valor Global IPA File
-An IPA file is a file format that contains all the data and code needed to install an iOS app on your device. You can download an IPA file from various websites that offer them, such as Panda Helper, AppValley, or TweakBox. However, you need to be careful when downloading IPA files from unknown sources, as they may contain malware or viruses that can harm your device. To avoid this, you should always scan the IPA file with an antivirus app before installing it. Here are the steps you need to follow to download Arena of Valor Global IPA file:
-
-Go to a website that offers Arena of Valor Global IPA file, such as Panda Helper .
-Search for "Arena of Valor Global" in the search bar.
-Select the game icon that appears in the results.
-Tap on the "Download" button and wait for the file to download on your device.
-Once the download is complete, locate the file in your device's storage and tap on it to install it.
-If you see a warning message that says "Untrusted Enterprise Developer", tap on "Cancel" and go to your device's settings.
-Go to General > Profiles & Device Management and find the profile that belongs to the app you just installed.
-Tap on the profile and then tap on "Trust" to allow the app to run on your device.
-Go back to your home screen and tap on the game icon to launch the game and start playing.
-
-Congratulations! You have successfully downloaded Arena of Valor Global IPA file and installed it on your device. You can now enjoy the game and join millions of players from around the world. But what if you want to play the game on a bigger screen and with better controls? Don't worry, we have a solution for that too. Let's see how you can download Arena of Valor Global for your PC.
- How to Download Arena of Valor Global for PC
-If you want to play Arena of Valor Global on your PC, you will need an emulator. An emulator is a software that allows you to run mobile apps on your computer. There are many emulators available for playing mobile games on PC, but one of the best ones is BlueStacks. BlueStacks is a free and powerful emulator that offers high-quality performance and graphics, easy controls, and a wide range of features. Here are the steps you need to follow to download Arena of Valor Global for PC using BlueStacks emulator:
- How to Download and Install BlueStacks Emulator
-
-Go to BlueStacks official website and click on the "Download BlueStacks" button.
-Wait for the file to download on your PC and then double-click on it to run it.
-Follow the instructions on the screen to install BlueStacks emulator on your PC.
-Once the installation is complete, launch BlueStacks emulator from your desktop or start menu.
-
- How to Download and Install Arena of Valor Global on BlueStacks Emulator
-
-In BlueStacks emulator, go to Google Play Store and sign in with your Google account.
-Search for "Arena of Valor Global" in the search bar.
-Select the game icon that appears in the results.
-Click on the "Install" button and wait for the game to download and install on BlueStacks emulator.
-Once the installation is complete, click on the game icon on the home screen of BlueStacks emulator to launch the game and start playing.
-
-Congratulations! You have successfully downloaded Arena of Valor Global for PC using BlueStacks emulator. You can now enjoy the game on a bigger screen and with better controls. You can also customize your keyboard and mouse settings, record your gameplay, and stream your matches to your friends and fans. But before you jump into the game, you might want to learn some tips and tricks for playing Arena of Valor Global like a pro. Let's see what they are.
- Tips and Tricks for Playing Arena of Valor Global
-Arena of Valor Global is a game that requires skill, strategy, and teamwork. If you want to improve your gameplay and win more matches, you need to master some tips and tricks that will give you an edge over your opponents. Here are some of them:
- Choose Your Role and Hero Wisely
-In Arena of Valor Global, there are five main roles that you can choose from: Tank, Warrior, Assassin, Mage, and Support. Each role has its own strengths, weaknesses, and responsibilities in the game. You should choose a role that suits your playstyle and preference, and then select a hero that fits that role. For example, if you like to initiate fights and protect your teammates, you should choose a Tank role and a hero like Maloch or Thane. If you like to deal massive damage and eliminate enemies quickly, you should choose an Assassin role and a hero like Quillen or Butterfly. You should also consider your team composition and the enemy team composition when choosing your role and hero. You should try to balance your team with different roles and heroes that can complement each other and counter the enemy team.
- Communicate and Coordinate with Your Teammates
-Arena of Valor Global is a team-based game that requires communication and coordination with your teammates. You should use the chat and ping system to communicate with your teammates effectively. You can use the chat to type messages or use voice chat to talk to your teammates. You can also use the ping system to send signals to your teammates, such as "Attack", "Retreat", "Gather", or "Enemy Missing". You should communicate with your teammates about your strategy, objectives, enemy movements, item builds, cooldowns, and other important information. You should also listen to your teammates' suggestions and feedback, and cooperate with them in fights and objectives. By communicating and coordinating with your teammates, you can increase your chances of winning the game.
- Learn from the Pros and Watch Live Streams
-Arena of Valor Global is a game that has a competitive scene with professional players and teams from around the world. If you want to learn from the pros and watch live streams of their matches, you can do so in the game itself. You can go to the "Watch" tab in the game menu and select from various live streams of professional players and teams. You can also watch replays of previous matches or highlights of epic moments. By watching live streams of pros, you can learn from their strategies, techniques, item builds, hero choices, map awareness, positioning, teamwork, and more. You can also interact with them through chat or send them gifts to show your support. By learning from the pros and watching live streams, you can improve your gameplay and skills in Arena of Valor Global.
- Conclusion
-Arena of Valor Global is an epic 5v5 MOBA game that offers a variety of features, modes, and heroes for you to enjoy. Whether you have an Android device, an iOS device, or a PC device, you can download this game easily using our guide above. You can also use our tips and tricks to play this game like a pro and win more matches. Arena of Valor Global is a game that will keep you entertained and challenged for hours. So, what are you waiting for? Download Arena of Valor Global today and join the global community of MOBA fans. You won't regret it!
- FAQs
-Here are some frequently asked questions and answers about Arena of Valor Global:
-
-Q: How much space does Arena of Valor Global take on my device?
-A: Arena of Valor Global takes about 1.5 GB of space on your device. However, this may vary depending on your device model and the updates you download.
-Q: How can I update Arena of Valor Global to the latest version?
-A: You can update Arena of Valor Global to the latest version by going to the Google Play Store or the App Store and tapping on the "Update" button. Alternatively, you can download the latest APK or IPA file from the websites we mentioned above and install it on your device.
-Q: How can I change the language of Arena of Valor Global?
-A: You can change the language of Arena of Valor Global by going to the game settings and tapping on the "Language" option. You can choose from various languages, such as English, Spanish, French, German, Portuguese, Russian, Turkish, Arabic, Thai, Indonesian, Vietnamese, and more.
-Q: How can I contact the customer service of Arena of Valor Global?
-A: You can contact the customer service of Arena of Valor Global by going to the game settings and tapping on the "Customer Service" option. You can then choose from various options, such as FAQ, Feedback, Report a Problem, or Live Chat. You can also email them at support@arenaofvalor.com .
-Q: How can I get more gold and gems in Arena of Valor Global?
-A: You can get more gold and gems in Arena of Valor Global by playing the game regularly, completing quests and achievements, participating in events and activities, joining a guild, watching ads, or purchasing them with real money.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Chess Board Offline 2 Player - A Simple and Fun Chess App for Everyone.md b/spaces/1phancelerku/anime-remove-background/Chess Board Offline 2 Player - A Simple and Fun Chess App for Everyone.md
deleted file mode 100644
index e98b396d4f990512a3ca0cc227a5a142d0ad0001..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Chess Board Offline 2 Player - A Simple and Fun Chess App for Everyone.md
+++ /dev/null
@@ -1,110 +0,0 @@
-
-Chess Board Offline 2 Player APK: A Free and Fun Way to Play Chess with a Friend
- Chess is one of the oldest and most popular board games in the world. It is a game of strategy, logic, and skill that can challenge your mind and entertain you for hours. But what if you want to play chess with a friend without internet connection or creating an account? What if you want to play chess on your phone or tablet without installing multiple apps? What if you want to save and share your games with other chess enthusiasts?
- If you are looking for a simple and convenient way to play chess with a friend on one screen and completely offline, then you should try Chess Board Offline 2 Player APK. This is a free app that lets you play chess on a virtual board with a friend or by yourself. You can also use a chess clock, create custom setups, save unlimited games, and export them in PGN format. In this article, we will tell you more about this app and how to download and install it on your device.
- What is Chess Board Offline 2 Player APK?
- A virtual chess board for two players
- Chess Board Offline 2 Player APK is an app that simulates a real chess board on your screen. You can play chess with a friend by taking turns on the same device. You can also play by yourself against an imaginary opponent or practice different moves and scenarios. The app has a standard 8x8 board with all the pieces and rules of chess. You can move the pieces by dragging them or tapping them.
- A free and offline app
- One of the best features of Chess Board Offline 2 Player APK is that it is completely free and offline. You don't need to pay anything to download or use the app. You also don't need to be online or create an account to play chess. You can play anytime and anywhere without worrying about internet connection or data usage. You can also enjoy the app without any ads or in-app purchases.
- A simple and user-friendly interface
- Another great feature of Chess Board Offline 2 Player APK is that it has a simple and user-friendly interface. The app has a minimalist design that focuses on the chess board and the pieces. The app also has easy-to-use controls and settings that let you customize your game. You can choose between different board colors, piece styles, sound effects, and languages. You can also enable or disable hints, undo moves, flip board, rotate screen, and more.
- Why Should You Download Chess Board Offline 2 Player APK?
- To enjoy chess without internet or accounts
- If you love chess but don't have access to internet or don't want to create an account on other apps, then Chess Board Offline 2 Player APK is perfect for you. You can play chess with a friend on one screen without any hassle or interruption. You can also play by yourself without any pressure or competition. You can have fun and relax with this app.
- To practice chess openings and strategies
- If you want to improve your chess skills or learn new chess openings and strategies, then Chess Board Offline 2 Player APK can help you. You can use the app to practice different moves and scenarios on the board. You can also create custom setups and test your skills. The app has a hint feature that can suggest the best move for you. You can also undo your moves and try different options. The app can help you learn from your mistakes and improve your chess game.
- To save and export your games in PGN format
- If you want to save and share your chess games with other chess enthusiasts, then Chess Board Offline 2 Player APK can help you. The app allows you to save unlimited games on your device. You can also export your games in PGN format, which is a standard format for chess games. You can use PGN files to view, analyze, or replay your games on other apps or websites. You can also share your PGN files with your friends or online communities.
- How to Download and Install Chess Board Offline 2 Player APK?
- Step 1: Go to the official website or Google Play Store
- The easiest way to download Chess Board Offline 2 Player APK is to go to the official website of the app or the Google Play Store. You can use the following links to access them:
-
- You can also scan the QR codes below to download the app:
-
-(QR codes for the official website and the Google Play Store page)
-
- Step 2: Click on the download button or install button
- Once you are on the official website or the Google Play Store, you will see a download button or an install button. Click on it to start downloading the app. The app is about 5 MB in size, so it should not take long to download.
- Step 3: Allow unknown sources if prompted
- If you are downloading the app from the official website, you may need to allow unknown sources on your device. This is because the app is not from the Google Play Store and may not be verified by Google. To allow unknown sources, follow these steps:
-
-Go to your device settings and look for security or privacy options.
-Find the option that says unknown sources or install unknown apps and enable it.
-You may see a warning message that says installing apps from unknown sources may harm your device. Tap on OK or Allow to proceed.
-
- If you are downloading the app from the Google Play Store, you don't need to do this step.
- Step 4: Open the app and start playing
- Once the app is downloaded and installed, you can open it and start playing chess with a friend or by yourself. You will see a welcome screen that shows you how to use the app and its features. You can also access the settings menu to customize your game. Enjoy playing chess with Chess Board Offline 2 Player APK!
- Conclusion
- Chess Board Offline 2 Player APK is a free and fun way to play chess with a friend on one screen and completely offline. You can also play by yourself and practice different moves and scenarios. The app has a simple and user-friendly interface that lets you customize your game. You can also save and export your games in PGN format and share them with other chess enthusiasts. If you love chess and want to play it anytime and anywhere without internet or accounts, then you should download Chess Board Offline 2 Player APK today!
- FAQs
- Q: Is Chess Board Offline 2 Player APK safe?
-A: Yes, Chess Board Offline 2 Player APK is safe to download and use. The app does not require any permissions or access to your device data. The app also does not contain any ads or in-app purchases that may harm your device or privacy.
- Q: Can I play chess online with Chess Board Offline 2 Player APK?
-A: No, Chess Board Offline 2 Player APK is an offline app that does not support online play. You can only play chess with a friend on one screen or by yourself against an imaginary opponent. If you want to play chess online with other players, you will need to use a different app that supports online play.
- Q: Can I play chess with different difficulty levels with Chess Board Offline 2 Player APK?
-A: No, Chess Board Offline 2 Player APK does not have different difficulty levels or artificial intelligence. The app is designed for playing chess with a friend or by yourself. You can adjust the level of challenge by choosing your opponent or creating custom setups. If you want to play chess with different difficulty levels or artificial intelligence, you will need to use a different app that has these features.
- Q: Can I play chess with different variants or rules with Chess Board Offline 2 Player APK?
-A: No, Chess Board Offline 2 Player APK only supports the standard chess rules and variants. The app does not have options for changing the board size, the number of pieces, the movement of pieces, or the game objectives. The app follows the official rules of chess as defined by the World Chess Federation (FIDE). If you want to play chess with different variants or rules, you will need to use a different app that has these options.
- Q: Can I play chess with other apps or devices with Chess Board Offline 2 Player APK?
-A: Yes, you can play chess with other apps or devices with Chess Board Offline 2 Player APK. The app allows you to export your games in PGN format, which is a standard format for chess games. You can use PGN files to view, analyze, or replay your games on other apps or devices that support PGN files. You can also share your PGN files with your friends or online communities that use PGN files.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Incredibox Now and Join the Merry Crew of Beatboxers on Your Android Device.md b/spaces/1phancelerku/anime-remove-background/Download Incredibox Now and Join the Merry Crew of Beatboxers on Your Android Device.md
deleted file mode 100644
index a297684bb1c976f3daf7d3265bbe697eadb566fb..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Incredibox Now and Join the Merry Crew of Beatboxers on Your Android Device.md
+++ /dev/null
@@ -1,131 +0,0 @@
-
-Incredibox Free Download Android: How to Create Your Own Music with a Merry Crew of Beatboxers
- Do you love music and want to create your own songs with a simple and fun app? Do you want to explore different musical genres and mix them together to create unique sounds? Do you want to share your creations with the world and get feedback from other users? If you answered yes to any of these questions, then you should try Incredibox , a music app that lets you create your own music with the help of a merry crew of beatboxers.
- In this article, we will tell you everything you need to know about Incredibox, how to download it for free on your Android device, how to play it, and why you should play it. Let's get started!
-incredibox free download android DOWNLOAD » https://jinyurl.com/2uNLA5
- What is Incredibox?
- Incredibox is a music app that was created in 2009 by the French company So Far So Good. It is a combination of a game, a tool, and an educational resource that introduces kids and adults to notions of rhythm and melody in a fun and entertaining way.
- A fun, interactive music experience
- Incredibox is a music app that lets you create your own music with the help of a merry crew of beatboxers. You can choose from 9 musical styles among 8 impressive atmospheres and start to lay down, record, and share your mix. You can also find the right sound combos to unlock animated choruses that will enhance your tune. You can save your mix and get a link to share it with anybody so they can listen and vote for it. If your mix gets enough votes from other users, you may join the Top 50 chart and become a legend.
- A music app with 9 musical styles and 8 characters
- Incredibox features 9 musical styles that cover a wide range of genres, such as hip-hop, rock, funk, jazz, techno, electro-pop, samba, trap, and Bollywood. Each style has its own atmosphere, graphics, animation, and sound samples. You can switch between styles anytime you want and create your own combinations.
- Incredibox also features 8 characters that represent different types of sounds, such as beats, effects, melodies, voices, choruses, percussions, basses, and bonuses. Each character has its own personality and appearance. You can drag and drop icons onto the characters to make them sing and start to compose your own music. You can also customize the characters by changing their outfits and accessories.
- A game, a tool, and an educational resource
- Incredibox is not only a music app but also a game, a tool, and an educational resource. As a game, Incredibox challenges you to create the best mix possible by finding the right sound combos and unlocking animated choruses. You can also compete with other users by sharing your mix and getting votes from them. As a tool, Incredibox allows you to express your creativity and musical talent by creating your own songs with simple drag-and-drop actions. You can also download your mixes as MP3 files and listen to them anytime you want. As an educational resource, Incredibox introduces you to the basics of musical creation by teaching you about rhythm and melody in an interactive way. You can also learn about different musical genres and cultures by exploring the different styles and atmospheres.
- How to play Incredibox on Android devices?
- Now that you have downloaded Incredibox on your Android device, you may wonder how to play it and have fun with it. Incredibox is a very easy and intuitive app that anyone can use, regardless of their age or musical skills. Here are some steps and tips to help you play Incredibox on your Android device.
- The basic gameplay
- The basic gameplay of Incredibox is very simple and straightforward. You just need to follow these steps:
-
-Open the app and choose a musical style from the 9 available ones. You can swipe left or right to see all the options.
-Tap on the play button to start the music and see the 8 characters on the screen. Each character represents a type of sound, such as beats, effects, melodies, voices, choruses, percussions, basses, and bonuses.
-Drag and drop icons from the bottom of the screen onto the characters to make them sing and create your own mix. You can use up to 20 icons at a time, and you can change them anytime you want.
-Find the right sound combos to unlock animated choruses that will enhance your mix. You can see the progress of the combos on the top of the screen.
-Tap on the record button to record your mix and save it on your device. You can also share it with other users by tapping on the share button.
-
- The advanced features
- Incredibox also has some advanced features that you can use to make your mix more interesting and unique. Here are some of them:
-
-You can mute or solo any character by tapping on them. This will allow you to focus on a specific sound or create different variations of your mix.
-You can shuffle your mix by tapping on the shuffle button. This will randomly change the icons on the characters and create a new mix.
-You can customize your characters by tapping on the customize button. This will let you change their outfits and accessories according to your taste and style.
-You can access the bonus mode by tapping on the bonus button. This will let you play with special sound effects that are not available in the normal mode.
-
- The tips and tricks
- If you want to improve your skills and enjoy Incredibox more, here are some tips and tricks that you can follow:
-
-Experiment with different musical styles and sound combinations. You may discover new sounds and genres that you like or that inspire you.
-Listen to other users' mixes and vote for them. You may learn from their techniques and ideas, and also get feedback from them for your own mixes.
-Try to complete all the combos and unlock all the choruses. This will challenge your creativity and musical sense, and also reward you with amazing animations and sounds.
-Have fun and express yourself. Incredibox is a music app that lets you create your own music with no rules or limitations. You can be as creative and original as you want, and share your emotions and feelings through music.
- Why should you play Incredibox on Android devices?
- By now, you may have a clear idea of what Incredibox is and how to play it on your Android device. But you may still wonder why you should play it and what benefits it can bring you. Here are some reasons why you should play Incredibox on your Android device.
- The benefits of playing Incredibox
- Incredibox is not just a music app, but also a game, a tool, and an educational resource that can offer you many benefits, such as:
-
-It can stimulate your creativity and musical talent by letting you create your own songs with simple drag-and-drop actions.
-It can improve your musical knowledge and skills by introducing you to different musical genres and cultures, and teaching you about rhythm and melody.
-It can enhance your mood and well-being by providing you with a fun and entertaining experience that can make you laugh, smile, and relax.
-It can boost your confidence and self-expression by allowing you to share your creations with the world and get feedback from other users.
-It can foster your social interaction and communication by enabling you to connect with other users who share your passion for music and Incredibox.
-
- The reviews and ratings of Incredibox
- If you are still not convinced by the benefits of playing Incredibox, you may want to check out the reviews and ratings of Incredibox from other users who have tried it. Incredibox has received overwhelmingly positive feedback from its users, who have praised its originality, simplicity, quality, and fun factor. Here are some examples of what users have said about Incredibox:
-
-"This is the best app ever! I love making music with this app. It's so easy and fun. The graphics are amazing and the sounds are awesome. I recommend this app to everyone who loves music."
-
-
-"Incredibox is a masterpiece. It's not just a game, it's an art. It's a way to express yourself through music. It's a way to learn about different musical styles and cultures. It's a way to have fun and relax."
-
-
-"I'm addicted to this app. I can't stop playing it. It's so cool and creative. I love how I can mix different sounds and create my own songs. I also love how I can share my mixes with other people and listen to theirs."
-
- Incredibox has also received high ratings from its users, who have given it an average of 4.8 out of 5 stars on the Google Play Store. This shows that Incredibox is a highly rated and popular app that many users enjoy and appreciate.
- The alternatives to Incredibox
- If you are looking for some alternatives to Incredibox, you may want to try some other music apps that are similar or related to Incredibox. Here are some of them:
-
-Groovepad : A music app that lets you create your own beats and music tracks with various sound effects, loops, samples, and genres.
-Beat Snap : A music app that lets you make your own music with drums, synths, vocals, FX, and more.
-Music Maker Jam : A music app that lets you create your own songs with thousands of studio-quality loops, beats, and samples.
-DJ Loop Pads : A music app that lets you remix your favorite songs or create your own music with various pads, loops, FX, and more.
-BandLab : A music app that lets you record, edit, mix, and share your own music with millions of creators and fans.
-
- Conclusion
- In conclusion, Incredibox is a music app that lets you create your own music with the help of a merry crew of beatboxers. You can choose from 9 musical styles among 8 impressive atmospheres and start to lay down, record, and share your mix. You can also find the right sound combos to unlock animated choruses that will enhance your tune.
- In this article, we have told you everything you need to know about Incredibox, how to download it for free on your Android device, how to play it, and why you should play it. We hope that this article has been helpful and informative for you, and that you have enjoyed reading it as much as we have enjoyed writing it.
- Now that you have reached the end of the article, you may have some questions or doubts about Incredibox or anything related to it. To help you with that, we have prepared a list of 5 frequently asked questions (FAQs) that may answer some of your queries. Here they are:
- FAQs
-
-Is Incredibox safe for kids?
-Yes, Incredibox is safe for kids, as it does not contain any inappropriate or harmful content. It is also suitable for kids, as it is easy to use, fun to play, and educational to learn. In fact, Incredibox is often used by teachers and parents as a way to introduce kids to music and creativity.
-Is Incredibox available for other devices?
-Yes, Incredibox is available for other devices besides Android devices. You can also play Incredibox on iOS devices, such as iPhones and iPads, by downloading it from the App Store. You can also play Incredibox on your web browser, such as Chrome, Firefox, or Safari, by visiting the official website of Incredibox.
-How can I contact the developer of Incredibox?
-If you want to contact the developer of Incredibox, you can do so by visiting their website and filling out the contact form. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, and YouTube. You can also send them an email at contact@incredibox.com.
-How can I support the developer of Incredibox?
-If you want to support the developer of Incredibox, you can do so by buying the app from the app store, leaving a positive review and rating for the app, sharing your mixes with other users and friends, and following their social media accounts. You can also donate to them via PayPal or Patreon.
-How can I learn more about Incredibox?
-If you want to learn more about Incredibox, you can do so by visiting their website and reading their blog posts and news articles. You can also watch their videos and tutorials on their YouTube channel. You can also join their community and forum on their website and interact with other users and fans.
-
- We hope that these FAQs have been useful and informative for you. If you have any other questions or comments about Incredibox or this article, please feel free to leave them below. We would love to hear from you and help you out.
- Thank you for reading this article and playing Incredibox. We hope that you have enjoyed it as much as we have enjoyed writing it and creating music with it. Have a great day and keep on making music!
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Experience Nintendo GameCube and Wii Games on Xbox with Dolphin Emulator A Step-by-Step Guide.md b/spaces/1phancelerku/anime-remove-background/Experience Nintendo GameCube and Wii Games on Xbox with Dolphin Emulator A Step-by-Step Guide.md
deleted file mode 100644
index bfbeeb6ce59954e5188aa38d3825570398fe4352..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Experience Nintendo GameCube and Wii Games on Xbox with Dolphin Emulator A Step-by-Step Guide.md
+++ /dev/null
@@ -1,119 +0,0 @@
-
-How to Download Dolphin Emulator on Xbox
-If you are a fan of Nintendo GameCube and Wii games, you might be wondering if there is a way to play them on your Xbox console. The answer is yes, thanks to a powerful emulator called Dolphin. Dolphin is an open-source emulator that can run GameCube and Wii games on various platforms, including Windows, Linux, Android, and even Xbox. In this article, we will show you how to download and install Dolphin Emulator on your Xbox Series X/S or Xbox One, and how to configure it for the best performance and compatibility. We will also share some tips and tricks to enhance your gaming experience with Dolphin Emulator on Xbox.
- Requirements for Dolphin Emulator on Xbox
-Before you start, you will need the following things:
-
-An Xbox Series X/S or Xbox One console with enough storage space.
-A USB drive formatted as NTFS with at least 4 GB of free space.
-A PC with internet access and a web browser.
-A copy of the latest version of Dolphin Emulator for UWP (Universal Windows Platform) from here.
-Some GameCube or Wii game ROMs or ISOs that you legally own. You can rip them from your original discs using a compatible disc drive and software like CleanRip or RawDump.
-
- How to Enable Developer Mode on Xbox
-The first step is to enable developer mode on your Xbox console. This will allow you to install apps that are not available on the Microsoft Store, such as Dolphin Emulator. Developer mode is free for anyone to use, but it has some limitations and risks. For example, you will not be able to play online multiplayer games or use some features like achievements or game DVR while in developer mode. You will also need to switch back to retail mode if you want to use those features again. To enable developer mode, follow these steps:
-
-Go to https://developer.microsoft.com/en-us/xboxactivate on your PC and sign in with your Microsoft account.
-Select Activate Console and follow the instructions to register your console as a developer device.
-On your console, go to Settings > System > Console info and select Reset console.
-Select Reset and keep my games & apps.
-Wait for the reset process to complete and sign in with your Microsoft account again.
-Go to Settings > System > Developer settings and select Enable developer mode.
-Wait for the console to reboot into developer mode.
-
- How to Download and Install Dolphin Emulator on Xbox
-Now that you have enabled developer mode, you can download and install Dolphin Emulator on your console. To do this, follow these steps:
-
-Copy the Dolphin Emulator app file (DolphinUWP_.appx) from your PC to your USB drive.
-Plug the USB drive into your console.
-On your console, go to Settings > System > Developer settings and select Remote Access Settings.
-Enable Remote Access and set a username and password for authentication.
-Note down the IP address of your console shown under Remote Access Settings.
-On your PC, open a web browser and enter the IP address of your console followed by :11443 in the address bar. For example, https://192.168.1.100:11443.
-You will see a security warning about an untrusted certificate. Click on Advanced and proceed to the website.
-Enter the username and password that you set for your console and click on Log in.
-Click on Add and browse to the location of the Dolphin Emulator app file on your USB drive.
-Select the app file and click on Next.
-Wait for the app to be uploaded and installed on your console.
-Once the installation is complete, you will see Dolphin Emulator listed under Installed Apps on the web page.
-On your console, go to My games & apps > See all > Apps and launch Dolphin Emulator.
-
- How to Configure Dolphin Emulator Settings on Xbox
-Before you start playing games, you will need to configure some settings in Dolphin Emulator to optimize its performance and compatibility. To do this, follow these steps:
-
-On the main menu of Dolphin Emulator, select Config.
-Under the General tab, you can adjust some basic settings such as language, theme, and interface options.
-Under the Graphics tab, you can change some settings related to video output, such as resolution, aspect ratio, vsync, and enhancements. For the best performance, we recommend using the native resolution of your console (1080p for Xbox One and 4K for Xbox Series X/S) and disabling any unnecessary enhancements such as anti-aliasing or anisotropic filtering.
-Under the Audio tab, you can change some settings related to sound output, such as volume, backend, and latency. For the best compatibility, we recommend using the XAudio2 backend and lowering the latency to 20 ms or less.
-Under the GameCube tab, you can change some settings related to GameCube emulation, such as system language, memory card size, and controller type. For the best compatibility, we recommend using a standard controller for port 1 and leaving the other ports empty.
-Under the Wii tab, you can change some settings related to Wii emulation, such as system language, aspect ratio, sensor bar position, and speaker volume. For the best compatibility, we recommend using a horizontal aspect ratio and placing the sensor bar above or below your TV screen.
-Under the Paths tab, you can add or remove folders where Dolphin Emulator will look for game files. By default, it will scan the internal storage of your console and any USB drives connected to it. You can also add network paths if you have game files stored on a PC or a NAS device.
-Under the Advanced tab, you can change some settings related to advanced features such as CPU overclocking, dual core mode, cheats, and debug options. For the best stability, we recommend leaving these settings at their default values unless you know what you are doing.
-
- How to Play GameCube and Wii Games on Xbox with Dolphin Emulator
-Now that you have configured Dolphin Emulator settings on your console, you are ready to play some games. To do this, follow these steps:
-
-Make sure that you have some GameCube or Wii game files (ROMs or ISOs) stored on your console's internal storage or a USB drive. You can also use network paths if you have game files stored on a PC or a NAS device.
-On the main menu of Dolphin Emulator, select Browse.
-Navigate to the folder where your game files are located and select one of them.
-The game will start loading and you will see some information about it on the screen. You can press the Menu button on your controller to access some options such as save states, screenshots, cheats, and more.
-You can use your controller to play the game as if it was a native Xbox game. You can also use a keyboard and mouse if you prefer. You can customize the controller mappings in Dolphin Emulator by going to Controllers > Configure Controller in the main menu.
-To exit the game, press the View button on your controller and select Exit from the menu that appears.
-
- Tips and Tricks for Dolphin Emulator on Xbox
-To make the most out of Dolphin Emulator on Xbox, here are some tips and tricks that you might find useful:
-
-If you encounter any issues with a game, such as graphical glitches, audio problems, or crashes, you can try changing some settings in Dolphin Emulator to fix them. You can also check the official compatibility list for more information about how well each game works with Dolphin Emulator.
-If you want to play games that require motion controls or pointer input, such as Wii Sports or Super Mario Galaxy, you can use a smartphone as a virtual controller. To do this, you will need to download the Dolphin Controller app on your smartphone and connect it to the same Wi-Fi network as your console. Then, you can scan the QR code shown on Dolphin Emulator on your console and pair your smartphone as a controller. You can also customize the layout and sensitivity of the virtual buttons and sensors on your smartphone.
-If you want to play games that support online multiplayer, such as Mario Kart Wii or Super Smash Bros. Brawl, you can use a service called Dolphin Netplay. This will allow you to play with other Dolphin Emulator users over the internet. To do this, you will need to create or join a Netplay session on your PC and then connect your console to it using the IP address and port number shown on Dolphin Emulator on your PC. You will also need to have the same game file and settings as the other players in the session.
-If you want to enhance the graphics and sound of your games, you can use some features such as shaders, texture packs, HD audio packs, and more. These are optional add-ons that can improve the quality and fidelity of your games. You can download them from various sources online and place them in the appropriate folders on your console or USB drive. You can also enable or disable them in Dolphin Emulator by going to Graphics > Enhancements or Audio > DSP in the main menu.
-
- Conclusion
-Dolphin Emulator is a great way to enjoy GameCube and Wii games on your Xbox console. It is easy to download and install, and it offers a lot of customization and optimization options. You can play hundreds of games with high compatibility and performance, and even use some features that are not available on the original consoles, such as online multiplayer, motion controls, and graphical enhancements. Dolphin Emulator is a must-have for any Nintendo fan who owns an Xbox console.
- FAQs
-Here are some frequently asked questions about Dolphin Emulator on Xbox:
-
-Is Dolphin Emulator legal? Dolphin Emulator itself is legal, as it is software that emulates the hardware and software of the GameCube and Wii consoles. However, downloading or distributing game files (ROMs or ISOs) that you do not own is illegal, as it infringes the copyrights of the game developers and publishers. You should only use game files that you have legally obtained from your own discs or digital purchases.
-Is Dolphin Emulator safe? Dolphin Emulator is safe, as long as you download it from its official website or GitHub repository. It does not contain any viruses, malware, or spyware that could harm your console or PC. However, you should be careful when downloading any add-ons or game files from other sources online, as they might contain harmful or malicious content.
-Does Dolphin Emulator work on Xbox One S or Xbox One X? Yes, Dolphin Emulator works on any Xbox One model, including Xbox One S and Xbox One X. However, you might notice some differences in performance and compatibility depending on the model of your console. For example, Xbox One X has more power and memory than Xbox One S, which means it can run some games faster and smoother than Xbox One S.
-Can I use an external hard drive instead of a USB drive for Dolphin Emulator? Yes, you can use an external hard drive instead of a USB drive for Dolphin Emulator, as long as it is formatted as NTFS and has enough space for your game files. However, you might experience some issues with loading times or compatibility depending on the speed and quality of your external hard drive.
-Can I use a wireless controller instead of a wired controller for Dolphin Emulator? Yes, you can use a wireless controller instead of a wired controller for Dolphin Emulator, as long as it is compatible with your console and has enough battery life. However, you might experience some issues with input lag or responsiveness depending on the quality and signal strength of your wireless controller.
-
-
\ No newline at end of file
diff --git a/spaces/1toTree/lora_test/ppdiffusers/experimental/rl/value_guided_sampling.py b/spaces/1toTree/lora_test/ppdiffusers/experimental/rl/value_guided_sampling.py
deleted file mode 100644
index 4184c0ab362dd23eff61c72997291eaa1a95feee..0000000000000000000000000000000000000000
--- a/spaces/1toTree/lora_test/ppdiffusers/experimental/rl/value_guided_sampling.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy as np
-import paddle
-
-from ...models.unet_1d import UNet1DModel
-from ...pipeline_utils import DiffusionPipeline
-from ...schedulers import DDPMScheduler
-
-
-class ValueGuidedRLPipeline(DiffusionPipeline):
- r"""
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
- Pipeline for sampling actions from a diffusion model trained to predict sequences of states.
- Original implementation inspired by this repository: https://github.com/jannerm/diffuser.
-
- Parameters:
-        value_function ([`UNet1DModel`]): A specialized UNet for fine-tuning trajectories based on reward.
- unet ([`UNet1DModel`]): U-Net architecture to denoise the encoded trajectories.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this
- application is [`DDPMScheduler`].
- env: An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models.
- """
-
- def __init__(
- self,
- value_function: UNet1DModel,
- unet: UNet1DModel,
- scheduler: DDPMScheduler,
- env,
- ):
- super().__init__()
- self.value_function = value_function
- self.unet = unet
- self.scheduler = scheduler
- self.env = env
- self.data = env.get_dataset()
- self.means = dict()
- for key in self.data.keys():
- try:
- self.means[key] = self.data[key].mean()
- except Exception:
- pass
- self.stds = dict()
- for key in self.data.keys():
- try:
- self.stds[key] = self.data[key].std()
- except Exception:
- pass
- self.state_dim = env.observation_space.shape[0]
- self.action_dim = env.action_space.shape[0]
-
- def normalize(self, x_in, key):
- return (x_in - self.means[key]) / self.stds[key]
-
- def de_normalize(self, x_in, key):
- return x_in * self.stds[key] + self.means[key]
-
- def to_paddle(self, x_in):
- if type(x_in) is dict:
- return {k: self.to_paddle(v) for k, v in x_in.items()}
- elif paddle.is_tensor(x_in):
- return x_in
- return paddle.to_tensor(x_in)
-
- def reset_x0(self, x_in, cond, act_dim):
- for key, val in cond.items():
- x_in[:, key, act_dim:] = val.clone()
- return x_in
-
- def run_diffusion(self, x, conditions, n_guide_steps, scale):
- batch_size = x.shape[0]
- y = None
- for i in self.progress_bar(self.scheduler.timesteps):
- # create batch of timesteps to pass into model
- timesteps = paddle.full((batch_size,), i, dtype="int64")
- for _ in range(n_guide_steps):
- with paddle.set_grad_enabled(True):
- x.stop_gradient = False
- # permute to match dimension for pre-trained models
- y = self.value_function(x.transpose([0, 2, 1]), timesteps).sample
- grad = paddle.autograd.grad([y.sum()], [x])[0]
-
- posterior_variance = self.scheduler._get_variance(i)
- model_std = paddle.exp(0.5 * posterior_variance)
- grad = model_std * grad
-
- grad[timesteps < 2] = 0
- x = x.detach()
- x = x + scale * grad
- x = self.reset_x0(x, conditions, self.action_dim)
- prev_x = self.unet(x.transpose([0, 2, 1]), timesteps).sample.transpose([0, 2, 1])
- # TODO: verify deprecation of this kwarg
- x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
-
- # apply conditions to the trajectory (set the initial state)
- x = self.reset_x0(x, conditions, self.action_dim)
- x = self.to_paddle(x)
- return x, y
-
- def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
- # normalize the observations and create batch dimension
- obs = self.normalize(obs, "observations")
- obs = obs[None].repeat(batch_size, axis=0)
-
- conditions = {0: self.to_paddle(obs)}
- shape = [batch_size, planning_horizon, self.state_dim + self.action_dim]
-
- # generate initial noise and apply our conditions (to make the trajectories start at current state)
- x1 = paddle.randn(shape)
- x = self.reset_x0(x1, conditions, self.action_dim)
- x = self.to_paddle(x)
-
- # run the diffusion process
- x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
-
- # sort output trajectories by value
- sorted_idx = paddle.argsort(y, 0, descending=True).squeeze()
- sorted_values = x[sorted_idx]
- actions = sorted_values[:, :, : self.action_dim]
- actions = actions.detach().numpy()
- denorm_actions = self.de_normalize(actions, key="actions")
-
- # select the action with the highest value
- if y is not None:
- selected_index = 0
- else:
- # if we didn't run value guiding, select a random action
- selected_index = np.random.randint(0, batch_size)
- denorm_actions = denorm_actions[selected_index, 0]
- return denorm_actions
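-
-
-# A hedged usage sketch (comments only, not part of the original file). It assumes a
-# D4RL-style environment exposing `get_dataset()` (e.g. "hopper-medium-v2") and pretrained
-# `value_function` / `unet` UNet1DModel instances, none of which are defined here:
-#
-#   import gym
-#   env = gym.make("hopper-medium-v2")
-#   pipeline = ValueGuidedRLPipeline(value_function=value_function, unet=unet,
-#                                    scheduler=DDPMScheduler(), env=env)
-#   obs = env.reset()
-#   action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
-#   obs, reward, done, info = env.step(action)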
diff --git a/spaces/A00001/bingothoo/src/pages/api/healthz.ts b/spaces/A00001/bingothoo/src/pages/api/healthz.ts
deleted file mode 100644
index f6ae44ff0fd66ccd3f7feaa550025fbf2a83bf77..0000000000000000000000000000000000000000
--- a/spaces/A00001/bingothoo/src/pages/api/healthz.ts
+++ /dev/null
@@ -1,7 +0,0 @@
-'use server'
-
-import { NextApiRequest, NextApiResponse } from 'next'
-
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- res.status(200).end('ok')
-}
diff --git a/spaces/AI-ZTH-03-23/README/README.md b/spaces/AI-ZTH-03-23/README/README.md
deleted file mode 100644
index 02b96483a48dd036113efcd9b2d52664091b0523..0000000000000000000000000000000000000000
--- a/spaces/AI-ZTH-03-23/README/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: README
-emoji: 🐠
-colorFrom: gray
-colorTo: purple
-sdk: static
-pinned: false
----
-
-# 03-23-2023 Code Examples:
-1. Classroom: https://huggingface.co/AI-ZTH-03-23
-2. Dynamic Architecture Modeling: https://huggingface.co/spaces/awacke1/Streamlit.GraphViz.Dynamic.Architecture.Diagram
-3. Aframe VR IOT Motion Sensor WASD: https://huggingface.co/spaces/awacke1/HTML5-Aframe-3dMap-Flight
-4. MediaPipe: https://huggingface.co/spaces/awacke1/RealTime-MediaPipe-AI-From-Video-On-Any-Device
-5. Wikipedia Fact Check Chat: https://huggingface.co/spaces/awacke1/StreamlitWikipediaChat
-6. Dashboard - Tweet, Wiki, Memory: https://huggingface.co/spaces/awacke1/AI.Dashboard.Wiki.Chat.Cognitive.HTML5
-7. Dashboard - Chat, Download, Image Search, OCR, StoryGen, Q, Mermaid HTML5: https://huggingface.co/spaces/awacke1/AI.Dashboard.Gradio.Streamlit.HTML5
-8. Datasets - Biomed NER: https://huggingface.co/spaces/DataScienceEngineering/7-NER-Biomed-ClinicalTerms
-9. MN Hospitals Comparative Maps: https://huggingface.co/spaces/awacke1/MN.Map.Hospitals.Top.Five
-10. Find Mental Health Providers, Maps, Location: https://huggingface.co/spaces/awacke1/Gradio-Maps-Latitude-Longitude
\ No newline at end of file
diff --git a/spaces/AI-Zero-to-Hero/01-H5-Play-Canvas-Sim-Physics/style.css b/spaces/AI-Zero-to-Hero/01-H5-Play-Canvas-Sim-Physics/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/AI-Zero-to-Hero/01-H5-Play-Canvas-Sim-Physics/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/__init__.py b/spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/res_flow.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/res_flow.py
deleted file mode 100644
index a237b05b7fc0fd8626d3da95ad3a39171e7129fc..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/res_flow.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import torch
-from torch import nn
-from text_to_speech.modules.commons.conv import ConditionalConvBlocks
-from text_to_speech.modules.commons.wavenet import WN
-
-
-class FlipLayer(nn.Module):
- def forward(self, x, nonpadding, cond=None, reverse=False):
- x = torch.flip(x, [1])
- return x
-
-
-class CouplingLayer(nn.Module):
- def __init__(self, c_in, hidden_size, kernel_size, n_layers, p_dropout=0, c_in_g=0, nn_type='wn'):
- super().__init__()
- self.channels = c_in
- self.hidden_size = hidden_size
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.c_half = c_in // 2
-
- self.pre = nn.Conv1d(self.c_half, hidden_size, 1)
- if nn_type == 'wn':
- self.enc = WN(hidden_size, kernel_size, 1, n_layers, p_dropout=p_dropout,
- c_cond=c_in_g)
- elif nn_type == 'conv':
- self.enc = ConditionalConvBlocks(
- hidden_size, c_in_g, hidden_size, None, kernel_size,
- layers_in_block=1, is_BTC=False, num_layers=n_layers)
- self.post = nn.Conv1d(hidden_size, self.c_half, 1)
-
- def forward(self, x, nonpadding, cond=None, reverse=False):
- x0, x1 = x[:, :self.c_half], x[:, self.c_half:]
- x_ = self.pre(x0) * nonpadding
- x_ = self.enc(x_, nonpadding=nonpadding, cond=cond)
- m = self.post(x_)
- x1 = m + x1 if not reverse else x1 - m
- x = torch.cat([x0, x1], 1)
- return x * nonpadding
-
-
-class ResFlow(nn.Module):
- def __init__(self,
- c_in,
- hidden_size,
- kernel_size,
- n_flow_layers,
- n_flow_steps=4,
- c_cond=0,
- nn_type='wn'):
- super().__init__()
- self.flows = nn.ModuleList()
- for i in range(n_flow_steps):
- self.flows.append(
- CouplingLayer(c_in, hidden_size, kernel_size, n_flow_layers, c_in_g=c_cond, nn_type=nn_type))
- self.flows.append(FlipLayer())
-
- def forward(self, x, nonpadding, cond=None, reverse=False):
- for flow in (self.flows if not reverse else reversed(self.flows)):
- x = flow(x, nonpadding, cond=cond, reverse=reverse)
- return x
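-
-
-# Illustrative note (added comment, not part of the original file): with c_in=80, each
-# CouplingLayer splits x [B, 80, T] into x0 = x[:, :40] and x1 = x[:, 40:], predicts
-# m = post(enc(pre(x0))) from the untouched half, and returns cat([x0, x1 + m]) in the
-# forward pass or cat([x0, x1 - m]) in reverse, so every step is exactly invertible.
-# FlipLayer reverses the channel order between steps so the transformed half alternates.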
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/utils.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/utils.py
deleted file mode 100644
index 7eb56ec514bff822ba1a19a6474207ed82492410..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/normalizing_flow/utils.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import torch
-
-
-def squeeze(x, x_mask=None, n_sqz=2):
- b, c, t = x.size()
-
- t = (t // n_sqz) * n_sqz
- x = x[:, :, :t]
- x_sqz = x.view(b, c, t // n_sqz, n_sqz)
- x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz)
-
- if x_mask is not None:
- x_mask = x_mask[:, :, n_sqz - 1::n_sqz]
- else:
- x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype)
- return x_sqz * x_mask, x_mask
-
-
-def unsqueeze(x, x_mask=None, n_sqz=2):
- b, c, t = x.size()
-
- x_unsqz = x.view(b, n_sqz, c // n_sqz, t)
- x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz)
-
- if x_mask is not None:
- x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz)
- else:
- x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype)
- return x_unsqz * x_mask, x_mask
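-
-
-# A small worked example (added comment): squeeze folds pairs of timesteps into the channel
-# axis, truncating any leftover frame, and unsqueeze reverses the operation.
-#
-#   x = torch.randn(2, 80, 101)                       # (batch, channels, time)
-#   x_sqz, m_sqz = squeeze(x, n_sqz=2)                # x_sqz: (2, 160, 50), m_sqz: (2, 1, 50)
-#   x_rec, m_rec = unsqueeze(x_sqz, m_sqz, n_sqz=2)   # x_rec: (2, 80, 100); frame 101 was dropped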
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps_flow.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps_flow.py
deleted file mode 100644
index a52fcd7bc3887f479d02e9ffbf03cb6e717a89d5..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/tasks/tts/ps_flow.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import torch
-from text_to_speech.modules.tts.portaspeech.portaspeech_flow import PortaSpeechFlow
-from tasks.tts.fs import FastSpeechTask
-from tasks.tts.ps import PortaSpeechTask
-from text_to_speech.utils.audio.pitch.utils import denorm_f0
-from text_to_speech.utils.commons.hparams import hparams
-
-
-class PortaSpeechFlowTask(PortaSpeechTask):
- def __init__(self):
- super().__init__()
- self.training_post_glow = False
-
- def build_tts_model(self):
- ph_dict_size = len(self.token_encoder)
- word_dict_size = len(self.word_encoder)
- self.model = PortaSpeechFlow(ph_dict_size, word_dict_size, hparams)
-
- def _training_step(self, sample, batch_idx, opt_idx):
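-        # Note (added comment): when `two_stage` and `use_post_flow` are set, build_optimizer()
-        # returns [main optimizer, post-flow optimizer]; `opt_idx` selects which of the two this
-        # call updates, and the branch below returns None to skip the currently inactive one.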
- self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \
- and hparams['use_post_flow']
- if hparams['two_stage'] and \
- ((opt_idx == 0 and self.training_post_glow) or (opt_idx == 1 and not self.training_post_glow)):
- return None
- loss_output, _ = self.run_model(sample)
- total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad])
- loss_output['batch_size'] = sample['txt_tokens'].size()[0]
- if 'postflow' in loss_output and loss_output['postflow'] is None:
- return None
- return total_loss, loss_output
-
- def run_model(self, sample, infer=False, *args, **kwargs):
- if not infer:
- training_post_glow = self.training_post_glow
- spk_embed = sample.get('spk_embed')
- spk_id = sample.get('spk_ids')
- output = self.model(sample['txt_tokens'],
- sample['word_tokens'],
- ph2word=sample['ph2word'],
- mel2word=sample['mel2word'],
- mel2ph=sample['mel2ph'],
- word_len=sample['word_lengths'].max(),
- tgt_mels=sample['mels'],
- pitch=sample.get('pitch'),
- spk_embed=spk_embed,
- spk_id=spk_id,
- infer=False,
- forward_post_glow=training_post_glow,
- two_stage=hparams['two_stage'],
- global_step=self.global_step,
- bert_feats=sample.get('bert_feats'))
- losses = {}
- self.add_mel_loss(output['mel_out'], sample['mels'], losses)
- if (training_post_glow or not hparams['two_stage']) and hparams['use_post_flow']:
- losses['postflow'] = output['postflow']
- losses['l1'] = losses['l1'].detach()
- losses['ssim'] = losses['ssim'].detach()
- if not training_post_glow or not hparams['two_stage'] or not self.training:
- losses['kl'] = output['kl']
- if self.global_step < hparams['kl_start_steps']:
- losses['kl'] = losses['kl'].detach()
- else:
- losses['kl'] = torch.clamp(losses['kl'], min=hparams['kl_min'])
- losses['kl'] = losses['kl'] * hparams['lambda_kl']
- if hparams['dur_level'] == 'word':
- self.add_dur_loss(
- output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses)
- self.get_attn_stats(output['attn'], sample, losses)
- else:
- super().add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses)
- return losses, output
- else:
- use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
- forward_post_glow = self.global_step >= hparams['post_glow_training_start'] + 1000 \
- and hparams['use_post_flow']
- spk_embed = sample.get('spk_embed')
- spk_id = sample.get('spk_ids')
- output = self.model(
- sample['txt_tokens'],
- sample['word_tokens'],
- ph2word=sample['ph2word'],
- word_len=sample['word_lengths'].max(),
- pitch=sample.get('pitch'),
- mel2ph=sample['mel2ph'] if use_gt_dur else None,
- mel2word=sample['mel2word'] if hparams['profile_infer'] or hparams['use_gt_dur'] else None,
- infer=True,
- forward_post_glow=forward_post_glow,
- spk_embed=spk_embed,
- spk_id=spk_id,
- two_stage=hparams['two_stage'],
- bert_feats=sample.get('bert_feats'))
- return output
-
- def validation_step(self, sample, batch_idx):
- self.training_post_glow = self.global_step >= hparams['post_glow_training_start'] \
- and hparams['use_post_flow']
- return super().validation_step(sample, batch_idx)
-
- def save_valid_result(self, sample, batch_idx, model_out):
- super(PortaSpeechFlowTask, self).save_valid_result(sample, batch_idx, model_out)
- sr = hparams['audio_sample_rate']
- f0_gt = None
- if sample.get('f0') is not None:
- f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu())
- if self.global_step > 0:
- # save FVAE result
- if hparams['use_post_flow']:
- wav_pred = self.vocoder.spec2wav(model_out['mel_out_fvae'][0].cpu(), f0=f0_gt)
- self.logger.add_audio(f'wav_fvae_{batch_idx}', wav_pred, self.global_step, sr)
- self.plot_mel(batch_idx, sample['mels'], model_out['mel_out_fvae'][0],
- f'mel_fvae_{batch_idx}', f0s=f0_gt)
-
- def build_optimizer(self, model):
- if hparams['two_stage'] and hparams['use_post_flow']:
- self.optimizer = torch.optim.AdamW(
- [p for name, p in self.model.named_parameters() if 'post_flow' not in name],
- lr=hparams['lr'],
- betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
- weight_decay=hparams['weight_decay'])
- self.post_flow_optimizer = torch.optim.AdamW(
- self.model.post_flow.parameters(),
- lr=hparams['post_flow_lr'],
- betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
- weight_decay=hparams['weight_decay'])
- return [self.optimizer, self.post_flow_optimizer]
- else:
- self.optimizer = torch.optim.AdamW(
- self.model.parameters(),
- lr=hparams['lr'],
- betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
- weight_decay=hparams['weight_decay'])
- return [self.optimizer]
-
- def build_scheduler(self, optimizer):
- return FastSpeechTask.build_scheduler(self, optimizer[0])
\ No newline at end of file
diff --git a/spaces/AIWaves/SOP_Generation-single/Agent/__init__.py b/spaces/AIWaves/SOP_Generation-single/Agent/__init__.py
deleted file mode 100644
index 5919811a5cec1b9d44051cdb1e9ac26a21ee3064..0000000000000000000000000000000000000000
--- a/spaces/AIWaves/SOP_Generation-single/Agent/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .Agent import Agent
\ No newline at end of file
diff --git a/spaces/AIZeroToHero/04-Image2OCR/README.md b/spaces/AIZeroToHero/04-Image2OCR/README.md
deleted file mode 100644
index 3adf279a22a8ec0c46cfbb8247925692edfdcd9e..0000000000000000000000000000000000000000
--- a/spaces/AIZeroToHero/04-Image2OCR/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 04 Image2OCR
-emoji: 🚀
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.1.5
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Dfehub.py b/spaces/ASJMO/freegpt/g4f/Provider/Providers/Dfehub.py
deleted file mode 100644
index 2f66f19b50b6b4ab79c012f123c47241141942eb..0000000000000000000000000000000000000000
--- a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Dfehub.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import os
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = "https://chat.dfehub.com"
-model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4']
-supports_stream = True
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- headers = {
- 'Authority': 'chat.dfehub.com',
- 'Content-Type': 'application/json',
- 'Method': 'POST',
- 'Path': '/api/openai/v1/chat/completions',
- 'Scheme': 'https',
- 'Accept': 'text/event-stream',
- 'Accept-Language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5',
- 'Origin': 'https://chat.dfehub.com',
- 'Referer': 'https://chat.dfehub.com/',
- 'Sec-Ch-Ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'Sec-Ch-Ua-Mobile': '?0',
- 'Sec-Ch-Ua-Platform': '"Windows"',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-origin',
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- 'X-Requested-With': 'XMLHttpRequest',
- }
-
- data = {
- 'model': model,
- 'temperature': 0.7,
- 'max_tokens': '8000',
- 'presence_penalty': 0,
- 'messages': messages,
- }
-
- response = requests.post(url + '/api/openai/v1/chat/completions',
- headers=headers, json=data, stream=stream)
-
- yield response.json()['choices'][0]['message']['content']
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
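-
-
-# A hedged usage sketch (comments only, not part of the original provider). It assumes this
-# module is imported as part of the g4f package and that the chat.dfehub.com endpoint above
-# is still reachable:
-#
-#   messages = [{"role": "user", "content": "Hello!"}]
-#   for chunk in _create_completion("gpt-3.5-turbo", messages, stream=True):
-#       print(chunk)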
diff --git a/spaces/Aashir01/Live_Transcription/app.py b/spaces/Aashir01/Live_Transcription/app.py
deleted file mode 100644
index ed71e3237ed01aaa15397b34d58651e299e7e605..0000000000000000000000000000000000000000
--- a/spaces/Aashir01/Live_Transcription/app.py
+++ /dev/null
@@ -1,236 +0,0 @@
-import base64
-import math
-import os
-import time
-from functools import partial
-from multiprocessing import Pool
-
-import gradio as gr
-import numpy as np
-import pytube
-import requests
-from processing_whisper import WhisperPrePostProcessor
-from transformers.models.whisper.tokenization_whisper import TO_LANGUAGE_CODE
-from transformers.pipelines.audio_utils import ffmpeg_read
-
-
-title = "Whisper JAX: The Fastest Whisper API ⚡️"
-
-description = """Whisper JAX is an optimised implementation of the [Whisper model](https://huggingface.co/openai/whisper-large-v2) by OpenAI. It runs on JAX with a TPU v4-8 in the backend. Compared to PyTorch on an A100 GPU, it is over [**70x faster**](https://github.com/sanchit-gandhi/whisper-jax#benchmarks), making it the fastest Whisper API available.
-
-Note that at peak times, you may find yourself in the queue for this demo. When you submit a request, your queue position will be shown in the top right-hand side of the demo pane. Once you reach the front of the queue, your audio file will be sent to the TPU and then transcribed, with the progress displayed through a progress bar.
-
-To skip the queue, you may wish to create your own inference endpoint, details for which can be found in the [Whisper JAX repository](https://github.com/sanchit-gandhi/whisper-jax#creating-an-endpoint).
-"""
-
-article = "Whisper large-v2 model by OpenAI. Backend running JAX on a TPU v4-8 through the generous support of the [TRC](https://sites.research.google/trc/about/) programme. Whisper JAX [code](https://github.com/sanchit-gandhi/whisper-jax) and Gradio demo by 🤗 Hugging Face."
-
-API_SEND_URL = os.getenv("API_SEND_URL")
-API_FORWARD_URL = os.getenv("API_FORWARD_URL")
-
-language_names = sorted(TO_LANGUAGE_CODE.keys())
-CHUNK_LENGTH_S = 30
-BATCH_SIZE = 16
-NUM_PROC = 16
-FILE_LIMIT_MB = 1000
-
-
-def query(url, payload):
- response = requests.post(url, json=payload)
- return response.json(), response.status_code
-
-
-def inference(batch_id, idx, task=None, return_timestamps=False):
- payload = {"batch_id": batch_id, "idx": idx, "task": task, "return_timestamps": return_timestamps}
-
- data, status_code = query(API_FORWARD_URL, payload)
-
- if status_code == 200:
- tokens = {"tokens": np.asarray(data["tokens"])}
- return tokens
- else:
- gr.Error(data["detail"])
-
-
-def send_chunks(batch, batch_id):
- feature_shape = batch["input_features"].shape
- batch["input_features"] = base64.b64encode(batch["input_features"].tobytes()).decode()
- query(API_SEND_URL, {"batch": batch, "feature_shape": feature_shape, "batch_id": batch_id})
-
-
-def forward(batch_id, idx, task=None, return_timestamps=False):
- outputs = inference(batch_id, idx, task, return_timestamps)
- return outputs
-
-
-# Copied from https://github.com/openai/whisper/blob/c09a7ae299c4c34c5839a76380ae407e7d785914/whisper/utils.py#L50
-def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = "."):
- if seconds is not None:
- milliseconds = round(seconds * 1000.0)
-
- hours = milliseconds // 3_600_000
- milliseconds -= hours * 3_600_000
-
- minutes = milliseconds // 60_000
- milliseconds -= minutes * 60_000
-
- seconds = milliseconds // 1_000
- milliseconds -= seconds * 1_000
-
- hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
- return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
- else:
- # we have a malformed timestamp so just return it as is
- return seconds
-
-
-if __name__ == "__main__":
- processor = WhisperPrePostProcessor.from_pretrained("openai/whisper-large-v2")
- stride_length_s = CHUNK_LENGTH_S / 6
- chunk_len = round(CHUNK_LENGTH_S * processor.feature_extractor.sampling_rate)
- stride_left = stride_right = round(stride_length_s * processor.feature_extractor.sampling_rate)
- step = chunk_len - stride_left - stride_right
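-    # e.g. with Whisper's 16 kHz feature extractor: chunk_len = 480000 samples (30 s),
-    # stride_left = stride_right = 80000 samples (5 s), so step = 320000 samples (20 s)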
- pool = Pool(NUM_PROC)
-
- def tqdm_generate(inputs: dict, task: str, return_timestamps: bool, progress: gr.Progress):
- inputs_len = inputs["array"].shape[0]
- all_chunk_start_batch_id = np.arange(0, inputs_len, step)
- num_samples = len(all_chunk_start_batch_id)
- num_batches = math.ceil(num_samples / BATCH_SIZE)
- dummy_batches = list(range(num_batches))
-
- dataloader = processor.preprocess_batch(inputs, chunk_length_s=CHUNK_LENGTH_S, batch_size=BATCH_SIZE)
- progress(0, desc="Sending audio to TPU...")
- batch_id = np.random.randint(
- 1000000
- ) # TODO(SG): swap to an iterator - currently taking our 1 in a million chances
- pool.map(partial(send_chunks, batch_id=batch_id), dataloader)
-
- model_outputs = []
- start_time = time.time()
- # iterate over our chunked audio samples
- for idx in progress.tqdm(dummy_batches, desc="Transcribing..."):
- model_outputs.append(forward(batch_id, idx, task=task, return_timestamps=return_timestamps))
- runtime = time.time() - start_time
-
- post_processed = processor.postprocess(model_outputs, return_timestamps=return_timestamps)
- text = post_processed["text"]
- timestamps = post_processed.get("chunks")
- if timestamps is not None:
- timestamps = [
- f"[{format_timestamp(chunk['timestamp'][0])} -> {format_timestamp(chunk['timestamp'][1])}] {chunk['text']}"
- for chunk in timestamps
- ]
- text = "\n".join(str(feature) for feature in timestamps)
- return text, runtime
-
- def transcribe_chunked_audio(inputs, task, return_timestamps, progress=gr.Progress()):
- progress(0, desc="Loading audio file...")
- if inputs is None:
- raise gr.Error("No audio file submitted! Please upload an audio file before submitting your request.")
- file_size_mb = os.stat(inputs).st_size / (1024 * 1024)
- if file_size_mb > FILE_LIMIT_MB:
- raise gr.Error(
- f"File size exceeds file size limit. Got file of size {file_size_mb:.2f}MB for a limit of {FILE_LIMIT_MB}MB."
- )
-
- with open(inputs, "rb") as f:
- inputs = f.read()
-
- inputs = ffmpeg_read(inputs, processor.feature_extractor.sampling_rate)
- inputs = {"array": inputs, "sampling_rate": processor.feature_extractor.sampling_rate}
- text, runtime = tqdm_generate(inputs, task=task, return_timestamps=return_timestamps, progress=progress)
- return text, runtime
-
- def _return_yt_html_embed(yt_url):
- video_id = yt_url.split("?v=")[-1]
- HTML_str = (
-            f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
-            " </center>"
- )
- return HTML_str
-
- def transcribe_youtube(yt_url, task, return_timestamps, progress=gr.Progress(), max_filesize=75.0):
- progress(0, desc="Loading audio file...")
- html_embed_str = _return_yt_html_embed(yt_url)
- try:
- yt = pytube.YouTube(yt_url)
- stream = yt.streams.filter(only_audio=True)[0]
-        except Exception:
- raise gr.Error("An error occurred while loading the YouTube video. Please try again.")
-
- if stream.filesize_mb > max_filesize:
- raise gr.Error(f"Maximum YouTube file size is {max_filesize}MB, got {stream.filesize_mb:.2f}MB.")
-
- stream.download(filename="audio.mp3")
-
- with open("audio.mp3", "rb") as f:
- inputs = f.read()
-
- inputs = ffmpeg_read(inputs, processor.feature_extractor.sampling_rate)
- inputs = {"array": inputs, "sampling_rate": processor.feature_extractor.sampling_rate}
- text, runtime = tqdm_generate(inputs, task=task, return_timestamps=return_timestamps, progress=progress)
- return html_embed_str, text, runtime
-
- microphone_chunked = gr.Interface(
- fn=transcribe_chunked_audio,
- inputs=[
- gr.inputs.Audio(source="microphone", optional=True, type="filepath"),
- gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
- gr.inputs.Checkbox(default=False, label="Return timestamps"),
- ],
- outputs=[
- gr.outputs.Textbox(label="Transcription").style(show_copy_button=True),
- gr.outputs.Textbox(label="Transcription Time (s)"),
- ],
- allow_flagging="never",
- title=title,
- description=description,
- article=article,
- )
-
- audio_chunked = gr.Interface(
- fn=transcribe_chunked_audio,
- inputs=[
- gr.inputs.Audio(source="upload", optional=True, label="Audio file", type="filepath"),
- gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
- gr.inputs.Checkbox(default=False, label="Return timestamps"),
- ],
- outputs=[
- gr.outputs.Textbox(label="Transcription").style(show_copy_button=True),
- gr.outputs.Textbox(label="Transcription Time (s)"),
- ],
- allow_flagging="never",
- title=title,
- description=description,
- article=article,
- )
-
- youtube = gr.Interface(
- fn=transcribe_youtube,
- inputs=[
- gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"),
- gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"),
- gr.inputs.Checkbox(default=False, label="Return timestamps"),
- ],
- outputs=[
- gr.outputs.HTML(label="Video"),
- gr.outputs.Textbox(label="Transcription").style(show_copy_button=True),
- gr.outputs.Textbox(label="Transcription Time (s)"),
- ],
- allow_flagging="never",
- title=title,
- examples=[["https://www.youtube.com/watch?v=m8u-18Q0s7I", "transcribe", False]],
- cache_examples=False,
- description=description,
- article=article,
- )
-
- demo = gr.Blocks()
-
- with demo:
- gr.TabbedInterface([microphone_chunked, audio_chunked, youtube], ["Microphone", "Audio File", "YouTube"])
-
- demo.queue(max_size=10)
- demo.launch(show_api=False, max_threads=10)
-
diff --git a/spaces/Abhaykoul/Wizard-AI/README.md b/spaces/Abhaykoul/Wizard-AI/README.md
deleted file mode 100644
index 8042a6baca2655771bc84753e281a8887699ca1b..0000000000000000000000000000000000000000
--- a/spaces/Abhaykoul/Wizard-AI/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Wizard AI
-emoji: 🏃
-colorFrom: gray
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.28.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/layermanager.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/layermanager.d.ts
deleted file mode 100644
index 5bfa0d86b952307c67973124a38a0101c8bf1a6c..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/layermanager.d.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-import LayerManager from './gameobjects/layer/layermanager/LayerManager';
-export default LayerManager;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/Factory.js
deleted file mode 100644
index a0df36403ee53a70343ee4199af9f1141e228f3e..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/Factory.js
+++ /dev/null
@@ -1,13 +0,0 @@
-import AlphaMaskImage from './AlphaMaskImage.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('alphaMaskImage', function (x, y, key, frame, config) {
- var gameObject = new AlphaMaskImage(this.scene, x, y, key, frame, config);
- this.scene.add.existing(gameObject);
- return gameObject;
-});
-
-SetValue(window, 'RexPlugins.UI.AlphaMaskImage', AlphaMaskImage);
-
-export default AlphaMaskImage;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/UpdateChart.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/UpdateChart.js
deleted file mode 100644
index 84d6d49322e149b8d58c819b9136826813132096..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/UpdateChart.js
+++ /dev/null
@@ -1,8 +0,0 @@
-var UpdateChart = function () {
- if (this.chart === undefined) {
- return this;
- }
- this.chart.update();
- return this;
-}
-export default UpdateChart;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateImage.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateImage.js
deleted file mode 100644
index e8f2506cbea728a94fdd351536e651516b74bdc0..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateImage.js
+++ /dev/null
@@ -1,9 +0,0 @@
-import CreateAnyImage from './utils/CreateAnyImage.js';
-
-const PhaserImage = Phaser.GameObjects.Image;
-
-var CreateImage = function (scene, data, view, styles, customBuilders) {
- return CreateAnyImage(scene, data, view, styles, customBuilders, PhaserImage);
-}
-
-export default CreateImage;
\ No newline at end of file
diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/options/__init__.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/options/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/unconditional_image_generation.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/unconditional_image_generation.md
deleted file mode 100644
index c0888f94c6c135e429feb42d2026962d3a257f5f..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/unconditional_image_generation.md
+++ /dev/null
@@ -1,69 +0,0 @@
-
-
-# Unconditional image generation
-
-[[open-in-colab]]
-
-Unconditional image generation is a relatively straightforward task. The model only generates images - without any additional context like text or an image - resembling the training data it was trained on.
-
-The [`DiffusionPipeline`] is the easiest way to use a pre-trained diffusion system for inference.
-
-Start by creating an instance of [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download.
-You can use any of the 🧨 Diffusers [checkpoints](https://huggingface.co/models?library=diffusers&sort=downloads) from the Hub (the checkpoint you'll use generates images of butterflies).
-
-
-
-💡 Want to train your own unconditional image generation model? Take a look at the training [guide](training/unconditional_training) to learn how to generate your own images.
-
-
-
-In this guide, you'll use [`DiffusionPipeline`] for unconditional image generation with [DDPM](https://arxiv.org/abs/2006.11239):
-
-```python
->>> from diffusers import DiffusionPipeline
-
->>> generator = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128")
-```
-
-The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components.
-Because generating an image runs the model once for every denoising step, the process is compute-intensive and we strongly recommend running it on a GPU.
-You can move the generator object to a GPU, just like you would in PyTorch:
-
-```python
->>> generator.to("cuda")
-```
-
-Now you can use the `generator` to generate an image:
-
-```python
->>> image = generator().images[0]
-```
-
-The output is by default wrapped into a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object.
-
-You can save the image by calling:
-
-```python
->>> image.save("generated_image.png")
-```
-
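-For example, you can trade generation speed against quality by passing a different number of denoising steps to the pipeline call (a quick sketch; the exact default depends on the checkpoint's scheduler):
-
-```python
->>> image = generator(num_inference_steps=100).images[0]
->>> image.save("generated_image_100_steps.png")
-```
-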
-Try out the Spaces below, and feel free to play around with the inference steps parameter to see how it affects the image quality!
-
-
-
-
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_inpaint.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_inpaint.py
deleted file mode 100644
index 44f3bf5049b892cdf48098f14297e9425c5f0773..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_inpaint.py
+++ /dev/null
@@ -1,1088 +0,0 @@
-#
-# Copyright 2023 The HuggingFace Inc. team.
-# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import gc
-import os
-from collections import OrderedDict
-from copy import copy
-from typing import List, Optional, Union
-
-import numpy as np
-import onnx
-import onnx_graphsurgeon as gs
-import PIL
-import tensorrt as trt
-import torch
-from huggingface_hub import snapshot_download
-from onnx import shape_inference
-from polygraphy import cuda
-from polygraphy.backend.common import bytes_from_path
-from polygraphy.backend.onnx.loader import fold_constants
-from polygraphy.backend.trt import (
- CreateConfig,
- Profile,
- engine_from_bytes,
- engine_from_network,
- network_from_onnx_path,
- save_engine,
-)
-from polygraphy.backend.trt import util as trt_util
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
-
-from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.pipelines.stable_diffusion import (
- StableDiffusionInpaintPipeline,
- StableDiffusionPipelineOutput,
- StableDiffusionSafetyChecker,
-)
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image
-from diffusers.schedulers import DDIMScheduler
-from diffusers.utils import DIFFUSERS_CACHE, logging
-
-
-"""
-Installation instructions
-python3 -m pip install --upgrade transformers diffusers>=0.16.0
-python3 -m pip install --upgrade tensorrt>=8.6.1
-python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
-python3 -m pip install onnxruntime
-"""
-
-TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-# Map of numpy dtype -> torch dtype
-numpy_to_torch_dtype_dict = {
- np.uint8: torch.uint8,
- np.int8: torch.int8,
- np.int16: torch.int16,
- np.int32: torch.int32,
- np.int64: torch.int64,
- np.float16: torch.float16,
- np.float32: torch.float32,
- np.float64: torch.float64,
- np.complex64: torch.complex64,
- np.complex128: torch.complex128,
-}
-if np.version.full_version >= "1.24.0":
- numpy_to_torch_dtype_dict[np.bool_] = torch.bool
-else:
- numpy_to_torch_dtype_dict[np.bool] = torch.bool
-
-# Map of torch dtype -> numpy dtype
-torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}
-
-
-def device_view(t):
- return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
-
-
-def preprocess_image(image):
- """
-    image: PIL.Image.Image (resized to multiples of 32 and converted to a torch.Tensor in [-1, 1])
- """
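-    # Illustrative example (added comment): a 500x700 PIL image is resized to 480x672 (the
-    # nearest lower multiples of 32) and returned as a (1, 3, 672, 480) float tensor in [-1, 1].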
- w, h = image.size
- w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
- image = image.resize((w, h))
- image = np.array(image).astype(np.float32) / 255.0
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image).contiguous()
- return 2.0 * image - 1.0
-
-
-class Engine:
- def __init__(self, engine_path):
- self.engine_path = engine_path
- self.engine = None
- self.context = None
- self.buffers = OrderedDict()
- self.tensors = OrderedDict()
-
- def __del__(self):
-        for buf in self.buffers.values():
-            if isinstance(buf, cuda.DeviceArray):
-                buf.free()
- del self.engine
- del self.context
- del self.buffers
- del self.tensors
-
- def build(
- self,
- onnx_path,
- fp16,
- input_profile=None,
- enable_preview=False,
- enable_all_tactics=False,
- timing_cache=None,
- workspace_size=0,
- ):
- logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
- p = Profile()
- if input_profile:
- for name, dims in input_profile.items():
- assert len(dims) == 3
- p.add(name, min=dims[0], opt=dims[1], max=dims[2])
-
- config_kwargs = {}
-
- config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
- if enable_preview:
- # Faster dynamic shapes made optional since it increases engine build time.
- config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
- if workspace_size > 0:
- config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
- if not enable_all_tactics:
- config_kwargs["tactic_sources"] = []
-
- engine = engine_from_network(
- network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
- config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),
- save_timing_cache=timing_cache,
- )
- save_engine(engine, path=self.engine_path)
-
- def load(self):
- logger.warning(f"Loading TensorRT engine: {self.engine_path}")
- self.engine = engine_from_bytes(bytes_from_path(self.engine_path))
-
- def activate(self):
- self.context = self.engine.create_execution_context()
-
- def allocate_buffers(self, shape_dict=None, device="cuda"):
- for idx in range(trt_util.get_bindings_per_profile(self.engine)):
- binding = self.engine[idx]
- if shape_dict and binding in shape_dict:
- shape = shape_dict[binding]
- else:
- shape = self.engine.get_binding_shape(binding)
- dtype = trt.nptype(self.engine.get_binding_dtype(binding))
- if self.engine.binding_is_input(binding):
- self.context.set_binding_shape(idx, shape)
- tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
- self.tensors[binding] = tensor
- self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
-
- def infer(self, feed_dict, stream):
- start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
- # shallow copy of ordered dict
- device_buffers = copy(self.buffers)
- for name, buf in feed_dict.items():
- assert isinstance(buf, cuda.DeviceView)
- device_buffers[name] = buf
- bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]
- noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)
- if not noerror:
- raise ValueError("ERROR: inference failed.")
-
- return self.tensors
-
-
-class Optimizer:
- def __init__(self, onnx_graph):
- self.graph = gs.import_onnx(onnx_graph)
-
- def cleanup(self, return_onnx=False):
- self.graph.cleanup().toposort()
- if return_onnx:
- return gs.export_onnx(self.graph)
-
- def select_outputs(self, keep, names=None):
- self.graph.outputs = [self.graph.outputs[o] for o in keep]
- if names:
- for i, name in enumerate(names):
- self.graph.outputs[i].name = name
-
- def fold_constants(self, return_onnx=False):
- onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)
- self.graph = gs.import_onnx(onnx_graph)
- if return_onnx:
- return onnx_graph
-
- def infer_shapes(self, return_onnx=False):
- onnx_graph = gs.export_onnx(self.graph)
- if onnx_graph.ByteSize() > 2147483648:
- raise TypeError("ERROR: model size exceeds supported 2GB limit")
- else:
- onnx_graph = shape_inference.infer_shapes(onnx_graph)
-
- self.graph = gs.import_onnx(onnx_graph)
- if return_onnx:
- return onnx_graph
-
-
-class BaseModel:
- def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77):
- self.model = model
- self.name = "SD Model"
- self.fp16 = fp16
- self.device = device
-
- self.min_batch = 1
- self.max_batch = max_batch_size
- self.min_image_shape = 256 # min image resolution: 256x256
- self.max_image_shape = 1024 # max image resolution: 1024x1024
- self.min_latent_shape = self.min_image_shape // 8
- self.max_latent_shape = self.max_image_shape // 8
-
- self.embedding_dim = embedding_dim
- self.text_maxlen = text_maxlen
-
- def get_model(self):
- return self.model
-
- def get_input_names(self):
- pass
-
- def get_output_names(self):
- pass
-
- def get_dynamic_axes(self):
- return None
-
- def get_sample_input(self, batch_size, image_height, image_width):
- pass
-
- def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
- return None
-
- def get_shape_dict(self, batch_size, image_height, image_width):
- return None
-
- def optimize(self, onnx_graph):
- opt = Optimizer(onnx_graph)
- opt.cleanup()
- opt.fold_constants()
- opt.infer_shapes()
- onnx_opt_graph = opt.cleanup(return_onnx=True)
- return onnx_opt_graph
-
- def check_dims(self, batch_size, image_height, image_width):
- assert batch_size >= self.min_batch and batch_size <= self.max_batch
-        assert image_height % 8 == 0 and image_width % 8 == 0  # both dims must be multiples of 8
- latent_height = image_height // 8
- latent_width = image_width // 8
- assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape
- assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape
- return (latent_height, latent_width)
-
- def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
- min_batch = batch_size if static_batch else self.min_batch
- max_batch = batch_size if static_batch else self.max_batch
- latent_height = image_height // 8
- latent_width = image_width // 8
- min_image_height = image_height if static_shape else self.min_image_shape
- max_image_height = image_height if static_shape else self.max_image_shape
- min_image_width = image_width if static_shape else self.min_image_shape
- max_image_width = image_width if static_shape else self.max_image_shape
- min_latent_height = latent_height if static_shape else self.min_latent_shape
- max_latent_height = latent_height if static_shape else self.max_latent_shape
- min_latent_width = latent_width if static_shape else self.min_latent_shape
- max_latent_width = latent_width if static_shape else self.max_latent_shape
- return (
- min_batch,
- max_batch,
- min_image_height,
- max_image_height,
- min_image_width,
- max_image_width,
- min_latent_height,
- max_latent_height,
- min_latent_width,
- max_latent_width,
- )
-
-
-def getOnnxPath(model_name, onnx_dir, opt=True):
- return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx")
-
-
-def getEnginePath(model_name, engine_dir):
- return os.path.join(engine_dir, model_name + ".plan")
-
-
-def build_engines(
- models: dict,
- engine_dir,
- onnx_dir,
- onnx_opset,
- opt_image_height,
- opt_image_width,
- opt_batch_size=1,
- force_engine_rebuild=False,
- static_batch=False,
- static_shape=True,
- enable_preview=False,
- enable_all_tactics=False,
- timing_cache=None,
- max_workspace_size=0,
-):
- built_engines = {}
- if not os.path.isdir(onnx_dir):
- os.makedirs(onnx_dir)
- if not os.path.isdir(engine_dir):
- os.makedirs(engine_dir)
-
- # Export models to ONNX
- for model_name, model_obj in models.items():
- engine_path = getEnginePath(model_name, engine_dir)
- if force_engine_rebuild or not os.path.exists(engine_path):
- logger.warning("Building Engines...")
- logger.warning("Engine build can take a while to complete")
- onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
- onnx_opt_path = getOnnxPath(model_name, onnx_dir)
- if force_engine_rebuild or not os.path.exists(onnx_opt_path):
- if force_engine_rebuild or not os.path.exists(onnx_path):
- logger.warning(f"Exporting model: {onnx_path}")
- model = model_obj.get_model()
- with torch.inference_mode(), torch.autocast("cuda"):
- inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)
- torch.onnx.export(
- model,
- inputs,
- onnx_path,
- export_params=True,
- opset_version=onnx_opset,
- do_constant_folding=True,
- input_names=model_obj.get_input_names(),
- output_names=model_obj.get_output_names(),
- dynamic_axes=model_obj.get_dynamic_axes(),
- )
- del model
- torch.cuda.empty_cache()
- gc.collect()
- else:
- logger.warning(f"Found cached model: {onnx_path}")
-
- # Optimize onnx
- if force_engine_rebuild or not os.path.exists(onnx_opt_path):
- logger.warning(f"Generating optimizing model: {onnx_opt_path}")
- onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))
- onnx.save(onnx_opt_graph, onnx_opt_path)
- else:
- logger.warning(f"Found cached optimized model: {onnx_opt_path} ")
-
- # Build TensorRT engines
- for model_name, model_obj in models.items():
- engine_path = getEnginePath(model_name, engine_dir)
- engine = Engine(engine_path)
- onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
- onnx_opt_path = getOnnxPath(model_name, onnx_dir)
-
- if force_engine_rebuild or not os.path.exists(engine.engine_path):
- engine.build(
- onnx_opt_path,
- fp16=True,
- input_profile=model_obj.get_input_profile(
- opt_batch_size,
- opt_image_height,
- opt_image_width,
- static_batch=static_batch,
- static_shape=static_shape,
- ),
- enable_preview=enable_preview,
- timing_cache=timing_cache,
- workspace_size=max_workspace_size,
- )
- built_engines[model_name] = engine
-
- # Load and activate TensorRT engines
- for model_name, model_obj in models.items():
- engine = built_engines[model_name]
- engine.load()
- engine.activate()
-
- return built_engines
-
-
-def runEngine(engine, feed_dict, stream):
- return engine.infer(feed_dict, stream)
-
-
-class CLIP(BaseModel):
- def __init__(self, model, device, max_batch_size, embedding_dim):
- super(CLIP, self).__init__(
- model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
- )
- self.name = "CLIP"
-
- def get_input_names(self):
- return ["input_ids"]
-
- def get_output_names(self):
- return ["text_embeddings", "pooler_output"]
-
- def get_dynamic_axes(self):
- return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}}
-
- def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
- self.check_dims(batch_size, image_height, image_width)
- min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(
- batch_size, image_height, image_width, static_batch, static_shape
- )
- return {
- "input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]
- }
-
- def get_shape_dict(self, batch_size, image_height, image_width):
- self.check_dims(batch_size, image_height, image_width)
- return {
- "input_ids": (batch_size, self.text_maxlen),
- "text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim),
- }
-
- def get_sample_input(self, batch_size, image_height, image_width):
- self.check_dims(batch_size, image_height, image_width)
- return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)
-
- def optimize(self, onnx_graph):
- opt = Optimizer(onnx_graph)
- opt.select_outputs([0]) # delete graph output#1
- opt.cleanup()
- opt.fold_constants()
- opt.infer_shapes()
- opt.select_outputs([0], names=["text_embeddings"]) # rename network output
- opt_onnx_graph = opt.cleanup(return_onnx=True)
- return opt_onnx_graph
-
-
-def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):
- return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
-
-
-class UNet(BaseModel):
- def __init__(
- self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4
- ):
- super(UNet, self).__init__(
- model=model,
- fp16=fp16,
- device=device,
- max_batch_size=max_batch_size,
- embedding_dim=embedding_dim,
- text_maxlen=text_maxlen,
- )
- self.unet_dim = unet_dim
- self.name = "UNet"
-
- def get_input_names(self):
- return ["sample", "timestep", "encoder_hidden_states"]
-
- def get_output_names(self):
- return ["latent"]
-
- def get_dynamic_axes(self):
- return {
- "sample": {0: "2B", 2: "H", 3: "W"},
- "encoder_hidden_states": {0: "2B"},
- "latent": {0: "2B", 2: "H", 3: "W"},
- }
-
- def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
- (
- min_batch,
- max_batch,
- _,
- _,
- _,
- _,
- min_latent_height,
- max_latent_height,
- min_latent_width,
- max_latent_width,
- ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
- return {
- "sample": [
- (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),
- (2 * batch_size, self.unet_dim, latent_height, latent_width),
- (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),
- ],
- "encoder_hidden_states": [
- (2 * min_batch, self.text_maxlen, self.embedding_dim),
- (2 * batch_size, self.text_maxlen, self.embedding_dim),
- (2 * max_batch, self.text_maxlen, self.embedding_dim),
- ],
- }
-
- def get_shape_dict(self, batch_size, image_height, image_width):
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
- return {
- "sample": (2 * batch_size, self.unet_dim, latent_height, latent_width),
- "encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim),
- "latent": (2 * batch_size, 4, latent_height, latent_width),
- }
-
- def get_sample_input(self, batch_size, image_height, image_width):
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
- dtype = torch.float16 if self.fp16 else torch.float32
- return (
- torch.randn(
- 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device
- ),
- torch.tensor([1.0], dtype=torch.float32, device=self.device),
- torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),
- )
-
-
-def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False, unet_dim=4):
- return UNet(
- model,
- fp16=True,
- device=device,
- max_batch_size=max_batch_size,
- embedding_dim=embedding_dim,
- unet_dim=unet_dim,
- )
-
-
-class VAE(BaseModel):
- def __init__(self, model, device, max_batch_size, embedding_dim):
- super(VAE, self).__init__(
- model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
- )
- self.name = "VAE decoder"
-
- def get_input_names(self):
- return ["latent"]
-
- def get_output_names(self):
- return ["images"]
-
- def get_dynamic_axes(self):
- return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}}
-
- def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
- (
- min_batch,
- max_batch,
- _,
- _,
- _,
- _,
- min_latent_height,
- max_latent_height,
- min_latent_width,
- max_latent_width,
- ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
- return {
- "latent": [
- (min_batch, 4, min_latent_height, min_latent_width),
- (batch_size, 4, latent_height, latent_width),
- (max_batch, 4, max_latent_height, max_latent_width),
- ]
- }
-
- def get_shape_dict(self, batch_size, image_height, image_width):
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
- return {
- "latent": (batch_size, 4, latent_height, latent_width),
- "images": (batch_size, 3, image_height, image_width),
- }
-
- def get_sample_input(self, batch_size, image_height, image_width):
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
- return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)
-
-
-def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):
- return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
-
-
-class TorchVAEEncoder(torch.nn.Module):
- def __init__(self, model):
- super().__init__()
- self.vae_encoder = model
-
- def forward(self, x):
- return self.vae_encoder.encode(x).latent_dist.sample()
-
-
-class VAEEncoder(BaseModel):
- def __init__(self, model, device, max_batch_size, embedding_dim):
- super(VAEEncoder, self).__init__(
- model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
- )
- self.name = "VAE encoder"
-
- def get_model(self):
- vae_encoder = TorchVAEEncoder(self.model)
- return vae_encoder
-
- def get_input_names(self):
- return ["images"]
-
- def get_output_names(self):
- return ["latent"]
-
- def get_dynamic_axes(self):
- return {"images": {0: "B", 2: "8H", 3: "8W"}, "latent": {0: "B", 2: "H", 3: "W"}}
-
- def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
- assert batch_size >= self.min_batch and batch_size <= self.max_batch
- min_batch = batch_size if static_batch else self.min_batch
- max_batch = batch_size if static_batch else self.max_batch
- self.check_dims(batch_size, image_height, image_width)
- (
- min_batch,
- max_batch,
- min_image_height,
- max_image_height,
- min_image_width,
- max_image_width,
- _,
- _,
- _,
- _,
- ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
-
- return {
- "images": [
- (min_batch, 3, min_image_height, min_image_width),
- (batch_size, 3, image_height, image_width),
- (max_batch, 3, max_image_height, max_image_width),
- ]
- }
-
- def get_shape_dict(self, batch_size, image_height, image_width):
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
- return {
- "images": (batch_size, 3, image_height, image_width),
- "latent": (batch_size, 4, latent_height, latent_width),
- }
-
- def get_sample_input(self, batch_size, image_height, image_width):
- self.check_dims(batch_size, image_height, image_width)
- return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device)
-
-
-def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False):
- return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
-
-
-class TensorRTStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
- r"""
- Pipeline for inpainting using TensorRT accelerated Stable Diffusion.
-
- This model inherits from [`StableDiffusionInpaintPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
- text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder. Stable Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- safety_checker ([`StableDiffusionSafetyChecker`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
- feature_extractor ([`CLIPFeatureExtractor`]):
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
- """
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- scheduler: DDIMScheduler,
- safety_checker: StableDiffusionSafetyChecker,
- feature_extractor: CLIPFeatureExtractor,
- requires_safety_checker: bool = True,
- stages=["clip", "unet", "vae", "vae_encoder"],
- image_height: int = 512,
- image_width: int = 512,
- max_batch_size: int = 16,
- # ONNX export parameters
- onnx_opset: int = 17,
- onnx_dir: str = "onnx",
- # TensorRT engine build parameters
- engine_dir: str = "engine",
- build_preview_features: bool = True,
- force_engine_rebuild: bool = False,
- timing_cache: str = "timing_cache",
- ):
- super().__init__(
- vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker
- )
-
- self.vae.forward = self.vae.decode
-
- self.stages = stages
- self.image_height, self.image_width = image_height, image_width
- self.inpaint = True
- self.onnx_opset = onnx_opset
- self.onnx_dir = onnx_dir
- self.engine_dir = engine_dir
- self.force_engine_rebuild = force_engine_rebuild
- self.timing_cache = timing_cache
- self.build_static_batch = False
- self.build_dynamic_shape = False
- self.build_preview_features = build_preview_features
-
- self.max_batch_size = max_batch_size
-        # TODO: Restrict batch size to 4 for larger image dimensions as a workaround (WAR) for a TensorRT limitation.
- if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:
- self.max_batch_size = 4
-
-        self.stream = None  # loaded in __loadResources()
- self.models = {} # loaded in __loadModels()
- self.engine = {} # loaded in build_engines()
-
- def __loadModels(self):
- # Load pipeline models
- self.embedding_dim = self.text_encoder.config.hidden_size
- models_args = {
- "device": self.torch_device,
- "max_batch_size": self.max_batch_size,
- "embedding_dim": self.embedding_dim,
- "inpaint": self.inpaint,
- }
- if "clip" in self.stages:
- self.models["clip"] = make_CLIP(self.text_encoder, **models_args)
- if "unet" in self.stages:
- self.models["unet"] = make_UNet(self.unet, **models_args, unet_dim=self.unet.config.in_channels)
- if "vae" in self.stages:
- self.models["vae"] = make_VAE(self.vae, **models_args)
- if "vae_encoder" in self.stages:
- self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args)
-
- @classmethod
- def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
- cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
- resume_download = kwargs.pop("resume_download", False)
- proxies = kwargs.pop("proxies", None)
- local_files_only = kwargs.pop("local_files_only", False)
- use_auth_token = kwargs.pop("use_auth_token", None)
- revision = kwargs.pop("revision", None)
-
- cls.cached_folder = (
- pretrained_model_name_or_path
- if os.path.isdir(pretrained_model_name_or_path)
- else snapshot_download(
- pretrained_model_name_or_path,
- cache_dir=cache_dir,
- resume_download=resume_download,
- proxies=proxies,
- local_files_only=local_files_only,
- use_auth_token=use_auth_token,
- revision=revision,
- )
- )
-
- def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):
- super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)
-
- self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)
- self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)
- self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)
-
- # set device
- self.torch_device = self._execution_device
- logger.warning(f"Running inference on device: {self.torch_device}")
-
- # load models
- self.__loadModels()
-
- # build engines
- self.engine = build_engines(
- self.models,
- self.engine_dir,
- self.onnx_dir,
- self.onnx_opset,
- opt_image_height=self.image_height,
- opt_image_width=self.image_width,
- force_engine_rebuild=self.force_engine_rebuild,
- static_batch=self.build_static_batch,
- static_shape=not self.build_dynamic_shape,
- enable_preview=self.build_preview_features,
- timing_cache=self.timing_cache,
- )
-
- return self
-
- def __initialize_timesteps(self, timesteps, strength):
- self.scheduler.set_timesteps(timesteps)
- offset = self.scheduler.steps_offset if hasattr(self.scheduler, "steps_offset") else 0
- init_timestep = int(timesteps * strength) + offset
- init_timestep = min(init_timestep, timesteps)
- t_start = max(timesteps - init_timestep + offset, 0)
- timesteps = self.scheduler.timesteps[t_start:].to(self.torch_device)
- return timesteps, t_start
-
- def __preprocess_images(self, batch_size, images=()):
- init_images = []
- for image in images:
- image = image.to(self.torch_device).float()
- image = image.repeat(batch_size, 1, 1, 1)
- init_images.append(image)
- return tuple(init_images)
-
- def __encode_image(self, init_image):
- init_latents = runEngine(self.engine["vae_encoder"], {"images": device_view(init_image)}, self.stream)[
- "latent"
- ]
- init_latents = 0.18215 * init_latents
- return init_latents
-
- def __encode_prompt(self, prompt, negative_prompt):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
- prompt to be encoded
- negative_prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                `negative_prompt_embeds` instead.
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- """
- # Tokenize prompt
- text_input_ids = (
- self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- .input_ids.type(torch.int32)
- .to(self.torch_device)
- )
-
- text_input_ids_inp = device_view(text_input_ids)
- # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
- text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids_inp}, self.stream)[
- "text_embeddings"
- ].clone()
-
- # Tokenize negative prompt
- uncond_input_ids = (
- self.tokenizer(
- negative_prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- .input_ids.type(torch.int32)
- .to(self.torch_device)
- )
- uncond_input_ids_inp = device_view(uncond_input_ids)
- uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
- "text_embeddings"
- ]
-
- # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes for classifier free guidance
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)
-
- return text_embeddings
-
- def __denoise_latent(
- self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None
- ):
- if not isinstance(timesteps, torch.Tensor):
- timesteps = self.scheduler.timesteps
- for step_index, timestep in enumerate(timesteps):
- # Expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2)
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)
- if isinstance(mask, torch.Tensor):
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
-
- # Predict the noise residual
- timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep
-
- sample_inp = device_view(latent_model_input)
- timestep_inp = device_view(timestep_float)
- embeddings_inp = device_view(text_embeddings)
- noise_pred = runEngine(
- self.engine["unet"],
- {"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp},
- self.stream,
- )["latent"]
-
- # Perform guidance
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample
-
- latents = 1.0 / 0.18215 * latents
- return latents
-
- def __decode_latent(self, latents):
- images = runEngine(self.engine["vae"], {"latent": device_view(latents)}, self.stream)["images"]
- images = (images / 2 + 0.5).clamp(0, 1)
- return images.cpu().permute(0, 2, 3, 1).float().numpy()
-
- def __loadResources(self, image_height, image_width, batch_size):
- self.stream = cuda.Stream()
-
- # Allocate buffers for TensorRT engine bindings
- for model_name, obj in self.models.items():
- self.engine[model_name].allocate_buffers(
- shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device
- )
-
- @torch.no_grad()
- def __call__(
- self,
- prompt: Union[str, List[str]] = None,
- image: Union[torch.FloatTensor, PIL.Image.Image] = None,
- mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
- strength: float = 0.75,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- ):
- r"""
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts to guide the image generation. If not defined, one has to pass
-                `prompt_embeds` instead.
- image (`PIL.Image.Image`):
- `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
- be masked out with `mask_image` and repainted according to `prompt`.
- mask_image (`PIL.Image.Image`):
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
- repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
- to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
- instead of 3, so the expected shape would be `(B, H, W, 1)`.
-            strength (`float`, *optional*, defaults to 0.75):
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
- be maximum and the denoising process will run for the full number of iterations specified in
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
-                The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                `negative_prompt_embeds` instead.
- Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
- to make generation deterministic.
-
- """
- self.generator = generator
- self.denoising_steps = num_inference_steps
- self.guidance_scale = guidance_scale
-
- # Pre-compute latent input scales and linear multistep coefficients
- self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)
-
- # Define call parameters
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- prompt = [prompt]
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}")
-
- if negative_prompt is None:
- negative_prompt = [""] * batch_size
-
- if negative_prompt is not None and isinstance(negative_prompt, str):
- negative_prompt = [negative_prompt]
-
- assert len(prompt) == len(negative_prompt)
-
- if batch_size > self.max_batch_size:
- raise ValueError(
- f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4"
- )
-
- # Validate image dimensions
- mask_width, mask_height = mask_image.size
- if mask_height != self.image_height or mask_width != self.image_width:
- raise ValueError(
- f"Input image height and width {self.image_height} and {self.image_width} are not equal to "
- f"the respective dimensions of the mask image {mask_height} and {mask_width}"
- )
-
- # load resources
- self.__loadResources(self.image_height, self.image_width, batch_size)
-
- with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER):
- # Spatial dimensions of latent tensor
- latent_height = self.image_height // 8
- latent_width = self.image_width // 8
-
- # Pre-initialize latents
- num_channels_latents = self.vae.config.latent_channels
- latents = self.prepare_latents(
- batch_size,
- num_channels_latents,
- self.image_height,
- self.image_width,
- torch.float32,
- self.torch_device,
- generator,
- )
-
- # Pre-process input images
- mask, masked_image = self.__preprocess_images(batch_size, prepare_mask_and_masked_image(image, mask_image))
- mask = torch.nn.functional.interpolate(mask, size=(latent_height, latent_width))
- mask = torch.cat([mask] * 2)
-
- # Initialize timesteps
- timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength)
-
- # VAE encode masked image
- masked_latents = self.__encode_image(masked_image)
- masked_latents = torch.cat([masked_latents] * 2)
-
- # CLIP text encoder
- text_embeddings = self.__encode_prompt(prompt, negative_prompt)
-
- # UNet denoiser
- latents = self.__denoise_latent(
- latents,
- text_embeddings,
- timesteps=timesteps,
- step_offset=t_start,
- mask=mask,
- masked_image_latents=masked_latents,
- )
-
- # VAE decode latent
- images = self.__decode_latent(latents)
-
- images = self.numpy_to_pil(images)
- return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=None)
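-
-
-# A minimal usage sketch (illustrative only, not part of the original pipeline
-# code): the checkpoint id, image paths and prompt below are placeholder
-# assumptions, and the first call to `.to("cuda")` exports the ONNX graphs and
-# builds the TensorRT engines, which can take a long time.
-if __name__ == "__main__":
-    from PIL import Image
-
-    model_id = "stabilityai/stable-diffusion-2-inpainting"  # assumed checkpoint
-    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
-    pipe = TensorRTStableDiffusionInpaintPipeline.from_pretrained(
-        model_id, scheduler=scheduler, image_height=512, image_width=512
-    )
-    pipe.set_cached_folder(model_id)  # ONNX models and engines are cached here
-    pipe = pipe.to("cuda")
-
-    init_image = Image.open("dog.png").convert("RGB").resize((512, 512))
-    mask_image = Image.open("dog_mask.png").convert("L").resize((512, 512))
-    result = pipe(
-        prompt="a black cat sitting on a bench",
-        image=init_image,
-        mask_image=mask_image,
-    ).images[0]
-    result.save("inpainted.png")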
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py
deleted file mode 100644
index 56836f0b6d77b8daa25e956101694863e418339f..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file is autogenerated by the command `make fix-copies`, do not edit.
-from ..utils import DummyObject, requires_backends
-
-
-class StableDiffusionKDiffusionPipeline(metaclass=DummyObject):
- _backends = ["torch", "transformers", "k_diffusion"]
-
- def __init__(self, *args, **kwargs):
- requires_backends(self, ["torch", "transformers", "k_diffusion"])
-
- @classmethod
- def from_config(cls, *args, **kwargs):
- requires_backends(cls, ["torch", "transformers", "k_diffusion"])
-
- @classmethod
- def from_pretrained(cls, *args, **kwargs):
- requires_backends(cls, ["torch", "transformers", "k_diffusion"])
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
deleted file mode 100644
index 1905af6c695fa6e4e87bac2e6c462a8b9439528c..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import gc
-import random
-import unittest
-
-import numpy as np
-import torch
-from PIL import Image
-from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
-
-from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
-from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
-
-
-enable_full_determinism()
-
-
-class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
- def tearDown(self):
- # clean up the VRAM after each test
- super().tearDown()
- gc.collect()
- torch.cuda.empty_cache()
-
- @property
- def dummy_image(self):
- batch_size = 1
- num_channels = 3
- sizes = (32, 32)
-
- image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
- return image
-
- @property
- def dummy_cond_unet_upscale(self):
- torch.manual_seed(0)
- model = UNet2DConditionModel(
- block_out_channels=(32, 32, 64),
- layers_per_block=2,
- sample_size=32,
- in_channels=7,
- out_channels=4,
- down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
- up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
- cross_attention_dim=32,
- # SD2-specific config below
- attention_head_dim=8,
- use_linear_projection=True,
- only_cross_attention=(True, True, False),
- num_class_embeds=100,
- )
- return model
-
- @property
- def dummy_vae(self):
- torch.manual_seed(0)
- model = AutoencoderKL(
- block_out_channels=[32, 32, 64],
- in_channels=3,
- out_channels=3,
- down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
- up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
- latent_channels=4,
- )
- return model
-
- @property
- def dummy_text_encoder(self):
- torch.manual_seed(0)
- config = CLIPTextConfig(
- bos_token_id=0,
- eos_token_id=2,
- hidden_size=32,
- intermediate_size=37,
- layer_norm_eps=1e-05,
- num_attention_heads=4,
- num_hidden_layers=5,
- pad_token_id=1,
- vocab_size=1000,
- # SD2-specific config below
- hidden_act="gelu",
- projection_dim=512,
- )
- return CLIPTextModel(config)
-
- def test_stable_diffusion_upscale(self):
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
- unet = self.dummy_cond_unet_upscale
- low_res_scheduler = DDPMScheduler()
- scheduler = DDIMScheduler(prediction_type="v_prediction")
- vae = self.dummy_vae
- text_encoder = self.dummy_text_encoder
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
- image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
- low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
-
-        # assemble the upscale pipeline from the dummy components
- sd_pipe = StableDiffusionUpscalePipeline(
- unet=unet,
- low_res_scheduler=low_res_scheduler,
- scheduler=scheduler,
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- max_noise_level=350,
- )
- sd_pipe = sd_pipe.to(device)
- sd_pipe.set_progress_bar_config(disable=None)
-
- prompt = "A painting of a squirrel eating a burger"
- generator = torch.Generator(device=device).manual_seed(0)
- output = sd_pipe(
- [prompt],
- image=low_res_image,
- generator=generator,
- guidance_scale=6.0,
- noise_level=20,
- num_inference_steps=2,
- output_type="np",
- )
-
- image = output.images
-
- generator = torch.Generator(device=device).manual_seed(0)
- image_from_tuple = sd_pipe(
- [prompt],
- image=low_res_image,
- generator=generator,
- guidance_scale=6.0,
- noise_level=20,
- num_inference_steps=2,
- output_type="np",
- return_dict=False,
- )[0]
-
- image_slice = image[0, -3:, -3:, -1]
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
-
- expected_height_width = low_res_image.size[0] * 4
- assert image.shape == (1, expected_height_width, expected_height_width, 3)
- expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
-
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
- assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
-
- def test_stable_diffusion_upscale_batch(self):
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
- unet = self.dummy_cond_unet_upscale
- low_res_scheduler = DDPMScheduler()
- scheduler = DDIMScheduler(prediction_type="v_prediction")
- vae = self.dummy_vae
- text_encoder = self.dummy_text_encoder
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
- image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
- low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
-
-        # assemble the upscale pipeline from the dummy components
- sd_pipe = StableDiffusionUpscalePipeline(
- unet=unet,
- low_res_scheduler=low_res_scheduler,
- scheduler=scheduler,
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- max_noise_level=350,
- )
- sd_pipe = sd_pipe.to(device)
- sd_pipe.set_progress_bar_config(disable=None)
-
- prompt = "A painting of a squirrel eating a burger"
- output = sd_pipe(
- 2 * [prompt],
- image=2 * [low_res_image],
- guidance_scale=6.0,
- noise_level=20,
- num_inference_steps=2,
- output_type="np",
- )
- image = output.images
- assert image.shape[0] == 2
-
- generator = torch.Generator(device=device).manual_seed(0)
- output = sd_pipe(
- [prompt],
- image=low_res_image,
- generator=generator,
- num_images_per_prompt=2,
- guidance_scale=6.0,
- noise_level=20,
- num_inference_steps=2,
- output_type="np",
- )
- image = output.images
- assert image.shape[0] == 2
-
- def test_stable_diffusion_upscale_prompt_embeds(self):
- device = "cpu" # ensure determinism for the device-dependent torch.Generator
- unet = self.dummy_cond_unet_upscale
- low_res_scheduler = DDPMScheduler()
- scheduler = DDIMScheduler(prediction_type="v_prediction")
- vae = self.dummy_vae
- text_encoder = self.dummy_text_encoder
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
- image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
- low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
-
-        # assemble the upscale pipeline from the dummy components
- sd_pipe = StableDiffusionUpscalePipeline(
- unet=unet,
- low_res_scheduler=low_res_scheduler,
- scheduler=scheduler,
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- max_noise_level=350,
- )
- sd_pipe = sd_pipe.to(device)
- sd_pipe.set_progress_bar_config(disable=None)
-
- prompt = "A painting of a squirrel eating a burger"
- generator = torch.Generator(device=device).manual_seed(0)
- output = sd_pipe(
- [prompt],
- image=low_res_image,
- generator=generator,
- guidance_scale=6.0,
- noise_level=20,
- num_inference_steps=2,
- output_type="np",
- )
-
- image = output.images
-
- generator = torch.Generator(device=device).manual_seed(0)
- prompt_embeds = sd_pipe._encode_prompt(prompt, device, 1, False)
- image_from_prompt_embeds = sd_pipe(
- prompt_embeds=prompt_embeds,
- image=[low_res_image],
- generator=generator,
- guidance_scale=6.0,
- noise_level=20,
- num_inference_steps=2,
- output_type="np",
- return_dict=False,
- )[0]
-
- image_slice = image[0, -3:, -3:, -1]
- image_from_prompt_embeds_slice = image_from_prompt_embeds[0, -3:, -3:, -1]
-
- expected_height_width = low_res_image.size[0] * 4
- assert image.shape == (1, expected_height_width, expected_height_width, 3)
- expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
-
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
- assert np.abs(image_from_prompt_embeds_slice.flatten() - expected_slice).max() < 1e-2
-
- @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
- def test_stable_diffusion_upscale_fp16(self):
- """Test that stable diffusion upscale works with fp16"""
- unet = self.dummy_cond_unet_upscale
- low_res_scheduler = DDPMScheduler()
- scheduler = DDIMScheduler(prediction_type="v_prediction")
- vae = self.dummy_vae
- text_encoder = self.dummy_text_encoder
- tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
-
- image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
- low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
-
- # put models in fp16, except vae as it overflows in fp16
- unet = unet.half()
- text_encoder = text_encoder.half()
-
-        # assemble the upscale pipeline from the dummy components
- sd_pipe = StableDiffusionUpscalePipeline(
- unet=unet,
- low_res_scheduler=low_res_scheduler,
- scheduler=scheduler,
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- max_noise_level=350,
- )
- sd_pipe = sd_pipe.to(torch_device)
- sd_pipe.set_progress_bar_config(disable=None)
-
- prompt = "A painting of a squirrel eating a burger"
- generator = torch.manual_seed(0)
- image = sd_pipe(
- [prompt],
- image=low_res_image,
- generator=generator,
- num_inference_steps=2,
- output_type="np",
- ).images
-
- expected_height_width = low_res_image.size[0] * 4
- assert image.shape == (1, expected_height_width, expected_height_width, 3)
-
-
-@slow
-@require_torch_gpu
-class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
- def tearDown(self):
- # clean up the VRAM after each test
- super().tearDown()
- gc.collect()
- torch.cuda.empty_cache()
-
- def test_stable_diffusion_upscale_pipeline(self):
- image = load_image(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
- "/sd2-upscale/low_res_cat.png"
- )
- expected_image = load_numpy(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
- "/upsampled_cat.npy"
- )
-
- model_id = "stabilityai/stable-diffusion-x4-upscaler"
- pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
- pipe.to(torch_device)
- pipe.set_progress_bar_config(disable=None)
- pipe.enable_attention_slicing()
-
- prompt = "a cat sitting on a park bench"
-
- generator = torch.manual_seed(0)
- output = pipe(
- prompt=prompt,
- image=image,
- generator=generator,
- output_type="np",
- )
- image = output.images[0]
-
- assert image.shape == (512, 512, 3)
- assert np.abs(expected_image - image).max() < 1e-3
-
- def test_stable_diffusion_upscale_pipeline_fp16(self):
- image = load_image(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
- "/sd2-upscale/low_res_cat.png"
- )
- expected_image = load_numpy(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
- "/upsampled_cat_fp16.npy"
- )
-
- model_id = "stabilityai/stable-diffusion-x4-upscaler"
- pipe = StableDiffusionUpscalePipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16,
- )
- pipe.to(torch_device)
- pipe.set_progress_bar_config(disable=None)
- pipe.enable_attention_slicing()
-
- prompt = "a cat sitting on a park bench"
-
- generator = torch.manual_seed(0)
- output = pipe(
- prompt=prompt,
- image=image,
- generator=generator,
- output_type="np",
- )
- image = output.images[0]
-
- assert image.shape == (512, 512, 3)
- assert np.abs(expected_image - image).max() < 5e-1
-
- def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
- torch.cuda.empty_cache()
- torch.cuda.reset_max_memory_allocated()
- torch.cuda.reset_peak_memory_stats()
-
- image = load_image(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
- "/sd2-upscale/low_res_cat.png"
- )
-
- model_id = "stabilityai/stable-diffusion-x4-upscaler"
- pipe = StableDiffusionUpscalePipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16,
- )
- pipe.to(torch_device)
- pipe.set_progress_bar_config(disable=None)
- pipe.enable_attention_slicing(1)
- pipe.enable_sequential_cpu_offload()
-
- prompt = "a cat sitting on a park bench"
-
- generator = torch.manual_seed(0)
- _ = pipe(
- prompt=prompt,
- image=image,
- generator=generator,
- num_inference_steps=5,
- output_type="np",
- )
-
- mem_bytes = torch.cuda.max_memory_allocated()
- # make sure that less than 2.9 GB is allocated
- assert mem_bytes < 2.9 * 10**9
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
deleted file mode 100644
index 4b87b2ce58b2efc2461046df897038fdd5128cee..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
+++ /dev/null
@@ -1,42 +0,0 @@
-_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
-model = dict(
- pretrained='open-mmlab://detectron2/resnet50_caffe',
- backbone=dict(
- norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe'))
-# use caffe img_norm
-img_norm_cfg = dict(
- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(
- type='Resize',
- img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
- (1333, 768), (1333, 800)],
- multiscale_mode='value',
- keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/atss_assigner.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/atss_assigner.py
deleted file mode 100644
index d4fe9d0e3c8704bd780d493eff20a5505dbe9580..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/atss_assigner.py
+++ /dev/null
@@ -1,178 +0,0 @@
-import torch
-
-from ..builder import BBOX_ASSIGNERS
-from ..iou_calculators import build_iou_calculator
-from .assign_result import AssignResult
-from .base_assigner import BaseAssigner
-
-
-@BBOX_ASSIGNERS.register_module()
-class ATSSAssigner(BaseAssigner):
- """Assign a corresponding gt bbox or background to each bbox.
-
-    Each proposal will be assigned `0` or a positive integer
- indicating the ground truth index.
-
- - 0: negative sample, no assigned gt
- - positive integer: positive sample, index (1-based) of assigned gt
-
- Args:
-        topk (int): number of candidate bboxes selected on each pyramid level
- """
-
- def __init__(self,
- topk,
- iou_calculator=dict(type='BboxOverlaps2D'),
- ignore_iof_thr=-1):
- self.topk = topk
- self.iou_calculator = build_iou_calculator(iou_calculator)
- self.ignore_iof_thr = ignore_iof_thr
-
- # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
-
- def assign(self,
- bboxes,
- num_level_bboxes,
- gt_bboxes,
- gt_bboxes_ignore=None,
- gt_labels=None):
- """Assign gt to bboxes.
-
- The assignment is done in following steps
-
-        1. compute iou between all bboxes (bboxes of all pyramid levels) and gts
-        2. compute the center distance between all bboxes and gts
-        3. on each pyramid level, for each gt, select the k bboxes whose
-           centers are closest to the gt center, so k*l bboxes in total are
-           selected as candidates for each gt
-        4. get the corresponding ious for these candidates, compute their
-           mean and std, and set mean + std as the iou threshold
-        5. select the candidates whose iou is greater than or equal to the
-           threshold as positive
-        6. limit the centers of the positive samples to lie inside the gts
-
-
- Args:
- bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
- num_level_bboxes (List): num of bboxes in each level
- gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
- gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
- labelled as `ignored`, e.g., crowd boxes in COCO.
- gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
-
- Returns:
- :obj:`AssignResult`: The assign result.
- """
- INF = 100000000
- bboxes = bboxes[:, :4]
- num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
-
- # compute iou between all bbox and gt
- overlaps = self.iou_calculator(bboxes, gt_bboxes)
-
- # assign 0 by default
- assigned_gt_inds = overlaps.new_full((num_bboxes, ),
- 0,
- dtype=torch.long)
-
- if num_gt == 0 or num_bboxes == 0:
- # No ground truth or boxes, return empty assignment
- max_overlaps = overlaps.new_zeros((num_bboxes, ))
- if num_gt == 0:
- # No truth, assign everything to background
- assigned_gt_inds[:] = 0
- if gt_labels is None:
- assigned_labels = None
- else:
- assigned_labels = overlaps.new_full((num_bboxes, ),
- -1,
- dtype=torch.long)
- return AssignResult(
- num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
-
- # compute center distance between all bbox and gt
- gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
- gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
- gt_points = torch.stack((gt_cx, gt_cy), dim=1)
-
- bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
- bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
- bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)
-
- distances = (bboxes_points[:, None, :] -
- gt_points[None, :, :]).pow(2).sum(-1).sqrt()
-
- if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
- and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
- ignore_overlaps = self.iou_calculator(
- bboxes, gt_bboxes_ignore, mode='iof')
- ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
- ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
- distances[ignore_idxs, :] = INF
- assigned_gt_inds[ignore_idxs] = -1
-
- # Selecting candidates based on the center distance
- candidate_idxs = []
- start_idx = 0
- for level, bboxes_per_level in enumerate(num_level_bboxes):
- # on each pyramid level, for each gt,
-            # select the k bboxes whose centers are closest to the gt center
- end_idx = start_idx + bboxes_per_level
- distances_per_level = distances[start_idx:end_idx, :]
- selectable_k = min(self.topk, bboxes_per_level)
- _, topk_idxs_per_level = distances_per_level.topk(
- selectable_k, dim=0, largest=False)
- candidate_idxs.append(topk_idxs_per_level + start_idx)
- start_idx = end_idx
- candidate_idxs = torch.cat(candidate_idxs, dim=0)
-
-        # get the corresponding ious for these candidates, compute their
-        # mean and std, and set mean + std as the iou threshold
- candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
- overlaps_mean_per_gt = candidate_overlaps.mean(0)
- overlaps_std_per_gt = candidate_overlaps.std(0)
- overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
-
- is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
-
-        # limit the centers of the positive samples to lie inside the gts
- for gt_idx in range(num_gt):
- candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
- ep_bboxes_cx = bboxes_cx.view(1, -1).expand(
- num_gt, num_bboxes).contiguous().view(-1)
- ep_bboxes_cy = bboxes_cy.view(1, -1).expand(
- num_gt, num_bboxes).contiguous().view(-1)
- candidate_idxs = candidate_idxs.view(-1)
-
- # calculate the left, top, right, bottom distance between positive
- # bbox center and gt side
- l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
- t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
- r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
- b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
- is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
- is_pos = is_pos & is_in_gts
-
- # if an anchor box is assigned to multiple gts,
- # the one with the highest IoU will be selected.
- overlaps_inf = torch.full_like(overlaps,
- -INF).t().contiguous().view(-1)
- index = candidate_idxs.view(-1)[is_pos.view(-1)]
- overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
- overlaps_inf = overlaps_inf.view(num_gt, -1).t()
-
- max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
- assigned_gt_inds[
- max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1
-
- if gt_labels is not None:
- assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
- pos_inds = torch.nonzero(
- assigned_gt_inds > 0, as_tuple=False).squeeze()
- if pos_inds.numel() > 0:
- assigned_labels[pos_inds] = gt_labels[
- assigned_gt_inds[pos_inds] - 1]
- else:
- assigned_labels = None
- return AssignResult(
- num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
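The candidate-selection comments above describe ATSS-style thresholding: for each gt, the IoU cutoff is the mean plus the standard deviation of that gt's candidate IoUs. A minimal standalone sketch of just that step, using an invented IoU matrix rather than real detector output:

```python
import torch

# Invented IoU matrix: 6 candidate anchors (rows) x 2 ground-truth boxes (columns).
candidate_overlaps = torch.tensor([
    [0.10, 0.55],
    [0.42, 0.61],
    [0.38, 0.05],
    [0.71, 0.12],
    [0.25, 0.48],
    [0.66, 0.30],
])

# Per-gt adaptive threshold: mean + std of that gt's candidate IoUs.
overlaps_thr_per_gt = candidate_overlaps.mean(0) + candidate_overlaps.std(0)

# Candidates whose IoU clears the per-gt threshold become positive samples
# (the real assigner additionally requires the anchor center to lie inside the gt).
is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
print(overlaps_thr_per_gt, is_pos, sep="\n")
```

Because the threshold adapts to each gt's own IoU statistics, easy and hard objects end up with comparable numbers of positives, which is the point of the mean + std rule.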
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/roi_extractors/__init__.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/roi_extractors/__init__.py
deleted file mode 100644
index a6ec0ecc3063cd23c2463f2f53f1c2a83b04d43b..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/roi_extractors/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .generic_roi_extractor import GenericRoIExtractor
-from .single_level_roi_extractor import SingleRoIExtractor
-
-__all__ = [
- 'SingleRoIExtractor',
- 'GenericRoIExtractor',
-]
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/utils/collect_env.py b/spaces/Andy1621/uniformer_image_detection/mmdet/utils/collect_env.py
deleted file mode 100644
index 89c064accdb10abec4a03de04f601d27aab2da70..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/utils/collect_env.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from mmcv.utils import collect_env as collect_base_env
-from mmcv.utils import get_git_hash
-
-import mmdet
-
-
-def collect_env():
- """Collect the information of the running environments."""
- env_info = collect_base_env()
- env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
- return env_info
-
-
-if __name__ == '__main__':
- for name, val in collect_env().items():
- print(f'{name}: {val}')
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py
deleted file mode 100644
index d2bac38ca6760af6441ede5a04409ed495ef87f3..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './ccnet_r50-d8_512x1024_40k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context.py
deleted file mode 100644
index 71a0fda48aa2538e4d913e73e94a71564377ea50..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_80k_pascal_context.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = [
- '../_base_/models/deeplabv3_r50-d8.py',
- '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_80k.py'
-]
-model = dict(
- decode_head=dict(num_classes=60),
- auxiliary_head=dict(num_classes=60),
- test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
-optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py
deleted file mode 100644
index ab8a3d3e3fcc12dd41223af190e2ae04f14d1cb8..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(
- pretrained='torchvision://resnet101',
- backbone=dict(type='ResNet', depth=101))
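Short configs like the one above work through `_base_` inheritance: the child file only overrides the backbone and inherits everything else. A hedged sketch of inspecting the merged result, assuming mmcv is installed and the command is run from the repository root so the config paths resolve:

```python
from mmcv import Config  # mmcv resolves the _base_ chain when loading a config

cfg = Config.fromfile('configs/pspnet/pspnet_r101b-d8_512x1024_80k_cityscapes.py')

# Only the fields set in the child file are overridden; the rest comes from _base_.
print(cfg.model.pretrained)      # 'torchvision://resnet101'
print(cfg.model.backbone.depth)  # 101
```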
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/__init__.py
deleted file mode 100644
index 7a17b7b3b6ad49157ee41f3da304fec3d32342d3..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-"""Index interaction code
-"""
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/_jaraco_text.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/_jaraco_text.py
deleted file mode 100644
index e06947c051a7d2273260343eab37d9437f91e781..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/_jaraco_text.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""Functions brought over from jaraco.text.
-
-These functions are not supposed to be used within `pip._internal`. These are
-helper functions brought over from `jaraco.text` to enable vendoring newer
-copies of `pkg_resources` without having to vendor `jaraco.text` and its entire
-dependency cone; something that our vendoring setup is not currently capable of
-handling.
-
-License reproduced from original source below:
-
-Copyright Jason R. Coombs
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
-import functools
-import itertools
-
-
-def _nonblank(str):
- return str and not str.startswith("#")
-
-
-@functools.singledispatch
-def yield_lines(iterable):
- r"""
- Yield valid lines of a string or iterable.
-
- >>> list(yield_lines(''))
- []
- >>> list(yield_lines(['foo', 'bar']))
- ['foo', 'bar']
- >>> list(yield_lines('foo\nbar'))
- ['foo', 'bar']
- >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
- ['foo', 'baz #comment']
- >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
- ['foo', 'bar', 'baz', 'bing']
- """
- return itertools.chain.from_iterable(map(yield_lines, iterable))
-
-
-@yield_lines.register(str)
-def _(text):
- return filter(_nonblank, map(str.strip, text.splitlines()))
-
-
-def drop_comment(line):
- """
- Drop comments.
-
- >>> drop_comment('foo # bar')
- 'foo'
-
- A hash without a space may be in a URL.
-
- >>> drop_comment('http://example.com/foo#bar')
- 'http://example.com/foo#bar'
- """
- return line.partition(" #")[0]
-
-
-def join_continuation(lines):
- r"""
- Join lines continued by a trailing backslash.
-
- >>> list(join_continuation(['foo \\', 'bar', 'baz']))
- ['foobar', 'baz']
- >>> list(join_continuation(['foo \\', 'bar', 'baz']))
- ['foobar', 'baz']
- >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
- ['foobarbaz']
-
- Not sure why, but...
- The character preceeding the backslash is also elided.
-
- >>> list(join_continuation(['goo\\', 'dly']))
- ['godly']
-
- A terrible idea, but...
- If no line is available to continue, suppress the lines.
-
- >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
- ['foo']
- """
- lines = iter(lines)
- for item in lines:
- while item.endswith("\\"):
- try:
- item = item[:-2].strip() + next(lines)
- except StopIteration:
- return
- yield item
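As a rough illustration of how the three helpers above compose, the sketch below feeds them an invented requirements-style snippet; it assumes `yield_lines`, `drop_comment`, and `join_continuation` from this module are in scope:

```python
# Invented text with a full-line comment, an inline comment, blank lines,
# and a trailing-backslash continuation.
raw = """
# build requirements
setuptools \\
>=61.0
wheel # only needed for bdists
"""

# yield_lines drops blanks and full-line comments, drop_comment strips inline
# comments, and join_continuation merges the backslash-continued line.
cleaned = join_continuation(map(drop_comment, yield_lines(raw)))
print(list(cleaned))  # ['setuptools>=61.0', 'wheel']
```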
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/_version.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/_version.py
deleted file mode 100644
index e12dd0e78530cc37bfa6599d3b9121bba90d77cb..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/_version.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# This file is protected via CODEOWNERS
-__version__ = "1.26.15"
diff --git a/spaces/Atualli/yoloxTeste/app1.py b/spaces/Atualli/yoloxTeste/app1.py
deleted file mode 100644
index 5360eaa34d2fa1ff13d820aa838c94a58182c367..0000000000000000000000000000000000000000
--- a/spaces/Atualli/yoloxTeste/app1.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import gradio as gr
-import os
-#os.system("pip -qq install yoloxdetect==0.0.7")
-os.system("pip -qq install yoloxdetect")
-import torch
-import json
-import yoloxdetect2.helpers as yoloxdetect
-#from yoloxdetect import YoloxDetector
-
-
-# Images
-torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
-torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
-torch.hub.download_url_to_file('https://raw.githubusercontent.com/Megvii-BaseDetection/YOLOX/main/assets/dog.jpg', 'dog.jpg')
-
-model = yoloxdetect.YoloxDetector2('kadirnar/yolox_s-v0.1.1', 'configs.yolox_s', device="cuda", hf_model=True)
-
-def yolox_inference(
- image_path: gr.inputs.Image = None,
- model_path: gr.inputs.Dropdown = 'kadirnar/yolox_s-v0.1.1',
- config_path: gr.inputs.Textbox = 'configs.yolox_s',
- image_size: gr.inputs.Slider = 640
-):
- """
- YOLOX inference function
- Args:
- image_path: Path to the input image
- model_path: Path to the model
- config_path: Path to the config file
- image_size: Image size
- Returns:
- JSON string describing the detected objects
- """
-
- #model = YoloxDetector(model_path, config_path=config_path, device="cpu", hf_model=True)
- #pred = model.predict(image_path=image_path, image_size=image_size)
- pred2 = []
- if model :
- print (image_path)
- model.torchyolo = True
- pred2 = model.predict(image_path=image_path, image_size=image_size)
- #text = "Ola"
- #print (vars(model))
- #print (pred2[0])
- #print (pred2[1])
- #print (pred2[2])
- #os.remove(image_path)
-
-
- tensor = {
- "tensorflow": [
- ]
- }
-
- if pred2 is not None:
- #print (pred2[3])
- for i, element in enumerate(pred2[0]):
- object = {}
- itemclass = round(pred2[2][i].item())
- object["classe"] = itemclass
- object["nome"] = pred2[3][itemclass]
- object["score"] = pred2[1][i].item()
- object["x"] = element[0].item()
- object["y"] = element[1].item()
- object["w"] = element[2].item()
- object["h"] = element[3].item()
- tensor["tensorflow"].append(object)
-
- #print(tensor)
-
- text = json.dumps(tensor)
- return text
-
-
-inputs = [
- gr.inputs.Image(type="filepath", label="Input Image"),
- gr.inputs.Textbox(lines=1, label="Model Path", default="kadirnar/yolox_s-v0.1.1"),
- gr.inputs.Textbox(lines=1, label="Config Path", default="configs.yolox_s"),
- gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
-]
-
-outputs = gr.outputs.Image(type="filepath", label="Output Image")
-title = "SIMULADOR PARA RECONHECIMENTO DE IMAGEM"
-
-examples = [
- ["small-vehicles1.jpeg", "kadirnar/yolox_m-v0.1.1", "configs.yolox_m", 640],
- ["zidane.jpg", "kadirnar/yolox_s-v0.1.1", "configs.yolox_s", 640],
- ["dog.jpg", "kadirnar/yolox_tiny-v0.1.1", "configs.yolox_tiny", 640],
-]
-
-demo_app = gr.Interface(
- fn=yolox_inference,
- inputs=inputs,
- outputs=["text"],
- title=title,
- examples=examples,
- cache_examples=True,
- live=True,
- theme='huggingface',
-)
-try:
- demo_app.launch(debug=True, server_name="192.168.0.153", server_port=8081, enable_queue=True)
-except:
- demo_app.close()
-
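`yolox_inference` above returns its detections as a JSON string keyed by `"tensorflow"`. A small sketch of consuming that payload on the caller's side; the detection values below are made up, not real model output:

```python
import json

# Invented payload in the shape produced by yolox_inference() above.
payload = json.loads(
    '{"tensorflow": [{"classe": 16, "nome": "dog", "score": 0.91,'
    ' "x": 130.0, "y": 220.0, "w": 310.0, "h": 540.0}]}'
)

for det in payload["tensorflow"]:
    # Report each detection; the box fields follow whatever convention the detector emits.
    print(f'{det["nome"]} ({det["score"]:.2f}): '
          f'x={det["x"]}, y={det["y"]}, w={det["w"]}, h={det["h"]}')
```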
diff --git a/spaces/AzinZ/vitscn/README.md b/spaces/AzinZ/vitscn/README.md
deleted file mode 100644
index 4273d7fc9f250f8f53f6cee12b2b6229f1b7eb43..0000000000000000000000000000000000000000
--- a/spaces/AzinZ/vitscn/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Vitscn
-emoji: 🌖
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 3.44.2
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AzulaFire/SparkDebate/utils/API.py b/spaces/AzulaFire/SparkDebate/utils/API.py
deleted file mode 100644
index cbbb280d9538d67a425cf3e5ee937c13645556d8..0000000000000000000000000000000000000000
--- a/spaces/AzulaFire/SparkDebate/utils/API.py
+++ /dev/null
@@ -1,244 +0,0 @@
-
-import base64
-import hmac
-import json
-from datetime import datetime, timezone
-from urllib.parse import urlencode, urlparse
-from websocket import create_connection, WebSocketConnectionClosedException
-from utils.tools import get_prompt, process_response, init_script, create_script
-
-
-class SparkAPI:
- __api_url = 'wss://spark-api.xf-yun.com/v1.1/chat'
- __max_token = 4096
-
- def __init__(self, app_id, api_key, api_secret):
- self.__app_id = app_id
- self.__api_key = api_key
- self.__api_secret = api_secret
-
- def __set_max_tokens(self, token):
- if not isinstance(token, int) or token <= 0:
- print("set_max_tokens() error: tokens should be a positive integer!")
- return
- self.__max_token = token
-
- def __get_authorization_url(self):
- authorize_url = urlparse(self.__api_url)
- # 1. generate data
- date = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S %Z')
-
- """
- Generation rule for the Authorization parameter:
- 1) Obtain the APIKey and APISecret parameters from the console.
- 2) Use the date above to concatenate the signature string. The URL used here is only an example;
- in actual usage, replace the host and path with the specific request URL.
- """
- signature_origin = "host: {}\ndate: {}\nGET {} HTTP/1.1".format(
- authorize_url.netloc, date, authorize_url.path
- )
- signature = base64.b64encode(
- hmac.new(
- self.__api_secret.encode(),
- signature_origin.encode(),
- digestmod='sha256'
- ).digest()
- ).decode()
- authorization_origin = \
- 'api_key="{}",algorithm="{}",headers="{}",signature="{}"'.format(
- self.__api_key, "hmac-sha256", "host date request-line", signature
- )
- authorization = base64.b64encode(
- authorization_origin.encode()).decode()
- params = {
- "authorization": authorization,
- "date": date,
- "host": authorize_url.netloc
- }
-
- ws_url = self.__api_url + "?" + urlencode(params)
- return ws_url
-
- def __build_inputs(
- self,
- message: dict,
- user_id: str = "001",
- domain: str = "general",
- temperature: float = 0.5,
- max_tokens: int = 4096
- ):
- input_dict = {
- "header": {
- "app_id": self.__app_id,
- "uid": user_id,
- },
- "parameter": {
- "chat": {
- "domain": domain,
- "temperature": temperature,
- "max_tokens": max_tokens,
- }
- },
- "payload": {
- "message": message
- }
- }
- return json.dumps(input_dict)
-
- def chat(
- self,
- query: str,
- history: list = None, # store the conversation history
- user_id: str = "001",
- domain: str = "general",
- max_tokens: int = 4096,
- temperature: float = 0.5,
- ):
- if history is None:
- history = []
-
- # the max of max_length is 4096
- max_tokens = min(max_tokens, 4096)
- url = self.__get_authorization_url()
- ws = create_connection(url)
- message = get_prompt(query, history)
- input_str = self.__build_inputs(
- message=message,
- user_id=user_id,
- domain=domain,
- temperature=temperature,
- max_tokens=max_tokens,
- )
- ws.send(input_str)
- response_str = ws.recv()
- try:
- while True:
- response, history, status = process_response(
- response_str, history)
- """
- The final returned result, i.e. the complete reply for this round of the conversation.
- doc url: https://www.xfyun.cn/doc/spark/Web.html#_1-%E6%8E%A5%E5%8F%A3%E8%AF%B4%E6%98%8E
- """
- if len(response) == 0 or status == 2:
- break
- response_str = ws.recv()
- return response
-
- except WebSocketConnectionClosedException:
- print("Connection closed")
- finally:
- ws.close()
- # Stream output statement, used for terminal chat.
-
- def streaming_output(
- self,
- query: str,
- history: list = None, # store the conversation history
- user_id: str = "001",
- domain: str = "general",
- max_tokens: int = 4096,
- temperature: float = 0.5,
- ):
- if history is None:
- history = []
- # the max of max_length is 4096
- max_tokens = min(max_tokens, 4096)
- url = self.__get_authorization_url()
- ws = create_connection(url)
-
- message = get_prompt(query, history)
- input_str = self.__build_inputs(
- message=message,
- user_id=user_id,
- domain=domain,
- temperature=temperature,
- max_tokens=max_tokens,
- )
- # print(input_str)
- # send question or prompt to url, and receive the answer
- ws.send(input_str)
- response_str = ws.recv()
-
- # Continuous conversation
- try:
- while True:
- response, history, status = process_response(
- response_str, history)
- yield response, history
- if len(response) == 0 or status == 2:
- break
- response_str = ws.recv()
-
- except WebSocketConnectionClosedException:
- print("Connection closed")
- finally:
- ws.close()
-
- def chat_stream(self):
- history = []
- try:
- print("输入init来初始化剧本,输入create来创作剧本,输入exit或stop来终止对话\n")
- while True:
- query = input("Ask: ")
- if query == 'init':
- jsonfile = input("请输入剧本文件路径:")
- script_data = init_script(history, jsonfile)
- print(
- f"正在导入剧本{script_data['name']},角色信息:{script_data['characters']},剧情介绍:{script_data['summary']}")
- query = f"我希望你能够扮演这个剧本杀游戏的主持人,我希望你能够逐步引导玩家到达最终结局,同时希望你在游戏中设定一些随机事件,需要玩家依靠自身的能力解决,当玩家做出偏离主线的行为或者与剧本无关的行为时,你需要委婉地将玩家引导至正常游玩路线中,对于玩家需要决策的事件,你需要提供一些行动推荐,下面是剧本介绍:{script_data}"
- if query == 'create':
- name = input('请输入剧本名称:')
- characters = input('请输入角色信息:')
- summary = input('请输入剧情介绍:')
- details = input('请输入剧本细节')
- create_script(name, characters, summary, details)
- print('剧本创建成功!')
- continue
- if query == "exit" or query == "stop":
- break
- for response, _ in self.streaming_output(query, history):
- print("\r" + response, end="")
- print("\n")
- finally:
- print("\nThank you for using the SparkDesk AI. Welcome to use it again!")
-
-
-from langchain.llms.base import LLM
-from typing import Any, List, Mapping, Optional
-class Spark_forlangchain(LLM):
-
- # Class member fields; n is an integer
- n: int
- app_id: str
- api_key: str
- api_secret: str
- # Used to indicate the type of this subclass
-
- @property
- def _llm_type(self) -> str:
- return "Spark"
-
- # Override the base-class method: respond to the user's prompt and return a string
- def _call(
- self,
- query: str,
- history: list = None, # store the conversation history
- user_id: str = "001",
- domain: str = "general",
- max_tokens: int = 4096,
- temperature: float = 0.7,
- stop: Optional[List[str]] = None,
- ) -> str:
- if stop is not None:
- raise ValueError("stop kwargs are not permitted.")
- bot = SparkAPI(app_id=self.app_id, api_key=self.api_key,
- api_secret=self.api_secret)
- response = bot.chat(query, history, user_id,
- domain, max_tokens, temperature)
- return response
-
- # Return a dict containing the LLM's identifying parameters
- @property
- def _identifying_params(self) -> Mapping[str, Any]:
- """Get the identifying parameters."""
- return {"n": self.n}
\ No newline at end of file
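The docstring in `__get_authorization_url` describes the signing rule: HMAC-SHA256 over the host, date, and request line, then base64 encoding. A self-contained sketch of only that step; the key and secret below are placeholders, not real credentials from the provider's console:

```python
import base64
import hmac
from datetime import datetime, timezone
from urllib.parse import urlparse

api_url = "wss://spark-api.xf-yun.com/v1.1/chat"
api_secret = "my-api-secret"  # placeholder

parsed = urlparse(api_url)
date = datetime.now(timezone.utc).strftime("%a, %d %b %Y %H:%M:%S %Z")

# Sign "host + date + request line" with HMAC-SHA256 and base64-encode the digest,
# mirroring what __get_authorization_url does above.
signature_origin = f"host: {parsed.netloc}\ndate: {date}\nGET {parsed.path} HTTP/1.1"
signature = base64.b64encode(
    hmac.new(api_secret.encode(), signature_origin.encode(), digestmod="sha256").digest()
).decode()
print(signature)
```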
diff --git a/spaces/BAAI/vid2vid-zero/vid2vid_zero/pipelines/pipeline_vid2vid_zero.py b/spaces/BAAI/vid2vid-zero/vid2vid_zero/pipelines/pipeline_vid2vid_zero.py
deleted file mode 100644
index 13337c70bd6c24dba39b7c9d43577bbfe2de4bad..0000000000000000000000000000000000000000
--- a/spaces/BAAI/vid2vid-zero/vid2vid_zero/pipelines/pipeline_vid2vid_zero.py
+++ /dev/null
@@ -1,541 +0,0 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-from typing import Callable, List, Optional, Union
-from dataclasses import dataclass
-
-import numpy as np
-import torch
-
-from diffusers.utils import is_accelerate_available
-from packaging import version
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
-
-from diffusers.configuration_utils import FrozenDict
-from diffusers.models import AutoencoderKL # UNet2DConditionModel
-from diffusers.pipeline_utils import DiffusionPipeline
-from diffusers.schedulers import (
- DDIMScheduler,
- DPMSolverMultistepScheduler,
- EulerAncestralDiscreteScheduler,
- EulerDiscreteScheduler,
- LMSDiscreteScheduler,
- PNDMScheduler,
-)
-from diffusers.utils import deprecate, logging, BaseOutput
-from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-
-from einops import rearrange
-
-from ..models.unet_2d_condition import UNet2DConditionModel
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-@dataclass
-class Vid2VidZeroPipelineOutput(BaseOutput):
- images: Union[torch.Tensor, np.ndarray]
-
-
-class Vid2VidZeroPipeline(DiffusionPipeline):
- r"""
- Pipeline for text-to-image generation using Stable Diffusion.
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
- text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder. Stable Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- safety_checker ([`StableDiffusionSafetyChecker`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
- feature_extractor ([`CLIPFeatureExtractor`]):
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
- """
- _optional_components = ["safety_checker", "feature_extractor"]
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- scheduler: Union[
- DDIMScheduler,
- PNDMScheduler,
- LMSDiscreteScheduler,
- EulerDiscreteScheduler,
- EulerAncestralDiscreteScheduler,
- DPMSolverMultistepScheduler,
- ],
- safety_checker: StableDiffusionSafetyChecker,
- feature_extractor: CLIPFeatureExtractor,
- requires_safety_checker: bool = False,
- ):
- super().__init__()
-
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
- " file"
- )
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["steps_offset"] = 1
- scheduler._internal_dict = FrozenDict(new_config)
-
- if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
- )
- deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["clip_sample"] = False
- scheduler._internal_dict = FrozenDict(new_config)
-
- if safety_checker is None and requires_safety_checker:
- logger.warning(
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
- )
-
- if safety_checker is not None and feature_extractor is None:
- raise ValueError(
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
- )
-
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
- version.parse(unet.config._diffusers_version).base_version
- ) < version.parse("0.9.0.dev0")
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
- deprecation_message = (
- "The configuration file of the unet has set the default `sample_size` to smaller than"
- " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
- " the `unet/config.json` file"
- )
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(unet.config)
- new_config["sample_size"] = 64
- unet._internal_dict = FrozenDict(new_config)
-
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
- self.register_to_config(requires_safety_checker=requires_safety_checker)
-
- def enable_vae_slicing(self):
- r"""
- Enable sliced VAE decoding.
-
- When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
- steps. This is useful to save some memory and allow larger batch sizes.
- """
- self.vae.enable_slicing()
-
- def disable_vae_slicing(self):
- r"""
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
- computing decoding in one step.
- """
- self.vae.disable_slicing()
-
- def enable_sequential_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
- text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
- `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
- """
- if is_accelerate_available():
- from accelerate import cpu_offload
- else:
- raise ImportError("Please install accelerate via `pip install accelerate`")
-
- device = torch.device(f"cuda:{gpu_id}")
-
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
- if cpu_offloaded_model is not None:
- cpu_offload(cpu_offloaded_model, device)
-
- if self.safety_checker is not None:
- # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
- # fix by only offloading self.safety_checker for now
- cpu_offload(self.safety_checker.vision_model, device)
-
- @property
- def _execution_device(self):
- r"""
- Returns the device on which the pipeline's models will be executed. After calling
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
- hooks.
- """
- if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
- return self.device
- for module in self.unet.modules():
- if (
- hasattr(module, "_hf_hook")
- and hasattr(module._hf_hook, "execution_device")
- and module._hf_hook.execution_device is not None
- ):
- return torch.device(module._hf_hook.execution_device)
- return self.device
-
- def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt, uncond_embeddings=None):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
- prompt (`str` or `list(int)`):
- prompt to be encoded
- device: (`torch.device`):
- torch device
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- """
- batch_size = len(prompt) if isinstance(prompt, list) else 1
-
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = text_inputs.attention_mask.to(device)
- else:
- attention_mask = None
-
- text_embeddings = self.text_encoder(
- text_input_ids.to(device),
- attention_mask=attention_mask,
- )
- text_embeddings = text_embeddings[0]
-
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- # num_videos_per_prompt = 1, thus nothing happens here
- bs_embed, seq_len, _ = text_embeddings.shape
- text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1)
- text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_tokens = negative_prompt
-
- max_length = text_input_ids.shape[-1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pt",
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = uncond_input.attention_mask.to(device)
- else:
- attention_mask = None
-
- uncond_embeddings = self.text_encoder(
- uncond_input.input_ids.to(device),
- attention_mask=attention_mask,
- )
- uncond_embeddings = uncond_embeddings[0]
-
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = uncond_embeddings.shape[1]
- uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1)
- uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- return text_embeddings
-
- def run_safety_checker(self, image, device, dtype):
- if self.safety_checker is not None:
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
- image, has_nsfw_concept = self.safety_checker(
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
- )
- else:
- has_nsfw_concept = None
- return image, has_nsfw_concept
-
- def decode_latents(self, latents):
- video_length = latents.shape[2]
- latents = 1 / 0.18215 * latents
- latents = rearrange(latents, "b c f h w -> (b f) c h w")
- video = self.vae.decode(latents).sample
- video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
- video = (video / 2 + 0.5).clamp(0, 1)
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
- video = video.cpu().float().numpy()
- return video
-
- def prepare_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
-
- def check_inputs(self, prompt, height, width, callback_steps):
- if not isinstance(prompt, str) and not isinstance(prompt, list):
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None):
- shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor)
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- if latents is None:
- rand_device = "cpu" if device.type == "mps" else device
-
- if isinstance(generator, list):
- shape = (1,) + shape[1:]
- latents = [
- torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)
- for i in range(batch_size)
- ]
- latents = torch.cat(latents, dim=0).to(device)
- else:
- latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)
- else:
- if latents.shape != shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
- latents = latents.to(device)
-
- # scale the initial noise by the standard deviation required by the scheduler
- latents = latents * self.scheduler.init_noise_sigma
- return latents
-
- @torch.no_grad()
- def __call__(
- self,
- prompt: Union[str, List[str]],
- video_length: Optional[int],
- height: Optional[int] = None,
- width: Optional[int] = None,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_videos_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- latents: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "tensor",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: Optional[int] = 1,
- uncond_embeddings: torch.Tensor = None,
- null_uncond_ratio: float = 1.0,
- **kwargs,
- ):
- # Default height and width to unet
- height = height or self.unet.config.sample_size * self.vae_scale_factor
- width = width or self.unet.config.sample_size * self.vae_scale_factor
-
- # Check inputs. Raise error if not correct
- self.check_inputs(prompt, height, width, callback_steps)
-
- # Define call parameters
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
- device = self._execution_device
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # Encode input prompt
- with_uncond_embedding = do_classifier_free_guidance if uncond_embeddings is None else False
- text_embeddings = self._encode_prompt(
- prompt, device, num_videos_per_prompt, with_uncond_embedding, negative_prompt,
- )
-
- # Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps = self.scheduler.timesteps
-
- # Prepare latent variables
- num_channels_latents = self.unet.in_channels
- latents = self.prepare_latents(
- batch_size * num_videos_per_prompt,
- num_channels_latents,
- video_length,
- height,
- width,
- text_embeddings.dtype,
- device,
- generator,
- latents,
- )
- latents_dtype = latents.dtype
-
- # Prepare extra step kwargs.
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # Denoising loop
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- if uncond_embeddings is not None:
- start_time = 50
- assert (timesteps[-start_time:] == timesteps).all()
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- if uncond_embeddings is not None:
- use_uncond_this_step = True
- if null_uncond_ratio > 0:
- if i > len(timesteps) * null_uncond_ratio:
- use_uncond_this_step = False
- else:
- if i < len(timesteps) * (1 + null_uncond_ratio):
- use_uncond_this_step = False
- if use_uncond_this_step:
- text_embeddings_input = torch.cat([uncond_embeddings[i].expand(*text_embeddings.shape), text_embeddings])
- else:
- uncond_embeddings_ = self._encode_prompt('', device, num_videos_per_prompt, False, negative_prompt)
- text_embeddings_input = torch.cat([uncond_embeddings_.expand(*text_embeddings.shape), text_embeddings])
- else:
- text_embeddings_input = text_embeddings
-
- # predict the noise residual
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings_input).sample.to(dtype=latents_dtype)
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # call the callback, if provided
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
- progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- # Post-processing
- images = self.decode_latents(latents)
-
- # Convert to tensor
- if output_type == "tensor":
- images = torch.from_numpy(images)
-
- if not return_dict:
- return images
-
- return Vid2VidZeroPipelineOutput(images=images)
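The denoising loop above combines the unconditional and text-conditioned noise predictions with `guidance_scale` (classifier-free guidance). A toy sketch of that single step with random stand-in tensors; the shapes are arbitrary, not taken from a real checkpoint:

```python
import torch

# Random stand-ins for the UNet's two noise predictions (batch, channels, frames, h, w).
noise_pred_uncond = torch.randn(1, 4, 8, 64, 64)
noise_pred_text = torch.randn(1, 4, 8, 64, 64)
guidance_scale = 7.5

# Classifier-free guidance: start from the unconditional prediction and push it
# toward the text-conditioned one, scaled by guidance_scale.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 4, 8, 64, 64])
```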
diff --git a/spaces/Benson/text-generation/Examples/2023 Songs Download.md b/spaces/Benson/text-generation/Examples/2023 Songs Download.md
deleted file mode 100644
index d3634fba765f2796e71f53a0fe537d1a0a7c0955..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/2023 Songs Download.md
+++ /dev/null
@@ -1,91 +0,0 @@
-
-# How to Download Songs in 2023: A Guide for Music Lovers
-Music is one of the most universal forms of entertainment and expression. Whether you want to relax, dance, study, work out, or simply enjoy some tunes, music can improve your mood and your experience. But how do you get access to your favorite songs in 2023? Do you stream them online or download them to your device?
-In this article, we will explore the pros and cons of downloading music versus streaming. We will also compare the best music streaming services in 2023 and show you how to download music legally and safely. By the end of this article, you will have a better idea of how to enjoy music in 2023.
-## Why download music instead of streaming?
-Streaming music is convenient and popular. You can listen to millions of songs on demand without having to buy or store them. You can also discover new music based on your preferences and recommendations. However, streaming music also has some drawbacks.
-First, streaming music requires an internet connection. If you have a slow or unstable connection, you may experience buffering or interruptions. If you have a limited data plan, you may also incur extra charges for streaming music. Second, streaming music depends on the availability and quality of the service. If the service changes its terms, prices, features, or catalog, you may lose access to some songs or playlists. Third, streaming music does not give you ownership of the music. You only rent it for as long as you pay the subscription.
-
-## What are the best music streaming services in 2023?
-If you prefer to stream music rather than download it, you have plenty of options to choose from. There are many music streaming services in 2023 that cater to different tastes and needs. These are some of the most popular, compared in the table below:
-
-| Service | Features | Price | Audio quality | Catalog size |
-| --- | --- | --- | --- | --- |
-| Spotify | Personalized playlists and recommendations; podcasts and videos; social features and integrations; offline mode and cross-device sync; Spotify Connect and Spotify Kids | Free (with ads and limited skips); Premium: $9.99/month (individual), $14.99/month (family), $4.99/month (student), $12.99/month (duo); HiFi: $19.99/month (coming soon) | Free: 160 kbps; Premium: 320 kbps; HiFi: lossless CD quality | Over 70 million songs; over 2.2 million podcasts |
-| Apple Music | Playlists and radio stations; live and on-demand shows; lyrics and music videos; offline mode and cross-device sync; Apple Music 1, Hits, and Country | Free 3-month trial; Individual: $9.99/month; Family: $14.99/month; Student: $4.99/month; Apple One bundle: from $14.95/month | 256 kbps AAC; Spatial Audio with Dolby Atmos | Over 75 million songs; over 1 million podcasts |
-| Tidal | Curated playlists and editorial content; exclusive releases and concerts; offline mode and cross-device sync; Tidal X and Tidal Connect | 30-day free trial; Premium: $9.99/month (individual), $14.99/month (family), $4.99/month (student), $5.99/month (military), $5.99/month (first responder); HiFi: $19.99/month (individual), $29.99/month (family), $9.99/month (student), $11.99/month (military), $11.99/month (first responder) | | Over 70 million songs; over 250,000 music videos |
-| Amazon Music | Personalized playlists and stations; podcasts and live streams; lyrics and music videos; offline mode and cross-device sync; Alexa voice control | Free (with ads and limited skips); Prime Music: included with Prime membership ($12.99/month or $119/year); Unlimited: $9.99/month ($7.99/month for Prime members) or $79/year ($69/year for Prime members); HD: $14.99/month ($12.99/month for Prime members) or $149/year ($129/year for Prime members) | Free: up to 256 kbps; Prime Music: up to 256 kbps; Unlimited: up to 256 kbps; HD: up to 850 kbps, lossless CD quality and Ultra HD quality | Free: over 2 million songs; Prime Music: over 2 million songs; Unlimited: over 75 million songs; HD: over 75 million songs in HD and Ultra HD, and over 7 million songs in 3D Audio |
-| YouTube Music | Personalized playlists and mixes; music videos and live performances; offline mode and cross-device sync; YouTube Premium benefits | Free (with ads and no background play); Premium: $9.99/month (individual), $14.99/month (family), $4.99/month (student) | Free: up to 128 kbps AAC; Premium: up to 256 kbps AAC | Over 70 million songs; over 2 billion music videos |
-
-## How to download music legally and safely?
-Downloading music can be a great way to enjoy your favorite songs offline, but you have to be careful about the sources you use. Not every website that offers music downloads is legal or safe. Some may violate the copyrights of the artists or labels, or may contain malware or viruses that can harm your device.
-To avoid illegal or unsafe downloads, you should follow these tips:
-
-- Check the website's reputation and reviews before downloading anything.
-- Read the website's terms and conditions and the music license before downloading anything.
-- Use reliable antivirus software and scan downloaded files before opening them.
-
-If you want to download music legally and safely, you can use some of the websites that offer free or paid music downloads with the permission of the artists or labels. Here are some examples:
-
-- Bandcamp: Bandcamp is a platform that lets independent artists and labels sell their music directly to fans. You can download music in several formats, including MP3, FLAC, and WAV. Some artists offer their music for free or on a name-your-price basis, while others charge a fixed amount.
-- DatPiff: DatPiff is a website that specializes in hip-hop and rap music. You can download mixtapes, albums, and singles for free or for a fee. DatPiff has permission from the artists and labels to distribute their music.
-- Free Music Archive: Free Music Archive is a website that offers free music downloads across many genres and styles. The music is licensed under Creative Commons or other public-domain licenses, which means you can use it for personal or commercial purposes as long as you follow the license terms.
-- Internet Archive: Internet Archive is a website that preserves digital content from many sources, including music. You can download music from several collections, such as the Live Music Archive, Netlabels, and 78 RPMs and Cylinder Recordings. The music is in the public domain or under Creative Commons or other open licenses.
-- iTunes: iTunes is a software program and website that lets you buy and download music from many artists and labels. You can download music in AAC format, which is compatible with most devices. You can also sync your music library across your devices using iCloud.
-
-## Conclusion
-Downloading music in 2023 can be a great way to enjoy your favorite songs offline, but you have to be careful about the sources you use. Not every website that offers music downloads is legal or safe. Always check the website's reputation and reviews, read the terms and conditions of the music license, and scan downloaded files before opening them.
-
-If you want to download music legally and safely, you can use some of the websites that offer free or paid music downloads with the permission of the artists or labels, such as Bandcamp, DatPiff, Free Music Archive, Internet Archive, or iTunes. You can also use some of the best music streaming services in 2023, such as Spotify, Apple Music, Tidal, Amazon Music, or YouTube Music, which offer offline mode and high-quality audio.
-Whether you stream or download music in 2023, you have plenty of options to choose from. You can listen to millions of songs from many genres and styles on demand. You can also discover new music based on your preferences and recommendations. Music is one of the best ways to enjoy life in 2023.
-## Frequently asked questions
-
-Q: How can I download music for free?
-A: You can download music for free from websites that have permission from the artists or that host public-domain music, such as Free Music Archive, Internet Archive, or DatPiff.
-Q: How can I download music in high-resolution audio?
-A: You can download music in high-resolution audio from websites that offer lossless or high-resolution formats, such as Tidal, Qobuz, or HDtracks.
-Q: How can I download music to my iPhone or Android phone?
-A: You can download music to your iPhone or Android phone from music streaming apps that have an offline mode, such as Spotify, Apple Music, or YouTube Music. You can also transfer music from your computer to your phone using iTunes or a USB cable.
-Q: How can I download music to my computer or laptop?
-A: You can download music to your computer or laptop from websites that offer music downloads, such as Bandcamp, iTunes, or Amazon Music. You can also use a software program that can rip CDs or DVDs to your computer.
-Q: How can I download music videos?
-A: You can download music videos from websites that offer video downloads, such as YouTube, Vimeo, or Dailymotion. You can also use a software program that can convert videos to audio files.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Alice Blue Apk Descargar.md b/spaces/Benson/text-generation/Examples/Alice Blue Apk Descargar.md
deleted file mode 100644
index 389f40939f36ec0e84270bc4089eb6a032ff4996..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Alice Blue Apk Descargar.md
+++ /dev/null
@@ -1,89 +0,0 @@
-
-# Alice Blue APK Download: How to Trade Stocks and Commodities on Your Phone
-Are you looking for a way to trade stocks and commodities on your phone? Do you want access to the latest market updates, news, and analysis anywhere, anytime? If so, you should consider downloading Alice Blue APK, the mobile trading app from Alice Blue, one of the leading online brokers in India.
-## What is Alice Blue?
-### A brief introduction to Alice Blue and its services
-Alice Blue is an online brokerage firm that offers a range of services for traders and investors, such as stocks, commodities, currencies, mutual funds, IPOs, insurance, and more. Alice Blue was founded in 2006 and has more than 20 branches across India. Alice Blue is known for its low brokerage charges, high exposure, fast execution, and innovative trading platforms.
-### The benefits of trading with Alice Blue
-Some of the benefits of trading with Alice Blue are:
-
-- You can trade across multiple segments and exchanges, such as NSE, BSE, MCX, NCDEX, etc.
-- You can enjoy zero brokerage on equity delivery and a flat Rs.15 per order in other segments.
-- You can get up to 20x leverage on intraday trades and up to 5x leverage on delivery trades.
-- You can access various research reports, recommendations, tips, and strategies from experts.
-- You can use several trading platforms, such as ANT Web, ANT Desk, ANT Meta, etc.
-
-## What is ANT Mobi 2.0?
-### A brief introduction to ANT Mobi 2.0 and its features
-ANT Mobi 2.0 is Alice Blue's mobile trading app, which lets you trade stocks and commodities on your phone. ANT Mobi 2.0 is an upgraded version of ANT Mobi with a better user experience, up-to-date technology, and improved trading features. ANT Mobi 2.0 is compatible with Android devices and is available as a free download.
-### The advantages of using ANT Mobi 2.0 for trading
-
-- You can trade anytime, anywhere, with a simple tap on your phone.
-- You can get real-time market data, charts, indicators, news, alerts, etc.
-- You can place orders, modify orders, cancel orders, view your order history, etc.
-- You can monitor your portfolio, holdings, positions, margin, etc.
-- You can transfer funds easily and securely with UPI or NEFT/RTGS.
-- You can use fingerprint login for extra security and convenience.
-- You can get predefined support and resistance levels for any stock.
-
-## How to download and install Alice Blue APK on your phone?
-### Steps to download and install Alice Blue APK from the official website
-If you want to download and install Alice Blue APK from the official website, you can follow these steps:
-
-1. Go to the official Alice Blue website at https://aliceblueonline.com/ .
-2. Click the "Download" button in the top-right corner of the home page.
-3. Scroll down and find the "ANT Mobi 2.0" section.
-4. Click the "Download APK" button and save the file to your phone.
-5. Go to your phone settings and allow the installation of apps from unknown sources.
-6. Locate the downloaded file and tap it to install it.
-7. Open the app and log in with your Alice Blue credentials or create a new account.
-
-### Steps to download and install Alice Blue APK from the Google Play Store
-If you want to download and install Alice Blue APK from the Google Play Store, you can follow these steps:
-
-1. Go to the Google Play Store app on your phone or visit https://play.google.com/store/apps/apps/details?id=com.aliceblue.antmobi .
-2. Search for "ANT Mobi 2.0" or "Alice Blue" in the search bar.
-3. Select the app from the search results and tap the "Install" button.
-4. Wait for the app to download and install on your phone.
-
-
-How to use the Alice Blue APK for trading?
-The basic functions and options of the Alice Blue APK
-The Alice Blue APK has a simple, user-friendly interface that lets you trade easily and efficiently. Some of the basic functions and options of the Alice Blue APK are:
-
-
- You can access different segments and exchanges by tapping the menu icon in the top left corner of the app.
- You can add or remove stocks from your watchlist by tapping the "+" or "-" icons in the top right corner of the app.
- You can view the market depth, charts, news, etc. of any stock by tapping it in your watchlist.
- You can place an order by tapping the "Buy" or "Sell" buttons at the bottom of the app.
- You can modify or cancel an order by tapping the "Orders" option at the bottom of the app.
- You can view your portfolio, holdings, positions, margin, etc. by tapping the "Profile" option at the bottom of the app.
- You can transfer funds by tapping the "Funds" option at the bottom of the app.
- Tips and tricks to get the most out of the Alice Blue APK
-The Alice Blue APK is a powerful and versatile app that can help you trade better and smarter. Here are some tips and tricks to get the most out of the Alice Blue APK:
-
- You can customize your watchlist by adding or removing stocks, changing the order, sorting by different parameters, etc.
- You can use various chart types, time frames, indicators, drawing tools, etc. to analyze the price movements of any stock.
- You can use the "Scanner" option to find stocks that match your criteria based on various filters, such as volume, price, breakout, etc.
- You can use the "Strategy" option to create and test your own trading strategies based on various indicators, conditions, and parameters.
-
- You can use the "News" option to stay up to date with the latest market news, events, announcements, etc.
- You can use the "Support" option to chat with Alice Blue's customer support team or access the FAQ section.
-
-Conclusion
-A summary of the main points and a call to action
-The Alice Blue APK is a must-have app for anyone who wants to trade stocks and commodities on their phone. It is a fast, easy, and convenient way to access the markets and execute trades. It has many features and functions that can help you trade better and smarter. It is also free to download and use. So what are you waiting for? Download the Alice Blue APK today and start trading!
-Frequently asked questions
-Q1. Is the Alice Blue APK safe and secure?
-A1. Yes, the Alice Blue APK is safe and secure. It uses encryption and authentication technologies to protect your data and transactions. It also complies with all regulatory standards of SEBI and other authorities.
-Q2. What are the charges and fees for trading with Alice Blue?
-A2. Alice Blue charges zero brokerage on equity delivery and a flat Rs.15 per order in other segments. It also charges other statutory fees, such as GST, STT, stamp duty, etc. You can check the detailed brokerage calculator on the Alice Blue website or app.
-Q3. How can I contact Alice Blue customer support?
-A3. You can contact Alice Blue customer support using the "Support" option in the app or by calling 080-6155-5000 or 080-6815-5000. You can also email them at support@aliceblueindia.com or visit their website at https://aliceblueonline.com/.
-Q4. What are the system requirements for the Alice Blue APK?
-A4. The Alice Blue APK requires Android 5.0 or higher and a minimum of 50 MB of free space on your phone.
-Q5. Can I use the Alice Blue APK on other devices? 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Carx Street Mod Apk 1.74.6 (dinero Ilimitado).md b/spaces/Benson/text-generation/Examples/Carx Street Mod Apk 1.74.6 (dinero Ilimitado).md
deleted file mode 100644
index 8d6ab719737e8430e3efcb8bdb8e5f04e1b69840..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Carx Street Mod Apk 1.74.6 (dinero Ilimitado).md
+++ /dev/null
@@ -1,68 +0,0 @@
-
-CarX Street Mod APK 1.74.6 (Unlimited Money) Download for Android
- Introduction
- If you are a fan of car racing games, you must have heard of CarX Street, one of the most exciting and realistic racing games on mobile devices. In this game, you can explore different cities, customize your car, and compete with other players in various modes and challenges. However, if you want to enjoy the game to the fullest, you may need a lot of money to unlock new cars, tracks, and upgrades. That is why we recommend downloading CarX Street Mod APK, a modified version of the game that gives you unlimited money and access to all of the game's features. In this article, we will tell you what CarX Street Mod APK is, why you should download it, and how to install it on your Android device.
- What is CarX Street?
- CarX Street is a car racing game developed by CarX Technologies, the same company that created the popular CarX Drift Racing series. In this game, you can experience the thrill of street racing in various cities around the world, such as Tokyo, Los Angeles, Moscow, and more. You can choose from more than 50 cars from different brands and categories, such as sports cars, muscle cars, supercars, and classic cars. You can also customize your car with different parts, colors, stickers, and decals to make it unique and suit your style.
-carx street mod apk 1.74.6 (unlimited money) Download File ✔ https://bltlly.com/2v6Li4
- What is CarX Street Mod APK?
- CarX Street Mod APK is a modified version of the original CarX Street game that gives you unlimited money and access to all of the game's features. With this mod, you can buy any car you want, upgrade it to the maximum level, and unlock all tracks and modes without spending a penny. You can also enjoy the game without ads or interruptions.
- Why download CarX Street Mod APK?
- There are many reasons why you should download CarX Street Mod APK instead of the original game. Here are some of them:
-
-
-You can unlock all tracks and modes without completing any missions or achievements.
-You can enjoy the game without ads or interruptions.
-You can play the game without an internet connection.
-You can get rid of any bugs or glitches that may affect your gameplay.
-
- Features of CarX Street Mod APK
- CarX Street Mod APK has many features that make it one of the best car racing games on Android. Here are some of them:
- Unlimited money
- With CarX Street Mod APK, you can get unlimited money to buy and upgrade any car you want. You can also unlock all tracks and modes without completing any missions or achievements. You do not have to worry about running out of money or resources in the game.
- Realistic physics and graphics
- CarX Street Mod APK has realistic physics and graphics that make you feel as if you were driving a real car on a real street. You can see the details of your car, such as the engine, suspension, tires, brakes, and more. You can also see the effects of weather, lighting, shadows, smoke, dust, and damage on your car and the environment. You can adjust the graphics settings according to your device's performance.
- Customizable cars and tracks
- CarX Street Mod APK lets you customize your car with different parts, colors, stickers, and decals to make it unique and suit your style. You can change the engine, transmission, suspension, brakes, tires, wheels, exhaust, body kit, spoiler, hood, lights, and more. You can also paint your car with different colors and patterns, or apply stickers and decals from various brands and themes. You can also customize your tracks with different weather conditions, time of day, traffic density, and obstacles.
-
- Multiple game modes and challenges
-
-
-Career mode: In this mode, you can complete various missions and achievements to earn money and reputation. You can also unlock new cars, tracks, and upgrades as you progress.
-Free ride mode: In this mode, you can explore the city and enjoy the scenery at your own pace. You can also perform stunts and drifts to earn extra money and reputation.
-Time trial mode: In this mode, you can race against the clock and try to beat your own records or those of other players. You can also compare your results with the global leaderboards.
-Drift mode: In this mode, you can show off your drifting skills and earn points based on your speed, angle, and distance. You can also compete with other players in online battles.
-Drag mode: In this mode, you can race in a straight line and try to beat your opponent by shifting gears at the right time. You can also challenge other players in online drag races.
-
- Online multiplayer and leaderboards
- CarX Street Mod APK lets you play with other players from all over the world in online multiplayer mode. You can join or create a room and invite your friends or random players to race or drift with you. You can also chat with other players and send them emojis or stickers. You can also check the global leaderboards and see how you rank among the best players in the world. You can also earn rewards and trophies based on your performance.
- How to download and install CarX Street Mod APK?
- If you want to download and install CarX Street Mod APK on your Android device, you need to follow these simple steps:
- Step 1: Download the APK file from a trusted source
- The first step is to download the CarX Street Mod APK file from a trusted source. You can use the link below to download the latest version of the mod:
- https://bltlly.com/2v6Mzf
- What are mods and why use them?
-Mods is short for modifications, which are changes or additions to a game that are not part of the original version. Mods can be created by anyone who has the skills and tools to do so, but they are usually created by fans or hobbyists who want to improve or customize their gaming experience. Some mods can even become fully standalone games, such as Counter-Strike, Dota 2, and Team Fortress, which started as mods for other games.
-There are many reasons why you might want to use mods for TABS. Some of the benefits of mods are:
-
-They can add new content to the game, such as units, factions, maps, weapons, scenarios, etc.
-They can improve the game's graphics, sound, or performance.
-They can fix bugs or glitches that the developers have not addressed.
-They can change the game's gameplay or difficulty according to your preferences.
-They can make the game more realistic or immersive.
-They can make the game more fun or absurd.
-
-
- Where to find mods for TABS?
-There are many websites where you can find mods for TABS, but two of the most popular and reliable are Nexus Mods and Steam Workshop. These are online platforms where mod creators can upload their work and share it with other players. You can browse thousands of mods for TABS and other games on these sites and download them for free.
- Nexus Mods
-Nexus Mods is one of the largest and oldest modding communities on the internet. It hosts more than 300,000 mods for more than 1,000 games, including TABS. You can find all kinds of mods for TABS on Nexus Mods, from new units and factions to custom maps and scenarios. To access Nexus Mods, you need to create a free account on their website. You can also use their mod manager software called Vortex, which makes installing and managing mods easier.
-
- Steam Workshop
-Steam Workshop is another popular source of mods for TABS and other games that support modding on Steam. Steam Workshop is integrated with Steam, so you do not need a separate account or software to use it. You can find and subscribe to mods for TABS on Steam Workshop, and they will be downloaded and installed automatically when you launch the game. You can also rate, comment on, and favorite the mods you like.
-How to install mods from Nexus Mods?
-There are two ways to install mods from Nexus Mods: manually or using Vortex. Here are the steps for each method:
- Manual method
-
-Download the mod file from Nexus Mods. It will usually be in ZIP, RAR, or 7Z format.
-Extract the mod file using a program such as WinRAR or 7-Zip. You should see a folder with the mod's name and some files inside.
-
-Launch TABS and go to the Mods menu. You should see the mod you installed in the list. Activate it by clicking on it.
-Enjoy your modded game!
-
- Vortex method
-
-Download and install Vortex from Nexus Mods. It is a free, easy-to-use mod manager that works with many games, including TABS.
-Open Vortex and log in with your Nexus Mods account.
-Go to the Games section and search for TABS. If you have TABS installed on Steam, Vortex should detect it automatically. If not, you can add it manually by browsing to its location.
-Select TABS and click Manage. Vortex will set up the game for modding and create a Mods folder for it.
-Go to the Mods section and click Install From File. Browse to the mod file you downloaded from Nexus Mods and select it. Vortex will install the mod for you.
-Go to the Plugins section and enable the mod by clicking the toggle button next to it.
-Launch TABS from Vortex and go to the Mods menu. You should see the mod you installed in the list. Activate it by clicking on it.
-Enjoy your modded game!
-
- How to install mods from Steam Workshop?
-Installing mods from Steam Workshop is much simpler than installing mods from Nexus Mods. You do not need to download or extract any files, or use any software. All you need to do is:
-
-Go to the Steam Workshop page for TABS and browse the available mods. You can sort them by popularity, rating, date, etc.
-Find a mod you like and click on it. You will see a description, screenshots, videos, comments, and ratings for the mod.
-If you want to install the mod, click the Subscribe button. This will add the mod to your subscribed list and download it automatically.
-Launch TABS and go to the Mods menu. You should see the mod you subscribed to in the list. Activate it by clicking on it.
-Enjoy your modded game!
-
- How to uninstall mods?
-If you want to uninstall or remove any mod from your game, you can do so by following these steps:
- Nexus Mods
-If you installed mods manually, you can uninstall them by deleting their folders from the TABS mods folder. If you installed mods using Vortex, you can uninstall them by disabling them in the Plugins section and then removing them in the Mods section.
- Steam Workshop
-If you subscribed to mods on Steam Workshop, you can unsubscribe by going to their page and clicking the Unsubscribe button. This will remove them from your subscribed list and delete them automatically.
- Tips and warnings
-Before downloading and installing any mod for TABS, here are some tips and warnings to keep in mind:
-
-Always read the mod's description, reviews, and instructions carefully before installing any mod. Some mods may have special requirements, instructions, or compatibility issues you should know about.
-Always back up your game files before making any changes. This way, you can restore your game to its original state if something goes wrong or if you do not like a mod.
-Do not install too many mods at once. This can cause performance issues, crashes, or conflicts between the mods. Try installing only a few mods at a time and test them before adding more.
-If you run into any problems with a mod, try disabling or uninstalling it and see if that fixes the issue.
-If you have any questions or feedback about a mod, you can contact the mod's creator or leave a comment on its page. They may be able to help you or update their mod accordingly.
-
-
- Conclusion
-Mods are a great way to enhance your gaming experience in TABS. They can add new content, features, and challenges to the game, making it more fun and diverse. You can find and download mods for TABS from Nexus Mods and Steam Workshop, which are two of the most popular and reliable sources of mods for many games. You can also install and uninstall mods easily using manual methods or software such as Vortex. However, you should always be careful and responsible when using mods, as they can cause problems or conflicts with the game or other mods. You should always read the mod's description, reviews, and instructions carefully, back up your game files, test your mods, and respect the mod creators and other players.
-We hope this article has helped you learn how to download mods for TABS. If you have any questions or suggestions, feel free to leave a comment below. Happy modding!
- Frequently asked questions
-Here are some of the most frequently asked questions and answers about downloading mods for TABS:
- Q: What are the best mods for TABS?
-A: This is a subjective question, as different players may have different preferences and tastes when it comes to mods. However, some of the most popular and highly rated mods for TABS are:
-
-The Dynasty Update: This mod adds a new faction called Dynasty, which is based on ancient China. It includes new units, weapons, maps, and scenarios.
-The Modern Faction: This mod adds a new faction called Modern, which is based on modern warfare. It includes new units, weapons, vehicles, and maps.
-The Wild West Update: This mod adds a new faction called Wild West, which is based on the American frontier. It includes new units, weapons, maps, and scenarios.
-The Fantasy Faction: This mod adds a new faction called Fantasy, which is based on fantasy and mythology. It includes new units, weapons, creatures, and maps.
-
-
- Q: How do I update my mods?
-A: If you downloaded your mods from Nexus Mods, you can check for updates by going to the Mods section in Vortex and clicking the Check for Updates button. If any updates are available, you can download and install them by clicking the Install Update button. If you downloaded your mods from Steam Workshop, you do not need to do anything, as Steam will automatically update your subscribed mods when updates become available.
- Q: How can I create my own mods?
-A: If you want to create your own mods for TABS, you will need some skills and tools to do so. You will need to know how to use Unity, which is the game engine TABS is built on. You will also need to download the TABS Modding Kit, which is a collection of files and scripts that help you create and test your mods. You can find tutorials and guides on how to use these tools on YouTube or Reddit. You can also join the TABS Modding Discord, where you can chat with other modders and get help or feedback.
- Q: How do I share my mods with others?
-A: If you want to share your mods with others, you can upload them to Nexus Mods or Steam Workshop. To upload your mods to Nexus Mods, you will need to create an account on their website and follow their guidelines on how to upload your files. To upload your mods to Steam Workshop, you will need to have a Steam account and follow their instructions on how to publish your items.
- Q: How do I report a problem with a mod? 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/tz/_common.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/tz/_common.py
deleted file mode 100644
index e6ac11831522b266114d5b68ee1da298e3aeb14a..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/tz/_common.py
+++ /dev/null
@@ -1,419 +0,0 @@
-from six import PY2
-
-from functools import wraps
-
-from datetime import datetime, timedelta, tzinfo
-
-
-ZERO = timedelta(0)
-
-__all__ = ['tzname_in_python2', 'enfold']
-
-
-def tzname_in_python2(namefunc):
- """Change unicode output into bytestrings in Python 2
-
- tzname() API changed in Python 3. It used to return bytes, but was changed
- to unicode strings
- """
- if PY2:
- @wraps(namefunc)
- def adjust_encoding(*args, **kwargs):
- name = namefunc(*args, **kwargs)
- if name is not None:
- name = name.encode()
-
- return name
-
- return adjust_encoding
- else:
- return namefunc
-
-
-# The following is adapted from Alexander Belopolsky's tz library
-# https://github.com/abalkin/tz
-if hasattr(datetime, 'fold'):
-    # This is the Python 3.6+ situation, where datetime natively supports the fold attribute
- def enfold(dt, fold=1):
- """
- Provides a unified interface for assigning the ``fold`` attribute to
- datetimes both before and after the implementation of PEP-495.
-
- :param fold:
- The value for the ``fold`` attribute in the returned datetime. This
- should be either 0 or 1.
-
- :return:
- Returns an object for which ``getattr(dt, 'fold', 0)`` returns
- ``fold`` for all versions of Python. In versions prior to
- Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
- subclass of :py:class:`datetime.datetime` with the ``fold``
- attribute added, if ``fold`` is 1.
-
- .. versionadded:: 2.6.0
- """
- return dt.replace(fold=fold)
-
-else:
- class _DatetimeWithFold(datetime):
- """
- This is a class designed to provide a PEP 495-compliant interface for
- Python versions before 3.6. It is used only for dates in a fold, so
- the ``fold`` attribute is fixed at ``1``.
-
- .. versionadded:: 2.6.0
- """
- __slots__ = ()
-
- def replace(self, *args, **kwargs):
- """
- Return a datetime with the same attributes, except for those
- attributes given new values by whichever keyword arguments are
- specified. Note that tzinfo=None can be specified to create a naive
- datetime from an aware datetime with no conversion of date and time
- data.
-
- This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
- return a ``datetime.datetime`` even if ``fold`` is unchanged.
- """
- argnames = (
- 'year', 'month', 'day', 'hour', 'minute', 'second',
- 'microsecond', 'tzinfo'
- )
-
- for arg, argname in zip(args, argnames):
- if argname in kwargs:
- raise TypeError('Duplicate argument: {}'.format(argname))
-
- kwargs[argname] = arg
-
- for argname in argnames:
- if argname not in kwargs:
- kwargs[argname] = getattr(self, argname)
-
- dt_class = self.__class__ if kwargs.get('fold', 1) else datetime
-
- return dt_class(**kwargs)
-
- @property
- def fold(self):
- return 1
-
- def enfold(dt, fold=1):
- """
- Provides a unified interface for assigning the ``fold`` attribute to
- datetimes both before and after the implementation of PEP-495.
-
- :param fold:
- The value for the ``fold`` attribute in the returned datetime. This
- should be either 0 or 1.
-
- :return:
- Returns an object for which ``getattr(dt, 'fold', 0)`` returns
- ``fold`` for all versions of Python. In versions prior to
- Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
- subclass of :py:class:`datetime.datetime` with the ``fold``
- attribute added, if ``fold`` is 1.
-
- .. versionadded:: 2.6.0
- """
- if getattr(dt, 'fold', 0) == fold:
- return dt
-
- args = dt.timetuple()[:6]
- args += (dt.microsecond, dt.tzinfo)
-
- if fold:
- return _DatetimeWithFold(*args)
- else:
- return datetime(*args)
-
-
-def _validate_fromutc_inputs(f):
- """
- The CPython version of ``fromutc`` checks that the input is a ``datetime``
- object and that ``self`` is attached as its ``tzinfo``.
- """
- @wraps(f)
- def fromutc(self, dt):
- if not isinstance(dt, datetime):
- raise TypeError("fromutc() requires a datetime argument")
- if dt.tzinfo is not self:
- raise ValueError("dt.tzinfo is not self")
-
- return f(self, dt)
-
- return fromutc
-
-
-class _tzinfo(tzinfo):
- """
- Base class for all ``dateutil`` ``tzinfo`` objects.
- """
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
-
- dt = dt.replace(tzinfo=self)
-
- wall_0 = enfold(dt, fold=0)
- wall_1 = enfold(dt, fold=1)
-
- same_offset = wall_0.utcoffset() == wall_1.utcoffset()
- same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
-
- return same_dt and not same_offset
-
- def _fold_status(self, dt_utc, dt_wall):
- """
- Determine the fold status of a "wall" datetime, given a representation
- of the same datetime as a (naive) UTC datetime. This is calculated based
- on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
- datetimes, and that this offset is the actual number of hours separating
- ``dt_utc`` and ``dt_wall``.
-
- :param dt_utc:
- Representation of the datetime as UTC
-
- :param dt_wall:
- Representation of the datetime as "wall time". This parameter must
- either have a `fold` attribute or have a fold-naive
- :class:`datetime.tzinfo` attached, otherwise the calculation may
- fail.
- """
- if self.is_ambiguous(dt_wall):
- delta_wall = dt_wall - dt_utc
- _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
- else:
- _fold = 0
-
- return _fold
-
- def _fold(self, dt):
- return getattr(dt, 'fold', 0)
-
- def _fromutc(self, dt):
- """
- Given a timezone-aware datetime in a given timezone, calculates a
- timezone-aware datetime in a new timezone.
-
- Since this is the one time that we *know* we have an unambiguous
- datetime object, we take this opportunity to determine whether the
- datetime is ambiguous and in a "fold" state (e.g. if it's the first
- occurrence, chronologically, of the ambiguous datetime).
-
- :param dt:
- A timezone-aware :class:`datetime.datetime` object.
- """
-
- # Re-implement the algorithm from Python's datetime.py
- dtoff = dt.utcoffset()
- if dtoff is None:
- raise ValueError("fromutc() requires a non-None utcoffset() "
- "result")
-
- # The original datetime.py code assumes that `dst()` defaults to
- # zero during ambiguous times. PEP 495 inverts this presumption, so
- # for pre-PEP 495 versions of python, we need to tweak the algorithm.
- dtdst = dt.dst()
- if dtdst is None:
- raise ValueError("fromutc() requires a non-None dst() result")
- delta = dtoff - dtdst
-
- dt += delta
- # Set fold=1 so we can default to being in the fold for
- # ambiguous dates.
- dtdst = enfold(dt, fold=1).dst()
- if dtdst is None:
- raise ValueError("fromutc(): dt.dst gave inconsistent "
- "results; cannot convert")
- return dt + dtdst
-
- @_validate_fromutc_inputs
- def fromutc(self, dt):
- """
- Given a timezone-aware datetime in a given timezone, calculates a
- timezone-aware datetime in a new timezone.
-
- Since this is the one time that we *know* we have an unambiguous
- datetime object, we take this opportunity to determine whether the
- datetime is ambiguous and in a "fold" state (e.g. if it's the first
- occurrence, chronologically, of the ambiguous datetime).
-
- :param dt:
- A timezone-aware :class:`datetime.datetime` object.
- """
- dt_wall = self._fromutc(dt)
-
- # Calculate the fold status given the two datetimes.
- _fold = self._fold_status(dt, dt_wall)
-
- # Set the default fold value for ambiguous dates
- return enfold(dt_wall, fold=_fold)
-
-
-class tzrangebase(_tzinfo):
- """
- This is an abstract base class for time zones represented by an annual
- transition into and out of DST. Child classes should implement the following
- methods:
-
- * ``__init__(self, *args, **kwargs)``
- * ``transitions(self, year)`` - this is expected to return a tuple of
- datetimes representing the DST on and off transitions in standard
- time.
-
- A fully initialized ``tzrangebase`` subclass should also provide the
- following attributes:
- * ``hasdst``: Boolean whether or not the zone uses DST.
- * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
- representing the respective UTC offsets.
- * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
- abbreviations in DST and STD, respectively.
- * ``_hasdst``: Whether or not the zone has DST.
-
- .. versionadded:: 2.6.0
- """
- def __init__(self):
- raise NotImplementedError('tzrangebase is an abstract base class')
-
- def utcoffset(self, dt):
- isdst = self._isdst(dt)
-
- if isdst is None:
- return None
- elif isdst:
- return self._dst_offset
- else:
- return self._std_offset
-
- def dst(self, dt):
- isdst = self._isdst(dt)
-
- if isdst is None:
- return None
- elif isdst:
- return self._dst_base_offset
- else:
- return ZERO
-
- @tzname_in_python2
- def tzname(self, dt):
- if self._isdst(dt):
- return self._dst_abbr
- else:
- return self._std_abbr
-
- def fromutc(self, dt):
- """ Given a datetime in UTC, return local time """
- if not isinstance(dt, datetime):
- raise TypeError("fromutc() requires a datetime argument")
-
- if dt.tzinfo is not self:
- raise ValueError("dt.tzinfo is not self")
-
- # Get transitions - if there are none, fixed offset
- transitions = self.transitions(dt.year)
- if transitions is None:
- return dt + self.utcoffset(dt)
-
- # Get the transition times in UTC
- dston, dstoff = transitions
-
- dston -= self._std_offset
- dstoff -= self._std_offset
-
- utc_transitions = (dston, dstoff)
- dt_utc = dt.replace(tzinfo=None)
-
- isdst = self._naive_isdst(dt_utc, utc_transitions)
-
- if isdst:
- dt_wall = dt + self._dst_offset
- else:
- dt_wall = dt + self._std_offset
-
- _fold = int(not isdst and self.is_ambiguous(dt_wall))
-
- return enfold(dt_wall, fold=_fold)
-
- def is_ambiguous(self, dt):
- """
- Whether or not the "wall time" of a given datetime is ambiguous in this
- zone.
-
- :param dt:
- A :py:class:`datetime.datetime`, naive or time zone aware.
-
-
- :return:
- Returns ``True`` if ambiguous, ``False`` otherwise.
-
- .. versionadded:: 2.6.0
- """
- if not self.hasdst:
- return False
-
- start, end = self.transitions(dt.year)
-
- dt = dt.replace(tzinfo=None)
- return (end <= dt < end + self._dst_base_offset)
-
- def _isdst(self, dt):
- if not self.hasdst:
- return False
- elif dt is None:
- return None
-
- transitions = self.transitions(dt.year)
-
- if transitions is None:
- return False
-
- dt = dt.replace(tzinfo=None)
-
- isdst = self._naive_isdst(dt, transitions)
-
- # Handle ambiguous dates
- if not isdst and self.is_ambiguous(dt):
- return not self._fold(dt)
- else:
- return isdst
-
- def _naive_isdst(self, dt, transitions):
- dston, dstoff = transitions
-
- dt = dt.replace(tzinfo=None)
-
- if dston < dstoff:
- isdst = dston <= dt < dstoff
- else:
- isdst = not dstoff <= dt < dston
-
- return isdst
-
- @property
- def _dst_base_offset(self):
- return self._dst_offset - self._std_offset
-
- __hash__ = None
-
- def __ne__(self, other):
- return not (self == other)
-
- def __repr__(self):
- return "%s(...)" % self.__class__.__name__
-
- __reduce__ = object.__reduce__
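The _common.py module removed above supplies the fold/ambiguity plumbing that dateutil's concrete tzinfo classes build on. A minimal usage sketch of the public entry points it backs (assuming python-dateutil >= 2.6 and that the America/New_York zone data is available on the host; the values in the trailing comments are illustrative):

from datetime import datetime
from dateutil import tz

eastern = tz.gettz("America/New_York")
# 2021-11-07 01:30 local time occurs twice on the DST fall-back day.
ambiguous = datetime(2021, 11, 7, 1, 30, tzinfo=eastern)

first = tz.enfold(ambiguous, fold=0)   # first occurrence, still on EDT (UTC-4)
second = tz.enfold(ambiguous, fold=1)  # second occurrence, back on EST (UTC-5)

print(tz.datetime_ambiguous(ambiguous))       # True
print(first.utcoffset(), second.utcoffset())  # -1 day, 20:00:00 vs. -1 day, 19:00:00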
diff --git a/spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip.py b/spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip.py
deleted file mode 100644
index 9d865181787bab4639fdb88334555dbcc25c983d..0000000000000000000000000000000000000000
--- a/spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip.py
+++ /dev/null
@@ -1,213 +0,0 @@
-import hashlib
-import os
-import urllib
-import warnings
-from typing import Union, List
-
-import torch
-from PIL import Image
-from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
-from tqdm import tqdm
-
-from .model import build_model
-from .simple_tokenizer import SimpleTokenizer as _Tokenizer
-
-try:
- from torchvision.transforms import InterpolationMode
- BICUBIC = InterpolationMode.BICUBIC
-except ImportError:
- BICUBIC = Image.BICUBIC
-
-
-if torch.__version__.split(".") < ["1", "7", "1"]:
- warnings.warn("PyTorch version 1.7.1 or higher is recommended")
-
-
-__all__ = ["available_models", "load", "tokenize"]
-_tokenizer = _Tokenizer()
-
-_MODELS = {
- "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
- "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
- "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
- "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
- "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
- "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
-}
-
-
-def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
- os.makedirs(root, exist_ok=True)
- filename = os.path.basename(url)
-
- expected_sha256 = url.split("/")[-2]
- download_target = os.path.join(root, filename)
-
- if os.path.exists(download_target) and not os.path.isfile(download_target):
- raise RuntimeError(f"{download_target} exists and is not a regular file")
-
- if os.path.isfile(download_target):
- if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
- return download_target
- else:
- warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
-
- with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
- with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
- while True:
- buffer = source.read(8192)
- if not buffer:
- break
-
- output.write(buffer)
- loop.update(len(buffer))
-
- if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
-        raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not match")
-
- return download_target
-
-
-def _transform(n_px):
- return Compose([
- Resize(n_px, interpolation=BICUBIC),
- CenterCrop(n_px),
- lambda image: image.convert("RGB"),
- ToTensor(),
- Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
- ])
-
-
-def available_models() -> List[str]:
- """Returns the names of available CLIP models"""
- return list(_MODELS.keys())
-
-
-def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=False):
- """Load a CLIP model
- Parameters
- ----------
- name : str
- A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
- device : Union[str, torch.device]
- The device to put the loaded model
- jit : bool
- Whether to load the optimized JIT model or more hackable non-JIT model (default).
- Returns
- -------
- model : torch.nn.Module
- The CLIP model
- preprocess : Callable[[PIL.Image], torch.Tensor]
- A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
- """
- if name in _MODELS:
- model_path = _download(_MODELS[name])
- elif os.path.isfile(name):
- model_path = name
- else:
- raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
-
- try:
- # loading JIT archive
- model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
- state_dict = None
- except RuntimeError:
- # loading saved state dict
- if jit:
- warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
- jit = False
- state_dict = torch.load(model_path, map_location="cpu")
-
- if not jit:
- # print("Heree.....")
- model = build_model(state_dict or model.state_dict()).to(device)
- if str(device) == "cpu":
- model.float()
- return model, _transform(model.visual.input_resolution)
-
- # patch the device names
- device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
- device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
-
- def patch_device(module):
- try:
- graphs = [module.graph] if hasattr(module, "graph") else []
- except RuntimeError:
- graphs = []
-
- if hasattr(module, "forward1"):
- graphs.append(module.forward1.graph)
-
- for graph in graphs:
- for node in graph.findAllNodes("prim::Constant"):
- if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
- node.copyAttributes(device_node)
-
- model.apply(patch_device)
- patch_device(model.encode_image)
- patch_device(model.encode_text)
-
- # patch dtype to float32 on CPU
- if str(device) == "cpu":
- float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
- float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
- float_node = float_input.node()
-
- def patch_float(module):
- try:
- graphs = [module.graph] if hasattr(module, "graph") else []
- except RuntimeError:
- graphs = []
-
- if hasattr(module, "forward1"):
- graphs.append(module.forward1.graph)
-
- for graph in graphs:
- for node in graph.findAllNodes("aten::to"):
- inputs = list(node.inputs())
- for i in [1, 2]: # dtype can be the second or third argument to aten::to()
- if inputs[i].node()["value"] == 5:
- inputs[i].node().copyAttributes(float_node)
-
- model.apply(patch_float)
- patch_float(model.encode_image)
- patch_float(model.encode_text)
-
- model.float()
-
- return model, _transform(model.input_resolution.item())
-
-
-def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:
- """
- Returns the tokenized representation of given input string(s)
- Parameters
- ----------
- texts : Union[str, List[str]]
- An input string or a list of input strings to tokenize
- context_length : int
- The context length to use; all CLIP models use 77 as the context length
- truncate: bool
- Whether to truncate the text in case its encoding is longer than the context length
- Returns
- -------
- A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
- """
- if isinstance(texts, str):
- texts = [texts]
-
- sot_token = _tokenizer.encoder["<|startoftext|>"]
- eot_token = _tokenizer.encoder["<|endoftext|>"]
- all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
- result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
-
- for i, tokens in enumerate(all_tokens):
- if len(tokens) > context_length:
- if truncate:
- tokens = tokens[:context_length]
- tokens[-1] = eot_token
- else:
- raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
- result[i, :len(tokens)] = torch.tensor(tokens)
-
- return result
\ No newline at end of file
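The loader and tokenizer above follow the upstream OpenAI CLIP interface. A minimal usage sketch, assuming this vendored module is importable as clip, that the checkpoint URLs listed in _MODELS are reachable, that the model's forward pass mirrors upstream CLIP (returning image and text logits), and that a local file named example.jpg exists (the file name and the prompt strings are placeholders):

import torch
from PIL import Image
import clip  # the module shown above

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

image = preprocess(Image.open("example.jpg")).unsqueeze(0).to(device)
text = clip.tokenize(["a chest X-ray", "a brain MRI", "a photo of a dog"]).to(device)

with torch.no_grad():
    image_features = model.encode_image(image)  # image embedding
    text_features = model.encode_text(text)     # one embedding per prompt
    logits_per_image, logits_per_text = model(image, text)
    probs = logits_per_image.softmax(dim=-1)

print(probs.cpu().numpy())  # similarity of the image to each prompt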
diff --git a/spaces/BradSegal/Literature-Rating/app.py b/spaces/BradSegal/Literature-Rating/app.py
deleted file mode 100644
index 7cf1ceb0627470a97564839e50060e34a57e7411..0000000000000000000000000000000000000000
--- a/spaces/BradSegal/Literature-Rating/app.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import os
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from transformers import AutoModel, AutoConfig, AutoTokenizer
-import gradio as gr
-
-os.system("gdown https://drive.google.com/uc?id=1whDb0yL_Kqoyx-sIw0sS5xTfb6r_9nlJ")
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-
-def init_params(module_lst):
- for module in module_lst:
- for param in module.parameters():
- if param.dim() > 1:
- torch.nn.init.xavier_uniform_(param)
- return
-
-
-class Custom_bert(nn.Module):
- def __init__(self, model_dir):
- super().__init__()
-
- # load base model
- config = AutoConfig.from_pretrained(model_dir)
- config.update({"output_hidden_states": True,
- "hidden_dropout_prob": 0.0,
- "layer_norm_eps": 1e-7})
-
- self.base = AutoModel.from_pretrained(model_dir, config=config)
-
- dim = self.base.encoder.layer[0].output.dense.bias.shape[0]
-
- self.dropout = nn.Dropout(p=0.2)
- self.high_dropout = nn.Dropout(p=0.5)
-
- # weights for weighted layer average
- n_weights = 24
- weights_init = torch.zeros(n_weights).float()
- weights_init.data[:-1] = -3
- self.layer_weights = torch.nn.Parameter(weights_init)
-
- # attention head
- self.attention = nn.Sequential(
- nn.Linear(1024, 1024),
- nn.Tanh(),
- nn.Linear(1024, 1),
- nn.Softmax(dim=1)
- )
- self.cls = nn.Sequential(
- nn.Linear(dim, 1)
- )
- init_params([self.cls, self.attention])
-
- def reini_head(self):
- init_params([self.cls, self.attention])
- return
-
- def forward(self, input_ids, attention_mask):
- base_output = self.base(input_ids=input_ids,
- attention_mask=attention_mask)
-
- # weighted average of all encoder outputs
- cls_outputs = torch.stack(
- [self.dropout(layer) for layer in base_output['hidden_states'][-24:]], dim=0
- )
- cls_output = (
- torch.softmax(self.layer_weights, dim=0).unsqueeze(1).unsqueeze(1).unsqueeze(1) * cls_outputs).sum(
- 0)
-
- # multisample dropout
- logits = torch.mean(
- torch.stack(
- [torch.sum(self.attention(self.high_dropout(cls_output)) * cls_output, dim=1) for _ in range(5)],
- dim=0,
- ),
- dim=0,
- )
- return self.cls(logits)
-
-
-def get_batches(input, tokenizer, batch_size=128, max_length=256, device='cpu'):
- out = tokenizer(input, return_tensors='pt', max_length=max_length, padding='max_length')
- out['input_ids'], out['attention_mask'] = out['input_ids'].to(device), out['attention_mask'].to(device)
- input_id_split = torch.split(out['input_ids'], max_length, dim=1)
- attention_split = torch.split(out['attention_mask'], max_length, dim=1)
-
- input_id_batches = []
- attention_batches = []
-
- i = 0
- input_length = len(input_id_split)
-
- while i * batch_size < input_length:
- if i * batch_size + batch_size <= input_length:
- input_id_batches.append(list(input_id_split[i * batch_size:(i + 1) * batch_size]))
- attention_batches.append(list(attention_split[i * batch_size:(i + 1) * batch_size]))
- else:
- input_id_batches.append(list(input_id_split[i * batch_size:input_length]))
- attention_batches.append(list(attention_split[i * batch_size:input_length]))
- i += 1
-
- if input_id_batches[-1][-1].shape[1] < max_length:
- input_id_batches[-1][-1] = F.pad(input_id_batches[-1][-1],
- (1, max_length - input_id_batches[-1][-1].shape[1] - 1),
- value=0)
- attention_batches[-1][-1] = F.pad(attention_batches[-1][-1],
- (1, max_length - attention_batches[-1][-1].shape[1] - 1),
- value=1)
-
- input_id_batches = [torch.cat(batch, dim=0) for batch in input_id_batches]
- attention_batches = [torch.cat(batch, dim=0) for batch in attention_batches]
-
- return tuple(zip(input_id_batches, attention_batches))
-
-
-def predict(input, tokenizer, model, batch_size=128, max_length=256, max_val=-4, min_val=3, score=100):
- device = model.base.device
- batches = get_batches(input, tokenizer, batch_size, max_length, device)
-
- predictions = []
-
- with torch.no_grad():
- for input_ids, attention_mask in batches:
- pred = model(input_ids, attention_mask)
- pred = score * (pred - min_val) / (max_val - min_val)
- predictions.append(pred)
-
- predictions = torch.cat(predictions, dim=0)
- mean, std = predictions.mean().cpu().item(), predictions.std().cpu().item()
- mean, std = round(mean, 2), round(std, 2)
- if np.isnan(std):
- return f"The reading difficulty score is {mean}."
- else:
- return f"""The reading difficulty score is {mean} with a standard deviation of {std}.
- \nThe 95% confidence interval of the score is {mean - 2 * std} to {mean + 2 * std}."""
-
-
-if __name__ == "__main__":
- deberta_loc = "deberta_large_0.pt"
- deberta_tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-large", model_max_length=256)
-
- model = Custom_bert("microsoft/deberta-large")
- model.load_state_dict(torch.load(deberta_loc, map_location=torch.device(device)))
- model.eval().to(device)
-
-
- description = """
- This tool attempts to estimate how difficult a piece of text is to read by a school child.
- The underlying model has been developed based on expert ranking of text difficulty for students from grade 3 to 12.
- The score has been scaled to range from zero (very easy) to one hundred (very difficult).
- Very long passages will be broken up and reported with the average as well as the standard deviation of the difficulty score.
- """
-
- interface = gr.Interface(fn=lambda x: predict(x, deberta_tokenizer, model, batch_size=4),
- inputs=gr.inputs.Textbox(lines = 7, label = "Text:",
- placeholder = "Insert text to be scored here."),
- outputs='text',
- title = "Reading Difficulty Analyser",
- description = description)
- interface.launch()
-
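The scaling inside predict() above maps the raw DeBERTa output onto the 0-100 difficulty scale mentioned in the Gradio description: score * (pred - min_val) / (max_val - min_val) with the defaults min_val=3 and max_val=-4, so lower (harder) raw scores land near 100. A small sketch of just that arithmetic (the endpoint values are the signature defaults, not independently calibrated constants):

def scale_difficulty(raw, max_val=-4.0, min_val=3.0, score=100.0):
    # Same affine map as in predict(): raw outputs in roughly [-4, 3] -> [100, 0].
    return score * (raw - min_val) / (max_val - min_val)

print(scale_difficulty(3.0))   # 0.0   -> easiest end of the scale
print(scale_difficulty(-0.5))  # 50.0  -> mid-range difficulty
print(scale_difficulty(-4.0))  # 100.0 -> hardest end of the scale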
diff --git a/spaces/BraydenMoore/MARCI-NFL-Betting/Source/Test/xgboost_OU.py b/spaces/BraydenMoore/MARCI-NFL-Betting/Source/Test/xgboost_OU.py
deleted file mode 100644
index e666282db845d5b54fd670371f445386c810550e..0000000000000000000000000000000000000000
--- a/spaces/BraydenMoore/MARCI-NFL-Betting/Source/Test/xgboost_OU.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import xgboost as xgb
-import pandas as pd
-import pickle as pkl
-import numpy as np
-import os
-
-model = 'xgboost_OU_no_odds_60.8%'
-
-current_directory = os.path.dirname(os.path.abspath(__file__))
-parent_directory = os.path.dirname(current_directory)
-data_directory = os.path.join(parent_directory, 'Data')
-model_directory = os.path.join(parent_directory, 'Models')
-pickle_directory = os.path.join(parent_directory, 'Pickles')
-
-file_path = os.path.join(model_directory, f'{model}.json')
-xgb_ou = xgb.Booster()
-xgb_ou.load_model(file_path)
-
-file_path = os.path.join(pickle_directory, 'test_games_OU_no_odds.pkl')
-with open(file_path,'rb') as f:
- test_games = pkl.load(f).tolist()
-
-file_path = os.path.join(data_directory, 'gbg_and_odds.csv')
-gbg_and_odds = pd.read_csv(file_path)
-test_data = gbg_and_odds.loc[gbg_and_odds['game_id'].isin(test_games)]
-test_data_matrix = xgb.DMatrix(test_data.drop(columns=['game_id','Over','Home-Team-Win','Season','home_team','away_team','game_date','Key','Home Score','Away Score','Home Odds Close','Away Odds Close','Home Winnings','Away Winnings','Away Odds','Home Odds']).astype(float).values)
-
-predicted_probas = xgb_ou.predict(test_data_matrix)
-predictions = np.argmax(predicted_probas, axis=1)
-test_data['predicted_proba'] = [i[1] for i in predicted_probas]
-test_data['prediction'] = (test_data['predicted_proba']>0.5).astype(int)
-test_data['correct'] = test_data['Over']==test_data['prediction']
-
-bets = test_data.loc[(test_data['predicted_proba']>0.6) | (test_data['predicted_proba']<0.4)]
-bets['winnings'] = [0.91 if c else -1 for c in bets[['correct']].values]
-
-import matplotlib.pyplot as plt
-fig = plt.figure(facecolor='black')
-ax = fig.add_subplot(1, 1, 1, facecolor='black')
-
-# Plot data with line color as RGB(0, 128, 0)
-ax.plot(bets['winnings'].cumsum().values*100, linewidth=3, color=(0/255, 128/255, 0/255))
-
-# Set title and labels
-ax.set_title('MARCI 3.0 - Over/Under', color='white')
-ax.set_xlabel('Games Bet On', color='white')
-ax.set_ylabel('Return (%)', color='white')
-
-# Change tick colors to white
-ax.tick_params(axis='x', colors='white')
-ax.tick_params(axis='y', colors='white')
-
-# Change axis edge colors
-ax.spines['bottom'].set_color('white')
-ax.spines['top'].set_color('white')
-ax.spines['left'].set_color('white')
-ax.spines['right'].set_color('white')
-
-plt.savefig(f'{model}_dark.png', facecolor='black')
\ No newline at end of file
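The winnings column above books +0.91 units for a correct bet and -1 unit for an incorrect one, which corresponds to roughly the standard -110 (1.91 decimal) over/under price, and the plot shows the cumulative sum scaled to percent. A small sketch of that bookkeeping with made-up outcomes (the results list is illustrative, not drawn from the test set):

import numpy as np

outcomes = [True, True, False, True, False, True, True]  # hypothetical bet results
winnings = np.array([0.91 if correct else -1.0 for correct in outcomes])
cumulative_return_pct = winnings.cumsum() * 100

print(cumulative_return_pct)                                 # running return per game, in %
print(f"break-even hit rate at this price: {1 / 1.91:.1%}")  # ~52.4%

Any hit rate above that threshold is profitable at this price, which is why the script only bets when the predicted probability is above 0.6 or below 0.4.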
diff --git a/spaces/CVPR/MonoScene/app.py b/spaces/CVPR/MonoScene/app.py
deleted file mode 100644
index 01225a388b656d95f22d55a02c47d4d62772c3c3..0000000000000000000000000000000000000000
--- a/spaces/CVPR/MonoScene/app.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import gradio as gr
-import numpy as np
-from torchvision import transforms
-import torch
-from helpers import *
-import sys
-import csv
-from monoscene.monoscene import MonoScene
-
-csv.field_size_limit(sys.maxsize)
-torch.set_grad_enabled(False)
-
-# pipeline = pipeline(model="anhquancao/monoscene_kitti")
-# model = AutoModel.from_pretrained(
-# "anhquancao/monoscene_kitti", trust_remote_code=True, revision='bf033f87c2a86b60903ab811b790a1532c1ae313'
-# )#.cuda()
-model = MonoScene.load_from_checkpoint(
- "monoscene_kitti.ckpt",
- dataset="kitti",
- n_classes=20,
- feature = 64,
- project_scale = 2,
- full_scene_size = (256, 256, 32),
- )
-
-img_W, img_H = 1220, 370
-
-
-def predict(img):
- img = np.array(img, dtype=np.float32, copy=False) / 255.0
-
- normalize_rgb = transforms.Compose(
- [
- transforms.ToTensor(),
- transforms.Normalize(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
- ),
- ]
- )
- img = normalize_rgb(img)
-
- batch = get_projections(img_W, img_H)
- batch["img"] = img
- for k in batch:
- batch[k] = batch[k].unsqueeze(0)#.cuda()
-
- pred = model(batch).squeeze()
- # print(pred.shape)
- pred = majority_pooling(pred, k_size=2)
- fig = draw(pred, batch['fov_mask_2'])
-
-
- return fig
-
-
-description = """
-MonoScene Demo on SemanticKITTI Validation Set (Sequence 08), which uses the camera parameters of Sequence 08 .
-Due to the CPU-only inference, it might take up to 20s to predict a scene. \n
-The output is downsampled by 2 for faster rendering. Darker colors represent the scenery outside the Field of View , i.e. not visible on the image.
-
-
-
-
-
-
-
-"""
-title = "MonoScene: Monocular 3D Semantic Scene Completion"
-article="""
-
-We also released a smaller MonoScene model (Half resolution - w/o 3D CRP) at: https://huggingface.co/spaces/CVPR/monoscene_lite
-
-
-"""
-
-examples = [
- 'images/08/001385.jpg',
- 'images/08/000295.jpg',
- 'images/08/002505.jpg',
- 'images/08/000085.jpg',
- 'images/08/000290.jpg',
- 'images/08/000465.jpg',
- 'images/08/000790.jpg',
- 'images/08/001005.jpg',
- 'images/08/001380.jpg',
- 'images/08/001530.jpg',
- 'images/08/002360.jpg',
- 'images/08/004059.jpg',
- 'images/08/003149.jpg',
- 'images/08/001446.jpg',
- 'images/08/000010.jpg',
- 'images/08/001122.jpg',
- 'images/08/003533.jpg',
- 'images/08/003365.jpg',
- 'images/08/002944.jpg',
- 'images/08/000822.jpg',
- 'images/08/000103.jpg',
- 'images/08/002716.jpg',
- 'images/08/000187.jpg',
- 'images/08/002128.jpg',
- 'images/08/000511.jpg',
- 'images/08/000618.jpg',
- 'images/08/002010.jpg',
- 'images/08/000234.jpg',
- 'images/08/001842.jpg',
- 'images/08/001687.jpg',
- 'images/08/003929.jpg',
- 'images/08/002272.jpg',
-]
-
-
-
-demo = gr.Interface(
- predict,
- gr.Image(shape=(1220, 370)),
- gr.Plot(),
- article=article,
- title=title,
- enable_queue=True,
- cache_examples=False,
- live=False,
- examples=examples,
- description=description)
-
-
-demo.launch(enable_queue=True, debug=False)
\ No newline at end of file
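predict() above downsamples the semantic voxel grid with majority_pooling(pred, k_size=2), imported from the space's helpers module, whose implementation is not included in this diff. A plausible stand-in, purely illustrative, that replaces each k x k x k block of class labels with its most frequent label:

import numpy as np

def majority_pool_sketch(labels, k_size=2):
    # Downsample a 3D grid of integer class labels by majority vote per block.
    d, h, w = labels.shape
    out = np.zeros((d // k_size, h // k_size, w // k_size), dtype=labels.dtype)
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            for m in range(out.shape[2]):
                block = labels[i * k_size:(i + 1) * k_size,
                               j * k_size:(j + 1) * k_size,
                               m * k_size:(m + 1) * k_size].ravel()
                out[i, j, m] = np.bincount(block).argmax()
    return out

grid = np.random.randint(0, 20, size=(8, 8, 4))    # toy 20-class voxel grid
print(majority_pool_sketch(grid, k_size=2).shape)  # (4, 4, 2)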
diff --git a/spaces/CVPR/Text2Human/Text2Human/models/hierarchy_inference_model.py b/spaces/CVPR/Text2Human/Text2Human/models/hierarchy_inference_model.py
deleted file mode 100644
index 3116307caa051cec1a2d0e3793f459f92b44fd80..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Text2Human/Text2Human/models/hierarchy_inference_model.py
+++ /dev/null
@@ -1,363 +0,0 @@
-import logging
-import math
-from collections import OrderedDict
-
-import torch
-import torch.nn.functional as F
-from torchvision.utils import save_image
-
-from models.archs.fcn_arch import MultiHeadFCNHead
-from models.archs.unet_arch import UNet
-from models.archs.vqgan_arch import (Decoder, DecoderRes, Encoder,
- VectorQuantizerSpatialTextureAware,
- VectorQuantizerTexture)
-from models.losses.accuracy import accuracy
-from models.losses.cross_entropy_loss import CrossEntropyLoss
-
-logger = logging.getLogger('base')
-
-
-class VQGANTextureAwareSpatialHierarchyInferenceModel():
-
- def __init__(self, opt):
- self.opt = opt
- self.device = torch.device('cuda')
- self.is_train = opt['is_train']
-
- self.top_encoder = Encoder(
- ch=opt['top_ch'],
- num_res_blocks=opt['top_num_res_blocks'],
- attn_resolutions=opt['top_attn_resolutions'],
- ch_mult=opt['top_ch_mult'],
- in_channels=opt['top_in_channels'],
- resolution=opt['top_resolution'],
- z_channels=opt['top_z_channels'],
- double_z=opt['top_double_z'],
- dropout=opt['top_dropout']).to(self.device)
- self.decoder = Decoder(
- in_channels=opt['top_in_channels'],
- resolution=opt['top_resolution'],
- z_channels=opt['top_z_channels'],
- ch=opt['top_ch'],
- out_ch=opt['top_out_ch'],
- num_res_blocks=opt['top_num_res_blocks'],
- attn_resolutions=opt['top_attn_resolutions'],
- ch_mult=opt['top_ch_mult'],
- dropout=opt['top_dropout'],
- resamp_with_conv=True,
- give_pre_end=False).to(self.device)
- self.top_quantize = VectorQuantizerTexture(
- 1024, opt['embed_dim'], beta=0.25).to(self.device)
- self.top_quant_conv = torch.nn.Conv2d(opt["top_z_channels"],
- opt['embed_dim'],
- 1).to(self.device)
- self.top_post_quant_conv = torch.nn.Conv2d(opt['embed_dim'],
- opt["top_z_channels"],
- 1).to(self.device)
- self.load_top_pretrain_models()
-
- self.bot_encoder = Encoder(
- ch=opt['bot_ch'],
- num_res_blocks=opt['bot_num_res_blocks'],
- attn_resolutions=opt['bot_attn_resolutions'],
- ch_mult=opt['bot_ch_mult'],
- in_channels=opt['bot_in_channels'],
- resolution=opt['bot_resolution'],
- z_channels=opt['bot_z_channels'],
- double_z=opt['bot_double_z'],
- dropout=opt['bot_dropout']).to(self.device)
- self.bot_decoder_res = DecoderRes(
- in_channels=opt['bot_in_channels'],
- resolution=opt['bot_resolution'],
- z_channels=opt['bot_z_channels'],
- ch=opt['bot_ch'],
- num_res_blocks=opt['bot_num_res_blocks'],
- ch_mult=opt['bot_ch_mult'],
- dropout=opt['bot_dropout'],
- give_pre_end=False).to(self.device)
- self.bot_quantize = VectorQuantizerSpatialTextureAware(
- opt['bot_n_embed'],
- opt['embed_dim'],
- beta=0.25,
- spatial_size=opt['codebook_spatial_size']).to(self.device)
- self.bot_quant_conv = torch.nn.Conv2d(opt["bot_z_channels"],
- opt['embed_dim'],
- 1).to(self.device)
- self.bot_post_quant_conv = torch.nn.Conv2d(opt['embed_dim'],
- opt["bot_z_channels"],
- 1).to(self.device)
-
- self.load_bot_pretrain_network()
-
- self.guidance_encoder = UNet(
- in_channels=opt['encoder_in_channels']).to(self.device)
- self.index_decoder = MultiHeadFCNHead(
- in_channels=opt['fc_in_channels'],
- in_index=opt['fc_in_index'],
- channels=opt['fc_channels'],
- num_convs=opt['fc_num_convs'],
- concat_input=opt['fc_concat_input'],
- dropout_ratio=opt['fc_dropout_ratio'],
- num_classes=opt['fc_num_classes'],
- align_corners=opt['fc_align_corners'],
- num_head=18).to(self.device)
-
- self.init_training_settings()
-
- def init_training_settings(self):
- optim_params = []
- for v in self.guidance_encoder.parameters():
- if v.requires_grad:
- optim_params.append(v)
- for v in self.index_decoder.parameters():
- if v.requires_grad:
- optim_params.append(v)
- # set up optimizers
- if self.opt['optimizer'] == 'Adam':
- self.optimizer = torch.optim.Adam(
- optim_params,
- self.opt['lr'],
- weight_decay=self.opt['weight_decay'])
- elif self.opt['optimizer'] == 'SGD':
- self.optimizer = torch.optim.SGD(
- optim_params,
- self.opt['lr'],
- momentum=self.opt['momentum'],
- weight_decay=self.opt['weight_decay'])
- self.log_dict = OrderedDict()
- if self.opt['loss_function'] == 'cross_entropy':
- self.loss_func = CrossEntropyLoss().to(self.device)
-
- def load_top_pretrain_models(self):
- # load pretrained vqgan for segmentation mask
- top_vae_checkpoint = torch.load(self.opt['top_vae_path'])
- self.top_encoder.load_state_dict(
- top_vae_checkpoint['encoder'], strict=True)
- self.decoder.load_state_dict(
- top_vae_checkpoint['decoder'], strict=True)
- self.top_quantize.load_state_dict(
- top_vae_checkpoint['quantize'], strict=True)
- self.top_quant_conv.load_state_dict(
- top_vae_checkpoint['quant_conv'], strict=True)
- self.top_post_quant_conv.load_state_dict(
- top_vae_checkpoint['post_quant_conv'], strict=True)
- self.top_encoder.eval()
- self.top_quantize.eval()
- self.top_quant_conv.eval()
- self.top_post_quant_conv.eval()
-
- def load_bot_pretrain_network(self):
- checkpoint = torch.load(self.opt['bot_vae_path'])
- self.bot_encoder.load_state_dict(
- checkpoint['bot_encoder'], strict=True)
- self.bot_decoder_res.load_state_dict(
- checkpoint['bot_decoder_res'], strict=True)
- self.decoder.load_state_dict(checkpoint['decoder'], strict=True)
- self.bot_quantize.load_state_dict(
- checkpoint['bot_quantize'], strict=True)
- self.bot_quant_conv.load_state_dict(
- checkpoint['bot_quant_conv'], strict=True)
- self.bot_post_quant_conv.load_state_dict(
- checkpoint['bot_post_quant_conv'], strict=True)
-
- self.bot_encoder.eval()
- self.bot_decoder_res.eval()
- self.decoder.eval()
- self.bot_quantize.eval()
- self.bot_quant_conv.eval()
- self.bot_post_quant_conv.eval()
-
- def top_encode(self, x, mask):
- h = self.top_encoder(x)
- h = self.top_quant_conv(h)
- quant, _, _ = self.top_quantize(h, mask)
- quant = self.top_post_quant_conv(quant)
-
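- # the post-quantized top feature is returned twice: it serves both as quant_t (used for decoding) and as feature_t (input to the guidance encoder)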
- return quant, quant
-
- def feed_data(self, data):
- self.image = data['image'].to(self.device)
- self.texture_mask = data['texture_mask'].float().to(self.device)
- self.get_gt_indices()
-
- self.texture_tokens = F.interpolate(
- self.texture_mask, size=(32, 16),
- mode='nearest').view(self.image.size(0), -1).long()
-
- def bot_encode(self, x, mask):
- h = self.bot_encoder(x)
- h = self.bot_quant_conv(h)
- _, _, (_, _, indices_list) = self.bot_quantize(h, mask)
-
- return indices_list
-
- def get_gt_indices(self):
- self.quant_t, self.feature_t = self.top_encode(self.image,
- self.texture_mask)
- self.gt_indices_list = self.bot_encode(self.image, self.texture_mask)
-
- def index_to_image(self, index_bottom_list, texture_mask):
- quant_b = self.bot_quantize.get_codebook_entry(
- index_bottom_list, texture_mask,
- (index_bottom_list[0].size(0), index_bottom_list[0].size(1),
- index_bottom_list[0].size(2),
- self.opt["bot_z_channels"])) #.permute(0, 3, 1, 2)
- quant_b = self.bot_post_quant_conv(quant_b)
- bot_dec_res = self.bot_decoder_res(quant_b)
-
- dec = self.decoder(self.quant_t, bot_h=bot_dec_res)
-
- return dec
-
- def get_vis(self, pred_img_index, rec_img_index, texture_mask, save_path):
- rec_img = self.index_to_image(rec_img_index, texture_mask)
- pred_img = self.index_to_image(pred_img_index, texture_mask)
-
- base_img = self.decoder(self.quant_t)
- img_cat = torch.cat([
- self.image,
- rec_img,
- base_img,
- pred_img,
- ], dim=3).detach()
- img_cat = ((img_cat + 1) / 2)
- img_cat = img_cat.clamp_(0, 1)
- save_image(img_cat, save_path, nrow=1, padding=4)
-
- def optimize_parameters(self):
- self.guidance_encoder.train()
- self.index_decoder.train()
-
- self.feature_enc = self.guidance_encoder(self.feature_t)
- self.memory_logits_list = self.index_decoder(self.feature_enc)
-
- loss = 0
- for i in range(18):
- loss += self.loss_func(
- self.memory_logits_list[i],
- self.gt_indices_list[i],
- ignore_index=-1)
-
- self.optimizer.zero_grad()
- loss.backward()
- self.optimizer.step()
-
- self.log_dict['loss_total'] = loss
-
- def inference(self, data_loader, save_dir):
- self.guidance_encoder.eval()
- self.index_decoder.eval()
-
- acc = 0
- num = 0
-
- for _, data in enumerate(data_loader):
- self.feed_data(data)
- img_name = data['img_name']
-
- num += self.image.size(0)
-
- texture_mask_flatten = self.texture_tokens.view(-1)
- min_encodings_indices_list = [
- torch.full(
- texture_mask_flatten.size(),
- fill_value=-1,
- dtype=torch.long,
- device=texture_mask_flatten.device) for _ in range(18)
- ]
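- # one flattened index map per codebook head (18 textures); entries stay -1
- # wherever a token position does not belong to that texture region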
- with torch.no_grad():
- self.feature_enc = self.guidance_encoder(self.feature_t)
- memory_logits_list = self.index_decoder(self.feature_enc)
- # memory_indices_pred = memory_logits.argmax(dim=1)
- batch_acc = 0
- for codebook_idx, memory_logits in enumerate(memory_logits_list):
- region_of_interest = texture_mask_flatten == codebook_idx
- if torch.sum(region_of_interest) > 0:
- memory_indices_pred = memory_logits.argmax(dim=1).view(-1)
- batch_acc += torch.sum(
- memory_indices_pred[region_of_interest] ==
- self.gt_indices_list[codebook_idx].view(
- -1)[region_of_interest])
- min_encodings_indices_list[codebook_idx][
- region_of_interest] = memory_indices_pred[
- region_of_interest]
- min_encodings_indices_return_list = [
- min_encodings_indices.view(self.gt_indices_list[0].size())
- for min_encodings_indices in min_encodings_indices_list
- ]
- batch_acc = batch_acc / self.gt_indices_list[codebook_idx].numel(
- ) * self.image.size(0)
- acc += batch_acc
- self.get_vis(min_encodings_indices_return_list,
- self.gt_indices_list, self.texture_mask,
- f'{save_dir}/{img_name[0]}')
-
- self.guidance_encoder.train()
- self.index_decoder.train()
- return (acc / num).item()
-
- def load_network(self):
- checkpoint = torch.load(self.opt['pretrained_models'])
- self.guidance_encoder.load_state_dict(
- checkpoint['guidance_encoder'], strict=True)
- self.guidance_encoder.eval()
-
- self.index_decoder.load_state_dict(
- checkpoint['index_decoder'], strict=True)
- self.index_decoder.eval()
-
- def save_network(self, save_path):
- """Save the guidance encoder and index decoder to a checkpoint.
-
- Args:
- save_path (str): Path of the checkpoint file to write.
- """
-
- save_dict = {}
- save_dict['guidance_encoder'] = self.guidance_encoder.state_dict()
- save_dict['index_decoder'] = self.index_decoder.state_dict()
-
- torch.save(save_dict, save_path)
-
- def update_learning_rate(self, epoch):
- """Update the learning rate according to the configured decay schedule.
-
- Args:
- epoch (int): Current epoch number.
-
- Returns:
- float: The learning rate set for this epoch.
- """
- lr = self.optimizer.param_groups[0]['lr']
-
- if self.opt['lr_decay'] == 'step':
- lr = self.opt['lr'] * (
- self.opt['gamma']**(epoch // self.opt['step']))
- elif self.opt['lr_decay'] == 'cos':
- lr = self.opt['lr'] * (
- 1 + math.cos(math.pi * epoch / self.opt['num_epochs'])) / 2
- elif self.opt['lr_decay'] == 'linear':
- lr = self.opt['lr'] * (1 - epoch / self.opt['num_epochs'])
- elif self.opt['lr_decay'] == 'linear2exp':
- if epoch < self.opt['turning_point'] + 1:
- # learning rate decay as 95%
- # at the turning point (1 / 95% = 1.0526)
- lr = self.opt['lr'] * (
- 1 - epoch / int(self.opt['turning_point'] * 1.0526))
- else:
- lr *= self.opt['gamma']
- elif self.opt['lr_decay'] == 'schedule':
- if epoch in self.opt['schedule']:
- lr *= self.opt['gamma']
- else:
- raise ValueError('Unknown lr mode {}'.format(self.opt['lr_decay']))
- # set learning rate
- for param_group in self.optimizer.param_groups:
- param_group['lr'] = lr
-
- return lr
-
- def get_current_log(self):
- return self.log_dict
diff --git a/spaces/CVPR/lama-example/bin/extract_masks.py b/spaces/CVPR/lama-example/bin/extract_masks.py
deleted file mode 100644
index d114e0fe470595f1d2aaeeeb84b36352f65b121e..0000000000000000000000000000000000000000
--- a/spaces/CVPR/lama-example/bin/extract_masks.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import PIL.Image as Image
-import numpy as np
-import os
-
-
-def main(args):
- if not args.indir.endswith('/'):
- args.indir += '/'
- os.makedirs(args.outdir, exist_ok=True)
-
- src_images = [
- args.indir+fname for fname in os.listdir(args.indir)]
-
- tgt_masks = [
- args.outdir + fname[:-4] + '_mask000.png'
- for fname in os.listdir(args.indir)]
-
- for img_name, msk_name in zip(src_images, tgt_masks):
- #print(img)
- #print(msk)
-
- image = Image.open(img_name).convert('RGB')
- image = np.transpose(np.array(image), (2, 0, 1))
-
- mask = (image == 255).astype(int)
-
- print(mask.dtype, mask.shape)
-
-
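- # save only the first channel of the boolean mask as an 8-bit grayscale PNG (255 = masked pixel)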
- Image.fromarray(
- np.clip(mask[0,:,:] * 255, 0, 255).astype('uint8'),mode='L'
- ).save(msk_name)
-
-
-
-
- '''
- for infile in src_images:
- try:
- file_relpath = infile[len(indir):]
- img_outpath = os.path.join(outdir, file_relpath)
- os.makedirs(os.path.dirname(img_outpath), exist_ok=True)
-
- image = Image.open(infile).convert('RGB')
-
- mask =
-
- Image.fromarray(
- np.clip(
- cur_mask * 255, 0, 255).astype('uint8'),
- mode='L'
- ).save(cur_basename + f'_mask{i:03d}.png')
- '''
-
-
-
-if __name__ == '__main__':
- import argparse
- aparser = argparse.ArgumentParser()
- aparser.add_argument('--indir', type=str, help='Path to folder with images')
- aparser.add_argument('--outdir', type=str, help='Path to folder to store aligned images and masks to')
-
- main(aparser.parse_args())
diff --git a/spaces/CVPR/monoscene_lite/fusion.py b/spaces/CVPR/monoscene_lite/fusion.py
deleted file mode 100644
index aecd5cba3b1e3dd1e0534cda347eca8956657926..0000000000000000000000000000000000000000
--- a/spaces/CVPR/monoscene_lite/fusion.py
+++ /dev/null
@@ -1,507 +0,0 @@
-"""
-Most of the code is taken from https://github.com/andyzeng/tsdf-fusion-python/blob/master/fusion.py
-
-@inproceedings{zeng20163dmatch,
- title={3DMatch: Learning Local Geometric Descriptors from RGB-D Reconstructions},
- author={Zeng, Andy and Song, Shuran and Nie{\ss}ner, Matthias and Fisher, Matthew and Xiao, Jianxiong and Funkhouser, Thomas},
- booktitle={CVPR},
- year={2017}
-}
-"""
-
-import numpy as np
-
-from numba import njit, prange
-from skimage import measure
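-# NOTE: measure.marching_cubes_lewiner (used below) was removed in newer scikit-image releases (0.19+); use measure.marching_cubes there.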
-
-FUSION_GPU_MODE = 0
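-# With FUSION_GPU_MODE hard-coded to 0, only the CPU (numba/NumPy) path below is
-# exercised; the GPU branch additionally assumes PyCUDA is imported as `cuda`
-# together with `SourceModule`, which does not appear to be done in this file.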
-
-
-class TSDFVolume:
- """Volumetric TSDF Fusion of RGB-D Images."""
-
- def __init__(self, vol_bnds, voxel_size, use_gpu=True):
- """Constructor.
-
- Args:
- vol_bnds (ndarray): An ndarray of shape (3, 2). Specifies the
- xyz bounds (min/max) in meters.
- voxel_size (float): The volume discretization in meters.
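- use_gpu (bool): Use the CUDA kernel when available; otherwise fall back to the vectorized CPU implementation.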
- """
- vol_bnds = np.asarray(vol_bnds)
- assert vol_bnds.shape == (3, 2), "[!] `vol_bnds` should be of shape (3, 2)."
-
- # Define voxel volume parameters
- self._vol_bnds = vol_bnds
- self._voxel_size = float(voxel_size)
- self._trunc_margin = 5 * self._voxel_size # truncation on SDF
- # self._trunc_margin = 10 # truncation on SDF
- self._color_const = 256 * 256
-
- # Adjust volume bounds and ensure C-order contiguous
- self._vol_dim = (
- np.ceil((self._vol_bnds[:, 1] - self._vol_bnds[:, 0]) / self._voxel_size)
- .copy(order="C")
- .astype(int)
- )
- self._vol_bnds[:, 1] = self._vol_bnds[:, 0] + self._vol_dim * self._voxel_size
- self._vol_origin = self._vol_bnds[:, 0].copy(order="C").astype(np.float32)
-
- print(
- "Voxel volume size: {} x {} x {} - # points: {:,}".format(
- self._vol_dim[0],
- self._vol_dim[1],
- self._vol_dim[2],
- self._vol_dim[0] * self._vol_dim[1] * self._vol_dim[2],
- )
- )
-
- # Initialize pointers to voxel volume in CPU memory
- self._tsdf_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
- # for computing the cumulative moving average of observations per voxel
- self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
- self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
-
- self.gpu_mode = use_gpu and FUSION_GPU_MODE
-
- # Copy voxel volumes to GPU
- if self.gpu_mode:
- self._tsdf_vol_gpu = cuda.mem_alloc(self._tsdf_vol_cpu.nbytes)
- cuda.memcpy_htod(self._tsdf_vol_gpu, self._tsdf_vol_cpu)
- self._weight_vol_gpu = cuda.mem_alloc(self._weight_vol_cpu.nbytes)
- cuda.memcpy_htod(self._weight_vol_gpu, self._weight_vol_cpu)
- self._color_vol_gpu = cuda.mem_alloc(self._color_vol_cpu.nbytes)
- cuda.memcpy_htod(self._color_vol_gpu, self._color_vol_cpu)
-
- # Cuda kernel function (C++)
- self._cuda_src_mod = SourceModule(
- """
- __global__ void integrate(float * tsdf_vol,
- float * weight_vol,
- float * color_vol,
- float * vol_dim,
- float * vol_origin,
- float * cam_intr,
- float * cam_pose,
- float * other_params,
- float * color_im,
- float * depth_im) {
- // Get voxel index
- int gpu_loop_idx = (int) other_params[0];
- int max_threads_per_block = blockDim.x;
- int block_idx = blockIdx.z*gridDim.y*gridDim.x+blockIdx.y*gridDim.x+blockIdx.x;
- int voxel_idx = gpu_loop_idx*gridDim.x*gridDim.y*gridDim.z*max_threads_per_block+block_idx*max_threads_per_block+threadIdx.x;
- int vol_dim_x = (int) vol_dim[0];
- int vol_dim_y = (int) vol_dim[1];
- int vol_dim_z = (int) vol_dim[2];
- if (voxel_idx >= vol_dim_x*vol_dim_y*vol_dim_z)
- return;
- // Get voxel grid coordinates (note: be careful when casting)
- float voxel_x = floorf(((float)voxel_idx)/((float)(vol_dim_y*vol_dim_z)));
- float voxel_y = floorf(((float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z))/((float)vol_dim_z));
- float voxel_z = (float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z-((int)voxel_y)*vol_dim_z);
- // Voxel grid coordinates to world coordinates
- float voxel_size = other_params[1];
- float pt_x = vol_origin[0]+voxel_x*voxel_size;
- float pt_y = vol_origin[1]+voxel_y*voxel_size;
- float pt_z = vol_origin[2]+voxel_z*voxel_size;
- // World coordinates to camera coordinates
- float tmp_pt_x = pt_x-cam_pose[0*4+3];
- float tmp_pt_y = pt_y-cam_pose[1*4+3];
- float tmp_pt_z = pt_z-cam_pose[2*4+3];
- float cam_pt_x = cam_pose[0*4+0]*tmp_pt_x+cam_pose[1*4+0]*tmp_pt_y+cam_pose[2*4+0]*tmp_pt_z;
- float cam_pt_y = cam_pose[0*4+1]*tmp_pt_x+cam_pose[1*4+1]*tmp_pt_y+cam_pose[2*4+1]*tmp_pt_z;
- float cam_pt_z = cam_pose[0*4+2]*tmp_pt_x+cam_pose[1*4+2]*tmp_pt_y+cam_pose[2*4+2]*tmp_pt_z;
- // Camera coordinates to image pixels
- int pixel_x = (int) roundf(cam_intr[0*3+0]*(cam_pt_x/cam_pt_z)+cam_intr[0*3+2]);
- int pixel_y = (int) roundf(cam_intr[1*3+1]*(cam_pt_y/cam_pt_z)+cam_intr[1*3+2]);
- // Skip if outside view frustum
- int im_h = (int) other_params[2];
- int im_w = (int) other_params[3];
- if (pixel_x < 0 || pixel_x >= im_w || pixel_y < 0 || pixel_y >= im_h || cam_pt_z<0)
- return;
- // Skip invalid depth
- float depth_value = depth_im[pixel_y*im_w+pixel_x];
- if (depth_value == 0)
- return;
- // Integrate TSDF
- float trunc_margin = other_params[4];
- float depth_diff = depth_value-cam_pt_z;
- if (depth_diff < -trunc_margin)
- return;
- float dist = fmin(1.0f,depth_diff/trunc_margin);
- float w_old = weight_vol[voxel_idx];
- float obs_weight = other_params[5];
- float w_new = w_old + obs_weight;
- weight_vol[voxel_idx] = w_new;
- tsdf_vol[voxel_idx] = (tsdf_vol[voxel_idx]*w_old+obs_weight*dist)/w_new;
- // Integrate color
- float old_color = color_vol[voxel_idx];
- float old_b = floorf(old_color/(256*256));
- float old_g = floorf((old_color-old_b*256*256)/256);
- float old_r = old_color-old_b*256*256-old_g*256;
- float new_color = color_im[pixel_y*im_w+pixel_x];
- float new_b = floorf(new_color/(256*256));
- float new_g = floorf((new_color-new_b*256*256)/256);
- float new_r = new_color-new_b*256*256-new_g*256;
- new_b = fmin(roundf((old_b*w_old+obs_weight*new_b)/w_new),255.0f);
- new_g = fmin(roundf((old_g*w_old+obs_weight*new_g)/w_new),255.0f);
- new_r = fmin(roundf((old_r*w_old+obs_weight*new_r)/w_new),255.0f);
- color_vol[voxel_idx] = new_b*256*256+new_g*256+new_r;
- }"""
- )
-
- self._cuda_integrate = self._cuda_src_mod.get_function("integrate")
-
- # Determine block/grid size on GPU
- gpu_dev = cuda.Device(0)
- self._max_gpu_threads_per_block = gpu_dev.MAX_THREADS_PER_BLOCK
- n_blocks = int(
- np.ceil(
- float(np.prod(self._vol_dim))
- / float(self._max_gpu_threads_per_block)
- )
- )
- grid_dim_x = min(gpu_dev.MAX_GRID_DIM_X, int(np.floor(np.cbrt(n_blocks))))
- grid_dim_y = min(
- gpu_dev.MAX_GRID_DIM_Y, int(np.floor(np.sqrt(n_blocks / grid_dim_x)))
- )
- grid_dim_z = min(
- gpu_dev.MAX_GRID_DIM_Z,
- int(np.ceil(float(n_blocks) / float(grid_dim_x * grid_dim_y))),
- )
- self._max_gpu_grid_dim = np.array(
- [grid_dim_x, grid_dim_y, grid_dim_z]
- ).astype(int)
- self._n_gpu_loops = int(
- np.ceil(
- float(np.prod(self._vol_dim))
- / float(
- np.prod(self._max_gpu_grid_dim)
- * self._max_gpu_threads_per_block
- )
- )
- )
-
- else:
- # Get voxel grid coordinates
- xv, yv, zv = np.meshgrid(
- range(self._vol_dim[0]),
- range(self._vol_dim[1]),
- range(self._vol_dim[2]),
- indexing="ij",
- )
- self.vox_coords = (
- np.concatenate(
- [xv.reshape(1, -1), yv.reshape(1, -1), zv.reshape(1, -1)], axis=0
- )
- .astype(int)
- .T
- )
-
- @staticmethod
- @njit(parallel=True)
- def vox2world(vol_origin, vox_coords, vox_size, offsets=(0.5, 0.5, 0.5)):
- """Convert voxel grid coordinates to world coordinates."""
- vol_origin = vol_origin.astype(np.float32)
- vox_coords = vox_coords.astype(np.float32)
- # print(np.min(vox_coords))
- cam_pts = np.empty_like(vox_coords, dtype=np.float32)
-
- for i in prange(vox_coords.shape[0]):
- for j in range(3):
- cam_pts[i, j] = (
- vol_origin[j]
- + (vox_size * vox_coords[i, j])
- + vox_size * offsets[j]
- )
- return cam_pts
-
- @staticmethod
- @njit(parallel=True)
- def cam2pix(cam_pts, intr):
- """Convert camera coordinates to pixel coordinates."""
- intr = intr.astype(np.float32)
- fx, fy = intr[0, 0], intr[1, 1]
- cx, cy = intr[0, 2], intr[1, 2]
- pix = np.empty((cam_pts.shape[0], 2), dtype=np.int64)
- for i in prange(cam_pts.shape[0]):
- pix[i, 0] = int(np.round((cam_pts[i, 0] * fx / cam_pts[i, 2]) + cx))
- pix[i, 1] = int(np.round((cam_pts[i, 1] * fy / cam_pts[i, 2]) + cy))
- return pix
-
- @staticmethod
- @njit(parallel=True)
- def integrate_tsdf(tsdf_vol, dist, w_old, obs_weight):
- """Integrate the TSDF volume."""
- tsdf_vol_int = np.empty_like(tsdf_vol, dtype=np.float32)
- # print(tsdf_vol.shape)
- w_new = np.empty_like(w_old, dtype=np.float32)
- for i in prange(len(tsdf_vol)):
- w_new[i] = w_old[i] + obs_weight
- tsdf_vol_int[i] = (w_old[i] * tsdf_vol[i] + obs_weight * dist[i]) / w_new[i]
- return tsdf_vol_int, w_new
-
- def integrate(self, color_im, depth_im, cam_intr, cam_pose, obs_weight=1.0):
- """Integrate an RGB-D frame into the TSDF volume.
-
- Args:
- color_im (ndarray): An RGB image of shape (H, W, 3).
- depth_im (ndarray): A depth image of shape (H, W).
- cam_intr (ndarray): The camera intrinsics matrix of shape (3, 3).
- cam_pose (ndarray): The camera pose (i.e. extrinsics) of shape (4, 4).
- obs_weight (float): The weight to assign to the current observation. A higher value gives this observation more influence in the per-voxel running average.
- """
- im_h, im_w = depth_im.shape
-
- # Fold RGB color image into a single channel image
- color_im = color_im.astype(np.float32)
- color_im = np.floor(
- color_im[..., 2] * self._color_const
- + color_im[..., 1] * 256
- + color_im[..., 0]
- )
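- # packs the three channels into a single float: c2*65536 + c1*256 + c0; the same scheme is unpacked when blending per-voxel colors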
-
- if self.gpu_mode: # GPU mode: integrate voxel volume (calls CUDA kernel)
- for gpu_loop_idx in range(self._n_gpu_loops):
- self._cuda_integrate(
- self._tsdf_vol_gpu,
- self._weight_vol_gpu,
- self._color_vol_gpu,
- cuda.InOut(self._vol_dim.astype(np.float32)),
- cuda.InOut(self._vol_origin.astype(np.float32)),
- cuda.InOut(cam_intr.reshape(-1).astype(np.float32)),
- cuda.InOut(cam_pose.reshape(-1).astype(np.float32)),
- cuda.InOut(
- np.asarray(
- [
- gpu_loop_idx,
- self._voxel_size,
- im_h,
- im_w,
- self._trunc_margin,
- obs_weight,
- ],
- np.float32,
- )
- ),
- cuda.InOut(color_im.reshape(-1).astype(np.float32)),
- cuda.InOut(depth_im.reshape(-1).astype(np.float32)),
- block=(self._max_gpu_threads_per_block, 1, 1),
- grid=(
- int(self._max_gpu_grid_dim[0]),
- int(self._max_gpu_grid_dim[1]),
- int(self._max_gpu_grid_dim[2]),
- ),
- )
- else: # CPU mode: integrate voxel volume (vectorized implementation)
- # Convert voxel grid coordinates to pixel coordinates
- cam_pts = self.vox2world(
- self._vol_origin, self.vox_coords, self._voxel_size
- )
- cam_pts = rigid_transform(cam_pts, np.linalg.inv(cam_pose))
- pix_z = cam_pts[:, 2]
- pix = self.cam2pix(cam_pts, cam_intr)
- pix_x, pix_y = pix[:, 0], pix[:, 1]
-
- # Eliminate pixels outside view frustum
- valid_pix = np.logical_and(
- pix_x >= 0,
- np.logical_and(
- pix_x < im_w,
- np.logical_and(pix_y >= 0, np.logical_and(pix_y < im_h, pix_z > 0)),
- ),
- )
- depth_val = np.zeros(pix_x.shape)
- depth_val[valid_pix] = depth_im[pix_y[valid_pix], pix_x[valid_pix]]
-
- # Integrate TSDF
- depth_diff = depth_val - pix_z
-
- valid_pts = np.logical_and(depth_val > 0, depth_diff >= -10)
- dist = depth_diff
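- # NOTE: unlike the CUDA kernel, which clamps to min(1, depth_diff / trunc_margin),
- # this CPU path integrates the raw depth difference with a fixed -10 cutoff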
-
- valid_vox_x = self.vox_coords[valid_pts, 0]
- valid_vox_y = self.vox_coords[valid_pts, 1]
- valid_vox_z = self.vox_coords[valid_pts, 2]
- w_old = self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
- tsdf_vals = self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
- valid_dist = dist[valid_pts]
- tsdf_vol_new, w_new = self.integrate_tsdf(
- tsdf_vals, valid_dist, w_old, obs_weight
- )
- self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = w_new
- self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = tsdf_vol_new
-
- # Integrate color
- old_color = self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
- old_b = np.floor(old_color / self._color_const)
- old_g = np.floor((old_color - old_b * self._color_const) / 256)
- old_r = old_color - old_b * self._color_const - old_g * 256
- new_color = color_im[pix_y[valid_pts], pix_x[valid_pts]]
- new_b = np.floor(new_color / self._color_const)
- new_g = np.floor((new_color - new_b * self._color_const) / 256)
- new_r = new_color - new_b * self._color_const - new_g * 256
- new_b = np.minimum(
- 255.0, np.round((w_old * old_b + obs_weight * new_b) / w_new)
- )
- new_g = np.minimum(
- 255.0, np.round((w_old * old_g + obs_weight * new_g) / w_new)
- )
- new_r = np.minimum(
- 255.0, np.round((w_old * old_r + obs_weight * new_r) / w_new)
- )
- self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = (
- new_b * self._color_const + new_g * 256 + new_r
- )
-
- def get_volume(self):
- if self.gpu_mode:
- cuda.memcpy_dtoh(self._tsdf_vol_cpu, self._tsdf_vol_gpu)
- cuda.memcpy_dtoh(self._color_vol_cpu, self._color_vol_gpu)
- return self._tsdf_vol_cpu, self._color_vol_cpu
-
- def get_point_cloud(self):
- """Extract a point cloud from the voxel volume."""
- tsdf_vol, color_vol = self.get_volume()
-
- # Marching cubes
- verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0]
- verts_ind = np.round(verts).astype(int)
- verts = verts * self._voxel_size + self._vol_origin
-
- # Get vertex colors
- rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]
- colors_b = np.floor(rgb_vals / self._color_const)
- colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)
- colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256
- colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T
- colors = colors.astype(np.uint8)
-
- pc = np.hstack([verts, colors])
- return pc
-
- def get_mesh(self):
- """Compute a mesh from the voxel volume using marching cubes."""
- tsdf_vol, color_vol = self.get_volume()
-
- # Marching cubes
- verts, faces, norms, vals = measure.marching_cubes_lewiner(tsdf_vol, level=0)
- verts_ind = np.round(verts).astype(int)
- verts = (
- verts * self._voxel_size + self._vol_origin
- ) # voxel grid coordinates to world coordinates
-
- # Get vertex colors
- rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]
- colors_b = np.floor(rgb_vals / self._color_const)
- colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)
- colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256
- colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T
- colors = colors.astype(np.uint8)
- return verts, faces, norms, colors
-
-
-def rigid_transform(xyz, transform):
- """Applies a rigid transform to an (N, 3) pointcloud."""
- xyz_h = np.hstack([xyz, np.ones((len(xyz), 1), dtype=np.float32)])
- xyz_t_h = np.dot(transform, xyz_h.T).T
- return xyz_t_h[:, :3]
-
-
-def get_view_frustum(depth_im, cam_intr, cam_pose):
- """Get corners of 3D camera view frustum of depth image"""
- im_h = depth_im.shape[0]
- im_w = depth_im.shape[1]
- max_depth = np.max(depth_im)
- view_frust_pts = np.array(
- [
- (np.array([0, 0, 0, im_w, im_w]) - cam_intr[0, 2])
- * np.array([0, max_depth, max_depth, max_depth, max_depth])
- / cam_intr[0, 0],
- (np.array([0, 0, im_h, 0, im_h]) - cam_intr[1, 2])
- * np.array([0, max_depth, max_depth, max_depth, max_depth])
- / cam_intr[1, 1],
- np.array([0, max_depth, max_depth, max_depth, max_depth]),
- ]
- )
- view_frust_pts = rigid_transform(view_frust_pts.T, cam_pose).T
- return view_frust_pts
-
-
-def meshwrite(filename, verts, faces, norms, colors):
- """Save a 3D mesh to a polygon .ply file."""
- # Write header
- ply_file = open(filename, "w")
- ply_file.write("ply\n")
- ply_file.write("format ascii 1.0\n")
- ply_file.write("element vertex %d\n" % (verts.shape[0]))
- ply_file.write("property float x\n")
- ply_file.write("property float y\n")
- ply_file.write("property float z\n")
- ply_file.write("property float nx\n")
- ply_file.write("property float ny\n")
- ply_file.write("property float nz\n")
- ply_file.write("property uchar red\n")
- ply_file.write("property uchar green\n")
- ply_file.write("property uchar blue\n")
- ply_file.write("element face %d\n" % (faces.shape[0]))
- ply_file.write("property list uchar int vertex_index\n")
- ply_file.write("end_header\n")
-
- # Write vertex list
- for i in range(verts.shape[0]):
- ply_file.write(
- "%f %f %f %f %f %f %d %d %d\n"
- % (
- verts[i, 0],
- verts[i, 1],
- verts[i, 2],
- norms[i, 0],
- norms[i, 1],
- norms[i, 2],
- colors[i, 0],
- colors[i, 1],
- colors[i, 2],
- )
- )
-
- # Write face list
- for i in range(faces.shape[0]):
- ply_file.write("3 %d %d %d\n" % (faces[i, 0], faces[i, 1], faces[i, 2]))
-
- ply_file.close()
-
-
-def pcwrite(filename, xyzrgb):
- """Save a point cloud to a polygon .ply file."""
- xyz = xyzrgb[:, :3]
- rgb = xyzrgb[:, 3:].astype(np.uint8)
-
- # Write header
- ply_file = open(filename, "w")
- ply_file.write("ply\n")
- ply_file.write("format ascii 1.0\n")
- ply_file.write("element vertex %d\n" % (xyz.shape[0]))
- ply_file.write("property float x\n")
- ply_file.write("property float y\n")
- ply_file.write("property float z\n")
- ply_file.write("property uchar red\n")
- ply_file.write("property uchar green\n")
- ply_file.write("property uchar blue\n")
- ply_file.write("end_header\n")
-
- # Write vertex list
- for i in range(xyz.shape[0]):
- ply_file.write(
- "%f %f %f %d %d %d\n"
- % (
- xyz[i, 0],
- xyz[i, 1],
- xyz[i, 2],
- rgb[i, 0],
- rgb[i, 1],
- rgb[i, 2],
- )
- )
diff --git a/spaces/CassBunny/anything-v3.0/utils.py b/spaces/CassBunny/anything-v3.0/utils.py
deleted file mode 100644
index ff1c065d186347ca51b47d010a697dbe1814695c..0000000000000000000000000000000000000000
--- a/spaces/CassBunny/anything-v3.0/utils.py
+++ /dev/null
@@ -1,6 +0,0 @@
-def is_google_colab():
- try:
- import google.colab # noqa: F401 (presence of the module indicates a Colab runtime)
- return True
- except ImportError:
- return False
\ No newline at end of file
diff --git a/spaces/ChandraMohanNayal/AutoGPT/tests/context.py b/spaces/ChandraMohanNayal/AutoGPT/tests/context.py
deleted file mode 100644
index cef969db69ab189109b935bba9ed06696cf5337a..0000000000000000000000000000000000000000
--- a/spaces/ChandraMohanNayal/AutoGPT/tests/context.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import os
-import sys
-
-sys.path.insert(
- 0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../scripts"))
-)
diff --git a/spaces/Cletrason/cloudqi-cqi_text_to_image_pt_v0/app.py b/spaces/Cletrason/cloudqi-cqi_text_to_image_pt_v0/app.py
deleted file mode 100644
index e917a619dd584c32f6866a2a3bc9d550760af235..0000000000000000000000000000000000000000
--- a/spaces/Cletrason/cloudqi-cqi_text_to_image_pt_v0/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/cloudqi/cqi_text_to_image_pt_v0").launch()
\ No newline at end of file
diff --git a/spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_result_data_processing.py b/spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_result_data_processing.py
deleted file mode 100644
index fdd50925ceb8b15c9b0b814629b3f419dc47c428..0000000000000000000000000000000000000000
--- a/spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_result_data_processing.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import unittest
-from result_data_processor import ResultDataProcessor
-import pandas as pd
-
-class TestResultDataProcessor(unittest.TestCase):
-
- def setUp(self):
- self.processor = ResultDataProcessor()
-
- # check that the result is a pandas dataframe
- def test_process_data(self):
- data = self.processor.data
- self.assertIsInstance(data, pd.DataFrame)
-
- # check that pandas dataframe has the right columns
- def test_columns(self):
- data = self.processor.data
- self.assertIn('Parameters', data.columns)
- self.assertIn('MMLU_average', data.columns)
- # check number of columns
- self.assertEqual(len(data.columns), 64)
-
- # check that the number of rows is correct
- def test_rows(self):
- data = self.processor.data
- self.assertEqual(len(data), 998)
-
- # check that mc1 column exists
- def test_mc1(self):
- data = self.processor.data
- self.assertIn('harness|truthfulqa:mc1', data.columns)
-
- # check that no column is named exactly 'truthfulqa:mc'
- def test_truthfulqa_mc(self):
- data = self.processor.data
- self.assertNotIn('truthfulqa:mc', data.columns)
-
- # check for extreme outliers in mc1 column
- def test_mc1_outliers(self):
- data = self.processor.data
- mc1 = data['harness|truthfulqa:mc1']
- self.assertLess(mc1.max(), 1.0)
- self.assertGreater(mc1.min(), 0.0)
-
-
- # test that a column named organization exists
- def test_organization(self):
- data = self.processor.data
- self.assertIn('organization', data.columns)
-
-if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/subset/cff.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/subset/cff.py
deleted file mode 100644
index dd79f6db37a482891b6f151159ef4c9b89475b8e..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/subset/cff.py
+++ /dev/null
@@ -1,536 +0,0 @@
-from fontTools.misc import psCharStrings
-from fontTools import ttLib
-from fontTools.pens.basePen import NullPen
-from fontTools.misc.roundTools import otRound
-from fontTools.misc.loggingTools import deprecateFunction
-from fontTools.subset.util import _add_method, _uniq_sort
-
-
-class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
- def __init__(self, components, localSubrs, globalSubrs):
- psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
- self.components = components
-
- def op_endchar(self, index):
- args = self.popall()
- if len(args) >= 4:
- from fontTools.encodings.StandardEncoding import StandardEncoding
-
- # endchar can do seac accent building; the T2 spec says it's deprecated,
- # but recent software that shall remain nameless does output it.
- adx, ady, bchar, achar = args[-4:]
- baseGlyph = StandardEncoding[bchar]
- accentGlyph = StandardEncoding[achar]
- self.components.add(baseGlyph)
- self.components.add(accentGlyph)
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def closure_glyphs(self, s):
- cff = self.cff
- assert len(cff) == 1
- font = cff[cff.keys()[0]]
- glyphSet = font.CharStrings
-
- decompose = s.glyphs
- while decompose:
- components = set()
- for g in decompose:
- if g not in glyphSet:
- continue
- gl = glyphSet[g]
-
- subrs = getattr(gl.private, "Subrs", [])
- decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs)
- decompiler.execute(gl)
- components -= s.glyphs
- s.glyphs.update(components)
- decompose = components
-
-
-def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
- c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
- if isCFF2 or ignoreWidth:
- # CFF2 charstrings have no widths nor 'endchar' operators
- c.setProgram([] if isCFF2 else ["endchar"])
- else:
- if hasattr(font, "FDArray") and font.FDArray is not None:
- private = font.FDArray[fdSelectIndex].Private
- else:
- private = font.Private
- dfltWdX = private.defaultWidthX
- nmnlWdX = private.nominalWidthX
- pen = NullPen()
- c.draw(pen) # this will set the charstring's width
- if c.width != dfltWdX:
- c.program = [c.width - nmnlWdX, "endchar"]
- else:
- c.program = ["endchar"]
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def prune_pre_subset(self, font, options):
- cff = self.cff
- # CFF table must have one font only
- cff.fontNames = cff.fontNames[:1]
-
- if options.notdef_glyph and not options.notdef_outline:
- isCFF2 = cff.major > 1
- for fontname in cff.keys():
- font = cff[fontname]
- _empty_charstring(font, ".notdef", isCFF2=isCFF2)
-
- # Clear useless Encoding
- for fontname in cff.keys():
- font = cff[fontname]
- # https://github.com/fonttools/fonttools/issues/620
- font.Encoding = "StandardEncoding"
-
- return True # bool(cff.fontNames)
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def subset_glyphs(self, s):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
-
- glyphs = s.glyphs.union(s.glyphs_emptied)
-
- # Load all glyphs
- for g in font.charset:
- if g not in glyphs:
- continue
- c, _ = cs.getItemAndSelector(g)
-
- if cs.charStringsAreIndexed:
- indices = [i for i, g in enumerate(font.charset) if g in glyphs]
- csi = cs.charStringsIndex
- csi.items = [csi.items[i] for i in indices]
- del csi.file, csi.offsets
- if hasattr(font, "FDSelect"):
- sel = font.FDSelect
- sel.format = None
- sel.gidArray = [sel.gidArray[i] for i in indices]
- newCharStrings = {}
- for indicesIdx, charsetIdx in enumerate(indices):
- g = font.charset[charsetIdx]
- if g in cs.charStrings:
- newCharStrings[g] = indicesIdx
- cs.charStrings = newCharStrings
- else:
- cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs}
- font.charset = [g for g in font.charset if g in glyphs]
- font.numGlyphs = len(font.charset)
-
- if s.options.retain_gids:
- isCFF2 = cff.major > 1
- for g in s.glyphs_emptied:
- _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
-
- return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
-
-
-@_add_method(psCharStrings.T2CharString)
-def subset_subroutines(self, subrs, gsubrs):
- p = self.program
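- # callsubr/callgsubr operands are stored with the CFF subroutine bias applied,
- # so each old biased index is mapped to its position in the retained (_used)
- # list and then re-biased for the subsetted subroutine list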
- for i in range(1, len(p)):
- if p[i] == "callsubr":
- assert isinstance(p[i - 1], int)
- p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
- elif p[i] == "callgsubr":
- assert isinstance(p[i - 1], int)
- p[i - 1] = (
- gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
- )
-
-
-@_add_method(psCharStrings.T2CharString)
-def drop_hints(self):
- hints = self._hints
-
- if hints.deletions:
- p = self.program
- for idx in reversed(hints.deletions):
- del p[idx - 2 : idx]
-
- if hints.has_hint:
- assert not hints.deletions or hints.last_hint <= hints.deletions[0]
- self.program = self.program[hints.last_hint :]
- if not self.program:
- # TODO CFF2 no need for endchar.
- self.program.append("endchar")
- if hasattr(self, "width"):
- # Insert width back if needed
- if self.width != self.private.defaultWidthX:
- # For CFF2 charstrings, this should never happen
- assert (
- self.private.defaultWidthX is not None
- ), "CFF2 CharStrings must not have an initial width value"
- self.program.insert(0, self.width - self.private.nominalWidthX)
-
- if hints.has_hintmask:
- i = 0
- p = self.program
- while i < len(p):
- if p[i] in ["hintmask", "cntrmask"]:
- assert i + 1 <= len(p)
- del p[i : i + 2]
- continue
- i += 1
-
- assert len(self.program)
-
- del self._hints
-
-
-class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
- def __init__(self, localSubrs, globalSubrs, private):
- psCharStrings.SimpleT2Decompiler.__init__(
- self, localSubrs, globalSubrs, private
- )
- for subrs in [localSubrs, globalSubrs]:
- if subrs and not hasattr(subrs, "_used"):
- subrs._used = set()
-
- def op_callsubr(self, index):
- self.localSubrs._used.add(self.operandStack[-1] + self.localBias)
- psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
-
- def op_callgsubr(self, index):
- self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
- psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
-
-
-class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
- class Hints(object):
- def __init__(self):
- # Whether calling this charstring produces any hint stems
- # Note that if a charstring starts with hintmask, it will
- # have has_hint set to True, because it *might* produce an
- # implicit vstem if called under certain conditions.
- self.has_hint = False
- # Index to start at to drop all hints
- self.last_hint = 0
- # Index up to which we know more hints are possible.
- # Only relevant if status is 0 or 1.
- self.last_checked = 0
- # The status means:
- # 0: after dropping hints, this charstring is empty
- # 1: after dropping hints, there may be more hints
- # continuing after this, or there might be
- # other things. Not clear yet.
- # 2: no more hints possible after this charstring
- self.status = 0
- # Has hintmask instructions; not recursive
- self.has_hintmask = False
- # List of indices of calls to empty subroutines to remove.
- self.deletions = []
-
-
- def __init__(
- self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
- ):
- self._css = css
- psCharStrings.T2WidthExtractor.__init__(
- self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
- )
- self.private = private
-
- def execute(self, charString):
- old_hints = charString._hints if hasattr(charString, "_hints") else None
- charString._hints = self.Hints()
-
- psCharStrings.T2WidthExtractor.execute(self, charString)
-
- hints = charString._hints
-
- if hints.has_hint or hints.has_hintmask:
- self._css.add(charString)
-
- if hints.status != 2:
- # Check from last_check, make sure we didn't have any operators.
- for i in range(hints.last_checked, len(charString.program) - 1):
- if isinstance(charString.program[i], str):
- hints.status = 2
- break
- else:
- hints.status = 1 # There's *something* here
- hints.last_checked = len(charString.program)
-
- if old_hints:
- assert hints.__dict__ == old_hints.__dict__
-
- def op_callsubr(self, index):
- subr = self.localSubrs[self.operandStack[-1] + self.localBias]
- psCharStrings.T2WidthExtractor.op_callsubr(self, index)
- self.processSubr(index, subr)
-
- def op_callgsubr(self, index):
- subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
- psCharStrings.T2WidthExtractor.op_callgsubr(self, index)
- self.processSubr(index, subr)
-
- def op_hstem(self, index):
- psCharStrings.T2WidthExtractor.op_hstem(self, index)
- self.processHint(index)
-
- def op_vstem(self, index):
- psCharStrings.T2WidthExtractor.op_vstem(self, index)
- self.processHint(index)
-
- def op_hstemhm(self, index):
- psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
- self.processHint(index)
-
- def op_vstemhm(self, index):
- psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
- self.processHint(index)
-
- def op_hintmask(self, index):
- rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
- self.processHintmask(index)
- return rv
-
- def op_cntrmask(self, index):
- rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
- self.processHintmask(index)
- return rv
-
- def processHintmask(self, index):
- cs = self.callingStack[-1]
- hints = cs._hints
- hints.has_hintmask = True
- if hints.status != 2:
- # Check from last_check, see if we may be an implicit vstem
- for i in range(hints.last_checked, index - 1):
- if isinstance(cs.program[i], str):
- hints.status = 2
- break
- else:
- # We are an implicit vstem
- hints.has_hint = True
- hints.last_hint = index + 1
- hints.status = 0
- hints.last_checked = index + 1
-
- def processHint(self, index):
- cs = self.callingStack[-1]
- hints = cs._hints
- hints.has_hint = True
- hints.last_hint = index
- hints.last_checked = index
-
- def processSubr(self, index, subr):
- cs = self.callingStack[-1]
- hints = cs._hints
- subr_hints = subr._hints
-
- # Check from last_check, make sure we didn't have
- # any operators.
- if hints.status != 2:
- for i in range(hints.last_checked, index - 1):
- if isinstance(cs.program[i], str):
- hints.status = 2
- break
- hints.last_checked = index
-
- if hints.status != 2:
- if subr_hints.has_hint:
- hints.has_hint = True
-
- # Decide where to chop off from
- if subr_hints.status == 0:
- hints.last_hint = index
- else:
- hints.last_hint = index - 2 # Leave the subr call in
-
- elif subr_hints.status == 0:
- hints.deletions.append(index)
-
- hints.status = max(hints.status, subr_hints.status)
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def prune_post_subset(self, ttfFont, options):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
-
- # Drop unused FontDictionaries
- if hasattr(font, "FDSelect"):
- sel = font.FDSelect
- indices = _uniq_sort(sel.gidArray)
- sel.gidArray = [indices.index(ss) for ss in sel.gidArray]
- arr = font.FDArray
- arr.items = [arr[i] for i in indices]
- del arr.file, arr.offsets
-
- # Desubroutinize if asked for
- if options.desubroutinize:
- cff.desubroutinize()
-
- # Drop hints if not needed
- if not options.hinting:
- self.remove_hints()
- elif not options.desubroutinize:
- self.remove_unused_subroutines()
- return True
-
-
-def _delete_empty_subrs(private_dict):
- if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
- if "Subrs" in private_dict.rawDict:
- del private_dict.rawDict["Subrs"]
- del private_dict.Subrs
-
-
-@deprecateFunction(
- "use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning
-)
-@_add_method(ttLib.getTableClass("CFF "))
-def desubroutinize(self):
- self.cff.desubroutinize()
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def remove_hints(self):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
- # This can be tricky, but it doesn't have to be. What we do is:
- #
- # - Run all used glyph charstrings and recurse into subroutines,
- # - For each charstring (including subroutines), if it has any
- # of the hint stem operators, we mark it as such.
- # Upon returning, for each charstring we note all the
- # subroutine calls it makes that (recursively) contain a stem,
- # - Dropping hinting then consists of the following two ops:
- # * Drop the piece of the program in each charstring before the
- # last call to a stem op or a stem-calling subroutine,
- # * Drop all hintmask operations.
- # - It's trickier... A hintmask right after hints and a few numbers
- # will act as an implicit vstemhm. As such, we track whether
- # we have seen any non-hint operators so far and do the right
- # thing, recursively... Good luck understanding that :(
- css = set()
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- c.decompile()
- subrs = getattr(c.private, "Subrs", [])
- decompiler = _DehintingT2Decompiler(
- css,
- subrs,
- c.globalSubrs,
- c.private.nominalWidthX,
- c.private.defaultWidthX,
- c.private,
- )
- decompiler.execute(c)
- c.width = decompiler.width
- for charstring in css:
- charstring.drop_hints()
- del css
-
- # Drop font-wide hinting values
- all_privs = []
- if hasattr(font, "FDArray"):
- all_privs.extend(fd.Private for fd in font.FDArray)
- else:
- all_privs.append(font.Private)
- for priv in all_privs:
- for k in [
- "BlueValues",
- "OtherBlues",
- "FamilyBlues",
- "FamilyOtherBlues",
- "BlueScale",
- "BlueShift",
- "BlueFuzz",
- "StemSnapH",
- "StemSnapV",
- "StdHW",
- "StdVW",
- "ForceBold",
- "LanguageGroup",
- "ExpansionFactor",
- ]:
- if hasattr(priv, k):
- setattr(priv, k, None)
- self.remove_unused_subroutines()
-
-
-@_add_method(ttLib.getTableClass("CFF "))
-def remove_unused_subroutines(self):
- cff = self.cff
- for fontname in cff.keys():
- font = cff[fontname]
- cs = font.CharStrings
- # Renumber subroutines to remove unused ones
-
- # Mark all used subroutines
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- subrs = getattr(c.private, "Subrs", [])
- decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
- decompiler.execute(c)
-
- all_subrs = [font.GlobalSubrs]
- if hasattr(font, "FDArray"):
- all_subrs.extend(
- fd.Private.Subrs
- for fd in font.FDArray
- if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
- )
- elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
- all_subrs.append(font.Private.Subrs)
-
- subrs = set(subrs) # Remove duplicates
-
- # Prepare
- for subrs in all_subrs:
- if not hasattr(subrs, "_used"):
- subrs._used = set()
- subrs._used = _uniq_sort(subrs._used)
- subrs._old_bias = psCharStrings.calcSubrBias(subrs)
- subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)
-
- # Renumber glyph charstrings
- for g in font.charset:
- c, _ = cs.getItemAndSelector(g)
- subrs = getattr(c.private, "Subrs", [])
- c.subset_subroutines(subrs, font.GlobalSubrs)
-
- # Renumber subroutines themselves
- for subrs in all_subrs:
- if subrs == font.GlobalSubrs:
- if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
- local_subrs = font.Private.Subrs
- else:
- local_subrs = []
- else:
- local_subrs = subrs
-
- subrs.items = [subrs.items[i] for i in subrs._used]
- if hasattr(subrs, "file"):
- del subrs.file
- if hasattr(subrs, "offsets"):
- del subrs.offsets
-
- for subr in subrs.items:
- subr.subset_subroutines(local_subrs, font.GlobalSubrs)
-
- # Delete local SubrsIndex if empty
- if hasattr(font, "FDArray"):
- for fd in font.FDArray:
- _delete_empty_subrs(fd.Private)
- else:
- _delete_empty_subrs(font.Private)
-
- # Cleanup
- for subrs in all_subrs:
- del subrs._used, subrs._old_bias, subrs._new_bias
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-2908e8a9.css b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-2908e8a9.css
deleted file mode 100644
index 78067c2729600b4ee3e7e9c6442a129e8ffe9894..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-2908e8a9.css
+++ /dev/null
@@ -1 +0,0 @@
-.gradio-bokeh.svelte-1fe5ixn.svelte-1fe5ixn{display:flex;justify-content:center}.layout.svelte-1fe5ixn.svelte-1fe5ixn{display:flex;flex-direction:column;justify-content:center;align-items:center;width:var(--size-full);height:var(--size-full);color:var(--body-text-color)}.altair.svelte-1fe5ixn.svelte-1fe5ixn{display:flex;flex-direction:column;justify-content:center;align-items:center;width:var(--size-full);height:var(--size-full)}.caption.svelte-1fe5ixn.svelte-1fe5ixn{font-size:var(--text-sm)}.matplotlib.svelte-1fe5ixn img.svelte-1fe5ixn{object-fit:contain}
diff --git a/spaces/DeepDrivePL/PaddleSeg-Matting/matting/dataset/matting_dataset.py b/spaces/DeepDrivePL/PaddleSeg-Matting/matting/dataset/matting_dataset.py
deleted file mode 100644
index 37f2a56b30a539716f8ff61cf452d7af9d510960..0000000000000000000000000000000000000000
--- a/spaces/DeepDrivePL/PaddleSeg-Matting/matting/dataset/matting_dataset.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import math
-
-import cv2
-import numpy as np
-import random
-import paddle
-from paddleseg.cvlibs import manager
-
-import matting.transforms as T
-
-
-@manager.DATASETS.add_component
-class MattingDataset(paddle.io.Dataset):
- """
- Pass in a dataset that conforms to the following format:
- matting_dataset/
- |--bg/
- |
- |--train/
- | |--fg/
- | |--alpha/
- |
- |--val/
- | |--fg/
- | |--alpha/
- | |--trimap/ (if existing)
- |
- |--train.txt
- |
- |--val.txt
- See README.md for more information of dataset.
-
- Args:
- dataset_root(str): The root path of dataset.
- transforms(list): Transforms for image.
- mode (str, optional): which part of dataset to use. it is one of ('train', 'val', 'trainval'). Default: 'train'.
- train_file (str|list, optional): File list is used to train. It should be `foreground_image.png background_image.png`
- or `foreground_image.png`. It shold be provided if mode equal to 'train'. Default: None.
- val_file (str|list, optional): File list is used to evaluation. It should be `foreground_image.png background_image.png`
- or `foreground_image.png` or ``foreground_image.png background_image.png trimap_image.png`.
- It shold be provided if mode equal to 'val'. Default: None.
- get_trimap (bool, optional): Whether to get triamp. Default: True.
- separator (str, optional): The separator of train_file or val_file. If file name contains ' ', '|' may be perfect. Default: ' '.
- """
-
- def __init__(self,
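- # Illustrative file-list lines (paths are hypothetical, relative to dataset_root):
- # train.txt: train/fg/00001.jpg bg/00001.jpg
- # val.txt: val/fg/00001.jpg bg/00001.jpg val/trimap/00001.png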
- dataset_root,
- transforms,
- mode='train',
- train_file=None,
- val_file=None,
- get_trimap=True,
- separator=' '):
- super().__init__()
- self.dataset_root = dataset_root
- self.transforms = T.Compose(transforms)
- self.mode = mode
- self.get_trimap = get_trimap
- self.separator = separator
-
- # check file
- if mode == 'train' or mode == 'trainval':
- if train_file is None:
- raise ValueError(
- "When `mode` is 'train' or 'trainval', `train_file` must be provided!"
- )
- if isinstance(train_file, str):
- train_file = [train_file]
- file_list = train_file
-
- if mode == 'val' or mode == 'trainval':
- if val_file is None:
- raise ValueError(
- "When `mode` is 'val' or 'trainval', `val_file` must be provided!"
- )
- if isinstance(val_file, str):
- val_file = [val_file]
- file_list = val_file
-
- if mode == 'trainval':
- file_list = train_file + val_file
-
- # read file
- self.fg_bg_list = []
- for file in file_list:
- file = os.path.join(dataset_root, file)
- with open(file, 'r') as f:
- lines = f.readlines()
- for line in lines:
- line = line.strip()
- self.fg_bg_list.append(line)
-
- def __getitem__(self, idx):
- data = {}
- fg_bg_file = self.fg_bg_list[idx]
- fg_bg_file = fg_bg_file.split(self.separator)
- data['img_name'] = fg_bg_file[0] # used when saving prediction results
- fg_file = os.path.join(self.dataset_root, fg_bg_file[0])
- alpha_file = fg_file.replace('/fg', '/alpha')
- fg = cv2.imread(fg_file)
- alpha = cv2.imread(alpha_file, 0)
- data['alpha'] = alpha
- data['gt_fields'] = []
-
- # line is: fg [bg] [trimap]
- if len(fg_bg_file) >= 2:
- bg_file = os.path.join(self.dataset_root, fg_bg_file[1])
- bg = cv2.imread(bg_file)
- data['img'], data['bg'] = self.composite(fg, alpha, bg)
- data['fg'] = fg
- if self.mode in ['train', 'trainval']:
- data['gt_fields'].append('fg')
- data['gt_fields'].append('bg')
- data['gt_fields'].append('alpha')
- if len(fg_bg_file) == 3 and self.get_trimap:
- if self.mode == 'val':
- trimap_path = os.path.join(self.dataset_root, fg_bg_file[2])
- if os.path.exists(trimap_path):
- data['trimap'] = trimap_path
- data['gt_fields'].append('trimap')
- data['ori_trimap'] = cv2.imread(trimap_path, 0)
- else:
- raise FileNotFoundError(
- 'trimap is not Found: {}'.format(fg_bg_file[2]))
- else:
- data['img'] = fg
- if self.mode in ['train', 'trainval']:
- data['fg'] = fg.copy()
- data['bg'] = fg.copy()
- data['gt_fields'].append('fg')
- data['gt_fields'].append('bg')
- data['gt_fields'].append('alpha')
-
- data['trans_info'] = [] # Record shape change information
-
- # Generate trimap from alpha if no trimap file provided
- if self.get_trimap:
- if 'trimap' not in data:
- data['trimap'] = self.gen_trimap(
- data['alpha'], mode=self.mode).astype('float32')
- data['gt_fields'].append('trimap')
- if self.mode == 'val':
- data['ori_trimap'] = data['trimap'].copy()
-
- data = self.transforms(data)
-
- # When evaluation, gt should not be transforms.
- if self.mode == 'val':
- data['gt_fields'].append('alpha')
-
- data['img'] = data['img'].astype('float32')
- for key in data.get('gt_fields', []):
- data[key] = data[key].astype('float32')
-
- if 'trimap' in data:
- data['trimap'] = data['trimap'][np.newaxis, :, :]
- if 'ori_trimap' in data:
- data['ori_trimap'] = data['ori_trimap'][np.newaxis, :, :]
-
- data['alpha'] = data['alpha'][np.newaxis, :, :] / 255.
-
- return data
-
- def __len__(self):
- return len(self.fg_bg_list)
-
- def composite(self, fg, alpha, ori_bg):
- fg_h, fg_w = fg.shape[:2]
- ori_bg_h, ori_bg_w = ori_bg.shape[:2]
-
- wratio = fg_w / ori_bg_w
- hratio = fg_h / ori_bg_h
- ratio = wratio if wratio > hratio else hratio
-
- # Resize ori_bg if it is smaller than fg.
- if ratio > 1:
- resize_h = math.ceil(ori_bg_h * ratio)
- resize_w = math.ceil(ori_bg_w * ratio)
- bg = cv2.resize(
- ori_bg, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR)
- else:
- bg = ori_bg
-
- bg = bg[0:fg_h, 0:fg_w, :]
- alpha = alpha / 255
- alpha = np.expand_dims(alpha, axis=2)
- image = alpha * fg + (1 - alpha) * bg
- image = image.astype(np.uint8)
- return image, bg
-
- @staticmethod
- def gen_trimap(alpha, mode='train', eval_kernel=7):
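- # Trimap convention: 255 = definite foreground, 0 = definite background,
- # 128 = unknown band around the alpha matte boundary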
- if mode == 'train':
- k_size = random.choice(range(2, 5))
- iterations = np.random.randint(5, 15)
- kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
- (k_size, k_size))
- dilated = cv2.dilate(alpha, kernel, iterations=iterations)
- eroded = cv2.erode(alpha, kernel, iterations=iterations)
- trimap = np.zeros(alpha.shape)
- trimap.fill(128)
- trimap[eroded > 254.5] = 255
- trimap[dilated < 0.5] = 0
- else:
- k_size = eval_kernel
- kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
- (k_size, k_size))
- dilated = cv2.dilate(alpha, kernel)
- trimap = np.zeros(alpha.shape)
- trimap.fill(128)
- trimap[alpha >= 250] = 255
- trimap[dilated <= 5] = 0
-
- return trimap
diff --git a/spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/resnet.py b/spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/resnet.py
deleted file mode 100644
index ea5fdf82fafa3058c5f00074d55fbb1e584d5865..0000000000000000000000000000000000000000
--- a/spaces/Dinoking/Guccio-AI-Designer/netdissect/upsegmodel/resnet.py
+++ /dev/null
@@ -1,235 +0,0 @@
-import os
-import sys
-import torch
-import torch.nn as nn
-import math
-try:
- from lib.nn import SynchronizedBatchNorm2d
-except ImportError:
- from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d
-
-try:
- from urllib import urlretrieve
-except ImportError:
- from urllib.request import urlretrieve
-
-
-__all__ = ['ResNet', 'resnet50', 'resnet101']
-
-
-model_urls = {
- 'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth',
- 'resnet101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet101-imagenet.pth'
-}
-
-
-def conv3x3(in_planes, out_planes, stride=1):
- "3x3 convolution with padding"
- return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
- padding=1, bias=False)
-
-
-class BasicBlock(nn.Module):
- expansion = 1
-
- def __init__(self, inplanes, planes, stride=1, downsample=None):
- super(BasicBlock, self).__init__()
- self.conv1 = conv3x3(inplanes, planes, stride)
- self.bn1 = SynchronizedBatchNorm2d(planes)
- self.relu = nn.ReLU(inplace=True)
- self.conv2 = conv3x3(planes, planes)
- self.bn2 = SynchronizedBatchNorm2d(planes)
- self.downsample = downsample
- self.stride = stride
-
- def forward(self, x):
- residual = x
-
- out = self.conv1(x)
- out = self.bn1(out)
- out = self.relu(out)
-
- out = self.conv2(out)
- out = self.bn2(out)
-
- if self.downsample is not None:
- residual = self.downsample(x)
-
- out += residual
- out = self.relu(out)
-
- return out
-
-
-class Bottleneck(nn.Module):
- expansion = 4
-
- def __init__(self, inplanes, planes, stride=1, downsample=None):
- super(Bottleneck, self).__init__()
- self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
- self.bn1 = SynchronizedBatchNorm2d(planes)
- self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
- padding=1, bias=False)
- self.bn2 = SynchronizedBatchNorm2d(planes)
- self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
- self.bn3 = SynchronizedBatchNorm2d(planes * 4)
- self.relu = nn.ReLU(inplace=True)
- self.downsample = downsample
- self.stride = stride
-
- def forward(self, x):
- residual = x
-
- out = self.conv1(x)
- out = self.bn1(out)
- out = self.relu(out)
-
- out = self.conv2(out)
- out = self.bn2(out)
- out = self.relu(out)
-
- out = self.conv3(out)
- out = self.bn3(out)
-
- if self.downsample is not None:
- residual = self.downsample(x)
-
- out += residual
- out = self.relu(out)
-
- return out
-
-
-class ResNet(nn.Module):
-
- def __init__(self, block, layers, num_classes=1000):
- self.inplanes = 128
- super(ResNet, self).__init__()
- self.conv1 = conv3x3(3, 64, stride=2)
- self.bn1 = SynchronizedBatchNorm2d(64)
- self.relu1 = nn.ReLU(inplace=True)
- self.conv2 = conv3x3(64, 64)
- self.bn2 = SynchronizedBatchNorm2d(64)
- self.relu2 = nn.ReLU(inplace=True)
- self.conv3 = conv3x3(64, 128)
- self.bn3 = SynchronizedBatchNorm2d(128)
- self.relu3 = nn.ReLU(inplace=True)
- self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
-
- self.layer1 = self._make_layer(block, 64, layers[0])
- self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
- self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
- self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
- self.avgpool = nn.AvgPool2d(7, stride=1)
- self.fc = nn.Linear(512 * block.expansion, num_classes)
-
- for m in self.modules():
- if isinstance(m, nn.Conv2d):
- n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
- m.weight.data.normal_(0, math.sqrt(2. / n))
- elif isinstance(m, SynchronizedBatchNorm2d):
- m.weight.data.fill_(1)
- m.bias.data.zero_()
-
- def _make_layer(self, block, planes, blocks, stride=1):
- downsample = None
- if stride != 1 or self.inplanes != planes * block.expansion:
- downsample = nn.Sequential(
- nn.Conv2d(self.inplanes, planes * block.expansion,
- kernel_size=1, stride=stride, bias=False),
- SynchronizedBatchNorm2d(planes * block.expansion),
- )
-
- layers = []
- layers.append(block(self.inplanes, planes, stride, downsample))
- self.inplanes = planes * block.expansion
- for i in range(1, blocks):
- layers.append(block(self.inplanes, planes))
-
- return nn.Sequential(*layers)
-
- def forward(self, x):
- x = self.relu1(self.bn1(self.conv1(x)))
- x = self.relu2(self.bn2(self.conv2(x)))
- x = self.relu3(self.bn3(self.conv3(x)))
- x = self.maxpool(x)
-
- x = self.layer1(x)
- x = self.layer2(x)
- x = self.layer3(x)
- x = self.layer4(x)
-
- x = self.avgpool(x)
- x = x.view(x.size(0), -1)
- x = self.fc(x)
-
- return x
-
-'''
-def resnet18(pretrained=False, **kwargs):
- """Constructs a ResNet-18 model.
-
- Args:
- pretrained (bool): If True, returns a model pre-trained on Places
- """
- model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
- if pretrained:
- model.load_state_dict(load_url(model_urls['resnet18']))
- return model
-
-
-def resnet34(pretrained=False, **kwargs):
- """Constructs a ResNet-34 model.
-
- Args:
- pretrained (bool): If True, returns a model pre-trained on Places
- """
- model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
- if pretrained:
- model.load_state_dict(load_url(model_urls['resnet34']))
- return model
-'''
-
-def resnet50(pretrained=False, **kwargs):
- """Constructs a ResNet-50 model.
-
- Args:
- pretrained (bool): If True, returns a model pre-trained on Places
- """
- model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
- if pretrained:
- model.load_state_dict(load_url(model_urls['resnet50']), strict=False)
- return model
-
-
-def resnet101(pretrained=False, **kwargs):
- """Constructs a ResNet-101 model.
-
- Args:
- pretrained (bool): If True, returns a model pre-trained on Places
- """
- model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
- if pretrained:
- model.load_state_dict(load_url(model_urls['resnet101']), strict=False)
- return model
-
-# def resnet152(pretrained=False, **kwargs):
-# """Constructs a ResNet-152 model.
-#
-# Args:
-# pretrained (bool): If True, returns a model pre-trained on Places
-# """
-# model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
-# if pretrained:
-# model.load_state_dict(load_url(model_urls['resnet152']))
-# return model
-
-def load_url(url, model_dir='./pretrained', map_location=None):
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- filename = url.split('/')[-1]
- cached_file = os.path.join(model_dir, filename)
- if not os.path.exists(cached_file):
- sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
- urlretrieve(url, cached_file)
- return torch.load(cached_file, map_location=map_location)
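
# Usage sketch (annotation, not part of the deleted file): building the
# deep-stem ResNet-50 defined above; pretrained=True downloads the scene-parsing
# checkpoint listed in model_urls into ./pretrained through load_url().
import torch

net = resnet50(pretrained=False)  # set pretrained=True to fetch the checkpoint
net.eval()
with torch.no_grad():
    logits = net(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000])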
diff --git a/spaces/DragGan/DragGan-Inversion/gradio_utils/__init__.py b/spaces/DragGan/DragGan-Inversion/gradio_utils/__init__.py
deleted file mode 100644
index 6a54920c53b4373690fd0ca59ee59159d33d1f92..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/gradio_utils/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from .utils import (ImageMask, draw_mask_on_image, draw_points_on_image,
- get_latest_points_pair, get_valid_mask,
- on_change_single_global_state)
-
-__all__ = [
- 'draw_mask_on_image', 'draw_points_on_image',
- 'on_change_single_global_state', 'get_latest_points_pair',
- 'get_valid_mask', 'ImageMask'
-]
diff --git a/spaces/DragGan/DragGan/stylegan_human/dnnlib/tflib/ops/__init__.py b/spaces/DragGan/DragGan/stylegan_human/dnnlib/tflib/ops/__init__.py
deleted file mode 100644
index 4ee9b7dd51bb69639724580f167b2eac39666266..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan/stylegan_human/dnnlib/tflib/ops/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
-#
-# This work is made available under the Nvidia Source Code License-NC.
-# To view a copy of this license, visit
-# https://nvlabs.github.io/stylegan2/license.html
-
-# empty
diff --git a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/STrack.h b/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/STrack.h
deleted file mode 100644
index 752cbefa8f7f7f4f0aff08e0e28ff036afe7d61a..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/STrack.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#pragma once
-
-#include <opencv2/opencv.hpp>
-#include "kalmanFilter.h"
-
-using namespace cv;
-using namespace std;
-
-enum TrackState { New = 0, Tracked, Lost, Removed };
-
-class STrack
-{
-public:
- STrack(vector<float> tlwh_, float score);
- ~STrack();
-
- vector<float> static tlbr_to_tlwh(vector<float> &tlbr);
- void static multi_predict(vector<STrack*> &stracks, byte_kalman::KalmanFilter &kalman_filter);
- void static_tlwh();
- void static_tlbr();
- vector<float> tlwh_to_xyah(vector<float> tlwh_tmp);
- vector<float> to_xyah();
- void mark_lost();
- void mark_removed();
- int next_id();
- int end_frame();
-
- void activate(byte_kalman::KalmanFilter &kalman_filter, int frame_id);
- void re_activate(STrack &new_track, int frame_id, bool new_id = false);
- void update(STrack &new_track, int frame_id);
-
-public:
- bool is_activated;
- int track_id;
- int state;
-
- vector<float> _tlwh;
- vector<float> tlwh;
- vector<float> tlbr;
- int frame_id;
- int tracklet_len;
- int start_frame;
-
- KAL_MEAN mean;
- KAL_COVA covariance;
- float score;
-
-private:
- byte_kalman::KalmanFilter kalman_filter;
-};
\ No newline at end of file
diff --git a/spaces/ECCV2022/bytetrack/tools/track_motdt.py b/spaces/ECCV2022/bytetrack/tools/track_motdt.py
deleted file mode 100644
index 303815dca938c66147ac0cfd301bb7bb11e240ae..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/tools/track_motdt.py
+++ /dev/null
@@ -1,293 +0,0 @@
-from loguru import logger
-
-import torch
-import torch.backends.cudnn as cudnn
-from torch.nn.parallel import DistributedDataParallel as DDP
-
-from yolox.core import launch
-from yolox.exp import get_exp
-from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger
-from yolox.evaluators import MOTEvaluator
-
-import argparse
-import os
-import random
-import warnings
-import glob
-import motmetrics as mm
-from collections import OrderedDict
-from pathlib import Path
-
-
-def make_parser():
- parser = argparse.ArgumentParser("YOLOX Eval")
- parser.add_argument("-expn", "--experiment-name", type=str, default=None)
- parser.add_argument("-n", "--name", type=str, default=None, help="model name")
-
- # distributed
- parser.add_argument(
- "--dist-backend", default="nccl", type=str, help="distributed backend"
- )
- parser.add_argument(
- "--dist-url",
- default=None,
- type=str,
- help="url used to set up distributed training",
- )
- parser.add_argument("-b", "--batch-size", type=int, default=64, help="batch size")
- parser.add_argument(
- "-d", "--devices", default=None, type=int, help="device for training"
- )
- parser.add_argument(
- "--local_rank", default=0, type=int, help="local rank for dist training"
- )
- parser.add_argument(
- "--num_machines", default=1, type=int, help="num of node for training"
- )
- parser.add_argument(
- "--machine_rank", default=0, type=int, help="node rank for multi-node training"
- )
- parser.add_argument(
- "-f",
- "--exp_file",
- default=None,
- type=str,
- help="please input your experiment description file",
- )
- parser.add_argument(
- "--fp16",
- dest="fp16",
- default=False,
- action="store_true",
- help="Adopting mix precision evaluating.",
- )
- parser.add_argument(
- "--fuse",
- dest="fuse",
- default=False,
- action="store_true",
- help="Fuse conv and bn for testing.",
- )
- parser.add_argument(
- "--trt",
- dest="trt",
- default=False,
- action="store_true",
- help="Using TensorRT model for testing.",
- )
- parser.add_argument(
- "--test",
- dest="test",
- default=False,
- action="store_true",
- help="Evaluating on test-dev set.",
- )
- parser.add_argument(
- "--speed",
- dest="speed",
- default=False,
- action="store_true",
- help="speed test only.",
- )
- parser.add_argument(
- "opts",
- help="Modify config options using the command-line",
- default=None,
- nargs=argparse.REMAINDER,
- )
- # det args
- parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
- parser.add_argument("--conf", default=0.1, type=float, help="test conf")
- parser.add_argument("--nms", default=0.7, type=float, help="test nms threshold")
- parser.add_argument("--tsize", default=None, type=int, help="test img size")
- parser.add_argument("--seed", default=None, type=int, help="eval seed")
- # tracking args
- parser.add_argument("--track_thresh", type=float, default=0.6, help="tracking confidence threshold")
- parser.add_argument("--track_buffer", type=int, default=30, help="the frames for keep lost tracks")
- parser.add_argument("--match_thresh", type=int, default=0.9, help="matching threshold for tracking")
- parser.add_argument('--min-box-area', type=float, default=100, help='filter out tiny boxes')
- # deepsort args
- parser.add_argument("--model_folder", type=str, default='pretrained/googlenet_part8_all_xavier_ckpt_56.h5', help="reid model folder")
- return parser
-
-
-def compare_dataframes(gts, ts):
- accs = []
- names = []
- for k, tsacc in ts.items():
- if k in gts:
- logger.info('Comparing {}...'.format(k))
- accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5))
- names.append(k)
- else:
- logger.warning('No ground truth for {}, skipping.'.format(k))
-
- return accs, names
-
-
-@logger.catch
-def main(exp, args, num_gpu):
- if args.seed is not None:
- random.seed(args.seed)
- torch.manual_seed(args.seed)
- cudnn.deterministic = True
- warnings.warn(
- "You have chosen to seed testing. This will turn on the CUDNN deterministic setting, which can slow down testing considerably."
- )
-
- is_distributed = num_gpu > 1
-
- # set environment variables for distributed training
- cudnn.benchmark = True
-
- rank = args.local_rank
- # rank = get_local_rank()
-
- file_name = os.path.join(exp.output_dir, args.experiment_name)
-
- if rank == 0:
- os.makedirs(file_name, exist_ok=True)
-
- results_folder = os.path.join(file_name, "track_results_motdt")
- os.makedirs(results_folder, exist_ok=True)
- model_folder = args.model_folder
-
- setup_logger(file_name, distributed_rank=rank, filename="val_log.txt", mode="a")
- logger.info("Args: {}".format(args))
-
- if args.conf is not None:
- exp.test_conf = args.conf
- if args.nms is not None:
- exp.nmsthre = args.nms
- if args.tsize is not None:
- exp.test_size = (args.tsize, args.tsize)
-
- model = exp.get_model()
- logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))
- #logger.info("Model Structure:\n{}".format(str(model)))
-
- #evaluator = exp.get_evaluator(args.batch_size, is_distributed, args.test)
-
- val_loader = exp.get_eval_loader(args.batch_size, is_distributed, args.test)
- evaluator = MOTEvaluator(
- args=args,
- dataloader=val_loader,
- img_size=exp.test_size,
- confthre=exp.test_conf,
- nmsthre=exp.nmsthre,
- num_classes=exp.num_classes,
- )
-
- torch.cuda.set_device(rank)
- model.cuda(rank)
- model.eval()
-
- if not args.speed and not args.trt:
- if args.ckpt is None:
- ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
- else:
- ckpt_file = args.ckpt
- logger.info("loading checkpoint")
- loc = "cuda:{}".format(rank)
- ckpt = torch.load(ckpt_file, map_location=loc)
- # load the model state dict
- model.load_state_dict(ckpt["model"])
- logger.info("loaded checkpoint done.")
-
- if is_distributed:
- model = DDP(model, device_ids=[rank])
-
- if args.fuse:
- logger.info("\tFusing model...")
- model = fuse_model(model)
-
- if args.trt:
- assert (
- not args.fuse and not is_distributed and args.batch_size == 1
- ), "TensorRT model does not support model fusing or distributed inference!"
- trt_file = os.path.join(file_name, "model_trt.pth")
- assert os.path.exists(
- trt_file
- ), "TensorRT model is not found!\n Run tools/trt.py first!"
- model.head.decode_in_inference = False
- decoder = model.head.decode_outputs
- else:
- trt_file = None
- decoder = None
-
- # start evaluate
- *_, summary = evaluator.evaluate_motdt(
- model, is_distributed, args.fp16, trt_file, decoder, exp.test_size, results_folder, model_folder
- )
- logger.info("\n" + summary)
-
- # evaluate MOTA
- mm.lap.default_solver = 'lap'
-
- gt_type = '_val_half'
- #gt_type = ''
- print('gt_type', gt_type)
- gtfiles = glob.glob(
- os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type)))
- print('gt_files', gtfiles)
- tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if not os.path.basename(f).startswith('eval')]
-
- logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
- logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
- logger.info('Default LAP solver \'{}\''.format(mm.lap.default_solver))
- logger.info('Loading files.')
-
- gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles])
- ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1)) for f in tsfiles])
-
- mh = mm.metrics.create()
- accs, names = compare_dataframes(gt, ts)
-
- logger.info('Running metrics')
- metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked',
- 'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses',
- 'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects']
- summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
- # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)
- # print(mm.io.render_summary(
- # summary, formatters=mh.formatters,
- # namemap=mm.io.motchallenge_metric_names))
- div_dict = {
- 'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'],
- 'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']}
- for divisor in div_dict:
- for divided in div_dict[divisor]:
- summary[divided] = (summary[divided] / summary[divisor])
- fmt = mh.formatters
- change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked',
- 'partially_tracked', 'mostly_lost']
- for k in change_fmt_list:
- fmt[k] = fmt['mota']
- print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names))
-
- metrics = mm.metrics.motchallenge_metrics + ['num_objects']
- summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
- print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))
- logger.info('Completed')
-
-
-if __name__ == "__main__":
- args = make_parser().parse_args()
- exp = get_exp(args.exp_file, args.name)
- exp.merge(args.opts)
-
- if not args.experiment_name:
- args.experiment_name = exp.exp_name
-
- num_gpu = torch.cuda.device_count() if args.devices is None else args.devices
- assert num_gpu <= torch.cuda.device_count()
-
- launch(
- main,
- num_gpu,
- args.num_machines,
- args.machine_rank,
- backend=args.dist_backend,
- dist_url=args.dist_url,
- args=(exp, args, num_gpu),
- )
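
# Usage sketch (annotation, not part of the deleted file): the standalone MOT
# metrics step run at the end of main(), assuming MOT-format ground truth and
# tracker result files; both glob patterns below are illustrative placeholders.
import glob
import os
from collections import OrderedDict
from pathlib import Path

import motmetrics as mm

gtfiles = glob.glob('datasets/mot/train/*/gt/gt_val_half.txt')
tsfiles = glob.glob('YOLOX_outputs/example/track_results_motdt/*.txt')
gt = OrderedDict(
    (Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles)
ts = OrderedDict(
    (os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1))
    for f in tsfiles)
accs, names = compare_dataframes(gt, ts)
mh = mm.metrics.create()
summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics,
                          generate_overall=True)
print(mm.io.render_summary(summary, formatters=mh.formatters,
                           namemap=mm.io.motchallenge_metric_names))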
diff --git a/spaces/Eddycrack864/Applio-Inference/i18n.py b/spaces/Eddycrack864/Applio-Inference/i18n.py
deleted file mode 100644
index b958c6f7244c4b920e097a9a9e67e81990d03f59..0000000000000000000000000000000000000000
--- a/spaces/Eddycrack864/Applio-Inference/i18n.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import json
-
-def load_language_list(language):
- try:
- with open(f"./i18n/locale/{language}.json", "r", encoding="utf-8") as f:
- return json.load(f)
- except FileNotFoundError:
- raise FileNotFoundError(
- f"Failed to load language file for {language}. Check if the correct .json file exists."
- )
-
-
-class I18nAuto:
- """
- A class used for internationalization using JSON language files.
-
- Examples
- --------
- >>> i18n = I18nAuto('en_US')
- >>> i18n.print()
- Using Language: en_US
- """
- def __init__(self, language=None):
- from locale import getdefaultlocale
- language = language or getdefaultlocale()[0]
- if not self._language_exists(language):
- language = "en_US"
-
- self.language_map = load_language_list(language)
- self.language = language
-
- @staticmethod
- def _language_exists(language):
- from os.path import exists
- return exists(f"./i18n/locale/{language}.json")
-
- def __call__(self, key):
- """Returns the translation of the given key if it exists, else returns the key itself."""
- return self.language_map.get(key, key)
-
- def print(self):
- """Prints the language currently in use."""
- print(f"Using Language: {self.language}")
\ No newline at end of file
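
# Usage sketch (annotation, not part of the deleted file): translating UI
# strings with I18nAuto; it falls back to en_US when no locale file matches.
i18n = I18nAuto()
i18n.print()             # e.g. "Using Language: en_US"
print(i18n("Some key"))  # unknown keys are returned unchanged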
diff --git a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_537238KB.py b/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_537238KB.py
deleted file mode 100644
index a1bb530e006482704f234c2e739a695174142941..0000000000000000000000000000000000000000
--- a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_537238KB.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import torch
-import numpy as np
-from torch import nn
-import torch.nn.functional as F
-
-from . import layers_537238KB as layers
-
-
-class BaseASPPNet(nn.Module):
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
- super(BaseASPPNet, self).__init__()
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
- def __call__(self, x):
- h, e1 = self.enc1(x)
- h, e2 = self.enc2(h)
- h, e3 = self.enc3(h)
- h, e4 = self.enc4(h)
-
- h = self.aspp(h)
-
- h = self.dec4(h, e4)
- h = self.dec3(h, e3)
- h = self.dec2(h, e2)
- h = self.dec1(h, e1)
-
- return h
-
-
-class CascadedASPPNet(nn.Module):
- def __init__(self, n_fft):
- super(CascadedASPPNet, self).__init__()
- self.stg1_low_band_net = BaseASPPNet(2, 64)
- self.stg1_high_band_net = BaseASPPNet(2, 64)
-
- self.stg2_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
- self.stg2_full_band_net = BaseASPPNet(32, 64)
-
- self.stg3_bridge = layers.Conv2DBNActiv(130, 64, 1, 1, 0)
- self.stg3_full_band_net = BaseASPPNet(64, 128)
-
- self.out = nn.Conv2d(128, 2, 1, bias=False)
- self.aux1_out = nn.Conv2d(64, 2, 1, bias=False)
- self.aux2_out = nn.Conv2d(64, 2, 1, bias=False)
-
- self.max_bin = n_fft // 2
- self.output_bin = n_fft // 2 + 1
-
- self.offset = 128
-
- def forward(self, x, aggressiveness=None):
- mix = x.detach()
- x = x.clone()
-
- x = x[:, :, : self.max_bin]
-
- bandw = x.size()[2] // 2
- aux1 = torch.cat(
- [
- self.stg1_low_band_net(x[:, :, :bandw]),
- self.stg1_high_band_net(x[:, :, bandw:]),
- ],
- dim=2,
- )
-
- h = torch.cat([x, aux1], dim=1)
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
- h = torch.cat([x, aux1, aux2], dim=1)
- h = self.stg3_full_band_net(self.stg3_bridge(h))
-
- mask = torch.sigmoid(self.out(h))
- mask = F.pad(
- input=mask,
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
- mode="replicate",
- )
-
- if self.training:
- aux1 = torch.sigmoid(self.aux1_out(aux1))
- aux1 = F.pad(
- input=aux1,
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
- mode="replicate",
- )
- aux2 = torch.sigmoid(self.aux2_out(aux2))
- aux2 = F.pad(
- input=aux2,
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
- mode="replicate",
- )
- return mask * mix, aux1 * mix, aux2 * mix
- else:
- if aggressiveness:
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
- mask[:, :, : aggressiveness["split_bin"]],
- 1 + aggressiveness["value"] / 3,
- )
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
- mask[:, :, aggressiveness["split_bin"] :],
- 1 + aggressiveness["value"],
- )
-
- return mask * mix
-
- def predict(self, x_mag, aggressiveness=None):
- h = self.forward(x_mag, aggressiveness)
-
- if self.offset > 0:
- h = h[:, :, :, self.offset : -self.offset]
- assert h.size()[3] > 0
-
- return h
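
# Usage sketch (annotation, not part of the deleted file): running
# CascadedASPPNet on a dummy magnitude spectrogram of shape
# (batch, 2, n_fft // 2 + 1, frames); the aggressiveness values are illustrative.
import torch

n_fft = 2048
net = CascadedASPPNet(n_fft).eval()
x_mag = torch.rand(1, 2, n_fft // 2 + 1, 1024)
with torch.no_grad():
    pred = net.predict(x_mag, aggressiveness={'split_bin': 85, 'value': 0.3})
print(pred.shape)  # roughly (1, 2, n_fft // 2 + 1, 1024 - 2 * net.offset)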
diff --git a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_new.py b/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_new.py
deleted file mode 100644
index bfaf72e48b31cc1130f2892b0973c9aa06f195a3..0000000000000000000000000000000000000000
--- a/spaces/Eddycrack864/Applio-Inference/lib/uvr5_pack/lib_v5/nets_new.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-from . import layers_new
-
-
-class BaseNet(nn.Module):
- def __init__(
- self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6))
- ):
- super(BaseNet, self).__init__()
- self.enc1 = layers_new.Conv2DBNActiv(nin, nout, 3, 1, 1)
- self.enc2 = layers_new.Encoder(nout, nout * 2, 3, 2, 1)
- self.enc3 = layers_new.Encoder(nout * 2, nout * 4, 3, 2, 1)
- self.enc4 = layers_new.Encoder(nout * 4, nout * 6, 3, 2, 1)
- self.enc5 = layers_new.Encoder(nout * 6, nout * 8, 3, 2, 1)
-
- self.aspp = layers_new.ASPPModule(nout * 8, nout * 8, dilations, dropout=True)
-
- self.dec4 = layers_new.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1)
- self.dec3 = layers_new.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1)
- self.dec2 = layers_new.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1)
- self.lstm_dec2 = layers_new.LSTMModule(nout * 2, nin_lstm, nout_lstm)
- self.dec1 = layers_new.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1)
-
- def __call__(self, x):
- e1 = self.enc1(x)
- e2 = self.enc2(e1)
- e3 = self.enc3(e2)
- e4 = self.enc4(e3)
- e5 = self.enc5(e4)
-
- h = self.aspp(e5)
-
- h = self.dec4(h, e4)
- h = self.dec3(h, e3)
- h = self.dec2(h, e2)
- h = torch.cat([h, self.lstm_dec2(h)], dim=1)
- h = self.dec1(h, e1)
-
- return h
-
-
-class CascadedNet(nn.Module):
- def __init__(self, n_fft, nout=32, nout_lstm=128):
- super(CascadedNet, self).__init__()
-
- self.max_bin = n_fft // 2
- self.output_bin = n_fft // 2 + 1
- self.nin_lstm = self.max_bin // 2
- self.offset = 64
-
- self.stg1_low_band_net = nn.Sequential(
- BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm),
- layers_new.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0),
- )
-
- self.stg1_high_band_net = BaseNet(
- 2, nout // 4, self.nin_lstm // 2, nout_lstm // 2
- )
-
- self.stg2_low_band_net = nn.Sequential(
- BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm),
- layers_new.Conv2DBNActiv(nout, nout // 2, 1, 1, 0),
- )
- self.stg2_high_band_net = BaseNet(
- nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2
- )
-
- self.stg3_full_band_net = BaseNet(
- 3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm
- )
-
- self.out = nn.Conv2d(nout, 2, 1, bias=False)
- self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False)
-
- def forward(self, x):
- x = x[:, :, : self.max_bin]
-
- bandw = x.size()[2] // 2
- l1_in = x[:, :, :bandw]
- h1_in = x[:, :, bandw:]
- l1 = self.stg1_low_band_net(l1_in)
- h1 = self.stg1_high_band_net(h1_in)
- aux1 = torch.cat([l1, h1], dim=2)
-
- l2_in = torch.cat([l1_in, l1], dim=1)
- h2_in = torch.cat([h1_in, h1], dim=1)
- l2 = self.stg2_low_band_net(l2_in)
- h2 = self.stg2_high_band_net(h2_in)
- aux2 = torch.cat([l2, h2], dim=2)
-
- f3_in = torch.cat([x, aux1, aux2], dim=1)
- f3 = self.stg3_full_band_net(f3_in)
-
- mask = torch.sigmoid(self.out(f3))
- mask = F.pad(
- input=mask,
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
- mode="replicate",
- )
-
- if self.training:
- aux = torch.cat([aux1, aux2], dim=1)
- aux = torch.sigmoid(self.aux_out(aux))
- aux = F.pad(
- input=aux,
- pad=(0, 0, 0, self.output_bin - aux.size()[2]),
- mode="replicate",
- )
- return mask, aux
- else:
- return mask
-
- def predict_mask(self, x):
- mask = self.forward(x)
-
- if self.offset > 0:
- mask = mask[:, :, :, self.offset : -self.offset]
- assert mask.size()[3] > 0
-
- return mask
-
- def predict(self, x, aggressiveness=None):
- mask = self.forward(x)
- pred_mag = x * mask
-
- if self.offset > 0:
- pred_mag = pred_mag[:, :, :, self.offset : -self.offset]
- assert pred_mag.size()[3] > 0
-
- return pred_mag
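
# Usage sketch (annotation, not part of the deleted file): mask prediction with
# CascadedNet on a dummy magnitude spectrogram; predict_mask() and predict()
# both trim self.offset frames from each side of the output.
import torch

n_fft = 2048
net = CascadedNet(n_fft, nout=32, nout_lstm=128).eval()
x = torch.rand(1, 2, n_fft // 2 + 1, 512)
with torch.no_grad():
    mask = net.predict_mask(x)
    vocals_mag = net.predict(x)  # equivalent to x * mask, then cropped
print(mask.shape, vocals_mag.shape)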
diff --git a/spaces/EuroPython2022/BayesCap/networks_SRGAN.py b/spaces/EuroPython2022/BayesCap/networks_SRGAN.py
deleted file mode 100644
index cd8a30dd8deecde53f527fb81c91b78409abc390..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/BayesCap/networks_SRGAN.py
+++ /dev/null
@@ -1,347 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision.models as models
-from torch import Tensor
-
-# __all__ = [
-# "ResidualConvBlock",
-# "Discriminator", "Generator",
-# ]
-
-
-class ResidualConvBlock(nn.Module):
- """Implements residual conv function.
-
- Args:
- channels (int): Number of channels in the input image.
- """
-
- def __init__(self, channels: int) -> None:
- super(ResidualConvBlock, self).__init__()
- self.rcb = nn.Sequential(
- nn.Conv2d(channels, channels, (3, 3), (1, 1), (1, 1), bias=False),
- nn.BatchNorm2d(channels),
- nn.PReLU(),
- nn.Conv2d(channels, channels, (3, 3), (1, 1), (1, 1), bias=False),
- nn.BatchNorm2d(channels),
- )
-
- def forward(self, x: Tensor) -> Tensor:
- identity = x
-
- out = self.rcb(x)
- out = torch.add(out, identity)
-
- return out
-
-
-class Discriminator(nn.Module):
- def __init__(self) -> None:
- super(Discriminator, self).__init__()
- self.features = nn.Sequential(
- # input size. (3) x 96 x 96
- nn.Conv2d(3, 64, (3, 3), (1, 1), (1, 1), bias=False),
- nn.LeakyReLU(0.2, True),
- # state size. (64) x 48 x 48
- nn.Conv2d(64, 64, (3, 3), (2, 2), (1, 1), bias=False),
- nn.BatchNorm2d(64),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(64, 128, (3, 3), (1, 1), (1, 1), bias=False),
- nn.BatchNorm2d(128),
- nn.LeakyReLU(0.2, True),
- # state size. (128) x 24 x 24
- nn.Conv2d(128, 128, (3, 3), (2, 2), (1, 1), bias=False),
- nn.BatchNorm2d(128),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(128, 256, (3, 3), (1, 1), (1, 1), bias=False),
- nn.BatchNorm2d(256),
- nn.LeakyReLU(0.2, True),
- # state size. (256) x 12 x 12
- nn.Conv2d(256, 256, (3, 3), (2, 2), (1, 1), bias=False),
- nn.BatchNorm2d(256),
- nn.LeakyReLU(0.2, True),
- nn.Conv2d(256, 512, (3, 3), (1, 1), (1, 1), bias=False),
- nn.BatchNorm2d(512),
- nn.LeakyReLU(0.2, True),
- # state size. (512) x 6 x 6
- nn.Conv2d(512, 512, (3, 3), (2, 2), (1, 1), bias=False),
- nn.BatchNorm2d(512),
- nn.LeakyReLU(0.2, True),
- )
-
- self.classifier = nn.Sequential(
- nn.Linear(512 * 6 * 6, 1024),
- nn.LeakyReLU(0.2, True),
- nn.Linear(1024, 1),
- )
-
- def forward(self, x: Tensor) -> Tensor:
- out = self.features(x)
- out = torch.flatten(out, 1)
- out = self.classifier(out)
-
- return out
-
-
-class Generator(nn.Module):
- def __init__(self) -> None:
- super(Generator, self).__init__()
- # First conv layer.
- self.conv_block1 = nn.Sequential(
- nn.Conv2d(3, 64, (9, 9), (1, 1), (4, 4)),
- nn.PReLU(),
- )
-
- # Features trunk blocks.
- trunk = []
- for _ in range(16):
- trunk.append(ResidualConvBlock(64))
- self.trunk = nn.Sequential(*trunk)
-
- # Second conv layer.
- self.conv_block2 = nn.Sequential(
- nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1), bias=False),
- nn.BatchNorm2d(64),
- )
-
- # Upscale conv block.
- self.upsampling = nn.Sequential(
- nn.Conv2d(64, 256, (3, 3), (1, 1), (1, 1)),
- nn.PixelShuffle(2),
- nn.PReLU(),
- nn.Conv2d(64, 256, (3, 3), (1, 1), (1, 1)),
- nn.PixelShuffle(2),
- nn.PReLU(),
- )
-
- # Output layer.
- self.conv_block3 = nn.Conv2d(64, 3, (9, 9), (1, 1), (4, 4))
-
- # Initialize neural network weights.
- self._initialize_weights()
-
- def forward(self, x: Tensor, dop=None) -> Tensor:
- if not dop:
- return self._forward_impl(x)
- else:
- return self._forward_w_dop_impl(x, dop)
-
- # Support torch.script function.
- def _forward_impl(self, x: Tensor) -> Tensor:
- out1 = self.conv_block1(x)
- out = self.trunk(out1)
- out2 = self.conv_block2(out)
- out = torch.add(out1, out2)
- out = self.upsampling(out)
- out = self.conv_block3(out)
-
- return out
-
- def _forward_w_dop_impl(self, x: Tensor, dop) -> Tensor:
- out1 = self.conv_block1(x)
- out = self.trunk(out1)
- out2 = F.dropout2d(self.conv_block2(out), p=dop)
- out = torch.add(out1, out2)
- out = self.upsampling(out)
- out = self.conv_block3(out)
-
- return out
-
- def _initialize_weights(self) -> None:
- for module in self.modules():
- if isinstance(module, nn.Conv2d):
- nn.init.kaiming_normal_(module.weight)
- if module.bias is not None:
- nn.init.constant_(module.bias, 0)
- elif isinstance(module, nn.BatchNorm2d):
- nn.init.constant_(module.weight, 1)
-
-
-#### BayesCap
-class BayesCap(nn.Module):
- def __init__(self, in_channels=3, out_channels=3) -> None:
- super(BayesCap, self).__init__()
- # First conv layer.
- self.conv_block1 = nn.Sequential(
- nn.Conv2d(
- in_channels, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- )
-
- # Features trunk blocks.
- trunk = []
- for _ in range(16):
- trunk.append(ResidualConvBlock(64))
- self.trunk = nn.Sequential(*trunk)
-
- # Second conv layer.
- self.conv_block2 = nn.Sequential(
- nn.Conv2d(
- 64, 64,
- kernel_size=3, stride=1, padding=1, bias=False
- ),
- nn.BatchNorm2d(64),
- )
-
- # Output layer.
- self.conv_block3_mu = nn.Conv2d(
- 64, out_channels=out_channels,
- kernel_size=9, stride=1, padding=4
- )
- self.conv_block3_alpha = nn.Sequential(
- nn.Conv2d(
- 64, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- nn.Conv2d(
- 64, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- nn.Conv2d(
- 64, 1,
- kernel_size=9, stride=1, padding=4
- ),
- nn.ReLU(),
- )
- self.conv_block3_beta = nn.Sequential(
- nn.Conv2d(
- 64, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- nn.Conv2d(
- 64, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- nn.Conv2d(
- 64, 1,
- kernel_size=9, stride=1, padding=4
- ),
- nn.ReLU(),
- )
-
- # Initialize neural network weights.
- self._initialize_weights()
-
- def forward(self, x: Tensor) -> Tensor:
- return self._forward_impl(x)
-
- # Support torch.script function.
- def _forward_impl(self, x: Tensor) -> Tensor:
- out1 = self.conv_block1(x)
- out = self.trunk(out1)
- out2 = self.conv_block2(out)
- out = out1 + out2
- out_mu = self.conv_block3_mu(out)
- out_alpha = self.conv_block3_alpha(out)
- out_beta = self.conv_block3_beta(out)
- return out_mu, out_alpha, out_beta
-
- def _initialize_weights(self) -> None:
- for module in self.modules():
- if isinstance(module, nn.Conv2d):
- nn.init.kaiming_normal_(module.weight)
- if module.bias is not None:
- nn.init.constant_(module.bias, 0)
- elif isinstance(module, nn.BatchNorm2d):
- nn.init.constant_(module.weight, 1)
-
-
-class BayesCap_noID(nn.Module):
- def __init__(self, in_channels=3, out_channels=3) -> None:
- super(BayesCap_noID, self).__init__()
- # First conv layer.
- self.conv_block1 = nn.Sequential(
- nn.Conv2d(
- in_channels, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- )
-
- # Features trunk blocks.
- trunk = []
- for _ in range(16):
- trunk.append(ResidualConvBlock(64))
- self.trunk = nn.Sequential(*trunk)
-
- # Second conv layer.
- self.conv_block2 = nn.Sequential(
- nn.Conv2d(
- 64, 64,
- kernel_size=3, stride=1, padding=1, bias=False
- ),
- nn.BatchNorm2d(64),
- )
-
- # Output layer.
- # self.conv_block3_mu = nn.Conv2d(
- # 64, out_channels=out_channels,
- # kernel_size=9, stride=1, padding=4
- # )
- self.conv_block3_alpha = nn.Sequential(
- nn.Conv2d(
- 64, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- nn.Conv2d(
- 64, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- nn.Conv2d(
- 64, 1,
- kernel_size=9, stride=1, padding=4
- ),
- nn.ReLU(),
- )
- self.conv_block3_beta = nn.Sequential(
- nn.Conv2d(
- 64, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- nn.Conv2d(
- 64, 64,
- kernel_size=9, stride=1, padding=4
- ),
- nn.PReLU(),
- nn.Conv2d(
- 64, 1,
- kernel_size=9, stride=1, padding=4
- ),
- nn.ReLU(),
- )
-
- # Initialize neural network weights.
- self._initialize_weights()
-
- def forward(self, x: Tensor) -> Tensor:
- return self._forward_impl(x)
-
- # Support torch.script function.
- def _forward_impl(self, x: Tensor) -> Tensor:
- out1 = self.conv_block1(x)
- out = self.trunk(out1)
- out2 = self.conv_block2(out)
- out = out1 + out2
- # out_mu = self.conv_block3_mu(out)
- out_alpha = self.conv_block3_alpha(out)
- out_beta = self.conv_block3_beta(out)
- return out_alpha, out_beta
-
- def _initialize_weights(self) -> None:
- for module in self.modules():
- if isinstance(module, nn.Conv2d):
- nn.init.kaiming_normal_(module.weight)
- if module.bias is not None:
- nn.init.constant_(module.bias, 0)
- elif isinstance(module, nn.BatchNorm2d):
- nn.init.constant_(module.weight, 1)
\ No newline at end of file
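
# Usage sketch (annotation, not part of the deleted file): super-resolving a
# low-resolution batch with Generator and estimating per-pixel uncertainty
# parameters with BayesCap; the input size is arbitrary.
import torch

netG = Generator().eval()
netC = BayesCap(in_channels=3, out_channels=3).eval()
lr = torch.rand(1, 3, 24, 24)
with torch.no_grad():
    sr = netG(lr)               # 4x upscaling -> (1, 3, 96, 96)
    mu, alpha, beta = netC(sr)  # reconstruction plus uncertainty maps
print(sr.shape, mu.shape, alpha.shape, beta.shape)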
diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/file_client.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/file_client.py
deleted file mode 100644
index 7f38d9796da3899048924f2f803d1088927966b0..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/utils/file_client.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py # noqa: E501
-from abc import ABCMeta, abstractmethod
-
-
-class BaseStorageBackend(metaclass=ABCMeta):
- """Abstract class of storage backends.
-
- All backends need to implement two APIs: ``get()`` and ``get_text()``.
- ``get()`` reads the file as a byte stream and ``get_text()`` reads the file
- as text.
- """
-
- @abstractmethod
- def get(self, filepath):
- pass
-
- @abstractmethod
- def get_text(self, filepath):
- pass
-
-
-class MemcachedBackend(BaseStorageBackend):
- """Memcached storage backend.
-
- Attributes:
- server_list_cfg (str): Config file for memcached server list.
- client_cfg (str): Config file for memcached client.
- sys_path (str | None): Additional path to be appended to `sys.path`.
- Default: None.
- """
-
- def __init__(self, server_list_cfg, client_cfg, sys_path=None):
- if sys_path is not None:
- import sys
- sys.path.append(sys_path)
- try:
- import mc
- except ImportError:
- raise ImportError('Please install memcached to enable MemcachedBackend.')
-
- self.server_list_cfg = server_list_cfg
- self.client_cfg = client_cfg
- self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg)
- # mc.pyvector serves as a pointer to a memory cache
- self._mc_buffer = mc.pyvector()
-
- def get(self, filepath):
- filepath = str(filepath)
- import mc
- self._client.Get(filepath, self._mc_buffer)
- value_buf = mc.ConvertBuffer(self._mc_buffer)
- return value_buf
-
- def get_text(self, filepath):
- raise NotImplementedError
-
-
-class HardDiskBackend(BaseStorageBackend):
- """Raw hard disks storage backend."""
-
- def get(self, filepath):
- filepath = str(filepath)
- with open(filepath, 'rb') as f:
- value_buf = f.read()
- return value_buf
-
- def get_text(self, filepath):
- filepath = str(filepath)
- with open(filepath, 'r') as f:
- value_buf = f.read()
- return value_buf
-
-
-class LmdbBackend(BaseStorageBackend):
- """Lmdb storage backend.
-
- Args:
- db_paths (str | list[str]): Lmdb database paths.
- client_keys (str | list[str]): Lmdb client keys. Default: 'default'.
- readonly (bool, optional): Lmdb environment parameter. If True,
- disallow any write operations. Default: True.
- lock (bool, optional): Lmdb environment parameter. If False, when
- concurrent access occurs, do not lock the database. Default: False.
- readahead (bool, optional): Lmdb environment parameter. If False,
- disable the OS filesystem readahead mechanism, which may improve
- random read performance when a database is larger than RAM.
- Default: False.
-
- Attributes:
- db_paths (list): Lmdb database path.
- _client (list): A list of several lmdb envs.
- """
-
- def __init__(self, db_paths, client_keys='default', readonly=True, lock=False, readahead=False, **kwargs):
- try:
- import lmdb
- except ImportError:
- raise ImportError('Please install lmdb to enable LmdbBackend.')
-
- if isinstance(client_keys, str):
- client_keys = [client_keys]
-
- if isinstance(db_paths, list):
- self.db_paths = [str(v) for v in db_paths]
- elif isinstance(db_paths, str):
- self.db_paths = [str(db_paths)]
- assert len(client_keys) == len(self.db_paths), ('client_keys and db_paths should have the same length, '
- f'but received {len(client_keys)} and {len(self.db_paths)}.')
-
- self._client = {}
- for client, path in zip(client_keys, self.db_paths):
- self._client[client] = lmdb.open(path, readonly=readonly, lock=lock, readahead=readahead, **kwargs)
-
- def get(self, filepath, client_key):
- """Get values according to the filepath from one lmdb named client_key.
-
- Args:
- filepath (str | obj:`Path`): Here, filepath is the lmdb key.
- client_key (str): Used for distinguishing different lmdb envs.
- """
- filepath = str(filepath)
- assert client_key in self._client, (f'client_key {client_key} is not ' 'in lmdb clients.')
- client = self._client[client_key]
- with client.begin(write=False) as txn:
- value_buf = txn.get(filepath.encode('ascii'))
- return value_buf
-
- def get_text(self, filepath):
- raise NotImplementedError
-
-
-class FileClient(object):
- """A general file client to access files in different backend.
-
- The client loads a file or text from its path using the specified backend
- and returns it as a binary stream. It can also register other backend
- accessors with a given name and backend class.
-
- Attributes:
- backend (str): The storage backend type. Options are "disk",
- "memcached" and "lmdb".
- client (:obj:`BaseStorageBackend`): The backend object.
- """
-
- _backends = {
- 'disk': HardDiskBackend,
- 'memcached': MemcachedBackend,
- 'lmdb': LmdbBackend,
- }
-
- def __init__(self, backend='disk', **kwargs):
- if backend not in self._backends:
- raise ValueError(f'Backend {backend} is not supported. Currently supported ones'
- f' are {list(self._backends.keys())}')
- self.backend = backend
- self.client = self._backends[backend](**kwargs)
-
- def get(self, filepath, client_key='default'):
- # client_key is used only for lmdb, where different fileclients have
- # different lmdb environments.
- if self.backend == 'lmdb':
- return self.client.get(filepath, client_key)
- else:
- return self.client.get(filepath)
-
- def get_text(self, filepath):
- return self.client.get_text(filepath)
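
# Usage sketch (annotation, not part of the deleted file): reading an image as
# raw bytes through the default disk backend; the path is a placeholder.
import cv2
import numpy as np

client = FileClient(backend='disk')
img_bytes = client.get('inputs/example.jpg')
img = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_COLOR)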
diff --git a/spaces/FroggyQc/ehartford-WizardLM-7B-Uncensored/README.md b/spaces/FroggyQc/ehartford-WizardLM-7B-Uncensored/README.md
deleted file mode 100644
index 51aec6935dc9019a17abc3f59aad63092673a934..0000000000000000000000000000000000000000
--- a/spaces/FroggyQc/ehartford-WizardLM-7B-Uncensored/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Ehartford WizardLM 7B Uncensored
-emoji: 🐠
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/GastonMazzei/escher-inpaint-project/glide_text2im/fp16_util.py b/spaces/GastonMazzei/escher-inpaint-project/glide_text2im/fp16_util.py
deleted file mode 100644
index b69341c706f17ccf9ac9b08e966d10c630c72129..0000000000000000000000000000000000000000
--- a/spaces/GastonMazzei/escher-inpaint-project/glide_text2im/fp16_util.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-Helpers to inference with 16-bit precision.
-"""
-
-import torch.nn as nn
-
-
-def convert_module_to_f16(l):
- """
- Convert primitive modules to float16.
- """
- if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
- l.weight.data = l.weight.data.half()
- if l.bias is not None:
- l.bias.data = l.bias.data.half()
-
-
-def convert_module_to_f32(l):
- """
- Convert primitive modules to float32, undoing convert_module_to_f16().
- """
- if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
- l.weight.data = l.weight.data.float()
- if l.bias is not None:
- l.bias.data = l.bias.data.float()
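
# Usage sketch (annotation, not part of the deleted file): casting the conv
# layers of a toy model to float16 and back via nn.Module.apply().
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.SiLU(),
                      nn.Conv2d(8, 3, 3, padding=1))
model.apply(convert_module_to_f16)  # conv weights/biases become half precision
model.apply(convert_module_to_f32)  # restore float32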
diff --git a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/common/protein.py b/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/common/protein.py
deleted file mode 100644
index 2848f5bbc52d646ddc22a8f2e1c6b4d98ae1ffce..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/common/protein.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright 2021 DeepMind Technologies Limited
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Protein data type."""
-import dataclasses
-import io
-from typing import Any, Mapping, Optional
-from alphafold.common import residue_constants
-from Bio.PDB import PDBParser
-import numpy as np
-
-FeatureDict = Mapping[str, np.ndarray]
-ModelOutput = Mapping[str, Any] # Is a nested dict.
-
-
-@dataclasses.dataclass(frozen=True)
-class Protein:
- """Protein structure representation."""
-
- # Cartesian coordinates of atoms in angstroms. The atom types correspond to
- # residue_constants.atom_types, i.e. the first three are N, CA, C.
- atom_positions: np.ndarray # [num_res, num_atom_type, 3]
-
- # Amino-acid type for each residue represented as an integer between 0 and
- # 20, where 20 is 'X'.
- aatype: np.ndarray # [num_res]
-
- # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
- # is present and 0.0 if not. This should be used for loss masking.
- atom_mask: np.ndarray # [num_res, num_atom_type]
-
- # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
- residue_index: np.ndarray # [num_res]
-
- # B-factors, or temperature factors, of each residue (in sq. angstroms units),
- # representing the displacement of the residue from its ground truth mean
- # value.
- b_factors: np.ndarray # [num_res, num_atom_type]
-
-
-def from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:
- """Takes a PDB string and constructs a Protein object.
-
- WARNING: All non-standard residue types will be converted into UNK. All
- non-standard atoms will be ignored.
-
- Args:
- pdb_str: The contents of the pdb file
- chain_id: If None, then the pdb file must contain a single chain (which
- will be parsed). If chain_id is specified (e.g. A), then only that chain
- is parsed.
-
- Returns:
- A new `Protein` parsed from the pdb contents.
- """
- pdb_fh = io.StringIO(pdb_str)
- parser = PDBParser(QUIET=True)
- structure = parser.get_structure('none', pdb_fh)
- models = list(structure.get_models())
- if len(models) != 1:
- raise ValueError(
- f'Only single model PDBs are supported. Found {len(models)} models.')
- model = models[0]
-
- if chain_id is not None:
- chain = model[chain_id]
- else:
- chains = list(model.get_chains())
- if len(chains) != 1:
- raise ValueError(
- 'Only single chain PDBs are supported when chain_id not specified. '
- f'Found {len(chains)} chains.')
- else:
- chain = chains[0]
-
- atom_positions = []
- aatype = []
- atom_mask = []
- residue_index = []
- b_factors = []
-
- for res in chain:
- if res.id[2] != ' ':
- raise ValueError(
- f'PDB contains an insertion code at chain {chain.id} and residue '
- f'index {res.id[1]}. These are not supported.')
- res_shortname = residue_constants.restype_3to1.get(res.resname, 'X')
- restype_idx = residue_constants.restype_order.get(
- res_shortname, residue_constants.restype_num)
- pos = np.zeros((residue_constants.atom_type_num, 3))
- mask = np.zeros((residue_constants.atom_type_num,))
- res_b_factors = np.zeros((residue_constants.atom_type_num,))
- for atom in res:
- if atom.name not in residue_constants.atom_types:
- continue
- pos[residue_constants.atom_order[atom.name]] = atom.coord
- mask[residue_constants.atom_order[atom.name]] = 1.
- res_b_factors[residue_constants.atom_order[atom.name]] = atom.bfactor
- if np.sum(mask) < 0.5:
- # If no known atom positions are reported for the residue then skip it.
- continue
- aatype.append(restype_idx)
- atom_positions.append(pos)
- atom_mask.append(mask)
- residue_index.append(res.id[1])
- b_factors.append(res_b_factors)
-
- return Protein(
- atom_positions=np.array(atom_positions),
- atom_mask=np.array(atom_mask),
- aatype=np.array(aatype),
- residue_index=np.array(residue_index),
- b_factors=np.array(b_factors))
-
-
-def to_pdb(prot: Protein) -> str:
- """Converts a `Protein` instance to a PDB string.
-
- Args:
- prot: The protein to convert to PDB.
-
- Returns:
- PDB string.
- """
- restypes = residue_constants.restypes + ['X']
- res_1to3 = lambda r: residue_constants.restype_1to3.get(restypes[r], 'UNK')
- atom_types = residue_constants.atom_types
-
- pdb_lines = []
-
- atom_mask = prot.atom_mask
- aatype = prot.aatype
- atom_positions = prot.atom_positions
- residue_index = prot.residue_index.astype(np.int32)
- b_factors = prot.b_factors
-
- if np.any(aatype > residue_constants.restype_num):
- raise ValueError('Invalid aatypes.')
-
- pdb_lines.append('MODEL 1')
- atom_index = 1
- chain_id = 'A'
- # Add all atom sites.
- for i in range(aatype.shape[0]):
- res_name_3 = res_1to3(aatype[i])
- for atom_name, pos, mask, b_factor in zip(
- atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
- if mask < 0.5:
- continue
-
- record_type = 'ATOM'
- name = atom_name if len(atom_name) == 4 else f' {atom_name}'
- alt_loc = ''
- insertion_code = ''
- occupancy = 1.00
- element = atom_name[0] # Protein supports only C, N, O, S, this works.
- charge = ''
- # PDB is a columnar format, every space matters here!
- atom_line = (f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
- f'{res_name_3:>3} {chain_id:>1}'
- f'{residue_index[i]:>4}{insertion_code:>1} '
- f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
- f'{occupancy:>6.2f}{b_factor:>6.2f} '
- f'{element:>2}{charge:>2}')
- pdb_lines.append(atom_line)
- atom_index += 1
-
- # Close the chain.
- chain_end = 'TER'
- chain_termination_line = (
- f'{chain_end:<6}{atom_index:>5} {res_1to3(aatype[-1]):>3} '
- f'{chain_id:>1}{residue_index[-1]:>4}')
- pdb_lines.append(chain_termination_line)
- pdb_lines.append('ENDMDL')
-
- pdb_lines.append('END')
- pdb_lines.append('')
- return '\n'.join(pdb_lines)
-
-
-def ideal_atom_mask(prot: Protein) -> np.ndarray:
- """Computes an ideal atom mask.
-
- `Protein.atom_mask` typically is defined according to the atoms that are
- reported in the PDB. This function computes a mask according to heavy atoms
- that should be present in the given sequence of amino acids.
-
- Args:
- prot: `Protein` whose fields are `numpy.ndarray` objects.
-
- Returns:
- An ideal atom mask.
- """
- return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
-
-
-def from_prediction(features: FeatureDict, result: ModelOutput,
- b_factors: Optional[np.ndarray] = None) -> Protein:
- """Assembles a protein from a prediction.
-
- Args:
- features: Dictionary holding model inputs.
- result: Dictionary holding model outputs.
- b_factors: (Optional) B-factors to use for the protein.
-
- Returns:
- A protein instance.
- """
- fold_output = result['structure_module']
- if b_factors is None:
- b_factors = np.zeros_like(fold_output['final_atom_mask'])
-
- return Protein(
- aatype=features['aatype'][0],
- atom_positions=fold_output['final_atom_positions'],
- atom_mask=fold_output['final_atom_mask'],
- residue_index=features['residue_index'][0] + 1,
- b_factors=b_factors)
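
# Usage sketch (annotation, not part of the deleted file): round-tripping a
# single-chain PDB file through the Protein data type; the path is a placeholder.
with open('prediction.pdb') as f:
    prot = from_pdb_string(f.read(), chain_id='A')
print(prot.aatype.shape, prot.atom_positions.shape)  # (num_res,), (num_res, 37, 3)
pdb_str = to_pdb(prot)  # back to PDB text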
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/coder/bucketing_bbox_coder.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/coder/bucketing_bbox_coder.py
deleted file mode 100644
index 92d24b4519edece7a4af8f5cfa9af025b25f2dad..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/coder/bucketing_bbox_coder.py
+++ /dev/null
@@ -1,350 +0,0 @@
-import mmcv
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-from ..builder import BBOX_CODERS
-from ..transforms import bbox_rescale
-from .base_bbox_coder import BaseBBoxCoder
-
-
-@BBOX_CODERS.register_module()
-class BucketingBBoxCoder(BaseBBoxCoder):
- """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).
-
- Boundary Localization with Bucketing and Bucketing Guided Rescoring
- are implemented here.
-
- Please refer to https://arxiv.org/abs/1912.04260 for more details.
-
- Args:
- num_buckets (int): Number of buckets.
- scale_factor (int): Scale factor of proposals to generate buckets.
- offset_topk (int): Topk buckets are used to generate
- bucket fine regression targets. Defaults to 2.
- offset_upperbound (float): Offset upperbound to generate
- bucket fine regression targets.
- To avoid too large offset displacements. Defaults to 1.0.
- cls_ignore_neighbor (bool): Whether to ignore the second nearest bucket.
- Defaults to True.
- clip_border (bool, optional): Whether clip the objects outside the
- border of the image. Defaults to True.
- """
-
- def __init__(self,
- num_buckets,
- scale_factor,
- offset_topk=2,
- offset_upperbound=1.0,
- cls_ignore_neighbor=True,
- clip_border=True):
- super(BucketingBBoxCoder, self).__init__()
- self.num_buckets = num_buckets
- self.scale_factor = scale_factor
- self.offset_topk = offset_topk
- self.offset_upperbound = offset_upperbound
- self.cls_ignore_neighbor = cls_ignore_neighbor
- self.clip_border = clip_border
-
- def encode(self, bboxes, gt_bboxes):
- """Get bucketing estimation and fine regression targets during
- training.
-
- Args:
- bboxes (torch.Tensor): source boxes, e.g., object proposals.
- gt_bboxes (torch.Tensor): target of the transformation, e.g.,
- ground truth boxes.
-
- Returns:
- encoded_bboxes (tuple[Tensor]): Bucketing estimation
- and fine regression targets and weights.
- """
-
- assert bboxes.size(0) == gt_bboxes.size(0)
- assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
- encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
- self.scale_factor, self.offset_topk,
- self.offset_upperbound,
- self.cls_ignore_neighbor)
- return encoded_bboxes
-
- def decode(self, bboxes, pred_bboxes, max_shape=None):
- """Apply transformation `pred_bboxes` to `bboxes`.
- Args:
- bboxes (torch.Tensor): Basic boxes.
- pred_bboxes (torch.Tensor): Predictions for bucketing estimation
- and fine regression
- max_shape (tuple[int], optional): Maximum shape of boxes.
- Defaults to None.
-
- Returns:
- torch.Tensor: Decoded boxes.
- """
- assert len(pred_bboxes) == 2
- cls_preds, offset_preds = pred_bboxes
- assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(
- 0) == bboxes.size(0)
- decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,
- self.num_buckets, self.scale_factor,
- max_shape, self.clip_border)
-
- return decoded_bboxes
-
-
-@mmcv.jit(coderize=True)
-def generat_buckets(proposals, num_buckets, scale_factor=1.0):
- """Generate buckets w.r.t bucket number and scale factor of proposals.
-
- Args:
- proposals (Tensor): Shape (n, 4)
- num_buckets (int): Number of buckets.
- scale_factor (float): Scale factor to rescale proposals.
-
- Returns:
- tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,
- t_buckets, d_buckets)
-
- - bucket_w: Width of buckets on x-axis. Shape (n, ).
- - bucket_h: Height of buckets on y-axis. Shape (n, ).
- - l_buckets: Left buckets. Shape (n, ceil(side_num/2)).
- - r_buckets: Right buckets. Shape (n, ceil(side_num/2)).
- - t_buckets: Top buckets. Shape (n, ceil(side_num/2)).
- - d_buckets: Down buckets. Shape (n, ceil(side_num/2)).
- """
- proposals = bbox_rescale(proposals, scale_factor)
-
- # number of buckets in each side
- side_num = int(np.ceil(num_buckets / 2.0))
- pw = proposals[..., 2] - proposals[..., 0]
- ph = proposals[..., 3] - proposals[..., 1]
- px1 = proposals[..., 0]
- py1 = proposals[..., 1]
- px2 = proposals[..., 2]
- py2 = proposals[..., 3]
-
- bucket_w = pw / num_buckets
- bucket_h = ph / num_buckets
-
- # left buckets
- l_buckets = px1[:, None] + (0.5 + torch.arange(
- 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
- # right buckets
- r_buckets = px2[:, None] - (0.5 + torch.arange(
- 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
- # top buckets
- t_buckets = py1[:, None] + (0.5 + torch.arange(
- 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
- # down buckets
- d_buckets = py2[:, None] - (0.5 + torch.arange(
- 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
- return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets
-
-
-@mmcv.jit(coderize=True)
-def bbox2bucket(proposals,
- gt,
- num_buckets,
- scale_factor,
- offset_topk=2,
- offset_upperbound=1.0,
- cls_ignore_neighbor=True):
- """Generate buckets estimation and fine regression targets.
-
- Args:
- proposals (Tensor): Shape (n, 4)
- gt (Tensor): Shape (n, 4)
- num_buckets (int): Number of buckets.
- scale_factor (float): Scale factor to rescale proposals.
- offset_topk (int): Topk buckets are used to generate
- bucket fine regression targets. Defaults to 2.
- offset_upperbound (float): Offset allowance to generate
- bucket fine regression targets.
- To avoid too large offset displacements. Defaults to 1.0.
- cls_ignore_neighbor (bool): Whether to ignore the second nearest bucket.
- Defaults to True.
-
- Returns:
- tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).
-
- - offsets: Fine regression targets. \
- Shape (n, num_buckets*2).
- - offsets_weights: Fine regression weights. \
- Shape (n, num_buckets*2).
- - bucket_labels: Bucketing estimation labels. \
- Shape (n, num_buckets*2).
- - cls_weights: Bucketing estimation weights. \
- Shape (n, num_buckets*2).
- """
- assert proposals.size() == gt.size()
-
- # generate buckets
- proposals = proposals.float()
- gt = gt.float()
- (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,
- d_buckets) = generat_buckets(proposals, num_buckets, scale_factor)
-
- gx1 = gt[..., 0]
- gy1 = gt[..., 1]
- gx2 = gt[..., 2]
- gy2 = gt[..., 3]
-
- # generate offset targets and weights
- # offsets from buckets to gts
- l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]
- r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]
- t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]
- d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]
-
- # select the top-k nearest buckets
- l_topk, l_label = l_offsets.abs().topk(
- offset_topk, dim=1, largest=False, sorted=True)
- r_topk, r_label = r_offsets.abs().topk(
- offset_topk, dim=1, largest=False, sorted=True)
- t_topk, t_label = t_offsets.abs().topk(
- offset_topk, dim=1, largest=False, sorted=True)
- d_topk, d_label = d_offsets.abs().topk(
- offset_topk, dim=1, largest=False, sorted=True)
-
- offset_l_weights = l_offsets.new_zeros(l_offsets.size())
- offset_r_weights = r_offsets.new_zeros(r_offsets.size())
- offset_t_weights = t_offsets.new_zeros(t_offsets.size())
- offset_d_weights = d_offsets.new_zeros(d_offsets.size())
- inds = torch.arange(0, proposals.size(0)).to(proposals).long()
-
-    # generate offset weights of top-k nearest buckets
- for k in range(offset_topk):
- if k >= 1:
- offset_l_weights[inds, l_label[:,
- k]] = (l_topk[:, k] <
- offset_upperbound).float()
- offset_r_weights[inds, r_label[:,
- k]] = (r_topk[:, k] <
- offset_upperbound).float()
- offset_t_weights[inds, t_label[:,
- k]] = (t_topk[:, k] <
- offset_upperbound).float()
- offset_d_weights[inds, d_label[:,
- k]] = (d_topk[:, k] <
- offset_upperbound).float()
- else:
- offset_l_weights[inds, l_label[:, k]] = 1.0
- offset_r_weights[inds, r_label[:, k]] = 1.0
- offset_t_weights[inds, t_label[:, k]] = 1.0
- offset_d_weights[inds, d_label[:, k]] = 1.0
-
- offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)
- offsets_weights = torch.cat([
- offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights
- ],
- dim=-1)
-
- # generate bucket labels and weight
- side_num = int(np.ceil(num_buckets / 2.0))
- labels = torch.stack(
- [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)
-
- batch_size = labels.size(0)
- bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size,
- -1).float()
- bucket_cls_l_weights = (l_offsets.abs() < 1).float()
- bucket_cls_r_weights = (r_offsets.abs() < 1).float()
- bucket_cls_t_weights = (t_offsets.abs() < 1).float()
- bucket_cls_d_weights = (d_offsets.abs() < 1).float()
- bucket_cls_weights = torch.cat([
- bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights,
- bucket_cls_d_weights
- ],
- dim=-1)
- # ignore second nearest buckets for cls if necessary
- if cls_ignore_neighbor:
- bucket_cls_weights = (~((bucket_cls_weights == 1) &
- (bucket_labels == 0))).float()
- else:
- bucket_cls_weights[:] = 1.0
- return offsets, offsets_weights, bucket_labels, bucket_cls_weights
-
-
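A minimal, self-contained sketch (not part of the deleted file) of the target construction for a single side: the normalized offsets from the bucket centers to the ground-truth edge, the top-2 nearest buckets, and the resulting one-hot bucket label. The numbers continue the toy proposal from the previous sketch.

```python
# Minimal sketch: nearest-bucket label and normalized offset for one side of one box.
import torch
import torch.nn.functional as F

l_buckets = torch.tensor([5.0, 15.0, 25.0, 35.0])   # left bucket centers
bucket_w = 10.0
gx1 = torch.tensor(17.0)                             # ground-truth left edge

l_offsets = (l_buckets - gx1) / bucket_w             # tensor([-1.2, -0.2,  0.8,  1.8])
topk_vals, topk_idx = l_offsets.abs().topk(2, largest=False)
label = topk_idx[0]                                  # nearest bucket: index 1 (center 15.0)
one_hot = F.one_hot(label, num_classes=l_buckets.numel()).float()
print(l_offsets, label.item(), one_hot)              # ... 1 tensor([0., 1., 0., 0.])
```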
-@mmcv.jit(coderize=True)
-def bucket2bbox(proposals,
- cls_preds,
- offset_preds,
- num_buckets,
- scale_factor=1.0,
- max_shape=None,
- clip_border=True):
- """Apply bucketing estimation (cls preds) and fine regression (offset
- preds) to generate det bboxes.
-
- Args:
- proposals (Tensor): Boxes to be transformed. Shape (n, 4)
- cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2).
- offset_preds (Tensor): fine regression. Shape (n, num_buckets*2).
- num_buckets (int): Number of buckets.
- scale_factor (float): Scale factor to rescale proposals.
- max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)
- clip_border (bool, optional): Whether clip the objects outside the
- border of the image. Defaults to True.
-
- Returns:
- tuple[Tensor]: (bboxes, loc_confidence).
-
- - bboxes: predicted bboxes. Shape (n, 4)
- - loc_confidence: localization confidence of predicted bboxes.
- Shape (n,).
- """
-
- side_num = int(np.ceil(num_buckets / 2.0))
- cls_preds = cls_preds.view(-1, side_num)
- offset_preds = offset_preds.view(-1, side_num)
-
- scores = F.softmax(cls_preds, dim=1)
- score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)
-
- rescaled_proposals = bbox_rescale(proposals, scale_factor)
-
- pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]
- ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]
- px1 = rescaled_proposals[..., 0]
- py1 = rescaled_proposals[..., 1]
- px2 = rescaled_proposals[..., 2]
- py2 = rescaled_proposals[..., 3]
-
- bucket_w = pw / num_buckets
- bucket_h = ph / num_buckets
-
- score_inds_l = score_label[0::4, 0]
- score_inds_r = score_label[1::4, 0]
- score_inds_t = score_label[2::4, 0]
- score_inds_d = score_label[3::4, 0]
- l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w
- r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w
- t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h
- d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h
-
- offsets = offset_preds.view(-1, 4, side_num)
- inds = torch.arange(proposals.size(0)).to(proposals).long()
- l_offsets = offsets[:, 0, :][inds, score_inds_l]
- r_offsets = offsets[:, 1, :][inds, score_inds_r]
- t_offsets = offsets[:, 2, :][inds, score_inds_t]
- d_offsets = offsets[:, 3, :][inds, score_inds_d]
-
- x1 = l_buckets - l_offsets * bucket_w
- x2 = r_buckets - r_offsets * bucket_w
- y1 = t_buckets - t_offsets * bucket_h
- y2 = d_buckets - d_offsets * bucket_h
-
- if clip_border and max_shape is not None:
- x1 = x1.clamp(min=0, max=max_shape[1] - 1)
- y1 = y1.clamp(min=0, max=max_shape[0] - 1)
- x2 = x2.clamp(min=0, max=max_shape[1] - 1)
- y2 = y2.clamp(min=0, max=max_shape[0] - 1)
- bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
- dim=-1)
-
- # bucketing guided rescoring
- loc_confidence = score_topk[:, 0]
- top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1
- loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()
- loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)
-
- return bboxes, loc_confidence
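The bucketing-guided rescoring at the end of `bucket2bbox` can be illustrated in isolation; the toy `score_topk`/`score_label` values below are hypothetical, standing in for the top-2 softmax scores and bucket indices of the four sides of one proposal.

```python
# Minimal sketch: bucketing-guided rescoring for a single box.
import torch

score_topk = torch.tensor([[0.6, 0.3],   # left
                           [0.7, 0.1],   # right
                           [0.5, 0.4],   # top
                           [0.8, 0.1]])  # down
score_label = torch.tensor([[2, 3],      # top-2 buckets are neighbors -> add 2nd score
                            [0, 3],      # not neighbors -> keep only the 1st score
                            [1, 2],
                            [3, 2]])

loc_confidence = score_topk[:, 0]
top2_neighbor = (score_label[:, 0] - score_label[:, 1]).abs() == 1
loc_confidence = loc_confidence + score_topk[:, 1] * top2_neighbor.float()
loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)   # one confidence per box
print(loc_confidence)   # tensor([0.8500]) = (0.9 + 0.7 + 0.9 + 0.9) / 4
```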
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/pipelines/compose.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/pipelines/compose.py
deleted file mode 100644
index ca48f1c935755c486edc2744e1713e2b5ba3cdc8..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/pipelines/compose.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import collections
-
-from mmcv.utils import build_from_cfg
-
-from ..builder import PIPELINES
-
-
-@PIPELINES.register_module()
-class Compose(object):
- """Compose multiple transforms sequentially.
-
- Args:
-        transforms (Sequence[dict | callable]): Sequence of transform objects or
-            config dicts to be composed.
- """
-
- def __init__(self, transforms):
- assert isinstance(transforms, collections.abc.Sequence)
- self.transforms = []
- for transform in transforms:
- if isinstance(transform, dict):
- transform = build_from_cfg(transform, PIPELINES)
- self.transforms.append(transform)
- elif callable(transform):
- self.transforms.append(transform)
- else:
- raise TypeError('transform must be callable or a dict')
-
- def __call__(self, data):
- """Call function to apply transforms sequentially.
-
- Args:
- data (dict): A result dict contains the data to transform.
-
- Returns:
- dict: Transformed data.
- """
-
- for t in self.transforms:
- data = t(data)
- if data is None:
- return None
- return data
-
- def __repr__(self):
- format_string = self.__class__.__name__ + '('
- for t in self.transforms:
- format_string += '\n'
- format_string += f' {t}'
- format_string += '\n)'
- return format_string
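A minimal usage sketch (not part of the deleted file), assuming `mmseg` is installed so the import resolves; plain callables are passed through unchanged, while dict configs such as `dict(type='LoadImageFromFile')` would be built from the `PIPELINES` registry.

```python
# Minimal usage sketch for Compose with a plain callable transform.
from mmseg.datasets.pipelines import Compose

def add_flag(results):            # callables are accepted as-is by Compose
    results['flag'] = True
    return results                # returning None would abort the pipeline

pipeline = Compose([add_flag])    # dict configs like dict(type='LoadImageFromFile') also work
print(pipeline({'size': 32}))     # {'size': 32, 'flag': True}
print(pipeline)                   # __repr__ lists the composed transforms
```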
diff --git a/spaces/GrandaddyShmax/MusicGen_Plus/tests/modules/test_codebooks_patterns.py b/spaces/GrandaddyShmax/MusicGen_Plus/tests/modules/test_codebooks_patterns.py
deleted file mode 100644
index b658f4779a369f9ec8dde692a61b7f0fe3485724..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/MusicGen_Plus/tests/modules/test_codebooks_patterns.py
+++ /dev/null
@@ -1,246 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import pytest
-import torch
-
-from audiocraft.modules.codebooks_patterns import (
- DelayedPatternProvider,
- ParallelPatternProvider,
- Pattern,
- UnrolledPatternProvider,
-)
-
-
-class TestParallelPatternProvider:
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [0, 1, 16, 100])
- def test_get_pattern(self, n_q: int, timesteps: int):
- provider = ParallelPatternProvider(n_q)
- pattern = provider.get_pattern(timesteps)
- # + 1 to account for 1st step
- assert len(pattern.layout) == timesteps + 1
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [8, 16, 100])
- def test_pattern_content(self, n_q: int, timesteps: int):
- provider = ParallelPatternProvider(n_q)
- pattern = provider.get_pattern(timesteps)
- for s, v in enumerate(pattern.layout):
- for i, code in enumerate(v):
- assert i == code.q
- assert code.t == s - 1 # account for the 1st empty step
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [8, 16, 100])
- def test_pattern_max_delay(self, n_q: int, timesteps: int):
- provider = ParallelPatternProvider(n_q)
- pattern = provider.get_pattern(timesteps)
- assert pattern.max_delay == 0
- assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay
-
-
-class TestDelayedPatternProvider:
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [0, 1, 16, 100])
- def test_get_pattern(self, n_q: int, timesteps: int):
- delays = [
- list(range(n_q)),
- [0] + [1] * (n_q - 1),
- [0] + [4] * (n_q - 1),
- ]
- for delay in delays:
- provider = DelayedPatternProvider(n_q, delay)
- pattern = provider.get_pattern(timesteps)
- # + 1 to account for 1st step
- assert len(pattern.layout) == timesteps + max(delay) + 1
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [8, 16, 100])
- def test_pattern_content(self, n_q: int, timesteps: int):
- provider = DelayedPatternProvider(n_q)
- pattern = provider.get_pattern(timesteps)
- for s, v in enumerate(pattern.layout):
- for i, code in enumerate(v):
- assert i == code.q
- assert code.t == max(0, s - code.q - 1)
-
- @pytest.mark.parametrize("timesteps", [8, 16, 100])
- @pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]])
- def test_pattern_max_delay(self, timesteps: int, delay: list):
- provider = DelayedPatternProvider(len(delay), delay)
- pattern = provider.get_pattern(timesteps)
- assert pattern.max_delay == max(delay)
- assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay
-
-
-class TestUnrolledPatternProvider:
-
- @pytest.mark.parametrize("timesteps", [0, 1, 16])
- @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]])
- @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]])
- def test_get_pattern(self, timesteps: int, flattening: list, delays: list):
- n_q = len(flattening)
- max_delay = max(delays)
- provider = UnrolledPatternProvider(n_q, flattening, delays)
- pattern = provider.get_pattern(timesteps)
- assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay
-
- @pytest.mark.parametrize("timesteps", [0, 1, 16])
- @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]])
- @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]])
- def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list):
- n_q = len(flattening)
- max_delay = max(delays)
- provider = UnrolledPatternProvider(n_q, flattening, delays)
- pattern = provider.get_pattern(timesteps)
- assert pattern.max_delay == max_delay
-
-
-class TestPattern:
-
- def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int):
- """Reference method to build the sequence from the pattern without using fancy scatter."""
- bs, n_q, T = z.shape
- z = z.cpu().numpy()
- assert n_q == pattern.n_q
- assert T <= pattern.timesteps
- inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy()
- inp[:] = special_token
- for s, v in enumerate(pattern.layout):
- for (t, q) in v:
- if t < T:
- inp[:, q, s] = z[:, q, t]
- return torch.from_numpy(inp)
-
- def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int):
- """Reference method to revert the sequence from the pattern without using fancy scatter."""
- z = z.cpu().numpy()
- bs, n_q, S = z.shape
- assert pattern.n_q == n_q
- inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy()
- inp[:] = special_token
- for s, v in enumerate(pattern.layout):
- for (t, q) in v:
- if t < pattern.timesteps:
- inp[:, q, t] = z[:, q, s]
- return torch.from_numpy(inp)
-
- def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float):
- """Reference method to revert the logits from the pattern without using fancy scatter."""
- z = z.cpu().numpy()
- bs, card, n_q, S = z.shape
- assert pattern.n_q == n_q
- ref_layout = pattern.layout
- inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy()
- inp[:] = special_token
- for s, v in enumerate(ref_layout[1:]):
- if s < S:
- for (t, q) in v:
- if t < pattern.timesteps:
- inp[:, :, q, t] = z[:, :, q, s]
- return torch.from_numpy(inp)
-
- def _get_pattern_providers(self, n_q: int):
- pattern_provider_1 = ParallelPatternProvider(n_q)
- pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q)))
- pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1))
- pattern_provider_4 = UnrolledPatternProvider(
- n_q, flattening=list(range(n_q)), delays=[0] * n_q
- )
- pattern_provider_5 = UnrolledPatternProvider(
- n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q
- )
- pattern_provider_6 = UnrolledPatternProvider(
- n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1)
- )
- return [
- pattern_provider_1,
- pattern_provider_2,
- pattern_provider_3,
- pattern_provider_4,
- pattern_provider_5,
- pattern_provider_6,
- ]
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [16, 72])
- def test_build_pattern_sequence(self, n_q: int, timesteps: int):
- bs = 2
- card = 256
- special_token = card
-
- pattern_providers = self._get_pattern_providers(n_q)
- for pattern_provider in pattern_providers:
- pattern = pattern_provider.get_pattern(timesteps)
- # we can correctly build the sequence from the pattern
- z = torch.randint(0, card, (bs, n_q, timesteps))
- ref_res = self.ref_build_pattern_sequence(z, pattern, special_token)
- res, indexes, mask = pattern.build_pattern_sequence(z, special_token)
- assert (res == ref_res).float().mean() == 1.0
-
- # expected assertion fails on the number of timesteps
- invalid_timesteps = [timesteps + 1]
- if pattern.num_sequence_steps != pattern.timesteps:
- invalid_timesteps.append(pattern.num_sequence_steps)
- for i_timesteps in invalid_timesteps:
- z2 = torch.randint(0, card, (bs, n_q, i_timesteps))
- with pytest.raises(AssertionError):
- pattern.build_pattern_sequence(z2, special_token)
-
- # expected assertion fails on the number of codebooks
- invalid_qs = [0, n_q - 1, n_q + 1]
- for i_q in invalid_qs:
- z3 = torch.randint(0, card, (bs, i_q, timesteps))
- with pytest.raises(AssertionError):
- pattern.build_pattern_sequence(z3, special_token)
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [16, 72])
- def test_revert_pattern_sequence(self, n_q: int, timesteps: int):
- bs = 2
- card = 256
- special_token = card
-
- pattern_providers = self._get_pattern_providers(n_q)
- for pattern_provider in pattern_providers:
- pattern = pattern_provider.get_pattern(timesteps)
- # this works assuming previous tests are successful
- z = torch.randint(0, card, (bs, n_q, timesteps))
- s = self.ref_build_pattern_sequence(z, pattern, special_token)
- ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token)
-            # ensure our reference script retrieves the original sequence
- assert z.shape == ref_out.shape
- assert (z == ref_out).float().mean() == 1.0
- # now we can test the scatter version
- out, indexes, mask = pattern.revert_pattern_sequence(s, special_token)
- assert out.shape == ref_out.shape
- assert (out == ref_out).float().mean() == 1.0
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [16, 72])
- @pytest.mark.parametrize("card", [1, 2, 256, 1024])
- def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int):
- bs = 2
- special_token = card
- logits_special_token = float('nan')
-
- pattern_providers = self._get_pattern_providers(n_q)
- for pattern_provider in pattern_providers:
- pattern = pattern_provider.get_pattern(timesteps)
- # this works assuming previous tests are successful
- z = torch.randint(0, card, (bs, n_q, timesteps))
- s = self.ref_build_pattern_sequence(z, pattern, special_token)
- logits = torch.randn((bs, card, n_q, s.shape[-1]))
- ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token)
-            # ensure our reference script retrieves the original sequence
- assert ref_out.shape == torch.Size([bs, card, n_q, timesteps])
- # now we can test the scatter version
- out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token)
- assert out.shape == ref_out.shape
- assert (out == ref_out).float().mean() == 1.0
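A hand-built approximation (not part of the test file) of the layout relation these tests assert for `DelayedPatternProvider` with the default delays `list(range(n_q))`: codebook `q` at sequence step `s` reads timestep `t = s - q - 1`, and the layout has `timesteps + max(delay) + 1` steps. The provider itself may pad early steps differently; this sketch only illustrates the mapping.

```python
# Minimal sketch of the delayed codebook layout for n_q codebooks.
n_q, timesteps = 4, 8
layout = []
for s in range(timesteps + (n_q - 1) + 1):            # + 1 for the leading empty step
    step = [(s - q - 1, q) for q in range(n_q) if 0 <= s - q - 1 < timesteps]
    layout.append(step)

print(len(layout))   # 12 == timesteps + max(delay) + 1
print(layout[1])     # [(0, 0)] -> only codebook 0 is emitted at sequence step 1
print(layout[4])     # [(3, 0), (2, 1), (1, 2), (0, 3)] -> all codebooks active
```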
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/models/search.py b/spaces/HarryLee/eCommerceImageCaptioning/models/search.py
deleted file mode 100644
index 568612212bdbbe787c7ab64017f8170ec67619f8..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/models/search.py
+++ /dev/null
@@ -1,814 +0,0 @@
-# Copyright 2022 The OFA-Sys Team.
-# All rights reserved.
-# This source code is licensed under the Apache 2.0 license
-# found in the LICENSE file in the root directory.
-
-import math
-from typing import List, Optional
-
-import torch
-import torch.nn as nn
-from fairseq.token_generation_constraints import (
- ConstraintState,
- OrderedConstraintState,
- UnorderedConstraintState,
-)
-from torch import Tensor
-
-
-class Search(nn.Module):
- def __init__(self, tgt_dict):
- super().__init__()
- self.pad = tgt_dict.pad()
- self.unk = tgt_dict.unk()
- self.eos = tgt_dict.eos()
- self.vocab_size = len(tgt_dict)
- self.src_lengths = torch.tensor(-1)
- self.supports_constraints = False
- self.stop_on_max_len = False
-
- def step(
- self, step, lprobs, scores, prev_output_tokens=None, original_batch_idxs=None
- ):
- """Take a single search step.
-
- Args:
- step: the current search step, starting at 0
- lprobs: (bsz x input_beam_size x vocab_size)
- the model's log-probabilities over the vocabulary at the current step
- scores: (bsz x input_beam_size x step)
- the historical model scores of each hypothesis up to this point
- prev_output_tokens: (bsz x step)
-                the previously generated output tokens
- original_batch_idxs: (bsz)
- the tensor with the batch indices, in the range [0, bsz)
-                this is useful in case a re-ordering has been applied
-                and we need to know the original indices
-
- Return: A tuple of (scores, indices, beams) where:
- scores: (bsz x output_beam_size)
- the scores of the chosen elements; output_beam_size can be
- larger than input_beam_size, e.g., we may return
- 2*input_beam_size to account for EOS
- indices: (bsz x output_beam_size)
- the indices of the chosen elements
- beams: (bsz x output_beam_size)
- the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
- """
- raise NotImplementedError
-
- @torch.jit.export
- def set_src_lengths(self, src_lengths):
- self.src_lengths = src_lengths
-
- @torch.jit.export
- def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
- """Initialize constraint states for constrained decoding (if supported).
-
- Args:
- batch_constraints: (torch.Tensor, optional)
- the list of constraints, in packed form
- beam_size: (int)
- the beam size
- """
- pass
-
- def prune_sentences(self, batch_idxs: Tensor):
- """
- Removes constraint states for completed sentences (if supported).
- This is called from sequence_generator._generate() when sentences are
- deleted from the batch.
-
- Args:
- batch_idxs: Indices of *sentences* whose constraint state should be *kept*.
- """
- pass
-
- def update_constraints(self, active_hypos: Tensor):
- """
- Updates the constraint states by selecting the beam items that are retained.
-        This is called at each time step of sequence_generator._generate() when
-        the set of 2 * {beam_size} candidate hypotheses is reduced to the beam size.
-
- Args:
- active_hypos: (batch size, beam size)
- list of integers denoting, for each sentence, which beam candidate items
- should be kept.
- """
- pass
-
-
-class BeamSearch(Search):
- def __init__(self, tgt_dict):
- super().__init__(tgt_dict)
- self.constraint_states = None
-
- @torch.jit.export
- def step(
- self,
- step: int,
- lprobs,
- scores: Optional[Tensor],
- prev_output_tokens: Optional[Tensor] = None,
- original_batch_idxs: Optional[Tensor] = None,
- ):
- bsz, beam_size, vocab_size = lprobs.size()
-
- if step == 0:
- # at the first step all hypotheses are equally likely, so use
- # only the first beam
- lprobs = lprobs[:, ::beam_size, :].contiguous()
- else:
- # make probs contain cumulative scores for each hypothesis
- assert scores is not None
- lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
-
- top_prediction = torch.topk(
- lprobs.view(bsz, -1),
- k=min(
- # Take the best 2 x beam_size predictions. We'll choose the first
- # beam_size of these which don't predict eos to continue with.
- beam_size * 2,
- lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
- ),
- )
- scores_buf = top_prediction[0]
- indices_buf = top_prediction[1]
- # Project back into relative indices and beams
- beams_buf = indices_buf // vocab_size
- indices_buf = indices_buf.fmod(vocab_size)
-
- # At this point, beams_buf and indices_buf are single-dim and contain relative indices
- return scores_buf, indices_buf, beams_buf
-
-
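The flattened top-k at the end of `BeamSearch.step` recovers the originating hypothesis and token with a divide/modulo pair; a minimal sketch (not part of the deleted file):

```python
# Minimal sketch: recover (beam, token) pairs from a top-k over flattened scores.
import torch

bsz, beam_size, vocab_size = 1, 2, 5
lprobs = torch.log_softmax(torch.randn(bsz, beam_size, vocab_size), dim=-1)

scores_buf, flat_idx = torch.topk(lprobs.view(bsz, -1), k=2 * beam_size)
beams_buf = flat_idx // vocab_size       # which hypothesis each candidate came from
tokens_buf = flat_idx.fmod(vocab_size)   # which vocabulary item it extends with
print(beams_buf, tokens_buf)
```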
-class PrefixConstrainedBeamSearch(Search):
- def __init__(self, tgt_dict, prefix_allowed_tokens_fn):
- super().__init__(tgt_dict)
- self.prefix_allowed_tokens_fn = prefix_allowed_tokens_fn
- self.stop_on_max_len = True
-
- @torch.jit.export
- def apply_mask(self, x, prev_output_tokens, original_batch_idxs):
- beam_size = x.shape[0] // original_batch_idxs.shape[0]
- original_batch_idxs = (
- original_batch_idxs.unsqueeze(-1).repeat((1, beam_size)).flatten().tolist()
- )
-
- mask = torch.full_like(x, -math.inf)
- for sent_i, (sent, batch_i) in enumerate(
- zip(prev_output_tokens, original_batch_idxs)
- ):
- mask[sent_i, :, self.prefix_allowed_tokens_fn(batch_i, sent)] = 0
-
- return mask
-
- @torch.jit.export
- def step(
- self,
- step: int,
- lprobs: Tensor,
- scores: Tensor,
- prev_output_tokens: Tensor,
- original_batch_idxs: Tensor,
- ):
- bsz, beam_size, vocab_size = lprobs.size()
-
- lprobs += self.apply_mask(
- lprobs.view(bsz * beam_size, 1, vocab_size),
- prev_output_tokens,
- original_batch_idxs,
- ).view(bsz, beam_size, vocab_size)
-
- if step == 0:
- # at the first step all hypotheses are equally likely, so use
- # only the first beam
- lprobs = lprobs[:, ::beam_size, :].contiguous()
- else:
- # make probs contain cumulative scores for each hypothesis
- assert scores is not None
- lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
-
- top_prediction = torch.topk(
- lprobs.view(bsz, -1),
- k=min(
- # Take the best beam_size predictions. We'll choose the first
- # beam_size of these which don't predict eos to continue with.
- beam_size,
- lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
- ),
- )
- scores_buf = top_prediction[0]
- indices_buf = top_prediction[1]
- beams_buf = indices_buf // vocab_size
- indices_buf = indices_buf.fmod(vocab_size)
- return scores_buf, indices_buf, beams_buf
-
-
-class LexicallyConstrainedBeamSearch(Search):
- """Implements lexically constrained beam search as described in
-
- Fast Lexically Constrained Decoding with Dynamic Beam
- Allocation for Neural Machine Translation. Post & Vilar,
- NAACL 2018. https://www.aclweb.org/anthology/N18-1119/
-
- and
-
- Improved Lexically Constrained Decoding for Translation and
- Monolingual Rewriting. Hu et al, NAACL
- 2019. https://www.aclweb.org/anthology/N19-1090/
-
- This is accomplished by maintaining, for each beam hypothesis, a
- ConstraintState object (see constraints.py) that tracks which
- constraints have been generated and using this information to
- shape the beam for each input sentence.
- """
-
- def __init__(self, tgt_dict, representation):
- super().__init__(tgt_dict)
- self.representation = representation
- self.vocab_size = len(tgt_dict)
- self.num_cands = 0
- self.supports_constraints = True
-
- @torch.jit.export
- def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
- self.constraint_states = []
- for constraint_tensor in batch_constraints:
- if self.representation == "ordered":
- constraint_state = OrderedConstraintState.create(constraint_tensor)
- elif self.representation == "unordered":
- constraint_state = UnorderedConstraintState.create(constraint_tensor)
-
- self.constraint_states.append([constraint_state for i in range(beam_size)])
-
- @torch.jit.export
- def prune_sentences(self, batch_idxs: Tensor):
- self.constraint_states = [
- self.constraint_states[i] for i in batch_idxs.tolist()
- ]
-
- @torch.jit.export
- def update_constraints(self, active_hypos: Tensor):
- if self.constraint_states:
- batch_size = active_hypos.size(0)
- for sentid in range(batch_size):
- self.constraint_states[sentid] = [
- self.constraint_states[sentid][i] for i in active_hypos[sentid]
- ]
-
- @torch.jit.export
- def step(
- self,
- step: int,
- lprobs: Tensor,
- scores: Optional[Tensor],
- prev_output_tokens: Optional[Tensor] = None,
- original_batch_idxs: Optional[Tensor] = None,
- ):
- """
- A constrained step builds a large candidates list from the following:
- - the top 2 * {beam_size} items over the whole beam
- - for each item in the beam
- - the top {each_k} (default 1)
- - all next constraints
- We then compute the constrained state of each beam item, and assign
- stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so
- on. We then sort by (stripe, score), and truncate the list at
- 2 * beam size.
-
- Args:
- step: the decoder step
- lprobs: (batch size, beam size, target vocab)
- the target-vocab distributions for each item in the beam.
-        Return: A tuple of (scores, indices, beams, constraints) where:
- scores: (batch, output beam size)
- the scores of the chosen elements
- indices: (batch, output beam size)
- the target vocab indices of the chosen elements
- beams: (batch, output beam size)
- the 0-indexed hypothesis ids of the chosen elements
- constraints: (batch, output beam size)
- the new constraint states
- """
- each_k = 1
- device = lprobs.device
-
- batch_size, beam_size, vocab_size = lprobs.size()
-
- self.num_cands = min(
- # Just take the k-best. We'll get another k from the 1-best from each
- # row, plus more from the constraints
- beam_size * 2,
- lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad
- )
-
- # STEP 0: Preliminary. Prevent EOS for unfinished hyps across all batch items
- constraint_states = self.constraint_states
- if constraint_states and step > 0:
- not_finished_indices = []
- for sentno, sent_constraints in enumerate(constraint_states):
- for beamno, state in enumerate(sent_constraints):
- index = sentno * beam_size + beamno
- if not state.finished:
- not_finished_indices.append(index)
- not_finished_indices = torch.tensor(not_finished_indices)
- if not_finished_indices.numel() > 0:
- lprobs.view(batch_size * beam_size, -1)[
- not_finished_indices, self.eos
- ] = -math.inf
-
- if step == 0:
- # at the first step all hypotheses are equally likely, so use
- # only the first beam entry for each batch item
- lprobs = lprobs[:, ::beam_size, :].contiguous()
- else:
- # make probs contain cumulative scores for each hypothesis
- assert scores is not None
- lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
-
- top_prediction = torch.topk(
- lprobs.view(batch_size, -1),
- self.num_cands,
- )
- scores_buf, indices_buf = top_prediction
- # Project back into relative indices and beams
- beams_buf = indices_buf // vocab_size
- indices_buf = indices_buf.fmod(vocab_size)
-
- # Short circuit if there are no constraints in this batch
- if not constraint_states:
- return scores_buf, indices_buf, beams_buf
-
- # STEP 1: get top-1 from each hypothesis across all sentences in the batch
- if step > 0:
- top_scores, top_indices = torch.topk(
- lprobs.view(batch_size * beam_size, -1),
- k=each_k,
- dim=1,
- )
- top_scores = top_scores.view(batch_size, -1)
- top_indices = top_indices.view(batch_size, -1)
- scores_buf = torch.cat((scores_buf, top_scores), dim=1)
- indices_buf = torch.cat((indices_buf, top_indices), dim=1)
- new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1)
- beams_buf = torch.cat((beams_buf, new_beams), dim=1)
-
- # Now, process sentences in the batch one by one.
- new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device)
- new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
- new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
- for sentno, states in enumerate(constraint_states):
- scores, indices, beams, new_states = self.step_sentence(
- step,
- sentno,
- lprobs[sentno],
- constraint_states[sentno],
- beams_buf[sentno].clone(),
- indices_buf[sentno].clone(),
- scores_buf[sentno].clone(),
- )
- new_scores_buf[sentno] = scores
- new_indices_buf[sentno] = indices
- new_beams_buf[sentno] = beams
- self.constraint_states[sentno] = new_states
-
- return new_scores_buf, new_indices_buf, new_beams_buf
-
- @torch.jit.export
- def step_sentence(
- self,
- step: int,
- sentno: int,
- lprobs: Tensor,
- constraint_states: List[List[ConstraintState]],
- beams_buf: Tensor,
- indices_buf: Tensor,
- scores_buf: Tensor,
- ):
- """Does per-sentence processing. Adds all constraints for each
- hypothesis to the list of candidates; then removes duplicates,
- sorts, and dynamically stripes across the banks. All tensor inputs
- are collapsed to those pertaining to a single input sentence.
- """
- device = lprobs.device
-
- # STEP 2: Add all constraints for each beam item
- for beamno, state in enumerate(constraint_states):
- next_tokens = torch.tensor(list(state.next_tokens()), device=device).long()
- if next_tokens.numel() != 0:
- indices_buf = torch.cat((indices_buf, next_tokens))
- next_beams = (
- torch.tensor(beamno, device=device)
- .repeat(next_tokens.size(0))
- .long()
- )
- beams_buf = torch.cat((beams_buf, next_beams))
- next_values = lprobs[beamno].take(next_tokens.view(-1))
- scores_buf = torch.cat((scores_buf, next_values))
-
- # At the 0th time step, there is just one beam item
- if step == 0:
- break
-
- # STEP 3: Compute the "bank" for each candidate. This is the
- # number of constraints it's generated. We need this so that
- # we can do round-robin allocation of the beam across these
- # banks. If C is the number of constraints, we select the best
- # item in bank C, then the best in bank C-1, etc, followed by
- # the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so
- # on, until the maximum beam size. We accomplish this by
- # creating a sort key and striping across the banks.
-
- # Compute the new states for all candidates
- cands_size = indices_buf.size(0)
- constraint_states = [
- constraint_states[beams_buf[i]].advance(indices_buf[i])
- for i in range(cands_size)
- ]
-
- banks = torch.tensor([state.bank for state in constraint_states], device=device)
-
- # STEP 4: Sort
- num_constraint_tokens = len(state.tokens)
-
- # Sort by keys (bank, score) (i.e., sort banks together, and scores
- # within banks). AFAIK pytorch doesn't support either stable sort or
- # multi-key sorting, so we have to hack this.
- MAX_SCORE = -100
- sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf
- sort_values, sort_indices = sort_key.sort(dim=0, descending=True)
- scores_buf = scores_buf[sort_indices]
- indices_buf = indices_buf[sort_indices]
- beams_buf = beams_buf[sort_indices]
- banks = banks[sort_indices]
-
- # Sort the constraints to follow suit
- constraint_states = [constraint_states[i] for i in sort_indices]
-
- # STEP 5: Remove duplicates. The topk calls (overall and
- # per-row) plus the per-row generation of constraints will
- # produce duplicates. Here we remove them.
-
- def roll(t):
- """Rolls a 1d tensor left by 1.
-
- [0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3]
- """
- return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0)
-
- # We map candidates (beam, token_id) to a single dimension.
- # This is then shifted by 1. We can then easily identify
- # duplicates and create a mask that identifies unique
- # extensions.
- uniques_mask = beams_buf * (self.vocab_size + 1) + indices_buf
- uniques_mask = roll(uniques_mask) != uniques_mask
-
- # Use the mask to pare down the data structures
- scores_buf = torch.masked_select(scores_buf, uniques_mask)
- indices_buf = torch.masked_select(indices_buf, uniques_mask)
- beams_buf = torch.masked_select(beams_buf, uniques_mask)
- banks = torch.masked_select(banks, uniques_mask)
- i = 1
- for mask in uniques_mask[1:]:
- if not mask:
- constraint_states.pop(i)
- i += mask
-
- # STEP 6: Assign IDs round-robin across banks, sort, and
- # truncate. Now that the candidates are sorted by (bank,
- # score) and uniqed, we dynamically allocate the {beam_size}
- # beam by striping across the candidates. These stripes will
- # be used as sort keys to do round-robin selection. This is
- # accomplished in a single pass with offsets. Sorting by
- # highest-banks (furthest-along hypotheses) first ensures
- # progress through the constraints.
- #
- # e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0
- # OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1
- # NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7
- # = 0 5 10 1 6 11 13 2 7 12 3 8
- #
- # Sorting by this then gives the following banks:
- #
- # 3 2 1 0 3 2 1 0 3 2 1 2
- #
- # We'll take the top {beam_size} of these.
- stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
- stripes = torch.zeros_like(banks)
- cur_bank_count = -1
- cur_bank = banks[0]
- for i, bank in enumerate(banks):
- if bank != cur_bank:
- cur_bank_count = 0
- cur_bank = bank
- else:
- cur_bank_count += 1
- stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]
-
- # STEP 7: Sort by the stripes values
- sort_values, sort_indices = stripes.sort(dim=0)
- scores_buf = scores_buf[sort_indices]
- indices_buf = indices_buf[sort_indices]
- beams_buf = beams_buf[sort_indices]
- constraint_states = [constraint_states[i] for i in sort_indices]
-
- # STEP 8: Truncate to the candidates size!
- scores_buf = scores_buf[: self.num_cands]
- indices_buf = indices_buf[: self.num_cands]
- beams_buf = beams_buf[: self.num_cands]
-
- return scores_buf, indices_buf, beams_buf, constraint_states
-
-
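STEP 6 above is easiest to see on a toy example. This sketch (not part of the deleted file) reuses the stripe computation on a hypothetical, already (bank, score)-sorted candidate list and shows that sorting by the stripe keys yields round-robin selection across banks.

```python
# Minimal sketch: stripe keys for round-robin allocation across constraint banks.
import torch

banks = torch.tensor([3, 3, 3, 2, 2, 2, 1, 1, 0])   # candidates sorted by bank desc
num_constraint_tokens = 3

stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
stripes = torch.zeros_like(banks)
cur_bank_count, cur_bank = -1, banks[0]
for i, bank in enumerate(banks):
    if bank != cur_bank:
        cur_bank_count = 0
        cur_bank = bank
    else:
        cur_bank_count += 1
    stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]

order = stripes.sort(dim=0).indices
print(banks[order])   # round-robin: tensor([3, 2, 1, 0, 3, 2, 1, 3, 2])
```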
-class LengthConstrainedBeamSearch(Search):
- def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
- super().__init__(tgt_dict)
- self.min_len_a = min_len_a
- self.min_len_b = min_len_b
- self.max_len_a = max_len_a
- self.max_len_b = max_len_b
- self.beam = BeamSearch(tgt_dict)
- self.needs_src_lengths = True
-
- def step(
- self,
- step: int,
- lprobs,
- scores,
- prev_output_tokens: Optional[Tensor] = None,
- original_batch_idxs: Optional[Tensor] = None,
- ):
- min_lens = self.min_len_a * self.src_lengths + self.min_len_b
- max_lens = self.max_len_a * self.src_lengths + self.max_len_b
- lprobs[step < min_lens, :, self.eos] = -math.inf
- lprobs[step >= max_lens, :, self.eos] = 0
- return self.beam.step(step, lprobs, scores)
-
-
-class DiverseBeamSearch(Search):
- """Diverse Beam Search.
-
- See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
- Models" for details.
-
- We only implement the Hamming Diversity penalty here, which performed best
- in the original paper.
- """
-
- def __init__(self, tgt_dict, num_groups, diversity_strength):
- super().__init__(tgt_dict)
- self.num_groups = num_groups
- self.diversity_strength = -diversity_strength
- self.beam = BeamSearch(tgt_dict)
-
- @torch.jit.export
- def step(
- self,
- step: int,
- lprobs,
- scores,
- prev_output_tokens: Optional[Tensor] = None,
- original_batch_idxs: Optional[Tensor] = None,
- ):
- bsz, beam_size, vocab_size = lprobs.size()
- if beam_size % self.num_groups != 0:
- raise ValueError(
- "DiverseBeamSearch requires --beam to be divisible by the number of groups"
- )
-
- # initialize diversity penalty
- diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs)
-
- scores_G, indices_G, beams_G = [], [], []
- for g in range(self.num_groups):
- lprobs_g = lprobs[:, g :: self.num_groups, :]
- scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None
-
- # apply diversity penalty
- if g > 0:
- lprobs_g = torch.add(
- lprobs_g,
- other=diversity_buf.unsqueeze(1),
- alpha=self.diversity_strength,
- )
- else:
- lprobs_g = lprobs_g.contiguous()
-
- scores_buf, indices_buf, beams_buf = self.beam.step(
- step, lprobs_g, scores_g
- )
- beams_buf.mul_(self.num_groups).add_(g)
-
- scores_G.append(scores_buf.clone())
- indices_G.append(indices_buf.clone())
- beams_G.append(beams_buf.clone())
-
- # update diversity penalty
- diversity_buf.scatter_add_(
- 1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf)
- )
-
- # interleave results from different groups
- scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1)
- indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1)
- beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1)
- return scores_buf, indices_buf, beams_buf
-
-
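A minimal sketch (not part of the deleted file) of the Hamming diversity penalty: tokens already chosen by earlier groups accumulate counts in `diversity_buf` via `scatter_add_`, and later groups subtract `diversity_strength` times that count from their log-probabilities.

```python
# Minimal sketch: Hamming diversity penalty for one batch item.
import torch

vocab_size, diversity_strength = 6, 0.5
lprobs_g = torch.log_softmax(torch.randn(1, 1, vocab_size), dim=-1)  # (bsz, beam_in_group, vocab)

picked_earlier = torch.tensor([[2, 2, 4]])           # tokens chosen by previous groups (toy values)
diversity_buf = torch.zeros(1, vocab_size)
diversity_buf.scatter_add_(1, picked_earlier, torch.ones(picked_earlier.shape))

penalized = lprobs_g + (-diversity_strength) * diversity_buf.unsqueeze(1)
print(diversity_buf)          # tensor([[0., 0., 2., 0., 1., 0.]])
print(penalized - lprobs_g)   # penalty of -1.0 on token 2 and -0.5 on token 4
```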
-class Sampling(Search):
- sampling_topk: int
- sampling_topp: float
-
- def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
- super().__init__(tgt_dict)
- self.sampling_topk = sampling_topk
- self.sampling_topp = sampling_topp
-
- def _sample_topp(self, lprobs):
- """Sample among the smallest set of elements whose cumulative probability mass exceeds p.
-
- See `"The Curious Case of Neural Text Degeneration"
- (Holtzman et al., 2019) `_.
-
- Args:
- lprobs: (bsz x input_beam_size x vocab_size)
- the model's log-probabilities over the vocabulary at the current step
-
- Return: A tuple of (trimed_probs, truncated_indices) where:
- trimed_probs: (bsz x input_beam_size x ?)
- the model's probabilities over the elements selected to sample from. The
- width of the third dimension is determined by top-P.
- truncated_indices: (bsz x input_beam_size x ?)
- the indices of the chosen elements.
- """
- probs = lprobs.exp_()
-
- # sort the last dimension (vocab dimension) in descending order
- sorted_probs, sorted_indices = probs.sort(descending=True)
-
- # compute a mask to indicate the words to be included in the top-P set.
- cumsum_probs = sorted_probs.cumsum(dim=2)
- mask = cumsum_probs.lt(self.sampling_topp)
-
- # note that mask was computed by 'lt'. One more word needs to be included
- # so that the cumulative probability mass can exceed p.
- cumsum_mask = mask.cumsum(dim=2)
- last_included = cumsum_mask[:, :, -1:]
- last_included.clamp_(0, mask.size()[2] - 1)
- mask = mask.scatter_(2, last_included, 1)
-
- # truncate unnecessary dims.
- max_dim = last_included.max()
- truncated_mask = mask[:, :, : max_dim + 1]
- truncated_probs = sorted_probs[:, :, : max_dim + 1]
- truncated_indices = sorted_indices[:, :, : max_dim + 1]
-
- # trim the words that are not in top-P by setting their probabilities
- # to 0, so that they would not be sampled later.
- trim_mask = ~truncated_mask
- trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
- return trimed_probs, truncated_indices
-
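The top-p truncation in `_sample_topp` on a single hand-picked distribution (a sketch, not part of the deleted file): sort, take the cumulative sum, keep everything strictly below `p`, then include one more token so the kept mass exceeds `p`.

```python
# Minimal sketch: top-p truncation of a single distribution (bsz = beam = 1).
import torch

probs = torch.tensor([[[0.05, 0.40, 0.10, 0.30, 0.15]]])   # (bsz, beam, vocab)
topp = 0.8

sorted_probs, sorted_indices = probs.sort(descending=True)
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(topp)
last_included = mask.cumsum(dim=2)[:, :, -1:].clamp(0, mask.size(2) - 1)
mask = mask.scatter_(2, last_included, 1)     # include one more token to pass p

print(sorted_indices[mask])   # tensor([1, 3, 4]) -> tokens kept for sampling
print(sorted_probs[mask])     # tensor([0.4000, 0.3000, 0.1500]), cumulative 0.85 > 0.8
```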
- @torch.jit.export
- def step(
- self,
- step: int,
- lprobs,
- scores,
- prev_output_tokens: Optional[Tensor] = None,
- original_batch_idxs: Optional[Tensor] = None,
- ):
- bsz, beam_size, vocab_size = lprobs.size()
-
- if step == 0:
- # at the first step all hypotheses are equally likely, so use
- # only the first beam
- lprobs = lprobs[:, ::beam_size, :].contiguous()
-
- if self.sampling_topp > 0:
- # only sample from the smallest set of words whose cumulative probability mass exceeds p
- probs, top_indices = self._sample_topp(lprobs)
- elif self.sampling_topk > 0:
- # only sample from top-k candidates
- lprobs, top_indices = lprobs.topk(self.sampling_topk)
- probs = lprobs.exp_()
- else:
- probs = lprobs.exp_()
-
- # dummy data to be consistent with true branch for type check
- top_indices = torch.empty(0).to(probs)
- # sample
- if step == 0:
- indices_buf = torch.multinomial(
- probs.view(bsz, -1),
- beam_size,
- replacement=True,
- ).view(bsz, beam_size)
- else:
- indices_buf = torch.multinomial(
- probs.view(bsz * beam_size, -1),
- 1,
- replacement=True,
- ).view(bsz, beam_size)
-
- if step == 0:
- # expand to beam size
- probs = probs.expand(bsz, beam_size, -1)
-
- # gather scores
- scores_buf = torch.gather(probs, dim=2, index=indices_buf.unsqueeze(-1))
- scores_buf = scores_buf.log_().view(bsz, -1)
-
- # remap indices if using top-k or top-P sampling
- if self.sampling_topk > 0 or self.sampling_topp > 0:
- indices_buf = torch.gather(
- top_indices.expand(bsz, beam_size, -1),
- dim=2,
- index=indices_buf.unsqueeze(-1),
- ).squeeze(2)
-
- if step == 0:
- beams_buf = indices_buf.new_zeros(bsz, beam_size)
- else:
- beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
- # make scores cumulative
- scores_buf.add_(
- torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
- )
-
- return scores_buf, indices_buf, beams_buf
-
-
-class DiverseSiblingsSearch(Search):
- """
- Beam search with diverse siblings.
-
- See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details.
- https://arxiv.org/abs/1611.08562
-
- 1/ Calculate hypotheses for each beam
- 2/ Intra-sibling ordering
- 3/ Rewrite scores
- 4/ Choose top K hypotheses
-
-    With diversity_rate == 0 this is equivalent to BeamSearch.
- """
-
- def __init__(self, tgt_dict, diversity_rate):
- super().__init__(tgt_dict)
- self.diversity_rate = diversity_rate
- self.beam = BeamSearch(tgt_dict)
-
- def step(
- self,
- step: int,
- lprobs,
- scores,
- prev_output_tokens: Optional[Tensor] = None,
- original_batch_idxs: Optional[Tensor] = None,
- ):
- bsz, beam_size, vocab_size = lprobs.size()
- k = min(
- # Take the best 2 x beam_size predictions. We'll choose the first
- # beam_size of these which don't predict eos to continue with.
- beam_size * 2,
- lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
- )
- s_list: List[Tensor]
- i_list: List[Tensor]
- s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)]
- i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)]
- sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate
-
- if step == 0:
- return self.beam.step(step, lprobs, scores)
- lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
-
- # 1/ Calculate hypotheses for each beam
- for i in range(beam_size):
- torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i]))
- i_list[i].fmod_(vocab_size)
-
- # 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores
- s_list[i].sub_(sibling_score)
-
- # 4/ Choose top K hypotheses
- indices = torch.stack(i_list, dim=1).view(bsz, -1)
-
- final_scores = torch.empty(0).to(lprobs)
- final_indices = torch.LongTensor().to(device=lprobs.device)
- final_beams = torch.LongTensor().to(device=lprobs.device)
- (final_scores, final_indices) = torch.topk(
- torch.stack(s_list, dim=1).view(bsz, -1),
- k,
- )
-
- final_beams = final_indices // k
-
- for i in range(bsz):
- final_indices[i] = indices[i][final_indices[i]]
-
- return final_scores, final_indices, final_beams
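A minimal sketch (not part of the deleted file) of the sibling penalty for one batch item: per-beam top-k, subtract `rank * diversity_rate`, then a global top-k over the penalized scores.

```python
# Minimal sketch: diverse-siblings scoring for one batch item with two beams.
import torch

diversity_rate, k = 0.3, 3
lprobs = torch.log_softmax(torch.randn(1, 2, 8), dim=-1)            # (bsz, beam, vocab)
sibling_score = torch.arange(1, k + 1).float() * diversity_rate     # [0.3, 0.6, 0.9]

scores, tokens = [], []
for i in range(lprobs.size(1)):
    s, t = lprobs[:, i, :].topk(k, dim=-1)
    scores.append(s - sibling_score)     # later siblings pay a larger penalty
    tokens.append(t)

final_scores, flat_idx = torch.cat(scores, dim=1).topk(k, dim=1)
final_beams = flat_idx // k              # which beam each surviving candidate came from
print(final_scores, final_beams)
```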
diff --git a/spaces/Hoodady/3DFuse/my/utils/seed.py b/spaces/Hoodady/3DFuse/my/utils/seed.py
deleted file mode 100644
index e3e81fad6c7610d11ec8d847f9a61a4e6675ecc4..0000000000000000000000000000000000000000
--- a/spaces/Hoodady/3DFuse/my/utils/seed.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# from pytorch lightning
-import random
-import numpy as np
-import torch
-
-max_seed_value = np.iinfo(np.uint32).max
-min_seed_value = np.iinfo(np.uint32).min
-
-
-def seed_everything(seed=None):
-    if seed is None:
-        seed = random.randint(min_seed_value, max_seed_value)
-    seed = int(seed)
-
- if not (min_seed_value <= seed <= max_seed_value):
- raise ValueError(f"{seed} is not in bounds, numpy accepts from {min_seed_value} to {max_seed_value}")
-
- print(f"seed set to {seed}")
- random.seed(seed)
- np.random.seed(seed)
- torch.manual_seed(seed)
- torch.cuda.manual_seed_all(seed)
- return seed
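A minimal reproducibility check (not part of the deleted file) using the same `random`/`numpy`/`torch` seeding calls `seed_everything` makes (minus the CUDA one):

```python
# Minimal sketch: seeding twice with the same value reproduces the same draws.
import random
import numpy as np
import torch

def draw():
    return random.random(), np.random.rand(), torch.rand(1).item()

for _ in range(2):
    random.seed(42)
    np.random.seed(42)
    torch.manual_seed(42)
    print(draw())    # identical tuples on both iterations
```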
diff --git a/spaces/Intel/intel-xai-tools-cam-demo/app.py b/spaces/Intel/intel-xai-tools-cam-demo/app.py
deleted file mode 100644
index 97f40cbdc4eafff12ed9c0711d7026d4c4ee259d..0000000000000000000000000000000000000000
--- a/spaces/Intel/intel-xai-tools-cam-demo/app.py
+++ /dev/null
@@ -1,309 +0,0 @@
-import gradio as gr
-import torch
-import cv2
-
-### CAM explainer code from Intel XAI tools (https://github.com/IntelAI/intel-xai-tools) ###
-class XGradCAM:
- def __init__(self, model, targetLayer, targetClass, image, dims, device):
-
- # set any frozen layers to trainable
- # gradcam cannot be calculated without it
- for param in model.parameters():
- if not param.requires_grad:
- param.requires_grad = True
-
- self.model = model
- self.targetLayer = targetLayer
- self.targetClass = targetClass
- self.image = image
- self.dims = dims
- self.device = device
-
- def visualize(self):
- from pytorch_grad_cam import XGradCAM, GuidedBackpropReLUModel
- from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
- from pytorch_grad_cam.utils.image import show_cam_on_image, deprocess_image, preprocess_image
- import torch
- import cv2
- import numpy as np
- import matplotlib.pyplot as plt
-
- self.model.eval().to(self.device)
-
- image = cv2.resize(self.image, self.dims)
- # convert to rgb if image is grayscale
- converted = False
- if len(image.shape) == 2:
- converted = True
- image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
-
- rgb_img = np.float32(image) / 255
- input_tensor = preprocess_image(rgb_img,
- mean=[0.485, 0.456, 0.406],
- std=[0.229, 0.224, 0.225])
- input_tensor = input_tensor.to(self.device)
-
- self.targetLayer = [self.targetLayer]
-
- if self.targetClass is None:
- targets = None
- else:
- targets = [ClassifierOutputTarget(self.targetClass)]
-
- cam = XGradCAM(self.model, self.targetLayer, use_cuda=torch.cuda.is_available())
-
- # convert back to grayscale if that is the initial dim
- if converted:
- input_tensor = input_tensor[:, 0:1, :, :]
-
- grayscale_cam = cam(input_tensor=input_tensor, targets=targets, aug_smooth=False,
- eigen_smooth=False)
- grayscale_cam = grayscale_cam[0, :]
- cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
- cam_image = cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR)
-
- gb_model = GuidedBackpropReLUModel(model=self.model, use_cuda=torch.cuda.is_available())
- gb = gb_model(input_tensor, target_category=None)
- cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
- cam_gb = deprocess_image(cam_mask * gb)
- gb = deprocess_image(gb)
-
- print("XGradCAM, Guided backpropagation, and Guided XGradCAM are generated. ")
-
- return cv2.cvtColor(cam_image, cv2.COLOR_RGB2BGR)
-
-class EigenCAM:
- def __init__(self, model, targetLayer, boxes, classes, colors, reshape, image, device):
- self.model = model
- self.targetLayer = targetLayer
- self.boxes = boxes
- self.classes = classes
- self.colors = colors
- self.reshape = reshape
- self.image = image
- self.device = device
-
- def visualize(self):
- from pytorch_grad_cam import EigenCAM
- from pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image, scale_cam_image
- import torchvision
- import torch
- import cv2
- import numpy as np
-
- self.model.eval().to(self.device)
-
- rgb_img = np.float32(self.image) / 255
- transform = torchvision.transforms.ToTensor()
- input_tensor = transform(rgb_img)
- input_tensor = input_tensor.unsqueeze(0)
- input_tensor = input_tensor.to(self.device)
-
- self.targetLayer = [self.targetLayer]
-
- if self.reshape is None:
- cam = EigenCAM(self.model, self.targetLayer, use_cuda=torch.cuda.is_available())
- else:
- cam = EigenCAM(self.model, self.targetLayer, use_cuda=torch.cuda.is_available(),
- reshape_transform=self.reshape)
- targets = []
- grayscale_cam = cam(input_tensor=input_tensor, targets=targets, aug_smooth=False,
- eigen_smooth=False)
- grayscale_cam = grayscale_cam[0, :]
- cam_image = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
-
- renormalized_cam = np.zeros(grayscale_cam.shape, dtype=np.float32)
- for x1, y1, x2, y2 in self.boxes:
- renormalized_cam[y1:y2, x1:x2] = scale_cam_image(grayscale_cam[y1:y2, x1:x2].copy())
- renormalized_cam = scale_cam_image(renormalized_cam)
- eigencam_image_renormalized = show_cam_on_image(rgb_img, renormalized_cam, use_rgb=True)
- for i, box in enumerate(self.boxes):
- color = self.colors[i]
- cv2.rectangle(
- eigencam_image_renormalized,
- (box[0], box[1]),
- (box[2], box[3]),
- color, 2
- )
- cv2.putText(eigencam_image_renormalized, self.classes[i], (box[0], box[1] - 5),
- cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2,
- lineType=cv2.LINE_AA)
-
- print("EigenCAM is generated. ")
-
- return eigencam_image_renormalized
-
-### For Gradio Demo ###
-def xgradcam(image, model_code, target_class):
- global model, target_layer
- exec(model_code, globals())
- if target_class == "":
- target_class = None
- else:
- target_class = int(target_class)
- image_dims = (224, 224)
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- xgradcam = XGradCAM(model, target_layer, target_class, image, image_dims, device)
-
- return xgradcam.visualize()
-
-def eigencam(image, model_code, class_code, process_code, reshape_code):
- global input_image, model, target_layer, bounding_box_coordinates, class_names, box_colors, reshape
- input_image = cv2.resize(image, (640, 640))
- exec(model_code, globals())
- exec(class_code, globals())
- exec(process_code, globals())
- exec(reshape_code, globals())
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- eigencam = EigenCAM(model, target_layer, bounding_box_coordinates, class_names, box_colors, reshape, input_image, device)
-
- return eigencam.visualize()
-
-with gr.Blocks() as demo:
- gr.Markdown(
- """
- # Class Activation Mapping (CAM) Explainer Demo
-    This is a demo of the CAM explainer from Intel XAI tools (https://github.com/IntelAI/intel-xai-tools). \
-    CAM is an approach that localizes the regions in an image responsible for a class prediction. \
-    The demo shows XGradCAM visualizations for an object classification model and EigenCAM visualizations for an object detection model.
- """
- )
-
- with gr.Tab("XGradCAM"):
- with gr.Row():
- with gr.Column():
- xgradcam_image = gr.Image(label="Input Image")
- gr.Markdown(
- """
-                    Load the pretrained model to the variable `model` depending on how it was saved. Then, specify `target_layer` (normally the last convolutional layer) to compute CAM for. \
- Here are some common choices:
- - FasterRCNN: model.backbone
- - ResNet18 and 50: model.layer4
- - VGG and DenseNet161: model.features
-
- Please don't change the variable names in the following code.
- """
- )
- xgradcam_model = gr.Code(label="Model and Target Layer", value=
- """
- from torchvision.models import resnet50, ResNet50_Weights
-
- model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2)
- target_layer = model.layer4
- """, language="python")
- gr.Markdown(
- """
-                    Enter the target category as an integer to compute CAM for. It is the category index in the range [0, NUM_OF_CLASSES-1] based on the training dataset. \
- If it is left blank, the highest scoring category will be used.
- """
- )
- xgradcam_targetClass = gr.Textbox(label="Target Category")
- xgradcam_output = gr.Image()
- xgradcam_button = gr.Button("Submit")
-
- with gr.Tab("EigenCAM"):
- with gr.Row():
- with gr.Column():
- eigencam_image = gr.Image(label="Input Image")
- gr.Markdown(
- """
-                    Load the pretrained model to the variable `model` depending on how it was saved. Then, specify `target_layer` (normally the last convolutional layer) to compute CAM for. \
- Here are some common choices:
- - FasterRCNN: model.backbone
- - ResNet18 and 50: model.layer4
- - VGG and DenseNet161: model.features
-
- Please don't change the variable names in the following code.
- """
- )
- eigencam_model = gr.Code(label="Model and Target Layer", value=
- """
- from torchvision.models.detection import fasterrcnn_resnet50_fpn
-
- model = fasterrcnn_resnet50_fpn(pretrained=True).eval()
- target_layer = model.backbone
- """, language="python")
- gr.Markdown(
- """
-                    If the model output does not include class names, specify `class_labels` as a list so they can be printed with the corresponding bounding boxes in the image. \
-                    Depending on the model, class names might not be needed (e.g. YOLO). Then, create `color` as a list with one color per class.
- """
- )
- eigencam_class = gr.Code(label="Class Name", value=
- """
- import numpy as np
-
- class_labels = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
- 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',
- 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
- 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella',
- 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
- 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
- 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass', 'cup', 'fork',
- 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
- 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
- 'potted plant', 'bed', 'N/A', 'dining table', 'N/A', 'N/A', 'toilet',
- 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
- 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', 'clock', 'vase',
- 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
- color = np.random.uniform(0, 255, size=(len(class_labels), 3))
- """, language="python")
- gr.Markdown(
- """
-                    Get `output` from the model (in the case of FasterRCNN, convert `input_image` to a tensor first). Then, write a custom `process_output` function to process the outputs from the model. \
-                    You should get the `bounding_box_coordinates`, `class_names`, and `box_colors` of the detected objects whose detection score is higher than the `detection_threshold` value. \
-                    If you use a model other than FasterRCNN, you need to write your own custom process function that returns outputs with the same structure as this one.
- """
- )
- eigencam_process = gr.Code(label="Output Processing", value=
- """
- import torchvision
-
- transform = torchvision.transforms.ToTensor()
- input_tensor = transform(np.float32(input_image) / 255).unsqueeze(0)
- output = model(input_tensor)[0]
-
- def process_output(output, class_labels, color, detection_threshold):
- boxes, classes, labels, colors = [], [], [], []
- box = output['boxes'].tolist()
- name = [class_labels[i] for i in output['labels'].detach().numpy()]
- label = output['labels'].detach().numpy()
- for i in range(len(name)):
- score = output['scores'].detach().numpy()[i]
- if score < detection_threshold:
- continue
- boxes.append([int(b) for b in box[i]])
- classes.append(name[i])
- colors.append(color[label[i]])
-
- return boxes, classes, colors
-
- detection_threshold = 0.9
- bounding_box_coordinates, class_names, box_colors = process_output(output, class_labels, color, detection_threshold)
- """, language="python")
- gr.Markdown(
- """
-                    Write a custom `reshape` function to get the activations from the model and process them into 2D format. \
-                    For example, the backbone of FasterRCNN outputs 5 tensors with different spatial sizes as an OrderedDict, \
-                    so we need a custom function that aggregates these image tensors, resizes them to a common shape, and concatenates them. \
-                    If you use a model other than FasterRCNN, you need to write your own custom reshape function.
- """
- )
- eigencam_reshape = gr.Code(label="Reshape", value=
- """
- def reshape(x):
- target_size = x['pool'].size()[-2 : ]
- activations = []
- for key, value in x.items():
- activations.append(torch.nn.functional.interpolate(torch.abs(value), target_size, mode='bilinear'))
- activations = torch.cat(activations, axis=1)
-
- return activations
- """, language="python")
- eigencam_output = gr.Image()
- eigencam_button = gr.Button("Submit")
-
- xgradcam_button.click(xgradcam, inputs=[xgradcam_image, xgradcam_model, xgradcam_targetClass], outputs=xgradcam_output)
- eigencam_button.click(eigencam, inputs=[eigencam_image, eigencam_model, eigencam_class, eigencam_process, eigencam_reshape], outputs=eigencam_output)
-
-demo.launch()
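A hypothetical standalone usage sketch (not part of the deleted app) mirroring the calls `XGradCAM.visualize` makes, assuming `pytorch-grad-cam` (the older API with `use_cuda`) and `torchvision` are installed; the random image is a stand-in for a real photo.

```python
# Minimal sketch: XGradCAM heatmap for a ResNet-50 classifier.
import numpy as np
import torch
from pytorch_grad_cam import XGradCAM
from pytorch_grad_cam.utils.image import preprocess_image, show_cam_on_image
from torchvision.models import resnet50, ResNet50_Weights

model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V2).eval()
rgb_img = np.random.rand(224, 224, 3).astype(np.float32)          # stand-in input in [0, 1]
input_tensor = preprocess_image(rgb_img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

cam = XGradCAM(model, [model.layer4], use_cuda=torch.cuda.is_available())
grayscale_cam = cam(input_tensor=input_tensor, targets=None)[0, :]  # highest-scoring class
overlay = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
print(overlay.shape)   # (224, 224, 3) uint8 heatmap overlay
```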
diff --git a/spaces/Jackflack09/diffuse-custom/README_HG.md b/spaces/Jackflack09/diffuse-custom/README_HG.md
deleted file mode 100644
index 99a0776d1a4669fa8387cc77e162c60084100a92..0000000000000000000000000000000000000000
--- a/spaces/Jackflack09/diffuse-custom/README_HG.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Anything V3.0
-emoji: 🏃
-colorFrom: gray
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.10.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_lms_discrete.py b/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_lms_discrete.py
deleted file mode 100644
index 28bc9bd0c608650ba67982b4eb408bab9c215ba1..0000000000000000000000000000000000000000
--- a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_lms_discrete.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# Copyright 2022 Katherine Crowson and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import warnings
-from dataclasses import dataclass
-from typing import List, Optional, Tuple, Union
-
-import numpy as np
-import torch
-
-from scipy import integrate
-
-from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS, BaseOutput
-from .scheduling_utils import SchedulerMixin
-
-
-@dataclass
-# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete
-class LMSDiscreteSchedulerOutput(BaseOutput):
- """
- Output class for the scheduler's step function output.
-
- Args:
- prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
- denoising loop.
- pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
- The predicted denoised sample (x_{0}) based on the model output from the current timestep.
- `pred_original_sample` can be used to preview progress or for guidance.
- """
-
- prev_sample: torch.FloatTensor
- pred_original_sample: Optional[torch.FloatTensor] = None
-
-
-class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
- """
- Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by
- Katherine Crowson:
- https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181
-
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
- [`~SchedulerMixin.from_pretrained`] functions.
-
- Args:
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
- beta_start (`float`): the starting `beta` value of inference.
- beta_end (`float`): the final `beta` value.
- beta_schedule (`str`):
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
- `linear` or `scaled_linear`.
- trained_betas (`np.ndarray`, optional):
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
- prediction_type (`str`, default `epsilon`, optional):
- prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
- process), `sample` (directly predicting the noisy sample) or `v_prediction` (see section 2.4
- https://imagen.research.google/video/paper.pdf)
- """
-
- _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
- order = 1
-
- @register_to_config
- def __init__(
- self,
- num_train_timesteps: int = 1000,
- beta_start: float = 0.0001,
- beta_end: float = 0.02,
- beta_schedule: str = "linear",
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
- prediction_type: str = "epsilon",
- ):
- if trained_betas is not None:
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
- elif beta_schedule == "linear":
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
- elif beta_schedule == "scaled_linear":
- # this schedule is very specific to the latent diffusion model.
- self.betas = (
- torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
- )
- else:
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
-
- self.alphas = 1.0 - self.betas
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
-
- sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
- sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32)
- self.sigmas = torch.from_numpy(sigmas)
-
- # standard deviation of the initial noise distribution
- self.init_noise_sigma = self.sigmas.max()
-
- # setable values
- self.num_inference_steps = None
- timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=float)[::-1].copy()
- self.timesteps = torch.from_numpy(timesteps)
- self.derivatives = []
- self.is_scale_input_called = False
-
- def scale_model_input(
- self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
- ) -> torch.FloatTensor:
- """
- Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm.
-
- Args:
- sample (`torch.FloatTensor`): input sample
- timestep (`float` or `torch.FloatTensor`): the current timestep in the diffusion chain
-
- Returns:
- `torch.FloatTensor`: scaled input sample
- """
- if isinstance(timestep, torch.Tensor):
- timestep = timestep.to(self.timesteps.device)
- step_index = (self.timesteps == timestep).nonzero().item()
- sigma = self.sigmas[step_index]
- sample = sample / ((sigma**2 + 1) ** 0.5)
- self.is_scale_input_called = True
- return sample
-
- def get_lms_coefficient(self, order, t, current_order):
- """
- Compute a linear multistep coefficient.
-
- Args:
- order (TODO):
- t (TODO):
- current_order (TODO):
- """
-
- def lms_derivative(tau):
- prod = 1.0
- for k in range(order):
- if current_order == k:
- continue
- prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k])
- return prod
-
- integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0]
-
- return integrated_coeff
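-
- # The coefficient computed above is the integral of the Lagrange basis polynomial
- # built on the sigma grid, i.e.
- #   coeff_j = integral from sigmas[t] to sigmas[t+1] of
- #             prod_{k != j} (tau - sigmas[t - k]) / (sigmas[t - j] - sigmas[t - k]) d tau
- # with j = current_order; `lms_derivative` is that basis polynomial and `integrate.quad`
- # evaluates the integral numerically.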
-
- def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
- """
- Sets the timesteps used for the diffusion chain. Supporting function to be run before inference.
-
- Args:
- num_inference_steps (`int`):
- the number of diffusion steps used when generating samples with a pre-trained model.
- device (`str` or `torch.device`, optional):
- the device to which the timesteps should be moved. If `None`, the timesteps are not moved.
- """
- self.num_inference_steps = num_inference_steps
-
- timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
- sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
- sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
- sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
-
- self.sigmas = torch.from_numpy(sigmas).to(device=device)
- if str(device).startswith("mps"):
- # mps does not support float64
- self.timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
- else:
- self.timesteps = torch.from_numpy(timesteps).to(device=device)
-
- self.derivatives = []
-
- def step(
- self,
- model_output: torch.FloatTensor,
- timestep: Union[float, torch.FloatTensor],
- sample: torch.FloatTensor,
- order: int = 4,
- return_dict: bool = True,
- ) -> Union[LMSDiscreteSchedulerOutput, Tuple]:
- """
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
- process from the learned model outputs (most often the predicted noise).
-
- Args:
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
- timestep (`float`): current timestep in the diffusion chain.
- sample (`torch.FloatTensor`):
- current instance of sample being created by diffusion process.
- order (`int`, default `4`): the order of the linear multistep method, i.e. how many past derivatives are used.
- return_dict (`bool`): option for returning tuple rather than LMSDiscreteSchedulerOutput class
-
- Returns:
- [`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] or `tuple`:
- [`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`.
- When returning a tuple, the first element is the sample tensor.
-
- """
- if not self.is_scale_input_called:
- warnings.warn(
- "The `scale_model_input` function should be called before `step` to ensure correct denoising. "
- "See `StableDiffusionPipeline` for a usage example."
- )
-
- if isinstance(timestep, torch.Tensor):
- timestep = timestep.to(self.timesteps.device)
- step_index = (self.timesteps == timestep).nonzero().item()
- sigma = self.sigmas[step_index]
-
- # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
- if self.config.prediction_type == "epsilon":
- pred_original_sample = sample - sigma * model_output
- elif self.config.prediction_type == "v_prediction":
- # * c_out + input * c_skip
- pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
- else:
- raise ValueError(
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
- )
-
- # 2. Convert to an ODE derivative
- derivative = (sample - pred_original_sample) / sigma
- self.derivatives.append(derivative)
- if len(self.derivatives) > order:
- self.derivatives.pop(0)
-
- # 3. Compute linear multistep coefficients
- order = min(step_index + 1, order)
- lms_coeffs = [self.get_lms_coefficient(order, step_index, curr_order) for curr_order in range(order)]
-
- # 4. Compute previous sample based on the derivatives path
- prev_sample = sample + sum(
- coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives))
- )
-
- if not return_dict:
- return (prev_sample,)
-
- return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
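-
- # Typical call order for this scheduler, as a commented sketch (the `unet` call and the
- # latent shape below are hypothetical placeholders, not defined in this file):
- #
- #   scheduler = LMSDiscreteScheduler(num_train_timesteps=1000)
- #   scheduler.set_timesteps(50)
- #   latents = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma   # illustrative shape
- #   for t in scheduler.timesteps:
- #       model_input = scheduler.scale_model_input(latents, t)   # must be called before `step`
- #       noise_pred = unet(model_input, t)                       # hypothetical diffusion model call
- #       latents = scheduler.step(noise_pred, t, latents).prev_sample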
-
- def add_noise(
- self,
- original_samples: torch.FloatTensor,
- noise: torch.FloatTensor,
- timesteps: torch.FloatTensor,
- ) -> torch.FloatTensor:
- # Make sure sigmas and timesteps have the same device and dtype as original_samples
- sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
- if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
- # mps does not support float64
- schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
- timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
- else:
- schedule_timesteps = self.timesteps.to(original_samples.device)
- timesteps = timesteps.to(original_samples.device)
-
- step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
-
- sigma = sigmas[step_indices].flatten()
- while len(sigma.shape) < len(original_samples.shape):
- sigma = sigma.unsqueeze(-1)
-
- noisy_samples = original_samples + noise * sigma
- return noisy_samples
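-
- # add_noise implements the forward (noising) process in sigma space:
- #   noisy = original + sigma_t * noise
- # where sigma_t is looked up from the same sigma schedule used by `step`.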
-
- def __len__(self):
- return self.config.num_train_timesteps
diff --git a/spaces/JavierIA/gccopen/models/common.py b/spaces/JavierIA/gccopen/models/common.py
deleted file mode 100644
index 111af708dea55cb11c8da3bb22d69e659ee78925..0000000000000000000000000000000000000000
--- a/spaces/JavierIA/gccopen/models/common.py
+++ /dev/null
@@ -1,2019 +0,0 @@
-import math
-from copy import copy
-from pathlib import Path
-
-import numpy as np
-import pandas as pd
-import requests
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torchvision.ops import DeformConv2d
-from PIL import Image
-from torch.cuda import amp
-
-from utils.datasets import letterbox
-from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh
-from utils.plots import color_list, plot_one_box
-from utils.torch_utils import time_synchronized
-
-
- ##### basic #####
-
-def autopad(k, p=None): # kernel, padding
- # Pad to 'same'
- if p is None:
- p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
- return p
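-
- # Worked examples: autopad(3) == 1 and autopad(5) == 2 ('same' padding for stride 1),
- # autopad((3, 5)) == [1, 2] for per-dimension kernels, and an explicit `p` is passed through unchanged.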
-
-
-class MP(nn.Module):
- def __init__(self, k=2):
- super(MP, self).__init__()
- self.m = nn.MaxPool2d(kernel_size=k, stride=k)
-
- def forward(self, x):
- return self.m(x)
-
-
-class SP(nn.Module):
- def __init__(self, k=3, s=1):
- super(SP, self).__init__()
- self.m = nn.MaxPool2d(kernel_size=k, stride=s, padding=k // 2)
-
- def forward(self, x):
- return self.m(x)
-
-
-class ReOrg(nn.Module):
- def __init__(self):
- super(ReOrg, self).__init__()
-
- def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
- return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
-
-
-class Concat(nn.Module):
- def __init__(self, dimension=1):
- super(Concat, self).__init__()
- self.d = dimension
-
- def forward(self, x):
- return torch.cat(x, self.d)
-
-
-class Chuncat(nn.Module):
- def __init__(self, dimension=1):
- super(Chuncat, self).__init__()
- self.d = dimension
-
- def forward(self, x):
- x1 = []
- x2 = []
- for xi in x:
- xi1, xi2 = xi.chunk(2, self.d)
- x1.append(xi1)
- x2.append(xi2)
- return torch.cat(x1+x2, self.d)
-
-
-class Shortcut(nn.Module):
- def __init__(self, dimension=0):
- super(Shortcut, self).__init__()
- self.d = dimension
-
- def forward(self, x):
- return x[0]+x[1]
-
-
-class Foldcut(nn.Module):
- def __init__(self, dimension=0):
- super(Foldcut, self).__init__()
- self.d = dimension
-
- def forward(self, x):
- x1, x2 = x.chunk(2, self.d)
- return x1+x2
-
-
-class Conv(nn.Module):
- # Standard convolution
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
- super(Conv, self).__init__()
- self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
- self.bn = nn.BatchNorm2d(c2)
- self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
-
- def forward(self, x):
- return self.act(self.bn(self.conv(x)))
-
- def fuseforward(self, x):
- return self.act(self.conv(x))
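-
- # Commented usage sketch (the values are illustrative, not taken from this repo's configs):
- #   c = Conv(3, 32, k=3, s=2)            # Conv2d(3->32, 3x3, stride 2, 'same' pad) + BN + SiLU
- #   y = c(torch.zeros(1, 3, 640, 640))   # -> shape (1, 32, 320, 320)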
-
-
-class RobustConv(nn.Module):
- # Robust convolution (use a large kernel size, 7-11, for downsampling and other layers). Train for 300-450 epochs.
- def __init__(self, c1, c2, k=7, s=1, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups
- super(RobustConv, self).__init__()
- self.conv_dw = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act)
- self.conv1x1 = nn.Conv2d(c1, c2, 1, 1, 0, groups=1, bias=True)
- self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None
-
- def forward(self, x):
- x = x.to(memory_format=torch.channels_last)
- x = self.conv1x1(self.conv_dw(x))
- if self.gamma is not None:
- x = x.mul(self.gamma.reshape(1, -1, 1, 1))
- return x
-
-
-class RobustConv2(nn.Module):
- # Robust convolution 2 (use [32, 5, 2] or [32, 7, 4] or [32, 11, 8] for one of the paths in CSP).
- def __init__(self, c1, c2, k=7, s=4, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups
- super(RobustConv2, self).__init__()
- self.conv_strided = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act)
- self.conv_deconv = nn.ConvTranspose2d(in_channels=c1, out_channels=c2, kernel_size=s, stride=s,
- padding=0, bias=True, dilation=1, groups=1
- )
- self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None
-
- def forward(self, x):
- x = self.conv_deconv(self.conv_strided(x))
- if self.gamma is not None:
- x = x.mul(self.gamma.reshape(1, -1, 1, 1))
- return x
-
-
-def DWConv(c1, c2, k=1, s=1, act=True):
- # Depthwise convolution
- return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
-
-
-class GhostConv(nn.Module):
- # Ghost Convolution https://github.com/huawei-noah/ghostnet
- def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
- super(GhostConv, self).__init__()
- c_ = c2 // 2 # hidden channels
- self.cv1 = Conv(c1, c_, k, s, None, g, act)
- self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
-
- def forward(self, x):
- y = self.cv1(x)
- return torch.cat([y, self.cv2(y)], 1)
-
-
-class Stem(nn.Module):
- # Stem
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
- super(Stem, self).__init__()
- c_ = int(c2/2) # hidden channels
- self.cv1 = Conv(c1, c_, 3, 2)
- self.cv2 = Conv(c_, c_, 1, 1)
- self.cv3 = Conv(c_, c_, 3, 2)
- self.pool = torch.nn.MaxPool2d(2, stride=2)
- self.cv4 = Conv(2 * c_, c2, 1, 1)
-
- def forward(self, x):
- x = self.cv1(x)
- return self.cv4(torch.cat((self.cv3(self.cv2(x)), self.pool(x)), dim=1))
-
-
-class DownC(nn.Module):
- # DownC downsampling block (strided-conv branch and max-pool branch, concatenated)
- def __init__(self, c1, c2, n=1, k=2):
- super(DownC, self).__init__()
- c_ = int(c1) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_, c2//2, 3, k)
- self.cv3 = Conv(c1, c2//2, 1, 1)
- self.mp = nn.MaxPool2d(kernel_size=k, stride=k)
-
- def forward(self, x):
- return torch.cat((self.cv2(self.cv1(x)), self.cv3(self.mp(x))), dim=1)
-
-
-class SPP(nn.Module):
- # Spatial pyramid pooling layer used in YOLOv3-SPP
- def __init__(self, c1, c2, k=(5, 9, 13)):
- super(SPP, self).__init__()
- c_ = c1 // 2 # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
- self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
-
- def forward(self, x):
- x = self.cv1(x)
- return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
-
-
-class Bottleneck(nn.Module):
- # Darknet bottleneck
- def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
- super(Bottleneck, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_, c2, 3, 1, g=g)
- self.add = shortcut and c1 == c2
-
- def forward(self, x):
- return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
-
-
-class Res(nn.Module):
- # ResNet bottleneck
- def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
- super(Res, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_, c_, 3, 1, g=g)
- self.cv3 = Conv(c_, c2, 1, 1)
- self.add = shortcut and c1 == c2
-
- def forward(self, x):
- return x + self.cv3(self.cv2(self.cv1(x))) if self.add else self.cv3(self.cv2(self.cv1(x)))
-
-
-class ResX(Res):
- # ResNeXt bottleneck (ResNet bottleneck with grouped 3x3 convolution, g=32 by default)
- def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
- super().__init__(c1, c2, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
-
-
-class Ghost(nn.Module):
- # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
- def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
- super(Ghost, self).__init__()
- c_ = c2 // 2
- self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
- DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
- GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
- self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
- Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
-
- def forward(self, x):
- return self.conv(x) + self.shortcut(x)
-
-##### end of basic #####
-
-
-##### cspnet #####
-
-class SPPCSPC(nn.Module):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
- super(SPPCSPC, self).__init__()
- c_ = int(2 * c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c1, c_, 1, 1)
- self.cv3 = Conv(c_, c_, 3, 1)
- self.cv4 = Conv(c_, c_, 1, 1)
- self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
- self.cv5 = Conv(4 * c_, c_, 1, 1)
- self.cv6 = Conv(c_, c_, 3, 1)
- self.cv7 = Conv(2 * c_, c2, 1, 1)
-
- def forward(self, x):
- x1 = self.cv4(self.cv3(self.cv1(x)))
- y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1)))
- y2 = self.cv2(x)
- return self.cv7(torch.cat((y1, y2), dim=1))
-
-class GhostSPPCSPC(SPPCSPC):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
- super().__init__(c1, c2, n, shortcut, g, e, k)
- c_ = int(2 * c2 * e) # hidden channels
- self.cv1 = GhostConv(c1, c_, 1, 1)
- self.cv2 = GhostConv(c1, c_, 1, 1)
- self.cv3 = GhostConv(c_, c_, 3, 1)
- self.cv4 = GhostConv(c_, c_, 1, 1)
- self.cv5 = GhostConv(4 * c_, c_, 1, 1)
- self.cv6 = GhostConv(c_, c_, 3, 1)
- self.cv7 = GhostConv(2 * c_, c2, 1, 1)
-
-
-class GhostStem(Stem):
- # Stem
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
- super().__init__(c1, c2, k, s, p, g, act)
- c_ = int(c2/2) # hidden channels
- self.cv1 = GhostConv(c1, c_, 3, 2)
- self.cv2 = GhostConv(c_, c_, 1, 1)
- self.cv3 = GhostConv(c_, c_, 3, 2)
- self.cv4 = GhostConv(2 * c_, c2, 1, 1)
-
-
-class BottleneckCSPA(nn.Module):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(BottleneckCSPA, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c1, c_, 1, 1)
- self.cv3 = Conv(2 * c_, c2, 1, 1)
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- y1 = self.m(self.cv1(x))
- y2 = self.cv2(x)
- return self.cv3(torch.cat((y1, y2), dim=1))
-
-
-class BottleneckCSPB(nn.Module):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(BottleneckCSPB, self).__init__()
- c_ = int(c2) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_, c_, 1, 1)
- self.cv3 = Conv(2 * c_, c2, 1, 1)
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- x1 = self.cv1(x)
- y1 = self.m(x1)
- y2 = self.cv2(x1)
- return self.cv3(torch.cat((y1, y2), dim=1))
-
-
-class BottleneckCSPC(nn.Module):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(BottleneckCSPC, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c1, c_, 1, 1)
- self.cv3 = Conv(c_, c_, 1, 1)
- self.cv4 = Conv(2 * c_, c2, 1, 1)
- self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- y1 = self.cv3(self.m(self.cv1(x)))
- y2 = self.cv2(x)
- return self.cv4(torch.cat((y1, y2), dim=1))
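-
- # The three CSP variants above differ only in how the two branches are formed:
- #   CSPA: both branches are 1x1 convs of the input; one branch runs through the bottlenecks.
- #   CSPB: a single 1x1 conv is applied first and both branches split from its output
- #         (hidden width = c2 rather than c2 * e).
- #   CSPC: like CSPA, but the bottleneck branch gets an extra 1x1 conv before the final concat.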
-
-
-class ResCSPA(BottleneckCSPA):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)])
-
-
-class ResCSPB(BottleneckCSPB):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2) # hidden channels
- self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)])
-
-
-class ResCSPC(BottleneckCSPC):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)])
-
-
-class ResXCSPA(ResCSPA):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
-
-class ResXCSPB(ResCSPB):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2) # hidden channels
- self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
-
-class ResXCSPC(ResCSPC):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
-
-class GhostCSPA(BottleneckCSPA):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)])
-
-
-class GhostCSPB(BottleneckCSPB):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2) # hidden channels
- self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)])
-
-
-class GhostCSPC(BottleneckCSPC):
- # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)])
-
-##### end of cspnet #####
-
-
-##### yolor #####
-
-class ImplicitA(nn.Module):
- def __init__(self, channel, mean=0., std=.02):
- super(ImplicitA, self).__init__()
- self.channel = channel
- self.mean = mean
- self.std = std
- self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1))
- nn.init.normal_(self.implicit, mean=self.mean, std=self.std)
-
- def forward(self, x):
- return self.implicit + x
-
-
-class ImplicitM(nn.Module):
- def __init__(self, channel, mean=0., std=.02):
- super(ImplicitM, self).__init__()
- self.channel = channel
- self.mean = mean
- self.std = std
- self.implicit = nn.Parameter(torch.ones(1, channel, 1, 1))
- nn.init.normal_(self.implicit, mean=self.mean, std=self.std)
-
- def forward(self, x):
- return self.implicit * x
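-
- # ImplicitA / ImplicitM are YOLOR-style "implicit knowledge" modules: a learned
- # (1, channel, 1, 1) parameter that is broadcast over the feature map, added by
- # ImplicitA and multiplied by ImplicitM, e.g.
- #   ia = ImplicitA(256); ia(torch.zeros(1, 256, 20, 20)).shape == (1, 256, 20, 20)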
-
-##### end of yolor #####
-
-
-##### repvgg #####
-
-class RepConv(nn.Module):
- # Represented convolution
- # https://arxiv.org/abs/2101.03697
-
- def __init__(self, c1, c2, k=3, s=1, p=None, g=1, act=True, deploy=False):
- super(RepConv, self).__init__()
-
- self.deploy = deploy
- self.groups = g
- self.in_channels = c1
- self.out_channels = c2
-
- assert k == 3
- assert autopad(k, p) == 1
-
- padding_11 = autopad(k, p) - k // 2
-
- self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
-
- if deploy:
- self.rbr_reparam = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=True)
-
- else:
- self.rbr_identity = (nn.BatchNorm2d(num_features=c1) if c2 == c1 and s == 1 else None)
-
- self.rbr_dense = nn.Sequential(
- nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False),
- nn.BatchNorm2d(num_features=c2),
- )
-
- self.rbr_1x1 = nn.Sequential(
- nn.Conv2d( c1, c2, 1, s, padding_11, groups=g, bias=False),
- nn.BatchNorm2d(num_features=c2),
- )
-
- def forward(self, inputs):
- if hasattr(self, "rbr_reparam"):
- return self.act(self.rbr_reparam(inputs))
-
- if self.rbr_identity is None:
- id_out = 0
- else:
- id_out = self.rbr_identity(inputs)
-
- return self.act(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out)
-
- def get_equivalent_kernel_bias(self):
- kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
- kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
- kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
- return (
- kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid,
- bias3x3 + bias1x1 + biasid,
- )
-
- def _pad_1x1_to_3x3_tensor(self, kernel1x1):
- if kernel1x1 is None:
- return 0
- else:
- return nn.functional.pad(kernel1x1, [1, 1, 1, 1])
-
- def _fuse_bn_tensor(self, branch):
- if branch is None:
- return 0, 0
- if isinstance(branch, nn.Sequential):
- kernel = branch[0].weight
- running_mean = branch[1].running_mean
- running_var = branch[1].running_var
- gamma = branch[1].weight
- beta = branch[1].bias
- eps = branch[1].eps
- else:
- assert isinstance(branch, nn.BatchNorm2d)
- if not hasattr(self, "id_tensor"):
- input_dim = self.in_channels // self.groups
- kernel_value = np.zeros(
- (self.in_channels, input_dim, 3, 3), dtype=np.float32
- )
- for i in range(self.in_channels):
- kernel_value[i, i % input_dim, 1, 1] = 1
- self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
- kernel = self.id_tensor
- running_mean = branch.running_mean
- running_var = branch.running_var
- gamma = branch.weight
- beta = branch.bias
- eps = branch.eps
- std = (running_var + eps).sqrt()
- t = (gamma / std).reshape(-1, 1, 1, 1)
- return kernel * t, beta - running_mean * gamma / std
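-
- # BN folding used above: for a conv kernel W followed by BatchNorm(gamma, beta, mu, var, eps),
- #   std = sqrt(var + eps)
- #   W_fused = W * (gamma / std)    (reshaped per output channel)
- #   b_fused = beta - mu * gamma / std
- # so the fused conv reproduces conv + BN exactly in eval mode.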
-
- def repvgg_convert(self):
- kernel, bias = self.get_equivalent_kernel_bias()
- return (
- kernel.detach().cpu().numpy(),
- bias.detach().cpu().numpy(),
- )
-
- def fuse_conv_bn(self, conv, bn):
-
- std = (bn.running_var + bn.eps).sqrt()
- bias = bn.bias - bn.running_mean * bn.weight / std
-
- t = (bn.weight / std).reshape(-1, 1, 1, 1)
- weights = conv.weight * t
-
- bn = nn.Identity()
- conv = nn.Conv2d(in_channels = conv.in_channels,
- out_channels = conv.out_channels,
- kernel_size = conv.kernel_size,
- stride=conv.stride,
- padding = conv.padding,
- dilation = conv.dilation,
- groups = conv.groups,
- bias = True,
- padding_mode = conv.padding_mode)
-
- conv.weight = torch.nn.Parameter(weights)
- conv.bias = torch.nn.Parameter(bias)
- return conv
-
- def fuse_repvgg_block(self):
- if self.deploy:
- return
- print(f"RepConv.fuse_repvgg_block")
-
- self.rbr_dense = self.fuse_conv_bn(self.rbr_dense[0], self.rbr_dense[1])
-
- self.rbr_1x1 = self.fuse_conv_bn(self.rbr_1x1[0], self.rbr_1x1[1])
- rbr_1x1_bias = self.rbr_1x1.bias
- weight_1x1_expanded = torch.nn.functional.pad(self.rbr_1x1.weight, [1, 1, 1, 1])
-
- # Fuse self.rbr_identity
- if (isinstance(self.rbr_identity, nn.BatchNorm2d) or isinstance(self.rbr_identity, nn.modules.batchnorm.SyncBatchNorm)):
- # print(f"fuse: rbr_identity == BatchNorm2d or SyncBatchNorm")
- identity_conv_1x1 = nn.Conv2d(
- in_channels=self.in_channels,
- out_channels=self.out_channels,
- kernel_size=1,
- stride=1,
- padding=0,
- groups=self.groups,
- bias=False)
- identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.to(self.rbr_1x1.weight.data.device)
- identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.squeeze().squeeze()
- # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}")
- identity_conv_1x1.weight.data.fill_(0.0)
- identity_conv_1x1.weight.data.fill_diagonal_(1.0)
- identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.unsqueeze(2).unsqueeze(3)
- # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}")
-
- identity_conv_1x1 = self.fuse_conv_bn(identity_conv_1x1, self.rbr_identity)
- bias_identity_expanded = identity_conv_1x1.bias
- weight_identity_expanded = torch.nn.functional.pad(identity_conv_1x1.weight, [1, 1, 1, 1])
- else:
- # print(f"fuse: rbr_identity != BatchNorm2d, rbr_identity = {self.rbr_identity}")
- bias_identity_expanded = torch.nn.Parameter( torch.zeros_like(rbr_1x1_bias) )
- weight_identity_expanded = torch.nn.Parameter( torch.zeros_like(weight_1x1_expanded) )
-
-
- #print(f"self.rbr_1x1.weight = {self.rbr_1x1.weight.shape}, ")
- #print(f"weight_1x1_expanded = {weight_1x1_expanded.shape}, ")
- #print(f"self.rbr_dense.weight = {self.rbr_dense.weight.shape}, ")
-
- self.rbr_dense.weight = torch.nn.Parameter(self.rbr_dense.weight + weight_1x1_expanded + weight_identity_expanded)
- self.rbr_dense.bias = torch.nn.Parameter(self.rbr_dense.bias + rbr_1x1_bias + bias_identity_expanded)
-
- self.rbr_reparam = self.rbr_dense
- self.deploy = True
-
- if self.rbr_identity is not None:
- del self.rbr_identity
- self.rbr_identity = None
-
- if self.rbr_1x1 is not None:
- del self.rbr_1x1
- self.rbr_1x1 = None
-
- if self.rbr_dense is not None:
- del self.rbr_dense
- self.rbr_dense = None
-
-
-class RepBottleneck(Bottleneck):
- # Standard bottleneck
- def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
- super().__init__(c1, c2, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.cv2 = RepConv(c_, c2, 3, 1, g=g)
-
-
-class RepBottleneckCSPA(BottleneckCSPA):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
-
-class RepBottleneckCSPB(BottleneckCSPB):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2) # hidden channels
- self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
-
-class RepBottleneckCSPC(BottleneckCSPC):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
-
-class RepRes(Res):
- # Standard bottleneck
- def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
- super().__init__(c1, c2, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.cv2 = RepConv(c_, c_, 3, 1, g=g)
-
-
-class RepResCSPA(ResCSPA):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)])
-
-
-class RepResCSPB(ResCSPB):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2) # hidden channels
- self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)])
-
-
-class RepResCSPC(ResCSPC):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)])
-
-
-class RepResX(ResX):
- # Standard bottleneck
- def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
- super().__init__(c1, c2, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.cv2 = RepConv(c_, c_, 3, 1, g=g)
-
-
-class RepResXCSPA(ResXCSPA):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)])
-
-
-class RepResXCSPB(ResXCSPB):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2) # hidden channels
- self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)])
-
-
-class RepResXCSPC(ResXCSPC):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__(c1, c2, n, shortcut, g, e)
- c_ = int(c2 * e) # hidden channels
- self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)])
-
-##### end of repvgg #####
-
-
-##### transformer #####
-
-class TransformerLayer(nn.Module):
- # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
- def __init__(self, c, num_heads):
- super().__init__()
- self.q = nn.Linear(c, c, bias=False)
- self.k = nn.Linear(c, c, bias=False)
- self.v = nn.Linear(c, c, bias=False)
- self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
- self.fc1 = nn.Linear(c, c, bias=False)
- self.fc2 = nn.Linear(c, c, bias=False)
-
- def forward(self, x):
- x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
- x = self.fc2(self.fc1(x)) + x
- return x
-
-
-class TransformerBlock(nn.Module):
- # Vision Transformer https://arxiv.org/abs/2010.11929
- def __init__(self, c1, c2, num_heads, num_layers):
- super().__init__()
- self.conv = None
- if c1 != c2:
- self.conv = Conv(c1, c2)
- self.linear = nn.Linear(c2, c2) # learnable position embedding
- self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
- self.c2 = c2
-
- def forward(self, x):
- if self.conv is not None:
- x = self.conv(x)
- b, _, w, h = x.shape
- p = x.flatten(2)
- p = p.unsqueeze(0)
- p = p.transpose(0, 3)
- p = p.squeeze(3)
- e = self.linear(p)
- x = p + e
-
- x = self.tr(x)
- x = x.unsqueeze(3)
- x = x.transpose(0, 3)
- x = x.reshape(b, self.c2, w, h)
- return x
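-
- # Shape handling in the forward above: the (b, c2, w, h) feature map is flattened to a
- # (w*h, b, c2) sequence (the sequence-first layout expected by nn.MultiheadAttention),
- # a learnable position embedding from self.linear is added, the transformer layers run,
- # and the result is reshaped back to (b, c2, w, h).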
-
-##### end of transformer #####
-
-
-##### yolov5 #####
-
-class Focus(nn.Module):
- # Focus wh information into c-space
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
- super(Focus, self).__init__()
- self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
- # self.contract = Contract(gain=2)
-
- def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
- return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
- # return self.conv(self.contract(x))
-
-
-class SPPF(nn.Module):
- # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
- def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
- super().__init__()
- c_ = c1 // 2 # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_ * 4, c2, 1, 1)
- self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
-
- def forward(self, x):
- x = self.cv1(x)
- y1 = self.m(x)
- y2 = self.m(y1)
- return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))
-
-
-class Contract(nn.Module):
- # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
- def __init__(self, gain=2):
- super().__init__()
- self.gain = gain
-
- def forward(self, x):
- N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain'
- s = self.gain
- x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)
- x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
- return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)
-
-
-class Expand(nn.Module):
- # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
- def __init__(self, gain=2):
- super().__init__()
- self.gain = gain
-
- def forward(self, x):
- N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
- s = self.gain
- x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)
- x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
- return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)
-
-
-class NMS(nn.Module):
- # Non-Maximum Suppression (NMS) module
- conf = 0.25 # confidence threshold
- iou = 0.45 # IoU threshold
- classes = None # (optional list) filter by class
-
- def __init__(self):
- super(NMS, self).__init__()
-
- def forward(self, x):
- return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
-
-
-class autoShape(nn.Module):
- # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
- conf = 0.25 # NMS confidence threshold
- iou = 0.45 # NMS IoU threshold
- classes = None # (optional list) filter by class
-
- def __init__(self, model):
- super(autoShape, self).__init__()
- self.model = model.eval()
-
- def autoshape(self):
- print('autoShape already enabled, skipping... ') # model already converted to model.autoshape()
- return self
-
- @torch.no_grad()
- def forward(self, imgs, size=640, augment=False, profile=False):
- # Inference from various sources. For height=640, width=1280 RGB images, example inputs are:
- # filename: imgs = 'data/samples/zidane.jpg'
- # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
- # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
- # PIL: = Image.open('image.jpg') # HWC x(640,1280,3)
- # numpy: = np.zeros((640,1280,3)) # HWC
- # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
- # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
-
- t = [time_synchronized()]
- p = next(self.model.parameters()) # for device and type
- if isinstance(imgs, torch.Tensor): # torch
- with amp.autocast(enabled=p.device.type != 'cpu'):
- return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
-
- # Pre-process
- n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
- shape0, shape1, files = [], [], [] # image and inference shapes, filenames
- for i, im in enumerate(imgs):
- f = f'image{i}' # filename
- if isinstance(im, str): # filename or uri
- im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
- elif isinstance(im, Image.Image): # PIL Image
- im, f = np.asarray(im), getattr(im, 'filename', f) or f
- files.append(Path(f).with_suffix('.jpg').name)
- if im.shape[0] < 5: # image in CHW
- im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
- im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input
- s = im.shape[:2] # HWC
- shape0.append(s) # image shape
- g = (size / max(s)) # gain
- shape1.append([y * g for y in s])
- imgs[i] = im # update
- shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
- x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
- x = np.stack(x, 0) if n > 1 else x[0][None] # stack
- x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
- x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
- t.append(time_synchronized())
-
- with amp.autocast(enabled=p.device.type != 'cpu'):
- # Inference
- y = self.model(x, augment, profile)[0] # forward
- t.append(time_synchronized())
-
- # Post-process
- y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS
- for i in range(n):
- scale_coords(shape1, y[i][:, :4], shape0[i])
-
- t.append(time_synchronized())
- return Detections(imgs, y, files, t, self.names, x.shape)
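-
- # Commented sketch of intended use (hypothetical; `forward` expects `self.stride` and
- # `self.names` to have been copied onto the wrapper from the underlying detection model):
- #   model = autoShape(yolo_model)                     # `yolo_model` is a placeholder
- #   results = model('inputs/example.jpg', size=640)   # path is illustrative
- #   results.print()                                   # per-class counts + timing
- #   df = results.pandas().xyxy[0]                     # detections as a DataFrame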
-
-
-class Detections:
- # detections class for YOLOv5 inference results
- def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
- super(Detections, self).__init__()
- d = pred[0].device # device
- gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
- self.imgs = imgs # list of images as numpy arrays
- self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
- self.names = names # class names
- self.files = files # image filenames
- self.xyxy = pred # xyxy pixels
- self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
- self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
- self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
- self.n = len(self.pred) # number of images (batch size)
- self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)
- self.s = shape # inference BCHW shape
-
- def display(self, pprint=False, show=False, save=False, render=False, save_dir=''):
- colors = color_list()
- for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
- str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
- if pred is not None:
- for c in pred[:, -1].unique():
- n = (pred[:, -1] == c).sum() # detections per class
- str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
- if show or save or render:
- for *box, conf, cls in pred: # xyxy, confidence, class
- label = f'{self.names[int(cls)]} {conf:.2f}'
- plot_one_box(box, img, label=label, color=colors[int(cls) % 10])
- img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np
- if pprint:
- print(str.rstrip(', '))
- if show:
- img.show(self.files[i]) # show
- if save:
- f = self.files[i]
- img.save(Path(save_dir) / f) # save
- print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
- if render:
- self.imgs[i] = np.asarray(img)
-
- def print(self):
- self.display(pprint=True) # print results
- print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)
-
- def show(self):
- self.display(show=True) # show results
-
- def save(self, save_dir='runs/hub/exp'):
- save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir
- Path(save_dir).mkdir(parents=True, exist_ok=True)
- self.display(save=True, save_dir=save_dir) # save results
-
- def render(self):
- self.display(render=True) # render results
- return self.imgs
-
- def pandas(self):
- # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
- new = copy(self) # return copy
- ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
- cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
- for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
- a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
- setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
- return new
-
- def tolist(self):
- # return a list of Detections objects, i.e. 'for result in results.tolist():'
- x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]
- for d in x:
- for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
- setattr(d, k, getattr(d, k)[0]) # pop out of list
- return x
-
- def __len__(self):
- return self.n
-
-
-class Classify(nn.Module):
- # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
- def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
- super(Classify, self).__init__()
- self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
- self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
- self.flat = nn.Flatten()
-
- def forward(self, x):
- z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
- return self.flat(self.conv(z)) # flatten to x(b,c2)
-
- ##### end of yolov5 #####
-
-
-##### orepa #####
-
-def transI_fusebn(kernel, bn):
- gamma = bn.weight
- std = (bn.running_var + bn.eps).sqrt()
- return kernel * ((gamma / std).reshape(-1, 1, 1, 1)), bn.bias - bn.running_mean * gamma / std
-
-
-class ConvBN(nn.Module):
- def __init__(self, in_channels, out_channels, kernel_size,
- stride=1, padding=0, dilation=1, groups=1, deploy=False, nonlinear=None):
- super().__init__()
- if nonlinear is None:
- self.nonlinear = nn.Identity()
- else:
- self.nonlinear = nonlinear
- if deploy:
- self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
- stride=stride, padding=padding, dilation=dilation, groups=groups, bias=True)
- else:
- self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
- stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False)
- self.bn = nn.BatchNorm2d(num_features=out_channels)
-
- def forward(self, x):
- if hasattr(self, 'bn'):
- return self.nonlinear(self.bn(self.conv(x)))
- else:
- return self.nonlinear(self.conv(x))
-
- def switch_to_deploy(self):
- kernel, bias = transI_fusebn(self.conv.weight, self.bn)
- conv = nn.Conv2d(in_channels=self.conv.in_channels, out_channels=self.conv.out_channels, kernel_size=self.conv.kernel_size,
- stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups, bias=True)
- conv.weight.data = kernel
- conv.bias.data = bias
- for para in self.parameters():
- para.detach_()
- self.__delattr__('conv')
- self.__delattr__('bn')
- self.conv = conv
-
-class OREPA_3x3_RepConv(nn.Module):
-
- def __init__(self, in_channels, out_channels, kernel_size,
- stride=1, padding=0, dilation=1, groups=1,
- internal_channels_1x1_3x3=None,
- deploy=False, nonlinear=None, single_init=False):
- super(OREPA_3x3_RepConv, self).__init__()
- self.deploy = deploy
-
- if nonlinear is None:
- self.nonlinear = nn.Identity()
- else:
- self.nonlinear = nonlinear
-
- self.kernel_size = kernel_size
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.groups = groups
- assert padding == kernel_size // 2
-
- self.stride = stride
- self.padding = padding
- self.dilation = dilation
-
- self.branch_counter = 0
-
- self.weight_rbr_origin = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), kernel_size, kernel_size))
- nn.init.kaiming_uniform_(self.weight_rbr_origin, a=math.sqrt(1.0))
- self.branch_counter += 1
-
-
- if groups < out_channels:
- self.weight_rbr_avg_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1))
- self.weight_rbr_pfir_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1))
- nn.init.kaiming_uniform_(self.weight_rbr_avg_conv, a=1.0)
- nn.init.kaiming_uniform_(self.weight_rbr_pfir_conv, a=1.0)
- self.weight_rbr_avg_conv.data
- self.weight_rbr_pfir_conv.data
- self.register_buffer('weight_rbr_avg_avg', torch.ones(kernel_size, kernel_size).mul(1.0/kernel_size/kernel_size))
- self.branch_counter += 1
-
- else:
- raise NotImplementedError
- self.branch_counter += 1
-
- if internal_channels_1x1_3x3 is None:
- internal_channels_1x1_3x3 = in_channels if groups < out_channels else 2 * in_channels # For mobilenet, it is better to have 2X internal channels
-
- if internal_channels_1x1_3x3 == in_channels:
- self.weight_rbr_1x1_kxk_idconv1 = nn.Parameter(torch.zeros(in_channels, int(in_channels/self.groups), 1, 1))
- id_value = np.zeros((in_channels, int(in_channels/self.groups), 1, 1))
- for i in range(in_channels):
- id_value[i, i % int(in_channels/self.groups), 0, 0] = 1
- id_tensor = torch.from_numpy(id_value).type_as(self.weight_rbr_1x1_kxk_idconv1)
- self.register_buffer('id_tensor', id_tensor)
-
- else:
- self.weight_rbr_1x1_kxk_conv1 = nn.Parameter(torch.Tensor(internal_channels_1x1_3x3, int(in_channels/self.groups), 1, 1))
- nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv1, a=math.sqrt(1.0))
- self.weight_rbr_1x1_kxk_conv2 = nn.Parameter(torch.Tensor(out_channels, int(internal_channels_1x1_3x3/self.groups), kernel_size, kernel_size))
- nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv2, a=math.sqrt(1.0))
- self.branch_counter += 1
-
- expand_ratio = 8
- self.weight_rbr_gconv_dw = nn.Parameter(torch.Tensor(in_channels*expand_ratio, 1, kernel_size, kernel_size))
- self.weight_rbr_gconv_pw = nn.Parameter(torch.Tensor(out_channels, in_channels*expand_ratio, 1, 1))
- nn.init.kaiming_uniform_(self.weight_rbr_gconv_dw, a=math.sqrt(1.0))
- nn.init.kaiming_uniform_(self.weight_rbr_gconv_pw, a=math.sqrt(1.0))
- self.branch_counter += 1
-
- if out_channels == in_channels and stride == 1:
- self.branch_counter += 1
-
- self.vector = nn.Parameter(torch.Tensor(self.branch_counter, self.out_channels))
- self.bn = nn.BatchNorm2d(out_channels)
-
- self.fre_init()
-
- nn.init.constant_(self.vector[0, :], 0.25) #origin
- nn.init.constant_(self.vector[1, :], 0.25) #avg
- nn.init.constant_(self.vector[2, :], 0.0) #prior
- nn.init.constant_(self.vector[3, :], 0.5) #1x1_kxk
- nn.init.constant_(self.vector[4, :], 0.5) #dws_conv
-
-
- def fre_init(self):
- prior_tensor = torch.Tensor(self.out_channels, self.kernel_size, self.kernel_size)
- half_fg = self.out_channels/2
- for i in range(self.out_channels):
- for h in range(3):
- for w in range(3):
- if i < half_fg:
- prior_tensor[i, h, w] = math.cos(math.pi*(h+0.5)*(i+1)/3)
- else:
- prior_tensor[i, h, w] = math.cos(math.pi*(w+0.5)*(i+1-half_fg)/3)
-
- self.register_buffer('weight_rbr_prior', prior_tensor)
-
- def weight_gen(self):
-
- weight_rbr_origin = torch.einsum('oihw,o->oihw', self.weight_rbr_origin, self.vector[0, :])
-
- weight_rbr_avg = torch.einsum('oihw,o->oihw', torch.einsum('oihw,hw->oihw', self.weight_rbr_avg_conv, self.weight_rbr_avg_avg), self.vector[1, :])
-
- weight_rbr_pfir = torch.einsum('oihw,o->oihw', torch.einsum('oihw,ohw->oihw', self.weight_rbr_pfir_conv, self.weight_rbr_prior), self.vector[2, :])
-
- weight_rbr_1x1_kxk_conv1 = None
- if hasattr(self, 'weight_rbr_1x1_kxk_idconv1'):
- weight_rbr_1x1_kxk_conv1 = (self.weight_rbr_1x1_kxk_idconv1 + self.id_tensor).squeeze()
- elif hasattr(self, 'weight_rbr_1x1_kxk_conv1'):
- weight_rbr_1x1_kxk_conv1 = self.weight_rbr_1x1_kxk_conv1.squeeze()
- else:
- raise NotImplementedError
- weight_rbr_1x1_kxk_conv2 = self.weight_rbr_1x1_kxk_conv2
-
- if self.groups > 1:
- g = self.groups
- t, ig = weight_rbr_1x1_kxk_conv1.size()
- o, tg, h, w = weight_rbr_1x1_kxk_conv2.size()
- weight_rbr_1x1_kxk_conv1 = weight_rbr_1x1_kxk_conv1.view(g, int(t/g), ig)
- weight_rbr_1x1_kxk_conv2 = weight_rbr_1x1_kxk_conv2.view(g, int(o/g), tg, h, w)
- weight_rbr_1x1_kxk = torch.einsum('gti,gothw->goihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2).view(o, ig, h, w)
- else:
- weight_rbr_1x1_kxk = torch.einsum('ti,othw->oihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2)
-
- weight_rbr_1x1_kxk = torch.einsum('oihw,o->oihw', weight_rbr_1x1_kxk, self.vector[3, :])
-
- weight_rbr_gconv = self.dwsc2full(self.weight_rbr_gconv_dw, self.weight_rbr_gconv_pw, self.in_channels)
- weight_rbr_gconv = torch.einsum('oihw,o->oihw', weight_rbr_gconv, self.vector[4, :])
-
- weight = weight_rbr_origin + weight_rbr_avg + weight_rbr_1x1_kxk + weight_rbr_pfir + weight_rbr_gconv
-
- return weight
-
- def dwsc2full(self, weight_dw, weight_pw, groups):
-
- t, ig, h, w = weight_dw.size()
- o, _, _, _ = weight_pw.size()
- tg = int(t/groups)
- i = int(ig*groups)
- weight_dw = weight_dw.view(groups, tg, ig, h, w)
- weight_pw = weight_pw.squeeze().view(o, groups, tg)
-
- weight_dsc = torch.einsum('gtihw,ogt->ogihw', weight_dw, weight_pw)
- return weight_dsc.view(o, i, h, w)
-
- def forward(self, inputs):
- weight = self.weight_gen()
- out = F.conv2d(inputs, weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
-
- return self.nonlinear(self.bn(out))
-
-class RepConv_OREPA(nn.Module):
-
- def __init__(self, c1, c2, k=3, s=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False, nonlinear=nn.SiLU()):
- super(RepConv_OREPA, self).__init__()
- self.deploy = deploy
- self.groups = groups
- self.in_channels = c1
- self.out_channels = c2
-
- self.padding = padding
- self.dilation = dilation
- self.groups = groups
-
- assert k == 3
- assert padding == 1
-
- padding_11 = padding - k // 2
-
- if nonlinear is None:
- self.nonlinearity = nn.Identity()
- else:
- self.nonlinearity = nonlinear
-
- if use_se:
- self.se = SEBlock(self.out_channels, internal_neurons=self.out_channels // 16)
- else:
- self.se = nn.Identity()
-
- if deploy:
- self.rbr_reparam = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s,
- padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode)
-
- else:
- self.rbr_identity = nn.BatchNorm2d(num_features=self.in_channels) if self.out_channels == self.in_channels and s == 1 else None
- self.rbr_dense = OREPA_3x3_RepConv(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s, padding=padding, groups=groups, dilation=1)
- self.rbr_1x1 = ConvBN(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=1, stride=s, padding=padding_11, groups=groups, dilation=1)
- print('RepVGG Block, identity = ', self.rbr_identity)
-
-
- def forward(self, inputs):
- if hasattr(self, 'rbr_reparam'):
- return self.nonlinearity(self.se(self.rbr_reparam(inputs)))
-
- if self.rbr_identity is None:
- id_out = 0
- else:
- id_out = self.rbr_identity(inputs)
-
- out1 = self.rbr_dense(inputs)
- out2 = self.rbr_1x1(inputs)
- out3 = id_out
- out = out1 + out2 + out3
-
- return self.nonlinearity(self.se(out))
-
-
- # Optional. This improves the accuracy and facilitates quantization.
- # 1. Cancel the original weight decay on rbr_dense.conv.weight and rbr_1x1.conv.weight.
- # 2. Use like this.
- # loss = criterion(....)
- # for every RepVGGBlock blk:
-    #           loss += weight_decay_coefficient * 0.5 * blk.get_custom_L2()
- # optimizer.zero_grad()
- # loss.backward()
-
- # Not used for OREPA
- def get_custom_L2(self):
- K3 = self.rbr_dense.weight_gen()
- K1 = self.rbr_1x1.conv.weight
- t3 = (self.rbr_dense.bn.weight / ((self.rbr_dense.bn.running_var + self.rbr_dense.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach()
- t1 = (self.rbr_1x1.bn.weight / ((self.rbr_1x1.bn.running_var + self.rbr_1x1.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach()
-
- l2_loss_circle = (K3 ** 2).sum() - (K3[:, :, 1:2, 1:2] ** 2).sum() # The L2 loss of the "circle" of weights in 3x3 kernel. Use regular L2 on them.
- eq_kernel = K3[:, :, 1:2, 1:2] * t3 + K1 * t1 # The equivalent resultant central point of 3x3 kernel.
- l2_loss_eq_kernel = (eq_kernel ** 2 / (t3 ** 2 + t1 ** 2)).sum() # Normalize for an L2 coefficient comparable to regular L2.
- return l2_loss_eq_kernel + l2_loss_circle
-
- def get_equivalent_kernel_bias(self):
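-        # Fuse the 3x3, 1x1 and identity branches (each with its BN) into a single
-        # 3x3 kernel and bias for deployment.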
- kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
- kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
- kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
- return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
-
- def _pad_1x1_to_3x3_tensor(self, kernel1x1):
- if kernel1x1 is None:
- return 0
- else:
- return torch.nn.functional.pad(kernel1x1, [1,1,1,1])
-
- def _fuse_bn_tensor(self, branch):
- if branch is None:
- return 0, 0
- if not isinstance(branch, nn.BatchNorm2d):
- if isinstance(branch, OREPA_3x3_RepConv):
- kernel = branch.weight_gen()
- elif isinstance(branch, ConvBN):
- kernel = branch.conv.weight
- else:
- raise NotImplementedError
- running_mean = branch.bn.running_mean
- running_var = branch.bn.running_var
- gamma = branch.bn.weight
- beta = branch.bn.bias
- eps = branch.bn.eps
- else:
- if not hasattr(self, 'id_tensor'):
- input_dim = self.in_channels // self.groups
- kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
- for i in range(self.in_channels):
- kernel_value[i, i % input_dim, 1, 1] = 1
- self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
- kernel = self.id_tensor
- running_mean = branch.running_mean
- running_var = branch.running_var
- gamma = branch.weight
- beta = branch.bias
- eps = branch.eps
- std = (running_var + eps).sqrt()
- t = (gamma / std).reshape(-1, 1, 1, 1)
- return kernel * t, beta - running_mean * gamma / std
-
- def switch_to_deploy(self):
- if hasattr(self, 'rbr_reparam'):
- return
-        print("RepConv_OREPA.switch_to_deploy")
- kernel, bias = self.get_equivalent_kernel_bias()
- self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.in_channels, out_channels=self.rbr_dense.out_channels,
- kernel_size=self.rbr_dense.kernel_size, stride=self.rbr_dense.stride,
- padding=self.rbr_dense.padding, dilation=self.rbr_dense.dilation, groups=self.rbr_dense.groups, bias=True)
- self.rbr_reparam.weight.data = kernel
- self.rbr_reparam.bias.data = bias
- for para in self.parameters():
- para.detach_()
- self.__delattr__('rbr_dense')
- self.__delattr__('rbr_1x1')
- if hasattr(self, 'rbr_identity'):
- self.__delattr__('rbr_identity')
-
-##### end of orepa #####
-
-
-##### swin transformer #####
-
-class WindowAttention(nn.Module):
-
- def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
-
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
-
- # define a parameter table of relative position bias
- self.relative_position_bias_table = nn.Parameter(
- torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
-
- # get pair-wise relative position index for each token inside the window
- coords_h = torch.arange(self.window_size[0])
- coords_w = torch.arange(self.window_size[1])
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
- relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
- relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
- relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
- relative_coords[:, :, 1] += self.window_size[1] - 1
- relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
- relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
- self.register_buffer("relative_position_index", relative_position_index)
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
-
- nn.init.normal_(self.relative_position_bias_table, std=.02)
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x, mask=None):
-
- B_, N, C = x.shape
- qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
-
- q = q * self.scale
- attn = (q @ k.transpose(-2, -1))
-
- relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
- self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
- relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
- attn = attn + relative_position_bias.unsqueeze(0)
-
- if mask is not None:
- nW = mask.shape[0]
- attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
- attn = attn.view(-1, self.num_heads, N, N)
- attn = self.softmax(attn)
- else:
- attn = self.softmax(attn)
-
- attn = self.attn_drop(attn)
-
-        # fall back to half precision when attn and v have mismatched dtypes (e.g. mixed-precision inference)
-        try:
-            x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
-        except RuntimeError:
-            x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
-class Mlp(nn.Module):
-
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-def window_partition(x, window_size):
-
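-    # Split a (B, H, W, C) feature map into non-overlapping windows of shape
-    # (num_windows*B, window_size, window_size, C).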
- B, H, W, C = x.shape
-    assert H % window_size == 0 and W % window_size == 0, 'feature map H and W must be divisible by window_size'
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
- return windows
-
-def window_reverse(windows, window_size, H, W):
-
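-    # Inverse of window_partition: stitch windows back into a (B, H, W, C) feature map.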
- B = int(windows.shape[0] / (H * W / window_size / window_size))
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
- return x
-
-
-class SwinTransformerLayer(nn.Module):
-
- def __init__(self, dim, num_heads, window_size=8, shift_size=0,
- mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
- act_layer=nn.SiLU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.dim = dim
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
- # if min(self.input_resolution) <= self.window_size:
- # # if window size is larger than input resolution, we don't partition windows
- # self.shift_size = 0
- # self.window_size = min(self.input_resolution)
-        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
-
- self.norm1 = norm_layer(dim)
- self.attn = WindowAttention(
- dim, window_size=(self.window_size, self.window_size), num_heads=num_heads,
- qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
-
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def create_mask(self, H, W):
- # calculate attention mask for SW-MSA
- img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
- h_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- w_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- cnt = 0
- for h in h_slices:
- for w in w_slices:
- img_mask[:, h, w, :] = cnt
- cnt += 1
-
- mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
-
- return attn_mask
-
- def forward(self, x):
- # reshape x[b c h w] to x[b l c]
- _, _, H_, W_ = x.shape
-
- Padding = False
- if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0:
- Padding = True
- # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.')
- pad_r = (self.window_size - W_ % self.window_size) % self.window_size
- pad_b = (self.window_size - H_ % self.window_size) % self.window_size
- x = F.pad(x, (0, pad_r, 0, pad_b))
-
- # print('2', x.shape)
- B, C, H, W = x.shape
- L = H * W
- x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c
-
-        # the attention mask is built here in forward (rather than in __init__) because H and W vary
- if self.shift_size > 0:
- attn_mask = self.create_mask(H, W).to(x.device)
- else:
- attn_mask = None
-
- shortcut = x
- x = self.norm1(x)
- x = x.view(B, H, W, C)
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- else:
- shifted_x = x
-
- # partition windows
- x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
-
- # W-MSA/SW-MSA
- attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
- shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
-
- # reverse cyclic shift
- if self.shift_size > 0:
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
- else:
- x = shifted_x
- x = x.view(B, H * W, C)
-
- # FFN
- x = shortcut + self.drop_path(x)
- x = x + self.drop_path(self.mlp(self.norm2(x)))
-
- x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w
-
- if Padding:
- x = x[:, :, :H_, :W_] # reverse padding
-
- return x
-
-
-class SwinTransformerBlock(nn.Module):
- def __init__(self, c1, c2, num_heads, num_layers, window_size=8):
- super().__init__()
- self.conv = None
- if c1 != c2:
- self.conv = Conv(c1, c2)
-
- # remove input_resolution
- self.blocks = nn.Sequential(*[SwinTransformerLayer(dim=c2, num_heads=num_heads, window_size=window_size,
- shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)])
-
- def forward(self, x):
- if self.conv is not None:
- x = self.conv(x)
- x = self.blocks(x)
- return x
-
-
-class STCSPA(nn.Module):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(STCSPA, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c1, c_, 1, 1)
- self.cv3 = Conv(2 * c_, c2, 1, 1)
- num_heads = c_ // 32
- self.m = SwinTransformerBlock(c_, c_, num_heads, n)
- #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- y1 = self.m(self.cv1(x))
- y2 = self.cv2(x)
- return self.cv3(torch.cat((y1, y2), dim=1))
-
-
-class STCSPB(nn.Module):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(STCSPB, self).__init__()
- c_ = int(c2) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_, c_, 1, 1)
- self.cv3 = Conv(2 * c_, c2, 1, 1)
- num_heads = c_ // 32
- self.m = SwinTransformerBlock(c_, c_, num_heads, n)
- #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- x1 = self.cv1(x)
- y1 = self.m(x1)
- y2 = self.cv2(x1)
- return self.cv3(torch.cat((y1, y2), dim=1))
-
-
-class STCSPC(nn.Module):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(STCSPC, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c1, c_, 1, 1)
- self.cv3 = Conv(c_, c_, 1, 1)
- self.cv4 = Conv(2 * c_, c2, 1, 1)
- num_heads = c_ // 32
- self.m = SwinTransformerBlock(c_, c_, num_heads, n)
- #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- y1 = self.cv3(self.m(self.cv1(x)))
- y2 = self.cv2(x)
- return self.cv4(torch.cat((y1, y2), dim=1))
-
-##### end of swin transformer #####
-
-
-##### swin transformer v2 #####
-
-class WindowAttention_v2(nn.Module):
-
- def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.,
- pretrained_window_size=[0, 0]):
-
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.pretrained_window_size = pretrained_window_size
- self.num_heads = num_heads
-
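-        # learnable per-head temperature for the cosine attention used in Swin Transformer v2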
- self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True)
-
- # mlp to generate continuous relative position bias
- self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True),
- nn.ReLU(inplace=True),
- nn.Linear(512, num_heads, bias=False))
-
- # get relative_coords_table
- relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
- relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
- relative_coords_table = torch.stack(
- torch.meshgrid([relative_coords_h,
- relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2
- if pretrained_window_size[0] > 0:
- relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1)
- relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1)
- else:
- relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)
- relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)
- relative_coords_table *= 8 # normalize to -8, 8
- relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
- torch.abs(relative_coords_table) + 1.0) / np.log2(8)
-
- self.register_buffer("relative_coords_table", relative_coords_table)
-
- # get pair-wise relative position index for each token inside the window
- coords_h = torch.arange(self.window_size[0])
- coords_w = torch.arange(self.window_size[1])
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
- relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
- relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
- relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
- relative_coords[:, :, 1] += self.window_size[1] - 1
- relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
- relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
- self.register_buffer("relative_position_index", relative_position_index)
-
- self.qkv = nn.Linear(dim, dim * 3, bias=False)
- if qkv_bias:
- self.q_bias = nn.Parameter(torch.zeros(dim))
- self.v_bias = nn.Parameter(torch.zeros(dim))
- else:
- self.q_bias = None
- self.v_bias = None
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x, mask=None):
-
- B_, N, C = x.shape
- qkv_bias = None
- if self.q_bias is not None:
- qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
- qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
- qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
-
- # cosine attention
- attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
- logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01))).exp()
- attn = attn * logit_scale
-
- relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
- relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
- self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
- relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
- relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
- attn = attn + relative_position_bias.unsqueeze(0)
-
- if mask is not None:
- nW = mask.shape[0]
- attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
- attn = attn.view(-1, self.num_heads, N, N)
- attn = self.softmax(attn)
- else:
- attn = self.softmax(attn)
-
- attn = self.attn_drop(attn)
-
- try:
- x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
-        except RuntimeError:  # dtype mismatch under mixed precision
- x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C)
-
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
- def extra_repr(self) -> str:
- return f'dim={self.dim}, window_size={self.window_size}, ' \
- f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}'
-
- def flops(self, N):
- # calculate flops for 1 window with token length of N
- flops = 0
- # qkv = self.qkv(x)
- flops += N * self.dim * 3 * self.dim
- # attn = (q @ k.transpose(-2, -1))
- flops += self.num_heads * N * (self.dim // self.num_heads) * N
- # x = (attn @ v)
- flops += self.num_heads * N * N * (self.dim // self.num_heads)
- # x = self.proj(x)
- flops += N * self.dim * self.dim
- return flops
-
-class Mlp_v2(nn.Module):
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-def window_partition_v2(x, window_size):
-
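-    # Same as window_partition above, but without the divisibility assert
-    # (padding is handled in the layer's forward).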
- B, H, W, C = x.shape
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
- return windows
-
-
-def window_reverse_v2(windows, window_size, H, W):
-
- B = int(windows.shape[0] / (H * W / window_size / window_size))
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
- return x
-
-
-class SwinTransformerLayer_v2(nn.Module):
-
- def __init__(self, dim, num_heads, window_size=7, shift_size=0,
- mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
- act_layer=nn.SiLU, norm_layer=nn.LayerNorm, pretrained_window_size=0):
- super().__init__()
- self.dim = dim
- #self.input_resolution = input_resolution
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
- #if min(self.input_resolution) <= self.window_size:
- # # if window size is larger than input resolution, we don't partition windows
- # self.shift_size = 0
- # self.window_size = min(self.input_resolution)
-        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
-
- self.norm1 = norm_layer(dim)
- self.attn = WindowAttention_v2(
- dim, window_size=(self.window_size, self.window_size), num_heads=num_heads,
- qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
- pretrained_window_size=(pretrained_window_size, pretrained_window_size))
-
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp_v2(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def create_mask(self, H, W):
- # calculate attention mask for SW-MSA
- img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
- h_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- w_slices = (slice(0, -self.window_size),
- slice(-self.window_size, -self.shift_size),
- slice(-self.shift_size, None))
- cnt = 0
- for h in h_slices:
- for w in w_slices:
- img_mask[:, h, w, :] = cnt
- cnt += 1
-
- mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
-
- return attn_mask
-
- def forward(self, x):
- # reshape x[b c h w] to x[b l c]
- _, _, H_, W_ = x.shape
-
- Padding = False
- if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0:
- Padding = True
- # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.')
- pad_r = (self.window_size - W_ % self.window_size) % self.window_size
- pad_b = (self.window_size - H_ % self.window_size) % self.window_size
- x = F.pad(x, (0, pad_r, 0, pad_b))
-
- # print('2', x.shape)
- B, C, H, W = x.shape
- L = H * W
- x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c
-
-        # the attention mask is built here in forward (rather than in __init__) because H and W vary
- if self.shift_size > 0:
- attn_mask = self.create_mask(H, W).to(x.device)
- else:
- attn_mask = None
-
- shortcut = x
- x = x.view(B, H, W, C)
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- else:
- shifted_x = x
-
- # partition windows
- x_windows = window_partition_v2(shifted_x, self.window_size) # nW*B, window_size, window_size, C
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
-
- # W-MSA/SW-MSA
- attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
- shifted_x = window_reverse_v2(attn_windows, self.window_size, H, W) # B H' W' C
-
- # reverse cyclic shift
- if self.shift_size > 0:
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
- else:
- x = shifted_x
- x = x.view(B, H * W, C)
- x = shortcut + self.drop_path(self.norm1(x))
-
- # FFN
- x = x + self.drop_path(self.norm2(self.mlp(x)))
- x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w
-
- if Padding:
- x = x[:, :, :H_, :W_] # reverse padding
-
- return x
-
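-    # Note: extra_repr() and flops() below still reference self.input_resolution, which is never set
-    # in this version (it was removed from __init__), so calling them would raise AttributeError.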
- def extra_repr(self) -> str:
- return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
- f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
-
- def flops(self):
- flops = 0
- H, W = self.input_resolution
- # norm1
- flops += self.dim * H * W
- # W-MSA/SW-MSA
- nW = H * W / self.window_size / self.window_size
- flops += nW * self.attn.flops(self.window_size * self.window_size)
- # mlp
- flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
- # norm2
- flops += self.dim * H * W
- return flops
-
-
-class SwinTransformer2Block(nn.Module):
- def __init__(self, c1, c2, num_heads, num_layers, window_size=7):
- super().__init__()
- self.conv = None
- if c1 != c2:
- self.conv = Conv(c1, c2)
-
- # remove input_resolution
- self.blocks = nn.Sequential(*[SwinTransformerLayer_v2(dim=c2, num_heads=num_heads, window_size=window_size,
- shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)])
-
- def forward(self, x):
- if self.conv is not None:
- x = self.conv(x)
- x = self.blocks(x)
- return x
-
-
-class ST2CSPA(nn.Module):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(ST2CSPA, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c1, c_, 1, 1)
- self.cv3 = Conv(2 * c_, c2, 1, 1)
- num_heads = c_ // 32
- self.m = SwinTransformer2Block(c_, c_, num_heads, n)
- #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- y1 = self.m(self.cv1(x))
- y2 = self.cv2(x)
- return self.cv3(torch.cat((y1, y2), dim=1))
-
-
-class ST2CSPB(nn.Module):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(ST2CSPB, self).__init__()
- c_ = int(c2) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c_, c_, 1, 1)
- self.cv3 = Conv(2 * c_, c2, 1, 1)
- num_heads = c_ // 32
- self.m = SwinTransformer2Block(c_, c_, num_heads, n)
- #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- x1 = self.cv1(x)
- y1 = self.m(x1)
- y2 = self.cv2(x1)
- return self.cv3(torch.cat((y1, y2), dim=1))
-
-
-class ST2CSPC(nn.Module):
- # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
- def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
- super(ST2CSPC, self).__init__()
- c_ = int(c2 * e) # hidden channels
- self.cv1 = Conv(c1, c_, 1, 1)
- self.cv2 = Conv(c1, c_, 1, 1)
- self.cv3 = Conv(c_, c_, 1, 1)
- self.cv4 = Conv(2 * c_, c2, 1, 1)
- num_heads = c_ // 32
- self.m = SwinTransformer2Block(c_, c_, num_heads, n)
- #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
-
- def forward(self, x):
- y1 = self.cv3(self.m(self.cv1(x)))
- y2 = self.cv2(x)
- return self.cv4(torch.cat((y1, y2), dim=1))
-
-##### end of swin transformer v2 #####
diff --git a/spaces/JeffJing/ZookChatBot/steamship/data/tags/tag_constants.py b/spaces/JeffJing/ZookChatBot/steamship/data/tags/tag_constants.py
deleted file mode 100644
index 6baf3f62d3bdc060bae163bec99459d1ac122f47..0000000000000000000000000000000000000000
--- a/spaces/JeffJing/ZookChatBot/steamship/data/tags/tag_constants.py
+++ /dev/null
@@ -1,246 +0,0 @@
-from enum import Enum
-from typing import Optional
-
-
-class TagKind(str, Enum):
- """A set of `kind` constants for Tags.
-
- These define broad categories of tags. Suggested `name` values for each category are found in
- separate enums. For example: kind=TagKind.DOCUMENT, name=DocTag.H1
- """
-
- PART_OF_SPEECH = "part-of-speech"
- DEPENDENCY = "dependency"
- SENTIMENT = "sentiment"
- EMOTION = "emotion"
- ENTITY = "entity"
- DOCUMENT = "document"
- TOKEN = "token" # noqa: S105
- INTENT = "intent"
- EMBEDDING = "embedding"
- GENERATION = "generation"
- PROVENANCE = "provenance"
- TOPIC = "topic"
- TOKENIZATION = "tokenization"
- KIND = "summary"
- TIMESTAMP = "timestamp"
- SUMMARY = "summary"
- SEARCH_RESULT = "search-result"
- ROLE = "role"
-
-
-class DocTag(str, Enum):
- """A set of `name` constants for Tags with a `kind` of `TagKind.doc`; appropriate for HTML and Markdown ideas."""
-
- DOCUMENT = "document"
- PAGE = "page" # E.g. in a PDF
- REGION = "region" # E.g., abstract catchall region in a document
- HEADER = "header"
- H1 = "h1"
- H2 = "h2"
- H3 = "h3"
- H4 = "h4"
- H5 = "h5"
- LINE = "line"
- TITLE = "title"
- SUBTITLE = "subtitle"
- FOOTER = "footer"
- PARAGRAPH = "paragraph"
- ORDERED_LIST = "ordered-list"
- UNORDERED_LIST = "unordered-list"
- LIST_ITEM = "list-item"
- LINK = "link"
- CAPTION = "caption"
- IMAGE = "image"
- BLOCK_QUOTE = "block-quote"
- BLOCK_CODE = "block-code"
- UNKNOWN = "unknown"
- SENTENCE = "sentence"
- TOKEN = "token" # noqa: S105
- SPAN = "span"
- DIV = "div"
- PRE = "pre"
- STRONG = "strong"
- EMPHASIZED = "emphasized"
- UNDERLINED = "underlined"
- TELETYPE = "teletype"
- ARTICLE = "article"
- MAIN = "main"
- CHAPTER = "chapter"
- TEXT = "text"
-
- @staticmethod
- def from_html_tag(tagname: Optional[str]) -> Optional["DocTag"]: # noqa: C901
- if tagname is None:
- return None
-
- name = tagname.lower().strip()
-
- if name == "p":
- return DocTag.PARAGRAPH
- elif name == "h1":
- return DocTag.H1
- elif name == "h2":
- return DocTag.H2
- elif name == "h3":
- return DocTag.H3
- elif name == "h4":
- return DocTag.H4
- elif name == "h5":
- return DocTag.H5
- elif name == "ul":
- return DocTag.UNORDERED_LIST
- elif name == "ol":
- return DocTag.ORDERED_LIST
- elif name == "li":
- return DocTag.LIST_ITEM
- elif name == "a":
- return DocTag.LINK
- elif name == "div":
- return DocTag.DIV
- elif name == "img":
- return DocTag.IMAGE
- elif name == "span":
- return DocTag.SPAN
- elif name == "pre":
- return DocTag.PRE
- elif name == "code":
- return DocTag.BLOCK_CODE
- elif name == "blockquote":
- return DocTag.BLOCK_QUOTE
- elif name == "strong":
- return DocTag.STRONG
- elif name == "b":
- return DocTag.STRONG
- elif name == "emph":
- return DocTag.EMPHASIZED
- elif name == "i":
- return DocTag.EMPHASIZED
- elif name == "u":
- return DocTag.UNDERLINED
- elif name == "tt":
- return DocTag.TELETYPE
- elif name == "article":
- return DocTag.ARTICLE
- elif name == "header":
- return DocTag.HEADER
- elif name == "footer":
- return DocTag.FOOTER
- elif name == "main":
- return DocTag.MAIN
-
- return None
-
-
-class TokenTag(str, Enum):
- """A set of `name` constants for Tags with a `kind` of `TagKind.token`; appropriate for parsing-level ideas."""
-
- TEXT_WITH_WHITESPACE = "text-with-whitespace"
- TEXT = "text"
- WHITESPACE = "whitespace"
- HEAD = "head"
- LEFT_EDGE = "left-edge"
- RIGHT_EDGE = "right-edge"
- ENTITY_TYPE = "entity-type"
- ENTITY_IOB = "entity-iob"
- LEMMA = "lemma"
- NORMALIZED = "normalized"
- SHAPE = "shape"
- PREFIX = "prefix"
- SUFFIX = "suffix"
- IS_ALPHA = "is-alpha"
- IS_ASCII = "is-ascii"
- IS_DIGIT = "is-digit"
- IS_TITLE = "is-title"
- IS_PUNCT = "is-punct"
- IS_LEFT_PUNCT = "is-left-punct"
- IS_RIGHT_PUNCT = "is-right-punct"
- IS_SPACE = "is-space"
- IS_BRACKET = "is-bracket"
- IS_QUOTE = "is-quote"
- IS_CURRENCY = "is-currency"
- LIKE_URL = "like-url"
- LIKE_NUM = "like-num"
- LIKE_EMAIL = "like-email"
- IS_OUT_OF_VOCABULARY = "is-out-of-vocabulary"
- IS_STOPWORD = "is-stopword"
- LANGUAGE = "language"
-
-
-class TagValueKey(str, Enum):
- """A set of key constants for the `value` object within a tag."""
-
- # Catch-all for confidence, score, ranking
- SCORE = "score"
-
- # Catch-all for values of different types such as integers, floats, booleans, and strings
- VALUE = "value"
-
- # An array of floats or integers
- VECTOR_VALUE = "vector-value"
-
- # A float or integer
- NUMBER_VALUE = "number-value"
-
- # A bool
- BOOL_VALUE = "bool-value"
-
- # A string
- STRING_VALUE = "string-value"
-
- # Whether some annotation is direct ("Susan said 'Hi'")
- DIRECT = "direct"
-
- # Start time of a region of a document, in some other medium (seconds)
- START_TIME_S = "start-time-s"
-
- # End time of a region of a document, in some other medium (seconds)
- END_TIME_S = "end-time-s"
-
- # The normalized name of an entity
- ENTITY_NAME = "entity_name"
-
- # Timestamp. Can be used to provide a time-based sort-ordering for tags.
- TIMESTAMP_VALUE = "timestamp-value"
-
-
-class GenerationTag(str, Enum):
- """A set of `name` constants for Tags with a `kind` of `TagKind.generation`."""
-
- # A generated summary of some region of a document
- SUMMARY = "summary"
-
- # A generated headline for some region of a document
- HEADLINE = "headline"
-
- # A generated "micro summary" of some region of a document
- GIST = "gist"
-
- # A generated completion using some region of the document as input
- PROMPT_COMPLETION = "prompt-completion"
-
-
-class ProvenanceTag(str, Enum):
- """A set of `name` constants for Tags with a `kind` of `TagKind.provenance`."""
-
- # The speaker of a section of a document
- SPEAKER = "speaker"
-
- # The URL from which some section of a document was sourced
- URL = "url"
-
- # The File from which some section of a document was sourced
- FILE = "file"
-
-
-class RoleTag(str, Enum):
- """A set of `name` constants for Tags with a `kind` of `TagKind.ROLE`."""
-
- # This block's content was created by the System; likely instructional text on how to respond
- SYSTEM = "system"
-
- # This block's content was created by an end user
- USER = "user"
-
- # This block's content was created by the generative AI assistant
- ASSISTANT = "assistant"
diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT/web_assets/stylesheet/chatbot.css b/spaces/JohnSmith9982/ChuanhuChatGPT/web_assets/stylesheet/chatbot.css
deleted file mode 100644
index d99584282c052861e5e401add62c3b94eb48ec65..0000000000000000000000000000000000000000
--- a/spaces/JohnSmith9982/ChuanhuChatGPT/web_assets/stylesheet/chatbot.css
+++ /dev/null
@@ -1,278 +0,0 @@
-
-hr.append-display {
- margin: 8px 0;
- border: none;
- height: 1px;
- border-top-width: 0;
- background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1));
-}
-.source-a {
- font-size: 0.8em;
- max-width: 100%;
- margin: 0;
- display: flex;
- flex-direction: row;
- flex-wrap: wrap;
- align-items: center;
- /* background-color: #dddddd88; */
- border-radius: 1.5rem;
- padding: 0.2em;
-}
-.source-a a {
- display: inline-block;
- background-color: #aaaaaa50;
- border-radius: 1rem;
- padding: 0.5em;
- text-align: center;
- text-overflow: ellipsis;
- overflow: hidden;
- min-width: 20%;
- white-space: nowrap;
- margin: 0.2rem 0.1rem;
- text-decoration: none !important;
- flex: 1;
- transition: flex 0.5s;
-}
-.source-a a:hover {
- background-color: #aaaaaa20;
- flex: 2;
-}
-
-/* Chuanhu assistant */
-.agent-prefix {
- font-size: smaller;
- opacity: 0.6;
- padding: 6px 0 4px;
-}
-.agent-prefix::before {
- content: '🐯';
- filter: grayscale();
- padding: 0 4px;
-}
-
-/* Light theme (default) */
-#chuanhu-chatbot {
- background-color: var(--chatbot-background-color-light) !important;
- color: var(--chatbot-color-light) !important;
-}
-[data-testid = "bot"] {
- background-color: var(--message-bot-background-color-light) !important;
-}
-[data-testid = "user"] {
- background-color: var(--message-user-background-color-light) !important;
-}
-/* Dark theme */
-.dark #chuanhu-chatbot {
- background-color: var(--chatbot-background-color-dark) !important;
- color: var(--chatbot-color-dark) !important;
-}
-.dark [data-testid = "bot"] {
- background-color: var(--message-bot-background-color-dark) !important;
-}
-.dark [data-testid = "user"] {
- background-color: var(--message-user-background-color-dark) !important;
-}
-
-/* Chat message bubbles */
-.message {
- border-radius: var(--radius-xl) !important;
- border: none;
- padding: var(--spacing-xl) !important;
- font-size: var(--text-md) !important;
- line-height: var(--line-md) !important;
- min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
- min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
-}
-[data-testid = "bot"] {
- max-width: calc(85% - 38px);
- border-top-left-radius: 0 !important;
-}
-[data-testid = "user"] {
- max-width: calc(85% - 38px);
- width: auto !important;
- border-top-right-radius: 0 !important;
-}
-
-/* Devices with screen width >= 500px */
-/* update on 2023.4.8: fine-grained height adjustments are now handled in JavaScript */
-@media screen and (min-width: 500px) {
- #chuanhu-chatbot {
- height: calc(100vh - 200px);
- }
- #chuanhu-chatbot>.wrapper>.wrap {
- max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
- }
-}
-/* Devices with screen width < 500px */
-@media screen and (max-width: 499px) {
- #chuanhu-chatbot {
- height: calc(100vh - 140px);
- }
- #chuanhu-chatbot>.wrapper>.wrap {
- max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
- }
- [data-testid = "bot"] {
- max-width: calc(98% - 20px) !important;
- }
- .chatbot-avatar {
- display: none;
- }
- #app-title h1{
- letter-spacing: -1px; font-size: 22px;
- }
-}
-
-#chuanhu-chatbot>.wrapper>.wrap {
- overflow-x: hidden;
-}
-
-.message.user p {
- white-space: pre-wrap;
-}
-.message .user-message {
- display: block;
- padding: 0 !important;
- white-space: pre-wrap;
-}
-
-.message .md-message p {
- margin-top: 0.6em !important;
- margin-bottom: 0.6em !important;
-}
-.message .md-message p:first-child { margin-top: 0 !important; }
-.message .md-message p:last-of-type { margin-bottom: 0 !important; }
-
-.message .md-message {
- display: block;
- padding: 0 !important;
-}
-.message .raw-message p {
- margin:0 !important;
-}
-.message .raw-message {
- display: block;
- padding: 0 !important;
- white-space: pre-wrap;
-}
-.message .hideM {
- display: none;
-}
-
-/* custom buttons */
-.chuanhu-btn {
- border-radius: 5px;
- /* background-color: #E6E6E6 !important; */
- color: rgba(120, 120, 120, 0.64) !important;
- padding: 4px !important;
- position: absolute;
- right: -22px;
- cursor: pointer !important;
- transition: color .2s ease, background-color .2s ease;
-}
-.chuanhu-btn:hover {
- background-color: rgba(167, 167, 167, 0.25) !important;
- color: unset !important;
-}
-.chuanhu-btn:active {
- background-color: rgba(167, 167, 167, 0.5) !important;
-}
-.chuanhu-btn:focus {
- outline: none;
-}
-
-.copy-bot-btn {
- /* top: 18px; */
- bottom: 0;
-}
-.toggle-md-btn {
- /* top: 0; */
- bottom: 20px;
-}
-
-/* note: this is deprecated */
-.copy-code-btn {
- position: relative;
- float: right;
- font-size: 1em;
- cursor: pointer;
-}
-/* note: the button below disabled in chatbot.py */
-.message div.icon-button > button[title="copy"] {
- display: none;
-}
-
-
-/* history message */
-.wrapper>.wrap>.history-message {
- padding-bottom: 10px !important;
-}
-.history-message {
- /* padding: 0 !important; */
- opacity: 80%;
- display: flex;
- flex-direction: column;
-}
-.history-message>.history-message {
- padding: 0 !important;
-}
-.history-message>.message-wrap {
- padding: 0 !important;
- margin-bottom: 16px;
-}
-.history-message>.message {
- margin-bottom: 16px;
-}
-.wrapper>.wrap>.history-message::after {
- content: "";
- display: block;
- height: 2px;
- background-color: var(--body-text-color-subdued);
- margin-bottom: 10px;
- margin-top: -10px;
- clear: both;
-}
-.wrapper>.wrap>.history-message>:last-child::after {
-    content: "仅供查看"; /* "For viewing only" */
- display: block;
- text-align: center;
- color: var(--body-text-color-subdued);
- font-size: 0.8em;
-}
-
-/* #chuanhu-chatbot {
- transition: height 0.3s ease;
- note: find it better without transition animation...;
-} */
-
-
-.message-row {
- flex-direction: row;
- display: flex;
- gap: 8px;
- width: 100%;
-}
-.bot-message-row {
- justify-content: flex-start;
-}
-.user-message-row {
- justify-content: flex-end;
-}
-.chatbot-avatar {
- width: 32px;
- height: 32px;
- background-color: transparent;
- background-size: cover;
- border-radius: 5px !important;
-}
-.chatbot-avatar.bot-avatar {
- margin-left: 5px;
-}
-.chatbot-avatar.user-avatar {
- margin-right: 10px;
-}
-.chatbot-avatar img {
- border-radius: 5px !important;
- object-fit: cover;
- width: 100%;
- height: 100%;
-}
\ No newline at end of file
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/go-applio-manager-recode.bat b/spaces/Kangarroar/ApplioRVC-Inference/go-applio-manager-recode.bat
deleted file mode 100644
index 91b8acfc0c69a356fd5b1d77650b2cd728b1072b..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/go-applio-manager-recode.bat
+++ /dev/null
@@ -1,322 +0,0 @@
-@echo off
-title Applio Installer
-
-::: _ _ _____ _
-::: /\ | (_) | __ \ | |
-::: / \ _ __ _ __ | |_ ___ | |__) |___ ___ ___ __| | ___
-::: / /\ \ | '_ \| '_ \| | |/ _ \ | _ // _ \/ __/ _ \ / _` |/ _ \
-::: / ____ \| |_) | |_) | | | (_) | | | \ \ __/ (_| (_) | (_| | __/
-::: /_/ \_\ .__/| .__/|_|_|\___/ |_| \_\___|\___\___/ \__,_|\___|
-::: | | | |
-::: |_| |_|
-:::
-:::
-
-setlocal
-set "branch=applio-recode"
-set "runtime=runtime-recode"
-set "repoUrl=https://github.com/IAHispano/Applio-RVC-Fork/archive/refs/heads/%branch%.zip"
-set "fixesFolder=fixes"
-set "localFixesPy=local_fixes.py"
-set "principal=%cd%"
-set "URL_BASE=https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main"
-set "URL_EXTRA=https://huggingface.co/IAHispano/applio/resolve/main"
-
-:menu
-for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A
-
-echo [1] Reinstall Applio
-echo [2] Update Applio
-echo [3] Update Applio + Runtime
-echo.
-
-set /p choice=Select an option:
-set choice=%choice: =%
-
-if "%choice%"=="1" (
- cls
- echo Starting Applio Reinstaller...
- echo.
- goto reinstaller
- pause
- cls
- goto menu
-
-)
-
-if "%choice%"=="2" (
- cls
- echo Starting Applio Updater...
- echo.
- goto updater
- pause
- cls
- goto menu
-)
-
-if "%choice%"=="3" (
- cls
- echo Updating Applio + Runtime...
- echo.
- goto updaterRuntime
- pause
- cls
- goto menu
-
-)
-
-cls
-echo Invalid option. Please enter a number from 1 to 3.
-echo.
-echo Press 'Enter' to access the main menu...
-pause>nul
-cls
-goto menu
-
-:reinstaller
-
-echo WARNING: Remember to install Microsoft C++ Build Tools, Redistributable, Python, and Git before continuing.
-echo.
-echo Step-by-step guide: https://rentry.org/appliolocal
-echo Build Tools: https://aka.ms/vs/17/release/vs_BuildTools.exe
-echo Redistributable: https://aka.ms/vs/17/release/vc_redist.x64.exe
-echo Git: https://github.com/git-for-windows/git/releases/download/v2.42.0.windows.2/Git-2.42.0.2-64-bit.exe
-echo Python: Add this path to the user PATH environment variable in Windows: %principal%\runtime\Scripts
-echo.
-pause
-cls
-
-echo Downloading ZIP file...
-powershell -command "& { Invoke-WebRequest -Uri '%repoUrl%' -OutFile '%principal%\repo.zip' }"
-echo.
-
-echo Extracting ZIP file...
-powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('%principal%\repo.zip', '%principal%') }"
-echo.
-
-echo Copying folder and file structure from subdirectory to main directory...
-robocopy "%principal%\Applio-RVC-Fork-%branch%" "%principal%" /E
-echo.
-
-echo Deleting contents of subdirectory (files and folders)...
-rmdir "%principal%\Applio-RVC-Fork-%branch%" /S /Q
-echo.
-
-echo Cleaning up...
-del "%principal%\repo.zip"
-echo.
-cls
-
-echo Proceeding to download the models...
-echo.
-
-echo WARNING: At this point, it's recommended to disable antivirus or firewall, as errors might occur when downloading pretrained models.
-pause
-cls
-
-echo Downloading models in the assets folder...
-cd "assets"
-echo.
-echo Downloading the "pretrained" folder...
-cd "pretrained"
-curl -LJO "%URL_BASE%/pretrained/D32k.pth"
-curl -LJO "%URL_BASE%/pretrained/D40k.pth"
-curl -LJO "%URL_BASE%/pretrained/D48k.pth"
-curl -LJO "%URL_BASE%/pretrained/G32k.pth"
-curl -LJO "%URL_BASE%/pretrained/G40k.pth"
-curl -LJO "%URL_BASE%/pretrained/G48k.pth"
-curl -LJO "%URL_BASE%/pretrained/f0D32k.pth"
-curl -LJO "%URL_BASE%/pretrained/f0D40k.pth"
-curl -LJO "%URL_BASE%/pretrained/f0D48k.pth"
-curl -LJO "%URL_BASE%/pretrained/f0G32k.pth"
-curl -LJO "%URL_BASE%/pretrained/f0G40k.pth"
-curl -LJO "%URL_BASE%/pretrained/f0G48k.pth"
-cd ".."
-echo.
-cls
-
-echo Downloading the "pretrained_v2" folder...
-cd "pretrained_v2"
-curl -LJO "%URL_BASE%/pretrained_v2/D32k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/D40k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/D48k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/G32k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/G40k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/G48k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/f0D32k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/f0D40k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/f0D48k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/f0G32k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/f0G40k.pth"
-curl -LJO "%URL_BASE%/pretrained_v2/f0G48k.pth"
-cd ".."
-echo.
-cls
-
-echo Downloading the hubert_base.pt file...
-cd "hubert"
-curl -LJO "%URL_BASE%/hubert_base.pt"
-cd ".."
-echo.
-cls
-
-
-echo Downloading the rmvpe.pt file...
-cd "rmvpe"
-curl -LJO "%URL_BASE%/rmvpe.pt"
-echo.
-cls
-
-echo Downloading the rmvpe.onnx file...
-curl -LJO "%URL_BASE%/rmvpe.onnx"
-cd ".."
-cd ".."
-echo.
-cls
-
-echo Downloading the rest of the large files
-
-echo Downloading the "uvr5_weights" folder...
-cd "uvr5_weights"
-curl -LJO "%URL_BASE%/uvr5_weights/HP2_all_vocals.pth"
-curl -LJO "%URL_BASE%/uvr5_weights/HP3_all_vocals.pth"
-curl -LJO "%URL_BASE%/uvr5_weights/HP5_only_main_vocal.pth"
-curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoAggressive.pth"
-curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoDeReverb.pth"
-curl -LJO "%URL_BASE%/uvr5_weights/VR-DeEchoNormal.pth"
-cd ".."
-echo.
-cls
-
-echo Downloading the ffmpeg.exe file...
-curl -LJO "%URL_BASE%/ffmpeg.exe"
-echo.
-cls
-
-echo Downloading the ffprobe.exe file...
-curl -LJO "%URL_BASE%/ffprobe.exe"
-echo.
-cls
-
-echo Downloading the runtime.zip file...
-curl -LJO "%URL_EXTRA%/%runtime%.zip"
-echo.
-cls
-
-echo Extracting the runtime.zip file, this might take a while...
-powershell -Command "Expand-Archive -Path '%runtime%.zip' -DestinationPath '.'"
-del %runtime%.zip
-echo.
-cls
-
-echo Downloads completed!
-echo.
-
-echo Checking if the local_fixes.py file exists in the Fixes folder...
-if exist "%fixesFolder%\%localFixesPy%" (
- echo Running the file...
- runtime\python.exe "%fixesFolder%\%localFixesPy%"
-) else (
- echo The "%localFixesPy%" file was not found in the "Fixes" folder.
-)
-echo.
-
-echo Fixes Applied!
-echo.
-
-echo Applio has been reinstalled!
-echo.
-echo Press 'Enter' to access the main menu...
-pause>nul
-cls
-goto menu
-
-
-:updater
-
-echo Downloading the ZIP file...
-powershell -command "& { Invoke-WebRequest -Uri '%repoUrl%' -OutFile '%principal%\repo.zip' }"
-echo.
-
-echo Extracting ZIP file...
-powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('%principal%\repo.zip', '%principal%') }"
-echo.
-
-echo Copying folder and file structure from subdirectory to main directory...
-robocopy "%principal%\Applio-RVC-Fork-%branch%" "%principal%" /E
-echo.
-
-echo Deleting contents of the subdirectory (files and folders)...
-rmdir "%principal%\Applio-RVC-Fork-%branch%" /S /Q
-echo.
-
-echo Cleaning up...
-del "%principal%\repo.zip"
-echo.
-cls
-
-echo Verifying if the local_fixes.py file exists in the Fixes folder...
-if exist "%fixesFolder%\%localFixesPy%" (
- echo Running the file...
- runtime\python.exe "%fixesFolder%\%localFixesPy%"
-) else (
- echo The file "%localFixesPy%" was not found in the "Fixes" folder.
-)
-echo.
-
-echo Applio has been updated!
-echo.
-echo Press 'Enter' to access the main menu...
-pause>nul
-cls
-goto menu
-
-
-:updaterRuntime
-
-echo Downloading the ZIP file...
-powershell -command "& { Invoke-WebRequest -Uri '%repoUrl%' -OutFile '%principal%\repo.zip' }"
-echo.
-
-echo Extracting ZIP file...
-powershell -command "& { Add-Type -AssemblyName System.IO.Compression.FileSystem ; [System.IO.Compression.ZipFile]::ExtractToDirectory('%principal%\repo.zip', '%principal%') }"
-echo.
-
-echo Copying folder and file structure from subdirectory to main directory...
-robocopy "%principal%\Applio-RVC-Fork-%branch%" "%principal%" /E
-echo.
-
-echo Deleting contents of the subdirectory (files and folders)...
-rmdir "%principal%\Applio-RVC-Fork-%branch%" /S /Q
-echo.
-
-echo Cleaning up...
-del "%principal%\repo.zip"
-echo.
-cls
-
-echo Downloading the runtime.zip file...
-curl -LJO "%URL_EXTRA%/%runtime%.zip"
-echo.
-cls
-echo Extracting the runtime.zip file, this might take a while...
-powershell -Command "Expand-Archive -Path '%runtime%.zip' -DestinationPath '.'"
-del %runtime%.zip
-echo.
-cls
-
-echo Verifying if the local_fixes.py file exists in the Fixes folder...
-if exist "%fixesFolder%\%localFixesPy%" (
- echo Running the file...
- runtime\python.exe "%fixesFolder%\%localFixesPy%"
-) else (
- echo The file "%localFixesPy%" was not found in the "Fixes" folder.
-)
-echo.
-
-echo Applio has been updated!
-echo.
-echo Press 'Enter' to access the main menu...
-pause>nul
-cls
-goto menu
diff --git a/spaces/KenjieDec/GPEN/retinaface/utils/nms/__init__.py b/spaces/KenjieDec/GPEN/retinaface/utils/nms/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/KyanChen/RSPrompter/mmpl/evaluation/metrics/__init__.py b/spaces/KyanChen/RSPrompter/mmpl/evaluation/metrics/__init__.py
deleted file mode 100644
index 17edbe4e4d3a1defddeb23deceba504f9058c43e..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmpl/evaluation/metrics/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .builder import PL_METRICS
-from .coco_pl_metric import CocoPLMetric
-from .mean_ap import PLMeanAveragePrecision
diff --git a/spaces/Kyllano/ShrimpClassifier/README.md b/spaces/Kyllano/ShrimpClassifier/README.md
deleted file mode 100644
index da80294c4c143c1dad0b0e34bb66cef3693137bf..0000000000000000000000000000000000000000
--- a/spaces/Kyllano/ShrimpClassifier/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ShrimpClassifier
-emoji: 🚀
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Lippppxy/AiAnimeVoice/utils.py b/spaces/Lippppxy/AiAnimeVoice/utils.py
deleted file mode 100644
index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000
--- a/spaces/Lippppxy/AiAnimeVoice/utils.py
+++ /dev/null
@@ -1,225 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-import librosa
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if optimizer is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
-    new_state_dict = {}
- for k, v in state_dict.items():
- try:
- new_state_dict[k] = saved_state_dict[k]
-        except KeyError:
- logger.info("%s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10,2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_audio_to_torch(full_path, target_sampling_rate):
- audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
- return torch.FloatTensor(audio.astype(np.float32))
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, required=True,
- help='Model name')
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
-    hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
-    hparams = HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if isinstance(v, dict):
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
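HParams wraps a (possibly nested) config dict so that sections can be read either as attributes or as dictionary keys. A small usage sketch with made-up values:

config = {"train": {"batch_size": 16, "learning_rate": 2e-4}, "model": {"hidden_channels": 192}}
hps = HParams(**config)
print(hps.train.batch_size)               # nested dicts become HParams -> 16
print(hps["model"]["hidden_channels"])    # dict-style access also works -> 192
print("train" in hps, len(hps))           # __contains__ / __len__ -> True 2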
diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/det_pipelines/dbnet_pipeline.py b/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/det_pipelines/dbnet_pipeline.py
deleted file mode 100644
index 40eee02db3b68d5682841532d1122c92bdca2a65..0000000000000000000000000000000000000000
--- a/spaces/Loren/Streamlit_OCR_comparator/configs/_base_/det_pipelines/dbnet_pipeline.py
+++ /dev/null
@@ -1,88 +0,0 @@
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline_r18 = [
- dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
- dict(
- type='LoadTextAnnotations',
- with_bbox=True,
- with_mask=True,
- poly2mask=False),
- dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(
- type='ImgAug',
- args=[['Fliplr', 0.5],
- dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]),
- dict(type='EastRandomCrop', target_size=(640, 640)),
- dict(type='DBNetTargets', shrink_ratio=0.4),
- dict(type='Pad', size_divisor=32),
- dict(
- type='CustomFormatBundle',
- keys=['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'],
- visualize=dict(flag=False, boundary_key='gt_shrink')),
- dict(
- type='Collect',
- keys=['img', 'gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'])
-]
-
-test_pipeline_1333_736 = [
- dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 736), # used by Resize
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-
-# for dbnet_r50dcnv2_fpnc
-img_norm_cfg_r50dcnv2 = dict(
- mean=[122.67891434, 116.66876762, 104.00698793],
- std=[58.395, 57.12, 57.375],
- to_rgb=True)
-
-train_pipeline_r50dcnv2 = [
- dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
- dict(
- type='LoadTextAnnotations',
- with_bbox=True,
- with_mask=True,
- poly2mask=False),
- dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
- dict(type='Normalize', **img_norm_cfg_r50dcnv2),
- dict(
- type='ImgAug',
- args=[['Fliplr', 0.5],
- dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]),
- dict(type='EastRandomCrop', target_size=(640, 640)),
- dict(type='DBNetTargets', shrink_ratio=0.4),
- dict(type='Pad', size_divisor=32),
- dict(
- type='CustomFormatBundle',
- keys=['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'],
- visualize=dict(flag=False, boundary_key='gt_shrink')),
- dict(
- type='Collect',
- keys=['img', 'gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'])
-]
-
-test_pipeline_4068_1024 = [
- dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(4068, 1024), # used by Resize
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='Normalize', **img_norm_cfg_r50dcnv2),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
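For reference, the Normalize step above consumes img_norm_cfg; a rough numpy sketch of what it does (assuming the usual mmcv behaviour of converting BGR to RGB when to_rgb=True, then standardizing per channel in the 0-255 range). This is an illustration, not the actual mmcv implementation:

import numpy as np

def normalize(img_bgr, mean, std, to_rgb=True):
    # img_bgr: H x W x 3 uint8 image as produced by LoadImageFromFile
    img = img_bgr[..., ::-1].astype(np.float32) if to_rgb else img_bgr.astype(np.float32)
    return (img - np.array(mean, dtype=np.float32)) / np.array(std, dtype=np.float32)

img = np.random.randint(0, 256, (736, 1333, 3), dtype=np.uint8)
out = normalize(img, mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)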
diff --git a/spaces/Madhuri/vqa_audiobot/model/predictor.py b/spaces/Madhuri/vqa_audiobot/model/predictor.py
deleted file mode 100644
index 66dda44baadb0a19f2cdf7773fa3155bbb212aec..0000000000000000000000000000000000000000
--- a/spaces/Madhuri/vqa_audiobot/model/predictor.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from happytransformer import HappyTextToText, TTSettings
-from transformers import ViltProcessor
-from transformers import ViltForQuestionAnswering
-from transformers import AutoTokenizer
-from transformers import AutoModelForSeq2SeqLM
-from joblib import load
-
-import os
-import re
-import string
-import torch
-import pandas as pd
-
-'''
-Visual question answering model that generates a full answer statement for a
-question about an image.
-'''
-
-
-class Predictor:
- def __init__(self):
- auth_token = os.environ.get('TOKEN') or True
- self.vqa_processor = ViltProcessor.from_pretrained(
- 'dandelin/vilt-b32-finetuned-vqa')
- self.vqa_model = ViltForQuestionAnswering.from_pretrained(
- 'dandelin/vilt-b32-finetuned-vqa')
- self.qa_model = AutoModelForSeq2SeqLM.from_pretrained(
- 'Madhuri/t5_small_vqa_fs', use_auth_token=auth_token)
- self.qa_tokenizer = AutoTokenizer.from_pretrained(
- 'Madhuri/t5_small_vqa_fs', use_auth_token=auth_token)
- self.happy_tt = HappyTextToText(
- "T5", "vennify/t5-base-grammar-correction")
- self.tt_args = TTSettings(num_beams=5, min_length=1)
- model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'qa_classifier.joblib')
- self.qa_classifier = load(model_path)
-
- def is_valid_question(self, question):
- df = pd.DataFrame()
- df['sentence'] = [question]
- return self.qa_classifier.predict(df['sentence'])[0] == 1
-
- def predict_answer_from_text(self, image, input):
- if image is None:
- return 'Please select an image and ask a question...'
-
- chars = re.escape(string.punctuation)
- question = re.sub(r'['+chars+']', '', input)
- if not question or len(question.split()) < 3:
- return 'I cannot understand, please ask a valid question...'
-
- if not self.is_valid_question(question):
- return 'I can understand only questions, can you please ask a valid question...'
-
- # process question using image model
- encoding = self.vqa_processor(image, question, return_tensors='pt')
- with torch.no_grad():
- outputs = self.vqa_model(**encoding)
- short_answer = self.vqa_model.config.id2label[outputs.logits.argmax(
- -1).item()]
-
- # generate statement using sentence generator model
- prompt = question + '. ' + short_answer
- input_ids = self.qa_tokenizer(prompt, return_tensors='pt').input_ids
- with torch.no_grad():
- output_ids = self.qa_model.generate(input_ids)
- answers = self.qa_tokenizer.batch_decode(
- output_ids, skip_special_tokens=True)
-
- # Correct the grammar of the answer
- answer = self.happy_tt.generate_text(
- 'grammar: ' + answers[0], args=self.tt_args).text
- print(
- f'question - {question}, answer - {answer}, original_answer - {answers[0]}')
- return answer
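A hedged usage sketch of the class above. The import path, image file, and question are placeholders; the first call downloads the ViLT, T5 and grammar-correction checkpoints, and the fine-tuned T5 model may require the TOKEN environment variable used in __init__:

from PIL import Image
from model.predictor import Predictor   # assumed import path within this Space

predictor = Predictor()
image = Image.open("example.jpg")        # any RGB image
print(predictor.predict_answer_from_text(image, "What color is the bus?"))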
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/three_interpolate.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/three_interpolate.py
deleted file mode 100644
index 203f47f05d58087e034fb3cd8cd6a09233947b4a..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/three_interpolate.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from typing import Tuple
-
-import torch
-from torch.autograd import Function
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext(
- '_ext', ['three_interpolate_forward', 'three_interpolate_backward'])
-
-
-class ThreeInterpolate(Function):
- """Performs weighted linear interpolation on 3 features.
-
- Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
- for more details.
- """
-
- @staticmethod
- def forward(ctx, features: torch.Tensor, indices: torch.Tensor,
- weight: torch.Tensor) -> torch.Tensor:
- """
- Args:
- features (Tensor): (B, C, M) feature descriptors to be
- interpolated
- indices (Tensor): (B, N, 3) indices of the three nearest
- neighbors of each target point in ``features``
- weight (Tensor): (B, N, 3) weights of interpolation
-
- Returns:
- Tensor: (B, C, N) tensor of the interpolated features
- """
- assert features.is_contiguous()
- assert indices.is_contiguous()
- assert weight.is_contiguous()
-
- B, c, m = features.size()
- n = indices.size(1)
- ctx.three_interpolate_for_backward = (indices, weight, m)
- output = torch.cuda.FloatTensor(B, c, n)
-
- ext_module.three_interpolate_forward(
- features, indices, weight, output, b=B, c=c, m=m, n=n)
- return output
-
- @staticmethod
- def backward(
- ctx, grad_out: torch.Tensor
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
- """
- Args:
- grad_out (Tensor): (B, C, N) tensor with gradients of outputs
-
- Returns:
- Tuple[Tensor, None, None]: (B, C, M) tensor with gradients of
- features, plus two None placeholders for indices and weight
- """
- idx, weight, m = ctx.three_interpolate_for_backward
- B, c, n = grad_out.size()
-
- grad_features = torch.cuda.FloatTensor(B, c, m).zero_()
- grad_out_data = grad_out.data.contiguous()
-
- ext_module.three_interpolate_backward(
- grad_out_data, idx, weight, grad_features.data, b=B, c=c, n=n, m=m)
- return grad_features, None, None
-
-
-three_interpolate = ThreeInterpolate.apply
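A usage sketch matching the shapes documented above (it requires a CUDA build of mmcv, since the op allocates torch.cuda tensors; all sizes are arbitrary):

import torch

B, C, M, N = 2, 16, 64, 256
features = torch.randn(B, C, M, device='cuda')                  # features at M known points
indices = torch.randint(0, M, (B, N, 3), device='cuda').int()   # 3 nearest known points per target
weight = torch.rand(B, N, 3, device='cuda')
weight = weight / weight.sum(dim=2, keepdim=True)               # interpolation weights sum to 1
out = three_interpolate(features, indices, weight)              # -> (B, C, N)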
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py
deleted file mode 100644
index d02122ca0e68743b1bf7a893afae96042f23838c..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from abc import ABCMeta, abstractmethod
-
-from .decode_head import BaseDecodeHead
-
-
-class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta):
- """Base class for cascade decode head used in
- :class:`CascadeEncoderDecoder."""
-
- def __init__(self, *args, **kwargs):
- super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs)
-
- @abstractmethod
- def forward(self, inputs, prev_output):
- """Placeholder of forward function."""
- pass
-
- def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg,
- train_cfg):
- """Forward function for training.
- Args:
- inputs (list[Tensor]): List of multi-level img features.
- prev_output (Tensor): The output of previous decode head.
- img_metas (list[dict]): List of image info dict where each dict
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
- For details on the values of these keys see
- `mmseg/datasets/pipelines/formatting.py:Collect`.
- gt_semantic_seg (Tensor): Semantic segmentation masks
- used if the architecture supports semantic segmentation task.
- train_cfg (dict): The training config.
-
- Returns:
- dict[str, Tensor]: a dictionary of loss components
- """
- seg_logits = self.forward(inputs, prev_output)
- losses = self.losses(seg_logits, gt_semantic_seg)
-
- return losses
-
- def forward_test(self, inputs, prev_output, img_metas, test_cfg):
- """Forward function for testing.
-
- Args:
- inputs (list[Tensor]): List of multi-level img features.
- prev_output (Tensor): The output of previous decode head.
- img_metas (list[dict]): List of image info dict where each dict
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
- For details on the values of these keys see
- `mmseg/datasets/pipelines/formatting.py:Collect`.
- test_cfg (dict): The testing config.
-
- Returns:
- Tensor: Output segmentation map.
- """
- return self.forward(inputs, prev_output)
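A minimal sketch of a concrete subclass, assuming the usual BaseDecodeHead helpers from mmseg (_transform_inputs, cls_seg, in_channels, channels, num_classes) and assuming prev_output already has num_classes channels at the same spatial size as the selected features. This is illustrative, not a head shipped with mmseg:

import torch
import torch.nn as nn

class NaiveCascadeHead(BaseCascadeDecodeHead):
    """Toy cascade head: refines the previous head's logits with one conv."""

    def __init__(self, **kwargs):
        super(NaiveCascadeHead, self).__init__(**kwargs)
        # fuse current features with the previous head's logits
        self.fuse = nn.Conv2d(self.in_channels + self.num_classes, self.channels, 3, padding=1)

    def forward(self, inputs, prev_output):
        x = self._transform_inputs(inputs)                 # select/concat multi-level features
        x = self.fuse(torch.cat([x, prev_output], dim=1))
        return self.cls_seg(x)                             # project to num_classes channels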
diff --git a/spaces/MiloSobral/PortiloopDemo/portiloop/src/demo/phase_demo.py b/spaces/MiloSobral/PortiloopDemo/portiloop/src/demo/phase_demo.py
deleted file mode 100644
index 1f9b3c2ed220158673dc6f4e2ebfa54d9bc0950e..0000000000000000000000000000000000000000
--- a/spaces/MiloSobral/PortiloopDemo/portiloop/src/demo/phase_demo.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import gradio as gr
-
-from portiloop.src.demo.offline import run_offline
-
-
-def on_upload_file(file):
- # Check if file extension is .xdf
- if file.name.split(".")[-1] != "xdf":
- raise gr.Error("Please upload a .xdf file.")
- else:
- return file.name
-
-
-def main():
- with gr.Blocks(title="Portiloop") as demo:
- gr.Markdown("# Portiloop Demo")
- gr.Markdown("This Demo takes as input an XDF file coming from the Portiloop EEG device and allows you to convert it to CSV and perform the following actions:: \n * Filter the data offline \n * Perform offline spindle detection using Wamsley or Lacourse. \n * Simulate the Portiloop online filtering and spindle detection with different parameters.")
- gr.Markdown("Upload your XDF file and click **Run Inference** to start the processing...")
-
- with gr.Row():
- xdf_file_button = gr.UploadButton(label="Click to Upload", type="file", file_count="single")
- xdf_file_static = gr.File(label="XDF File", type='file', interactive=False)
-
- xdf_file_button.upload(on_upload_file, xdf_file_button, xdf_file_static)
-
- # Make a checkbox group for the options
- detect_filter = gr.CheckboxGroup(['Offline Filtering', 'Lacourse Detection', 'Wamsley Detection', 'Online Filtering', 'Online Detection'], type='index', label="Filtering/Detection options")
-
- # Options for phase stimulation
- with gr.Row():
- # Dropwdown for phase
- phase = gr.Dropdown(choices=["Peak", "Fast", "Valley"], value="Peak", label="Phase", interactive=True)
- buffer_time = gr.Slider(0, 1, value=0.3, step=0.01, label="Buffer Time", interactive=True)
-
- # Threshold value
- threshold = gr.Slider(0, 1, value=0.82, step=0.01, label="Threshold", interactive=True)
- # Detection Channel
- detect_channel = gr.Dropdown(choices=["1", "2", "3", "4", "5", "6", "7", "8"], value="2", label="Detection Channel in XDF recording", interactive=True)
- # Frequency
- freq = gr.Dropdown(choices=["100", "200", "250", "256", "500", "512", "1000", "1024"], value="250", label="Sampling Frequency (Hz)", interactive=True)
-
- with gr.Row():
- output_array = gr.File(label="Output CSV File")
- output_table = gr.Markdown(label="Output Table")
-
- run_inference = gr.Button(value="Run Inference")
- run_inference.click(
- fn=run_offline,
- inputs=[
- xdf_file_static,
- detect_filter,
- threshold,
- detect_channel,
- freq,
- phase,
- buffer_time],
- outputs=[output_array, output_table])
-
- demo.queue()
- demo.launch(share=False)
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/NCTCMumbai/NCTC/models/official/core/base_task.py b/spaces/NCTCMumbai/NCTC/models/official/core/base_task.py
deleted file mode 100644
index 31811cbe6606fac61b664973717f4c75b6b4b37b..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/core/base_task.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Defines the base task abstraction."""
-import abc
-import functools
-from typing import Any, Callable, Optional
-
-import six
-import tensorflow as tf
-
-from official.modeling.hyperparams import config_definitions as cfg
-from official.utils import registry
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Task(tf.Module):
- """A single-replica view of training procedure.
-
- Tasks provide artifacts for training/evaluation procedures, including
- loading/iterating over Datasets, initializing the model, calculating the loss
- and customized metrics with reduction.
- """
-
- # Special keys in train/validate step returned logs.
- loss = "loss"
-
- def __init__(self, params: cfg.TaskConfig):
- self._task_config = params
-
- @property
- def task_config(self) -> cfg.TaskConfig:
- return self._task_config
-
- def initialize(self, model: tf.keras.Model):
- """A callback function used as CheckpointManager's init_fn.
-
- This function will be called when no checkpoint found for the model.
- If there is a checkpoint, the checkpoint will be loaded and this function
- will not be called. You can use this callback function to load a pretrained
- checkpoint, saved under a directory other than the model_dir.
-
- Args:
- model: The keras.Model built or used by this task.
- """
- pass
-
- @abc.abstractmethod
- def build_model(self) -> tf.keras.Model:
- """Creates the model architecture.
-
- Returns:
- A model instance.
- """
-
- def compile_model(self,
- model: tf.keras.Model,
- optimizer: tf.keras.optimizers.Optimizer,
- loss=None,
- train_step: Optional[Callable[..., Any]] = None,
- validation_step: Optional[Callable[..., Any]] = None,
- **kwargs) -> tf.keras.Model:
- """Compiles the model with objects created by the task.
-
- The method should not be used in any customized training implementation.
-
- Args:
- model: a keras.Model.
- optimizer: the keras optimizer.
- loss: a callable/list of losses.
- train_step: optional train step function defined by the task.
- validation_step: optional validation_step step function defined by the
- task.
- **kwargs: other kwargs consumed by keras.Model compile().
-
- Returns:
- a compiled keras.Model.
- """
- if bool(loss is None) == bool(train_step is None):
- raise ValueError("`loss` and `train_step` should be exclusive to "
- "each other.")
- model.compile(optimizer=optimizer, loss=loss, **kwargs)
-
- if train_step:
- model.train_step = functools.partial(
- train_step, model=model, optimizer=model.optimizer)
- if validation_step:
- model.test_step = functools.partial(validation_step, model=model)
- return model
-
- @abc.abstractmethod
- def build_inputs(self,
- params: cfg.DataConfig,
- input_context: Optional[tf.distribute.InputContext] = None):
- """Returns a dataset or a nested structure of dataset functions.
-
- Dataset functions define per-host datasets with the per-replica batch size.
-
- Args:
- params: hyperparams to create input pipelines.
- input_context: optional distribution input pipeline context.
-
- Returns:
- A nested structure of per-replica input functions.
- """
-
- def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
- """Standard interface to compute losses.
-
- Args:
- labels: optional label tensors.
- model_outputs: a nested structure of output tensors.
- aux_losses: auxiliary loss tensors, i.e. `losses` in keras.Model.
-
- Returns:
- The total loss tensor.
- """
- del model_outputs, labels
-
- if aux_losses is None:
- losses = [tf.constant(0.0, dtype=tf.float32)]
- else:
- losses = aux_losses
- total_loss = tf.add_n(losses)
- return total_loss
-
- def build_metrics(self, training: bool = True):
- """Gets streaming metrics for training/validation."""
- del training
- return []
-
- def process_metrics(self, metrics, labels, model_outputs):
- """Process and update metrics. Called when using custom training loop API.
-
- Args:
- metrics: a nested structure of metrics objects.
- The return of function self.build_metrics.
- labels: a tensor or a nested structure of tensors.
- model_outputs: a tensor or a nested structure of tensors.
- For example, output of the keras model built by self.build_model.
- """
- for metric in metrics:
- metric.update_state(labels, model_outputs)
-
- def process_compiled_metrics(self, compiled_metrics, labels, model_outputs):
- """Process and update compiled_metrics. call when using compile/fit API.
-
- Args:
- compiled_metrics: the compiled metrics (model.compiled_metrics).
- labels: a tensor or a nested structure of tensors.
- model_outputs: a tensor or a nested structure of tensors.
- For example, output of the keras model built by self.build_model.
- """
- compiled_metrics.update_state(labels, model_outputs)
-
- def train_step(self,
- inputs,
- model: tf.keras.Model,
- optimizer: tf.keras.optimizers.Optimizer,
- metrics=None):
- """Does forward and backward.
-
- Args:
- inputs: a dictionary of input tensors.
- model: the model, forward pass definition.
- optimizer: the optimizer for this training step.
- metrics: a nested structure of metrics objects.
-
- Returns:
- A dictionary of logs.
- """
- if isinstance(inputs, tuple) and len(inputs) == 2:
- features, labels = inputs
- else:
- features, labels = inputs, inputs
- with tf.GradientTape() as tape:
- outputs = model(features, training=True)
- # Computes per-replica loss.
- loss = self.build_losses(
- labels=labels, model_outputs=outputs, aux_losses=model.losses)
- # Scales loss as the default gradients allreduce performs sum inside the
- # optimizer.
- scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
-
- # For mixed precision, when a LossScaleOptimizer is used, the loss is
- # scaled to avoid numeric underflow.
- if isinstance(optimizer,
- tf.keras.mixed_precision.experimental.LossScaleOptimizer):
- scaled_loss = optimizer.get_scaled_loss(scaled_loss)
-
- tvars = model.trainable_variables
- grads = tape.gradient(scaled_loss, tvars)
-
- if isinstance(optimizer,
- tf.keras.mixed_precision.experimental.LossScaleOptimizer):
- grads = optimizer.get_unscaled_gradients(grads)
- optimizer.apply_gradients(list(zip(grads, tvars)))
- logs = {self.loss: loss}
- if metrics:
- self.process_metrics(metrics, labels, outputs)
- logs.update({m.name: m.result() for m in metrics})
- elif model.compiled_metrics:
- self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
- logs.update({m.name: m.result() for m in model.metrics})
- return logs
-
- def validation_step(self, inputs, model: tf.keras.Model, metrics=None):
- """Validatation step.
-
- Args:
- inputs: a dictionary of input tensors.
- model: the keras.Model.
- metrics: a nested structure of metrics objects.
-
- Returns:
- A dictionary of logs.
- """
- if isinstance(inputs, tuple) and len(inputs) == 2:
- features, labels = inputs
- else:
- features, labels = inputs, inputs
- outputs = self.inference_step(features, model)
- loss = self.build_losses(
- labels=labels, model_outputs=outputs, aux_losses=model.losses)
- logs = {self.loss: loss}
- if metrics:
- self.process_metrics(metrics, labels, outputs)
- logs.update({m.name: m.result() for m in metrics})
- elif model.compiled_metrics:
- self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
- logs.update({m.name: m.result() for m in model.metrics})
- return logs
-
- def inference_step(self, inputs, model: tf.keras.Model):
- """Performs the forward step."""
- return model(inputs, training=False)
-
- def aggregate_logs(self, state, step_logs):
- """Optional aggregation over logs returned from a validation step."""
- pass
-
- def reduce_aggregated_logs(self, aggregated_logs):
- """Optional reduce of aggregated logs over validation steps."""
- return {}
-
-
-_REGISTERED_TASK_CLS = {}
-
-
-# TODO(b/158268740): Move these outside the base class file.
-# TODO(b/158741360): Add type annotations once pytype checks across modules.
-def register_task_cls(task_config_cls):
- """Decorates a factory of Tasks for lookup by a subclass of TaskConfig.
-
- This decorator supports registration of tasks as follows:
-
- ```
- @dataclasses.dataclass
- class MyTaskConfig(TaskConfig):
- # Add fields here.
- pass
-
- @register_task_cls(MyTaskConfig)
- class MyTask(Task):
- # Inherits def __init__(self, task_config).
- pass
-
- my_task_config = MyTaskConfig()
- my_task = get_task(my_task_config) # Returns MyTask(my_task_config).
- ```
-
- Besides the class itself, other callables that create a Task from a TaskConfig
- can be decorated by the result of this function, as long as there is at most
- one registration for each config class.
-
- Args:
- task_config_cls: a subclass of TaskConfig (*not* an instance of TaskConfig).
- Each task_config_cls can only be used for a single registration.
-
- Returns:
- A callable for use as class decorator that registers the decorated class
- for creation from an instance of task_config_cls.
- """
- return registry.register(_REGISTERED_TASK_CLS, task_config_cls)
-
-
-# The user-visible get_task() is defined after classes have been registered.
-# TODO(b/158741360): Add type annotations once pytype checks across modules.
-def get_task_cls(task_config_cls):
- task_cls = registry.lookup(_REGISTERED_TASK_CLS, task_config_cls)
- return task_cls
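Putting the pieces together, a toy Task can be defined, registered, and stepped by hand roughly as follows. The config field, dataset, and loss are placeholders, and this assumes a TF version contemporaneous with this code (the mixed-precision check above uses the old experimental namespace); real tasks in the model garden are considerably richer. `cfg` is the config_definitions import at the top of this file:

import dataclasses
import tensorflow as tf

@dataclasses.dataclass
class ToyTaskConfig(cfg.TaskConfig):
  hidden_size: int = 16

@register_task_cls(ToyTaskConfig)
class ToyTask(Task):

  def build_model(self):
    return tf.keras.Sequential([
        tf.keras.layers.Dense(self.task_config.hidden_size, activation="relu"),
        tf.keras.layers.Dense(1)])

  def build_inputs(self, params, input_context=None):
    x = tf.random.normal([32, 8])
    y = tf.reduce_sum(x, axis=-1, keepdims=True)
    return tf.data.Dataset.from_tensor_slices((x, y)).batch(4)

  def build_losses(self, labels, model_outputs, aux_losses=None):
    return tf.reduce_mean(tf.square(labels - model_outputs))

task = get_task_cls(ToyTaskConfig)(ToyTaskConfig())
model = task.build_model()
optimizer = tf.keras.optimizers.SGD(0.1)
for batch in task.build_inputs(None):
  logs = task.train_step(batch, model, optimizer)   # {"loss": <scalar tensor>}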
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/embedding_layer.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/embedding_layer.py
deleted file mode 100644
index 6694e2b42af47673ee3ce0b9572ec5867d69cb7d..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/embedding_layer.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Implementation of embedding layer with shared weights."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-
-class EmbeddingSharedWeights(tf.keras.layers.Layer):
- """Calculates input embeddings and pre-softmax linear with shared weights."""
-
- def __init__(self, vocab_size, hidden_size):
- """Specify characteristic parameters of embedding layer.
-
- Args:
- vocab_size: Number of tokens in the embedding. (Typically ~32,000)
- hidden_size: Dimensionality of the embedding. (Typically 512 or 1024)
- """
- super(EmbeddingSharedWeights, self).__init__()
- self.vocab_size = vocab_size
- self.hidden_size = hidden_size
-
- def build(self, input_shape):
- """Build embedding layer."""
- with tf.name_scope("embedding_and_softmax"):
- # Create and initialize weights. The random normal initializer was chosen
- # arbitrarily, and works well.
- self.shared_weights = self.add_weight(
- "weights",
- shape=[self.vocab_size, self.hidden_size],
- initializer=tf.random_normal_initializer(
- mean=0., stddev=self.hidden_size**-0.5))
- super(EmbeddingSharedWeights, self).build(input_shape)
-
- def get_config(self):
- return {
- "vocab_size": self.vocab_size,
- "hidden_size": self.hidden_size,
- }
-
- def call(self, inputs, mode="embedding"):
- """Get token embeddings of inputs.
-
- Args:
- inputs: An int64 tensor with shape [batch_size, length]
- mode: string, a valid value is one of "embedding" and "linear".
- Returns:
- outputs: (1) If mode == "embedding", output embedding tensor, float32 with
- shape [batch_size, length, embedding_size]; (2) if mode == "linear", output
- linear tensor, float32 with shape [batch_size, length, vocab_size].
- Raises:
- ValueError: if mode is not valid.
- """
- if mode == "embedding":
- return self._embedding(inputs)
- elif mode == "linear":
- return self._linear(inputs)
- else:
- raise ValueError("mode {} is not valid.".format(mode))
-
- def _embedding(self, inputs):
- """Applies embedding based on inputs tensor."""
- with tf.name_scope("embedding"):
- # Create binary mask of size [batch_size, length]
- embeddings = tf.gather(self.shared_weights, inputs)
- mask = tf.cast(tf.not_equal(inputs, 0), embeddings.dtype)
- embeddings *= tf.expand_dims(mask, -1)
- # Scale embedding by the sqrt of the hidden size
- embeddings *= self.hidden_size ** 0.5
-
- return embeddings
-
- def _linear(self, inputs):
- """Computes logits by running inputs through a linear layer.
-
- Args:
- inputs: A float32 tensor with shape [batch_size, length, hidden_size]
- Returns:
- float32 tensor with shape [batch_size, length, vocab_size].
- """
- with tf.name_scope("presoftmax_linear"):
- batch_size = tf.shape(inputs)[0]
- length = tf.shape(inputs)[1]
-
- x = tf.reshape(inputs, [-1, self.hidden_size])
- logits = tf.matmul(x, self.shared_weights, transpose_b=True)
-
- return tf.reshape(logits, [batch_size, length, self.vocab_size])
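A small shape-check sketch of the shared-weight layer (vocabulary size and token ids are arbitrary):

import tensorflow as tf

layer = EmbeddingSharedWeights(vocab_size=32000, hidden_size=512)
ids = tf.constant([[5, 17, 0, 0]], dtype=tf.int64)   # [batch, length]; id 0 is treated as padding
emb = layer(ids, mode="embedding")                   # -> [1, 4, 512], padded rows zeroed, scaled by sqrt(512)
logits = layer(emb, mode="linear")                   # -> [1, 4, 32000], reusing the same weight matrix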
diff --git a/spaces/Najaf-Zawar/Image-Super-Resolution/app.py b/spaces/Najaf-Zawar/Image-Super-Resolution/app.py
deleted file mode 100644
index 9e9f1e13a0d108c7d19c3b6475d34f4fc8fced22..0000000000000000000000000000000000000000
--- a/spaces/Najaf-Zawar/Image-Super-Resolution/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import cv2
-import os
-import torch
-from basicsr.archs.rrdbnet_arch import RRDBNet
-from realesrgan import RealESRGANer
-
-import gradio as gr
-import torchvision.transforms as transforms
-
-
-model_path = "Trained_ESRGAN.pth"
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
-upsampler = RealESRGANer(scale=4, model_path=model_path, model=model)
-
-
-
-def esrgan(input_image):
- output_img, _ = upsampler.enhance(input_image, outscale=3.5)
- filename = "output.jpg"
- output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2RGB)
- cv2.imwrite(filename, output_img)
- return filename
-
-
-# Define the Gradio app interface
-inputs = gr.Image(label="Input Image")
-outputs = gr.Image(label="Enhanced_Image.")
-title = "Image Super-Resolution Using ESR-GAN"
-description = "Enhance the Quality of your Low Resolution Images To High Resolution Using Artificial Intelligence"
-
-iface = gr.Interface(fn=esrgan, inputs=inputs, outputs=outputs, title=title, description=description, allow_flagging="never")
-
-iface.launch(inline=False)
\ No newline at end of file
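For completeness, the same upsampler object can be driven outside Gradio; a sketch assuming a local low_res.jpg (any image loaded with OpenCV works):

import cv2

img = cv2.imread("low_res.jpg", cv2.IMREAD_COLOR)      # BGR uint8 array
upscaled, _ = upsampler.enhance(img, outscale=3.5)     # same call the Gradio handler makes
cv2.imwrite("low_res_upscaled.jpg", upscaled)          # output keeps the input's channel order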
diff --git a/spaces/Navneet574/Heart_Disease_Prediciton/README.md b/spaces/Navneet574/Heart_Disease_Prediciton/README.md
deleted file mode 100644
index 0da0926a8a158552f0a87448f2701fea6f778abe..0000000000000000000000000000000000000000
--- a/spaces/Navneet574/Heart_Disease_Prediciton/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Heart Disease Prediction
-emoji: 🚀
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
-license: cc-by-nc-nd-4.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Nekomaru180/rvc-model/infer_pack/models.py b/spaces/Nekomaru180/rvc-model/infer_pack/models.py
deleted file mode 100644
index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000
--- a/spaces/Nekomaru180/rvc-model/infer_pack/models.py
+++ /dev/null
@@ -1,982 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from infer_pack import modules
-from infer_pack import attentions
-from infer_pack import commons
-from infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from infer_pack.commons import init_weights
-import numpy as np
-from infer_pack import commons
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder256Sim(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- x = self.proj(x) * x_mask
- return x, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
- sine_amp: amplitude of sine waveform (default 0.1)
- noise_std: std of Gaussian noise (default 0.003)
- voiced_threshold: F0 threshold for U/V classification (default 0)
- flag_for_pulse: this SineGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
- rad_values = (f0_buf / self.sampling_rate) % 1  # taking % 1 here means the harmonic products cannot be optimized in post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
- tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # applying % 1 here would prevent the later cumsum from being optimized
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(
- 2, 1
- )
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
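A shape sketch for SineGen; the sampling rate, frame count, and hop size are illustrative, and upp is the number of audio samples per F0 frame:

import torch

sine_gen = SineGen(samp_rate=40000, harmonic_num=0)
f0 = torch.full((1, 100), 220.0)            # 100 frames of a 220 Hz voiced contour, shape [batch, frames]
f0[:, 50:] = 0.0                            # second half unvoiced (f0 = 0)
sine, uv, noise = sine_gen(f0, upp=400)     # 100 frames * 400 samples/frame
print(sine.shape, uv.shape)                 # torch.Size([1, 40000, 1]) for both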
-
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonics above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that the amplitude of noise in unvoiced segments is decided
- by sine_amp
- voiced_threshold: threshold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMs256NSFsid(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
- if type(sr) == type("strr"):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
- ):  # ds is the speaker id, shape [bs, 1]
- # print(1,pitch.shape)#[bs,t]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- # print(-2,pitchf.shape,z_slice.shape)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs256NSFsid_nono(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr=None,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=False,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs256NSFsid_sim(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- # hop_length,
- gin_channels=0,
- use_sdp=True,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256Sim(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- is_half=kwargs["is_half"],
- )
-
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y_lengths, ds
- ):  # y (the spectrogram) is no longer needed here
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- x, x_mask = self.enc_p(phone, pitch, phone_lengths)
- x = self.flow(x, x_mask, g=g, reverse=True)
- z_slice, ids_slice = commons.rand_slice_segments(
- x, y_lengths, self.segment_size
- )
-
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice
-
- def infer(
- self, phone, phone_lengths, pitch, pitchf, ds, max_len=None
- ):  # y (the spectrogram) is no longer needed here
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- x, x_mask = self.enc_p(phone, pitch, phone_lengths)
- x = self.flow(x, x_mask, g=g, reverse=True)
- o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g)
- return o, o
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
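# A standalone sketch (illustrative only) of the "1d to 2d" step in DiscriminatorP.forward
# above: the waveform is reflection-padded to a multiple of `period` and reshaped so each
# row of the last dimension holds `period` consecutive samples.
import torch
import torch.nn.functional as F

def to_2d(x: torch.Tensor, period: int) -> torch.Tensor:
    b, c, t = x.shape
    if t % period != 0:  # pad first
        n_pad = period - (t % period)
        x = F.pad(x, (0, n_pad), "reflect")
        t = t + n_pad
    return x.view(b, c, t // period, period)

x = torch.randn(1, 1, 1000)
print(to_2d(x, 3).shape)  # torch.Size([1, 1, 334, 3]) after padding 1000 -> 1002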
diff --git a/spaces/NimaBoscarino/climategan/figures/human_evaluation.py b/spaces/NimaBoscarino/climategan/figures/human_evaluation.py
deleted file mode 100644
index 2889c0a945879830b844259f203612f96f759bef..0000000000000000000000000000000000000000
--- a/spaces/NimaBoscarino/climategan/figures/human_evaluation.py
+++ /dev/null
@@ -1,208 +0,0 @@
-"""
-This script plots the results of the human evaluation on Amazon Mechanical Turk, where
-human participants chose between an image from ClimateGAN and one from a comparable method.
-"""
-print("Imports...", end="")
-from argparse import ArgumentParser
-import os
-import yaml
-import numpy as np
-import pandas as pd
-import seaborn as sns
-from pathlib import Path
-import matplotlib.pyplot as plt
-
-
-# -----------------------
-# ----- Constants -----
-# -----------------------
-
-comparables_dict = {
- "munit_flooded": "MUNIT",
- "cyclegan": "CycleGAN",
- "instagan": "InstaGAN",
- "instagan_copypaste": "Mask-InstaGAN",
- "painted_ground": "Painted ground",
-}
-
-
-# Colors
-palette_colorblind = sns.color_palette("colorblind")
-color_climategan = palette_colorblind[9]
-
-palette_colorblind = sns.color_palette("colorblind")
-color_munit = palette_colorblind[1]
-color_cyclegan = palette_colorblind[2]
-color_instagan = palette_colorblind[3]
-color_maskinstagan = palette_colorblind[6]
-color_paintedground = palette_colorblind[8]
-palette_comparables = [
- color_munit,
- color_cyclegan,
- color_instagan,
- color_maskinstagan,
- color_paintedground,
-]
-palette_comparables_light = [
- sns.light_palette(color, n_colors=3)[1] for color in palette_comparables
-]
-
-
-def parsed_args():
- """
- Parse and return command-line args
-
- Returns:
- argparse.Namespace: the parsed arguments
- """
- parser = ArgumentParser()
- parser.add_argument(
- "--input_csv",
- default="amt_omni-vs-other.csv",
- type=str,
- help="CSV containing the results of the human evaluation, pre-processed",
- )
- parser.add_argument(
- "--output_dir",
- default=None,
- type=str,
- help="Output directory",
- )
- parser.add_argument(
- "--dpi",
- default=200,
- type=int,
- help="DPI for the output images",
- )
- parser.add_argument(
- "--n_bs",
- default=1_000_000,
- type=int,
- help="Number of bootstrap samples",
- )
- parser.add_argument(
- "--bs_seed",
- default=17,
- type=int,
- help="Bootstrap random seed, for reproducibility",
- )
-
- return parser.parse_args()
-
-
-if __name__ == "__main__":
- # -----------------------------
- # ----- Parse arguments -----
- # -----------------------------
- args = parsed_args()
- print("Args:\n" + "\n".join([f" {k:20}: {v}" for k, v in vars(args).items()]))
-
- # Determine output dir
- if args.output_dir is None:
- output_dir = Path(os.environ["SLURM_TMPDIR"])
- else:
- output_dir = Path(args.output_dir)
- if not output_dir.exists():
- output_dir.mkdir(parents=True, exist_ok=False)
-
- # Store args
- output_yml = output_dir / "args_human_evaluation.yml"
- with open(output_yml, "w") as f:
- yaml.dump(vars(args), f)
-
- # Read CSV
- df = pd.read_csv(args.input_csv)
-
- # Sort Y labels
- comparables = df.comparable.unique()
- is_climategan_sum = [
- df.loc[df.comparable == c, "climategan"].sum() for c in comparables
- ]
- comparables = comparables[np.argsort(is_climategan_sum)[::-1]]
-
- # Plot setup
- sns.set(style="whitegrid")
- plt.rcParams.update({"font.family": "serif"})
- plt.rcParams.update(
- {
- "font.serif": [
- "Computer Modern Roman",
- "Times New Roman",
- "Utopia",
- "New Century Schoolbook",
- "Century Schoolbook L",
- "ITC Bookman",
- "Bookman",
- "Times",
- "Palatino",
- "Charter",
- "serif" "Bitstream Vera Serif",
- "DejaVu Serif",
- ]
- }
- )
- fontsize = "medium"
-
- # Initialize the matplotlib figure
- fig, ax = plt.subplots(figsize=(10.5, 3), dpi=args.dpi)
-
- # Plot the total (right)
- sns.barplot(
- data=df.loc[df.is_valid],
- x="is_valid",
- y="comparable",
- order=comparables,
- orient="h",
- label="comparable",
- palette=palette_comparables_light,
- ci=None,
- )
-
- # Plot the left
- sns.barplot(
- data=df.loc[df.is_valid],
- x="climategan",
- y="comparable",
- order=comparables,
- orient="h",
- label="climategan",
- color=color_climategan,
- ci=99,
- n_boot=args.n_bs,
- seed=args.bs_seed,
- errcolor="black",
- errwidth=1.5,
- capsize=0.1,
- )
-
- # Draw line at 0.5
- y = np.arange(ax.get_ylim()[1] + 0.1, ax.get_ylim()[0], 0.1)
- x = 0.5 * np.ones(y.shape[0])
- ax.plot(x, y, linestyle=":", linewidth=1.5, color="black")
-
- # Change Y-Tick labels
- yticklabels = [comparables_dict[ytick.get_text()] for ytick in ax.get_yticklabels()]
- yticklabels_text = ax.set_yticklabels(
- yticklabels, fontsize=fontsize, horizontalalignment="right", x=0.96
- )
- for ytl in yticklabels_text:
- ax.add_artist(ytl)
-
- # Remove Y-label
- ax.set_ylabel(ylabel="")
-
- # Change X-Tick labels
- xlim = [0.0, 1.1]
- xticks = np.arange(xlim[0], xlim[1], 0.1)
- ax.set(xticks=xticks)
- plt.setp(ax.get_xticklabels(), fontsize=fontsize)
-
- # Set X-label
- ax.set_xlabel(None)
-
- # Change spines
- sns.despine(left=True, bottom=True)
-
- # Save figure
- output_fig = output_dir / "human_evaluation_rate_climategan.png"
- fig.savefig(output_fig, dpi=fig.dpi, bbox_inches="tight")
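# A toy sketch (hypothetical data, but the same column names the script expects) of how
# the "Sort Y labels" step above orders methods: by how often ClimateGAN was preferred
# over each of them.
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "comparable": ["cyclegan", "cyclegan", "munit_flooded", "munit_flooded"],
        "climategan": [1, 1, 1, 0],  # 1 = participant preferred the ClimateGAN image
        "is_valid": [True, True, True, True],
    }
)
comparables = df.comparable.unique()
is_climategan_sum = [df.loc[df.comparable == c, "climategan"].sum() for c in comparables]
print(comparables[np.argsort(is_climategan_sum)[::-1]])  # most-preferred-against first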
diff --git a/spaces/Norod78/Dragness/README.md b/spaces/Norod78/Dragness/README.md
deleted file mode 100644
index fd6c1512df2a4ba3346d5a0f1c7db3c4aaa8df69..0000000000000000000000000000000000000000
--- a/spaces/Norod78/Dragness/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Dragness
-emoji: 👸
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/NotFungibleIO/GFPGAN/README.md b/spaces/NotFungibleIO/GFPGAN/README.md
deleted file mode 100644
index 6ea52fba1169fccc56ab3ef7bcdcc322d13da2a8..0000000000000000000000000000000000000000
--- a/spaces/NotFungibleIO/GFPGAN/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: GFPGAN
-emoji: 😁
-colorFrom: yellow
-colorTo: green
-sdk: gradio
-sdk_version: 3.1.7
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/fairseq_criterion.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/fairseq_criterion.py
deleted file mode 100644
index ff4beb02503ea48a6c09596630aad4c710be94b6..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/fairseq_criterion.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import inspect
-from typing import Any, Dict, List
-
-from fairseq import metrics, utils
-from fairseq.dataclass import FairseqDataclass
-from fairseq.dataclass.utils import gen_parser_from_dataclass
-from torch.nn.modules.loss import _Loss
-
-
-class FairseqCriterion(_Loss):
- def __init__(self, task):
- super().__init__()
- self.task = task
- if hasattr(task, "target_dictionary"):
- tgt_dict = task.target_dictionary
- self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100
-
- @classmethod
- def add_args(cls, parser):
- """Add criterion-specific arguments to the parser."""
- dc = getattr(cls, "__dataclass", None)
- if dc is not None:
- gen_parser_from_dataclass(parser, dc())
-
- @classmethod
- def build_criterion(cls, cfg: FairseqDataclass, task):
- """Construct a criterion from command-line args."""
- # arguments in the __init__.
- init_args = {}
- for p in inspect.signature(cls).parameters.values():
- if (
- p.kind == p.POSITIONAL_ONLY
- or p.kind == p.VAR_POSITIONAL
- or p.kind == p.VAR_KEYWORD
- ):
- # we haven't implemented inference for these argument types,
- # but PRs welcome :)
- raise NotImplementedError("{} not supported".format(p.kind))
-
- assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY}
-
- if p.name == "task":
- init_args["task"] = task
- elif p.name == "cfg":
- init_args["cfg"] = cfg
- elif hasattr(cfg, p.name):
- init_args[p.name] = getattr(cfg, p.name)
- elif p.default != p.empty:
- pass # we'll use the default value
- else:
- raise NotImplementedError(
- "Unable to infer Criterion arguments, please implement "
- "{}.build_criterion".format(cls.__name__)
- )
- return cls(**init_args)
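# A minimal sketch (illustrative, not fairseq code) of the introspection pattern used by
# build_criterion above: read a class's __init__ signature and fill each parameter from a
# config object, falling back to the parameter's default. The dummy class names are
# assumptions for the example.
import inspect

class DummyCfg:
    label_smoothing = 0.1

class DummyCriterion:
    def __init__(self, task, label_smoothing=0.0, report_accuracy=False):
        self.task, self.label_smoothing, self.report_accuracy = task, label_smoothing, report_accuracy

def build(cls, cfg, task):
    init_args = {}
    for p in inspect.signature(cls).parameters.values():
        if p.name == "task":
            init_args["task"] = task
        elif hasattr(cfg, p.name):
            init_args[p.name] = getattr(cfg, p.name)
        # otherwise rely on the parameter's default value
    return cls(**init_args)

crit = build(DummyCriterion, DummyCfg(), task="toy-task")
print(crit.label_smoothing, crit.report_accuracy)  # 0.1 False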
-
- def forward(self, model, sample, reduce=True):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
- raise NotImplementedError
-
- @staticmethod
- def aggregate_logging_outputs(
- logging_outputs: List[Dict[str, Any]]
- ) -> Dict[str, Any]:
- """Aggregate logging outputs from data parallel training."""
- utils.deprecation_warning(
- "The aggregate_logging_outputs API is deprecated. "
- "Please use the reduce_metrics API instead."
- )
- raise NotImplementedError
-
- @classmethod
- def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
- """Aggregate logging outputs from data parallel training."""
- utils.deprecation_warning(
- "Criterions should implement the reduce_metrics API. "
- "Falling back to deprecated aggregate_logging_outputs API."
- )
- agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
- for k, v in agg_logging_outputs.items():
- if k in {"nsentences", "ntokens", "sample_size"}:
- continue
- metrics.log_scalar(k, v)
-
- @staticmethod
- def logging_outputs_can_be_summed() -> bool:
- """
- Whether the logging outputs returned by `forward` can be summed
- across workers prior to calling `reduce_metrics`. Setting this
- to True will improve distributed training speed.
- """
- return False
-
-
-class LegacyFairseqCriterion(FairseqCriterion):
- def __init__(self, args, task):
- super().__init__(task=task)
- self.args = args
-
- utils.deprecation_warning(
- "Criterions should take explicit arguments instead of an "
- "argparse.Namespace object, please update your criterion by "
- "extending FairseqCriterion instead of LegacyFairseqCriterion."
- )
-
- @classmethod
- def build_criterion(cls, args, task):
- """Construct a criterion from command-line args."""
- return cls(args, task)
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
deleted file mode 100644
index 5ee9c1be4a59ad3d072412827ab4e9b62dc7434e..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass, field
-from typing import List
-
-import torch.optim.lr_scheduler
-from omegaconf import II
-
-from fairseq.dataclass import FairseqDataclass
-from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
-
-
-@dataclass
-class ReduceLROnPlateauLRScheduleConfig(FairseqDataclass):
- lr_shrink: float = field(
- default=0.1, metadata={"help": "shrink factor for annealing"}
- )
- lr_threshold: float = field(
- default=1e-4,
- metadata={
- "help": (
- "threshold for measuring the new optimum, to only focus on "
- "significant changes"
- )
- },
- )
- lr_patience: int = field(
- default=0,
- metadata={
- "help": (
- "number of epochs with no improvement after which learning rate will "
- "be reduced"
- )
- },
- )
- warmup_updates: int = field(
- default=0,
- metadata={"help": "warmup the learning rate linearly for the first N updates"},
- )
- warmup_init_lr: float = field(
- default=-1,
- metadata={
- "help": "initial learning rate during warmup phase; default is cfg.lr"
- },
- )
- lr: List[float] = II("optimization.lr")
- maximize_best_checkpoint_metric: bool = II(
- "checkpoint.maximize_best_checkpoint_metric"
- )
-
-
-@register_lr_scheduler(
- "reduce_lr_on_plateau", dataclass=ReduceLROnPlateauLRScheduleConfig
-)
-class ReduceLROnPlateauLRSchedule(FairseqLRScheduler):
- """
- Decay the LR by a factor every time the validation loss plateaus.
- Also comes with an optional warmup phase, where we linearly increase
- the learning rate from some initial learning rate
- (``--warmup-init-lr``) until the configured learning rate
- (``--lr``). Thereafter, the LR is adjusted according to the original
- reduce_on_plateau scheme.
-
- During warmup::
-
- lrs = torch.linspace(
- cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates
- )
- lr = lrs[update_num]
- """
-
- def __init__(self, cfg: ReduceLROnPlateauLRScheduleConfig, optimizer):
- super().__init__(cfg, optimizer)
- if len(cfg.lr) > 1:
- raise ValueError(
- "Cannot use a fixed learning rate schedule with reduce_lr_on_plateau."
- " Consider --lr-scheduler=fixed instead."
- )
- self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
- self.optimizer.optimizer,
- patience=cfg.lr_patience,
- factor=cfg.lr_shrink,
- mode="max" if cfg.maximize_best_checkpoint_metric else "min",
- threshold=cfg.lr_threshold,
- )
- warmup_end_lr = cfg.lr[0]
- # if no warm up, sets initial lr to be cfg.lr[0]
- if cfg.warmup_init_lr < 0:
- cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr
-
- # linearly warmup for the first cfg.warmup_updates
- if cfg.warmup_updates > 0:
- self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
-
- # this flag is either set from arg when no warm up, or set by
- # step_update() when warmup finishes
- self.warmup_end = True if cfg.warmup_updates <= 0 else False
-
- # initial learning rate
- # this self.lr is used only during init and/or warm up period
- self.lr = warmup_end_lr if self.warmup_end else cfg.warmup_init_lr
- self.optimizer.set_lr(self.lr)
-
- def state_dict(self):
- """Return the LR scheduler state dict."""
- return {
- "best": self.lr_scheduler.best,
- "last_epoch": self.lr_scheduler.last_epoch,
- }
-
- def load_state_dict(self, state_dict):
- """Load an LR scheduler state dict."""
- self.lr_scheduler.best = state_dict["best"]
- if "last_epoch" in state_dict:
- self.lr_scheduler.last_epoch = state_dict["last_epoch"]
-
- def step(self, epoch, val_loss=None):
- """
- Update the learning rate at the end of the given epoch if warmup has
- finished; otherwise the LR is not updated on epoch boundaries.
- """
- if val_loss is not None and self.warmup_end is True:
- self.lr_scheduler.step(val_loss)
- else:
- self.lr_scheduler.last_epoch = epoch
- return self.optimizer.get_lr()
-
- def step_update(self, num_updates):
- """
- Update the learning rate after each update."""
- # if there is warmup
- if self.cfg.warmup_updates > 0:
- if num_updates <= self.cfg.warmup_updates:
- self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step
- self.optimizer.set_lr(self.lr)
- else:
- if self.warmup_end is False:
- self.warmup_end = True
- # else do nothing
- return self.optimizer.get_lr()
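# A small standalone sketch (toy numbers, not the scheduler itself) of the warmup schedule
# implemented by step_update above: the LR rises linearly from warmup_init_lr to lr over
# warmup_updates steps, after which ReduceLROnPlateau takes over at epoch boundaries.
warmup_init_lr, lr, warmup_updates = 1e-7, 5e-4, 4
lr_step = (lr - warmup_init_lr) / warmup_updates
for num_updates in range(1, warmup_updates + 1):
    print(num_updates, warmup_init_lr + num_updates * lr_step)
# after update 4 the LR equals 5e-4 and plateau-based decay begins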
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py
deleted file mode 100644
index 7f30dd98bb19b7bc414790787053efb231855129..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py
+++ /dev/null
@@ -1,767 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import (
- Embedding,
- TransformerDecoderEmbedding,
- TransformerDecoderLayer,
- TransformerDecoderOutputLayer,
- TransformerEncoderEmbedding,
- TransformerEncoderLayer,
- TransformerEncoderLayerNorm,
-)
-from fairseq.models import (
- BaseFairseqModel,
- FairseqDecoder,
- FairseqEncoder,
- register_model,
- register_model_architecture,
-)
-from fairseq.models.fairseq_encoder import EncoderOut
-from fairseq.models.transformer import (
- base_architecture,
- transformer_iwslt_de_en,
- transformer_wmt_en_de_big,
-)
-from fairseq.modules import SinusoidalPositionalEmbedding
-
-
-logger = logging.getLogger(__name__)
-
-
-DEFAULT_MAX_SOURCE_POSITIONS = 1024
-DEFAULT_MAX_TARGET_POSITIONS = 1024
-TORCH_PIPE = False
-RPC_INIT = False
-
-def import_pipe():
- global TORCH_PIPE
- global RPC_INIT
- try:
- from torch.distributed.pipeline.sync import Pipe # noqa
- global Pipe
- from torch.distributed.pipeline.sync.utils import partition_model
- global partition_model
- from torch.distributed import rpc
- import tempfile
- TORCH_PIPE = True
- # Initialize a single-process RPC agent, since TORCH_PIPE requires
- # RRef, and RRef requires RPC to be initialized; we therefore
- # initialize RPC with a single node.
- tmpfile = tempfile.NamedTemporaryFile()
- if not RPC_INIT:
- rpc.init_rpc(
- name="worker",
- rank=0,
- world_size=1,
- rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
- init_method="file://{}".format(tmpfile.name),
- )
- )
- RPC_INIT = True
- logger.info('Using torch pipe')
- except ImportError:
- try:
- from fairscale.nn import Pipe # noqa
- logger.info('Using fairscale pipe')
- except ImportError:
- raise ImportError("Please install fairscale with: pip install fairscale")
-
-
-@register_model("pipeline_parallel_transformer")
-class PipelineParallelTransformerModel(BaseFairseqModel):
- def __init__(self, encoder, decoder, balance, devices, chunks, checkpoint):
- import_pipe()
- super().__init__()
- assert isinstance(encoder, FairseqEncoder)
- assert isinstance(decoder, FairseqDecoder)
- encoder_module_list = (
- [encoder.embedding_layer]
- + list(encoder.encoder_layers)
- + [encoder.final_layer_norm]
- )
- self.num_encoder_modules = len(encoder_module_list)
- decoder_module_list = (
- [decoder.embedding_layer]
- + list(decoder.decoder_layers)
- + [decoder.decoder_output_layer]
- )
- self.num_decoder_modules = len(decoder_module_list)
- module_list = encoder_module_list + decoder_module_list
- self.devices = devices
- if TORCH_PIPE:
- self.model = Pipe(
- partition_model(nn.Sequential(*module_list), balance, devices),
- chunks=chunks,
- checkpoint=checkpoint,
- )
- else:
- self.model = Pipe(
- nn.Sequential(*module_list),
- balance=balance,
- devices=devices,
- chunks=chunks,
- checkpoint=checkpoint,
- )
- self.encoder_max_positions = self.max_positions_helper(
- encoder.embedding_layer, "max_source_positions"
- )
- self.decoder_max_positions = self.max_positions_helper(
- decoder.embedding_layer, "max_target_positions"
- )
- self.adaptive_softmax = getattr(decoder, "adaptive_softmax", None)
- # Note: To be populated during inference
- self.encoder = None
- self.decoder = None
-
- def forward(self, src_tokens, src_lengths, prev_output_tokens):
- if self.training:
- input_lst = [src_tokens, src_lengths, prev_output_tokens]
- input = tuple(i.to(self.devices[0], non_blocking=True) for i in input_lst)
- if TORCH_PIPE:
- return self.model(input).local_value()
- else:
- return self.model(input)
- else:
- assert self.encoder is not None and self.decoder is not None, (
- "encoder and decoder need to be initialized by "
- + "calling the `prepare_for_inference_()` method"
- )
- encoder_output_tuple = self.encoder(src_tokens, src_lengths)
- return self.decoder(encoder_output_tuple)
-
- def prepare_for_inference_(self, cfg):
- if self.encoder is not None and self.decoder is not None:
- logger.info("Encoder and Decoder already initialized")
- return
- encoder_module_list = []
- decoder_module_list = []
- module_count = 0
- for partition in self.model.partitions:
- for module in partition:
- if module_count < self.num_encoder_modules:
- encoder_module_list.append(module)
- else:
- decoder_module_list.append(module)
- module_count += 1
- self.model = None
- self.encoder = TransformerEncoder(cfg.distributed_training, None, None, encoder_module_list)
- self.decoder = TransformerDecoder(
- cfg.distributed_training, None, None, decoder_module_list=decoder_module_list
- )
-
- @staticmethod
- def add_args(parser):
- """Add model-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--activation-fn',
- choices=utils.get_available_activation_fns(),
- help='activation function to use')
- parser.add_argument('--dropout', type=float, metavar='D',
- help='dropout probability')
- parser.add_argument('--attention-dropout', type=float, metavar='D',
- help='dropout probability for attention weights')
- parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
- help='dropout probability after activation in FFN.')
- parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained encoder embedding')
- parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
- help='encoder embedding dimension')
- parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
- help='encoder embedding dimension for FFN')
- parser.add_argument('--encoder-layers', type=int, metavar='N',
- help='num encoder layers')
- parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
- help='num encoder attention heads')
- parser.add_argument('--encoder-normalize-before', action='store_true',
- help='apply layernorm before each encoder block')
- parser.add_argument('--encoder-learned-pos', action='store_true',
- help='use learned positional embeddings in the encoder')
- parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained decoder embedding')
- parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
- help='decoder embedding dimension')
- parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
- help='decoder embedding dimension for FFN')
- parser.add_argument('--decoder-layers', type=int, metavar='N',
- help='num decoder layers')
- parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
- help='num decoder attention heads')
- parser.add_argument('--decoder-learned-pos', action='store_true',
- help='use learned positional embeddings in the decoder')
- parser.add_argument('--decoder-normalize-before', action='store_true',
- help='apply layernorm before each decoder block')
- parser.add_argument('--share-decoder-input-output-embed', action='store_true',
- help='share decoder input and output embeddings')
- parser.add_argument('--share-all-embeddings', action='store_true',
- help='share encoder, decoder and output embeddings'
- ' (requires shared dictionary and embed dim)')
- parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
- help='if set, disables positional embeddings (outside self attention)')
- parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
- help='comma-separated list of adaptive softmax cutoff points. '
- 'Must be used with adaptive_loss criterion')
- parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
- help='sets adaptive softmax dropout for the tail projections')
- parser.add_argument('--num-embedding-chunks', type=int, metavar='N', default=1,
- help='Number of embedding layer chunks (enables more even distribution '
- 'of optimizer states across data parallel nodes '
- 'when using optimizer state sharding and '
- 'a big embedding vocabulary)')
- # fmt: on
-
- @classmethod
- def build_model_base(cls, args, task):
- """Build a new model instance."""
-
- # make sure all arguments are present in older models
- base_architecture(args)
-
- if not hasattr(args, "max_source_positions"):
- args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
- if not hasattr(args, "max_target_positions"):
- args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
-
- src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
-
- def build_embedding(dictionary, embed_dim, path=None, num_embed_chunks=1):
- assert embed_dim % num_embed_chunks == 0, (
- f"Number of embedding chunks = {num_embed_chunks} should be "
- + f"divisible by the embedding dimension = {embed_dim}"
- )
- assert path is None or num_embed_chunks == 1, (
- "Loading embedding from a path with number of embedding chunks > 1"
- + " is not yet supported"
- )
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- # if provided, load from preloaded dictionaries
- if path:
- emb = Embedding(num_embeddings, embed_dim, padding_idx)
- embed_dict = utils.parse_embedding(path)
- utils.load_embedding(embed_dict, dictionary, emb)
- else:
- embed_chunk_dim = embed_dim // num_embed_chunks
- emb = nn.ModuleList()
- for i in range(num_embed_chunks):
- emb.append(Embedding(num_embeddings, embed_chunk_dim, padding_idx))
- return emb
-
- num_embed_chunks = args.num_embedding_chunks
- if args.share_all_embeddings:
- if src_dict != tgt_dict:
- raise ValueError("--share-all-embeddings requires a joined dictionary")
- if args.encoder_embed_dim != args.decoder_embed_dim:
- raise ValueError(
- "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
- )
- if args.decoder_embed_path and (
- args.decoder_embed_path != args.encoder_embed_path
- ):
- raise ValueError(
- "--share-all-embeddings not compatible with --decoder-embed-path"
- )
- encoder_embed_tokens = build_embedding(
- src_dict,
- args.encoder_embed_dim,
- args.encoder_embed_path,
- num_embed_chunks,
- )
- decoder_embed_tokens = encoder_embed_tokens
- args.share_decoder_input_output_embed = True
- else:
- assert args.share_decoder_input_output_embed or num_embed_chunks == 1, (
- "Not sharing decoder I/O embeddings is not yet supported with number of "
- + "embedding chunks > 1"
- )
- encoder_embed_tokens = build_embedding(
- src_dict,
- args.encoder_embed_dim,
- args.encoder_embed_path,
- num_embed_chunks,
- )
- decoder_embed_tokens = build_embedding(
- tgt_dict,
- args.decoder_embed_dim,
- args.decoder_embed_path,
- num_embed_chunks,
- )
-
- encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
- decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
- return (encoder, decoder)
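# A toy sketch of the chunked embedding built above, under the assumption that the chunk
# outputs are concatenated along the feature axis: an embed_dim-sized table is split into
# num_embed_chunks smaller tables so optimizer state can be spread more evenly.
import torch
import torch.nn as nn

vocab, embed_dim, num_chunks = 100, 8, 2
chunks = nn.ModuleList(nn.Embedding(vocab, embed_dim // num_chunks) for _ in range(num_chunks))
tokens = torch.randint(0, vocab, (2, 5))
full = torch.cat([emb(tokens) for emb in chunks], dim=-1)
print(full.shape)  # torch.Size([2, 5, 8]) -- same shape as a single un-chunked embedding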
-
- @classmethod
- def build_encoder(cls, args, src_dict, embed_tokens):
- return TransformerEncoder(args, src_dict, embed_tokens)
-
- @classmethod
- def build_decoder(cls, args, tgt_dict, embed_tokens):
- return TransformerDecoder(args, tgt_dict, embed_tokens)
-
- @classmethod
- def build_model(cls, args, task):
- encoder, decoder = cls.build_model_base(args, task)
- return PipelineParallelTransformerModel(
- encoder=encoder,
- decoder=decoder,
- balance=utils.eval_str_list(args.pipeline_balance, type=int),
- devices=utils.eval_str_list(args.pipeline_devices, type=int),
- chunks=args.pipeline_chunks,
- checkpoint=args.pipeline_checkpoint,
- )
-
- def output_layer(self, features, **kwargs):
- """Project features to the default output size (typically vocabulary size)."""
- return self.decoder.output_layer(features, **kwargs)
-
- def max_positions(self):
- """Maximum length supported by the model."""
- return (self.encoder_max_positions, self.decoder_max_positions)
-
- def max_positions_helper(
- self, embedding_layer, max_positions_field="max_source_positions"
- ):
- """Maximum input length supported by the encoder or decoder."""
- if embedding_layer.embed_positions is None:
- return getattr(embedding_layer, max_positions_field)
- return min(
- getattr(embedding_layer, max_positions_field),
- embedding_layer.embed_positions.max_positions,
- )
-
- def get_normalized_probs(self, net_output, log_probs, sample=None):
- """Get normalized probabilities (or log probs) from a net's output."""
-
- if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
- if sample is not None:
- assert "target" in sample
- target = sample["target"]
- else:
- target = None
- out = self.adaptive_softmax.get_log_prob(net_output, target=target)
- return out.exp_() if not log_probs else out
-
- # A Pipe() module returns a tuple of tensors as the output.
- # In this case, the tuple has one element - the output tensor of logits
- logits = net_output if isinstance(net_output, torch.Tensor) else net_output[0]
- if log_probs:
- return utils.log_softmax(logits, dim=-1, onnx_trace=False)
- else:
- return utils.softmax(logits, dim=-1, onnx_trace=False)
-
- def max_decoder_positions(self):
- """Maximum length supported by the decoder."""
- return self.decoder_max_positions
-
- def load_state_dict(self, state_dict, strict=True, model_cfg=None):
- """Copies parameters and buffers from *state_dict* into this module and
- its descendants.
-
- Overrides the method in :class:`nn.Module`. Compared with that method
- this additionally "upgrades" *state_dicts* from old checkpoints.
- """
- self.upgrade_state_dict(state_dict)
- is_regular_transformer = not any("model.partitions" in k for k in state_dict)
- if is_regular_transformer:
- state_dict = self.convert_to_pipeline_parallel_state_dict(state_dict)
- return super().load_state_dict(state_dict, strict)
-
- def convert_to_pipeline_parallel_state_dict(self, state_dict):
- new_state_dict = self.state_dict()
- encoder_layer_idx = 0
- decoder_layer_idx = 0
- encoder_key_suffixes = [
- "self_attn.k_proj.weight",
- "self_attn.k_proj.bias",
- "self_attn.v_proj.weight",
- "self_attn.v_proj.bias",
- "self_attn.q_proj.weight",
- "self_attn.q_proj.bias",
- "self_attn.out_proj.weight",
- "self_attn.out_proj.bias",
- "self_attn_layer_norm.weight",
- "self_attn_layer_norm.bias",
- "fc1.weight",
- "fc1.bias",
- "fc2.weight",
- "fc2.bias",
- "final_layer_norm.weight",
- "final_layer_norm.bias",
- ]
- decoder_key_suffixes = [
- "self_attn.k_proj.weight",
- "self_attn.k_proj.bias",
- "self_attn.v_proj.weight",
- "self_attn.v_proj.bias",
- "self_attn.q_proj.weight",
- "self_attn.q_proj.bias",
- "self_attn.out_proj.weight",
- "self_attn.out_proj.bias",
- "self_attn_layer_norm.weight",
- "self_attn_layer_norm.bias",
- "encoder_attn.k_proj.weight",
- "encoder_attn.k_proj.bias",
- "encoder_attn.v_proj.weight",
- "encoder_attn.v_proj.bias",
- "encoder_attn.q_proj.weight",
- "encoder_attn.q_proj.bias",
- "encoder_attn.out_proj.weight",
- "encoder_attn.out_proj.bias",
- "encoder_attn_layer_norm.weight",
- "encoder_attn_layer_norm.bias",
- "fc1.weight",
- "fc1.bias",
- "fc2.weight",
- "fc2.bias",
- "final_layer_norm.weight",
- "final_layer_norm.bias",
- ]
- for pid, partition in enumerate(self.model.partitions):
- logger.info(f"Begin Partition {pid}")
- for mid, module in enumerate(partition):
- # fmt: off
- if isinstance(module, TransformerEncoderEmbedding):
- new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['encoder.embed_tokens.weight']
- new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['encoder.embed_positions._float_tensor']
- if isinstance(module, TransformerEncoderLayer):
- for suffix in encoder_key_suffixes:
- new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'encoder.layers.{encoder_layer_idx}.{suffix}']
- encoder_layer_idx += 1
- if isinstance(module, TransformerDecoderLayer):
- for suffix in decoder_key_suffixes:
- new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'decoder.layers.{decoder_layer_idx}.{suffix}']
- decoder_layer_idx += 1
- if isinstance(module, TransformerEncoderLayerNorm):
- if 'encoder.layer_norm.weight' in state_dict:
- new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.weight'] = state_dict['encoder.layer_norm.weight']
- new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.bias'] = state_dict['encoder.layer_norm.bias']
- if isinstance(module, TransformerDecoderEmbedding):
- new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['decoder.embed_tokens.weight']
- new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['decoder.embed_positions._float_tensor']
- if isinstance(module, TransformerDecoderOutputLayer):
- new_state_dict[f'model.partitions.{pid}.{mid}.output_projection.weight'] = state_dict['decoder.output_projection.weight']
- # fmt: on
- return new_state_dict
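# A toy sketch (hypothetical keys and partition layout) of the renaming performed above:
# a flat transformer checkpoint key such as "encoder.layers.0.fc1.weight" is moved into
# the pipeline layout "model.partitions.<pid>.<mid>.fc1.weight".
flat = {
    "encoder.layers.0.fc1.weight": "tensor-a",
    "encoder.layers.1.fc1.weight": "tensor-b",
}
# assume partition 0 holds modules 1 and 2 (module 0 being the embedding layer)
layout = {(0, 1): ("encoder.layers", 0), (0, 2): ("encoder.layers", 1)}
pipeline = {
    f"model.partitions.{pid}.{mid}.fc1.weight": flat[f"{prefix}.{layer}.fc1.weight"]
    for (pid, mid), (prefix, layer) in layout.items()
}
print(pipeline)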
-
-
-class TransformerEncoder(FairseqEncoder):
- """
- Transformer encoder consisting of *args.encoder_layers* layers. Each layer
- is a :class:`TransformerEncoderLayer`.
-
- Args:
- args (argparse.Namespace): parsed command-line arguments
- dictionary (~fairseq.data.Dictionary): encoding dictionary
- embed_tokens (torch.nn.Embedding): input embedding
- """
-
- def __init__(self, args, dictionary, embed_tokens, encoder_module_list=None):
- super().__init__(dictionary)
- self.register_buffer("version", torch.Tensor([3]))
- import_pipe()
- self.use_pipeline = encoder_module_list is not None
- if not self.use_pipeline:
- self.embedding_layer = TransformerEncoderEmbedding(args, embed_tokens)
- self.encoder_layers = nn.Sequential(*[TransformerEncoderLayer(args) for i in range(args.encoder_layers)])
- if isinstance(embed_tokens, nn.ModuleList):
- emb_dim = sum(e.embedding_dim for e in embed_tokens)
- else:
- emb_dim = embed_tokens.embedding_dim
- self.final_layer_norm = TransformerEncoderLayerNorm(args, emb_dim)
- else:
- encoder_balance = utils.eval_str_list(
- args.pipeline_encoder_balance, type=int
- )
- encoder_devices = utils.eval_str_list(
- args.pipeline_encoder_devices, type=int
- )
- assert sum(encoder_balance) == len(encoder_module_list), (
- f"Sum of encoder_balance={encoder_balance} is not equal "
- + f"to num_encoder_modules={len(encoder_module_list)}"
- )
- if TORCH_PIPE:
- self.model = Pipe(
- module=partition_model(nn.Sequential(*encoder_module_list), encoder_balance, encoder_devices),
- chunks=args.pipeline_chunks,
- checkpoint=args.pipeline_checkpoint,
- )
- else:
- self.model = Pipe(
- module=nn.Sequential(*encoder_module_list),
- balance=encoder_balance,
- devices=encoder_devices,
- chunks=args.pipeline_chunks,
- checkpoint=args.pipeline_checkpoint,
- )
-
- def forward(self, src_tokens, src_lengths):
- """
- Args:
- input_tuple(
- src_tokens (LongTensor): tokens in the source language of shape
- `(batch, src_len)`
- src_lengths (torch.LongTensor): lengths of each source sentence of
- shape `(batch)`
- )
-
- Returns:
- output_tuple(
- - **encoder_out** (Tensor): the last encoder layer's output of
- shape `(src_len, batch, embed_dim)`
- - **encoder_padding_mask** (ByteTensor): the positions of
- padding elements of shape `(batch, src_len)`
- - prev_output_tokens
- - **encoder_states** (List[Tensor]): all intermediate
- hidden states of shape `(src_len, batch, embed_dim)`.
- Only populated if *return_all_hiddens* is True.
- )
- """
- dummy_prev_output_tokens = torch.zeros(
- 1, dtype=src_tokens.dtype, device=src_tokens.device
- )
- input_tuple = (src_tokens, src_lengths, dummy_prev_output_tokens)
- if self.use_pipeline:
- input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple)
- if TORCH_PIPE:
- encoder_out = self.model(input_tuple).local_value()
- else:
- encoder_out = self.model(input_tuple)
- else:
- encoder_embed_output_tuple = self.embedding_layer(input_tuple)
- encoder_layers_output = self.encoder_layers(encoder_embed_output_tuple)
- encoder_out = self.final_layer_norm(encoder_layers_output)
- # first element is the encoder output
- # second element is the encoder padding mask
- # the remaining elements of EncoderOut are not computed by
- # the PipelineParallelTransformer
- return EncoderOut(encoder_out[0], encoder_out[1], None, None, None, None)
-
- def reorder_encoder_out(self, encoder_out, new_order):
- """
- Reorder encoder output according to *new_order*.
-
- Args:
- encoder_out: output from the ``forward()`` method
- new_order (LongTensor): desired order
-
- Returns:
- *encoder_out* rearranged according to *new_order*
- """
- if encoder_out.encoder_out is not None:
- encoder_out = encoder_out._replace(
- encoder_out=encoder_out.encoder_out.index_select(1, new_order)
- )
- if encoder_out.encoder_padding_mask is not None:
- encoder_out = encoder_out._replace(
- encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(
- 0, new_order
- )
- )
- if encoder_out.encoder_embedding is not None:
- encoder_out = encoder_out._replace(
- encoder_embedding=encoder_out.encoder_embedding.index_select(
- 0, new_order
- )
- )
- if encoder_out.encoder_states is not None:
- for idx, state in enumerate(encoder_out.encoder_states):
- encoder_out.encoder_states[idx] = state.index_select(1, new_order)
- return encoder_out
-
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- if self.embedding_layer.embed_positions is None:
- return self.embedding_layer.max_source_positions
- return min(
- self.embedding_layer.max_source_positions,
- self.embedding_layer.embed_positions.max_positions,
- )
-
-
-class TransformerDecoder(FairseqDecoder):
- """
- Transformer decoder consisting of *args.decoder_layers* layers. Each layer
- is a :class:`TransformerDecoderLayer`.
-
- Args:
- args (argparse.Namespace): parsed command-line arguments
- dictionary (~fairseq.data.Dictionary): decoding dictionary
- embed_tokens (torch.nn.Embedding): output embedding
- no_encoder_attn (bool, optional): whether to attend to encoder outputs
- (default: False).
- """
-
- def __init__(
- self,
- args,
- dictionary,
- embed_tokens,
- no_encoder_attn=False,
- decoder_module_list=None,
- ):
- super().__init__(dictionary)
- self.register_buffer("version", torch.Tensor([3]))
- import_pipe()
- self.use_pipeline = decoder_module_list is not None
- if not self.use_pipeline:
- self.embedding_layer = TransformerDecoderEmbedding(args, embed_tokens)
- self.decoder_layers = nn.Sequential(*[
- TransformerDecoderLayer(args, no_encoder_attn)
- for _ in range(args.decoder_layers)
- ])
- self.decoder_output_layer = TransformerDecoderOutputLayer(
- args, embed_tokens, dictionary
- )
- else:
- decoder_balance = utils.eval_str_list(
- args.pipeline_decoder_balance, type=int
- )
- decoder_devices = utils.eval_str_list(
- args.pipeline_decoder_devices, type=int
- )
- assert sum(decoder_balance) == len(decoder_module_list), (
- f"Sum of decoder_balance={decoder_balance} is not equal "
- + f"to num_decoder_modules={len(decoder_module_list)}"
- )
- if TORCH_PIPE:
- self.model = Pipe(
- module=partition_model(nn.Sequential(*decoder_module_list), decoder_balance, decoder_devices),
- chunks=args.pipeline_chunks,
- checkpoint=args.pipeline_checkpoint,
- )
- else:
- self.model = Pipe(
- module=nn.Sequential(*decoder_module_list),
- balance=decoder_balance,
- devices=decoder_devices,
- chunks=args.pipeline_chunks,
- checkpoint=args.pipeline_checkpoint,
- )
-
- def forward(
- self,
- prev_output_tokens,
- encoder_out=None,
- ):
- """
- Args:
- prev_output_tokens (LongTensor): previous decoder outputs of shape
- `(batch, tgt_len)`, for teacher forcing
- encoder_out (optional): output from the encoder, used for
- encoder-side attention
- incremental_state (dict): dictionary used for storing state during
- :ref:`Incremental decoding`
- features_only (bool, optional): only return features without
- applying output layer (default: False).
-
- Returns:
- tuple:
- - the decoder's output of shape `(batch, tgt_len, vocab)`
- - a dictionary with any model-specific outputs
- """
- input_tuple = (
- encoder_out.encoder_out,
- encoder_out.encoder_padding_mask,
- prev_output_tokens,
- )
- if self.use_pipeline:
- input_tuple = tuple(i.to(self.model.devices[0]) for i in input_tuple)
- if TORCH_PIPE:
- return (self.model(input_tuple).local_value(),)
- else:
- return (self.model(input_tuple),)
- else:
- embed_layer_output = self.embedding_layer(input_tuple)
- state = self.decoder_layers(embed_layer_output)
- return (self.decoder_output_layer(state),)
-
- def output_layer(self, features, **kwargs):
- """Project features to the vocabulary size."""
- if self.adaptive_softmax is None:
- # project back to size of vocabulary
- if self.share_input_output_embed:
- return F.linear(features, self.embed_tokens.weight)
- else:
- return F.linear(features, self.embed_out)
- else:
- return features
-
- def max_positions(self):
- """Maximum output length supported by the decoder."""
- if self.embedding_layer.embed_positions is None:
- return self.embedding_layer.max_target_positions
- return min(
- self.embedding_layer.max_target_positions,
- self.embedding_layer.embed_positions.max_positions,
- )
-
- def buffered_future_mask(self, tensor):
- dim = tensor.size(0)
- if (
- not hasattr(self, "_future_mask")
- or self._future_mask is None
- or self._future_mask.device != tensor.device
- or self._future_mask.size(0) < dim
- ):
- self._future_mask = torch.triu(
- utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
- )
- return self._future_mask[:dim, :dim]
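# A standalone sketch of the cached causal mask built above: an upper-triangular matrix of
# -inf (strictly above the diagonal) that, when added to attention scores before softmax,
# blocks attention to future positions.
import torch

dim = 4
future_mask = torch.triu(torch.full((dim, dim), float("-inf")), diagonal=1)
print(future_mask)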
-
- def upgrade_state_dict_named(self, state_dict, name):
- """Upgrade a (possibly old) state dict for new versions of fairseq."""
- if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
- weights_key = "{}.embed_positions.weights".format(name)
- if weights_key in state_dict:
- del state_dict[weights_key]
- state_dict[
- "{}.embed_positions._float_tensor".format(name)
- ] = torch.FloatTensor(1)
-
- for i in range(len(self.layers)):
- # update layer norms
- layer_norm_map = {
- "0": "self_attn_layer_norm",
- "1": "encoder_attn_layer_norm",
- "2": "final_layer_norm",
- }
- for old, new in layer_norm_map.items():
- for m in ("weight", "bias"):
- k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
- if k in state_dict:
- state_dict[
- "{}.layers.{}.{}.{}".format(name, i, new, m)
- ] = state_dict[k]
- del state_dict[k]
-
- version_key = "{}.version".format(name)
- if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
- # earlier checkpoints did not normalize after the stack of layers
- self.layer_norm = None
- self.normalize = False
- state_dict[version_key] = torch.Tensor([1])
-
- return state_dict
-
-
-@register_model_architecture(
- "pipeline_parallel_transformer", "transformer_iwslt_de_en_pipeline_parallel"
-)
-def transformer_iwslt_de_en_dist(args):
- transformer_iwslt_de_en(args)
-
-
-@register_model_architecture(
- "pipeline_parallel_transformer", "transformer_wmt_en_de_big_pipeline_parallel"
-)
-def transformer_wmt_en_de_big_dist(args):
- transformer_wmt_en_de_big(args)
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp
deleted file mode 100644
index 744c363e550231b8e0fbb94f998d46039daf5c00..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-#include <torch/extension.h>
-#include <vector>
-
-std::vector<at::Tensor>
-dynamicconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l);
-
-std::vector<at::Tensor> dynamicconv_cuda_backward(
- at::Tensor gradOutput,
- int padding_l,
- at::Tensor input,
- at::Tensor filters);
-
-#define CHECK_CUDA(x) \
- AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) \
- AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
-#define CHECK_INPUT(x) \
- CHECK_CUDA(x); \
- CHECK_CONTIGUOUS(x)
-
-std::vector<at::Tensor>
-dynamicconv_forward(at::Tensor input, at::Tensor filters, int padding_l) {
- CHECK_INPUT(input);
- CHECK_INPUT(filters);
-
- return dynamicconv_cuda_forward(input, filters, padding_l);
-}
-
-std::vector<at::Tensor> dynamicconv_backward(
- at::Tensor gradOutput,
- int padding_l,
- at::Tensor input,
- at::Tensor filters) {
- CHECK_INPUT(gradOutput);
- CHECK_INPUT(input);
- CHECK_INPUT(filters);
-
- return dynamicconv_cuda_backward(gradOutput, padding_l, input, filters);
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)");
- m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)");
-}
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/online_backtranslation.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/online_backtranslation.py
deleted file mode 100644
index 2e27ca237cde1980b2c3ca497e12f458da230c37..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/online_backtranslation.py
+++ /dev/null
@@ -1,682 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import contextlib
-import json
-import logging
-import math
-import os
-from argparse import Namespace
-from collections import OrderedDict, defaultdict
-from pathlib import Path
-from typing import Dict, Sequence, Tuple
-from argparse import ArgumentError
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-import fairseq
-from fairseq import metrics, options, utils
-from fairseq.data import (
- FairseqDataset,
- LanguagePairDataset,
- NoisingDataset,
- PrependTokenDataset,
- RoundRobinZipDatasets,
- TransformEosLangPairDataset,
- data_utils,
- encoders,
-)
-from fairseq.sequence_generator import SequenceGenerator
-from fairseq.tasks import register_task
-from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
-
-logger = logging.getLogger(__name__)
-
-
-class PiecewiseLinearFn:
- """Piecewise linear function. Can be configured with a string."""
-
- def __init__(self, pieces: Sequence[Tuple[int, float]]):
- assert pieces == sorted(
- pieces
- ), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}"
-
- self.pieces = pieces
-
- def __call__(self, x: int) -> float:
- for i, (x_a, y_a) in enumerate(self.pieces[:-1]):
- x_b, y_b = self.pieces[i + 1]
- if x_a <= x <= x_b:
- return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a)
-
- return self.pieces[-1][1]
-
- @staticmethod
- def from_string(configuration: str) -> "PiecewiseLinearFn":
- """
- Parse the configuration of lambda coefficient (for scheduling).
- x = "3" # lambda will be a constant equal to x
- x = "0:1,1000:0" # lambda will start from 1 and linearly decrease
- # to 0 during the first 1000 iterations
- x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
- # iterations, then will linearly increase to 1 until iteration 2000
- """
- if isinstance(configuration, float):
- return PiecewiseLinearFn([(0, configuration)])
-
- try:
- parts = configuration.split(",")
- if len(parts) == 1:
- v = float(configuration)
- return PiecewiseLinearFn([(0, v)])
-
- split = [s.split(":") for s in parts]
- pieces = [(int(t), float(v)) for t, v in split]
- return PiecewiseLinearFn(pieces)
- except Exception:
- raise ValueError(
- f"Invalid PiecewiseLinearFn configuration: {configuration!r}"
- )
-
- @staticmethod
- def one() -> "PiecewiseLinearFn":
- return PiecewiseLinearFn([(0, 1.0)])
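# A quick usage sketch of the schedule strings documented in from_string above, evaluated
# with a standalone re-implementation (kept separate from the fairseq class so it runs on
# its own; it only handles the multi-piece "t:v,t:v" form).
def piecewise(config: str):
    pieces = sorted((int(t), float(v)) for t, v in (p.split(":") for p in config.split(",")))
    def fn(x: int) -> float:
        for (x_a, y_a), (x_b, y_b) in zip(pieces, pieces[1:]):
            if x_a <= x <= x_b:
                return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a)
        return pieces[-1][1]
    return fn

lambda_bt = piecewise("0:1,1000:0")  # linearly decay from 1 to 0 over the first 1000 updates
print(lambda_bt(0), lambda_bt(500), lambda_bt(2000))  # 1.0 0.5 0.0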
-
-
-@register_task("online_backtranslation")
-class OnlineBackTranslationTask(TranslationTask):
- @staticmethod
- def add_args(parser):
- """Add task-specific arguments to the parser."""
- # fmt: off
- # Generic translation args
- parser.add_argument('data', help='colon-separated list of data directories, \
- iterated over in round-robin order across epochs; \
- valid and test data are always taken from the first directory, \
- so they do not need to be repeated in every directory')
- parser.add_argument('--mono-langs', metavar='MONO_LANGS',
- help='monolingual languages for training')
- parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS',
- help='language pairs for validation')
- parser.add_argument('--load-alignments', action='store_true',
- help='load the binarized alignments')
- parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL',
- help='pad the source on the left')
- parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
- help='pad the target on the left')
- parser.add_argument('--upsample-primary', default=1, type=int,
- help='amount to upsample primary dataset')
- try:
- parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
- help='max number of tokens in the source sequence')
- parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
- help='max number of tokens in the target sequence')
- except ArgumentError:
- # this might have already been defined. Once we transition this to hydra it should be fine to add it here.
- pass
- parser.add_argument('--truncate-source', action='store_true', default=False,
- help='truncate source to max-source-positions')
- parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
- help='if >0, then bucket source and target lengths into N '
- 'buckets and pad accordingly; this is useful on TPUs '
- 'to minimize the number of compilations')
-
- # Denoising args
- parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
- help='maximum word shuffle distance for denoising autoencoding data generation')
- parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
- help='word dropout probability for denoising autoencoding data generation')
- parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
- help='word blanking probability for denoising autoencoding data generation')
-
- # Backtranslation args
- parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N',
- help='back-translation weight')
- parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N',
- help='denoising auto-encoder weight')
-
- # Evaluation args
- parser.add_argument('--generate-one-by-one', action='store_true',
- help='generate one sentence at a time for backtranslation')
-
- parser.add_argument('--eval-bleu', action='store_true',
- help='evaluation with BLEU scores')
- parser.add_argument('--eval-bleu-detok', type=str, default="space",
- help='detokenize before computing BLEU (e.g., "moses"); '
- 'required if using --eval-bleu; use "space" to '
- 'disable detokenization; see fairseq.data.encoders '
- 'for other options')
- parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
- help='args for building the tokenizer, if needed')
- parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
- help='compute tokenized BLEU instead of sacrebleu')
- parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
- help='remove BPE before computing BLEU')
- parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
- help='generation args for BLEU scoring, '
- 'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
- parser.add_argument('--eval-bleu-print-samples', action='store_true',
- help='print sample generations during validation')
- # fmt: on
-
- def __init__(self, args, common_dict, mono_langs, valid_lang_pairs):
- super().__init__(args, common_dict, common_dict)
- self.common_dict = common_dict
- self.mono_langs = mono_langs
- self.valid_lang_pairs = valid_lang_pairs
-
- self.SHOW_SAMPLES_INTERVAL = 1000
- # Start by showing samples
- self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL
- self.SHOW_SAMPLES_NUMBER = 5
- self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt)
- self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae)
-
- self.args = args
- self.data = utils.split_paths(self.args.data)
- if len(self.data) == 1:
- shards = list(Path(self.data[0]).glob("shard*"))
- if len(shards) > 0:
- # keep this as strings, since it can also be a manifold path
- old_data = self.data
- self.data = [str(shard) for shard in shards]
- logging.warning(f"Expanded data directory {old_data} to {self.data}")
-
- @classmethod
- def setup_task(cls, args, **kwargs):
- """Setup the task (e.g., load dictionaries).
-
- Args:
- args (argparse.Namespace): parsed command-line arguments
- """
- args.left_pad_source = options.eval_bool(args.left_pad_source)
- args.left_pad_target = options.eval_bool(args.left_pad_target)
-
- paths = utils.split_paths(args.data)
- assert len(paths) > 0
- assert args.mono_langs is not None
-
- mono_langs = args.mono_langs.split(",")
- valid_lang_pairs = args.valid_lang_pairs.split(",")
-
- # load dictionary
- dict_path = os.path.join(paths[0], "dict.txt")
- common_dict = cls.load_dictionary(dict_path)
-
- return cls(args, common_dict, mono_langs, valid_lang_pairs)
-
- def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset:
- """Load a given dataset split.
-
- Args:
- split (str): name of the split (e.g., train, valid, test)
- """
- if split == "train":
- data_path = self.data[(epoch - 1) % len(self.data)]
- dataset = self.load_train_dataset(data_path)
- else:
- # valid/test should always be the same.
- dataset = self.load_translation_dataset(split, self.data[0])
-
- self.datasets[split] = dataset
- return dataset
-
- def load_train_dataset(self, data_path: str) -> FairseqDataset:
- """The training dataset is made of backtranslation dataset and denoising dataset."""
- data = []
- for lang in self.mono_langs:
- train_path = os.path.join(data_path, lang, "train")
- # TODO: could we do the BT using the denoised sample?
- # this would halve the data loading work
- data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang)))
- data.append(
- (f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang))
- )
-
- return RoundRobinZipDatasets(OrderedDict(data))
-
- def _langpair_dataset(
- self, src: FairseqDataset, tgt: FairseqDataset
- ) -> LanguagePairDataset:
- return LanguagePairDataset(
- src,
- src.sizes,
- self.dictionary,
- tgt=tgt,
- tgt_sizes=tgt.sizes,
- tgt_dict=self.dictionary,
- left_pad_source=self.args.left_pad_source,
- left_pad_target=self.args.left_pad_target,
- # TODO: should we shuffle? we already sort batches by size, so is it needed?
- # shuffle=True,
- )
-
- def _prepend_lang_bos_to_target(
- self, dataset: LanguagePairDataset, lang: str
- ) -> LanguagePairDataset:
- bos = _lang_token_index(self.dictionary, lang)
- return TransformEosLangPairDataset(
- dataset,
- src_eos=self.dictionary.eos(),
- new_src_eos=self.dictionary.eos(),
- tgt_bos=self.dictionary.eos(),
- new_tgt_bos=bos,
- )
-
- def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset:
- """The BT dataset is generated with (tgt, tgt) pairs.
- The actual translation to a (generated_src, tgt) pair
- is done on the fly during training.
- """
- mono_dataset = data_utils.load_indexed_dataset(
- data_path, self.common_dict, self.args.dataset_impl
- )
- assert mono_dataset is not None, f"No dataset found for {lang}"
-
- mono_dataset_src = PrependTokenDataset(
- mono_dataset, _lang_token_index(self.dictionary, lang)
- )
-
- mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset)
- logger.info(
- f"mono_lang = {lang} "
- f"lang token index = {_lang_token_index(self.dictionary, lang)} "
- f"lang token = {_lang_token(lang)}"
- )
-
- mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang)
- return mono_dataset_bt
-
- def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset:
- """Classic denoising dataset"""
- dataset = data_utils.load_indexed_dataset(
- data_path, self.common_dict, self.args.dataset_impl
- )
- noisy_dataset = NoisingDataset(
- dataset,
- self.dictionary,
- seed=1,
- max_word_shuffle_distance=self.args.max_word_shuffle_distance,
- word_dropout_prob=self.args.word_dropout_prob,
- word_blanking_prob=self.args.word_blanking_prob,
- )
- noisy_dataset = PrependTokenDataset(
- noisy_dataset, _lang_token_index(self.dictionary, lang)
- )
-
- clean_dataset = data_utils.load_indexed_dataset(
- data_path, self.common_dict, self.args.dataset_impl
- )
- denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset)
- denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang)
- return denoising_dataset
-
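For intuition, the three corruptions NoisingDataset applies here (bounded word shuffle, word dropout, word blanking) can be sketched on a plain token list. The snippet below is an illustrative toy (`toy_noise` is not fairseq code) and ignores fairseq's BPE-aware word boundaries:

    import random

    def toy_noise(tokens, max_shuffle_distance=3, dropout_prob=0.1,
                  blank_prob=0.1, unk="<unk>"):
        # bounded shuffle: each token can move at most max_shuffle_distance positions
        keys = [i + random.uniform(0, max_shuffle_distance) for i in range(len(tokens))]
        shuffled = [t for _, t in sorted(zip(keys, tokens), key=lambda kv: kv[0])]
        out = []
        for t in shuffled:
            if random.random() < dropout_prob:   # word dropout: drop the token
                continue
            if random.random() < blank_prob:     # word blanking: replace with <unk>
                out.append(unk)
            else:
                out.append(t)
        return out

    print(toy_noise("the quick brown fox jumps".split()))
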
- def load_translation_dataset(
- self, split: str, data_path: str, combine: bool = False
- ):
- # only evaluating with one language pair for the moment,
- # since ConcatDataset doesn't work as expected
- assert len(self.valid_lang_pairs) == 1, "For now..."
- valid_lang_pair = self.valid_lang_pairs[0]
- src, tgt = valid_lang_pair.split("-")
-
- # use the same function as TranslationTask
- src_tgt_dt = load_langpair_dataset(
- data_path,
- split,
- src,
- self.common_dict,
- tgt,
- self.common_dict,
- combine=combine,
- dataset_impl=self.args.dataset_impl,
- upsample_primary=self.args.upsample_primary,
- left_pad_source=self.args.left_pad_source,
- left_pad_target=self.args.left_pad_target,
- max_source_positions=self.args.max_source_positions,
- max_target_positions=self.args.max_target_positions,
- load_alignments=self.args.load_alignments,
- truncate_source=self.args.truncate_source,
- num_buckets=self.args.num_batch_buckets,
- shuffle=(split != "test"),
- prepend_bos_src=_lang_token_index(self.dictionary, src),
- )
-
- src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt)
- src_tgt_eos_dt.args = self.args
- return src_tgt_eos_dt
-
- def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
- raise NotImplementedError
-
- def build_model(self, args):
- # torch.autograd.set_detect_anomaly(True)
- model = super().build_model(args)
-
- add_special_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs)
-
- self.sequence_generators = {}
- for mono_lang in self.mono_langs:
- self.sequence_generators[mono_lang] = SequenceGenerator(
- [model],
- tgt_dict=self.dictionary,
- beam_size=1,
- max_len_a=1.3,
- max_len_b=5,
- min_len=5,
- # keep 1 to be able to prepend bos
- max_len=model.max_decoder_positions() - 1,
- )
-
- if getattr(args, "eval_bleu", False):
- assert getattr(args, "eval_bleu_detok", None) is not None, (
- "--eval-bleu-detok is required if using --eval-bleu; "
- "try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
- "to disable detokenization, e.g., when using sentencepiece)"
- )
- detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
- self.tokenizer = encoders.build_tokenizer(
- Namespace(
- tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
- )
- )
-
- gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
- self.bleu_sequence_generator = self.build_generator(
- [model], Namespace(**gen_args)
- )
-
- return model
-
- def max_positions(self):
- """Return the max sentence length allowed by the task."""
- return (self.args.max_source_positions, self.args.max_target_positions)
-
- @property
- def dictionary(self):
- """Return the source :class:`~fairseq.data.Dictionary`."""
- return self.common_dict
-
- def display_samples_once_in_a_while(self, smp, mono_lang, other_lang):
- self._show_samples_ctr += 1
- if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL:
- return
- self._show_samples_ctr = 0
-
- ln = smp["net_input"]["src_tokens"].shape[0]
-
- logger.info(
- f"(r:{self.args.distributed_rank}) : "
- f"{other_lang} ---> {mono_lang} "
- f"({other_lang} was generated by back-translation.) {ln} samples"
- )
-
- for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)):
- src_tokens = smp["net_input"]["src_tokens"][i]
- tgt_tokens = smp["target"][i]
-
- src_str = self.dictionary.string(src_tokens, "sentencepiece")
- tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece")
- logger.info(
- f"\n{i}\t\t[{other_lang} generated] {src_str}\n"
- f"\t\t[{mono_lang} original ] {tgt_str}\n"
- f"\t\t[ src tokens] {src_tokens}\n"
- )
-
- def backtranslate_sample(self, smp, orig_lang, other_lang) -> None:
- """
- * WARNING: smp is modified in place.
- * At the start of this function, `smp` has the same input and target:
- |--------------------------------------------------------|
- | smp['net_input']['src_tokens'] | smp['target'] |
- | (from data) __en__ hello world | __en__ hello world |
- |--------------------------------------------------------|
-
- * We call generator.generate(smp, bos_token = token("ro")),
- and copy the result as input
- * At the end, `smp` has the translation to the other language.
- |--------------------------------------------------------|
- | smp['net_input']['src_tokens'] | smp['target'] |
- | (generated) __ro__ salut lume | __en__ hello world |
- |--------------------------------------------------------|
-
- """
- bos_token = _lang_token_index(self.dictionary, other_lang)
- generated = self.sequence_generators[orig_lang].generate(
- models=[], sample=smp, bos_token=bos_token
- )
-
- max_length = max(gn[0]["tokens"].size(0) for gn in generated)
- net_input = smp["net_input"]
- n_src_tokens = torch.empty(
- size=(len(generated), max_length + 1), dtype=net_input["src_tokens"].dtype
- )
- n_src_lengths = torch.empty(
- len(generated), dtype=net_input["src_lengths"].dtype
- )
-
- for i, gn in enumerate(generated):
- tokens = gn[0]["tokens"]
- tokens_size = tokens.size(0)
- padding_needed = max_length - tokens_size
- tokens = torch.cat([tokens.new([bos_token]), tokens])
- tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad())
- n_src_tokens[i] = tokens
- n_src_lengths[i] = tokens_size + 1
-
- device = net_input["src_tokens"].device
- # This seems to be important
- del net_input["src_tokens"]
- del net_input["src_lengths"]
- net_input["src_tokens"] = n_src_tokens.to(device)
- net_input["src_lengths"] = n_src_lengths.to(device)
-
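To make the padding step in backtranslate_sample easier to follow, here is a minimal stand-alone sketch of the same idea, with made-up hypothesis tensors standing in for the generator output (not part of the deleted file):

    import torch
    import torch.nn.functional as F

    # toy generated hypotheses of unequal length (stand-ins for gn[0]["tokens"])
    hyps = [torch.tensor([11, 12, 13]), torch.tensor([21, 22])]
    bos_token, pad_idx = 5, 1

    max_length = max(h.size(0) for h in hyps)
    src_tokens = torch.empty((len(hyps), max_length + 1), dtype=torch.long)
    src_lengths = torch.empty(len(hyps), dtype=torch.long)
    for i, h in enumerate(hyps):
        h = torch.cat([h.new([bos_token]), h])                         # prepend the __lang__ BOS
        h = F.pad(h, (0, max_length + 1 - h.size(0)), value=pad_idx)   # right-pad to a common length
        src_tokens[i] = h
        src_lengths[i] = hyps[i].size(0) + 1

    # src_tokens: [[5, 11, 12, 13], [5, 21, 22, 1]], src_lengths: [4, 3]
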
- def generate(self, smp, model):
- model.eval()
- orig_lang = (
- self.dictionary[smp["net_input"]["src_tokens"][0][0]]
- .replace(" ", "")
- .replace("_", "")
- )
- bos_token = smp["net_input"]["prev_output_tokens"][0][0]
- with torch.no_grad():
- generated = self.sequence_generators[orig_lang].generate(
- models=[model], sample=smp, bos_token=bos_token
- )
- return generated
-
- def get_other_lang(self, lang):
- # TODO: allow more complex mapping
- if lang != self.mono_langs[0]:
- return self.mono_langs[0]
- if len(self.mono_langs) == 2:
- return self.mono_langs[1]
- return self.mono_langs[np.random.randint(1, len(self.mono_langs))]
-
- def train_step(
- self, sample, model, criterion, optimizer, update_num, ignore_grad=False
- ):
-
- model.train()
- model.set_num_updates(update_num)
-
- agg_loss, agg_sample_size = 0.0, 0.0
- agg_logging_output: Dict[str, float] = defaultdict(float)
-
- dataset_keys = self.datasets["train"].datasets.keys()
-
- weights = {
- "BT": self.lambda_bt(update_num),
- "DENOISE": self.lambda_dae(update_num),
- }
- log_keys = {"BT": "bt_", "DENOISE": "dae_"}
-
- for dataset_key in dataset_keys:
- smp = sample[dataset_key]
- mono_lang, task_subtype = dataset_key.split("-")
- if weights[task_subtype] == 0:
- continue
-
- if task_subtype == "BT":
- with torch.autograd.profiler.record_function("backtranslation"):
- model.eval()
- # TODO: Could we translate to several languages at once?
- # This would allow sharing encoder_out and maximizing GPU usage.
- other_lang = self.get_other_lang(mono_lang)
- self.backtranslate_sample(smp, mono_lang, other_lang)
- self.display_samples_once_in_a_while(smp, mono_lang, other_lang)
- model.train()
-
- # Like in FairseqTask.train_step
- with torch.autograd.profiler.record_function("forward"):
- loss, sample_size, logging_output = criterion(model, smp)
- loss *= weights[task_subtype]
- if ignore_grad:
- loss *= 0
- with torch.autograd.profiler.record_function("backward"):
- optimizer.backward(loss)
-
- agg_loss += loss.item()
- agg_sample_size += sample_size
- for k in logging_output:
- agg_logging_output[log_keys[task_subtype] + k] += logging_output[k]
- agg_logging_output[k] += logging_output[k]
-
- return agg_loss, agg_sample_size, agg_logging_output
-
- def get_bos_token_from_sample(self, sample):
- net_input = sample["net_input"]
- source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item()
- source_lang_token = self.dictionary[source_lang_token_id].replace("_", "")
- target_lang_token_id = _lang_token_index(
- self.dictionary, self.get_other_lang(source_lang_token)
- )
-
- return target_lang_token_id
-
- def reduce_metrics(self, logging_outputs, criterion):
- super().reduce_metrics(logging_outputs, criterion)
- bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs)
- if bt_sample_size:
- bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs)
- bt_loss_sum *= 1 / bt_sample_size / math.log(2)
- metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3)
-
- bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs)
- bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs)
- bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2)
- metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3)
- metrics.log_derived(
- "bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg)
- )
-
- dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs)
- if dae_sample_size:
- dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs)
- dae_loss_sum *= 1 / dae_sample_size / math.log(2)
- metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3)
-
- dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs)
- dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs)
- dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2)
- metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3)
- metrics.log_derived(
- "dae_ppl",
- lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg),
- )
-
-
-@torch.no_grad()
-def extend_embedding(
- emb: nn.Module, new_vocab_size: int, copy_from_token_id: int
-) -> None:
- old_emb_data = emb.weight.data
- (old_vocab_size, dim) = old_emb_data.shape
- assert new_vocab_size >= old_vocab_size
-
- if new_vocab_size > old_vocab_size:
- emb.weight.data = torch.zeros((new_vocab_size, dim))
- emb.weight.data[:old_vocab_size, :] = old_emb_data
- # initialize new embeddings
- emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id]
- if hasattr(emb, "num_embeddings"):
- emb.num_embeddings = new_vocab_size
- if hasattr(emb, "out_features"):
- emb.out_features = new_vocab_size
-
- if getattr(emb, "bias", None) is None:
- return
-
- # Fix the bias.
- # Bias shape can be different from the previous vocab size
- # if the weight matrix was shared and already extended but not the bias.
- (old_vocab_size,) = emb.bias.shape
- assert new_vocab_size >= old_vocab_size
- if new_vocab_size > old_vocab_size:
- old_bias = emb.bias.data
- new_bias = torch.zeros(
- (new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device
- )
- new_bias[:old_vocab_size] = old_bias
- emb.bias.data = new_bias
-
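A quick usage sketch for extend_embedding above (toy sizes; assumes the function as defined in this file): grow a 10-row embedding to 12 rows, with the new rows initialized from token 0:

    import torch
    import torch.nn as nn

    emb = nn.Embedding(10, 4)
    extend_embedding(emb, new_vocab_size=12, copy_from_token_id=0)
    assert emb.weight.shape == (12, 4)
    assert emb.num_embeddings == 12
    # the two new rows are copies of row 0
    assert torch.equal(emb.weight[10], emb.weight[0])
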
-
-def add_special_tokens_to_dict_and_model(
- dictionary: "fairseq.data.Dictionary",
- model: nn.Module,
- mono_langs: Sequence[str],
-) -> None:
- embs = model.encoder.embed_tokens
- vocab_size, embedding_dim = embs.weight.shape
-
- # The model may or may not have a '<mask>' embedding yet
- assert (
- len(dictionary) <= vocab_size <= len(dictionary) + 1
- ), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})"
- # TODO: we should reuse the pretrained model dict which already has <mask>
- dictionary.add_symbol("<mask>")
-
- for lang in mono_langs:
- lang_token = _lang_token(lang)
- dictionary.add_symbol(lang_token)
- logger.info(
- f"dictionary: {len(dictionary)} -> {vocab_size} tokens "
- f"after adding {len(mono_langs)} lang tokens."
- )
-
- if len(dictionary) <= vocab_size:
- return
-
- extend_embedding(embs, len(dictionary), dictionary.bos())
- dec_embs = model.decoder.embed_tokens
- extend_embedding(dec_embs, len(dictionary), dictionary.bos())
- lm_head = model.decoder.output_projection
- extend_embedding(lm_head, len(dictionary), dictionary.bos())
- assert lm_head.weight.shape == (len(dictionary), embedding_dim)
-
-
-def _lang_token(lang: str) -> str:
- return f"__{lang}__"
-
-
-def _lang_token_index(dictionary, lang: str) -> int:
- return dictionary.index(_lang_token(lang))
-
-
-@contextlib.contextmanager
-def assert_weights_have_changed(model: nn.Module):
- def checksum(model: nn.Module) -> float:
- return sum(p.sum().item() for p in model.parameters())
-
- initial_checksum = checksum(model)
- yield model
- final_checksum = checksum(model)
- logger.info(
- f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}"
- )
- assert initial_checksum != final_checksum, "Model hasn't changed!"
diff --git a/spaces/OFA-Sys/OFA-vqa/criterions/label_smoothed_cross_entropy.py b/spaces/OFA-Sys/OFA-vqa/criterions/label_smoothed_cross_entropy.py
deleted file mode 100644
index 73b36e750a0037cad8403e383d790f868b509d24..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/criterions/label_smoothed_cross_entropy.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from dataclasses import dataclass, field
-from typing import Optional
-
-import torch
-import torch.nn.functional as F
-import numpy as np
-from fairseq import metrics, utils
-from fairseq.criterions import FairseqCriterion, register_criterion
-from fairseq.dataclass import FairseqDataclass
-from omegaconf import II
-
-
-@dataclass
-class AjustLabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass):
- label_smoothing: float = field(
- default=0.0,
- metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
- )
- report_accuracy: bool = field(
- default=False,
- metadata={"help": "report accuracy metric"},
- )
- ignore_prefix_size: int = field(
- default=0,
- metadata={"help": "Ignore first N tokens"},
- )
- ignore_eos: bool = field(
- default=False,
- metadata={"help": "Ignore eos token"},
- )
- sentence_avg: bool = II("optimization.sentence_avg")
- drop_worst_ratio: float = field(
- default=0.0,
- metadata={"help": "ratio for discarding bad samples"},
- )
- drop_worst_after: int = field(
- default=0,
- metadata={"help": "steps for discarding bad samples"},
- )
- use_rdrop: bool = field(
- default=False, metadata={"help": "use R-Drop"}
- )
- reg_alpha: float = field(
- default=1.0, metadata={"help": "weight for R-Drop"}
- )
- sample_patch_num: int = field(
- default=196, metadata={"help": "sample patches for v1"}
- )
- constraint_range: Optional[str] = field(
- default=None,
- metadata={"help": "constraint range"}
- )
-
-
-def construct_rdrop_sample(x):
- if isinstance(x, dict):
- for key in x:
- x[key] = construct_rdrop_sample(x[key])
- return x
- elif isinstance(x, torch.Tensor):
- return x.repeat(2, *([1] * (x.dim()-1)))
- elif isinstance(x, int):
- return x * 2
- elif isinstance(x, np.ndarray):
- return x.repeat(2)
- else:
- raise NotImplementedError
-
-
-def kl_loss(p, q):
- p_loss = F.kl_div(p, torch.exp(q), reduction='sum')
- q_loss = F.kl_div(q, torch.exp(p), reduction='sum')
- loss = (p_loss + q_loss) / 2
- return loss
-
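R-Drop, as wired up by construct_rdrop_sample and kl_loss above, duplicates every sample so the model runs each input twice with different dropout masks, then penalizes the symmetric KL between the two output distributions. A minimal illustration with plain tensors; the random log-probs below merely stand in for two stochastic forward passes:

    import torch
    import torch.nn.functional as F

    x = torch.randn(4, 10)                      # a toy batch
    x2 = x.repeat(2, *([1] * (x.dim() - 1)))    # same duplication as construct_rdrop_sample
    assert x2.shape == (8, 10)

    # pretend these are the log-probs from two dropout-perturbed passes over x2
    lprobs = F.log_softmax(torch.randn(8, 10), dim=-1)
    p, q = lprobs[:4], lprobs[4:]

    # symmetric KL between the two halves, as in kl_loss(p, q)
    p_loss = F.kl_div(p, torch.exp(q), reduction="sum")
    q_loss = F.kl_div(q, torch.exp(p), reduction="sum")
    rdrop_penalty = (p_loss + q_loss) / 2
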
-
-def label_smoothed_nll_loss(
- lprobs, target, epsilon, update_num, reduce=True,
- drop_worst_ratio=0.0, drop_worst_after=0, use_rdrop=False, reg_alpha=1.0,
- constraint_masks=None, constraint_start=None, constraint_end=None
-):
- if target.dim() == lprobs.dim() - 1:
- target = target.unsqueeze(-1)
- nll_loss = -lprobs.gather(dim=-1, index=target).squeeze(-1)
- if constraint_masks is not None:
- smooth_loss = -lprobs.masked_fill(~constraint_masks, 0).sum(dim=-1, keepdim=True).squeeze(-1)
- eps_i = epsilon / (constraint_masks.sum(1) - 1 + 1e-6)
- elif constraint_start is not None and constraint_end is not None:
- constraint_range = [0, 1, 2, 3] + list(range(constraint_start, constraint_end))
- smooth_loss = -lprobs[:, constraint_range].sum(dim=-1, keepdim=True).squeeze(-1)
- eps_i = epsilon / (len(constraint_range) - 1 + 1e-6)
- else:
- smooth_loss = -lprobs.sum(dim=-1, keepdim=True).squeeze(-1)
- eps_i = epsilon / (lprobs.size(-1) - 1)
- loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
- if drop_worst_ratio > 0 and update_num > drop_worst_after:
- if use_rdrop:
- true_batch_size = loss.size(0) // 2
- _, indices = torch.topk(loss[:true_batch_size], k=int(true_batch_size * (1 - drop_worst_ratio)), largest=False)
- loss = torch.cat([loss[indices], loss[indices+true_batch_size]])
- nll_loss = torch.cat([nll_loss[indices], nll_loss[indices+true_batch_size]])
- lprobs = torch.cat([lprobs[indices], lprobs[indices+true_batch_size]])
- else:
- loss, indices = torch.topk(loss, k=int(loss.shape[0] * (1 - drop_worst_ratio)), largest=False)
- nll_loss = nll_loss[indices]
- lprobs = lprobs[indices]
-
- ntokens = loss.numel()
- nll_loss = nll_loss.sum()
- loss = loss.sum()
- if use_rdrop:
- true_batch_size = lprobs.size(0) // 2
- p = lprobs[:true_batch_size]
- q = lprobs[true_batch_size:]
- if constraint_start is not None and constraint_end is not None:
- constraint_range = [0, 1, 2, 3] + list(range(constraint_start, constraint_end))
- p = p[:, constraint_range]
- q = q[:, constraint_range]
- loss += kl_loss(p, q) * reg_alpha
-
- return loss, nll_loss, ntokens
-
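Stripped of the constraint, R-Drop and drop-worst branches, label_smoothed_nll_loss reduces to standard label smoothing: mix the gold-token NLL with the average NLL over the vocabulary. A small self-contained sketch with toy logits (illustrative, not the criterion's full code path):

    import torch
    import torch.nn.functional as F

    epsilon = 0.1
    logits = torch.randn(6, 50)                 # 6 target positions, vocabulary of 50
    target = torch.randint(0, 50, (6, 1))
    lprobs = F.log_softmax(logits, dim=-1)

    nll_loss = -lprobs.gather(dim=-1, index=target).squeeze(-1)
    smooth_loss = -lprobs.sum(dim=-1)           # NLL summed over the vocabulary
    eps_i = epsilon / (lprobs.size(-1) - 1)
    loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
    print(loss.sum(), nll_loss.sum())           # reduced as in the reduce=True case
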
-
-@register_criterion(
- "ajust_label_smoothed_cross_entropy", dataclass=AjustLabelSmoothedCrossEntropyCriterionConfig
-)
-class AjustLabelSmoothedCrossEntropyCriterion(FairseqCriterion):
- def __init__(
- self,
- task,
- sentence_avg,
- label_smoothing,
- ignore_prefix_size=0,
- ignore_eos=False,
- report_accuracy=False,
- drop_worst_ratio=0,
- drop_worst_after=0,
- use_rdrop=False,
- reg_alpha=1.0,
- sample_patch_num=196,
- constraint_range=None
- ):
- super().__init__(task)
- self.sentence_avg = sentence_avg
- self.eps = label_smoothing
- self.ignore_prefix_size = ignore_prefix_size
- self.ignore_eos = ignore_eos
- self.report_accuracy = report_accuracy
- self.drop_worst_ratio = drop_worst_ratio
- self.drop_worst_after = drop_worst_after
- self.use_rdrop = use_rdrop
- self.reg_alpha = reg_alpha
- self.sample_patch_num = sample_patch_num
-
- self.constraint_start = None
- self.constraint_end = None
- if constraint_range is not None:
- constraint_start, constraint_end = constraint_range.split(',')
- self.constraint_start = int(constraint_start)
- self.constraint_end = int(constraint_end)
-
- def forward(self, model, sample, update_num=0, reduce=True):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
- if isinstance(sample, list):
- if self.sample_patch_num > 0:
- sample[0]['net_input']['sample_patch_num'] = self.sample_patch_num
- loss_v1, sample_size_v1, logging_output_v1 = self.forward(model, sample[0], update_num, reduce)
- loss_v2, sample_size_v2, logging_output_v2 = self.forward(model, sample[1], update_num, reduce)
- loss = loss_v1 / sample_size_v1 + loss_v2 / sample_size_v2
- sample_size = 1
- logging_output = {
- "loss": loss.data,
- "loss_v1": loss_v1.data,
- "loss_v2": loss_v2.data,
- "nll_loss": logging_output_v1["nll_loss"].data / sample_size_v1 + logging_output_v2["nll_loss"].data / sample_size_v2,
- "ntokens": logging_output_v1["ntokens"] + logging_output_v2["ntokens"],
- "nsentences": logging_output_v1["nsentences"] + logging_output_v2["nsentences"],
- "sample_size": 1,
- "sample_size_v1": sample_size_v1,
- "sample_size_v2": sample_size_v2,
- }
- return loss, sample_size, logging_output
-
- if self.use_rdrop:
- construct_rdrop_sample(sample)
-
- net_output = model(**sample["net_input"])
- loss, nll_loss, ntokens = self.compute_loss(model, net_output, sample, update_num, reduce=reduce)
- sample_size = (
- sample["target"].size(0) if self.sentence_avg else ntokens
- )
- logging_output = {
- "loss": loss.data,
- "nll_loss": nll_loss.data,
- "ntokens": sample["ntokens"],
- "nsentences": sample["nsentences"],
- "sample_size": sample_size,
- }
- if self.report_accuracy:
- n_correct, total = self.compute_accuracy(model, net_output, sample)
- logging_output["n_correct"] = utils.item(n_correct.data)
- logging_output["total"] = utils.item(total.data)
- return loss, sample_size, logging_output
-
- def get_lprobs_and_target(self, model, net_output, sample):
- conf = sample['conf'][:, None, None] if 'conf' in sample and sample['conf'] is not None else 1
- constraint_masks = None
- if "constraint_masks" in sample and sample["constraint_masks"] is not None:
- constraint_masks = sample["constraint_masks"]
- net_output[0].masked_fill_(~constraint_masks, -math.inf)
- if self.constraint_start is not None and self.constraint_end is not None:
- net_output[0][:, :, 4:self.constraint_start] = -math.inf
- net_output[0][:, :, self.constraint_end:] = -math.inf
- lprobs = model.get_normalized_probs(net_output, log_probs=True) * conf
- target = model.get_targets(sample, net_output)
- if self.ignore_prefix_size > 0:
- lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
- target = target[:, self.ignore_prefix_size :].contiguous()
- if constraint_masks is not None:
- constraint_masks = constraint_masks[:, self.ignore_prefix_size :, :].contiguous()
- if self.ignore_eos:
- bsz, seq_len, embed_dim = lprobs.size()
- eos_indices = target.eq(self.task.tgt_dict.eos())
- lprobs = lprobs[~eos_indices].reshape(bsz, seq_len-1, embed_dim)
- target = target[~eos_indices].reshape(bsz, seq_len-1)
- if constraint_masks is not None:
- constraint_masks = constraint_masks[~eos_indices].reshape(bsz, seq_len-1, embed_dim)
- if constraint_masks is not None:
- constraint_masks = constraint_masks.view(-1, constraint_masks.size(-1))
- return lprobs.view(-1, lprobs.size(-1)), target.view(-1), constraint_masks
-
- def compute_loss(self, model, net_output, sample, update_num, reduce=True):
- lprobs, target, constraint_masks = self.get_lprobs_and_target(model, net_output, sample)
- if constraint_masks is not None:
- constraint_masks = constraint_masks[target != self.padding_idx]
- lprobs = lprobs[target != self.padding_idx]
- target = target[target != self.padding_idx]
- loss, nll_loss, ntokens = label_smoothed_nll_loss(
- lprobs,
- target,
- self.eps,
- update_num,
- reduce=reduce,
- drop_worst_ratio=self.drop_worst_ratio,
- drop_worst_after=self.drop_worst_after,
- use_rdrop=self.use_rdrop,
- reg_alpha=self.reg_alpha,
- constraint_masks=constraint_masks,
- constraint_start=self.constraint_start,
- constraint_end=self.constraint_end
- )
- return loss, nll_loss, ntokens
-
- def compute_accuracy(self, model, net_output, sample):
- lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
- mask = target.ne(self.padding_idx)
- n_correct = torch.sum(
- lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
- )
- total = torch.sum(mask)
- return n_correct, total
-
- @classmethod
- def reduce_metrics(cls, logging_outputs) -> None:
- """Aggregate logging outputs from data parallel training."""
- loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
- loss_sum_v1 = sum(log.get("loss_v1", 0) for log in logging_outputs)
- loss_sum_v2 = sum(log.get("loss_v2", 0) for log in logging_outputs)
- nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
- ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
- nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
- sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
- sample_size_v1 = sum(log.get("sample_size_v1", 0) for log in logging_outputs)
- sample_size_v2 = sum(log.get("sample_size_v2", 0) for log in logging_outputs)
-
- metrics.log_scalar(
- "loss", loss_sum / sample_size, sample_size, round=3
- )
- metrics.log_scalar(
- "loss_v1", loss_sum_v1 / max(sample_size_v1, 1), max(sample_size_v1, 1), round=3
- )
- metrics.log_scalar(
- "loss_v2", loss_sum_v2 / max(sample_size_v2, 1), max(sample_size_v2, 1), round=3
- )
- metrics.log_scalar(
- "nll_loss", nll_loss_sum / sample_size, ntokens, round=3
- )
- metrics.log_derived(
- "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
- )
-
- metrics.log_scalar(
- "ntokens", ntokens, 1, round=3
- )
- metrics.log_scalar(
- "nsentences", nsentences, 1, round=3
- )
- metrics.log_scalar(
- "sample_size", sample_size, 1, round=3
- )
- metrics.log_scalar(
- "sample_size_v1", sample_size_v1, 1, round=3
- )
- metrics.log_scalar(
- "sample_size_v2", sample_size_v2, 1, round=3
- )
-
- total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
- if total > 0:
- metrics.log_scalar("total", total)
- n_correct = utils.item(
- sum(log.get("n_correct", 0) for log in logging_outputs)
- )
- metrics.log_scalar("n_correct", n_correct)
- metrics.log_derived(
- "accuracy",
- lambda meters: round(
- meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
- )
- if meters["total"].sum > 0
- else float("nan"),
- )
-
- @staticmethod
- def logging_outputs_can_be_summed() -> bool:
- """
- Whether the logging outputs returned by `forward` can be summed
- across workers prior to calling `reduce_metrics`. Setting this
- to True will improve distributed training speed.
- """
- return True
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/tasks/speech_recognition.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/tasks/speech_recognition.py
deleted file mode 100644
index d9f011d55ff4fdfeb4c04ca790c314d685708c3a..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/tasks/speech_recognition.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import json
-import os
-import re
-import sys
-
-import torch
-from examples.speech_recognition.data import AsrDataset
-from examples.speech_recognition.data.replabels import replabel_symbol
-from fairseq.data import Dictionary
-from fairseq.tasks import LegacyFairseqTask, register_task
-
-
-def get_asr_dataset_from_json(data_json_path, tgt_dict):
- """
- Parse data json and create dataset.
- See scripts/asr_prep_json.py, which packs the json from raw files
-
- Json example:
- {
- "utts": {
- "4771-29403-0025": {
- "input": {
- "length_ms": 170,
- "path": "/tmp/file1.flac"
- },
- "output": {
- "text": "HELLO \n",
- "token": "HE LLO",
- "tokenid": "4815, 861"
- }
- },
- "1564-142299-0096": {
- ...
- }
- }
- """
- if not os.path.isfile(data_json_path):
- raise FileNotFoundError("Dataset not found: {}".format(data_json_path))
- with open(data_json_path, "rb") as f:
- data_samples = json.load(f)["utts"]
- assert len(data_samples) != 0
- sorted_samples = sorted(
- data_samples.items(),
- key=lambda sample: int(sample[1]["input"]["length_ms"]),
- reverse=True,
- )
- aud_paths = [s[1]["input"]["path"] for s in sorted_samples]
- ids = [s[0] for s in sorted_samples]
- speakers = []
- for s in sorted_samples:
- m = re.search("(.+?)-(.+?)-(.+?)", s[0])
- speakers.append(m.group(1) + "_" + m.group(2))
- frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples]
- tgt = [
- [int(i) for i in s[1]["output"]["tokenid"].split(", ")]
- for s in sorted_samples
- ]
- # append eos
- tgt = [[*t, tgt_dict.eos()] for t in tgt]
- return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers)
-
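For reference, a minimal sketch of the JSON layout get_asr_dataset_from_json expects, parsed and sorted by utterance length the same way as above. It uses plain json only (no fairseq objects); the utterance ids, paths and token ids are made up:

    import json

    data_json = {
        "utts": {
            "4771-29403-0025": {
                "input": {"length_ms": 170, "path": "/tmp/file1.flac"},
                "output": {"text": "HELLO", "token": "HE LLO", "tokenid": "4815, 861"},
            },
            "4771-29403-0026": {
                "input": {"length_ms": 420, "path": "/tmp/file2.flac"},
                "output": {"text": "WORLD", "token": "WO RLD", "tokenid": "77, 861"},
            },
        }
    }
    samples = data_json["utts"]     # in the task this comes from json.load on "{split}.json"
    sorted_samples = sorted(
        samples.items(), key=lambda s: int(s[1]["input"]["length_ms"]), reverse=True
    )
    tgt = [[int(i) for i in s[1]["output"]["tokenid"].split(", ")] for s in sorted_samples]
    print([s[0] for s in sorted_samples], tgt)   # longest utterance first
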
-
-@register_task("speech_recognition")
-class SpeechRecognitionTask(LegacyFairseqTask):
- """
- Task for training speech recognition model.
- """
-
- @staticmethod
- def add_args(parser):
- """Add task-specific arguments to the parser."""
- parser.add_argument("data", help="path to data directory")
- parser.add_argument(
- "--silence-token", default="\u2581", help="token for silence (used by w2l)"
- )
- parser.add_argument(
- "--max-source-positions",
- default=sys.maxsize,
- type=int,
- metavar="N",
- help="max number of frames in the source sequence",
- )
- parser.add_argument(
- "--max-target-positions",
- default=1024,
- type=int,
- metavar="N",
- help="max number of tokens in the target sequence",
- )
-
- def __init__(self, args, tgt_dict):
- super().__init__(args)
- self.tgt_dict = tgt_dict
-
- @classmethod
- def setup_task(cls, args, **kwargs):
- """Setup the task (e.g., load dictionaries)."""
- dict_path = os.path.join(args.data, "dict.txt")
- if not os.path.isfile(dict_path):
- raise FileNotFoundError("Dict not found: {}".format(dict_path))
- tgt_dict = Dictionary.load(dict_path)
-
- if args.criterion == "ctc_loss":
- tgt_dict.add_symbol("")
- elif args.criterion == "asg_loss":
- for i in range(1, args.max_replabel + 1):
- tgt_dict.add_symbol(replabel_symbol(i))
-
- print("| dictionary: {} types".format(len(tgt_dict)))
- return cls(args, tgt_dict)
-
- def load_dataset(self, split, combine=False, **kwargs):
- """Load a given dataset split.
-
- Args:
- split (str): name of the split (e.g., train, valid, test)
- """
- data_json_path = os.path.join(self.args.data, "{}.json".format(split))
- self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)
-
- def build_generator(self, models, args, **unused):
- w2l_decoder = getattr(args, "w2l_decoder", None)
- if w2l_decoder == "viterbi":
- from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
-
- return W2lViterbiDecoder(args, self.target_dictionary)
- elif w2l_decoder == "kenlm":
- from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
-
- return W2lKenLMDecoder(args, self.target_dictionary)
- elif w2l_decoder == "fairseqlm":
- from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
-
- return W2lFairseqLMDecoder(args, self.target_dictionary)
- else:
- return super().build_generator(models, args)
-
- @property
- def target_dictionary(self):
- """Return the :class:`~fairseq.data.Dictionary` for the language
- model."""
- return self.tgt_dict
-
- @property
- def source_dictionary(self):
- """Return the source :class:`~fairseq.data.Dictionary` (if applicable
- for this task)."""
- return None
-
- def max_positions(self):
- """Return the max speech and sentence length allowed by the task."""
- return (self.args.max_source_positions, self.args.max_target_positions)
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py
deleted file mode 100644
index d6cf06e5872cb86e5c2e726153c7a80c78db9d1e..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..ops import emulate_int
-
-
-class IntEmbedding(nn.Module):
- """
- Quantized counterpart of the nn.Embedding module that applies QuantNoise during training.
-
- Args:
- - num_embeddings: number of tokens
- - embedding_dim: embedding dimension
- - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- - bits: number of bits
- - method: choose among {"tensor", "histogram", "channel"}
- - update_step: recompute scale and zero_point every update_step iterations
-
- Remarks:
- - We use the straight-through estimator so that the gradients
- back-propagate nicely through the network; this is implemented with
- the detach() trick
- - Parameters scale and zero_point are recomputed every update_step
- forward passes to reduce the overhead
- - At test time, the weights are fully quantized
- """
-
- def __init__(
- self,
- num_embeddings,
- embedding_dim,
- padding_idx=None,
- max_norm=None,
- norm_type=2.0,
- scale_grad_by_freq=False,
- sparse=False,
- _weight=None,
- p=0,
- update_step=1000,
- bits=8,
- method="histogram",
- ):
- super(IntEmbedding, self).__init__()
- self.num_embeddings = num_embeddings
- self.embedding_dim = embedding_dim
- if padding_idx is not None:
- if padding_idx > 0:
- assert (
- padding_idx < self.num_embeddings
- ), "Padding_idx must be within num_embeddings"
- elif padding_idx < 0:
- assert (
- padding_idx >= -self.num_embeddings
- ), "Padding_idx must be within num_embeddings"
- padding_idx = self.num_embeddings + padding_idx
- self.padding_idx = padding_idx
- self.max_norm = max_norm
- self.norm_type = norm_type
- self.scale_grad_by_freq = scale_grad_by_freq
- if _weight is None:
- self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim))
- self.reset_parameters()
- else:
- assert list(_weight.shape) == [
- num_embeddings,
- embedding_dim,
- ], "Shape of weight does not match num_embeddings and embedding_dim"
- self.weight = nn.Parameter(_weight)
- self.sparse = sparse
-
- # quantization parameters
- self.p = p
- self.bits = bits
- self.method = method
- self.update_step = update_step
- self.counter = 0
-
- def reset_parameters(self):
- nn.init.normal_(self.weight)
- if self.padding_idx is not None:
- with torch.no_grad():
- self.weight[self.padding_idx].fill_(0)
-
- def forward(self, input):
- # train with QuantNoise and evaluate the fully quantized network
- p = self.p if self.training else 1
-
- # recompute scale and zero_point every update_step iterations
- if self.counter % self.update_step == 0:
- self.scale = None
- self.zero_point = None
- self.counter += 1
-
- # quantize weight
- weight_quantized, self.scale, self.zero_point = emulate_int(
- self.weight.detach(),
- bits=self.bits,
- method=self.method,
- scale=self.scale,
- zero_point=self.zero_point,
- )
-
- # mask to apply noise
- mask = torch.zeros_like(self.weight)
- mask.bernoulli_(1 - p)
- noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
-
- # using straight-through estimator (STE)
- clamp_low = -self.scale * self.zero_point
- clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
- weight = (
- torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
- + noise.detach()
- )
-
- # return output
- output = F.embedding(
- input,
- weight,
- self.padding_idx,
- self.max_norm,
- self.norm_type,
- self.scale_grad_by_freq,
- self.sparse,
- )
- return output
-
- def extra_repr(self):
- s = "{num_embeddings}, {embedding_dim}"
- if self.padding_idx is not None:
- s += ", padding_idx={padding_idx}"
- if self.max_norm is not None:
- s += ", max_norm={max_norm}"
- if self.norm_type != 2:
- s += ", norm_type={norm_type}"
- if self.scale_grad_by_freq is not False:
- s += ", scale_grad_by_freq={scale_grad_by_freq}"
- if self.sparse is not False:
- s += ", sparse=True"
- s += "quant_noise={p}, bits={bits}, method={method}"
- return s.format(**self.__dict__)
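The training-time trick in IntEmbedding.forward (quantize the weights, apply that quantization to only a random fraction p of them, and let gradients flow through a straight-through estimator) can be sketched without the fairseq emulate_int helper. The uniform affine quantization below is a simplified stand-in, not the library's implementation:

    import torch

    def toy_quant_noise(weight: torch.Tensor, p: float = 0.5, bits: int = 8):
        # crude uniform affine quantization as a stand-in for emulate_int
        scale = (weight.max() - weight.min()) / (2 ** bits - 1)
        zero_point = torch.round(-weight.min() / scale)
        w_q = (torch.round(weight / scale + zero_point) - zero_point) * scale

        # keep a random (1 - p) fraction of weights untouched, quantize the rest
        mask = torch.zeros_like(weight).bernoulli_(1 - p)
        noise = (w_q - weight).masked_fill(mask.bool(), 0)

        # straight-through estimator: the forward pass sees (partially) quantized
        # values, the backward pass sees the identity w.r.t. the original weight
        return weight + noise.detach()

    w = torch.randn(100, 16, requires_grad=True)
    out = toy_quant_noise(w).sum()
    out.backward()          # gradients flow to w as if no quantization happened
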
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_transformer_sentence_encoder_layer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_transformer_sentence_encoder_layer.py
deleted file mode 100644
index d95da59c2471bfa858fd627605196d7f41f9ec12..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/sparse_transformer_sentence_encoder_layer.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from fairseq.modules import TransformerSentenceEncoderLayer
-from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
-
-
-class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
- """
- Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)
- """
-
- def __init__(
- self,
- embedding_dim: int = 768,
- ffn_embedding_dim: int = 3072,
- num_attention_heads: int = 8,
- dropout: float = 0.1,
- attention_dropout: float = 0.1,
- activation_dropout: float = 0.1,
- activation_fn: str = "relu",
- export: bool = False,
- is_bidirectional: bool = True,
- stride: int = 32,
- expressivity: int = 8,
- ) -> None:
-
- super().__init__(
- embedding_dim,
- ffn_embedding_dim,
- num_attention_heads,
- dropout,
- attention_dropout,
- activation_dropout,
- activation_fn,
- export,
- )
-
- self.self_attn = SparseMultiheadAttention(
- self.embedding_dim,
- num_attention_heads,
- dropout=attention_dropout,
- add_bias_kv=False,
- add_zero_attn=False,
- self_attention=True,
- is_bidirectional=is_bidirectional,
- stride=stride,
- expressivity=expressivity,
- )
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/__init__.py
deleted file mode 100644
index be783be896396ff659c0bd173a7acebb8a2d165d..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/__init__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-"""isort:skip_file"""
-
-import importlib
-import os
-
-from fairseq import registry
-from fairseq.optim.bmuf import FairseqBMUF # noqa
-from fairseq.optim.fairseq_optimizer import ( # noqa
- FairseqOptimizer,
- LegacyFairseqOptimizer,
-)
-from fairseq.optim.amp_optimizer import AMPOptimizer
-from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer
-from fairseq.optim.shard import shard_
-from omegaconf import DictConfig
-
-__all__ = [
- "AMPOptimizer",
- "FairseqOptimizer",
- "FP16Optimizer",
- "MemoryEfficientFP16Optimizer",
- "shard_",
-]
-
-(
- _build_optimizer,
- register_optimizer,
- OPTIMIZER_REGISTRY,
- OPTIMIZER_DATACLASS_REGISTRY,
-) = registry.setup_registry("--optimizer", base_class=FairseqOptimizer, required=True)
-
-
-def build_optimizer(cfg: DictConfig, params, *extra_args, **extra_kwargs):
- if all(isinstance(p, dict) for p in params):
- params = [t for p in params for t in p.values()]
- params = list(filter(lambda p: p.requires_grad, params))
- return _build_optimizer(cfg, params, *extra_args, **extra_kwargs)
-
-
-# automatically import any Python files in the optim/ directory
-for file in sorted(os.listdir(os.path.dirname(__file__))):
- if file.endswith(".py") and not file.startswith("_"):
- file_name = file[: file.find(".py")]
- importlib.import_module("fairseq.optim." + file_name)
diff --git a/spaces/OIUGLK/bingo/src/components/turn-counter.tsx b/spaces/OIUGLK/bingo/src/components/turn-counter.tsx
deleted file mode 100644
index 08a9e488f044802a8600f4d195b106567c35aab4..0000000000000000000000000000000000000000
--- a/spaces/OIUGLK/bingo/src/components/turn-counter.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import React from 'react'
-import { Throttling } from '@/lib/bots/bing/types'
-
-export interface TurnCounterProps {
- throttling?: Throttling
-}
-
-export function TurnCounter({ throttling }: TurnCounterProps) {
- if (!throttling) {
- return null
- }
-
- return (
-
-
- {throttling.numUserMessagesInConversation}
- 共
- {throttling.maxNumUserMessagesInConversation}
-
-
-
- )
-}
diff --git a/spaces/Omnibus/TTS-voice-clone/README.md b/spaces/Omnibus/TTS-voice-clone/README.md
deleted file mode 100644
index 970a308912fada668a7f5de7d8379350cd9f72cb..0000000000000000000000000000000000000000
--- a/spaces/Omnibus/TTS-voice-clone/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: TTS Voice Clone
-emoji: 🐨
-colorFrom: indigo
-colorTo: red
-sdk: gradio
-sdk_version: 3.41.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/evaluation/evaluator.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/evaluation/evaluator.py
deleted file mode 100644
index baf996002b2fddc8c1952408d450b5bf69394f0a..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/evaluation/evaluator.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import datetime
-import logging
-import time
-from collections import OrderedDict, abc
-from contextlib import ExitStack, contextmanager
-from typing import List, Union
-import torch
-from torch import nn
-
-from detectron2.utils.comm import get_world_size, is_main_process
-from detectron2.utils.logger import log_every_n_seconds
-
-
-class DatasetEvaluator:
- """
- Base class for a dataset evaluator.
-
- The function :func:`inference_on_dataset` runs the model over
- all samples in the dataset, and has a DatasetEvaluator process the inputs/outputs.
-
- This class will accumulate information of the inputs/outputs (by :meth:`process`),
- and produce evaluation results in the end (by :meth:`evaluate`).
- """
-
- def reset(self):
- """
- Preparation for a new round of evaluation.
- Should be called before starting a round of evaluation.
- """
- pass
-
- def process(self, inputs, outputs):
- """
- Process the pair of inputs and outputs.
- If they contain batches, the pairs can be consumed one-by-one using `zip`:
-
- .. code-block:: python
-
- for input_, output in zip(inputs, outputs):
- # do evaluation on single input/output pair
- ...
-
- Args:
- inputs (list): the inputs that are used to call the model.
- outputs (list): the return value of `model(inputs)`
- """
- pass
-
- def evaluate(self):
- """
- Evaluate/summarize the performance, after processing all input/output pairs.
-
- Returns:
- dict:
- A new evaluator class can return a dict of arbitrary format
- as long as the user can process the results.
- In our train_net.py, we expect the following format:
-
- * key: the name of the task (e.g., bbox)
- * value: a dict of {metric name: score}, e.g.: {"AP50": 80}
- """
- pass
-
-
-class DatasetEvaluators(DatasetEvaluator):
- """
- Wrapper class to combine multiple :class:`DatasetEvaluator` instances.
-
- This class dispatches every evaluation call to
- all of its :class:`DatasetEvaluator`.
- """
-
- def __init__(self, evaluators):
- """
- Args:
- evaluators (list): the evaluators to combine.
- """
- super().__init__()
- self._evaluators = evaluators
-
- def reset(self):
- for evaluator in self._evaluators:
- evaluator.reset()
-
- def process(self, inputs, outputs):
- for evaluator in self._evaluators:
- evaluator.process(inputs, outputs)
-
- def evaluate(self):
- results = OrderedDict()
- for evaluator in self._evaluators:
- result = evaluator.evaluate()
- if is_main_process() and result is not None:
- for k, v in result.items():
- assert (
- k not in results
- ), "Different evaluators produce results with the same key {}".format(k)
- results[k] = v
- return results
-
-
-def inference_on_dataset(
- model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None]
-):
- """
- Run model on the data_loader and evaluate the metrics with evaluator.
- Also benchmark the inference speed of `model.__call__` accurately.
- The model will be used in eval mode.
-
- Args:
- model (callable): a callable which takes an object from
- `data_loader` and returns some outputs.
-
- If it's an nn.Module, it will be temporarily set to `eval` mode.
- If you wish to evaluate a model in `training` mode instead, you can
- wrap the given model and override its behavior of `.eval()` and `.train()`.
- data_loader: an iterable object with a length.
- The elements it generates will be the inputs to the model.
- evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark,
- but don't want to do any evaluation.
-
- Returns:
- The return value of `evaluator.evaluate()`
- """
- num_devices = get_world_size()
- logger = logging.getLogger(__name__)
- logger.info("Start inference on {} batches".format(len(data_loader)))
-
- total = len(data_loader) # inference data loader must have a fixed length
- if evaluator is None:
- # create a no-op evaluator
- evaluator = DatasetEvaluators([])
- if isinstance(evaluator, abc.MutableSequence):
- evaluator = DatasetEvaluators(evaluator)
- evaluator.reset()
-
- num_warmup = min(5, total - 1)
- start_time = time.perf_counter()
- total_data_time = 0
- total_compute_time = 0
- total_eval_time = 0
- with ExitStack() as stack:
- if isinstance(model, nn.Module):
- stack.enter_context(inference_context(model))
- stack.enter_context(torch.no_grad())
-
- start_data_time = time.perf_counter()
- for idx, inputs in enumerate(data_loader):
- total_data_time += time.perf_counter() - start_data_time
- if idx == num_warmup:
- start_time = time.perf_counter()
- total_data_time = 0
- total_compute_time = 0
- total_eval_time = 0
-
- start_compute_time = time.perf_counter()
- outputs = model(inputs)
- if torch.cuda.is_available():
- torch.cuda.synchronize()
- total_compute_time += time.perf_counter() - start_compute_time
-
- start_eval_time = time.perf_counter()
- evaluator.process(inputs, outputs)
- total_eval_time += time.perf_counter() - start_eval_time
-
- iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
- data_seconds_per_iter = total_data_time / iters_after_start
- compute_seconds_per_iter = total_compute_time / iters_after_start
- eval_seconds_per_iter = total_eval_time / iters_after_start
- total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start
- if idx >= num_warmup * 2 or compute_seconds_per_iter > 5:
- eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1)))
- log_every_n_seconds(
- logging.INFO,
- (
- f"Inference done {idx + 1}/{total}. "
- f"Dataloading: {data_seconds_per_iter:.4f} s/iter. "
- f"Inference: {compute_seconds_per_iter:.4f} s/iter. "
- f"Eval: {eval_seconds_per_iter:.4f} s/iter. "
- f"Total: {total_seconds_per_iter:.4f} s/iter. "
- f"ETA={eta}"
- ),
- n=5,
- )
- start_data_time = time.perf_counter()
-
- # Measure the time only for this worker (before the synchronization barrier)
- total_time = time.perf_counter() - start_time
- total_time_str = str(datetime.timedelta(seconds=total_time))
- # NOTE this format is parsed by grep
- logger.info(
- "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format(
- total_time_str, total_time / (total - num_warmup), num_devices
- )
- )
- total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
- logger.info(
- "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format(
- total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
- )
- )
-
- results = evaluator.evaluate()
- # An evaluator may return None when not in main process.
- # Replace it by an empty dict instead to make it easier for downstream code to handle
- if results is None:
- results = {}
- return results
-
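A minimal usage sketch for inference_on_dataset, assuming the DatasetEvaluator base class defined above; the toy model, list-based loader and CountingEvaluator are made up for illustration and are not detectron2 APIs:

    class CountingEvaluator(DatasetEvaluator):
        def reset(self):
            self.n = 0

        def process(self, inputs, outputs):
            self.n += len(outputs)

        def evaluate(self):
            return {"count": {"num_outputs": self.n}}


    def toy_model(inputs):
        # pretend inference: one output dict per input dict
        return [{"score": 1.0} for _ in inputs]


    data_loader = [[{"image_id": i}] for i in range(10)]   # 10 batches of size 1
    results = inference_on_dataset(toy_model, data_loader, CountingEvaluator())
    print(results)    # {'count': {'num_outputs': 10}}
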
-
-@contextmanager
-def inference_context(model):
- """
- A context where the model is temporarily changed to eval mode,
- and restored to previous mode afterwards.
-
- Args:
- model: a torch Module
- """
- training_mode = model.training
- model.eval()
- yield
- model.train(training_mode)
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/export/README.md b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/export/README.md
deleted file mode 100644
index 9fcd33513fb81ef3aeb4d3c8d9732324dffa2646..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/export/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-
-This directory contains code to prepare a detectron2 model for deployment.
-Currently it supports exporting a detectron2 model to Caffe2 format through ONNX.
-
-Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage.
-
-
-### Acknowledgements
-
-Thanks to Mobile Vision team at Facebook for developing the Caffe2 conversion tools.
-
-Thanks to the Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3), who
-helped export Detectron2 models to TorchScript.
diff --git a/spaces/OpenShape/openshape-demo/fetch_sample_images.py b/spaces/OpenShape/openshape-demo/fetch_sample_images.py
deleted file mode 100644
index fa13dfe010cfabd9636181737b0becd1f0f68eb9..0000000000000000000000000000000000000000
--- a/spaces/OpenShape/openshape-demo/fetch_sample_images.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import io
-import os
-import cv2
-import tqdm
-import numpy
-import requests
-
-
-def get_bytes(x: str):
- return numpy.frombuffer(requests.get(x).content, numpy.uint8)
-
-
-def get_image(x):
- return cv2.imdecode(get_bytes(x), cv2.IMREAD_COLOR)
-
-
-os.chdir(os.path.dirname(os.path.abspath(__file__)))
-# classification
-# uids = """
-# a784af0713a643b19ffcf65194bc0fbf
-# 569a71ccf4d94c1585c9573521fb998f
-# 4e6d591f6e50493aa5e31355084fc4e8
-# """.split()
-
-# caption
-# uids = """
-# 283c845f2c2c4567971d42dc46831372
-# fc655111af5b49bf84722affc3ddba00
-# fa17099f18804409bc6d9e8e397b4681
-# d3c0e3495b5d40d087a7f82d1690b9cb
-# 4b27adcf92f644bdabf8ecc6c5bef399
-# f8c13a19e84343e7b644c19f7b9488d3
-# """.split()
-
-# sd
-uids = """
-b464ff8d732d44fab00b903652c8274e
-efae586a477b49cea1a0777487cc2df3
-f8272460c67d476a8af29e1f2e344bc0
-ff2875fb1a5b4771805a5fd35c8fe7bb
-b8db8dc5caad4fa5842a9ed6dbd2e9d6
-tpvzmLUXAURQ7ZxccJIBZvcIDlr
-""".split()
-
-
-uri_fmt = 'https://objaverse-thumbnail-images.s3.us-west-2.amazonaws.com/{}.jpg'
-for u in tqdm.tqdm(uids):
- img = get_image(uri_fmt.format(u))
- max_edge = max(img.shape)
- if max_edge > 512:
- s = 512 / max_edge
- img = cv2.resize(img, [0, 0], fx=s, fy=s, interpolation=cv2.INTER_CUBIC)
- cv2.imwrite("samples/sd/%s.jpg" % u, img)
diff --git a/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/midas/__init__.py b/spaces/PAIR/PAIR-Diffusion/ldm/modules/midas/midas/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/PKUWilliamYang/StyleGANEX/models/stylegan2/lpips/networks_basic.py b/spaces/PKUWilliamYang/StyleGANEX/models/stylegan2/lpips/networks_basic.py
deleted file mode 100644
index ec3f045f9f22dbf49e18e9edca25d04ccc551da9..0000000000000000000000000000000000000000
--- a/spaces/PKUWilliamYang/StyleGANEX/models/stylegan2/lpips/networks_basic.py
+++ /dev/null
@@ -1,187 +0,0 @@
-
-from __future__ import absolute_import
-
-import sys
-import torch
-import torch.nn as nn
-import torch.nn.init as init
-from torch.autograd import Variable
-import numpy as np
-from pdb import set_trace as st
-from skimage import color
-from IPython import embed
-from models.stylegan2.lpips import pretrained_networks as pn
-
-import models.stylegan2.lpips as util
-
-def spatial_average(in_tens, keepdim=True):
- return in_tens.mean([2,3],keepdim=keepdim)
-
-def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W
- in_H = in_tens.shape[2]
- scale_factor = 1.*out_H/in_H
-
- return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens)
-
-# Learned perceptual metric
-class PNetLin(nn.Module):
- def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, version='0.1', lpips=True):
- super(PNetLin, self).__init__()
-
- self.pnet_type = pnet_type
- self.pnet_tune = pnet_tune
- self.pnet_rand = pnet_rand
- self.spatial = spatial
- self.lpips = lpips
- self.version = version
- self.scaling_layer = ScalingLayer()
-
- if(self.pnet_type in ['vgg','vgg16']):
- net_type = pn.vgg16
- self.chns = [64,128,256,512,512]
- elif(self.pnet_type=='alex'):
- net_type = pn.alexnet
- self.chns = [64,192,384,256,256]
- elif(self.pnet_type=='squeeze'):
- net_type = pn.squeezenet
- self.chns = [64,128,256,384,384,512,512]
- self.L = len(self.chns)
-
- self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)
-
- if(lpips):
- self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
- self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
- self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
- self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
- self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
- self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4]
- if(self.pnet_type=='squeeze'): # 7 layers for squeezenet
- self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
- self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
- self.lins+=[self.lin5,self.lin6]
-
- def forward(self, in0, in1, retPerLayer=False):
- # v0.0 - original release had a bug, where input was not scaled
- in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1)
- outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)
- feats0, feats1, diffs = {}, {}, {}
-
- for kk in range(self.L):
- feats0[kk], feats1[kk] = util.normalize_tensor(outs0[kk]), util.normalize_tensor(outs1[kk])
- diffs[kk] = (feats0[kk]-feats1[kk])**2
-
- if(self.lpips):
- if(self.spatial):
- res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)]
- else:
- res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)]
- else:
- if(self.spatial):
- res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)]
- else:
- res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)]
-
- val = res[0]
- for l in range(1,self.L):
- val += res[l]
-
- if(retPerLayer):
- return (val, res)
- else:
- return val
-
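PNetLin.forward boils down to: unit-normalize each layer's features over the channel dimension, square the differences, weight them with a learned 1x1 conv, average spatially, and sum over layers. A compact sketch with random tensors standing in for the pretrained VGG/AlexNet activations and untrained 1x1 convs (illustrative only; normalize_tensor here mirrors the util helper used above):

    import torch
    import torch.nn as nn

    def normalize_tensor(feat, eps=1e-10):
        norm = torch.sqrt(torch.sum(feat ** 2, dim=1, keepdim=True))
        return feat / (norm + eps)

    chns = [64, 128, 256]                                 # a few toy channel widths
    lins = [nn.Conv2d(c, 1, 1, bias=False) for c in chns]

    feats0 = [torch.randn(1, c, 16, 16) for c in chns]    # "features" of image 0
    feats1 = [torch.randn(1, c, 16, 16) for c in chns]    # "features" of image 1

    val = 0.0
    for f0, f1, lin in zip(feats0, feats1, lins):
        diff = (normalize_tensor(f0) - normalize_tensor(f1)) ** 2
        val = val + lin(diff).mean([2, 3], keepdim=True)  # spatial average per layer
    print(val.flatten())    # the LPIPS-style distance
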
-class ScalingLayer(nn.Module):
- def __init__(self):
- super(ScalingLayer, self).__init__()
- self.register_buffer('shift', torch.Tensor([-.030,-.088,-.188])[None,:,None,None])
- self.register_buffer('scale', torch.Tensor([.458,.448,.450])[None,:,None,None])
-
- def forward(self, inp):
- return (inp - self.shift) / self.scale
-
-
-class NetLinLayer(nn.Module):
- ''' A single linear layer which does a 1x1 conv '''
- def __init__(self, chn_in, chn_out=1, use_dropout=False):
- super(NetLinLayer, self).__init__()
-
- layers = [nn.Dropout(),] if(use_dropout) else []
- layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),]
- self.model = nn.Sequential(*layers)
-
-
-class Dist2LogitLayer(nn.Module):
- ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''
- def __init__(self, chn_mid=32, use_sigmoid=True):
- super(Dist2LogitLayer, self).__init__()
-
- layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),]
- layers += [nn.LeakyReLU(0.2,True),]
- layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),]
- layers += [nn.LeakyReLU(0.2,True),]
- layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),]
- if(use_sigmoid):
- layers += [nn.Sigmoid(),]
- self.model = nn.Sequential(*layers)
-
- def forward(self,d0,d1,eps=0.1):
- return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1))
-
-class BCERankingLoss(nn.Module):
- def __init__(self, chn_mid=32):
- super(BCERankingLoss, self).__init__()
- self.net = Dist2LogitLayer(chn_mid=chn_mid)
- # self.parameters = list(self.net.parameters())
- self.loss = torch.nn.BCELoss()
-
- def forward(self, d0, d1, judge):
- per = (judge+1.)/2.
- self.logit = self.net.forward(d0,d1)
- return self.loss(self.logit, per)
-
-# L2, DSSIM metrics
-class FakeNet(nn.Module):
- def __init__(self, use_gpu=True, colorspace='Lab'):
- super(FakeNet, self).__init__()
- self.use_gpu = use_gpu
- self.colorspace=colorspace
-
-class L2(FakeNet):
-
- def forward(self, in0, in1, retPerLayer=None):
- assert(in0.size()[0]==1) # currently only supports batchSize 1
-
- if(self.colorspace=='RGB'):
- (N,C,X,Y) = in0.size()
- value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N)
- return value
- elif(self.colorspace=='Lab'):
- value = util.l2(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)),
- util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
- ret_var = Variable( torch.Tensor((value,) ) )
- if(self.use_gpu):
- ret_var = ret_var.cuda()
- return ret_var
-
-class DSSIM(FakeNet):
-
- def forward(self, in0, in1, retPerLayer=None):
- assert(in0.size()[0]==1) # currently only supports batchSize 1
-
- if(self.colorspace=='RGB'):
- value = util.dssim(1.*util.tensor2im(in0.data), 1.*util.tensor2im(in1.data), range=255.).astype('float')
- elif(self.colorspace=='Lab'):
- value = util.dssim(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)),
- util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
- ret_var = Variable( torch.Tensor((value,) ) )
- if(self.use_gpu):
- ret_var = ret_var.cuda()
- return ret_var
-
-def print_network(net):
- num_params = 0
- for param in net.parameters():
- num_params += param.numel()
- print('Network',net)
- print('Total number of parameters: %d' % num_params)
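
The forward pass above reduces to a simple recipe: unit-normalize each layer's activations along the channel axis, square the difference, collapse the channel dimension (with the learned 1x1 `lins` convolutions when `lpips=True`, otherwise a plain sum), spatially average, and add the per-layer results. Below is a minimal, self-contained sketch of that arithmetic with random tensors standing in for real backbone activations; `normalize_tensor` and `spatial_average` are re-implemented inline under the assumption that they match the `util` helpers used here.

import torch

def normalize_tensor(feat, eps=1e-10):
    # unit-normalize each spatial position's feature vector along the channel dim
    norm = torch.sqrt(torch.sum(feat ** 2, dim=1, keepdim=True))
    return feat / (norm + eps)

def spatial_average(x, keepdim=True):
    return x.mean([2, 3], keepdim=keepdim)

# stand-ins for per-layer activations of two images
feats0 = [torch.randn(1, c, 16, 16) for c in (64, 128, 256)]
feats1 = [torch.randn(1, c, 16, 16) for c in (64, 128, 256)]

val = torch.zeros(1, 1, 1, 1)
for f0, f1 in zip(feats0, feats1):
    diff = (normalize_tensor(f0) - normalize_tensor(f1)) ** 2
    # lpips=False path: a plain channel sum stands in for the learned 1x1 conv
    val = val + spatial_average(diff.sum(dim=1, keepdim=True))
print(val.shape)  # torch.Size([1, 1, 1, 1])
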
diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/align_all_parallel.py b/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/align_all_parallel.py
deleted file mode 100644
index 05b520cd6590dc02ee533d3f0d69e6a364447d9f..0000000000000000000000000000000000000000
--- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/align_all_parallel.py
+++ /dev/null
@@ -1,217 +0,0 @@
-"""
-brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset)
-author: lzhbrian (https://lzhbrian.me)
-date: 2020.1.5
-note: code is heavily borrowed from
- https://github.com/NVlabs/ffhq-dataset
- http://dlib.net/face_landmark_detection.py.html
-
-requirements:
- apt install cmake
- conda install Pillow numpy scipy
- pip install dlib
- # download face landmark model from:
- # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
-"""
-from argparse import ArgumentParser
-import time
-import numpy as np
-import PIL
-import PIL.Image
-import os
-import scipy
-import scipy.ndimage
-import dlib
-import multiprocessing as mp
-import math
-
-#from configs.paths_config import model_paths
-SHAPE_PREDICTOR_PATH = 'shape_predictor_68_face_landmarks.dat'#model_paths["shape_predictor"]
-
-
-def get_landmark(filepath, predictor):
- """get landmark with dlib
- :return: np.array shape=(68, 2)
- """
- detector = dlib.get_frontal_face_detector()
- if type(filepath) == str:
- img = dlib.load_rgb_image(filepath)
- else:
- img = filepath
- dets = detector(img, 1)
-
- if len(dets) == 0:
- print('Error: no face detected!')
- return None
-
- shape = None
- for k, d in enumerate(dets):
- shape = predictor(img, d)
-
- if shape is None:
-        print('Error: No face detected! If you are sure there are faces in your input, you may rerun the code several times until the face is detected. Sometimes the detector is unstable.')
-        return None
- t = list(shape.parts())
- a = []
- for tt in t:
- a.append([tt.x, tt.y])
- lm = np.array(a)
- return lm
-
-
-def align_face(filepath, predictor):
- """
- :param filepath: str
- :return: PIL Image
- """
-
- lm = get_landmark(filepath, predictor)
- if lm is None:
- return None
-
- lm_chin = lm[0: 17] # left-right
- lm_eyebrow_left = lm[17: 22] # left-right
- lm_eyebrow_right = lm[22: 27] # left-right
- lm_nose = lm[27: 31] # top-down
- lm_nostrils = lm[31: 36] # top-down
- lm_eye_left = lm[36: 42] # left-clockwise
- lm_eye_right = lm[42: 48] # left-clockwise
- lm_mouth_outer = lm[48: 60] # left-clockwise
- lm_mouth_inner = lm[60: 68] # left-clockwise
-
- # Calculate auxiliary vectors.
- eye_left = np.mean(lm_eye_left, axis=0)
- eye_right = np.mean(lm_eye_right, axis=0)
- eye_avg = (eye_left + eye_right) * 0.5
- eye_to_eye = eye_right - eye_left
- mouth_left = lm_mouth_outer[0]
- mouth_right = lm_mouth_outer[6]
- mouth_avg = (mouth_left + mouth_right) * 0.5
- eye_to_mouth = mouth_avg - eye_avg
-
- # Choose oriented crop rectangle.
- x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
- x /= np.hypot(*x)
- x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
- y = np.flipud(x) * [-1, 1]
- c = eye_avg + eye_to_mouth * 0.1
- quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
- qsize = np.hypot(*x) * 2
-
- # read image
- if type(filepath) == str:
- img = PIL.Image.open(filepath)
- else:
- img = PIL.Image.fromarray(filepath)
-
- output_size = 256
- transform_size = 256
- enable_padding = True
-
- # Shrink.
- shrink = int(np.floor(qsize / output_size * 0.5))
- if shrink > 1:
- rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
- img = img.resize(rsize, PIL.Image.ANTIALIAS)
- quad /= shrink
- qsize /= shrink
-
- # Crop.
- border = max(int(np.rint(qsize * 0.1)), 3)
- crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
- int(np.ceil(max(quad[:, 1]))))
- crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
- min(crop[3] + border, img.size[1]))
- if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
- img = img.crop(crop)
- quad -= crop[0:2]
-
- # Pad.
- pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
- int(np.ceil(max(quad[:, 1]))))
- pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
- max(pad[3] - img.size[1] + border, 0))
- if enable_padding and max(pad) > border - 4:
- pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
- img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
- h, w, _ = img.shape
- y, x, _ = np.ogrid[:h, :w, :1]
- mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
- 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
- blur = qsize * 0.02
- img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
- img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
- img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
- quad += pad[:2]
-
- # Transform.
- img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
- if output_size < transform_size:
- img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
-
- # Save aligned image.
- return img
-
-
-def chunks(lst, n):
- """Yield successive n-sized chunks from lst."""
- for i in range(0, len(lst), n):
- yield lst[i:i + n]
-
-
-def extract_on_paths(file_paths):
- predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH)
- pid = mp.current_process().name
- print('\t{} is starting to extract on #{} images'.format(pid, len(file_paths)))
- tot_count = len(file_paths)
- count = 0
- for file_path, res_path in file_paths:
- count += 1
- if count % 100 == 0:
- print('{} done with {}/{}'.format(pid, count, tot_count))
- try:
- res = align_face(file_path, predictor)
- res = res.convert('RGB')
- os.makedirs(os.path.dirname(res_path), exist_ok=True)
- res.save(res_path)
- except Exception:
- continue
- print('\tDone!')
-
-
-def parse_args():
- parser = ArgumentParser(add_help=False)
- parser.add_argument('--num_threads', type=int, default=1)
- parser.add_argument('--root_path', type=str, default='')
- args = parser.parse_args()
- return args
-
-
-def run(args):
- root_path = args.root_path
- out_crops_path = root_path + '_crops'
- if not os.path.exists(out_crops_path):
- os.makedirs(out_crops_path, exist_ok=True)
-
- file_paths = []
- for root, dirs, files in os.walk(root_path):
- for file in files:
- file_path = os.path.join(root, file)
- fname = os.path.join(out_crops_path, os.path.relpath(file_path, root_path))
- res_path = '{}.jpg'.format(os.path.splitext(fname)[0])
- if os.path.splitext(file_path)[1] == '.txt' or os.path.exists(res_path):
- continue
- file_paths.append((file_path, res_path))
-
- file_chunks = list(chunks(file_paths, int(math.ceil(len(file_paths) / args.num_threads))))
- print(len(file_chunks))
- pool = mp.Pool(args.num_threads)
- print('Running on {} paths\nHere we goooo'.format(len(file_paths)))
- tic = time.time()
- pool.map(extract_on_paths, file_chunks)
- toc = time.time()
- print('Mischief managed in {}s'.format(toc - tic))
-
-
-if __name__ == '__main__':
- args = parse_args()
- run(args)
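
For reference, a hedged usage sketch of the alignment routine deleted above: it assumes the module is importable as `align_all_parallel`, that the landmark model named in `SHAPE_PREDICTOR_PATH` has been downloaded next to it, and that `my_photo.jpg` is a placeholder input path.

import dlib
from align_all_parallel import align_face, SHAPE_PREDICTOR_PATH

predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH)  # 68-point landmark model
aligned = align_face("my_photo.jpg", predictor)         # 256x256 PIL image, or None if no face found
if aligned is not None:
    aligned.save("my_photo_aligned.jpg")
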
diff --git a/spaces/PascalNotin/Tranception_design/tranception/activations.py b/spaces/PascalNotin/Tranception_design/tranception/activations.py
deleted file mode 100644
index 25702efc8ff20c62819d22fb2c2aa440a6210045..0000000000000000000000000000000000000000
--- a/spaces/PascalNotin/Tranception_design/tranception/activations.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import math
-
-import torch
-from packaging import version
-from torch import nn
-
-from transformers.utils import logging
-
-
-logger = logging.get_logger(__name__)
-
-
-def _gelu_python(x):
- """
- Original Implementation of the GELU activation function in Google BERT repo when initially created. For
- information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
- torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
- Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
- """
- return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
-
-
-def gelu_new(x):
- """
- Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
- the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
- """
- return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
-
-
-if version.parse(torch.__version__) < version.parse("1.4"):
- gelu = _gelu_python
-else:
- gelu = nn.functional.gelu
-
-
-def gelu_fast(x):
- return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
-
-
-def quick_gelu(x):
- return x * torch.sigmoid(1.702 * x)
-
-
-def _silu_python(x):
- """
- See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
- Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
- Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
- Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
- later.
- """
- return x * torch.sigmoid(x)
-
-
-if version.parse(torch.__version__) < version.parse("1.7"):
- silu = _silu_python
-else:
- silu = nn.functional.silu
-
-
-def _mish_python(x):
- """
- See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
- visit the official repository for the paper: https://github.com/digantamisra98/Mish
- """
- return x * torch.tanh(nn.functional.softplus(x))
-
-
-if version.parse(torch.__version__) < version.parse("1.9"):
- mish = _mish_python
-else:
- mish = nn.functional.mish
-
-
-def linear_act(x):
- return x
-
-def squared_relu(x):
- """
-    Squared ReLU variant that is fastest with PyTorch.
- """
- x = nn.functional.relu(x)
- return x*x
-
-def squared_relu_xla(x):
- """
- Squared ReLU variant that is fastest with JAX.
- """
- x = nn.functional.relu(x)
- return x**2
-
-tranception_ACT2FN = {
- "relu": nn.functional.relu,
- "silu": silu,
- "swish": silu,
- "gelu": gelu,
- "tanh": torch.tanh,
- "gelu_new": gelu_new,
- "gelu_fast": gelu_fast,
- "quick_gelu": quick_gelu,
- "mish": mish,
- "linear": linear_act,
- "sigmoid": torch.sigmoid,
- "squared_relu": squared_relu,
- "squared_relu_xla": squared_relu_xla,
-}
-
-
-def get_activation(activation_string):
- if activation_string in tranception_ACT2FN:
- return tranception_ACT2FN[activation_string]
- else:
- raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(tranception_ACT2FN.keys())}")
\ No newline at end of file
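
A small usage sketch for the activation registry removed above, assuming `tranception.activations` is importable:

import torch
from tranception.activations import get_activation

act = get_activation("squared_relu")
x = torch.randn(2, 8)
y = act(x)        # elementwise relu(x) ** 2
print(y.shape)    # torch.Size([2, 8])
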
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/function.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/function.go
deleted file mode 100644
index 52c93bb9c755ed2f7b80baf9a86c056947fbbfef..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/function.go and /dev/null differ
diff --git a/spaces/PeepDaSlan9/SDXL-artists-browser/index.html b/spaces/PeepDaSlan9/SDXL-artists-browser/index.html
deleted file mode 100644
index b3fb0c4c79ac210f2867054ab61d000b4b7c30df..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/SDXL-artists-browser/index.html
+++ /dev/null
@@ -1,212 +0,0 @@
-<!-- index.html: SDXL Artist Style Explorer by Mattthew (page markup not recoverable).
-     Visible UI text: toolbar groups (documents, show me, sort artists by, sort tags by);
-     filter toggles (all artists, permissive, use categories, hide low-use tags,
-     hide deprecated, favorited, edit); empty-result notice ("these filters hide every
-     image", "check-mark any tag or 'permissive'"); footer "SDXL Artist Style Explorer,
-     v1.0, by Mattthew". -->
\ No newline at end of file
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/saconv.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/saconv.py
deleted file mode 100644
index b4ee3978e097fca422805db4e31ae481006d7971..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/saconv.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from annotator.uniformer.mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init
-from annotator.uniformer.mmcv.ops.deform_conv import deform_conv2d
-from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
-
-
-@CONV_LAYERS.register_module(name='SAC')
-class SAConv2d(ConvAWS2d):
- """SAC (Switchable Atrous Convolution)
-
- This is an implementation of SAC in DetectoRS
- (https://arxiv.org/pdf/2006.02334.pdf).
-
- Args:
- in_channels (int): Number of channels in the input image
- out_channels (int): Number of channels produced by the convolution
- kernel_size (int or tuple): Size of the convolving kernel
- stride (int or tuple, optional): Stride of the convolution. Default: 1
- padding (int or tuple, optional): Zero-padding added to both sides of
- the input. Default: 0
- padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
- ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
- dilation (int or tuple, optional): Spacing between kernel elements.
- Default: 1
- groups (int, optional): Number of blocked connections from input
- channels to output channels. Default: 1
- bias (bool, optional): If ``True``, adds a learnable bias to the
- output. Default: ``True``
- use_deform: If ``True``, replace convolution with deformable
- convolution. Default: ``False``.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- bias=True,
- use_deform=False):
- super().__init__(
- in_channels,
- out_channels,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=groups,
- bias=bias)
- self.use_deform = use_deform
- self.switch = nn.Conv2d(
- self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
- self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
- self.pre_context = nn.Conv2d(
- self.in_channels, self.in_channels, kernel_size=1, bias=True)
- self.post_context = nn.Conv2d(
- self.out_channels, self.out_channels, kernel_size=1, bias=True)
- if self.use_deform:
- self.offset_s = nn.Conv2d(
- self.in_channels,
- 18,
- kernel_size=3,
- padding=1,
- stride=stride,
- bias=True)
- self.offset_l = nn.Conv2d(
- self.in_channels,
- 18,
- kernel_size=3,
- padding=1,
- stride=stride,
- bias=True)
- self.init_weights()
-
- def init_weights(self):
- constant_init(self.switch, 0, bias=1)
- self.weight_diff.data.zero_()
- constant_init(self.pre_context, 0)
- constant_init(self.post_context, 0)
- if self.use_deform:
- constant_init(self.offset_s, 0)
- constant_init(self.offset_l, 0)
-
- def forward(self, x):
- # pre-context
- avg_x = F.adaptive_avg_pool2d(x, output_size=1)
- avg_x = self.pre_context(avg_x)
- avg_x = avg_x.expand_as(x)
- x = x + avg_x
- # switch
- avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
- avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
- switch = self.switch(avg_x)
- # sac
- weight = self._get_weight(self.weight)
- zero_bias = torch.zeros(
- self.out_channels, device=weight.device, dtype=weight.dtype)
-
- if self.use_deform:
- offset = self.offset_s(avg_x)
- out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,
- self.dilation, self.groups, 1)
- else:
- if (TORCH_VERSION == 'parrots'
- or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
- out_s = super().conv2d_forward(x, weight)
- elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
- # bias is a required argument of _conv_forward in torch 1.8.0
- out_s = super()._conv_forward(x, weight, zero_bias)
- else:
- out_s = super()._conv_forward(x, weight)
- ori_p = self.padding
- ori_d = self.dilation
- self.padding = tuple(3 * p for p in self.padding)
- self.dilation = tuple(3 * d for d in self.dilation)
- weight = weight + self.weight_diff
- if self.use_deform:
- offset = self.offset_l(avg_x)
- out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,
- self.dilation, self.groups, 1)
- else:
- if (TORCH_VERSION == 'parrots'
- or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
- out_l = super().conv2d_forward(x, weight)
- elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
- # bias is a required argument of _conv_forward in torch 1.8.0
- out_l = super()._conv_forward(x, weight, zero_bias)
- else:
- out_l = super()._conv_forward(x, weight)
-
- out = switch * out_s + (1 - switch) * out_l
- self.padding = ori_p
- self.dilation = ori_d
- # post-context
- avg_x = F.adaptive_avg_pool2d(out, output_size=1)
- avg_x = self.post_context(avg_x)
- avg_x = avg_x.expand_as(out)
- out = out + avg_x
- return out
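
A hedged instantiation sketch for the layer above; it assumes the bundled `annotator.uniformer.mmcv` package is importable and keeps the default `use_deform=False` path.

import torch
from annotator.uniformer.mmcv.ops.saconv import SAConv2d

conv = SAConv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
x = torch.randn(1, 16, 64, 64)
out = conv(x)      # switch-weighted blend of the dilation-1 and dilation-3 branches
print(out.shape)   # torch.Size([1, 32, 64, 64])
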
diff --git a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/examples/submit_example_7.sh b/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/examples/submit_example_7.sh
deleted file mode 100644
index bc4501e54285cf726a55907da472532244223e11..0000000000000000000000000000000000000000
--- a/spaces/ProteinDesignLab/protpardelle/ProteinMPNN/examples/submit_example_7.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-#SBATCH -p gpu
-#SBATCH --mem=32g
-#SBATCH --gres=gpu:rtx2080:1
-#SBATCH -c 2
-#SBATCH --output=example_7.out
-
-source activate mlfold
-
-folder_with_pdbs="../inputs/PDB_monomers/pdbs/"
-
-output_dir="../outputs/example_7_outputs"
-if [ ! -d $output_dir ]
-then
- mkdir -p $output_dir
-fi
-
-path_for_parsed_chains=$output_dir"/parsed_pdbs.jsonl"
-
-python ../helper_scripts/parse_multiple_chains.py --input_path=$folder_with_pdbs --output_path=$path_for_parsed_chains
-
-python ../protein_mpnn_run.py \
- --jsonl_path $path_for_parsed_chains \
- --out_folder $output_dir \
- --num_seq_per_target 1 \
- --sampling_temp "0.1" \
- --unconditional_probs_only 1 \
- --seed 37 \
- --batch_size 1
diff --git a/spaces/PushkarA07/Sanskrit-Text-To-Speech/monotonic_align/core.py b/spaces/PushkarA07/Sanskrit-Text-To-Speech/monotonic_align/core.py
deleted file mode 100644
index dddc688d76172b880054e544b7a217acd013f14f..0000000000000000000000000000000000000000
--- a/spaces/PushkarA07/Sanskrit-Text-To-Speech/monotonic_align/core.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import numba
-
-
-@numba.jit(numba.void(numba.int32[:,:,::1], numba.float32[:,:,::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True)
-def maximum_path_jit(paths, values, t_ys, t_xs):
- b = paths.shape[0]
- max_neg_val=-1e9
- for i in range(int(b)):
- path = paths[i]
- value = values[i]
- t_y = t_ys[i]
- t_x = t_xs[i]
-
- v_prev = v_cur = 0.0
- index = t_x - 1
-
- for y in range(t_y):
- for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
- if x == y:
- v_cur = max_neg_val
- else:
- v_cur = value[y-1, x]
- if x == 0:
- if y == 0:
- v_prev = 0.
- else:
- v_prev = max_neg_val
- else:
- v_prev = value[y-1, x-1]
- value[y, x] += max(v_prev, v_cur)
-
- for y in range(t_y - 1, -1, -1):
- path[y, index] = 1
- if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
- index = index - 1
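
A minimal call sketch for the dynamic-programming kernel above, assuming the module is importable as `monotonic_align.core`. The arrays must be C-contiguous with exactly the dtypes in the `numba.jit` signature, the score grid should satisfy T_y >= T_x, and both `paths` and `values` are modified in place.

import numpy as np
from monotonic_align.core import maximum_path_jit

b, t_y, t_x = 1, 6, 4                                      # batch and grid sizes, T_y >= T_x
values = np.random.randn(b, t_y, t_x).astype(np.float32)   # per-cell scores (e.g. log-likelihoods)
paths = np.zeros((b, t_y, t_x), dtype=np.int32)
t_ys = np.array([t_y], dtype=np.int32)
t_xs = np.array([t_x], dtype=np.int32)

maximum_path_jit(paths, values, t_ys, t_xs)   # fills `paths` with a 0/1 monotonic alignment
print(paths[0])                               # exactly one 1 per row, chosen column non-decreasing
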
diff --git a/spaces/Raghavan1988/falcon-lablabai-hackathon-brainstorming-buddy-for-researchers/README.md b/spaces/Raghavan1988/falcon-lablabai-hackathon-brainstorming-buddy-for-researchers/README.md
deleted file mode 100644
index 59f39baf2f6a633c17695698f582035ae296823d..0000000000000000000000000000000000000000
--- a/spaces/Raghavan1988/falcon-lablabai-hackathon-brainstorming-buddy-for-researchers/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Falcon-180B Demo
-emoji: 💬
-colorFrom: indigo
-colorTo: purple
-sdk: gradio
-sdk_version: 3.42.0
-app_file: app.py
-pinned: true
----
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/operations/check.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/operations/check.py
deleted file mode 100644
index fb3ac8b9c9ea57ec1bb667cb8e904a8b5b2f9df2..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/operations/check.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""Validation of dependencies of packages
-"""
-
-import logging
-from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple
-
-from pip._vendor.packaging.requirements import Requirement
-from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
-
-from pip._internal.distributions import make_distribution_for_install_requirement
-from pip._internal.metadata import get_default_environment
-from pip._internal.metadata.base import DistributionVersion
-from pip._internal.req.req_install import InstallRequirement
-
-logger = logging.getLogger(__name__)
-
-
-class PackageDetails(NamedTuple):
- version: DistributionVersion
- dependencies: List[Requirement]
-
-
-# Shorthands
-PackageSet = Dict[NormalizedName, PackageDetails]
-Missing = Tuple[NormalizedName, Requirement]
-Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement]
-
-MissingDict = Dict[NormalizedName, List[Missing]]
-ConflictingDict = Dict[NormalizedName, List[Conflicting]]
-CheckResult = Tuple[MissingDict, ConflictingDict]
-ConflictDetails = Tuple[PackageSet, CheckResult]
-
-
-def create_package_set_from_installed() -> Tuple[PackageSet, bool]:
- """Converts a list of distributions into a PackageSet."""
- package_set = {}
- problems = False
- env = get_default_environment()
- for dist in env.iter_installed_distributions(local_only=False, skip=()):
- name = dist.canonical_name
- try:
- dependencies = list(dist.iter_dependencies())
- package_set[name] = PackageDetails(dist.version, dependencies)
- except (OSError, ValueError) as e:
- # Don't crash on unreadable or broken metadata.
- logger.warning("Error parsing requirements for %s: %s", name, e)
- problems = True
- return package_set, problems
-
-
-def check_package_set(
- package_set: PackageSet, should_ignore: Optional[Callable[[str], bool]] = None
-) -> CheckResult:
- """Check if a package set is consistent
-
- If should_ignore is passed, it should be a callable that takes a
- package name and returns a boolean.
- """
-
- missing = {}
- conflicting = {}
-
- for package_name, package_detail in package_set.items():
- # Info about dependencies of package_name
- missing_deps: Set[Missing] = set()
- conflicting_deps: Set[Conflicting] = set()
-
- if should_ignore and should_ignore(package_name):
- continue
-
- for req in package_detail.dependencies:
- name = canonicalize_name(req.name)
-
- # Check if it's missing
- if name not in package_set:
- missed = True
- if req.marker is not None:
- missed = req.marker.evaluate()
- if missed:
- missing_deps.add((name, req))
- continue
-
- # Check if there's a conflict
- version = package_set[name].version
- if not req.specifier.contains(version, prereleases=True):
- conflicting_deps.add((name, version, req))
-
- if missing_deps:
- missing[package_name] = sorted(missing_deps, key=str)
- if conflicting_deps:
- conflicting[package_name] = sorted(conflicting_deps, key=str)
-
- return missing, conflicting
-
-
-def check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails:
- """For checking if the dependency graph would be consistent after \
- installing given requirements
- """
- # Start from the current state
- package_set, _ = create_package_set_from_installed()
- # Install packages
- would_be_installed = _simulate_installation_of(to_install, package_set)
-
- # Only warn about directly-dependent packages; create a whitelist of them
- whitelist = _create_whitelist(would_be_installed, package_set)
-
- return (
- package_set,
- check_package_set(
- package_set, should_ignore=lambda name: name not in whitelist
- ),
- )
-
-
-def _simulate_installation_of(
- to_install: List[InstallRequirement], package_set: PackageSet
-) -> Set[NormalizedName]:
- """Computes the version of packages after installing to_install."""
- # Keep track of packages that were installed
- installed = set()
-
- # Modify it as installing requirement_set would (assuming no errors)
- for inst_req in to_install:
- abstract_dist = make_distribution_for_install_requirement(inst_req)
- dist = abstract_dist.get_metadata_distribution()
- name = dist.canonical_name
- package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies()))
-
- installed.add(name)
-
- return installed
-
-
-def _create_whitelist(
- would_be_installed: Set[NormalizedName], package_set: PackageSet
-) -> Set[NormalizedName]:
- packages_affected = set(would_be_installed)
-
- for package_name in package_set:
- if package_name in packages_affected:
- continue
-
- for req in package_set[package_name].dependencies:
- if canonicalize_name(req.name) in packages_affected:
- packages_affected.add(package_name)
- break
-
- return packages_affected
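
A rough sketch of how these helpers fit together; this is pip-internal API, so the exact import path can change between pip releases.

from pip._internal.operations.check import (
    check_package_set,
    create_package_set_from_installed,
)

package_set, parse_problems = create_package_set_from_installed()
missing, conflicting = check_package_set(package_set)
for name, unmet in missing.items():
    print(name, "is missing:", [str(req) for _, req in unmet])
for name, clashes in conflicting.items():
    print(name, "conflicts with:", [(str(ver), str(req)) for _, ver, req in clashes])
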
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/extension.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/extension.py
deleted file mode 100644
index 58c023f6b4479c631f382e5062932793d2bee26b..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/extension.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import re
-import functools
-import distutils.core
-import distutils.errors
-import distutils.extension
-
-from .monkey import get_unpatched
-
-
-def _have_cython():
- """
- Return True if Cython can be imported.
- """
- cython_impl = 'Cython.Distutils.build_ext'
- try:
- # from (cython_impl) import build_ext
- __import__(cython_impl, fromlist=['build_ext']).build_ext
- return True
- except Exception:
- pass
- return False
-
-
-# for compatibility
-have_pyrex = _have_cython
-
-_Extension = get_unpatched(distutils.core.Extension)
-
-
-class Extension(_Extension):
- """
- Describes a single extension module.
-
- This means that all source files will be compiled into a single binary file
-    ``<module path>.<suffix>`` (with ``<module path>`` derived from ``name`` and
-    ``<suffix>`` defined by one of the values in
-    ``importlib.machinery.EXTENSION_SUFFIXES``).
-
-    In the case ``.pyx`` files are passed as ``sources`` and ``Cython`` is **not**
- installed in the build environment, ``setuptools`` may also try to look for the
- equivalent ``.cpp`` or ``.c`` files.
-
- :arg str name:
- the full name of the extension, including any packages -- ie.
- *not* a filename or pathname, but Python dotted name
-
- :arg list[str] sources:
- list of source filenames, relative to the distribution root
- (where the setup script lives), in Unix form (slash-separated)
- for portability. Source files may be C, C++, SWIG (.i),
- platform-specific resource files, or whatever else is recognized
- by the "build_ext" command as source for a Python extension.
-
- :keyword list[str] include_dirs:
- list of directories to search for C/C++ header files (in Unix
- form for portability)
-
- :keyword list[tuple[str, str|None]] define_macros:
- list of macros to define; each macro is defined using a 2-tuple:
- the first item corresponding to the name of the macro and the second
- item either a string with its value or None to
- define it without a particular value (equivalent of "#define
- FOO" in source or -DFOO on Unix C compiler command line)
-
- :keyword list[str] undef_macros:
- list of macros to undefine explicitly
-
- :keyword list[str] library_dirs:
- list of directories to search for C/C++ libraries at link time
-
- :keyword list[str] libraries:
- list of library names (not filenames or paths) to link against
-
- :keyword list[str] runtime_library_dirs:
- list of directories to search for C/C++ libraries at run time
- (for shared extensions, this is when the extension is loaded).
- Setting this will cause an exception during build on Windows
- platforms.
-
- :keyword list[str] extra_objects:
- list of extra files to link with (eg. object files not implied
- by 'sources', static library that must be explicitly specified,
- binary resource files, etc.)
-
- :keyword list[str] extra_compile_args:
- any extra platform- and compiler-specific information to use
- when compiling the source files in 'sources'. For platforms and
- compilers where "command line" makes sense, this is typically a
- list of command-line arguments, but for other platforms it could
- be anything.
-
- :keyword list[str] extra_link_args:
- any extra platform- and compiler-specific information to use
- when linking object files together to create the extension (or
- to create a new static Python interpreter). Similar
- interpretation as for 'extra_compile_args'.
-
- :keyword list[str] export_symbols:
- list of symbols to be exported from a shared extension. Not
- used on all platforms, and not generally necessary for Python
- extensions, which typically export exactly one symbol: "init" +
- extension_name.
-
- :keyword list[str] swig_opts:
- any extra options to pass to SWIG if a source file has the .i
- extension.
-
- :keyword list[str] depends:
- list of files that the extension depends on
-
- :keyword str language:
- extension language (i.e. "c", "c++", "objc"). Will be detected
- from the source extensions if not provided.
-
- :keyword bool optional:
- specifies that a build failure in the extension should not abort the
- build process, but simply not install the failing extension.
-
- :keyword bool py_limited_api:
- opt-in flag for the usage of :doc:`Python's limited API `.
-
- :raises setuptools.errors.PlatformError: if 'runtime_library_dirs' is
- specified on Windows. (since v63)
- """
-
- def __init__(self, name, sources, *args, **kw):
- # The *args is needed for compatibility as calls may use positional
- # arguments. py_limited_api may be set only via keyword.
- self.py_limited_api = kw.pop("py_limited_api", False)
- super().__init__(name, sources, *args, **kw)
-
- def _convert_pyx_sources_to_lang(self):
- """
- Replace sources with .pyx extensions to sources with the target
- language extension. This mechanism allows language authors to supply
- pre-converted sources but to prefer the .pyx sources.
- """
- if _have_cython():
- # the build has Cython, so allow it to compile the .pyx files
- return
- lang = self.language or ''
- target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
- sub = functools.partial(re.sub, '.pyx$', target_ext)
- self.sources = list(map(sub, self.sources))
-
-
-class Library(Extension):
- """Just like a regular Extension, but built as a library instead"""
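
To ground the keyword descriptions above, a hypothetical `setup.py` fragment; every name and path here is made up for illustration.

from setuptools import setup, Extension

ext = Extension(
    name="mypkg._fast",                 # dotted module name, not a file path
    sources=["src/fast.pyx"],           # .pyx falls back to the matching .c if Cython is absent
    include_dirs=["include"],
    define_macros=[("NDEBUG", None)],   # equivalent of -DNDEBUG
    extra_compile_args=["-O3"],
)

setup(name="mypkg", version="0.1", ext_modules=[ext])
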
diff --git a/spaces/Rbrq/DeticChatGPT/README.md b/spaces/Rbrq/DeticChatGPT/README.md
deleted file mode 100644
index 15e4d59222dd185085c1399189f6e5d6f32d579f..0000000000000000000000000000000000000000
--- a/spaces/Rbrq/DeticChatGPT/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: Detic+ChatGPT
-emoji: 👀
-colorFrom: blue
-colorTo: red
-sdk: gradio
-app_file: app.py
-pinned: false
-duplicated_from: taesiri/DeticChatGPT
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/Realcat/image-matching-webui/third_party/r2d2/datasets/dataset.py b/spaces/Realcat/image-matching-webui/third_party/r2d2/datasets/dataset.py
deleted file mode 100644
index 5f4474e7dc8b81f091cac1e13f431c5c9f1840f3..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/r2d2/datasets/dataset.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2019-present NAVER Corp.
-# CC BY-NC-SA 3.0
-# Available only for non-commercial use
-
-import os
-import json
-import pdb
-import numpy as np
-
-
-class Dataset(object):
- """Base class for a dataset. To be overloaded."""
-
- root = ""
- img_dir = ""
- nimg = 0
-
- def __len__(self):
- return self.nimg
-
- def get_key(self, img_idx):
- raise NotImplementedError()
-
- def get_filename(self, img_idx, root=None):
- return os.path.join(root or self.root, self.img_dir, self.get_key(img_idx))
-
- def get_image(self, img_idx):
- from PIL import Image
-
- fname = self.get_filename(img_idx)
- try:
- return Image.open(fname).convert("RGB")
- except Exception as e:
- raise IOError("Could not load image %s (reason: %s)" % (fname, str(e)))
-
- def __repr__(self):
- res = "Dataset: %s\n" % self.__class__.__name__
- res += " %d images" % self.nimg
- res += "\n root: %s...\n" % self.root
- return res
-
-
-class CatDataset(Dataset):
- """Concatenation of several datasets."""
-
- def __init__(self, *datasets):
- assert len(datasets) >= 1
- self.datasets = datasets
- offsets = [0]
- for db in datasets:
- offsets.append(db.nimg)
- self.offsets = np.cumsum(offsets)
- self.nimg = self.offsets[-1]
- self.root = None
-
- def which(self, i):
- pos = np.searchsorted(self.offsets, i, side="right") - 1
- assert pos < self.nimg, "Bad image index %d >= %d" % (i, self.nimg)
- return pos, i - self.offsets[pos]
-
- def get_key(self, i):
- b, i = self.which(i)
- return self.datasets[b].get_key(i)
-
- def get_filename(self, i):
- b, i = self.which(i)
- return self.datasets[b].get_filename(i)
-
- def __repr__(self):
- fmt_str = "CatDataset("
- for db in self.datasets:
- fmt_str += str(db).replace("\n", " ") + ", "
- return fmt_str[:-2] + ")"
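
A minimal sketch of overloading the base class and concatenating two datasets, assuming the module is importable as `datasets.dataset` from the r2d2 root; the folders and image keys are made up.

from datasets.dataset import Dataset, CatDataset

class FolderDataset(Dataset):
    """Toy dataset whose keys are given explicitly."""
    def __init__(self, root, keys):
        self.root, self.img_dir = root, "images"
        self.keys = list(keys)
        self.nimg = len(self.keys)

    def get_key(self, img_idx):
        return self.keys[img_idx]

db = CatDataset(FolderDataset("/data/a", ["0.jpg", "1.jpg"]),
                FolderDataset("/data/b", ["x.png"]))
print(len(db), db.get_key(2))   # 3 x.png
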
diff --git a/spaces/Rezuwan/parrot_classifier/app.py b/spaces/Rezuwan/parrot_classifier/app.py
deleted file mode 100644
index efbb9811ee721a70c314d5b30d38b780a58fb6a7..0000000000000000000000000000000000000000
--- a/spaces/Rezuwan/parrot_classifier/app.py
+++ /dev/null
@@ -1,122 +0,0 @@
-
-from fastai.vision.all import *
-from fastai.vision.all import load_learner
-import fastai
-import os
-import gradio as gr
-import pathlib
-
-
-# from google.colab import drive
-# drive.mount('/content/drive/')
-
-# remap WindowsPath to PosixPath so a learner exported on Windows unpickles on this (Linux) host
-temp = pathlib.WindowsPath
-pathlib.WindowsPath = pathlib.PosixPath
-
-model_dir = "models/parrot-recognizer-v10.pkl"
-
-model = load_learner(model_dir)
-
-parrot_species = ['african grey parrot',
- 'australian king parrot',
- 'australian night parrot',
- 'bare eyed cockatoo',
- 'blue and yellow macaw',
- 'blue headed parrot',
- 'blue lorikeet',
- 'brown hooded parrot',
- 'budgerigar',
- 'burrowing parrot',
- 'caique parrot',
- 'catalina macaw',
- 'chestnut-fronted macaw',
- 'citron cockatoo',
- 'cockatiels',
- 'crimson rosella',
- 'cuban amazon',
- 'eclectus parrot',
- 'galah cockatoo',
- 'gang gang cockatoo',
- 'golden parakeet',
- 'great green macaw',
- 'great hanging parrot',
- 'greater vasa parrot',
- 'hahn_s macaws',
- 'hooded parrot',
- 'hyacinth macaw',
- 'kea',
- 'kākāpō',
- 'lovebirds',
- 'major mitchell_s cockatoo',
- 'monk parakeet',
- 'musk lorikeet',
- 'palm cockatoo',
- 'parrotlet',
- 'plum headed parakeet',
- 'puerto rican amazon',
- 'rainbow lorikeet',
- 'red breasted parakeet',
- 'red crowned amazon',
- 'red crowned parakeet',
- 'red fan parrot',
- 'red lory',
- 'red rumped parrot',
- 'red shouldered macaw',
- 'red tailed black cockatoos',
- 'rose ringed parakeet',
- 'saint vincent amazon',
- 'salmon crested cockatoo',
- 'scarlet macaw',
- 'senegal parrot',
- 'spixs macaw',
- 'sulpher crested cockatoo',
- 'sun conure',
- 'thick billed parrot',
- 'turquoise fronted amazon',
- 'umbrella cockatoo',
- 'vernal hanging parrot',
- 'yellow collared macaws',
- 'yellow headed amazon']
-
-def recognize_image(image):
- pred, idx, probs = model.predict(image)
- return dict(zip(parrot_species, map(float, probs)))
-
-
-# im = "/content/drive/MyDrive/Learnings/fai/test_images/unknown_12.jpg"
-# img = PILImage.create(im)
-# img.thumbnail((192,192))
-# img
-
-# recognize_image(img)
-
-
-
-image = gr.inputs.Image(shape=(192,192))
-label = gr.outputs.Label(num_top_classes=5)
-
-examples = [
- "test_images/unknown_00.jpg",
- "test_images/unknown_01.jpg",
- "test_images/unknown_02.jpg",
- "test_images/unknown_03.jpg",
- "test_images/unknown_04.jpg",
- "test_images/unknown_05.jpg",
- "test_images/unknown_06.jpg",
- "test_images/unknown_07.jpg",
- "test_images/unknown_08.jpg",
- "test_images/unknown_09.jpg",
- "test_images/unknown_10.jpg",
- "test_images/unknown_11.jpg",
- "test_images/unknown_12.jpg",
- "test_images/unknown_13.jpg",
- "test_images/unknown_14.jpg",
- "test_images/unknown_15.jpg",
- "test_images/unknown_16.jpg",
- "test_images/unknown_17.jpg",
- "test_images/unknown_18.jpg",
- "test_images/unknown_19.jpg",
- ]
-
-iface = gr.Interface(fn=recognize_image, inputs=image, outputs=label, examples=examples)
-iface.launch(inline=False)
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/bbox_overlaps.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/bbox_overlaps.py
deleted file mode 100644
index 93559ea0f25369d552a5365312fa32b9ffec9226..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/core/evaluation/bbox_overlaps.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import numpy as np
-
-
-def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
- """Calculate the ious between each bbox of bboxes1 and bboxes2.
-
- Args:
- bboxes1(ndarray): shape (n, 4)
- bboxes2(ndarray): shape (k, 4)
- mode(str): iou (intersection over union) or iof (intersection
- over foreground)
-
- Returns:
- ious(ndarray): shape (n, k)
- """
-
- assert mode in ['iou', 'iof']
-
- bboxes1 = bboxes1.astype(np.float32)
- bboxes2 = bboxes2.astype(np.float32)
- rows = bboxes1.shape[0]
- cols = bboxes2.shape[0]
- ious = np.zeros((rows, cols), dtype=np.float32)
- if rows * cols == 0:
- return ious
- exchange = False
- if bboxes1.shape[0] > bboxes2.shape[0]:
- bboxes1, bboxes2 = bboxes2, bboxes1
- ious = np.zeros((cols, rows), dtype=np.float32)
- exchange = True
- area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
- area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
- for i in range(bboxes1.shape[0]):
- x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
- y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
- x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
- y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
- overlap = np.maximum(x_end - x_start, 0) * np.maximum(
- y_end - y_start, 0)
- if mode == 'iou':
- union = area1[i] + area2 - overlap
- else:
- union = area1[i] if not exchange else area2
- union = np.maximum(union, eps)
- ious[i, :] = overlap / union
- if exchange:
- ious = ious.T
- return ious
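
A worked example for the function above (the import path is assumed; adjust it to your layout). Two 10x10 boxes offset by (5, 5) overlap in a 5x5 region, so IoU = 25 / (100 + 100 - 25), which is about 0.143.

import numpy as np
from bbox_overlaps import bbox_overlaps

b1 = np.array([[0, 0, 10, 10]], dtype=np.float32)   # one 10x10 box
b2 = np.array([[5, 5, 15, 15]], dtype=np.float32)   # shifted copy
print(bbox_overlaps(b1, b2, mode='iou'))            # [[0.14285715]]
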
diff --git a/spaces/SUPERpuper/Text-to-image-AI-3/app.py b/spaces/SUPERpuper/Text-to-image-AI-3/app.py
deleted file mode 100644
index 6f9a0f0853d6e44b1882522ac59c3a1031b82744..0000000000000000000000000000000000000000
--- a/spaces/SUPERpuper/Text-to-image-AI-3/app.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import numpy as np
-import tensorflow as tf
-from tensorflow.keras.preprocessing import image as kp_image
-from tensorflow.keras.applications import vgg19
-import matplotlib.pyplot as plt
-
-# Load the content and style images
-content_path = 'content_image.jpg'
-style_path = 'futuristic_style_image.jpg'
-content_img = kp_image.load_img(content_path, target_size=(224, 224))  # same size so the feature maps match
-style_img = kp_image.load_img(style_path, target_size=(224, 224))
-
-# Convert the images to arrays and preprocess them for use with VGG19
-content_array = kp_image.img_to_array(content_img)
-style_array = kp_image.img_to_array(style_img)
-content_array = np.expand_dims(content_array, axis=0)
-style_array = np.expand_dims(style_array, axis=0)
-content_array = vgg19.preprocess_input(content_array)
-style_array = vgg19.preprocess_input(style_array)
-
-# Load VGG19 and build a feature extractor for the 'block5_conv2' layer
-# (predict() on the full model returns only its final output, not per-layer activations)
-vgg_model = vgg19.VGG19(include_top=False, weights='imagenet')
-feature_extractor = tf.keras.Model(inputs=vgg_model.input,
-                                   outputs=vgg_model.get_layer('block5_conv2').output)
-content_features = feature_extractor(content_array)
-style_features = feature_extractor(style_array)
-
-# Define a function to compute the style transfer loss
-def style_transfer_loss(style_features, generated_features):
- style_loss = tf.reduce_mean(tf.square(style_features - generated_features))
- return style_loss
-
-# Define a function to generate the output image using neural style transfer
-def generate_output_image(content_array, style_features, num_iterations=100):
- # Initialize the output image with the content image
- output_array = content_array.copy()
-
- # Convert the output array to a tensor and create a variable from it
- output_tensor = tf.Variable(output_array)
-
- # Define a function to compute the content loss
- def content_loss(content_features, generated_features):
- content_loss = tf.reduce_mean(tf.square(content_features - generated_features))
- return content_loss
-
- # Define the optimizer and learning rate
- optimizer = tf.optimizers.Adam(learning_rate=0.01)
-
- # Generate the output image using neural style transfer
- for i in range(num_iterations):
- with tf.GradientTape() as tape:
- # Compute the content and style losses
-            generated_features = feature_extractor(output_tensor)
- content_loss_value = content_loss(content_features, generated_features)
- style_loss_value = style_transfer_loss(style_features, generated_features)
-
- # Compute the total loss
- total_loss = content_loss_value + style_loss_value
-
- # Compute the gradients of the total loss with respect to the output tensor
- gradients = tape.gradient(total_loss, output_tensor)
-
- # Apply the gradients to the output tensor
- optimizer.apply_gradients([(gradients, output_tensor)])
-
- # Clip the output tensor to the range [0, 255]
- output_tensor.assign(tf.clip_by_value(output_tensor, 0.0, 255.0))
-
- # Convert the output tensor back to an array
- output_array = output_tensor.numpy()[0]
- return output_array
-
-# Generate the output image
-output_array = generate_output_image(content_array, style_features)
-
-# Convert the output array to an image and display it
-output_img = kp_image.array_to_img(output_array)
-plt.imshow(output_img)
-plt.show()
\ No newline at end of file
diff --git a/spaces/Sadashiv/BERT-NER/utils.py b/spaces/Sadashiv/BERT-NER/utils.py
deleted file mode 100644
index 92532d123dd92ea34549165aad3e89667af6d7ee..0000000000000000000000000000000000000000
--- a/spaces/Sadashiv/BERT-NER/utils.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import requests
-from dotenv import load_dotenv
-import os
-
-# load the .env file
-load_dotenv()
-
-API_KEY = os.getenv("API")
-
-API_URL = "https://api-inference.huggingface.co/models/Sadashiv/BERT-ner"
-headers = {"Authorization": f"Bearer {API_KEY}"}
-
-tag_color_combination = {'O': '#FF5733',
- 'PER': '#35B7FF',
- 'ORG': '#00FF00',
- 'LOC': '#FFA500',
- 'MISC': '#BA55D3'}
-
-
-class ner_extraction:
- def __init__(self, input_text):
- self.input_text = input_text
-
- def query(self):
- response = requests.post(API_URL, headers=headers, json=self.input_text)
- return response.json()
-
- def entity_position_locator(self):
- output = self.query()
- entity_position = {}
-
- for i in range(len(output)):
- entity_position[i]={}
- entity_position[i]["start"]=output[i]['start']
- entity_position[i]["end"]=output[i]['end']
-
- return entity_position
-
- def entity_update(self):
- entity_list = []
- output = self.query()
-
- for i in range(len(output)):
- entity_list.append(
- (
- output[i]['word'],
- output[i]['entity_group'],
- tag_color_combination.get(output[i]['entity_group'])
- )
- )
-
- return entity_list
-
- def text_list(self):
-
- input_text = self.input_text
- entity_position = self.entity_position_locator()
-
- split_text = []
-
- for i in entity_position:
- split_text.append(input_text[entity_position[i]['start']:entity_position[i]['end']])
-
- if entity_position[i]['end']!=len(input_text):
-
- if i+1= float(span[0]) and t<= float(span[1]):
- answers.append('yes')
- flag = True
- break
- if not flag:
- answers.append('no')
- else:
- for t in time_stamp:
- answers.append('no') # for test
-
- answers = '_'.join(answers)
-
- result = True
- except Exception as e:
-
- print(f"Error while read file idx")
- print("video is: {}".format(ann['video']))
- index = random.randint(0, len(self.annotation) - 1)
-
- return {
- "video": frms,
- "qa_input": qa_prompt,
- "loc_input": loc_prompt,
- "qa_output": answers,
- "question_id": qid,
- 'duration': duration
- }
diff --git a/spaces/ShaunWithGPT/ChuanhuChatGPT/llama_func.py b/spaces/ShaunWithGPT/ChuanhuChatGPT/llama_func.py
deleted file mode 100644
index c71027dd4e6f99c0c12626cbbf276f407877be04..0000000000000000000000000000000000000000
--- a/spaces/ShaunWithGPT/ChuanhuChatGPT/llama_func.py
+++ /dev/null
@@ -1,192 +0,0 @@
-import os
-import logging
-
-from llama_index import GPTSimpleVectorIndex
-from llama_index import download_loader
-from llama_index import (
- Document,
- LLMPredictor,
- PromptHelper,
- QuestionAnswerPrompt,
- RefinePrompt,
-)
-from langchain.llms import OpenAI
-import colorama
-
-
-from presets import *
-from utils import *
-
-
-def get_documents(file_src):
- documents = []
- index_name = ""
- logging.debug("Loading documents...")
- logging.debug(f"file_src: {file_src}")
- for file in file_src:
- logging.debug(f"file: {file.name}")
- index_name += file.name
- if os.path.splitext(file.name)[1] == ".pdf":
- logging.debug("Loading PDF...")
- CJKPDFReader = download_loader("CJKPDFReader")
- loader = CJKPDFReader()
- documents += loader.load_data(file=file.name)
- elif os.path.splitext(file.name)[1] == ".docx":
- logging.debug("Loading DOCX...")
- DocxReader = download_loader("DocxReader")
- loader = DocxReader()
- documents += loader.load_data(file=file.name)
- elif os.path.splitext(file.name)[1] == ".epub":
- logging.debug("Loading EPUB...")
- EpubReader = download_loader("EpubReader")
- loader = EpubReader()
- documents += loader.load_data(file=file.name)
- else:
- logging.debug("Loading text file...")
- with open(file.name, "r", encoding="utf-8") as f:
- text = add_space(f.read())
- documents += [Document(text)]
- index_name = sha1sum(index_name)
- return documents, index_name
-
-
-def construct_index(
- api_key,
- file_src,
- max_input_size=4096,
- num_outputs=1,
- max_chunk_overlap=20,
- chunk_size_limit=600,
- embedding_limit=None,
- separator=" ",
- num_children=10,
- max_keywords_per_chunk=10,
-):
- os.environ["OPENAI_API_KEY"] = api_key
- chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
- embedding_limit = None if embedding_limit == 0 else embedding_limit
- separator = " " if separator == "" else separator
-
- llm_predictor = LLMPredictor(
- llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key)
- )
- prompt_helper = PromptHelper(
- max_input_size,
- num_outputs,
- max_chunk_overlap,
- embedding_limit,
- chunk_size_limit,
- separator=separator,
- )
- documents, index_name = get_documents(file_src)
- if os.path.exists(f"./index/{index_name}.json"):
-        logging.info("Found a cached index file, loading it...")
- return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json")
- else:
- try:
-            logging.debug("Building index...")
- index = GPTSimpleVectorIndex(
- documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
- )
- os.makedirs("./index", exist_ok=True)
- index.save_to_disk(f"./index/{index_name}.json")
- return index
- except Exception as e:
- print(e)
- return None
-
-
-def chat_ai(
- api_key,
- index,
- question,
- context,
- chatbot,
-):
- os.environ["OPENAI_API_KEY"] = api_key
-
- logging.info(f"Question: {question}")
-
- response, chatbot_display, status_text = ask_ai(
- api_key,
- index,
- question,
- replace_today(PROMPT_TEMPLATE),
- REFINE_TEMPLATE,
- SIM_K,
- INDEX_QUERY_TEMPRATURE,
- context,
- )
- if response is None:
-        status_text = "Query failed, please try rephrasing the question"
-        return context, chatbot, status_text
-
- context.append({"role": "user", "content": question})
- context.append({"role": "assistant", "content": response})
- chatbot.append((question, chatbot_display))
-
- os.environ["OPENAI_API_KEY"] = ""
- return context, chatbot, status_text
-
-
-def ask_ai(
- api_key,
- index,
- question,
- prompt_tmpl,
- refine_tmpl,
- sim_k=1,
- temprature=0,
- prefix_messages=[],
-):
- os.environ["OPENAI_API_KEY"] = api_key
-
- logging.debug("Index file found")
- logging.debug("Querying index...")
- llm_predictor = LLMPredictor(
- llm=OpenAI(
- temperature=temprature,
- model_name="gpt-3.5-turbo-0301",
- prefix_messages=prefix_messages,
- )
- )
-
- response = None # Initialize response variable to avoid UnboundLocalError
- qa_prompt = QuestionAnswerPrompt(prompt_tmpl)
- rf_prompt = RefinePrompt(refine_tmpl)
- response = index.query(
- question,
- llm_predictor=llm_predictor,
- similarity_top_k=sim_k,
- text_qa_template=qa_prompt,
- refine_template=rf_prompt,
- response_mode="compact",
- )
-
- if response is not None:
- logging.info(f"Response: {response}")
- ret_text = response.response
- nodes = []
- for index, node in enumerate(response.source_nodes):
- brief = node.source_text[:25].replace("\n", "")
- nodes.append(
-                f"[{index+1}]\t{brief}... {node.source_text}"
- )
- new_response = ret_text + "\n----------\n" + "\n\n".join(nodes)
- logging.info(
- f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}"
- )
- os.environ["OPENAI_API_KEY"] = ""
-        return ret_text, new_response, f"The query consumed {llm_predictor.last_token_usage} tokens"
- else:
- logging.warning("No response found, returning None")
- os.environ["OPENAI_API_KEY"] = ""
-        return None, None, "No response was generated, please try rephrasing the question"
-
-
-def add_space(text):
- punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "}
- for cn_punc, en_punc in punctuations.items():
- text = text.replace(cn_punc, en_punc)
- return text
diff --git a/spaces/Slava917/pronunciation-trainer/predict.py b/spaces/Slava917/pronunciation-trainer/predict.py
deleted file mode 100644
index cfeba17de83e7c4ea0fd638a619e530d8cdb3283..0000000000000000000000000000000000000000
--- a/spaces/Slava917/pronunciation-trainer/predict.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import torch
-import torchaudio
-
-#fixes second prediction bug
-torch._C._jit_override_can_fuse_on_cpu(False)
-torch._C._jit_override_can_fuse_on_gpu(False)
-torch._C._jit_set_texpr_fuser_enabled(False)
-torch._C._jit_set_nvfuser_enabled(False)
-
-loader = torch.jit.load("audio_loader.pt")
-model = torch.jit.load('QuartzNet_thunderspeech_3.pt')
-
-vocab = model.text_transform.vocab.itos
-vocab[-1] = ''
-
-def convert_probs(probs):
- ids = probs.argmax(1)[0]
- s = []
- if vocab[ids[0]]: s.append(vocab[ids[0]])
- for i in range(1,len(ids)):
- if ids[i-1] != ids[i]:
- new = vocab[ids[i]]
- if new: s.append(new)
- #return '.'.join(s)
- return s
-
-def predict(path):
- audio = loader(path)
- probs = model(audio, torch.tensor(audio.shape[0] * [audio.shape[-1]], device=audio.device))[0]
- return convert_probs(probs)
\ No newline at end of file
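
The collapse rule in `convert_probs` above is greedy CTC decoding: take the argmax id per frame, merge consecutive repeats, and drop the blank (the last vocabulary entry, mapped to ''). A standalone illustration with a made-up vocabulary rather than the model's real one:

import torch

vocab = ['HH', 'AH', 'L', 'OW', '']              # last entry plays the role of the CTC blank
probs = torch.zeros(1, len(vocab), 7)            # (batch, vocab, time), like the model output
for t, idx in enumerate([0, 0, 1, 4, 2, 3, 3]):  # HH HH AH <blank> L OW OW
    probs[0, idx, t] = 1.0

ids = probs.argmax(1)[0]                         # best id per frame
decoded, prev = [], None
for i in ids.tolist():
    if i != prev and vocab[i]:                   # merge repeats, skip the blank
        decoded.append(vocab[i])
    prev = i
print(decoded)                                   # ['HH', 'AH', 'L', 'OW']
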
diff --git a/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/__init__.py b/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/__init__.py
deleted file mode 100644
index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000
--- a/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/spaces/SuYuanS/AudioCraft_Plus/tests/modules/test_transformer.py b/spaces/SuYuanS/AudioCraft_Plus/tests/modules/test_transformer.py
deleted file mode 100644
index 2bb79bfd58d535469f9b3c56b8a5fe254db5d8ba..0000000000000000000000000000000000000000
--- a/spaces/SuYuanS/AudioCraft_Plus/tests/modules/test_transformer.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from itertools import product
-
-import pytest
-import torch
-
-from audiocraft.modules.transformer import (
- StreamingMultiheadAttention, StreamingTransformer, set_efficient_attention_backend)
-
-
-def test_transformer_causal_streaming():
- torch.manual_seed(1234)
-
- for context, custom in product([None, 10], [False, True]):
- # Test that causality and receptive fields are properly handled.
- # looking at the gradients
- tr = StreamingTransformer(
- 16, 4, 1 if context else 2,
- causal=True, past_context=context, custom=custom,
- dropout=0.)
- steps = 20
- for k in [0, 10, 15, 19]:
- x = torch.randn(4, steps, 16, requires_grad=True)
- y = tr(x)
- y[:, k].abs().sum().backward()
- if k + 1 < steps:
- assert torch.allclose(x.grad[:, k + 1:], torch.tensor(0.)), x.grad[:, k + 1:].norm()
- assert not torch.allclose(x.grad[:, :k + 1], torch.tensor(0.)), x.grad[:, :k + 1].norm()
- if context is not None and k > context:
- limit = k - context - 1
- assert torch.allclose(x.grad[:, :limit],
- torch.tensor(0.)), x.grad[:, :limit].norm()
-
- # Now check that streaming gives the same result at batch eval.
- x = torch.randn(4, steps, 16)
- y = tr(x)
- ys = []
- with tr.streaming():
- for k in range(steps):
- chunk = x[:, k:k + 1, :]
- ys.append(tr(chunk))
- y_stream = torch.cat(ys, dim=1)
- delta = torch.norm(y_stream - y) / torch.norm(y)
- assert delta < 1e-6, delta
-
-
-def test_transformer_vs_pytorch():
- torch.manual_seed(1234)
- # Check that in the non causal setting, we get the same result as
- # PyTorch Transformer encoder.
- for custom in [False, True]:
- tr = StreamingTransformer(
- 16, 4, 2,
- causal=False, custom=custom, dropout=0., positional_scale=0.)
- layer = torch.nn.TransformerEncoderLayer(16, 4, dropout=0., batch_first=True)
- tr_ref = torch.nn.TransformerEncoder(layer, 2)
- tr.load_state_dict(tr_ref.state_dict())
-
- x = torch.randn(4, 20, 16)
- y = tr(x)
- y2 = tr_ref(x)
- delta = torch.norm(y2 - y) / torch.norm(y)
- assert delta < 1e-6, delta
-
-
-def test_streaming_api():
- tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.)
- tr.eval()
- steps = 12
- x = torch.randn(1, steps, 16)
-
- with torch.no_grad():
- with tr.streaming():
- _ = tr(x[:, :1])
- state = {k: v.clone() for k, v in tr.get_streaming_state().items()}
- y = tr(x[:, 1:2])
- tr.set_streaming_state(state)
- y2 = tr(x[:, 1:2])
- assert torch.allclose(y, y2), (y - y2).norm()
- assert tr.flush() is None
-
-
-def test_memory_efficient():
- for backend in ['torch', 'xformers']:
- torch.manual_seed(1234)
- set_efficient_attention_backend(backend)
-
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., layer_scale=0.1)
- tr_mem_efficient = StreamingTransformer(
- 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1)
- tr_mem_efficient.load_state_dict(tr.state_dict())
- tr.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- with torch.no_grad():
- y = tr(x)
- y2 = tr_mem_efficient(x)
- assert torch.allclose(y, y2), ((y - y2).norm(), backend)
-
-
-def test_attention_as_float32():
- torch.manual_seed(1234)
- cases = [
- {'custom': True},
- {'custom': False},
- ]
- for case in cases:
- tr = StreamingTransformer(16, 4, 2, dropout=0., dtype=torch.bfloat16, **case)
- tr_float32 = StreamingTransformer(
- 16, 4, 2, dropout=0., attention_as_float32=True, dtype=torch.bfloat16, **case)
- if not case['custom']:
- # we are not using autocast here because it doesn't really
- # work as expected on CPU, so we have to manually cast the weights of the MHA.
- for layer in tr_float32.layers:
- layer.self_attn.mha.to(torch.float32)
- tr_float32.load_state_dict(tr.state_dict())
- steps = 12
- x = torch.randn(3, steps, 16, dtype=torch.bfloat16)
-
- with torch.no_grad():
- y = tr(x)
- y2 = tr_float32(x)
- assert not torch.allclose(y, y2), (y - y2).norm()
-
-
-@torch.no_grad()
-def test_streaming_memory_efficient():
- for backend in ['torch', 'xformers']:
- torch.manual_seed(1234)
- set_efficient_attention_backend(backend)
- tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0., custom=True)
- tr_mem_efficient = StreamingTransformer(
- 16, 4, 2, dropout=0., memory_efficient=True, causal=True)
- tr.load_state_dict(tr_mem_efficient.state_dict())
- tr.eval()
- tr_mem_efficient.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- ref = tr(x)
-
- with tr_mem_efficient.streaming():
- outs = []
- # frame_sizes = [2] + [1] * (steps - 2)
- frame_sizes = [1] * steps
-
- for frame_size in frame_sizes:
- frame = x[:, :frame_size]
- x = x[:, frame_size:]
- outs.append(tr_mem_efficient(frame))
-
- out = torch.cat(outs, dim=1)
- delta = torch.norm(out - ref) / torch.norm(out)
- assert delta < 1e-6, delta
-
-
-def test_cross_attention():
- torch.manual_seed(1234)
- for norm_first in [True, False]:
- m = StreamingTransformer(
- 16, 4, 2, cross_attention=False, norm_first=norm_first, dropout=0., custom=True)
- m_cross = StreamingTransformer(
- 16, 4, 2, cross_attention=True, norm_first=norm_first, dropout=0., custom=True)
- m_cross.load_state_dict(m.state_dict(), strict=False)
- x = torch.randn(2, 5, 16)
- cross_x = torch.randn(2, 3, 16)
- y_ref = m(x)
- y_cross_zero = m_cross(x, cross_attention_src=0 * cross_x)
- # With norm_first, the two should be exactly the same,
-        # but with norm_first=False, we get 2 normalizations in a row
- # and the epsilon value leads to a tiny change.
- atol = 0. if norm_first else 1e-6
- print((y_ref - y_cross_zero).norm() / y_ref.norm())
- assert torch.allclose(y_ref, y_cross_zero, atol=atol)
-
- # We now expect a difference even with a generous atol of 1e-2.
- y_cross = m_cross(x, cross_attention_src=cross_x)
- assert not torch.allclose(y_cross, y_cross_zero, atol=1e-2)
-
- with pytest.raises(AssertionError):
- _ = m_cross(x)
- _ = m(x, cross_attention_src=cross_x)
-
-
-def test_cross_attention_compat():
- torch.manual_seed(1234)
- num_heads = 2
- dim = num_heads * 64
- with pytest.raises(AssertionError):
- StreamingMultiheadAttention(dim, num_heads, causal=True, cross_attention=True)
-
- cross_attn = StreamingMultiheadAttention(
- dim, num_heads, dropout=0, cross_attention=True, custom=True)
- ref_attn = torch.nn.MultiheadAttention(dim, num_heads, dropout=0, batch_first=True)
-
- # We can load the regular attention state dict
- # so we have compat when loading old checkpoints.
- cross_attn.load_state_dict(ref_attn.state_dict())
-
- queries = torch.randn(3, 7, dim)
- keys = torch.randn(3, 9, dim)
- values = torch.randn(3, 9, dim)
-
- y = cross_attn(queries, keys, values)[0]
- y_ref = ref_attn(queries, keys, values)[0]
- assert torch.allclose(y, y_ref, atol=1e-7), (y - y_ref).norm() / y_ref.norm()
-
- # Now let's check that streaming is working properly.
- with cross_attn.streaming():
- ys = []
- for step in range(queries.shape[1]):
- ys.append(cross_attn(queries[:, step: step + 1], keys, values)[0])
- y_streaming = torch.cat(ys, dim=1)
- assert torch.allclose(y_streaming, y, atol=1e-7)
-
-
-def test_repeat_kv():
- torch.manual_seed(1234)
- num_heads = 8
- kv_repeat = 4
- dim = num_heads * 64
- with pytest.raises(AssertionError):
- mha = StreamingMultiheadAttention(
- dim, num_heads, causal=True, kv_repeat=kv_repeat, cross_attention=True)
- mha = StreamingMultiheadAttention(
- dim, num_heads, causal=True, kv_repeat=kv_repeat)
- mha = StreamingMultiheadAttention(
- dim, num_heads, causal=True, kv_repeat=kv_repeat, custom=True)
- x = torch.randn(4, 18, dim)
- y = mha(x, x, x)[0]
- assert x.shape == y.shape
-
-
-def test_qk_layer_norm():
- torch.manual_seed(1234)
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, bias_attn=False)
- steps = 12
- x = torch.randn(3, steps, 16)
- y = tr(x)
-
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, cross_attention=True)
- z = torch.randn(3, 21, 16)
- y = tr(x, cross_attention_src=z)
- assert y.shape == x.shape
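
The deleted tests above all follow the same pattern: run a StreamingTransformer once over the full sequence, then feed it frame by frame and check that the outputs agree. A minimal sketch of that equivalence check, using a stock causally-masked torch.nn.TransformerEncoder instead of AudioCraft's module (and re-running the prefix at every step rather than keeping a streaming cache), would look like this:

```python
# Sketch only: verifies that per-step evaluation of a causal transformer matches
# a single full-sequence pass. Uses torch.nn.TransformerEncoder with a causal
# mask, not AudioCraft's StreamingTransformer, and recomputes the prefix at
# every step instead of maintaining streaming state.
import torch

torch.manual_seed(0)
dim, heads, steps = 16, 4, 12
layer = torch.nn.TransformerEncoderLayer(dim, heads, dropout=0.0, batch_first=True)
model = torch.nn.TransformerEncoder(layer, num_layers=2).eval()

x = torch.randn(2, steps, dim)
full_mask = torch.nn.Transformer.generate_square_subsequent_mask(steps)

with torch.no_grad():
    # One pass over the whole sequence with a causal mask.
    full = model(x, mask=full_mask)
    # "Streaming": at step k, run the prefix x[:, :k+1] and keep the last frame.
    chunks = []
    for k in range(steps):
        prefix_mask = torch.nn.Transformer.generate_square_subsequent_mask(k + 1)
        out = model(x[:, :k + 1], mask=prefix_mask)
        chunks.append(out[:, -1:])
    streamed = torch.cat(chunks, dim=1)

delta = torch.norm(streamed - full) / torch.norm(full)
assert delta < 1e-4, delta
```

Causality is what makes this work: with the causal mask, position k of the full pass only attends to positions <= k, so recomputing the prefix yields the same last frame.
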
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tests/test_dir2.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tests/test_dir2.py
deleted file mode 100644
index bf7f5e57ea2219cb320772be79270feb229553a9..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/utils/tests/test_dir2.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from IPython.utils.dir2 import dir2
-
-import pytest
-
-
-class Base(object):
- x = 1
- z = 23
-
-
-def test_base():
- res = dir2(Base())
- assert "x" in res
- assert "z" in res
- assert "y" not in res
- assert "__class__" in res
- assert res.count("x") == 1
- assert res.count("__class__") == 1
-
-
-def test_SubClass():
- class SubClass(Base):
- y = 2
-
- res = dir2(SubClass())
- assert "y" in res
- assert res.count("y") == 1
- assert res.count("x") == 1
-
-
-def test_SubClass_with_trait_names_attr():
- # usecase: trait_names is used in a class describing psychological classification
-
- class SubClass(Base):
- y = 2
- trait_names = 44
-
- res = dir2(SubClass())
- assert "trait_names" in res
-
-
-def test_misbehaving_object_without_trait_names():
- # dir2 shouldn't raise even when objects are dumb and raise
-    # something other than AttributeError on bad getattr.
-
- class MisbehavingGetattr:
- def __getattr__(self, attr):
- raise KeyError("I should be caught")
-
- def some_method(self):
- return True
-
- class SillierWithDir(MisbehavingGetattr):
- def __dir__(self):
- return ["some_method"]
-
- for bad_klass in (MisbehavingGetattr, SillierWithDir):
- obj = bad_klass()
-
- assert obj.some_method()
-
- with pytest.raises(KeyError):
- obj.other_method()
-
- res = dir2(obj)
- assert "some_method" in res
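
These tests document dir2()'s contract: attribute discovery must not blow up when an object's __getattr__ raises something other than AttributeError, or when __dir__ returns something unusual. A rough sketch of that kind of defensive wrapper (not IPython's actual implementation) looks like this:

```python
# Hypothetical defensive dir() helper in the spirit of what the tests above
# exercise; IPython's real dir2() differs in detail.
def safe_dir(obj):
    """Collect attribute names without letting a broken __dir__ raise."""
    words = set()
    try:
        words.update(dir(obj))        # may invoke a user-defined __dir__
    except Exception:
        pass
    try:
        words.update(dir(type(obj)))  # class attributes as a fallback
    except Exception:
        pass
    # A misbehaving __dir__ could return non-strings; keep only real names.
    return sorted(w for w in words if isinstance(w, str))


class Broken:
    def __getattr__(self, attr):
        raise KeyError(attr)  # deliberately not an AttributeError


names = safe_dir(Broken())
assert "__class__" in names
```
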
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/BmpImagePlugin.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/BmpImagePlugin.py
deleted file mode 100644
index 5bda0a5b05d8b6a6a0ccaa91da3475e34c9b1cf3..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/BmpImagePlugin.py
+++ /dev/null
@@ -1,471 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# BMP file handler
-#
-# Windows (and OS/2) native bitmap storage format.
-#
-# history:
-# 1995-09-01 fl Created
-# 1996-04-30 fl Added save
-# 1997-08-27 fl Fixed save of 1-bit images
-# 1998-03-06 fl Load P images as L where possible
-# 1998-07-03 fl Load P images as 1 where possible
-# 1998-12-29 fl Handle small palettes
-# 2002-12-30 fl Fixed load of 1-bit palette images
-# 2003-04-21 fl Fixed load of 1-bit monochrome images
-# 2003-04-23 fl Added limited support for BI_BITFIELDS compression
-#
-# Copyright (c) 1997-2003 by Secret Labs AB
-# Copyright (c) 1995-2003 by Fredrik Lundh
-#
-# See the README file for information on usage and redistribution.
-#
-
-
-import os
-
-from . import Image, ImageFile, ImagePalette
-from ._binary import i16le as i16
-from ._binary import i32le as i32
-from ._binary import o8
-from ._binary import o16le as o16
-from ._binary import o32le as o32
-
-#
-# --------------------------------------------------------------------
-# Read BMP file
-
-BIT2MODE = {
- # bits => mode, rawmode
- 1: ("P", "P;1"),
- 4: ("P", "P;4"),
- 8: ("P", "P"),
- 16: ("RGB", "BGR;15"),
- 24: ("RGB", "BGR"),
- 32: ("RGB", "BGRX"),
-}
-
-
-def _accept(prefix):
- return prefix[:2] == b"BM"
-
-
-def _dib_accept(prefix):
- return i32(prefix) in [12, 40, 64, 108, 124]
-
-
-# =============================================================================
-# Image plugin for the Windows BMP format.
-# =============================================================================
-class BmpImageFile(ImageFile.ImageFile):
- """Image plugin for the Windows Bitmap format (BMP)"""
-
- # ------------------------------------------------------------- Description
- format_description = "Windows Bitmap"
- format = "BMP"
-
- # -------------------------------------------------- BMP Compression values
- COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5}
- for k, v in COMPRESSIONS.items():
- vars()[k] = v
-
- def _bitmap(self, header=0, offset=0):
- """Read relevant info about the BMP"""
- read, seek = self.fp.read, self.fp.seek
- if header:
- seek(header)
- # read bmp header size @offset 14 (this is part of the header size)
- file_info = {"header_size": i32(read(4)), "direction": -1}
-
- # -------------------- If requested, read header at a specific position
- # read the rest of the bmp header, without its size
- header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4)
-
- # -------------------------------------------------- IBM OS/2 Bitmap v1
- # ----- This format has different offsets because of width/height types
- if file_info["header_size"] == 12:
- file_info["width"] = i16(header_data, 0)
- file_info["height"] = i16(header_data, 2)
- file_info["planes"] = i16(header_data, 4)
- file_info["bits"] = i16(header_data, 6)
- file_info["compression"] = self.RAW
- file_info["palette_padding"] = 3
-
- # --------------------------------------------- Windows Bitmap v2 to v5
- # v3, OS/2 v2, v4, v5
- elif file_info["header_size"] in (40, 64, 108, 124):
- file_info["y_flip"] = header_data[7] == 0xFF
- file_info["direction"] = 1 if file_info["y_flip"] else -1
- file_info["width"] = i32(header_data, 0)
- file_info["height"] = (
- i32(header_data, 4)
- if not file_info["y_flip"]
- else 2**32 - i32(header_data, 4)
- )
- file_info["planes"] = i16(header_data, 8)
- file_info["bits"] = i16(header_data, 10)
- file_info["compression"] = i32(header_data, 12)
- # byte size of pixel data
- file_info["data_size"] = i32(header_data, 16)
- file_info["pixels_per_meter"] = (
- i32(header_data, 20),
- i32(header_data, 24),
- )
- file_info["colors"] = i32(header_data, 28)
- file_info["palette_padding"] = 4
- self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"])
- if file_info["compression"] == self.BITFIELDS:
- if len(header_data) >= 52:
- for idx, mask in enumerate(
- ["r_mask", "g_mask", "b_mask", "a_mask"]
- ):
- file_info[mask] = i32(header_data, 36 + idx * 4)
- else:
- # 40 byte headers only have the three components in the
- # bitfields masks, ref:
- # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
- # See also
- # https://github.com/python-pillow/Pillow/issues/1293
- # There is a 4th component in the RGBQuad, in the alpha
- # location, but it is listed as a reserved component,
- # and it is not generally an alpha channel
- file_info["a_mask"] = 0x0
- for mask in ["r_mask", "g_mask", "b_mask"]:
- file_info[mask] = i32(read(4))
- file_info["rgb_mask"] = (
- file_info["r_mask"],
- file_info["g_mask"],
- file_info["b_mask"],
- )
- file_info["rgba_mask"] = (
- file_info["r_mask"],
- file_info["g_mask"],
- file_info["b_mask"],
- file_info["a_mask"],
- )
- else:
- msg = f"Unsupported BMP header type ({file_info['header_size']})"
- raise OSError(msg)
-
- # ------------------ Special case : header is reported 40, which
- # ---------------------- is shorter than real size for bpp >= 16
- self._size = file_info["width"], file_info["height"]
-
- # ------- If color count was not found in the header, compute from bits
- file_info["colors"] = (
- file_info["colors"]
- if file_info.get("colors", 0)
- else (1 << file_info["bits"])
- )
- if offset == 14 + file_info["header_size"] and file_info["bits"] <= 8:
- offset += 4 * file_info["colors"]
-
- # ---------------------- Check bit depth for unusual unsupported values
- self.mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None))
- if self.mode is None:
- msg = f"Unsupported BMP pixel depth ({file_info['bits']})"
- raise OSError(msg)
-
- # ---------------- Process BMP with Bitfields compression (not palette)
- decoder_name = "raw"
- if file_info["compression"] == self.BITFIELDS:
- SUPPORTED = {
- 32: [
- (0xFF0000, 0xFF00, 0xFF, 0x0),
- (0xFF000000, 0xFF0000, 0xFF00, 0x0),
- (0xFF000000, 0xFF0000, 0xFF00, 0xFF),
- (0xFF, 0xFF00, 0xFF0000, 0xFF000000),
- (0xFF0000, 0xFF00, 0xFF, 0xFF000000),
- (0x0, 0x0, 0x0, 0x0),
- ],
- 24: [(0xFF0000, 0xFF00, 0xFF)],
- 16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],
- }
- MASK_MODES = {
- (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX",
- (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR",
- (32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): "ABGR",
- (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA",
- (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA",
- (32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
- (24, (0xFF0000, 0xFF00, 0xFF)): "BGR",
- (16, (0xF800, 0x7E0, 0x1F)): "BGR;16",
- (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15",
- }
- if file_info["bits"] in SUPPORTED:
- if (
- file_info["bits"] == 32
- and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]]
- ):
- raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])]
- self.mode = "RGBA" if "A" in raw_mode else self.mode
- elif (
- file_info["bits"] in (24, 16)
- and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]]
- ):
- raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])]
- else:
- msg = "Unsupported BMP bitfields layout"
- raise OSError(msg)
- else:
- msg = "Unsupported BMP bitfields layout"
- raise OSError(msg)
- elif file_info["compression"] == self.RAW:
- if file_info["bits"] == 32 and header == 22: # 32-bit .cur offset
- raw_mode, self.mode = "BGRA", "RGBA"
- elif file_info["compression"] in (self.RLE8, self.RLE4):
- decoder_name = "bmp_rle"
- else:
- msg = f"Unsupported BMP compression ({file_info['compression']})"
- raise OSError(msg)
-
- # --------------- Once the header is processed, process the palette/LUT
- if self.mode == "P": # Paletted for 1, 4 and 8 bit images
- # ---------------------------------------------------- 1-bit images
- if not (0 < file_info["colors"] <= 65536):
- msg = f"Unsupported BMP Palette size ({file_info['colors']})"
- raise OSError(msg)
- else:
- padding = file_info["palette_padding"]
- palette = read(padding * file_info["colors"])
- greyscale = True
- indices = (
- (0, 255)
- if file_info["colors"] == 2
- else list(range(file_info["colors"]))
- )
-
- # ----------------- Check if greyscale and ignore palette if so
- for ind, val in enumerate(indices):
- rgb = palette[ind * padding : ind * padding + 3]
- if rgb != o8(val) * 3:
- greyscale = False
-
- # ------- If all colors are grey, white or black, ditch palette
- if greyscale:
- self.mode = "1" if file_info["colors"] == 2 else "L"
- raw_mode = self.mode
- else:
- self.mode = "P"
- self.palette = ImagePalette.raw(
- "BGRX" if padding == 4 else "BGR", palette
- )
-
- # ---------------------------- Finally set the tile data for the plugin
- self.info["compression"] = file_info["compression"]
- args = [raw_mode]
- if decoder_name == "bmp_rle":
- args.append(file_info["compression"] == self.RLE4)
- else:
- args.append(((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3))
- args.append(file_info["direction"])
- self.tile = [
- (
- decoder_name,
- (0, 0, file_info["width"], file_info["height"]),
- offset or self.fp.tell(),
- tuple(args),
- )
- ]
-
- def _open(self):
- """Open file, check magic number and read header"""
- # read 14 bytes: magic number, filesize, reserved, header final offset
- head_data = self.fp.read(14)
- # choke if the file does not have the required magic bytes
- if not _accept(head_data):
- msg = "Not a BMP file"
- raise SyntaxError(msg)
- # read the start position of the BMP image data (u32)
- offset = i32(head_data, 10)
- # load bitmap information (offset=raster info)
- self._bitmap(offset=offset)
-
-
-class BmpRleDecoder(ImageFile.PyDecoder):
- _pulls_fd = True
-
- def decode(self, buffer):
- rle4 = self.args[1]
- data = bytearray()
- x = 0
- while len(data) < self.state.xsize * self.state.ysize:
- pixels = self.fd.read(1)
- byte = self.fd.read(1)
- if not pixels or not byte:
- break
- num_pixels = pixels[0]
- if num_pixels:
- # encoded mode
- if x + num_pixels > self.state.xsize:
- # Too much data for row
- num_pixels = max(0, self.state.xsize - x)
- if rle4:
- first_pixel = o8(byte[0] >> 4)
- second_pixel = o8(byte[0] & 0x0F)
- for index in range(num_pixels):
- if index % 2 == 0:
- data += first_pixel
- else:
- data += second_pixel
- else:
- data += byte * num_pixels
- x += num_pixels
- else:
- if byte[0] == 0:
- # end of line
- while len(data) % self.state.xsize != 0:
- data += b"\x00"
- x = 0
- elif byte[0] == 1:
- # end of bitmap
- break
- elif byte[0] == 2:
- # delta
- bytes_read = self.fd.read(2)
- if len(bytes_read) < 2:
- break
-                    right, up = bytes_read
- data += b"\x00" * (right + up * self.state.xsize)
- x = len(data) % self.state.xsize
- else:
- # absolute mode
- if rle4:
- # 2 pixels per byte
- byte_count = byte[0] // 2
- bytes_read = self.fd.read(byte_count)
- for byte_read in bytes_read:
- data += o8(byte_read >> 4)
- data += o8(byte_read & 0x0F)
- else:
- byte_count = byte[0]
- bytes_read = self.fd.read(byte_count)
- data += bytes_read
- if len(bytes_read) < byte_count:
- break
- x += byte[0]
-
- # align to 16-bit word boundary
- if self.fd.tell() % 2 != 0:
- self.fd.seek(1, os.SEEK_CUR)
- rawmode = "L" if self.mode == "L" else "P"
- self.set_as_raw(bytes(data), (rawmode, 0, self.args[-1]))
- return -1, 0
-
-
-# =============================================================================
-# Image plugin for the DIB format (BMP alias)
-# =============================================================================
-class DibImageFile(BmpImageFile):
- format = "DIB"
- format_description = "Windows Bitmap"
-
- def _open(self):
- self._bitmap()
-
-
-#
-# --------------------------------------------------------------------
-# Write BMP file
-
-
-SAVE = {
- "1": ("1", 1, 2),
- "L": ("L", 8, 256),
- "P": ("P", 8, 256),
- "RGB": ("BGR", 24, 0),
- "RGBA": ("BGRA", 32, 0),
-}
-
-
-def _dib_save(im, fp, filename):
- _save(im, fp, filename, False)
-
-
-def _save(im, fp, filename, bitmap_header=True):
- try:
- rawmode, bits, colors = SAVE[im.mode]
- except KeyError as e:
- msg = f"cannot write mode {im.mode} as BMP"
- raise OSError(msg) from e
-
- info = im.encoderinfo
-
- dpi = info.get("dpi", (96, 96))
-
- # 1 meter == 39.3701 inches
- ppm = tuple(map(lambda x: int(x * 39.3701 + 0.5), dpi))
-
- stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3)
- header = 40 # or 64 for OS/2 version 2
- image = stride * im.size[1]
-
- if im.mode == "1":
- palette = b"".join(o8(i) * 4 for i in (0, 255))
- elif im.mode == "L":
- palette = b"".join(o8(i) * 4 for i in range(256))
- elif im.mode == "P":
- palette = im.im.getpalette("RGB", "BGRX")
- colors = len(palette) // 4
- else:
- palette = None
-
- # bitmap header
- if bitmap_header:
- offset = 14 + header + colors * 4
- file_size = offset + image
- if file_size > 2**32 - 1:
- msg = "File size is too large for the BMP format"
- raise ValueError(msg)
- fp.write(
- b"BM" # file type (magic)
- + o32(file_size) # file size
- + o32(0) # reserved
- + o32(offset) # image data offset
- )
-
- # bitmap info header
- fp.write(
- o32(header) # info header size
- + o32(im.size[0]) # width
- + o32(im.size[1]) # height
- + o16(1) # planes
- + o16(bits) # depth
- + o32(0) # compression (0=uncompressed)
- + o32(image) # size of bitmap
- + o32(ppm[0]) # resolution
- + o32(ppm[1]) # resolution
- + o32(colors) # colors used
- + o32(colors) # colors important
- )
-
- fp.write(b"\0" * (header - 40)) # padding (for OS/2 format)
-
- if palette:
- fp.write(palette)
-
- ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))])
-
-
-#
-# --------------------------------------------------------------------
-# Registry
-
-
-Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
-Image.register_save(BmpImageFile.format, _save)
-
-Image.register_extension(BmpImageFile.format, ".bmp")
-
-Image.register_mime(BmpImageFile.format, "image/bmp")
-
-Image.register_decoder("bmp_rle", BmpRleDecoder)
-
-Image.register_open(DibImageFile.format, DibImageFile, _dib_accept)
-Image.register_save(DibImageFile.format, _dib_save)
-
-Image.register_extension(DibImageFile.format, ".dib")
-
-Image.register_mime(DibImageFile.format, "image/bmp")
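
One detail of the writer above that is easy to miss: BMP rows are padded to a 4-byte boundary, which is what `((im.size[0] * bits + 7) // 8 + 3) & (~3)` computes in `_save()`. A standalone illustration of that stride arithmetic (the widths and depths below are arbitrary examples):

```python
# Row stride as computed by the BMP writer above: bits per row rounded up to
# whole bytes, then rounded up to a multiple of 4 bytes.
def bmp_stride(width, bits):
    return ((width * bits + 7) // 8 + 3) & ~3


for width, bits in [(1, 1), (7, 1), (10, 8), (10, 24), (11, 24)]:
    raw = (width * bits + 7) // 8  # unpadded bytes per row
    print(f"{width:>3} px @ {bits:>2} bpp -> raw {raw:>2} bytes, padded stride {bmp_stride(width, bits)}")
# e.g. 10 px at 24 bpp needs 30 bytes of pixel data, which the format pads to 32.
```
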
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/coloredlogs/tests.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/coloredlogs/tests.py
deleted file mode 100644
index 650d3281a8584c1e863347643444f55db463edf9..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/coloredlogs/tests.py
+++ /dev/null
@@ -1,673 +0,0 @@
-# Automated tests for the `coloredlogs' package.
-#
-# Author: Peter Odding
-# Last Change: June 11, 2021
-# URL: https://coloredlogs.readthedocs.io
-
-"""Automated tests for the `coloredlogs` package."""
-
-# Standard library modules.
-import contextlib
-import logging
-import logging.handlers
-import os
-import re
-import subprocess
-import sys
-import tempfile
-
-# External dependencies.
-from humanfriendly.compat import StringIO
-from humanfriendly.terminal import ANSI_COLOR_CODES, ANSI_CSI, ansi_style, ansi_wrap
-from humanfriendly.testing import PatchedAttribute, PatchedItem, TestCase, retry
-from humanfriendly.text import format, random_string
-
-# The module we're testing.
-import coloredlogs
-import coloredlogs.cli
-from coloredlogs import (
- CHROOT_FILES,
- ColoredFormatter,
- NameNormalizer,
- decrease_verbosity,
- find_defined_levels,
- find_handler,
- find_hostname,
- find_program_name,
- find_username,
- get_level,
- increase_verbosity,
- install,
- is_verbose,
- level_to_number,
- match_stream_handler,
- parse_encoded_styles,
- set_level,
- walk_propagation_tree,
-)
-from coloredlogs.demo import demonstrate_colored_logging
-from coloredlogs.syslog import SystemLogging, is_syslog_supported, match_syslog_handler
-from coloredlogs.converter import (
- ColoredCronMailer,
- EIGHT_COLOR_PALETTE,
- capture,
- convert,
-)
-
-# External test dependencies.
-from capturer import CaptureOutput
-from verboselogs import VerboseLogger
-
-# Compiled regular expression that matches a single line of output produced by
-# the default log format (does not include matching of ANSI escape sequences).
-PLAIN_TEXT_PATTERN = re.compile(r'''
-    (?P<date> \d{4}-\d{2}-\d{2} )
-    \s (?P<time> \d{2}:\d{2}:\d{2} )
-    \s (?P<hostname> \S+ )
-    \s (?P<logger_name> \w+ )
-    \[ (?P<process_id> \d+ ) \]
-    \s (?P<severity> [A-Z]+ )
-    \s (?P<message> .* )
-''', re.VERBOSE)
-
-# Compiled regular expression that matches a single line of output produced by
-# the default log format with milliseconds=True.
-PATTERN_INCLUDING_MILLISECONDS = re.compile(r'''
-    (?P<date> \d{4}-\d{2}-\d{2} )
-    \s (?P<time> \d{2}:\d{2}:\d{2},\d{3} )
-    \s (?P<hostname> \S+ )
-    \s (?P<logger_name> \w+ )
-    \[ (?P<process_id> \d+ ) \]
-    \s (?P<severity> [A-Z]+ )
-    \s (?P<message> .* )
-''', re.VERBOSE)
-
-
-def setUpModule():
- """Speed up the tests by disabling the demo's artificial delay."""
- os.environ['COLOREDLOGS_DEMO_DELAY'] = '0'
- coloredlogs.demo.DEMO_DELAY = 0
-
-
-class ColoredLogsTestCase(TestCase):
-
- """Container for the `coloredlogs` tests."""
-
- def find_system_log(self):
- """Find the system log file or skip the current test."""
- filename = ('/var/log/system.log' if sys.platform == 'darwin' else (
- '/var/log/syslog' if 'linux' in sys.platform else None
- ))
- if not filename:
- self.skipTest("Location of system log file unknown!")
- elif not os.path.isfile(filename):
- self.skipTest("System log file not found! (%s)" % filename)
- elif not os.access(filename, os.R_OK):
- self.skipTest("Insufficient permissions to read system log file! (%s)" % filename)
- else:
- return filename
-
- def test_level_to_number(self):
- """Make sure :func:`level_to_number()` works as intended."""
- # Make sure the default levels are translated as expected.
- assert level_to_number('debug') == logging.DEBUG
- assert level_to_number('info') == logging.INFO
- assert level_to_number('warning') == logging.WARNING
- assert level_to_number('error') == logging.ERROR
- assert level_to_number('fatal') == logging.FATAL
- # Make sure bogus level names don't blow up.
- assert level_to_number('bogus-level') == logging.INFO
-
- def test_find_hostname(self):
- """Make sure :func:`~find_hostname()` works correctly."""
- assert find_hostname()
- # Create a temporary file as a placeholder for e.g. /etc/debian_chroot.
- fd, temporary_file = tempfile.mkstemp()
- try:
- with open(temporary_file, 'w') as handle:
- handle.write('first line\n')
- handle.write('second line\n')
- CHROOT_FILES.insert(0, temporary_file)
- # Make sure the chroot file is being read.
- assert find_hostname() == 'first line'
- finally:
- # Clean up.
- CHROOT_FILES.pop(0)
- os.unlink(temporary_file)
- # Test that unreadable chroot files don't break coloredlogs.
- try:
- CHROOT_FILES.insert(0, temporary_file)
- # Make sure that a usable value is still produced.
- assert find_hostname()
- finally:
- # Clean up.
- CHROOT_FILES.pop(0)
-
- def test_host_name_filter(self):
- """Make sure :func:`install()` integrates with :class:`~coloredlogs.HostNameFilter()`."""
- install(fmt='%(hostname)s')
- with CaptureOutput() as capturer:
- logging.info("A truly insignificant message ..")
- output = capturer.get_text()
- assert find_hostname() in output
-
- def test_program_name_filter(self):
- """Make sure :func:`install()` integrates with :class:`~coloredlogs.ProgramNameFilter()`."""
- install(fmt='%(programname)s')
- with CaptureOutput() as capturer:
- logging.info("A truly insignificant message ..")
- output = capturer.get_text()
- assert find_program_name() in output
-
- def test_username_filter(self):
- """Make sure :func:`install()` integrates with :class:`~coloredlogs.UserNameFilter()`."""
- install(fmt='%(username)s')
- with CaptureOutput() as capturer:
- logging.info("A truly insignificant message ..")
- output = capturer.get_text()
- assert find_username() in output
-
- def test_system_logging(self):
- """Make sure the :class:`coloredlogs.syslog.SystemLogging` context manager works."""
- system_log_file = self.find_system_log()
- expected_message = random_string(50)
- with SystemLogging(programname='coloredlogs-test-suite') as syslog:
- if not syslog:
- return self.skipTest("couldn't connect to syslog daemon")
- # When I tried out the system logging support on macOS 10.13.1 on
- # 2018-01-05 I found that while WARNING and ERROR messages show up
- # in the system log DEBUG and INFO messages don't. This explains
- # the importance of the level of the log message below.
- logging.error("%s", expected_message)
- # Retry the following assertion (for up to 60 seconds) to give the
- # logging daemon time to write our log message to disk. This
- # appears to be needed on MacOS workers on Travis CI, see:
- # https://travis-ci.org/xolox/python-coloredlogs/jobs/325245853
- retry(lambda: check_contents(system_log_file, expected_message, True))
-
- def test_system_logging_override(self):
- """Make sure the :class:`coloredlogs.syslog.is_syslog_supported` respects the override."""
- with PatchedItem(os.environ, 'COLOREDLOGS_SYSLOG', 'true'):
- assert is_syslog_supported() is True
- with PatchedItem(os.environ, 'COLOREDLOGS_SYSLOG', 'false'):
- assert is_syslog_supported() is False
-
- def test_syslog_shortcut_simple(self):
- """Make sure that ``coloredlogs.install(syslog=True)`` works."""
- system_log_file = self.find_system_log()
- expected_message = random_string(50)
- with cleanup_handlers():
- # See test_system_logging() for the importance of this log level.
- coloredlogs.install(syslog=True)
- logging.error("%s", expected_message)
- # See the comments in test_system_logging() on why this is retried.
- retry(lambda: check_contents(system_log_file, expected_message, True))
-
- def test_syslog_shortcut_enhanced(self):
- """Make sure that ``coloredlogs.install(syslog='warning')`` works."""
- system_log_file = self.find_system_log()
- the_expected_message = random_string(50)
- not_an_expected_message = random_string(50)
- with cleanup_handlers():
- # See test_system_logging() for the importance of these log levels.
- coloredlogs.install(syslog='error')
- logging.warning("%s", not_an_expected_message)
- logging.error("%s", the_expected_message)
- # See the comments in test_system_logging() on why this is retried.
- retry(lambda: check_contents(system_log_file, the_expected_message, True))
- retry(lambda: check_contents(system_log_file, not_an_expected_message, False))
-
- def test_name_normalization(self):
- """Make sure :class:`~coloredlogs.NameNormalizer` works as intended."""
- nn = NameNormalizer()
- for canonical_name in ['debug', 'info', 'warning', 'error', 'critical']:
- assert nn.normalize_name(canonical_name) == canonical_name
- assert nn.normalize_name(canonical_name.upper()) == canonical_name
- assert nn.normalize_name('warn') == 'warning'
- assert nn.normalize_name('fatal') == 'critical'
-
- def test_style_parsing(self):
- """Make sure :func:`~coloredlogs.parse_encoded_styles()` works as intended."""
- encoded_styles = 'debug=green;warning=yellow;error=red;critical=red,bold'
- decoded_styles = parse_encoded_styles(encoded_styles, normalize_key=lambda k: k.upper())
- assert sorted(decoded_styles.keys()) == sorted(['debug', 'warning', 'error', 'critical'])
- assert decoded_styles['debug']['color'] == 'green'
- assert decoded_styles['warning']['color'] == 'yellow'
- assert decoded_styles['error']['color'] == 'red'
- assert decoded_styles['critical']['color'] == 'red'
- assert decoded_styles['critical']['bold'] is True
-
- def test_is_verbose(self):
- """Make sure is_verbose() does what it should :-)."""
- set_level(logging.INFO)
- assert not is_verbose()
- set_level(logging.DEBUG)
- assert is_verbose()
- set_level(logging.VERBOSE)
- assert is_verbose()
-
- def test_increase_verbosity(self):
- """Make sure increase_verbosity() respects default and custom levels."""
- # Start from a known state.
- set_level(logging.INFO)
- assert get_level() == logging.INFO
- # INFO -> VERBOSE.
- increase_verbosity()
- assert get_level() == logging.VERBOSE
- # VERBOSE -> DEBUG.
- increase_verbosity()
- assert get_level() == logging.DEBUG
- # DEBUG -> SPAM.
- increase_verbosity()
- assert get_level() == logging.SPAM
- # SPAM -> NOTSET.
- increase_verbosity()
- assert get_level() == logging.NOTSET
- # NOTSET -> NOTSET.
- increase_verbosity()
- assert get_level() == logging.NOTSET
-
- def test_decrease_verbosity(self):
- """Make sure decrease_verbosity() respects default and custom levels."""
- # Start from a known state.
- set_level(logging.INFO)
- assert get_level() == logging.INFO
- # INFO -> NOTICE.
- decrease_verbosity()
- assert get_level() == logging.NOTICE
- # NOTICE -> WARNING.
- decrease_verbosity()
- assert get_level() == logging.WARNING
- # WARNING -> SUCCESS.
- decrease_verbosity()
- assert get_level() == logging.SUCCESS
- # SUCCESS -> ERROR.
- decrease_verbosity()
- assert get_level() == logging.ERROR
- # ERROR -> CRITICAL.
- decrease_verbosity()
- assert get_level() == logging.CRITICAL
- # CRITICAL -> CRITICAL.
- decrease_verbosity()
- assert get_level() == logging.CRITICAL
-
- def test_level_discovery(self):
- """Make sure find_defined_levels() always reports the levels defined in Python's standard library."""
- defined_levels = find_defined_levels()
- level_values = defined_levels.values()
- for number in (0, 10, 20, 30, 40, 50):
- assert number in level_values
-
- def test_walk_propagation_tree(self):
- """Make sure walk_propagation_tree() properly walks the tree of loggers."""
- root, parent, child, grand_child = self.get_logger_tree()
- # Check the default mode of operation.
- loggers = list(walk_propagation_tree(grand_child))
- assert loggers == [grand_child, child, parent, root]
- # Now change the propagation (non-default mode of operation).
- child.propagate = False
- loggers = list(walk_propagation_tree(grand_child))
- assert loggers == [grand_child, child]
-
- def test_find_handler(self):
- """Make sure find_handler() works as intended."""
- root, parent, child, grand_child = self.get_logger_tree()
- # Add some handlers to the tree.
- stream_handler = logging.StreamHandler()
- syslog_handler = logging.handlers.SysLogHandler()
- child.addHandler(stream_handler)
- parent.addHandler(syslog_handler)
- # Make sure the first matching handler is returned.
- matched_handler, matched_logger = find_handler(grand_child, lambda h: isinstance(h, logging.Handler))
- assert matched_handler is stream_handler
- # Make sure the first matching handler of the given type is returned.
- matched_handler, matched_logger = find_handler(child, lambda h: isinstance(h, logging.handlers.SysLogHandler))
- assert matched_handler is syslog_handler
-
- def get_logger_tree(self):
- """Create and return a tree of loggers."""
- # Get the root logger.
- root = logging.getLogger()
- # Create a top level logger for ourselves.
- parent_name = random_string()
- parent = logging.getLogger(parent_name)
- # Create a child logger.
- child_name = '%s.%s' % (parent_name, random_string())
- child = logging.getLogger(child_name)
- # Create a grand child logger.
- grand_child_name = '%s.%s' % (child_name, random_string())
- grand_child = logging.getLogger(grand_child_name)
- return root, parent, child, grand_child
-
- def test_support_for_milliseconds(self):
- """Make sure milliseconds are hidden by default but can be easily enabled."""
- # Check that the default log format doesn't include milliseconds.
- stream = StringIO()
- install(reconfigure=True, stream=stream)
- logging.info("This should not include milliseconds.")
- assert all(map(PLAIN_TEXT_PATTERN.match, stream.getvalue().splitlines()))
- # Check that milliseconds can be enabled via a shortcut.
- stream = StringIO()
- install(milliseconds=True, reconfigure=True, stream=stream)
- logging.info("This should include milliseconds.")
- assert all(map(PATTERN_INCLUDING_MILLISECONDS.match, stream.getvalue().splitlines()))
-
- def test_support_for_milliseconds_directive(self):
- """Make sure milliseconds using the ``%f`` directive are supported."""
- stream = StringIO()
- install(reconfigure=True, stream=stream, datefmt='%Y-%m-%dT%H:%M:%S.%f%z')
- logging.info("This should be timestamped according to #45.")
- assert re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}[+-]\d{4}\s', stream.getvalue())
-
- def test_plain_text_output_format(self):
- """Inspect the plain text output of coloredlogs."""
- logger = VerboseLogger(random_string(25))
- stream = StringIO()
- install(level=logging.NOTSET, logger=logger, stream=stream)
- # Test that filtering on severity works.
- logger.setLevel(logging.INFO)
- logger.debug("No one should see this message.")
- assert len(stream.getvalue().strip()) == 0
- # Test that the default output format looks okay in plain text.
- logger.setLevel(logging.NOTSET)
- for method, severity in ((logger.debug, 'DEBUG'),
- (logger.info, 'INFO'),
- (logger.verbose, 'VERBOSE'),
- (logger.warning, 'WARNING'),
- (logger.error, 'ERROR'),
- (logger.critical, 'CRITICAL')):
- # XXX Workaround for a regression in Python 3.7 caused by the
- # Logger.isEnabledFor() method using stale cache entries. If we
- # don't clear the cache then logger.isEnabledFor(logging.DEBUG)
- # returns False and no DEBUG message is emitted.
- try:
- logger._cache.clear()
- except AttributeError:
- pass
- # Prepare the text.
- text = "This is a message with severity %r." % severity.lower()
- # Log the message with the given severity.
- method(text)
- # Get the line of output generated by the handler.
- output = stream.getvalue()
- lines = output.splitlines()
- last_line = lines[-1]
- assert text in last_line
- assert severity in last_line
- assert PLAIN_TEXT_PATTERN.match(last_line)
-
- def test_dynamic_stderr_lookup(self):
- """Make sure coloredlogs.install() uses StandardErrorHandler when possible."""
- coloredlogs.install()
- # Redirect sys.stderr to a temporary buffer.
- initial_stream = StringIO()
- initial_text = "Which stream will receive this text?"
- with PatchedAttribute(sys, 'stderr', initial_stream):
- logging.info(initial_text)
- assert initial_text in initial_stream.getvalue()
- # Redirect sys.stderr again, to a different destination.
- subsequent_stream = StringIO()
- subsequent_text = "And which stream will receive this other text?"
- with PatchedAttribute(sys, 'stderr', subsequent_stream):
- logging.info(subsequent_text)
- assert subsequent_text in subsequent_stream.getvalue()
-
- def test_force_enable(self):
- """Make sure ANSI escape sequences can be forced (bypassing auto-detection)."""
- interpreter = subprocess.Popen([
- sys.executable, "-c", ";".join([
- "import coloredlogs, logging",
- "coloredlogs.install(isatty=True)",
- "logging.info('Hello world')",
- ]),
- ], stderr=subprocess.PIPE)
- stdout, stderr = interpreter.communicate()
- assert ANSI_CSI in stderr.decode('UTF-8')
-
- def test_auto_disable(self):
- """
- Make sure ANSI escape sequences are not emitted when logging output is being redirected.
-
- This is a regression test for https://github.com/xolox/python-coloredlogs/issues/100.
-
- It works as follows:
-
- 1. We mock an interactive terminal using 'capturer' to ensure that this
- test works inside test drivers that capture output (like pytest).
-
- 2. We launch a subprocess (to ensure a clean process state) where
- stderr is captured but stdout is not, emulating issue #100.
-
- 3. The output captured on stderr contained ANSI escape sequences after
- this test was written and before the issue was fixed, so now this
- serves as a regression test for issue #100.
- """
- with CaptureOutput():
- interpreter = subprocess.Popen([
- sys.executable, "-c", ";".join([
- "import coloredlogs, logging",
- "coloredlogs.install()",
- "logging.info('Hello world')",
- ]),
- ], stderr=subprocess.PIPE)
- stdout, stderr = interpreter.communicate()
- assert ANSI_CSI not in stderr.decode('UTF-8')
-
- def test_env_disable(self):
- """Make sure ANSI escape sequences can be disabled using ``$NO_COLOR``."""
- with PatchedItem(os.environ, 'NO_COLOR', 'I like monochrome'):
- with CaptureOutput() as capturer:
- subprocess.check_call([
- sys.executable, "-c", ";".join([
- "import coloredlogs, logging",
- "coloredlogs.install()",
- "logging.info('Hello world')",
- ]),
- ])
- output = capturer.get_text()
- assert ANSI_CSI not in output
-
- def test_html_conversion(self):
- """Check the conversion from ANSI escape sequences to HTML."""
- # Check conversion of colored text.
- for color_name, ansi_code in ANSI_COLOR_CODES.items():
- ansi_encoded_text = 'plain text followed by %s text' % ansi_wrap(color_name, color=color_name)
- expected_html = format(
- 'plain text followed by {name} text
',
- css=EIGHT_COLOR_PALETTE[ansi_code], name=color_name,
- )
- self.assertEqual(expected_html, convert(ansi_encoded_text))
- # Check conversion of bright colored text.
- expected_html = 'bright yellow
'
- self.assertEqual(expected_html, convert(ansi_wrap('bright yellow', color='yellow', bright=True)))
- # Check conversion of text with a background color.
- expected_html = 'red background
'
- self.assertEqual(expected_html, convert(ansi_wrap('red background', background='red')))
- # Check conversion of text with a bright background color.
- expected_html = 'bright red background
'
- self.assertEqual(expected_html, convert(ansi_wrap('bright red background', background='red', bright=True)))
- # Check conversion of text that uses the 256 color mode palette as a foreground color.
- expected_html = '256 color mode foreground
'
- self.assertEqual(expected_html, convert(ansi_wrap('256 color mode foreground', color=214)))
- # Check conversion of text that uses the 256 color mode palette as a background color.
- expected_html = '256 color mode background
'
- self.assertEqual(expected_html, convert(ansi_wrap('256 color mode background', background=124)))
- # Check that invalid 256 color mode indexes don't raise exceptions.
- expected_html = 'plain text expected
'
- self.assertEqual(expected_html, convert('\x1b[38;5;256mplain text expected\x1b[0m'))
- # Check conversion of bold text.
- expected_html = 'bold text
'
- self.assertEqual(expected_html, convert(ansi_wrap('bold text', bold=True)))
- # Check conversion of underlined text.
- expected_html = 'underlined text
'
- self.assertEqual(expected_html, convert(ansi_wrap('underlined text', underline=True)))
- # Check conversion of strike-through text.
- expected_html = 'strike-through text
'
- self.assertEqual(expected_html, convert(ansi_wrap('strike-through text', strike_through=True)))
- # Check conversion of inverse text.
- expected_html = 'inverse
'
- self.assertEqual(expected_html, convert(ansi_wrap('inverse', color='yellow', inverse=True)))
- # Check conversion of URLs.
- for sample_text in 'www.python.org', 'http://coloredlogs.rtfd.org', 'https://coloredlogs.rtfd.org':
- sample_url = sample_text if '://' in sample_text else ('http://' + sample_text)
- expected_html = '%s
' % (sample_url, sample_text)
- self.assertEqual(expected_html, convert(sample_text))
- # Check that the capture pattern for URLs doesn't match ANSI escape
- # sequences and also check that the short hand for the 0 reset code is
- # supported. These are tests for regressions of bugs found in
- # coloredlogs <= 8.0.
- reset_short_hand = '\x1b[0m'
- blue_underlined = ansi_style(color='blue', underline=True)
- ansi_encoded_text = '<%shttps://coloredlogs.readthedocs.io%s>' % (blue_underlined, reset_short_hand)
- expected_html = (
- '<'
- ''
- 'https://coloredlogs.readthedocs.io'
- ' >
'
- )
- self.assertEqual(expected_html, convert(ansi_encoded_text))
-
- def test_output_interception(self):
- """Test capturing of output from external commands."""
- expected_output = 'testing, 1, 2, 3 ..'
- actual_output = capture(['echo', expected_output])
- assert actual_output.strip() == expected_output.strip()
-
- def test_enable_colored_cron_mailer(self):
- """Test that automatic ANSI to HTML conversion when running under ``cron`` can be enabled."""
- with PatchedItem(os.environ, 'CONTENT_TYPE', 'text/html'):
- with ColoredCronMailer() as mailer:
- assert mailer.is_enabled
-
- def test_disable_colored_cron_mailer(self):
- """Test that automatic ANSI to HTML conversion when running under ``cron`` can be disabled."""
- with PatchedItem(os.environ, 'CONTENT_TYPE', 'text/plain'):
- with ColoredCronMailer() as mailer:
- assert not mailer.is_enabled
-
- def test_auto_install(self):
- """Test :func:`coloredlogs.auto_install()`."""
- needle = random_string()
- command_line = [sys.executable, '-c', 'import logging; logging.info(%r)' % needle]
- # Sanity check that log messages aren't enabled by default.
- with CaptureOutput() as capturer:
- os.environ['COLOREDLOGS_AUTO_INSTALL'] = 'false'
- subprocess.check_call(command_line)
- output = capturer.get_text()
- assert needle not in output
- # Test that the $COLOREDLOGS_AUTO_INSTALL environment variable can be
- # used to automatically call coloredlogs.install() during initialization.
- with CaptureOutput() as capturer:
- os.environ['COLOREDLOGS_AUTO_INSTALL'] = 'true'
- subprocess.check_call(command_line)
- output = capturer.get_text()
- assert needle in output
-
- def test_cli_demo(self):
- """Test the command line colored logging demonstration."""
- with CaptureOutput() as capturer:
- main('coloredlogs', '--demo')
- output = capturer.get_text()
- # Make sure the output contains all of the expected logging level names.
- for name in 'debug', 'info', 'warning', 'error', 'critical':
- assert name.upper() in output
-
- def test_cli_conversion(self):
- """Test the command line HTML conversion."""
- output = main('coloredlogs', '--convert', 'coloredlogs', '--demo', capture=True)
- # Make sure the output is encoded as HTML.
- assert '``. By not emitting the wrapper element when no other
- HTML is generated, cron will not send out an email.
- """
- output = main('coloredlogs', '--convert', 'true', capture=True)
- assert not output.strip()
-
- def test_implicit_usage_message(self):
- """Test that the usage message is shown when no actions are given."""
- assert 'Usage:' in main('coloredlogs', capture=True)
-
- def test_explicit_usage_message(self):
- """Test that the usage message is shown when ``--help`` is given."""
- assert 'Usage:' in main('coloredlogs', '--help', capture=True)
-
- def test_custom_record_factory(self):
- """
- Test that custom LogRecord factories are supported.
-
- This test is a bit convoluted because the logging module suppresses
- exceptions. We monkey patch the method suspected of encountering
- exceptions so that we can tell after it was called whether any
- exceptions occurred (despite the exceptions not propagating).
- """
- if not hasattr(logging, 'getLogRecordFactory'):
- return self.skipTest("this test requires Python >= 3.2")
-
- exceptions = []
- original_method = ColoredFormatter.format
- original_factory = logging.getLogRecordFactory()
-
- def custom_factory(*args, **kwargs):
- record = original_factory(*args, **kwargs)
- record.custom_attribute = 0xdecafbad
- return record
-
- def custom_method(*args, **kw):
- try:
- return original_method(*args, **kw)
- except Exception as e:
- exceptions.append(e)
- raise
-
- with PatchedAttribute(ColoredFormatter, 'format', custom_method):
- logging.setLogRecordFactory(custom_factory)
- try:
- demonstrate_colored_logging()
- finally:
- logging.setLogRecordFactory(original_factory)
-
- # Ensure that no exceptions were triggered.
- assert not exceptions
-
-
-def check_contents(filename, contents, match):
- """Check if a line in a file contains an expected string."""
- with open(filename) as handle:
- assert any(contents in line for line in handle) == match
-
-
-def main(*arguments, **options):
- """Wrap the command line interface to make it easier to test."""
- capture = options.get('capture', False)
- saved_argv = sys.argv
- saved_stdout = sys.stdout
- try:
- sys.argv = arguments
- if capture:
- sys.stdout = StringIO()
- coloredlogs.cli.main()
- if capture:
- return sys.stdout.getvalue()
- finally:
- sys.argv = saved_argv
- sys.stdout = saved_stdout
-
-
-@contextlib.contextmanager
-def cleanup_handlers():
- """Context manager to cleanup output handlers."""
- # There's nothing to set up so we immediately yield control.
- yield
- # After the with block ends we cleanup any output handlers.
- for match_func in match_stream_handler, match_syslog_handler:
- handler, logger = find_handler(logging.getLogger(), match_func)
- if handler and logger:
- logger.removeHandler(handler)
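
For orientation, the public API these tests exercise boils down to coloredlogs.install() plus a handful of verbosity helpers. A minimal usage sketch (the level and format string below are arbitrary example choices):

```python
# Minimal coloredlogs usage mirroring what the test suite above covers; the
# level and fmt values are example choices, not defaults.
import logging
import coloredlogs

logger = logging.getLogger(__name__)

# install() puts a colored stream handler (writing to stderr) on the root
# logger and adds the filters that make %(hostname)s etc. available.
coloredlogs.install(
    level='DEBUG',
    fmt='%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s',
)

logger.debug("debug message")
logger.info("info message")
logger.warning("warning message")
```
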
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/tensor.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/tensor.py
deleted file mode 100644
index 19a89cbed3fe11f3b9e40928071862bb4100d0d1..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/tensor.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from typing import Union
-
-from docarray.typing.tensor.ndarray import NdArray
-from docarray.utils._internal.misc import is_tf_available, is_torch_available
-
-torch_available = is_torch_available()
-if torch_available:
- from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
-
-
-tf_available = is_tf_available()
-if tf_available:
- from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
-
-
-AnyTensor = Union[NdArray]
-if torch_available and tf_available:
- AnyTensor = Union[NdArray, TorchTensor, TensorFlowTensor] # type: ignore
-elif torch_available:
- AnyTensor = Union[NdArray, TorchTensor] # type: ignore
-elif tf_available:
- AnyTensor = Union[NdArray, TensorFlowTensor] # type: ignore
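
The module above widens the AnyTensor alias depending on which optional backends import successfully. The same optional-dependency pattern in isolation, with made-up names rather than docarray's, looks like this:

```python
# Generic sketch of the optional-dependency Union pattern used above; the
# names (AnyArray, HAS_TORCH, describe) are illustrative, not docarray's API.
from typing import Union

import numpy as np

try:
    import torch
    HAS_TORCH = True
except ImportError:
    HAS_TORCH = False

# Start from the always-available backend and widen the alias if extras exist.
AnyArray = Union[np.ndarray]
if HAS_TORCH:
    AnyArray = Union[np.ndarray, torch.Tensor]  # type: ignore[misc]


def describe(x: "AnyArray") -> str:
    return f"{type(x).__name__} with shape {tuple(x.shape)}"


print(describe(np.zeros((2, 3))))
```
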
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/url/text_url.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/url/text_url.py
deleted file mode 100644
index 86da87790e6e819db063354c3ff6bbf9ce876993..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/url/text_url.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from typing import Optional, TypeVar
-
-from docarray.typing.proto_register import _register_proto
-from docarray.typing.url.any_url import AnyUrl
-
-T = TypeVar('T', bound='TextUrl')
-
-
-@_register_proto(proto_type_name='text_url')
-class TextUrl(AnyUrl):
- """
- URL to a text file.
- Can be remote (web) URL, or a local file path.
- """
-
- def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
- """
- Load the text file into a string.
-
-
- ---
-
- ```python
- from docarray import BaseDoc
- from docarray.typing import TextUrl
-
-
- class MyDoc(BaseDoc):
- remote_url: TextUrl
-
-
- doc = MyDoc(
- remote_url='https://de.wikipedia.org/wiki/Brixen',
- )
-
- remote_txt = doc.remote_url.load()
- ```
-
- ---
-
-
- :param timeout: timeout (sec) for urlopen network request.
- Only relevant if URL is not local
- :param charset: decoding charset; may be any character set registered with IANA
- :return: the text file content
- """
- _bytes = self.load_bytes(timeout=timeout)
- return _bytes.decode(charset)
diff --git a/spaces/Syrahealthorg/HealthCare_workforce/style.css b/spaces/Syrahealthorg/HealthCare_workforce/style.css
deleted file mode 100644
index e6fa587ae837e8806754c530f0d19b48fd5bf657..0000000000000000000000000000000000000000
--- a/spaces/Syrahealthorg/HealthCare_workforce/style.css
+++ /dev/null
@@ -1,14 +0,0 @@
-.leftimage{
- padding-top:75px;
- margin-left:250px;
-}
-.rightimage{
- margin-right:260px;
- margin-top:15px;
-}
-#suggestheight{
-    height:360px;
-}
-.height{
- height:60px;
-}
\ No newline at end of file
diff --git a/spaces/THUDM/CogVideo/style.css b/spaces/THUDM/CogVideo/style.css
deleted file mode 100644
index 8e4d705815014cffc50ff1d4c5720797c6206cab..0000000000000000000000000000000000000000
--- a/spaces/THUDM/CogVideo/style.css
+++ /dev/null
@@ -1,7 +0,0 @@
-h1 {
- text-align: center;
-}
-img#visitor-badge {
- display: block;
- margin: auto;
-}
diff --git a/spaces/TacosHero/flax-midjourney-v4-diffusion-2/README.md b/spaces/TacosHero/flax-midjourney-v4-diffusion-2/README.md
deleted file mode 100644
index 5ff8d70cb4cc51b9f7bea29ec7f80ce4dcae3a65..0000000000000000000000000000000000000000
--- a/spaces/TacosHero/flax-midjourney-v4-diffusion-2/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Flax Midjourney V4 Diffusion 2
-emoji: 🌍
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/progress_bar.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/progress_bar.py
deleted file mode 100644
index 67361df2e49d48dd56c91e291ba92553e9afe344..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/progress_bar.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import math
-from functools import lru_cache
-from time import monotonic
-from typing import Iterable, List, Optional
-
-from .color import Color, blend_rgb
-from .color_triplet import ColorTriplet
-from .console import Console, ConsoleOptions, RenderResult
-from .jupyter import JupyterMixin
-from .measure import Measurement
-from .segment import Segment
-from .style import Style, StyleType
-
-# Number of characters before 'pulse' animation repeats
-PULSE_SIZE = 20
-
-
-class ProgressBar(JupyterMixin):
- """Renders a (progress) bar. Used by rich.progress.
-
- Args:
- total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation.
- completed (float, optional): Number of steps completed. Defaults to 0.
- width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
- pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed.
- style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
- complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
- finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
- pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
- animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time.
- """
-
- def __init__(
- self,
- total: Optional[float] = 100.0,
- completed: float = 0,
- width: Optional[int] = None,
- pulse: bool = False,
- style: StyleType = "bar.back",
- complete_style: StyleType = "bar.complete",
- finished_style: StyleType = "bar.finished",
- pulse_style: StyleType = "bar.pulse",
- animation_time: Optional[float] = None,
- ):
- self.total = total
- self.completed = completed
- self.width = width
- self.pulse = pulse
- self.style = style
- self.complete_style = complete_style
- self.finished_style = finished_style
- self.pulse_style = pulse_style
- self.animation_time = animation_time
-
- self._pulse_segments: Optional[List[Segment]] = None
-
- def __repr__(self) -> str:
-        return f"<Bar {self.completed!r} of {self.total!r}>"
-
- @property
- def percentage_completed(self) -> Optional[float]:
- """Calculate percentage complete."""
- if self.total is None:
- return None
- completed = (self.completed / self.total) * 100.0
- completed = min(100, max(0.0, completed))
- return completed
-
- @lru_cache(maxsize=16)
- def _get_pulse_segments(
- self,
- fore_style: Style,
- back_style: Style,
- color_system: str,
- no_color: bool,
- ascii: bool = False,
- ) -> List[Segment]:
- """Get a list of segments to render a pulse animation.
-
- Returns:
- List[Segment]: A list of segments, one segment per character.
- """
- bar = "-" if ascii else "━"
- segments: List[Segment] = []
- if color_system not in ("standard", "eight_bit", "truecolor") or no_color:
- segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2)
- segments += [Segment(" " if no_color else bar, back_style)] * (
- PULSE_SIZE - (PULSE_SIZE // 2)
- )
- return segments
-
- append = segments.append
- fore_color = (
- fore_style.color.get_truecolor()
- if fore_style.color
- else ColorTriplet(255, 0, 255)
- )
- back_color = (
- back_style.color.get_truecolor()
- if back_style.color
- else ColorTriplet(0, 0, 0)
- )
- cos = math.cos
- pi = math.pi
- _Segment = Segment
- _Style = Style
- from_triplet = Color.from_triplet
-
- for index in range(PULSE_SIZE):
- position = index / PULSE_SIZE
- fade = 0.5 + cos((position * pi * 2)) / 2.0
- color = blend_rgb(fore_color, back_color, cross_fade=fade)
- append(_Segment(bar, _Style(color=from_triplet(color))))
- return segments
-
- def update(self, completed: float, total: Optional[float] = None) -> None:
- """Update progress with new values.
-
- Args:
- completed (float): Number of steps completed.
- total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None.
- """
- self.completed = completed
- self.total = total if total is not None else self.total
-
- def _render_pulse(
- self, console: Console, width: int, ascii: bool = False
- ) -> Iterable[Segment]:
- """Renders the pulse animation.
-
- Args:
- console (Console): Console instance.
- width (int): Width in characters of pulse animation.
-
- Returns:
- RenderResult: [description]
-
- Yields:
- Iterator[Segment]: Segments to render pulse
- """
- fore_style = console.get_style(self.pulse_style, default="white")
- back_style = console.get_style(self.style, default="black")
-
- pulse_segments = self._get_pulse_segments(
- fore_style, back_style, console.color_system, console.no_color, ascii=ascii
- )
- segment_count = len(pulse_segments)
- current_time = (
- monotonic() if self.animation_time is None else self.animation_time
- )
- segments = pulse_segments * (int(width / segment_count) + 2)
- offset = int(-current_time * 15) % segment_count
- segments = segments[offset : offset + width]
- yield from segments
-
- def __rich_console__(
- self, console: Console, options: ConsoleOptions
- ) -> RenderResult:
-
- width = min(self.width or options.max_width, options.max_width)
- ascii = options.legacy_windows or options.ascii_only
- should_pulse = self.pulse or self.total is None
- if should_pulse:
- yield from self._render_pulse(console, width, ascii=ascii)
- return
-
- completed: Optional[float] = (
- min(self.total, max(0, self.completed)) if self.total is not None else None
- )
-
- bar = "-" if ascii else "━"
- half_bar_right = " " if ascii else "╸"
- half_bar_left = " " if ascii else "╺"
- complete_halves = (
- int(width * 2 * completed / self.total)
- if self.total and completed is not None
- else width * 2
- )
- bar_count = complete_halves // 2
- half_bar_count = complete_halves % 2
- style = console.get_style(self.style)
- is_finished = self.total is None or self.completed >= self.total
- complete_style = console.get_style(
- self.finished_style if is_finished else self.complete_style
- )
- _Segment = Segment
- if bar_count:
- yield _Segment(bar * bar_count, complete_style)
- if half_bar_count:
- yield _Segment(half_bar_right * half_bar_count, complete_style)
-
- if not console.no_color:
- remaining_bars = width - bar_count - half_bar_count
- if remaining_bars and console.color_system is not None:
- if not half_bar_count and bar_count:
- yield _Segment(half_bar_left, style)
- remaining_bars -= 1
- if remaining_bars:
- yield _Segment(bar * remaining_bars, style)
-
- def __rich_measure__(
- self, console: Console, options: ConsoleOptions
- ) -> Measurement:
- return (
- Measurement(self.width, self.width)
- if self.width is not None
- else Measurement(4, options.max_width)
- )
-
-
-if __name__ == "__main__": # pragma: no cover
- console = Console()
- bar = ProgressBar(width=50, total=100)
-
- import time
-
- console.show_cursor(False)
- for n in range(0, 101, 1):
- bar.update(n)
- console.print(bar)
- console.file.write("\r")
- time.sleep(0.05)
- console.show_cursor(True)
- console.print()
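
For reference, a minimal sketch of the indeterminate ("pulse") path implemented by `_render_pulse` above. The import path assumes this module lives at its upstream location in the Rich library (`rich.progress_bar`).

```python
# Minimal sketch: a pulsing (indeterminate) bar; assumes the module is importable
# as rich.progress_bar (its upstream home in the Rich library).
from rich.console import Console
from rich.progress_bar import ProgressBar

console = Console()
# total=None (or pulse=True) makes __rich_console__ delegate to _render_pulse
console.print(ProgressBar(width=50, total=None))
```
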
diff --git a/spaces/Techis/resume-screening-tool/splitter.py b/spaces/Techis/resume-screening-tool/splitter.py
deleted file mode 100644
index 9b618635d3350d57b30c8a6eafdfcaf1be287693..0000000000000000000000000000000000000000
--- a/spaces/Techis/resume-screening-tool/splitter.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import docx2txt
-import re
-import string
-def split_string(path):
- doc = docx2txt.process(path)
- # strip punctuation from the extracted text, then split on whitespace
- res = re.sub('[' + re.escape(string.punctuation) + ']', '', doc).split()
- return res
\ No newline at end of file
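
A minimal usage sketch for `split_string`; the file path below is hypothetical and `docx2txt` must be installed.

```python
# Minimal usage sketch; the path is illustrative only.
from splitter import split_string

tokens = split_string("resumes/example_resume.docx")  # hypothetical .docx path
print(tokens[:10])  # first ten punctuation-free tokens extracted from the document
```
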
diff --git a/spaces/Tonic/BibleScriptures/README.md b/spaces/Tonic/BibleScriptures/README.md
deleted file mode 100644
index 0f66b5fa62356e0cbbfef6fe383e872dc638273f..0000000000000000000000000000000000000000
--- a/spaces/Tonic/BibleScriptures/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 'Bible Scriptures '
-emoji: 👑✨
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.46.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/TushDeMort/yolo/README.md b/spaces/TushDeMort/yolo/README.md
deleted file mode 100644
index bec8e94f10505bd91034996d82df5222a5a51a7b..0000000000000000000000000000000000000000
--- a/spaces/TushDeMort/yolo/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Yolo
-emoji: 🌍
-colorFrom: yellow
-colorTo: purple
-sdk: docker
-pinned: false
-license: other
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/User1342/WatchTower/Pinpoint/Aggregator_TfIdf.py b/spaces/User1342/WatchTower/Pinpoint/Aggregator_TfIdf.py
deleted file mode 100644
index 7f10ef3c42a68eb1a8f40190c4b7b7c876071b82..0000000000000000000000000000000000000000
--- a/spaces/User1342/WatchTower/Pinpoint/Aggregator_TfIdf.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from sklearn.feature_extraction.text import TfidfVectorizer
-
-from Pinpoint.Logger import *
-
-
-class tf_idf_aggregator():
- """
- A wrapper class around SKlearn for retrieving TF-IDF scores.
- """
-
- def get_tf_idf_scores(self, ngrams_vocabulary, corpus_data=None, file_name_to_read=None):
- """
- Used to generate a TF IDF score based of a vocabulary of Ngrams and a data corpus.
- :param ngrams_vocabulary:
- :param corpus_data:
- :param file_name_to_read:
- :return: a dictionary of the pairing name and their score
- """
- logger.print_message("Getting TF IDF scores")
-
- if corpus_data is None and file_name_to_read is None:
- raise Exception("No data supplied to retrieve n_grams")
-
- if corpus_data is None and file_name_to_read is not None:
- with open(file_name_to_read, 'r') as file_to_read:
- corpus_data = file_to_read.read()
-
- tfidf = TfidfVectorizer(vocabulary=ngrams_vocabulary, stop_words='english', ngram_range=(1, 2))
- tfs = tfidf.fit_transform([corpus_data])
-
- feature_names = tfidf.get_feature_names()
- corpus_index = [n for n in corpus_data]
- rows, cols = tfs.nonzero()
-
- dict_of_scores = {}
-
- for row, col in zip(rows, cols):
- dict_of_scores[feature_names[col]] = tfs[row, col]
- logger.print_message((feature_names[col], corpus_index[row]), tfs[row, col])
-
- return dict_of_scores
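
A minimal usage sketch for the aggregator above (assumes the `Pinpoint` package and its `Logger` module are importable; the corpus and vocabulary are made up for illustration). Note that `TfidfVectorizer.get_feature_names()` used above was removed in scikit-learn 1.2, where `get_feature_names_out()` is its replacement.

```python
# Minimal usage sketch; corpus and vocabulary are illustrative only.
from Pinpoint.Aggregator_TfIdf import tf_idf_aggregator

corpus = "violent rhetoric appears rarely while everyday words appear often"
vocabulary = ["violent rhetoric", "everyday words", "appears"]

aggregator = tf_idf_aggregator()
scores = aggregator.get_tf_idf_scores(vocabulary, corpus_data=corpus)
print(scores)  # {n-gram: tf-idf score} for the vocabulary entries found in the corpus
```
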
diff --git a/spaces/Veucci/turkish-lyric-to-genre/README.md b/spaces/Veucci/turkish-lyric-to-genre/README.md
deleted file mode 100644
index 22cc386c519ccea6b1e6525d44d35ba401df2353..0000000000000000000000000000000000000000
--- a/spaces/Veucci/turkish-lyric-to-genre/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Turkish Lyric To Genre
-emoji: 🐨
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.41.2
-app_file: app.py
-pinned: false
-license: cc-by-nc-4.0
----
\ No newline at end of file
diff --git a/spaces/VickyKira/NASAGPT/client/js/icons.js b/spaces/VickyKira/NASAGPT/client/js/icons.js
deleted file mode 100644
index 84fed38dd35e0d0203370a8314a360d27f350dd6..0000000000000000000000000000000000000000
--- a/spaces/VickyKira/NASAGPT/client/js/icons.js
+++ /dev/null
@@ -1 +0,0 @@
-window.FontAwesomeKitConfig={asyncLoading:{enabled:!1},autoA11y:{enabled:!0},baseUrl:"https://ka-f.fontawesome.com",baseUrlKit:"https://kit-pro.fontawesome.com",detectConflictsUntil:null,iconUploads:{},id:96462084,license:"pro",method:"css",minify:{enabled:!0},token:"d0514f1901",v4FontFaceShim:{enabled:!0},v4shim:{enabled:!0},v5FontFaceShim:{enabled:!0},version:"6.1.1"},function(t){"function"==typeof define&&define.amd?define("kit-loader",t):t()}(function(){"use strict";function t(e){return(t="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(e)}function e(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}function n(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(t);e&&(o=o.filter(function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable})),n.push.apply(n,o)}return n}function o(t){for(var o=1;ot.length)&&(e=t.length);for(var n=0,o=new Array(e);n2&&void 0!==arguments[2]?arguments[2]:function(){},r=e.document||r,i=a.bind(a,r,["fa","fab","fas","far","fal","fad","fak"]),u=Object.keys(t.iconUploads||{}).length>0;t.autoA11y.enabled&&n(i);var f=[{id:"fa-main",addOn:void 0}];t.v4shim&&t.v4shim.enabled&&f.push({id:"fa-v4-shims",addOn:"-v4-shims"}),t.v5FontFaceShim&&t.v5FontFaceShim.enabled&&f.push({id:"fa-v5-font-face",addOn:"-v5-font-face"}),t.v4FontFaceShim&&t.v4FontFaceShim.enabled&&f.push({id:"fa-v4-font-face",addOn:"-v4-font-face"}),u&&f.push({id:"fa-kit-upload",customCss:!0});var s=f.map(function(n){return new F(function(r,i){E(n.customCss?function(t){return t.baseUrlKit+"/"+t.token+"/"+t.id+"/kit-upload.css"}(t):c(t,{addOn:n.addOn,minify:t.minify.enabled}),e).then(function(i){r(function(t,e){var n=e.contentFilter||function(t,e){return t},o=document.createElement("style"),r=document.createTextNode(n(t,e));return o.appendChild(r),o.media="all",e.id&&o.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&o.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),o}(i,o(o({},e),{},{baseUrl:t.baseUrl,version:t.version,id:n.id,contentFilter:function(t,e){return _(t,e.baseUrl,e.version)}})))}).catch(i)})});return F.all(s)}function P(t,e){var n=document.createElement("SCRIPT"),o=document.createTextNode(t);return n.appendChild(o),n.referrerPolicy="strict-origin",e.id&&n.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n}function U(t){var e,n=[],o=document,r=(o.documentElement.doScroll?/^loaded|^c/:/^loaded|^i|^c/).test(o.readyState);r||o.addEventListener("DOMContentLoaded",e=function(){for(o.removeEventListener("DOMContentLoaded",e),r=1;e=n.shift();)e()}),r?setTimeout(t,0):n.push(t)}try{if(window.FontAwesomeKitConfig){var k=window.FontAwesomeKitConfig,L={detectingConflicts:k.detectConflictsUntil&&new Date<=new Date(k.detectConflictsUntil),detectionIgnoreAttr:"data-fa-detection-ignore",fetch:window.fetch,token:k.token,XMLHttpRequest:window.XMLHttpRequest,document:document},I=document.currentScript,T=I?I.parentElement:document.head;(function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},e=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:{};return"js"===t.method?function(t,e){e.autoA11y=t.autoA11y.enabled,"pro"===t.license&&(e.autoFetchSvg=!0,e.fetchSvgFrom=t.baseUrl+"/releases/"+("latest"===t.version?"latest":"v".concat(t.version))+"/svgs",e.fetchUploadedSvgFrom=t.uploadsUrl);var n=[];return t.v4shim.enabled&&n.push(new F(function(n,r){E(c(t,{addOn:"-v4-shims",minify:t.minify.enabled}),e).then(function(t){n(P(t,o(o({},e),{},{id:"fa-v4-shims"})))}).catch(r)})),n.push(new F(function(n,r){E(c(t,{minify:t.minify.enabled}),e).then(function(t){var r=P(t,o(o({},e),{},{id:"fa-main"}));n(function(t,e){var n=e&&void 0!==e.autoFetchSvg?e.autoFetchSvg:void 0,o=e&&void 0!==e.autoA11y?e.autoA11y:void 0;return void 0!==o&&t.setAttribute("data-auto-a11y",o?"true":"false"),n&&(t.setAttributeNode(document.createAttribute("data-auto-fetch-svg")),t.setAttribute("data-fetch-svg-from",e.fetchSvgFrom),t.setAttribute("data-fetch-uploaded-svg-from",e.fetchUploadedSvgFrom)),t}(r,e))}).catch(r)})),F.all(n)}(t,e):"css"===t.method?C(t,e,function(t){U(t),function(t){"undefined"!=typeof MutationObserver&&new MutationObserver(t).observe(document,{childList:!0,subtree:!0})}(t)}):void 0})(k,L).then(function(t){t.map(function(t){try{T.insertBefore(t,I?I.nextSibling:null)}catch(e){T.appendChild(t)}}),L.detectingConflicts&&I&&U(function(){I.setAttributeNode(document.createAttribute(L.detectionIgnoreAttr));var t=function(t,e){var n=document.createElement("script");return e&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n.src=c(t,{baseFilename:"conflict-detection",fileSuffix:"js",subdir:"js",minify:t.minify.enabled}),n}(k,L);document.body.appendChild(t)})}).catch(function(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))})}}catch(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))}});
\ No newline at end of file
diff --git a/spaces/Voicemod/Text-To-Speech/app.py b/spaces/Voicemod/Text-To-Speech/app.py
deleted file mode 100644
index 3d52e49ebae9a815e1189a9c2378ad0f17db69c7..0000000000000000000000000000000000000000
--- a/spaces/Voicemod/Text-To-Speech/app.py
+++ /dev/null
@@ -1,149 +0,0 @@
-import gradio as gr
-import requests
-import time
-import tempfile
-import os
-
-token = os.environ['apikey']
-#discord_id = os.environ['discord-id']
-API_HOST = "https://labs-proxy.voicemod.net/"
-#API_HOST = "https://pg-labs-proxy.voicemod.dev/"
-
-
-
-
-def getPayload(index, text):
-
- voice_ids = ["aff03ae0-a04b-4f21-99d7-6ec161233a3c",
- "0bc20872-bca3-4a05-9d39-f8500484fdf9",
- "b59ae384-4a63-44b0-88b5-6bd024c6c4c2",
- "6926ecc5-ff5e-47c6-912b-3ffdb880bf56",
- "e3ed13f7-a0c0-4c4e-8609-a4583e1cef45",
- "c970b112-b63b-4208-986d-24a96ff37539",
- "eca21af5-813e-4854-93bc-5a8c8b0f239d",
- "e3ed13f7-a0c0-4c4e-8609-a4583e1cef45"
- ]
-
- return {
- "text": text[:200] if len(text) > 200 else text,
- "voiceId": voice_ids[index]
- }
-
-
-
-def cleanUpLines(lines):
- return list(filter(None, lines))
-
-
-def greet(index, text):
- url = API_HOST + "api/v1/tts/create"
-
- print("Calling api with "+ text)
- print("URL: " + url)
-
-
- payload = getPayload(index, text)
-
-
- headers = {
- 'x-api-key': token,
- # 'x-discord-id': discord_id
- }
-
-
- print(payload)
- print("Before the call")
- response = requests.request("POST", url, headers=headers, json=payload)
- print("After the call...")
- jsonResp = response.json()
- print(response.text)
-
- return gr.make_waveform(download_file(jsonResp['audioUrl']))
-
-
-
-
-def download_file(url):
- response = requests.get(url)
- if response.status_code == 200:
- with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
- tmp_file.write(response.content)
- tmp_file.flush()
- return tmp_file.name
- else:
- print("Error: Unable to download file")
-
-
-with gr.Blocks() as demo:
-
- gr.Markdown("""
- ## Voicemod's Text-to-Speech API
- To use this API follow the instructions:
- 1. First, select the voice you want from the dropdown
- 2. Then write the text you want to hear
- 3. ???
- 4. PROFIT!
-
- ## A note about language
-
- This API is currently optimized for English-language input; if you enter text in any other language (or alphabet), the results will not be ideal.
-
- ## Join our Community
- If you'd like to know more and meet other developers working with this technology, join our [Discord Server](https://discord.gg/vm-dev-community)!
-
- """)
-
- voices = [
- "Bob",
- "Alice",
- "Claudia",
- "The Narrator",
- "Haley",
- "Agatha",
- "Chad",
- "TEST"
- ]
-
- with gr.Row():
- with gr.Column():
- with gr.Row():
- dd = gr.Dropdown(choices=voices, type="index", label="Select the voice...")
- lines = gr.Textbox(lines=10, placeholder="Write your the text here...", label="Text (max 200 characters)...")
-
- with gr.Row():
- btn = gr.Button("Run")
- with gr.Column():
- video = gr.Video(label="Generated output")
- video.style(height=300)
-
- gr.Markdown("""
- ## Cached examples
-
- To make your life easier, the following examples have been cached...
- """)
-
- gr.Examples(fn=greet,
- examples=[
- ["The Narrator", "Learning how to be still, to really be still and let life happen - that stillness becomes a radiance."],
- ["Chad", "What's the point of having a voice if you're gonna be silent in those moments you shouldn't be?"],
- ["Alice", "I am not afraid of storms, for I am learning how to sail my ship"],
- ["Bob", "Once upon a time there was a boy who loved a girl, and her laughter was a question he wanted to spend his whole life answering"]
- ],
- outputs=video,
- inputs=[dd, lines],
- cache_examples=True)
-
- gr.Markdown("""
- ## Want to use this API for your project?
-
- If you'd like to use this API for your own project, request access through our [form here](https://voicemod.typeform.com/to/uZZxqB98) and join our [Dev Community on Discord](https://discord.gg/vm-dev-community)!
- """)
-
- btn.click(fn=greet,
- inputs=[
- dd,
- lines
- ],
- outputs=video)
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/WhyLIM/ChatGPT-academic/show_math.py b/spaces/WhyLIM/ChatGPT-academic/show_math.py
deleted file mode 100644
index 80fa881d1c2ace5813f75b5d8a19ca056a8bfa4f..0000000000000000000000000000000000000000
--- a/spaces/WhyLIM/ChatGPT-academic/show_math.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# This program is written by: https://github.com/polarwinkel/mdtex2html
-
-from latex2mathml.converter import convert as tex2mathml
-import re
-
-incomplete = '⚠formula incomplete '
-convError = '⚠LaTeX-convert-error '
-
-def convert(mdtex, extensions=[], splitParagraphs=True):
- ''' converts recursively the Markdown-LaTeX-mixture to HTML with MathML '''
- found = False
- # handle all paragraphs separately (prevents aftereffects)
- if splitParagraphs:
- parts = re.split("\n\n", mdtex)
- result = ''
- for part in parts:
- result += convert(part, extensions, splitParagraphs=False)
- return result
- # find first $$-formula:
- parts = re.split('\${2}', mdtex, 2)
- if len(parts)>1:
- found = True
- result = convert(parts[0], extensions, splitParagraphs=False)+'\n'
- try:
- result += '<div class="blockformula">'+tex2mathml(parts[1])+'</div>\n'
- except:
- result += '<div class="blockformula">'+convError+'</div>'
- if len(parts)==3:
- result += convert(parts[2], extensions, splitParagraphs=False)
- else:
- result += '<div class="blockformula">'+incomplete+'</div>'
- # else find first $-formulas:
- else:
- parts = re.split('\${1}', mdtex, 2)
- if len(parts)>1 and not found:
- found = True
- try:
- mathml = tex2mathml(parts[1])
- except:
- mathml = convError
- if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula!
- parts[0]=parts[0]+'&#x200B;'
- if len(parts)==3:
- result = convert(parts[0]+mathml+parts[2], extensions, splitParagraphs=False)
- else:
- result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False)
- # else find first \[..\]-equation:
- else:
- parts = re.split(r'\\\[', mdtex, 1)
- if len(parts)>1 and not found:
- found = True
- result = convert(parts[0], extensions, splitParagraphs=False)+'\n'
- parts = re.split(r'\\\]', parts[1], 1)
- try:
- result += '<div class="blockformula">'+tex2mathml(parts[0])+'</div>\n'
- except:
- result += '<div class="blockformula">'+convError+'</div>'
- if len(parts)==2:
- result += convert(parts[1], extensions, splitParagraphs=False)
- else:
- result += '<div class="blockformula">'+incomplete+'</div>'
- # else find first \(..\)-equation:
- else:
- parts = re.split(r'\\\(', mdtex, 1)
- if len(parts)>1 and not found:
- found = True
- subp = re.split(r'\\\)', parts[1], 1)
- try:
- mathml = tex2mathml(subp[0])
- except:
- mathml = convError
- if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula!
- parts[0]=parts[0]+'&#x200B;'
- if len(subp)==2:
- result = convert(parts[0]+mathml+subp[1], extensions, splitParagraphs=False)
- else:
- result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False)
- if not found:
- result = mdtex
- return result
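
A minimal usage sketch for `convert` (assumes `latex2mathml` is installed and the file is importable as `show_math`; the input string is illustrative only).

```python
# Minimal usage sketch; the Markdown/LaTeX input is illustrative only.
from show_math import convert

md = ("Euler's identity: $e^{i\\pi} + 1 = 0$\n\n"
      "A display formula:\n\n"
      "$$\\int_0^1 x^2 dx = 1/3$$")
html = convert(md)  # LaTeX fragments become MathML, everything else passes through
print(html)
```
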
diff --git a/spaces/Xhaheen/meme_world/README.md b/spaces/Xhaheen/meme_world/README.md
deleted file mode 100644
index 9b1bed1da600a234de6a9dffc9c42e8a01f21d74..0000000000000000000000000000000000000000
--- a/spaces/Xhaheen/meme_world/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Meme World
-emoji: 📚
-colorFrom: green
-colorTo: pink
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Xule/ChuanhuChatGPT/chatgpt - windows.bat b/spaces/Xule/ChuanhuChatGPT/chatgpt - windows.bat
deleted file mode 100644
index 0b78fdc3a559abd692e3a9e9af5e482124d13a99..0000000000000000000000000000000000000000
--- a/spaces/Xule/ChuanhuChatGPT/chatgpt - windows.bat
+++ /dev/null
@@ -1,14 +0,0 @@
-@echo off
-echo Opening ChuanhuChatGPT...
-
-REM Open powershell via bat
-start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py"
-
-REM The web page becomes reachable at http://127.0.0.1:7860/ after a short delay
-ping -n 5 127.0.0.1>nul
-
-REM Open ChuanhuChatGPT in your default browser
-start "" "http://127.0.0.1:7860/"
-
-
-echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/).
\ No newline at end of file
diff --git a/spaces/XzJosh/Bekki-Bert-VITS2/monotonic_align/core.py b/spaces/XzJosh/Bekki-Bert-VITS2/monotonic_align/core.py
deleted file mode 100644
index dddc688d76172b880054e544b7a217acd013f14f..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Bekki-Bert-VITS2/monotonic_align/core.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import numba
-
-
-@numba.jit(numba.void(numba.int32[:,:,::1], numba.float32[:,:,::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True)
-def maximum_path_jit(paths, values, t_ys, t_xs):
- b = paths.shape[0]
- max_neg_val=-1e9
- for i in range(int(b)):
- path = paths[i]
- value = values[i]
- t_y = t_ys[i]
- t_x = t_xs[i]
-
- v_prev = v_cur = 0.0
- index = t_x - 1
-
- for y in range(t_y):
- for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
- if x == y:
- v_cur = max_neg_val
- else:
- v_cur = value[y-1, x]
- if x == 0:
- if y == 0:
- v_prev = 0.
- else:
- v_prev = max_neg_val
- else:
- v_prev = value[y-1, x-1]
- value[y, x] += max(v_prev, v_cur)
-
- for y in range(t_y - 1, -1, -1):
- path[y, index] = 1
- if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
- index = index - 1
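
A minimal usage sketch of the JIT-compiled routine above (assumes it is importable from this `monotonic_align.core` module; the exact dtypes and C-contiguity are required by the `numba.jit` signature). `paths` is filled in place with a 0/1 monotonic alignment between the `t_y` output frames and `t_x` input tokens.

```python
# Minimal usage sketch; shapes and dtypes must match the numba signature above.
import numpy as np
from monotonic_align.core import maximum_path_jit

b, t_y, t_x = 1, 6, 4                                     # batch, frames, tokens (t_y >= t_x)
values = np.random.rand(b, t_y, t_x).astype(np.float32)   # e.g. a log-likelihood map
paths = np.zeros((b, t_y, t_x), dtype=np.int32)
t_ys = np.array([t_y], dtype=np.int32)
t_xs = np.array([t_x], dtype=np.int32)

maximum_path_jit(paths, values, t_ys, t_xs)  # modifies `paths` (and `values`) in place
print(paths[0])                              # one 1 per frame, monotonically ordered
```
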
diff --git a/spaces/XzJosh/XingTong-Bert-VITS2/attentions.py b/spaces/XzJosh/XingTong-Bert-VITS2/attentions.py
deleted file mode 100644
index 1192dd7268c20c11010e73a6017ed09549695afe..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/XingTong-Bert-VITS2/attentions.py
+++ /dev/null
@@ -1,344 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import logging
-
-logger = logging.getLogger(__name__)
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
- #if isflow:
- # cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)
- # self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)
- # self.cond_layer = weight_norm(cond_layer, name='weight')
- # self.gin_channels = 256
- self.cond_layer_idx = self.n_layers
- if 'gin_channels' in kwargs:
- self.gin_channels = kwargs['gin_channels']
- if self.gin_channels != 0:
- self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
- # vits2 says 3rd block, so idx is 2 by default
- self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2
- logger.debug("gin_channels: %s, cond_layer_idx: %s", self.gin_channels, self.cond_layer_idx)
- assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers'
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
- def forward(self, x, x_mask, g=None):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- if i == self.cond_layer_idx and g is not None:
- g = self.spk_emb_linear(g.transpose(1, 2))
- g = g.transpose(1, 2)
- x = x + g
- x = x * x_mask
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along column
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
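
A minimal smoke-test sketch for the attention block above (assumes `attentions.py` and its `commons` dependency are importable; tensors follow the `[batch, channels, time]` layout used throughout this file).

```python
# Minimal smoke-test sketch; shapes are illustrative only.
import torch
from attentions import MultiHeadAttention

attn = MultiHeadAttention(channels=192, out_channels=192, n_heads=2)
x = torch.randn(4, 192, 50)        # [b, d, t]
mask = torch.ones(4, 1, 50, 50)    # attend everywhere
y = attn(x, x, attn_mask=mask)     # self-attention: same source for q and k/v
print(y.shape)                     # torch.Size([4, 192, 50])
```
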
diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/INSTALL.md b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/INSTALL.md
deleted file mode 100644
index b40768913742ca2b2e11c74d5944561931ecb326..0000000000000000000000000000000000000000
--- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/INSTALL.md
+++ /dev/null
@@ -1,261 +0,0 @@
-## Installation
-
-### Requirements
-- Linux or macOS with Python ≥ 3.6
-- PyTorch ≥ 1.8 and [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation.
- Install them together at [pytorch.org](https://pytorch.org) to make sure of this
-- OpenCV is optional but needed by demo and visualization
-
-
-### Build Detectron2 from Source
-
-gcc & g++ ≥ 5.4 are required. [ninja](https://ninja-build.org/) is optional but recommended for faster build.
-After having them, run:
-```
-python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
-# (add --user if you don't have permission)
-
-# Or, to install it from a local clone:
-git clone https://github.com/facebookresearch/detectron2.git
-python -m pip install -e detectron2
-
-# On macOS, you may need to prepend the above commands with a few environment variables:
-CC=clang CXX=clang++ ARCHFLAGS="-arch x86_64" python -m pip install ...
-```
-
-To __rebuild__ detectron2 that's built from a local clone, use `rm -rf build/ **/*.so` to clean the
-old build first. You often need to rebuild detectron2 after reinstalling PyTorch.
-
-### Install Pre-Built Detectron2 (Linux only)
-
-Choose from this table to install [v0.6 (Oct 2021)](https://github.com/facebookresearch/detectron2/releases):
-
-Install with `python -m pip install detectron2 -f <URL>`, choosing the `<URL>` (wheel index) that matches your CUDA and torch versions:
-
-| CUDA | torch 1.10 | torch 1.9 | torch 1.8 |
-| ---- | ---------- | --------- | --------- |
-| 11.3 | https://dl.fbaipublicfiles.com/detectron2/wheels/cu113/torch1.10/index.html | | |
-| 11.1 | https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.10/index.html | https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.9/index.html | https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html |
-| 10.2 | https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.10/index.html | https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html | https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.8/index.html |
-| 10.1 | | | https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html |
-| cpu  | https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.10/index.html | https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.9/index.html | https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html |
-
-
-Note that:
-1. The pre-built packages have to be used with the corresponding version of CUDA and the official package of PyTorch.
- Otherwise, please build detectron2 from source.
-2. New packages are released every few months. Therefore, packages may not contain the latest features in the main
- branch and may not be compatible with the main branch of a research project that uses detectron2
- (e.g. those in [projects](projects)).
-
-### Common Installation Issues
-
-The most common issues, with their solutions, are listed below:
-
-
-
-**Undefined symbols that look like "TH..", "at::Tensor...", "torch..."**
-
-
-
-This usually happens when detectron2 or torchvision is not
-compiled with the version of PyTorch you're running.
-
-If the error comes from a pre-built torchvision, uninstall torchvision and pytorch and reinstall them
-following [pytorch.org](http://pytorch.org) so that the versions match.
-
-If the error comes from a pre-built detectron2, check [release notes](https://github.com/facebookresearch/detectron2/releases),
-uninstall and reinstall the correct pre-built detectron2 that matches pytorch version.
-
-If the error comes from detectron2 or torchvision that you built manually from source,
-remove files you built (`build/`, `**/*.so`) and rebuild it so it can pick up the version of pytorch currently in your environment.
-
-If the above instructions do not resolve this problem, please provide an environment (e.g. a dockerfile) that can reproduce the issue.
-
-
-
-
-**Missing torch dynamic libraries, OR segmentation fault immediately when using detectron2.**
-
-This usually happens when detectron2 or torchvision is not
-compiled with the version of PyTorch you're running. See the previous common issue for the solution.
-
-
-
-
-**Undefined C++ symbols (e.g. "GLIBCXX..") or C++ symbols not found.**
-
-
-Usually it's because the library is compiled with a newer C++ compiler but run with an old C++ runtime.
-
-This often happens with old anaconda.
-It may help to run `conda update libgcc` to upgrade its runtime.
-
-The fundamental solution is to avoid the mismatch, either by compiling using older version of C++
-compiler, or run the code with proper C++ runtime.
-To run the code with a specific C++ runtime, you can use environment variable `LD_PRELOAD=/path/to/libstdc++.so`.
-
-
-
-
-
-"nvcc not found" or "Not compiled with GPU support" or "Detectron2 CUDA Compiler: not available".
-
-
-CUDA is not found when building detectron2.
-You should make sure
-
-```
-python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)'
-```
-
-prints `(True, a directory with cuda)` at the time you build detectron2.
-
-Most models can run inference (but not training) without GPU support. To use CPUs, set `MODEL.DEVICE='cpu'` in the config.
-
-
-
-
-"invalid device function" or "no kernel image is available for execution".
-
-
-Two possibilities:
-
-* You build detectron2 with one version of CUDA but run it with a different version.
-
- To check whether it is the case,
- use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions.
- In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA"
- to contain cuda libraries of the same version.
-
- When they are inconsistent,
- you need to either install a different build of PyTorch (or build by yourself)
- to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
-
-* PyTorch/torchvision/Detectron2 is not built for the correct GPU SM architecture (aka. compute capability).
-
- The architecture included by PyTorch/detectron2/torchvision is available in the "architecture flags" in
- `python -m detectron2.utils.collect_env`. It must include
- the architecture of your GPU, which can be found at [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus).
-
- If you're using pre-built PyTorch/detectron2/torchvision, they have included support for most popular GPUs already.
- If not supported, you need to build them from source.
-
- When building detectron2/torchvision from source, they detect the GPU device and build only for that device.
- This means the compiled code may not work on a different GPU device.
- To recompile them for the correct architecture, remove all installed/compiled files,
- and rebuild them with the `TORCH_CUDA_ARCH_LIST` environment variable set properly.
- For example, `export TORCH_CUDA_ARCH_LIST="6.0;7.0"` makes it compile for both P100s and V100s.
-
-
-
-
-**Undefined CUDA symbols; Cannot open libcudart.so.**
-
-
-The version of NVCC you use to build detectron2 or torchvision does
-not match the version of CUDA you are running with.
-This often happens when using anaconda's CUDA runtime.
-
-Use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions.
-In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA"
-to contain cuda libraries of the same version.
-
-When they are inconsistent,
-you need to either install a different build of PyTorch (or build by yourself)
-to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
-
-
-
-
-
-**C++ compilation errors from NVCC / NVRTC, or "Unsupported gpu architecture".**
-
-
-A few possibilities:
-
-1. Local CUDA/NVCC version has to match the CUDA version of your PyTorch. Both can be found in `python collect_env.py`.
- When they are inconsistent, you need to either install a different build of PyTorch (or build by yourself)
- to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
-
-2. Local CUDA/NVCC version shall support the SM architecture (a.k.a. compute capability) of your GPU.
- The capability of your GPU can be found at [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus).
- The capability supported by NVCC is listed at [here](https://gist.github.com/ax3l/9489132).
- If your NVCC version is too old, this can be worked around by setting the environment variable
- `TORCH_CUDA_ARCH_LIST` to a lower, supported capability.
-
-3. The combination of NVCC and GCC you use is incompatible. You need to change one of their versions.
- See [here](https://gist.github.com/ax3l/9489132) for some valid combinations.
- Notably, CUDA<=10.1.105 doesn't support GCC>7.3.
-
- The CUDA/GCC version used by PyTorch can be found by `print(torch.__config__.show())`.
-
-
-
-
-
-
-"ImportError: cannot import name '_C'".
-
-
-Please build and install detectron2 following the instructions above.
-
-Or, if you are running code from detectron2's root directory, `cd` to a different one.
-Otherwise you may not import the code that you installed.
-
-
-
-
-
-**Any issue on Windows.**
-
-
-
-Detectron2 is continuously built on Windows with [CircleCI](https://app.circleci.com/pipelines/github/facebookresearch/detectron2?branch=main).
-However, we do not provide official support for it.
-PRs that improve code compatibility on Windows are welcome.
-
-
-
-
-**ONNX conversion segfault after some "TraceWarning".**
-
-
-The ONNX package was compiled with too old a compiler.
-
-Please build and install ONNX from its source code using a compiler
-whose version is closer to what's used by PyTorch (available in `torch.__config__.show()`).
-
-
-
-
-
-"library not found for -lstdc++" on older version of MacOS
-
-
-See
-[this stackoverflow answer](https://stackoverflow.com/questions/56083725/macos-build-issues-lstdc-not-found-while-building-python-package).
-
-
-
-
-### Installation inside specific environments:
-
-* __Colab__: see our [Colab Tutorial](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
- which has step-by-step instructions.
-
-* __Docker__: The official [Dockerfile](docker) installs detectron2 with a few simple commands.
-
diff --git a/spaces/YuAnthony/Audio-Caption/tools/captions_functions.py b/spaces/YuAnthony/Audio-Caption/tools/captions_functions.py
deleted file mode 100644
index e35b487cecd32ff291f7f4eb048d2ae6680c1a47..0000000000000000000000000000000000000000
--- a/spaces/YuAnthony/Audio-Caption/tools/captions_functions.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from typing import Optional, List, MutableSequence
-from re import sub as re_sub
-from collections import Counter
-from itertools import chain
-from functools import partial
-
-__author__ = 'Konstantinos Drossos -- Tampere University'
-__docformat__ = 'reStructuredText'
-__all__ = ['get_words_counter', 'clean_sentence', 'get_sentence_words']
-
-
-def get_sentence_words(sentence: str,
- unique: Optional[bool] = False,
- keep_case: Optional[bool] = False,
- remove_punctuation: Optional[bool] = True,
- remove_specials: Optional[bool] = True) -> List[str]:
- """Splits input sentence into words.
-
- :param sentence: Sentence to split
- :type sentence: str
- :param unique: Returns a list of unique words.
- :type unique: bool
- :param keep_case: Keep capitals and small (True) or turn\
- everything to small case (False)
- :type keep_case: bool
- :param remove_punctuation: Remove punctuation from sentence?
- :type remove_punctuation: bool
- :param remove_specials: Remove special tokens?
- :type remove_specials: bool
- :return: Sentence words
- :rtype: list[str]
- """
- words = clean_sentence(
- sentence, keep_case=keep_case,
- remove_punctuation=remove_punctuation,
- remove_specials=remove_specials).strip().split()
-
- if unique:
- words = list(set(words))
-
- return words
-
-
-def clean_sentence(sentence: str,
- keep_case: Optional[bool] = False,
- remove_punctuation: Optional[bool] = True,
- remove_specials: Optional[bool] = True) -> str:
- """Cleans a sentence.
-
- :param sentence: Sentence to be clean.
- :type sentence: str
- :param keep_case: Keep capitals and small (True) or turn\
- everything to small case (False)
- :type keep_case: bool
- :param remove_punctuation: Remove punctuation from sentence?
- :type remove_punctuation: bool
- :param remove_specials: Remove special tokens?
- :type remove_specials: bool
- :return: Cleaned sentence.
- :rtype: str
- """
- the_sentence = sentence if keep_case else sentence.lower()
-
- # Remove any forgotten space before punctuation and double space.
- the_sentence = re_sub(r'\s([,.!?;:"](?:\s|$))', r'\1', the_sentence).replace(' ', ' ')
-
- if remove_specials:
- the_sentence = the_sentence.replace('<SOS> ', '').replace('<sos> ', '')
- the_sentence = the_sentence.replace(' <EOS>', '').replace(' <eos>', '')
-
- if remove_punctuation:
- the_sentence = re_sub('[,.!?;:\"]', '', the_sentence)
-
- return the_sentence
-
-
-def get_words_counter(captions: MutableSequence[str],
- use_unique: Optional[bool] = False,
- keep_case: Optional[bool] = False,
- remove_punctuation: Optional[bool] = True,
- remove_specials: Optional[bool] = True) -> Counter:
- """Creates a Counter object from the\
- words in the captions.
-
- :param captions: The captions.
- :type captions: list[str]|iterable
- :param use_unique: Use unique only words from the captions?
- :type use_unique: bool
- :param keep_case: Keep capitals and small (True) or turn\
- everything to small case (False)
- :type keep_case: bool
- :param remove_punctuation: Remove punctuation from captions?
- :type remove_punctuation: bool
- :param remove_specials: Remove special tokens?
- :type remove_specials: bool
- :return: Counter object from\
- the words in the captions.
- :rtype: collections.Counter
- """
- partial_func = partial(
- get_sentence_words,
- unique=use_unique, keep_case=keep_case,
- remove_punctuation=remove_punctuation,
- remove_specials=remove_specials
- )
- return Counter(chain.from_iterable(map(partial_func, captions)))
-
-# EOF
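
A minimal usage sketch for the helpers above (assumes the module is importable as `tools.captions_functions`; the captions are made up for illustration).

```python
# Minimal usage sketch; the captions are illustrative only.
from tools.captions_functions import clean_sentence, get_sentence_words, get_words_counter

captions = [
    'A dog barks , and a car passes by .',
    'Rain falls on a tin roof.',
]
print(clean_sentence(captions[0]))                   # 'a dog barks and a car passes by'
print(get_sentence_words(captions[1], unique=True))  # unique lower-cased words
print(get_words_counter(captions).most_common(3))    # e.g. [('a', 3), ...]
```
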
diff --git a/spaces/Yuliang/ECON/lib/pixielib/pixie.py b/spaces/Yuliang/ECON/lib/pixielib/pixie.py
deleted file mode 100644
index 6cadc83bd8b67f32d5819e0d5218de194e63692c..0000000000000000000000000000000000000000
--- a/spaces/Yuliang/ECON/lib/pixielib/pixie.py
+++ /dev/null
@@ -1,575 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
-# holder of all proprietary rights on this computer program.
-# Using this computer program means that you agree to the terms
-# in the LICENSE file included with this software distribution.
-# Any use not explicitly granted by the LICENSE is prohibited.
-#
-# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
-# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
-# for Intelligent Systems. All rights reserved.
-#
-# For comments or questions, please email us at pixie@tue.mpg.de
-# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
-
-import os
-
-import cv2
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision
-from skimage.io import imread
-
-from .models.encoders import MLP, HRNEncoder, ResnetEncoder
-from .models.moderators import TempSoftmaxFusion
-from .models.SMPLX import SMPLX
-from .utils import rotation_converter as converter
-from .utils import tensor_cropper, util
-from .utils.config import cfg
-
-
-class PIXIE(object):
- def __init__(self, config=None, device="cuda:0"):
- if config is None:
- self.cfg = cfg
- else:
- self.cfg = config
-
- self.device = device
- # parameters setting
- self.param_list_dict = {}
- for lst in self.cfg.params.keys():
- param_list = cfg.params.get(lst)
- self.param_list_dict[lst] = {i: cfg.model.get("n_" + i) for i in param_list}
-
- # Build the models
- self._create_model()
- # Set up the cropping modules used to generate face/hand crops from the body predictions
- self._setup_cropper()
-
- def forward(self, data):
-
- # encode + decode
- param_dict = self.encode(
- {"body": {"image": data}},
- threthold=True,
- keep_local=True,
- copy_and_paste=False,
- )
- opdict = self.decode(param_dict["body"], param_type="body")
-
- return opdict
-
- def _setup_cropper(self):
- self.Cropper = {}
- for crop_part in ["head", "hand"]:
- data_cfg = self.cfg.dataset[crop_part]
- scale_size = (data_cfg.scale_min + data_cfg.scale_max) * 0.5
- self.Cropper[crop_part] = tensor_cropper.Cropper(
- crop_size=data_cfg.image_size,
- scale=[scale_size, scale_size],
- trans_scale=0,
- )
-
- def _create_model(self):
- self.model_dict = {}
- # Build all image encoders
- # Hand encoder only works for right hand, for left hand, flip inputs and flip the results back
- self.Encoder = {}
- for key in self.cfg.network.encoder.keys():
- if self.cfg.network.encoder.get(key).type == "resnet50":
- self.Encoder[key] = ResnetEncoder().to(self.device)
- elif self.cfg.network.encoder.get(key).type == "hrnet":
- self.Encoder[key] = HRNEncoder().to(self.device)
- self.model_dict[f"Encoder_{key}"] = self.Encoder[key].state_dict()
-
- # Build the parameter regressors
- self.Regressor = {}
- for key in self.cfg.network.regressor.keys():
- n_output = sum(self.param_list_dict[f"{key}_list"].values())
- channels = ([2048] + self.cfg.network.regressor.get(key).channels + [n_output])
- if self.cfg.network.regressor.get(key).type == "mlp":
- self.Regressor[key] = MLP(channels=channels).to(self.device)
- self.model_dict[f"Regressor_{key}"] = self.Regressor[key].state_dict()
-
- # Build the extractors
- # to extract separate head/left hand/right hand feature from body feature
- self.Extractor = {}
- for key in self.cfg.network.extractor.keys():
- channels = [2048] + self.cfg.network.extractor.get(key).channels + [2048]
- if self.cfg.network.extractor.get(key).type == "mlp":
- self.Extractor[key] = MLP(channels=channels).to(self.device)
- self.model_dict[f"Extractor_{key}"] = self.Extractor[key].state_dict()
-
- # Build the moderators
- self.Moderator = {}
- for key in self.cfg.network.moderator.keys():
- share_part = key.split("_")[0]
- detach_inputs = self.cfg.network.moderator.get(key).detach_inputs
- detach_feature = self.cfg.network.moderator.get(key).detach_feature
- channels = [2048 * 2] + self.cfg.network.moderator.get(key).channels + [2]
- self.Moderator[key] = TempSoftmaxFusion(
- detach_inputs=detach_inputs,
- detach_feature=detach_feature,
- channels=channels,
- ).to(self.device)
- self.model_dict[f"Moderator_{key}"] = self.Moderator[key].state_dict()
-
- # Build the SMPL-X body model, which we also use to represent faces and
- # hands, using the relevant parts only
- self.smplx = SMPLX(self.cfg.model).to(self.device)
- self.part_indices = self.smplx.part_indices
-
- # -- resume model
- model_path = self.cfg.pretrained_modelpath
- if os.path.exists(model_path):
- checkpoint = torch.load(model_path)
- for key in self.model_dict.keys():
- util.copy_state_dict(self.model_dict[key], checkpoint[key])
- else:
- print(f"pixie trained model path: {model_path} does not exist!")
- exit()
- # eval mode
- for module in [self.Encoder, self.Regressor, self.Moderator, self.Extractor]:
- for net in module.values():
- net.eval()
-
- def decompose_code(self, code, num_dict):
- """Convert a flattened parameter vector to a dictionary of parameters"""
- code_dict = {}
- start = 0
- for key in num_dict:
- end = start + int(num_dict[key])
- code_dict[key] = code[:, start:end]
- start = end
- return code_dict
-
- def part_from_body(self, image, part_key, points_dict, crop_joints=None):
- """crop part(head/left_hand/right_hand) out from body data, joints also change accordingly"""
- assert part_key in ["head", "left_hand", "right_hand"]
- assert "smplx_kpt" in points_dict.keys()
- if part_key == "head":
- # use face 68 kpts for cropping head image
- indices_key = "face"
- elif part_key == "left_hand":
- indices_key = "left_hand"
- elif part_key == "right_hand":
- indices_key = "right_hand"
-
- # get points for cropping
- part_indices = self.part_indices[indices_key]
- if crop_joints is not None:
- points_for_crop = crop_joints[:, part_indices]
- else:
- points_for_crop = points_dict["smplx_kpt"][:, part_indices]
-
- # crop
- cropper_key = "hand" if "hand" in part_key else part_key
- points_scale = image.shape[-2:]
- cropped_image, tform = self.Cropper[cropper_key].crop(image, points_for_crop, points_scale)
- # transform points (must be normalized to [-1, 1]) accordingly
- cropped_points_dict = {}
- for points_key in points_dict.keys():
- points = points_dict[points_key]
- cropped_points = self.Cropper[cropper_key].transform_points(
- points, tform, points_scale, normalize=True
- )
- cropped_points_dict[points_key] = cropped_points
- return cropped_image, cropped_points_dict
-
- @torch.no_grad()
- def encode(
- self,
- data,
- threthold=True,
- keep_local=True,
- copy_and_paste=False,
- body_only=False,
- ):
- """Encode images to smplx parameters
- Args:
- data: dict
- key: image_type (body/head/hand)
- value:
- image: [bz, 3, 224, 224], range [0,1]
- image_hd(needed if key==body): a high res version of image, only for cropping parts from body image
- head_image: optional, well-cropped head from body image
- left_hand_image: optional, well-cropped left hand from body image
- right_hand_image: optional, well-cropped right hand from body image
- Returns:
- param_dict: dict
- key: image_type (body/head/hand)
- value: param_dict
- """
- for key in data.keys():
- assert key in ["body", "head", "hand"]
-
- feature = {}
- param_dict = {}
-
- # Encode features
- for key in data.keys():
- part = key
- # encode feature
- feature[key] = {}
- feature[key][part] = self.Encoder[part](data[key]["image"])
-
- # for head/hand image
- if key == "head" or key == "hand":
- # predict head/hand-only parameters from part feature
- part_dict = self.decompose_code(
- self.Regressor[part](feature[key][part]),
- self.param_list_dict[f"{part}_list"],
- )
- # if the input is part data, skip feature fusion: the share feature is the same as the part feature
- # then predict the share parameters
- feature[key][f"{key}_share"] = feature[key][key]
- share_dict = self.decompose_code(
- self.Regressor[f"{part}_share"](feature[key][f"{part}_share"]),
- self.param_list_dict[f"{part}_share_list"],
- )
- # compose parameters
- param_dict[key] = {**share_dict, **part_dict}
-
- # for body image
- if key == "body":
- fusion_weight = {}
- f_body = feature["body"]["body"]
- # extract part feature
- for part_name in ["head", "left_hand", "right_hand"]:
- feature["body"][f"{part_name}_share"] = self.Extractor[f"{part_name}_share"](
- f_body
- )
-
- # -- check if part crops are given, if not, crop parts by coarse body estimation
- if (
- "head_image" not in data[key].keys() or
- "left_hand_image" not in data[key].keys() or
- "right_hand_image" not in data[key].keys()
- ):
- # - run without fusion to get coarse estimation, for cropping parts
- # body only
- body_dict = self.decompose_code(
- self.Regressor[part](feature[key][part]),
- self.param_list_dict[part + "_list"],
- )
- # head share
- head_share_dict = self.decompose_code(
- self.Regressor["head" + "_share"](feature[key]["head" + "_share"]),
- self.param_list_dict["head" + "_share_list"],
- )
- # right hand share
- right_hand_share_dict = self.decompose_code(
- self.Regressor["hand" + "_share"](feature[key]["right_hand" + "_share"]),
- self.param_list_dict["hand" + "_share_list"],
- )
- # left hand share
- left_hand_share_dict = self.decompose_code(
- self.Regressor["hand" + "_share"](feature[key]["left_hand" + "_share"]),
- self.param_list_dict["hand" + "_share_list"],
- )
- # change the dict name from right to left
- left_hand_share_dict["left_hand_pose"] = left_hand_share_dict.pop(
- "right_hand_pose"
- )
- left_hand_share_dict["left_wrist_pose"] = left_hand_share_dict.pop(
- "right_wrist_pose"
- )
- param_dict[key] = {
- **body_dict,
- **head_share_dict,
- **left_hand_share_dict,
- **right_hand_share_dict,
- }
- if body_only:
- param_dict["moderator_weight"] = None
- return param_dict
- prediction_body_only = self.decode(param_dict[key], param_type="body")
- # crop
- for part_name in ["head", "left_hand", "right_hand"]:
- part = part_name.split("_")[-1]
- points_dict = {
- "smplx_kpt": prediction_body_only["smplx_kpt"],
- "trans_verts": prediction_body_only["transformed_vertices"],
- }
- image_hd = torchvision.transforms.Resize(1024)(data["body"]["image"])
- cropped_image, cropped_joints_dict = self.part_from_body(
- image_hd, part_name, points_dict
- )
- data[key][part_name + "_image"] = cropped_image
-
- # -- encode features from part crops, then fuse feature using the weight from moderator
- for part_name in ["head", "left_hand", "right_hand"]:
- part = part_name.split("_")[-1]
- cropped_image = data[key][part_name + "_image"]
- # if it is the left hand, flip it so that it looks like a right hand
- if part_name == "left_hand":
- cropped_image = torch.flip(cropped_image, dims=(-1, ))
- # run part regressor
- f_part = self.Encoder[part](cropped_image)
- part_dict = self.decompose_code(
- self.Regressor[part](f_part),
- self.param_list_dict[f"{part}_list"],
- )
- part_share_dict = self.decompose_code(
- self.Regressor[f"{part}_share"](f_part),
- self.param_list_dict[f"{part}_share_list"],
- )
- param_dict["body_" + part_name] = {**part_dict, **part_share_dict}
-
- # moderator to assign weight, then integrate features
- f_body_out, f_part_out, f_weight = self.Moderator[f"{part}_share"](
- feature["body"][f"{part_name}_share"], f_part, work=True
- )
- if copy_and_paste:
- # copy and paste strategy always trusts the results from part
- feature["body"][f"{part_name}_share"] = f_part
- elif threthold and part == "hand":
- # for hands: if the part weight > 0.7 (very confident), fully trust the part
- part_w = f_weight[:, [1]]
- part_w[part_w > 0.7] = 1.0
- f_body_out = (
- feature["body"][f"{part_name}_share"] * (1.0 - part_w) + f_part * part_w
- )
- feature["body"][f"{part_name}_share"] = f_body_out
- else:
- feature["body"][f"{part_name}_share"] = f_body_out
- fusion_weight[part_name] = f_weight
- # save weights from the moderator; they can be further used for optimization or for running part-specific tasks
- param_dict["moderator_weight"] = fusion_weight
-
- # -- predict parameters from fused body feature
- # head share
- head_share_dict = self.decompose_code(
- self.Regressor["head" + "_share"](feature[key]["head" + "_share"]),
- self.param_list_dict["head" + "_share_list"],
- )
- # right hand share
- right_hand_share_dict = self.decompose_code(
- self.Regressor["hand" + "_share"](feature[key]["right_hand" + "_share"]),
- self.param_list_dict["hand" + "_share_list"],
- )
- # left hand share
- left_hand_share_dict = self.decompose_code(
- self.Regressor["hand" + "_share"](feature[key]["left_hand" + "_share"]),
- self.param_list_dict["hand" + "_share_list"],
- )
- # change the dict name from right to left
- left_hand_share_dict["left_hand_pose"] = left_hand_share_dict.pop("right_hand_pose")
- left_hand_share_dict["left_wrist_pose"] = left_hand_share_dict.pop(
- "right_wrist_pose"
- )
- param_dict["body"] = {
- **body_dict,
- **head_share_dict,
- **left_hand_share_dict,
- **right_hand_share_dict,
- }
- # copy tex param from head param dict to body param dict
- param_dict["body"]["tex"] = param_dict["body_head"]["tex"]
- param_dict["body"]["light"] = param_dict["body_head"]["light"]
-
- if keep_local:
- # for local changes that will not affect the whole body or produce an unnatural pose, trust the part
- param_dict[key]["exp"] = param_dict["body_head"]["exp"]
- param_dict[key]["right_hand_pose"] = param_dict["body_right_hand"][
- "right_hand_pose"]
- param_dict[key]["left_hand_pose"] = param_dict["body_left_hand"][
- "right_hand_pose"]
-
- return param_dict
-
- def convert_pose(self, param_dict, param_type):
- """Convert pose parameters to rotation matrix
- Args:
- param_dict: smplx parameters
- param_type: should be one of body/head/hand
- Returns:
- param_dict: smplx parameters
- """
- assert param_type in ["body", "head", "hand"]
-
- # convert pose representations: the network outputs continuous (6D) rotation representations or axis angles,
- # while the input poses for SMPL-X need to be rotation matrices
- for key in param_dict:
- if "pose" in key and "jaw" not in key:
- param_dict[key] = converter.batch_cont2matrix(param_dict[key])
- if param_type == "body" or param_type == "head":
- param_dict["jaw_pose"] = converter.batch_euler2matrix(param_dict["jaw_pose"]
- )[:, None, :, :]
-
- # fill in params that are not present in the given param dict
- if param_type == "head":
- batch_size = param_dict["shape"].shape[0]
- param_dict["abs_head_pose"] = param_dict["head_pose"].clone()
- param_dict["global_pose"] = param_dict["head_pose"]
- param_dict["partbody_pose"] = self.smplx.body_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )[:, :self.param_list_dict["body_list"]["partbody_pose"]]
- param_dict["neck_pose"] = self.smplx.neck_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )
- param_dict["left_wrist_pose"] = self.smplx.neck_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )
- param_dict["left_hand_pose"] = self.smplx.left_hand_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )
- param_dict["right_wrist_pose"] = self.smplx.neck_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )
- param_dict["right_hand_pose"] = self.smplx.right_hand_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )
- elif param_type == "hand":
- batch_size = param_dict["right_hand_pose"].shape[0]
- param_dict["abs_right_wrist_pose"] = param_dict["right_wrist_pose"].clone()
- dtype = param_dict["right_hand_pose"].dtype
- device = param_dict["right_hand_pose"].device
- x_180_pose = (torch.eye(3, dtype=dtype, device=device).unsqueeze(0).repeat(1, 1, 1))
- x_180_pose[0, 2, 2] = -1.0
- x_180_pose[0, 1, 1] = -1.0
- param_dict["global_pose"] = x_180_pose.unsqueeze(0).expand(batch_size, -1, -1, -1)
- param_dict["shape"] = self.smplx.shape_params.expand(batch_size, -1)
- param_dict["exp"] = self.smplx.expression_params.expand(batch_size, -1)
- param_dict["head_pose"] = self.smplx.head_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )
- param_dict["neck_pose"] = self.smplx.neck_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )
- param_dict["jaw_pose"] = self.smplx.jaw_pose.unsqueeze(0).expand(batch_size, -1, -1, -1)
- param_dict["partbody_pose"] = self.smplx.body_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )[:, :self.param_list_dict["body_list"]["partbody_pose"]]
- param_dict["left_wrist_pose"] = self.smplx.neck_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )
- param_dict["left_hand_pose"] = self.smplx.left_hand_pose.unsqueeze(0).expand(
- batch_size, -1, -1, -1
- )
- elif param_type == "body":
- # the prediction from the head and hand share regressors is always an absolute pose
- batch_size = param_dict["shape"].shape[0]
- param_dict["abs_head_pose"] = param_dict["head_pose"].clone()
- param_dict["abs_right_wrist_pose"] = param_dict["right_wrist_pose"].clone()
- param_dict["abs_left_wrist_pose"] = param_dict["left_wrist_pose"].clone()
- # the body-hand share regressor works on right hands only,
- # so the body network receives a flipped (mirrored) feature for the left hand and predicts right-hand parameters;
- # we then flip the result back to the left so that it matches the input left hand
- param_dict["left_wrist_pose"] = util.flip_pose(param_dict["left_wrist_pose"])
- param_dict["left_hand_pose"] = util.flip_pose(param_dict["left_hand_pose"])
- else:
- exit()
-
- return param_dict
-
- def decode(self, param_dict, param_type):
- """Decode model parameters to smplx vertices & joints & texture
- Args:
- param_dict: smplx parameters
- param_type: should be one of body/head/hand
- Returns:
- predictions: smplx predictions
- """
- if "jaw_pose" in param_dict.keys() and len(param_dict["jaw_pose"].shape) == 2:
- self.convert_pose(param_dict, param_type)
- elif param_dict["right_wrist_pose"].shape[-1] == 6:
- self.convert_pose(param_dict, param_type)
-
- # concatenate body pose
- partbody_pose = param_dict["partbody_pose"]
- param_dict["body_pose"] = torch.cat(
- [
- partbody_pose[:, :11],
- param_dict["neck_pose"],
- partbody_pose[:, 11:11 + 2],
- param_dict["head_pose"],
- partbody_pose[:, 13:13 + 4],
- param_dict["left_wrist_pose"],
- param_dict["right_wrist_pose"],
- ],
- dim=1,
- )
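- # The concatenation above assembles the 21-joint SMPL-X body pose:
- # 11 partbody joints + neck + 2 partbody joints + head + 4 partbody joints
- # + left wrist + right wrist (11 + 1 + 2 + 1 + 4 + 1 + 1 = 21 joints).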
-
- # convert absolute head & hand poses to relative poses with respect to the rest body pose
- if param_type == "head" or param_type == "body":
- param_dict["body_pose"] = self.smplx.pose_abs2rel(
- param_dict["global_pose"], param_dict["body_pose"], abs_joint="head"
- )
- if param_type == "hand" or param_type == "body":
- param_dict["body_pose"] = self.smplx.pose_abs2rel(
- param_dict["global_pose"],
- param_dict["body_pose"],
- abs_joint="left_wrist",
- )
- param_dict["body_pose"] = self.smplx.pose_abs2rel(
- param_dict["global_pose"],
- param_dict["body_pose"],
- abs_joint="right_wrist",
- )
-
- if self.cfg.model.check_pose:
- # check whether the pose is natural (as a relative rotation); if not, set the relative rotation to 0 (especially for the head pose)
- # xyz: pitch (positive for looking down), yaw (positive for looking left), roll (rolling the chin to the left)
- for pose_ind in [14]:  # head joint (15 - 1); the wrists would be 20 - 1 and 21 - 1
- curr_pose = param_dict["body_pose"][:, pose_ind]
- euler_pose = converter._compute_euler_from_matrix(curr_pose)
- for i, max_angle in enumerate([20, 70, 10]):
- euler_pose_curr = euler_pose[:, i]
- euler_pose_curr[euler_pose_curr != torch.clamp(
- euler_pose_curr,
- min=-max_angle * np.pi / 180,
- max=max_angle * np.pi / 180,
- )] = 0.0
- param_dict["body_pose"][:, pose_ind] = converter.batch_euler2matrix(euler_pose)
-
- # SMPLX
- verts, landmarks, joints = self.smplx(
- shape_params=param_dict["shape"],
- expression_params=param_dict["exp"],
- global_pose=param_dict["global_pose"],
- body_pose=param_dict["body_pose"],
- jaw_pose=param_dict["jaw_pose"],
- left_hand_pose=param_dict["left_hand_pose"],
- right_hand_pose=param_dict["right_hand_pose"],
- )
- smplx_kpt3d = joints.clone()
-
- # projection
- cam = param_dict[param_type + "_cam"]
- trans_verts = util.batch_orth_proj(verts, cam)
- predicted_landmarks = util.batch_orth_proj(landmarks, cam)[:, :, :2]
- predicted_joints = util.batch_orth_proj(joints, cam)[:, :, :2]
-
- prediction = {
- "vertices": verts,
- "transformed_vertices": trans_verts,
- "face_kpt": predicted_landmarks,
- "smplx_kpt": predicted_joints,
- "smplx_kpt3d": smplx_kpt3d,
- "joints": joints,
- "cam": param_dict[param_type + "_cam"],
- }
-
- # change the order of face keypoints, to be the same as "standard" 68 keypoints
- prediction["face_kpt"] = torch.cat([
- prediction["face_kpt"][:, -17:], prediction["face_kpt"][:, :-17]
- ],
- dim=1)
-
- prediction.update(param_dict)
-
- return prediction
-
- def decode_Tpose(self, param_dict):
- """return body mesh in T pose, support body and head param dict only"""
- verts, _, _ = self.smplx(
- shape_params=param_dict["shape"],
- expression_params=param_dict["exp"],
- jaw_pose=param_dict["jaw_pose"],
- )
- return verts
diff --git a/spaces/Yunshansongbai/SVC-Nahida/flask_api.py b/spaces/Yunshansongbai/SVC-Nahida/flask_api.py
deleted file mode 100644
index 1872019efc90d3e500625ab7dd300791c91136c6..0000000000000000000000000000000000000000
--- a/spaces/Yunshansongbai/SVC-Nahida/flask_api.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import io
-import logging
-
-import librosa
-import soundfile
-import paddle
-import paddle.audio as paddleaudio
-from flask import Flask, request, send_file
-from flask_cors import CORS
-
-from inference.infer_tool import Svc, RealTimeVC
-
-app = Flask(__name__)
-
-CORS(app)
-
-logging.getLogger('numba').setLevel(logging.WARNING)
-
-
-@app.route("/voiceChangeModel", methods=["POST"])
-def voice_change_model():
- request_form = request.form
- wave_file = request.files.get("sample", None)
- # pitch-shift information
- f_pitch_change = float(request_form.get("fPitchChange", 0))
- # sample rate required by the DAW
- daw_sample = int(float(request_form.get("sampleRate", 0)))
- speaker_id = int(float(request_form.get("sSpeakId", 0)))
- # get the wav file from the HTTP request and convert it
- input_wav_path = io.BytesIO(wave_file.read())
-
- # model inference
- if raw_infer:
- out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path)
- tar_audio = librosa.resample(out_audio.numpy(), svc_model.target_sample, daw_sample)
- else:
- out_audio = svc.process(svc_model, speaker_id, f_pitch_change, input_wav_path)
- tar_audio = librosa.resample(out_audio, svc_model.target_sample, daw_sample)
- # return the audio
- out_wav_path = io.BytesIO()
- soundfile.write(out_wav_path, tar_audio, daw_sample, format="wav")
- out_wav_path.seek(0)
- return send_file(out_wav_path, download_name="temp.wav", as_attachment=True)
-
-
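-# A minimal client sketch showing how a DAW/VST plugin (or any HTTP client) could call the
-# endpoint above; the file path, server address and sample rate are illustrative assumptions,
-# while the form field names match the ones parsed in voice_change_model().
-def _example_client(wav_path="sample.wav", url="http://127.0.0.1:6842/voiceChangeModel"):
-    import requests
-
-    with open(wav_path, "rb") as f:
-        files = {"sample": f}
-        data = {"fPitchChange": 0, "sampleRate": 44100, "sSpeakId": 0}
-        response = requests.post(url, files=files, data=data)
-    # the response body is a wav file (sent back as temp.wav)
-    with open("output.wav", "wb") as out:
-        out.write(response.content)
-
-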
-if __name__ == '__main__':
- # If True, synthesis uses direct slicing; if False, it uses cross-fading
- # Setting the VST plugin slice time to 0.3-0.5 s lowers latency; direct slicing can pop at the slice joints, while cross-fading produces slight audio overlap
- # Choose whichever method is acceptable, or raise the VST's maximum slice time to 1 s; it is set to True here, trading higher latency for more stable audio quality
- raw_infer = True
- # each model corresponds to exactly one config
- model_name = "logs/44k/G_1005.pdparams"
- config_name = "configs/config.json"
- svc_model = Svc(model_name, config_name)
- svc = RealTimeVC()
- # this must match the VST plugin; changing it is not recommended
- app.run(port=6842, host="0.0.0.0", debug=False, threaded=False)
diff --git a/spaces/a-v-bely/russian-task-generator/utilities_database/user_database_utils.py b/spaces/a-v-bely/russian-task-generator/utilities_database/user_database_utils.py
deleted file mode 100644
index 094047804c492a2eb912dad4e6f143809caaf85b..0000000000000000000000000000000000000000
--- a/spaces/a-v-bely/russian-task-generator/utilities_database/user_database_utils.py
+++ /dev/null
@@ -1,281 +0,0 @@
-import re
-import secrets
-import requests
-import pandas as pd
-import streamlit as st
-from trycourier import Courier
-from argon2 import PasswordHasher
-from argon2.exceptions import VerifyMismatchError
-
-ph = PasswordHasher()
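-# ph.hash(password) produces an encoded Argon2 hash suitable for storage, and
-# ph.verify(stored_hash, password) returns True on success or raises VerifyMismatchError on mismatch.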
-
-
-def check_usr_pass(user_log_in_database, user_name: str, password: str) -> bool:
- """
- Authenticates the user_name and password.
- """
- registered_user = user_log_in_database.fetch({'user_name': user_name}).items
- try:
- passwd_verification_bool = ph.verify(registered_user[0]['password'], password)
- if passwd_verification_bool:
- return True
- return False
- except VerifyMismatchError:
- pass
- return False
-
-
-def load_lottie_url(url: str) -> dict or None:
- """
- Fetches the lottie animation using the URL.
- """
- r = requests.get(url)
- if r.status_code != 200:
- return None
- return r.json()
-
-
-def check_valid_name(name_sign_up: str) -> bool:
- """
- Checks if the user entered a valid name while creating the account.
- """
- name_regex_eng = r'^[A-Za-z_]\w*'
- name_regex_rus = r'^[А-Яа-я_][А-Яа-я0-9_]*'
-
- if re.search(name_regex_eng, name_sign_up) or re.search(name_regex_rus, name_sign_up):
- return True
- return False
-
-
-def check_valid_email(email_sign_up: str) -> bool:
- """
- Checks if the user entered a valid e-mail while creating the account.
- """
- regex = re.compile(r'([A-Za-z0-9]+[._-])*[A-Za-z0-9]+@[A-Za-z0-9-]+(\.[A-Za-z]{2,})+')
-
- if re.fullmatch(regex, email_sign_up):
- return True
- return False
-
-
-def check_unique_email(user_log_in_database, email_sign_up: str) -> bool:
- """
- Checks if the e-mail already exists (since e-mail needs to be unique).
- """
- authorized_users_data = user_log_in_database.fetch({'e-mail': email_sign_up}).items
- if len(authorized_users_data) == 0:
- return True
- return False
-
-
-def non_empty_str_check(user_name_sign_up: str) -> bool:
- """
- Checks for non-empty strings.
- """
- empty_count = 0
- for i in user_name_sign_up:
- if i == ' ':
- empty_count = empty_count + 1
- if empty_count == len(user_name_sign_up):
- return False
- if not user_name_sign_up:
- return False
- return True
-
-
-def check_unique_usr(user_log_in_database, user_name_sign_up: str):
- """
- Checks if the user_name already exists (since user_name needs to be unique),
- also checks for non-empty user_name.
- """
- authorized_users_data = user_log_in_database.fetch({'user_name': user_name_sign_up}).items
- if len(authorized_users_data) != 0:
- return False
- if user_name_sign_up in authorized_users_data:
- return False
- non_empty_check = non_empty_str_check(user_name_sign_up)
- if not non_empty_check:
- return None
- return True
-
-
-def register_new_usr(user_log_in_database, name_sign_up: str, email_sign_up: str, user_name_sign_up: str,
- password_sign_up: str, professional_level: str) -> None:
- """
- Saves the information of the new user in the user database.
- """
- new_usr_data = {'user_name': user_name_sign_up,
- 'name': name_sign_up,
- 'e-mail': email_sign_up,
- 'password': ph.hash(password_sign_up),
- 'professional_level': professional_level}
-
- return user_log_in_database.put(new_usr_data)
-
-
-def check_user_name_exists(user_log_in_database, user_name: str) -> bool:
- """
- Checks if the user_name exists in the user database.
- """
- authorized_users_data = user_log_in_database.fetch({'user_name': user_name}).items
- if len(authorized_users_data) == 1:
- return True
- return False
-
-
-def check_email_exists(user_log_in_database, email_forgot_passwd: str):
- """
- Checks if the e-mail entered is present in the user database.
- """
- authorized_users_data = user_log_in_database.fetch({'e-mail': email_forgot_passwd}).items
- if len(authorized_users_data) == 1:
- return True, authorized_users_data[0]['user_name']
- return False, None
-
-
-def generate_random_passwd() -> str:
- """
- Generates a random password to be sent in e-mail.
- """
- password_length = 10
- return secrets.token_urlsafe(password_length)
-
-
-def send_passwd_in_email(auth_token: str, user_name_forgot_passwd: str, email_forgot_passwd: str, company_name: str,
- random_password: str) -> None:
- """
- Triggers an e-mail to the user containing the randomly generated password.
- """
- client = Courier(auth_token=auth_token)
-
- client.send_message(
- message={
- "to": {
- "email": email_forgot_passwd
- },
- "content": {
- "title": f'{company_name}: Login Password!',
- "body": f'Hi! {user_name_forgot_passwd},\n\nYour temporary login password is: {random_password}\n\n'
- + '{{info}}'
- },
- "data": {
- "info": "Please reset your password at the earliest for security reasons."
- }
- }
- )
-
-
-def change_passwd(user_log_in_database, email_forgot_passwd: str, random_password: str) -> None:
- """
- Replaces the old password with the newly generated password.
- """
- user_key = user_log_in_database.fetch({'e-mail': email_forgot_passwd}).items[0]['key']
- updates = {'password': ph.hash(random_password)}
- return user_log_in_database.update(updates, user_key)
-
-
-def check_current_passwd(user_log_in_database, email_reset_passwd: str, current_passwd: str = None) -> bool:
- """
- Authenticates the entered password against the account (looked up
- by e-mail) when resetting the password.
- """
- authorized_user_data = user_log_in_database.fetch({'e-mail': email_reset_passwd}).items[0]
- if current_passwd is None:
- current_passwd = 'b'
- try:
- if ph.verify(authorized_user_data['password'], current_passwd):
- return True
- except VerifyMismatchError:
- pass
- return False
-
-
-def save_data_in_database(user_task_database, save_type, save_name, cefr_level, time_stamp, creator_name=None,
- generated_result=None, test_taker_name=None, test_taker_answers=None, test_taker_result=None,
- comments=None, distractor_model=None):
- already_saved_names = user_task_database.fetch({'creator_name': creator_name,
- 'save_name': save_name,
- 'cefr_level': cefr_level}).items
- already_saved_tasks = user_task_database.fetch({'creator_name': creator_name,
- 'generated_result': generated_result,
- 'cefr_level': cefr_level}).items
- already_saved_tests = user_task_database.fetch({'test_taker_name': test_taker_name,
- 'save_name': save_name,
- 'cefr_level': cefr_level}).items
- if save_name == '' and save_type == 'download':
- save_name = generated_result['name']
- if len(already_saved_names) != 0 and save_type == 'download':
- return st.success('Файл с таким названием уже существует! Введите другое название и повторите попытку.')
- elif len(already_saved_tasks) != 0 and save_type == 'download':
- return st.error(f'Вы уже сохраняли эти задания под именем {already_saved_tasks[0]["save_name"]}. ')
- elif (len(already_saved_tests) != 0
- and save_type == 'online_test'): # and int(test_taker_result) == int(already_saved_tests[0]["user_points"])
- return st.error('Вы уже решали данный тест!')
- else:
- if save_type == 'download':
- new_save_data = {
- 'save_type': save_type,
- 'save_name': save_name,
- 'cefr_level': cefr_level,
- 'time_stamp': time_stamp,
- 'creator_name': creator_name,
- 'generated_result': generated_result,
- 'distractor_model': distractor_model
- }
- else:
- new_save_data = {
- 'save_type': save_type,
- 'save_name': save_name,
- 'cefr_level': cefr_level,
- 'time_stamp': time_stamp,
- 'creator_name': creator_name,
- 'test_taker_name': test_taker_name,
- 'test_taker_answers': test_taker_answers,
- 'generated_result': generated_result,
- 'test_taker_result': test_taker_result,
- 'comments': comments}
- user_task_database.put(new_save_data)
- if save_type == 'download':
- return st.success('Задания успешно сохранены! Можете переходить на следующие вкладки')
- elif save_type == 'online_test':
- return st.success('Ответы успешно сохранены!')
-
-
-def load_user_tasks_data(user_task_database, save_type, creator_name=None, test_taker_name=None):
- if save_type == 'download':
- ITEMS = []
- user_data = user_task_database.fetch({'creator_name': creator_name, 'save_type': save_type})
- _last = user_data.last
- while _last is not None:
- ITEMS.extend(user_data.items)
- user_data = user_task_database.fetch({'creator_name': creator_name, 'save_type': save_type}, last=_last)
- _last = user_data.last
- names = [item['save_name'] for item in ITEMS]
- cefr_level = [item['cefr_level'] for item in ITEMS]
- time_stamps = [item['time_stamp'] for item in ITEMS]
- return_data = pd.DataFrame([names, cefr_level, time_stamps]).transpose()
- return_data.columns = ['Название', 'Уровень', 'Время создания']
- else:
- ITEMS = []
- user_data = user_task_database.fetch({'test_taker_name': test_taker_name, 'save_type': save_type})
- _last = user_data.last
- while _last is not None:
- ITEMS.extend(user_data.items)
- user_data = user_task_database.fetch({'test_taker_name': test_taker_name, 'save_type': save_type}, last=_last)
- _last = user_data.last
- names = [item['save_name'] for item in ITEMS]
- cefr_level = [item['cefr_level'] for item in ITEMS]
- time_stamps = [item['time_stamp'] for item in ITEMS]
- creator_name = [item['creator_name'] for item in ITEMS]
- test_taker_result = [item['test_taker_result'] for item in ITEMS]
- return_data = pd.DataFrame([names, cefr_level, test_taker_result, time_stamps, creator_name]).transpose()
- return_data.columns = ['Название', 'Уровень', 'Оценка', 'Дата прохождения', 'Автор заданий']
- return return_data
-
-
-def load_users_particular_task(user_task_database, load_mode, creator_name, save_name, cefr_level,):
- return_data = user_task_database.fetch({'creator_name': creator_name,
- 'save_name': save_name,
- 'save_type': load_mode,
- 'cefr_level': cefr_level}).items[0]['generated_result']
- return return_data
diff --git a/spaces/abdvl/datahub_qa_bot/docs/components.md b/spaces/abdvl/datahub_qa_bot/docs/components.md
deleted file mode 100644
index ef76729bb37fbf94c5e7cfc8a634ed7ab10e3ce7..0000000000000000000000000000000000000000
--- a/spaces/abdvl/datahub_qa_bot/docs/components.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-title: "Components"
----
-
-# DataHub Components Overview
-
-The DataHub platform consists of the components shown in the following diagram.
-
-
-
-## Metadata Store
-
-The Metadata Store is responsible for storing the [Entities & Aspects](https://datahubproject.io/docs/metadata-modeling/metadata-model/) comprising the Metadata Graph. This includes
-exposing an API for [ingesting metadata](https://datahubproject.io/docs/metadata-service#ingesting-entities), [fetching Metadata by primary key](https://datahubproject.io/docs/metadata-service#retrieving-entities), [searching entities](https://datahubproject.io/docs/metadata-service#search-an-entity), and [fetching Relationships](https://datahubproject.io/docs/metadata-service#get-relationships-edges) between
-entities. It consists of a Spring Java Service hosting a set of [Rest.li](https://linkedin.github.io/rest.li/) API endpoints, along with
-MySQL, Elasticsearch, & Kafka for primary storage & indexing.
-
-Get started with the Metadata Store by following the [Quickstart Guide](https://datahubproject.io/docs/quickstart/).
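-
-As a rough sketch of what fetching an entity from the Metadata Store can look like (assuming a local quickstart instance on `localhost:8080`; the exact routes and payloads are described in the Metadata Service guide linked above):
-
-```python
-import urllib.parse
-
-import requests
-
-# Fetch a single entity by its primary key (its urn), URL-encoding the urn first.
-GMS = "http://localhost:8080"
-urn = "urn:li:corpuser:datahub"  # example urn; substitute one from your own instance
-response = requests.get(f"{GMS}/entities/{urllib.parse.quote(urn, safe='')}")
-print(response.json())
-```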
-
-## Metadata Models
-
-Metadata Models are schemas defining the shape of the Entities & Aspects comprising the Metadata Graph, along with the relationships between them. They are defined
-using [PDL](https://linkedin.github.io/rest.li/pdl_schema), a modeling language quite similar in form to Protobuf that serializes to JSON. Entities represent a specific class of Metadata
-Asset such as a Dataset, a Dashboard, a Data Pipeline, and beyond. Each *instance* of an Entity is identified by a unique identifier called an `urn`. Aspects represent related bundles of data attached
-to an instance of an Entity such as its descriptions, tags, and more. View the current set of Entities supported [here](https://datahubproject.io/docs/metadata-modeling/metadata-model#exploring-datahubs-metadata-model).
-
-Learn more about how DataHub models Metadata [here](https://datahubproject.io/docs/metadata-modeling/metadata-model/).
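-
-As a small sketch, dataset urns can be built with the helper below; the `acryl-datahub` Python package and the specific platform/name/env values are illustrative assumptions:
-
-```python
-# A sketch of constructing a Dataset urn; the values are made up.
-from datahub.emitter.mce_builder import make_dataset_urn
-
-urn = make_dataset_urn(platform="hive", name="fct_users_created", env="PROD")
-print(urn)  # urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)
-```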
-
-## Ingestion Framework
-
-The Ingestion Framework is a modular, extensible Python library for extracting Metadata from external source systems (e.g.
-Snowflake, Looker, MySQL, Kafka), transforming it into DataHub's [Metadata Model](https://datahubproject.io/docs/metadata-modeling/metadata-model/), and writing it into DataHub via
-either Kafka or using the Metadata Store Rest APIs directly. DataHub supports an [extensive list of Source connectors](https://datahubproject.io/docs/metadata-ingestion/#installing-plugins) to choose from, along with
-a host of capabilities including schema extraction, table & column profiling, usage information extraction, and more.
-
-Getting started with the Ingestion Framework is as simple as defining a YAML recipe file and executing the `datahub ingest` command.
-Learn more by heading over to the [Metadata Ingestion](https://datahubproject.io/docs/metadata-ingestion/) guide.
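-
-As an illustrative sketch, the same kind of recipe you would normally put in the YAML file can also be run programmatically (the MySQL connection values below are placeholders):
-
-```python
-# A minimal, hypothetical ingestion recipe executed from Python instead of `datahub ingest -c recipe.yml`.
-from datahub.ingestion.run.pipeline import Pipeline
-
-pipeline = Pipeline.create(
-    {
-        "source": {
-            "type": "mysql",
-            "config": {"host_port": "localhost:3306", "username": "user", "password": "pass"},
-        },
-        "sink": {"type": "datahub-rest", "config": {"server": "http://localhost:8080"}},
-    }
-)
-pipeline.run()
-pipeline.raise_from_status()
-```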
-
-## GraphQL API
-
-The [GraphQL](https://graphql.org/) API provides a strongly-typed, entity-oriented API that makes interacting with the Entities comprising the Metadata
-Graph simple, including APIs for adding and removing tags, owners, links & more to Metadata Entities! Most notably, this API is consumed by the User Interface (discussed below) for enabling Search & Discovery, Governance, Observability
-and more.
-
-To get started using the GraphQL API, check out the [Getting Started with GraphQL](https://datahubproject.io/docs/api/graphql/getting-started) guide.
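-
-As a rough sketch of calling it over HTTP (the endpoint path, token, and query shape below are assumptions; the linked guide has the authoritative examples):
-
-```python
-import requests
-
-# A hypothetical dataset search against a local DataHub frontend; adjust the URL, token, and fields to your deployment.
-query = """
-query {
-  search(input: { type: DATASET, query: "covid", start: 0, count: 10 }) {
-    total
-    searchResults { entity { urn } }
-  }
-}
-"""
-response = requests.post(
-    "http://localhost:9002/api/graphql",
-    headers={"Authorization": "Bearer <personal-access-token>"},
-    json={"query": query},
-)
-print(response.json())
-```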
-
-## User Interface
-
-DataHub comes with a React UI including an ever-evolving set of features to make Discovering, Governing, & Debugging your Data Assets easy & delightful.
-For a full overview of the capabilities currently supported, take a look at the [Features](https://datahubproject.io/docs/features/) overview. For a look at what's coming next,
-head over to the [Roadmap](https://datahubproject.io/docs/roadmap/).
-
-## Learn More
-
-Learn more about the specifics of the [DataHub Architecture](./architecture/architecture.md) in the Architecture Overview. Learn about using & developing the components
-of the Platform by visiting the Module READMEs.
-
-## Feedback / Questions / Concerns
-
-We want to hear from you! For any inquiries, including Feedback, Questions, or Concerns, reach out on [Slack](https://datahubspace.slack.com/join/shared_invite/zt-nx7i0dj7-I3IJYC551vpnvvjIaNRRGw#/shared-invite/email)!
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/bbox_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/bbox_head.py
deleted file mode 100644
index 408abef3a244115b4e73748049a228e37ad0665c..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/bbox_head.py
+++ /dev/null
@@ -1,483 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.runner import auto_fp16, force_fp32
-from torch.nn.modules.utils import _pair
-
-from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
-from mmdet.models.builder import HEADS, build_loss
-from mmdet.models.losses import accuracy
-
-
-@HEADS.register_module()
-class BBoxHead(nn.Module):
- """Simplest RoI head, with only two fc layers for classification and
- regression respectively."""
-
- def __init__(self,
- with_avg_pool=False,
- with_cls=True,
- with_reg=True,
- roi_feat_size=7,
- in_channels=256,
- num_classes=80,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- clip_border=True,
- target_means=[0., 0., 0., 0.],
- target_stds=[0.1, 0.1, 0.2, 0.2]),
- reg_class_agnostic=False,
- reg_decoded_bbox=False,
- loss_cls=dict(
- type='CrossEntropyLoss',
- use_sigmoid=False,
- loss_weight=1.0),
- loss_bbox=dict(
- type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
- super(BBoxHead, self).__init__()
- assert with_cls or with_reg
- self.with_avg_pool = with_avg_pool
- self.with_cls = with_cls
- self.with_reg = with_reg
- self.roi_feat_size = _pair(roi_feat_size)
- self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
- self.in_channels = in_channels
- self.num_classes = num_classes
- self.reg_class_agnostic = reg_class_agnostic
- self.reg_decoded_bbox = reg_decoded_bbox
- self.fp16_enabled = False
-
- self.bbox_coder = build_bbox_coder(bbox_coder)
- self.loss_cls = build_loss(loss_cls)
- self.loss_bbox = build_loss(loss_bbox)
-
- in_channels = self.in_channels
- if self.with_avg_pool:
- self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
- else:
- in_channels *= self.roi_feat_area
- if self.with_cls:
- # need to add background class
- self.fc_cls = nn.Linear(in_channels, num_classes + 1)
- if self.with_reg:
- out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes
- self.fc_reg = nn.Linear(in_channels, out_dim_reg)
- self.debug_imgs = None
-
- def init_weights(self):
- # conv layers are already initialized by ConvModule
- if self.with_cls:
- nn.init.normal_(self.fc_cls.weight, 0, 0.01)
- nn.init.constant_(self.fc_cls.bias, 0)
- if self.with_reg:
- nn.init.normal_(self.fc_reg.weight, 0, 0.001)
- nn.init.constant_(self.fc_reg.bias, 0)
-
- @auto_fp16()
- def forward(self, x):
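- # Shape sketch (following the constructor above): x is (num_rois, in_channels, *roi_feat_size);
- # cls_score is (num_rois, num_classes + 1) and bbox_pred is (num_rois, 4) if
- # reg_class_agnostic else (num_rois, 4 * num_classes).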
- if self.with_avg_pool:
- x = self.avg_pool(x)
- x = x.view(x.size(0), -1)
- cls_score = self.fc_cls(x) if self.with_cls else None
- bbox_pred = self.fc_reg(x) if self.with_reg else None
- return cls_score, bbox_pred
-
- def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
- pos_gt_labels, cfg):
- """Calculate the ground truth for proposals in the single image
- according to the sampling results.
-
- Args:
- pos_bboxes (Tensor): Contains all the positive boxes,
- has shape (num_pos, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- neg_bboxes (Tensor): Contains all the negative boxes,
- has shape (num_neg, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- pos_gt_bboxes (Tensor): Contains all the gt_boxes,
- has shape (num_gt, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- pos_gt_labels (Tensor): Contains all the gt_labels,
- has shape (num_gt).
- cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
-
- Returns:
- Tuple[Tensor]: Ground truth for proposals
- in a single image. Containing the following Tensors:
-
- - labels(Tensor): Gt_labels for all proposals, has
- shape (num_proposals,).
- - label_weights(Tensor): Labels_weights for all
- proposals, has shape (num_proposals,).
- - bbox_targets(Tensor):Regression target for all
- proposals, has shape (num_proposals, 4), the
- last dimension 4 represents [tl_x, tl_y, br_x, br_y].
- - bbox_weights(Tensor):Regression weights for all
- proposals, has shape (num_proposals, 4).
- """
- num_pos = pos_bboxes.size(0)
- num_neg = neg_bboxes.size(0)
- num_samples = num_pos + num_neg
-
- # original implementation uses new_zeros since BG are set to be 0
- # now use empty & fill because BG cat_id = num_classes,
- # FG cat_id = [0, num_classes-1]
- labels = pos_bboxes.new_full((num_samples, ),
- self.num_classes,
- dtype=torch.long)
- label_weights = pos_bboxes.new_zeros(num_samples)
- bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
- bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
- if num_pos > 0:
- labels[:num_pos] = pos_gt_labels
- pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
- label_weights[:num_pos] = pos_weight
- if not self.reg_decoded_bbox:
- pos_bbox_targets = self.bbox_coder.encode(
- pos_bboxes, pos_gt_bboxes)
- else:
- # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
- # is applied directly on the decoded bounding boxes, both
- # the predicted boxes and regression targets should be with
- # absolute coordinate format.
- pos_bbox_targets = pos_gt_bboxes
- bbox_targets[:num_pos, :] = pos_bbox_targets
- bbox_weights[:num_pos, :] = 1
- if num_neg > 0:
- label_weights[-num_neg:] = 1.0
-
- return labels, label_weights, bbox_targets, bbox_weights
-
- def get_targets(self,
- sampling_results,
- gt_bboxes,
- gt_labels,
- rcnn_train_cfg,
- concat=True):
- """Calculate the ground truth for all samples in a batch according to
- the sampling_results.
-
- Targets are computed per image by the `_get_target_single`
- function (applied via `multi_apply`) and are optionally
- concatenated across the batch.
-
- Args:
- sampling_results (List[obj:SamplingResults]): Assign results of
- all images in a batch after sampling.
- gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
- each tensor has shape (num_gt, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- gt_labels (list[Tensor]): Gt_labels of all images in a batch,
- each tensor has shape (num_gt,).
- rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
- concat (bool): Whether to concatenate the results of all
- the images in a single batch.
-
- Returns:
- Tuple[Tensor]: Ground truth for proposals in a single image.
- Containing the following list of Tensors:
-
- - labels (list[Tensor],Tensor): Gt_labels for all
- proposals in a batch, each tensor in list has
- shape (num_proposals,) when `concat=False`, otherwise
- just a single tensor has shape (num_all_proposals,).
- - label_weights (list[Tensor]): Labels_weights for
- all proposals in a batch, each tensor in list has
- shape (num_proposals,) when `concat=False`, otherwise
- just a single tensor has shape (num_all_proposals,).
- - bbox_targets (list[Tensor],Tensor): Regression target
- for all proposals in a batch, each tensor in list
- has shape (num_proposals, 4) when `concat=False`,
- otherwise just a single tensor has shape
- (num_all_proposals, 4), the last dimension 4 represents
- [tl_x, tl_y, br_x, br_y].
- - bbox_weights (list[tensor],Tensor): Regression weights for
- all proposals in a batch, each tensor in list has shape
- (num_proposals, 4) when `concat=False`, otherwise just a
- single tensor has shape (num_all_proposals, 4).
- """
- pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
- neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
- pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
- pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
- labels, label_weights, bbox_targets, bbox_weights = multi_apply(
- self._get_target_single,
- pos_bboxes_list,
- neg_bboxes_list,
- pos_gt_bboxes_list,
- pos_gt_labels_list,
- cfg=rcnn_train_cfg)
-
- if concat:
- labels = torch.cat(labels, 0)
- label_weights = torch.cat(label_weights, 0)
- bbox_targets = torch.cat(bbox_targets, 0)
- bbox_weights = torch.cat(bbox_weights, 0)
- return labels, label_weights, bbox_targets, bbox_weights
-
- @force_fp32(apply_to=('cls_score', 'bbox_pred'))
- def loss(self,
- cls_score,
- bbox_pred,
- rois,
- labels,
- label_weights,
- bbox_targets,
- bbox_weights,
- reduction_override=None):
- losses = dict()
- if cls_score is not None:
- avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
- if cls_score.numel() > 0:
- losses['loss_cls'] = self.loss_cls(
- cls_score,
- labels,
- label_weights,
- avg_factor=avg_factor,
- reduction_override=reduction_override)
- losses['acc'] = accuracy(cls_score, labels)
- if bbox_pred is not None:
- bg_class_ind = self.num_classes
- # 0~self.num_classes-1 are FG, self.num_classes is BG
- pos_inds = (labels >= 0) & (labels < bg_class_ind)
- # do not perform bounding box regression for BG anymore.
- if pos_inds.any():
- if self.reg_decoded_bbox:
- # When the regression loss (e.g. `IouLoss`,
- # `GIouLoss`, `DIouLoss`) is applied directly on
- # the decoded bounding boxes, it decodes the
- # already encoded coordinates to absolute format.
- bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
- if self.reg_class_agnostic:
- pos_bbox_pred = bbox_pred.view(
- bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
- else:
- pos_bbox_pred = bbox_pred.view(
- bbox_pred.size(0), -1,
- 4)[pos_inds.type(torch.bool),
- labels[pos_inds.type(torch.bool)]]
- losses['loss_bbox'] = self.loss_bbox(
- pos_bbox_pred,
- bbox_targets[pos_inds.type(torch.bool)],
- bbox_weights[pos_inds.type(torch.bool)],
- avg_factor=bbox_targets.size(0),
- reduction_override=reduction_override)
- else:
- losses['loss_bbox'] = bbox_pred[pos_inds].sum()
- return losses
-
- @force_fp32(apply_to=('cls_score', 'bbox_pred'))
- def get_bboxes(self,
- rois,
- cls_score,
- bbox_pred,
- img_shape,
- scale_factor,
- rescale=False,
- cfg=None):
- """Transform network output for a batch into bbox predictions.
-
- If the input rois have a batch dimension, the function runs in
- `batch_mode` and returns a tuple[list[Tensor], list[Tensor]];
- otherwise it returns a tuple[Tensor, Tensor].
-
- Args:
- rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5)
- or (B, num_boxes, 5)
- cls_score (list[Tensor] or Tensor): Box scores for
- each scale level, each is a 4D-tensor, the channel number is
- num_points * num_classes.
- bbox_pred (Tensor, optional): Box energies / deltas for each scale
- level, each is a 4D-tensor, the channel number is
- num_classes * 4.
- img_shape (Sequence[int] or torch.Tensor or Sequence[
- Sequence[int]], optional): Maximum bounds for boxes, specifies
- (H, W, C) or (H, W). If rois shape is (B, num_boxes, 4), then
- the max_shape should be a Sequence[Sequence[int]]
- and the length of max_shape should also be B.
- scale_factor (tuple[ndarray] or ndarray): Scale factor of the
- image arranged as (w_scale, h_scale, w_scale, h_scale). In
- `batch_mode`, the scale_factor shape is tuple[ndarray].
- rescale (bool): If True, return boxes in original image space.
- Default: False.
- cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None
-
- Returns:
- tuple[list[Tensor], list[Tensor]] or tuple[Tensor, Tensor]:
- If the input has a batch dimension, the return value is
- a tuple of the list. The first list contains the boxes of
- the corresponding image in a batch, each tensor has the
- shape (num_boxes, 5) and last dimension 5 represent
- (tl_x, tl_y, br_x, br_y, score). Each Tensor in the second
- list is the labels with shape (num_boxes, ). The length of
- both lists should be equal to batch_size. Otherwise return
- value is a tuple of two tensors, the first tensor is the
- boxes with scores, the second tensor is the labels, both
- have the same shape as the first case.
- """
- if isinstance(cls_score, list):
- cls_score = sum(cls_score) / float(len(cls_score))
-
- scores = F.softmax(
- cls_score, dim=-1) if cls_score is not None else None
-
- batch_mode = True
- if rois.ndim == 2:
- # e.g. AugTest, Cascade R-CNN, HTC, SCNet...
- batch_mode = False
-
- # add batch dimension
- if scores is not None:
- scores = scores.unsqueeze(0)
- if bbox_pred is not None:
- bbox_pred = bbox_pred.unsqueeze(0)
- rois = rois.unsqueeze(0)
-
- if bbox_pred is not None:
- bboxes = self.bbox_coder.decode(
- rois[..., 1:], bbox_pred, max_shape=img_shape)
- else:
- bboxes = rois[..., 1:].clone()
- if img_shape is not None:
- max_shape = bboxes.new_tensor(img_shape)[..., :2]
- min_xy = bboxes.new_tensor(0)
- max_xy = torch.cat(
- [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2)
- bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
- bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
-
- if rescale and bboxes.size(-2) > 0:
- if not isinstance(scale_factor, tuple):
- scale_factor = tuple([scale_factor])
- # B, 1, bboxes.size(-1)
- scale_factor = bboxes.new_tensor(scale_factor).unsqueeze(1).repeat(
- 1, 1,
- bboxes.size(-1) // 4)
- bboxes /= scale_factor
-
- det_bboxes = []
- det_labels = []
- for (bbox, score) in zip(bboxes, scores):
- if cfg is not None:
- det_bbox, det_label = multiclass_nms(bbox, score,
- cfg.score_thr, cfg.nms,
- cfg.max_per_img)
- else:
- det_bbox, det_label = bbox, score
- det_bboxes.append(det_bbox)
- det_labels.append(det_label)
-
- if not batch_mode:
- det_bboxes = det_bboxes[0]
- det_labels = det_labels[0]
- return det_bboxes, det_labels
-
- @force_fp32(apply_to=('bbox_preds', ))
- def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
- """Refine bboxes during training.
-
- Args:
- rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
- and bs is the sampled RoIs per image. The first column is
- the image id and the next 4 columns are x1, y1, x2, y2.
- labels (Tensor): Shape (n*bs, ).
- bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).
- pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
- is a gt bbox.
- img_metas (list[dict]): Meta info of each image.
-
- Returns:
- list[Tensor]: Refined bboxes of each image in a mini-batch.
-
- Example:
- >>> # xdoctest: +REQUIRES(module:kwarray)
- >>> import kwarray
- >>> import numpy as np
- >>> from mmdet.core.bbox.demodata import random_boxes
- >>> self = BBoxHead(reg_class_agnostic=True)
- >>> n_roi = 2
- >>> n_img = 4
- >>> scale = 512
- >>> rng = np.random.RandomState(0)
- >>> img_metas = [{'img_shape': (scale, scale)}
- ... for _ in range(n_img)]
- >>> # Create rois in the expected format
- >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
- >>> img_ids = torch.randint(0, n_img, (n_roi,))
- >>> img_ids = img_ids.float()
- >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
- >>> # Create other args
- >>> labels = torch.randint(0, 2, (n_roi,)).long()
- >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
- >>> # For each image, pretend random positive boxes are gts
- >>> is_label_pos = (labels.numpy() > 0).astype(np.int)
- >>> lbl_per_img = kwarray.group_items(is_label_pos,
- ... img_ids.numpy())
- >>> pos_per_img = [sum(lbl_per_img.get(gid, []))
- ... for gid in range(n_img)]
- >>> pos_is_gts = [
- >>> torch.randint(0, 2, (npos,)).byte().sort(
- >>> descending=True)[0]
- >>> for npos in pos_per_img
- >>> ]
- >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
- >>> pos_is_gts, img_metas)
- >>> print(bboxes_list)
- """
- img_ids = rois[:, 0].long().unique(sorted=True)
- assert img_ids.numel() <= len(img_metas)
-
- bboxes_list = []
- for i in range(len(img_metas)):
- inds = torch.nonzero(
- rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
- num_rois = inds.numel()
-
- bboxes_ = rois[inds, 1:]
- label_ = labels[inds]
- bbox_pred_ = bbox_preds[inds]
- img_meta_ = img_metas[i]
- pos_is_gts_ = pos_is_gts[i]
-
- bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
- img_meta_)
-
- # filter gt bboxes
- pos_keep = 1 - pos_is_gts_
- keep_inds = pos_is_gts_.new_ones(num_rois)
- keep_inds[:len(pos_is_gts_)] = pos_keep
-
- bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
-
- return bboxes_list
-
- @force_fp32(apply_to=('bbox_pred', ))
- def regress_by_class(self, rois, label, bbox_pred, img_meta):
- """Regress the bbox for the predicted class. Used in Cascade R-CNN.
-
- Args:
- rois (Tensor): shape (n, 4) or (n, 5)
- label (Tensor): shape (n, )
- bbox_pred (Tensor): shape (n, 4*(#class)) or (n, 4)
- img_meta (dict): Image meta info.
-
- Returns:
- Tensor: Regressed bboxes, the same shape as input rois.
- """
- assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)
-
- if not self.reg_class_agnostic:
- label = label * 4
- inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
- bbox_pred = torch.gather(bbox_pred, 1, inds)
- assert bbox_pred.size(1) == 4
-
- if rois.size(1) == 4:
- new_rois = self.bbox_coder.decode(
- rois, bbox_pred, max_shape=img_meta['img_shape'])
- else:
- bboxes = self.bbox_coder.decode(
- rois[:, 1:], bbox_pred, max_shape=img_meta['img_shape'])
- new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
-
- return new_rois
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/yolact.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/yolact.py
deleted file mode 100644
index f32fde0d3dcbb55a405e05df433c4353938a148b..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/detectors/yolact.py
+++ /dev/null
@@ -1,146 +0,0 @@
-import torch
-
-from mmdet.core import bbox2result
-from ..builder import DETECTORS, build_head
-from .single_stage import SingleStageDetector
-
-
-@DETECTORS.register_module()
-class YOLACT(SingleStageDetector):
- """Implementation of `YOLACT `_"""
-
- def __init__(self,
- backbone,
- neck,
- bbox_head,
- segm_head,
- mask_head,
- train_cfg=None,
- test_cfg=None,
- pretrained=None):
- super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
- test_cfg, pretrained)
- self.segm_head = build_head(segm_head)
- self.mask_head = build_head(mask_head)
- self.init_segm_mask_weights()
-
- def init_segm_mask_weights(self):
- """Initialize weights of the YOLACT segm head and YOLACT mask head."""
- self.segm_head.init_weights()
- self.mask_head.init_weights()
-
- def forward_dummy(self, img):
- """Used for computing network flops.
-
- See `mmdetection/tools/analysis_tools/get_flops.py`
- """
- raise NotImplementedError
-
- def forward_train(self,
- img,
- img_metas,
- gt_bboxes,
- gt_labels,
- gt_bboxes_ignore=None,
- gt_masks=None):
- """
- Args:
- img (Tensor): of shape (N, C, H, W) encoding input images.
- Typically these should be mean centered and std scaled.
- img_metas (list[dict]): list of image info dict where each dict
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
- For details on the values of these keys see
- `mmdet/datasets/pipelines/formatting.py:Collect`.
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): class indices corresponding to each box
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
- boxes can be ignored when computing the loss.
- gt_masks (None | Tensor) : true segmentation masks for each box
- used if the architecture supports a segmentation task.
-
- Returns:
- dict[str, Tensor]: a dictionary of loss components
- """
- # convert Bitmap mask or Polygon Mask to Tensor here
- gt_masks = [
- gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
- for gt_mask in gt_masks
- ]
-
- x = self.extract_feat(img)
-
- cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
- bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
- img_metas)
- losses, sampling_results = self.bbox_head.loss(
- *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
-
- segm_head_outs = self.segm_head(x[0])
- loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
- losses.update(loss_segm)
-
- mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
- sampling_results)
- loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
- img_metas, sampling_results)
- losses.update(loss_mask)
-
- # check NaN and Inf
- for loss_name in losses.keys():
- assert torch.isfinite(torch.stack(losses[loss_name]))\
- .all().item(), '{} becomes infinite or NaN!'\
- .format(loss_name)
-
- return losses
-
- def simple_test(self, img, img_metas, rescale=False):
- """Test function without test time augmentation."""
- x = self.extract_feat(img)
-
- cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
-
- bbox_inputs = (cls_score, bbox_pred,
- coeff_pred) + (img_metas, self.test_cfg, rescale)
- det_bboxes, det_labels, det_coeffs = self.bbox_head.get_bboxes(
- *bbox_inputs)
- bbox_results = [
- bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
- for det_bbox, det_label in zip(det_bboxes, det_labels)
- ]
-
- num_imgs = len(img_metas)
- scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
- if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
- segm_results = [[[] for _ in range(self.mask_head.num_classes)]
- for _ in range(num_imgs)]
- else:
- # if det_bboxes is rescaled to the original image size, we need to
- # rescale it back to the testing scale to obtain RoIs.
- if rescale and not isinstance(scale_factors[0], float):
- scale_factors = [
- torch.from_numpy(scale_factor).to(det_bboxes[0].device)
- for scale_factor in scale_factors
- ]
- _bboxes = [
- det_bboxes[i][:, :4] *
- scale_factors[i] if rescale else det_bboxes[i][:, :4]
- for i in range(len(det_bboxes))
- ]
- mask_preds = self.mask_head(x[0], det_coeffs, _bboxes, img_metas)
- # apply mask post-processing to each image individually
- segm_results = []
- for i in range(num_imgs):
- if det_bboxes[i].shape[0] == 0:
- segm_results.append(
- [[] for _ in range(self.mask_head.num_classes)])
- else:
- segm_result = self.mask_head.get_seg_masks(
- mask_preds[i], det_labels[i], img_metas[i], rescale)
- segm_results.append(segm_result)
- return list(zip(bbox_results, segm_results))
-
- def aug_test(self, imgs, img_metas, rescale=False):
- """Test with augmentations."""
- raise NotImplementedError
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/__init__.py
deleted file mode 100644
index ac66d3cfe0ea04af45c0f3594bf135841c3812e3..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/decode_heads/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from .ann_head import ANNHead
-from .apc_head import APCHead
-from .aspp_head import ASPPHead
-from .cc_head import CCHead
-from .da_head import DAHead
-from .dm_head import DMHead
-from .dnl_head import DNLHead
-from .ema_head import EMAHead
-from .enc_head import EncHead
-from .fcn_head import FCNHead
-from .fpn_head import FPNHead
-from .gc_head import GCHead
-from .lraspp_head import LRASPPHead
-from .nl_head import NLHead
-from .ocr_head import OCRHead
-# from .point_head import PointHead
-from .psa_head import PSAHead
-from .psp_head import PSPHead
-from .sep_aspp_head import DepthwiseSeparableASPPHead
-from .sep_fcn_head import DepthwiseSeparableFCNHead
-from .uper_head import UPerHead
-
-__all__ = [
- 'FCNHead', 'PSPHead', 'ASPPHead', 'PSAHead', 'NLHead', 'GCHead', 'CCHead',
- 'UPerHead', 'DepthwiseSeparableASPPHead', 'ANNHead', 'DAHead', 'OCRHead',
- 'EncHead', 'DepthwiseSeparableFCNHead', 'FPNHead', 'EMAHead', 'DNLHead',
- 'APCHead', 'DMHead', 'LRASPPHead'
-]
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/necks/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/necks/__init__.py
deleted file mode 100644
index cc3209f7e4f29eaeff08839cb1ab2bc153c53eb8..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/necks/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-'''
- * Copyright (c) 2023 Salesforce, Inc.
- * All rights reserved.
- * SPDX-License-Identifier: Apache License 2.0
- * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
- * By Can Qin
- * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
- * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
- * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv
- * Copyright (c) OpenMMLab. All rights reserved.
-'''
-
-from .fpn import FPN
-from .multilevel_neck import MultiLevelNeck
-
-__all__ = ['FPN', 'MultiLevelNeck']
diff --git a/spaces/aifartist/sdzoom-Latent-Consistency-Model/lcm_txt2img/pipeline.py b/spaces/aifartist/sdzoom-Latent-Consistency-Model/lcm_txt2img/pipeline.py
deleted file mode 100644
index 9ca93eb1f4e0e77434657477f0f07938c973ebd4..0000000000000000000000000000000000000000
--- a/spaces/aifartist/sdzoom-Latent-Consistency-Model/lcm_txt2img/pipeline.py
+++ /dev/null
@@ -1,760 +0,0 @@
-# Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
-# and https://github.com/hojonathanho/diffusion
-
-import math
-from dataclasses import dataclass
-from typing import Any, Dict, List, Optional, Tuple, Union
-
-import numpy as np
-import torch
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
-
-from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
-from diffusers.configuration_utils import register_to_config
-from diffusers.image_processor import VaeImageProcessor
-from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from diffusers.utils import BaseOutput
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-class LatentConsistencyModelPipeline(DiffusionPipeline):
- _optional_components = ["scheduler"]
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- scheduler: "LCMScheduler",
- safety_checker: StableDiffusionSafetyChecker,
- feature_extractor: CLIPImageProcessor,
- requires_safety_checker: bool = True,
- ):
- super().__init__()
-
- scheduler = (
- scheduler
- if scheduler is not None
- else LCMScheduler(
- beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
- )
- )
-
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
-
- def _encode_prompt(
- self,
- prompt,
- device,
- num_images_per_prompt,
-        prompt_embeds: Optional[torch.FloatTensor] = None,
- ):
- r"""
- Encodes the prompt into text encoder hidden states.
- Args:
- prompt (`str` or `List[str]`, *optional*):
- prompt to be encoded
- device: (`torch.device`):
- torch device
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- """
-
- if prompt is not None and isinstance(prompt, str):
- pass
- elif prompt is not None and isinstance(prompt, list):
- len(prompt)
- else:
- prompt_embeds.shape[0]
-
- if prompt_embeds is None:
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
- text_input_ids, untruncated_ids
- ):
- removed_text = self.tokenizer.batch_decode(
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
- )
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = text_inputs.attention_mask.to(device)
- else:
- attention_mask = None
-
- prompt_embeds = self.text_encoder(
- text_input_ids.to(device),
- attention_mask=attention_mask,
- )
- prompt_embeds = prompt_embeds[0]
-
- if self.text_encoder is not None:
- prompt_embeds_dtype = self.text_encoder.dtype
- elif self.unet is not None:
- prompt_embeds_dtype = self.unet.dtype
- else:
- prompt_embeds_dtype = prompt_embeds.dtype
-
- prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
-
- bs_embed, seq_len, _ = prompt_embeds.shape
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- # Don't need to get uncond prompt embedding because of LCM Guided Distillation
- return prompt_embeds
-
- def run_safety_checker(self, image, device, dtype):
- if self.safety_checker is None:
- has_nsfw_concept = None
- else:
- if torch.is_tensor(image):
- feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
- else:
- feature_extractor_input = self.image_processor.numpy_to_pil(image)
- safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
- image, has_nsfw_concept = self.safety_checker(
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
- )
- return image, has_nsfw_concept
-
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):
- #print(f"{batch_size}, {num_channels_latents}, {height // self.vae_scale_factor}, {width // self.vae_scale_factor}, {latents}")
- #print(f"init_noise_sigma = {self.scheduler.init_noise_sigma}")
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
- if latents is None:
- latents = torch.randn(shape, dtype=dtype).to(device)
- else:
- latents = latents.to(device)
- # scale the initial noise by the standard deviation required by the scheduler
- latents = latents * self.scheduler.init_noise_sigma
- return latents
-
- def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
- """
- see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
- Args:
-        w: torch.Tensor: guidance scale values to embed (one per sample)
-        embedding_dim: int: dimension of the embeddings to generate
-        dtype: data type of the generated embeddings
-        Returns:
-        embedding vectors with shape `(len(w), embedding_dim)`
- """
- assert len(w.shape) == 1
- w = w * 1000.0
-
- half_dim = embedding_dim // 2
- emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
- emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
- emb = w.to(dtype)[:, None] * emb[None, :]
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
- if embedding_dim % 2 == 1: # zero pad
- emb = torch.nn.functional.pad(emb, (0, 1))
- assert emb.shape == (w.shape[0], embedding_dim)
- return emb
-
- @torch.no_grad()
- def __call__(
- self,
- prompt1: Union[str, List[str]] = None,
- prompt2: Union[str, List[str]] = None,
- sv: float = .5,
- sharpness: float = 1.,
- height: Optional[int] = 768,
- width: Optional[int] = 768,
- guidance_scale: float = 7.5,
- num_images_per_prompt: Optional[int] = 1,
- latents: Optional[torch.FloatTensor] = None,
- num_inference_steps: int = 4,
- lcm_origin_steps: int = 50,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- ):
- # 0. Default height and width to unet
- height = height or self.unet.config.sample_size * self.vae_scale_factor
- width = width or self.unet.config.sample_size * self.vae_scale_factor
-
- # 2. Define call parameters
- #if prompt is not None and isinstance(prompt, str):
- # batch_size = 1
- #elif prompt is not None and isinstance(prompt, list):
- # batch_size = len(prompt)
- #else:
- # batch_size = prompt_embeds.shape[0]
-
- batch_size = 1
-
- device = self._execution_device
- # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
-
- # 3. Encode input prompt
- print(f"prompt1 = {prompt1}")
- pe1 = self._encode_prompt(
- prompt1,
- device,
- num_images_per_prompt,
- prompt_embeds=prompt_embeds,
- )
-
- print(f"prompt2 = {prompt2}")
- pe2 = self._encode_prompt(
- prompt2,
- device,
- num_images_per_prompt,
- prompt_embeds=None,
- )
-
- prompt_embeds = (100-sv)/100 * pe1 + sv/100 * pe2
-
- # 4. Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, lcm_origin_steps)
- timesteps = self.scheduler.timesteps
-
- # 5. Prepare latent variable
- num_channels_latents = self.unet.config.in_channels
- latents = self.prepare_latents(
- batch_size * num_images_per_prompt,
- num_channels_latents,
- height,
- width,
- prompt_embeds.dtype,
- device,
- latents,
- )
- bs = batch_size * num_images_per_prompt
-
- # 6. Get Guidance Scale Embedding
- w = torch.tensor(guidance_scale).repeat(bs)
- w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
-
- # 7. LCM MultiStep Sampling Loop:
- #import time
- #tsLenm1 = len(timesteps) - 1
- #tm0 = time.time()
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- for i, t in enumerate(timesteps):
- ts = torch.full((bs,), t, device=device, dtype=torch.long)
- latents = latents.to(prompt_embeds.dtype)
-
- # model prediction (v-prediction, eps, x)
- model_pred = self.unet(
- latents,
- ts,
- timestep_cond=w_embedding,
- encoder_hidden_states=prompt_embeds,
- cross_attention_kwargs=cross_attention_kwargs,
- return_dict=False,
- )[0]
-
- # compute the previous noisy sample x_t -> x_t-1
- latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
-
- # # call the callback, if provided
- # if i == len(timesteps) - 1:
- #if i == tsLenm1:
- # print('SYNC')
- # torch.cuda.synchronize()
- progress_bar.update()
- #print(f"unet time = {time.time() - tm0}")
-
- denoised /= sharpness
-
- #denoised = denoised.to(prompt_embeds.dtype)
- if not output_type == "latent":
- image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
- has_nsfw_concept = None
- else:
- image = denoised
- has_nsfw_concept = None
-
- if has_nsfw_concept is None:
- do_denormalize = [True] * image.shape[0]
- else:
- do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
-
- image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
-
- if not return_dict:
- #return (image, has_nsfw_concept)
- print(f"image[0] isa {type(image[0])}")
- return image[0]
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
-
-
-@dataclass
-# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
-class LCMSchedulerOutput(BaseOutput):
- """
- Output class for the scheduler's `step` function output.
- Args:
- prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
- Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
- denoising loop.
- pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
- The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
- `pred_original_sample` can be used to preview progress or for guidance.
- """
-
- prev_sample: torch.FloatTensor
- denoised: Optional[torch.FloatTensor] = None
-
-
-# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
-def betas_for_alpha_bar(
- num_diffusion_timesteps,
- max_beta=0.999,
- alpha_transform_type="cosine",
-):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
- (1-beta) over time from t = [0,1].
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
- to that part of the diffusion process.
- Args:
- num_diffusion_timesteps (`int`): the number of betas to produce.
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
- prevent singularities.
-        alpha_transform_type (`str`, *optional*, defaults to `cosine`): the type of noise schedule for alpha_bar.
- Choose from `cosine` or `exp`
- Returns:
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
- """
- if alpha_transform_type == "cosine":
-
- def alpha_bar_fn(t):
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
-
- elif alpha_transform_type == "exp":
-
- def alpha_bar_fn(t):
- return math.exp(t * -12.0)
-
- else:
- raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
-
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
- return torch.tensor(betas, dtype=torch.float32)
-
-
-def rescale_zero_terminal_snr(betas):
- """
- Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
- Args:
- betas (`torch.FloatTensor`):
- the betas that the scheduler is being initialized with.
- Returns:
- `torch.FloatTensor`: rescaled betas with zero terminal SNR
- """
- # Convert betas to alphas_bar_sqrt
- alphas = 1.0 - betas
- alphas_cumprod = torch.cumprod(alphas, dim=0)
- alphas_bar_sqrt = alphas_cumprod.sqrt()
-
- # Store old values.
- alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
- alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
-
- # Shift so the last timestep is zero.
- alphas_bar_sqrt -= alphas_bar_sqrt_T
-
- # Scale so the first timestep is back to the old value.
- alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
-
- # Convert alphas_bar_sqrt to betas
- alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
- alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
- alphas = torch.cat([alphas_bar[0:1], alphas])
- betas = 1 - alphas
-
- return betas
-
-
-class LCMScheduler(SchedulerMixin, ConfigMixin):
- """
- `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
- non-Markovian guidance.
- This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
- methods the library implements for all schedulers such as loading and saving.
- Args:
- num_train_timesteps (`int`, defaults to 1000):
- The number of diffusion steps to train the model.
- beta_start (`float`, defaults to 0.0001):
- The starting `beta` value of inference.
- beta_end (`float`, defaults to 0.02):
- The final `beta` value.
- beta_schedule (`str`, defaults to `"linear"`):
- The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- trained_betas (`np.ndarray`, *optional*):
- Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
- clip_sample (`bool`, defaults to `True`):
- Clip the predicted sample for numerical stability.
- clip_sample_range (`float`, defaults to 1.0):
- The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
- set_alpha_to_one (`bool`, defaults to `True`):
- Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
- there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
- otherwise it uses the alpha value at step 0.
- steps_offset (`int`, defaults to 0):
- An offset added to the inference steps. You can use a combination of `offset=1` and
- `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
- Diffusion.
- prediction_type (`str`, defaults to `epsilon`, *optional*):
- Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
-            `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
- Video](https://imagen.research.google/video/paper.pdf) paper).
- thresholding (`bool`, defaults to `False`):
- Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
- as Stable Diffusion.
- dynamic_thresholding_ratio (`float`, defaults to 0.995):
- The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
- sample_max_value (`float`, defaults to 1.0):
- The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
- timestep_spacing (`str`, defaults to `"leading"`):
- The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
- Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
- rescale_betas_zero_snr (`bool`, defaults to `False`):
- Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
- dark samples instead of limiting it to samples with medium brightness. Loosely related to
- [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
- """
-
- # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
- order = 1
-
- @register_to_config
- def __init__(
- self,
- num_train_timesteps: int = 1000,
- beta_start: float = 0.0001,
- beta_end: float = 0.02,
- beta_schedule: str = "linear",
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
- clip_sample: bool = True,
- set_alpha_to_one: bool = True,
- steps_offset: int = 0,
- prediction_type: str = "epsilon",
- thresholding: bool = False,
- dynamic_thresholding_ratio: float = 0.995,
- clip_sample_range: float = 1.0,
- sample_max_value: float = 1.0,
- timestep_spacing: str = "leading",
- rescale_betas_zero_snr: bool = False,
- ):
- if trained_betas is not None:
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
- elif beta_schedule == "linear":
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
- elif beta_schedule == "scaled_linear":
- # this schedule is very specific to the latent diffusion model.
- self.betas = (
- torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
- )
- elif beta_schedule == "squaredcos_cap_v2":
- # Glide cosine schedule
- self.betas = betas_for_alpha_bar(num_train_timesteps)
- else:
-            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
-
- # Rescale for zero SNR
- if rescale_betas_zero_snr:
- self.betas = rescale_zero_terminal_snr(self.betas)
-
- self.alphas = 1.0 - self.betas
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
-
- # At every step in ddim, we are looking into the previous alphas_cumprod
- # For the final step, there is no previous alphas_cumprod because we are already at 0
- # `set_alpha_to_one` decides whether we set this parameter simply to one or
- # whether we use the final alpha of the "non-previous" one.
- self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
-
- # standard deviation of the initial noise distribution
- self.init_noise_sigma = 1.0
-
- # setable values
- self.num_inference_steps = None
- self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
-
- def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
- """
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
- current timestep.
- Args:
- sample (`torch.FloatTensor`):
- The input sample.
- timestep (`int`, *optional*):
- The current timestep in the diffusion chain.
- Returns:
- `torch.FloatTensor`:
- A scaled input sample.
- """
- return sample
-
- def _get_variance(self, timestep, prev_timestep):
- alpha_prod_t = self.alphas_cumprod[timestep]
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
- beta_prod_t = 1 - alpha_prod_t
- beta_prod_t_prev = 1 - alpha_prod_t_prev
-
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
-
- return variance
-
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
- def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
- """
- "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
- prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
- s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
- pixels from saturation at each step. We find that dynamic thresholding results in significantly better
- photorealism as well as better image-text alignment, especially when using very large guidance weights."
- https://arxiv.org/abs/2205.11487
- """
- dtype = sample.dtype
- batch_size, channels, height, width = sample.shape
-
- if dtype not in (torch.float32, torch.float64):
- sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
-
- # Flatten sample for doing quantile calculation along each image
- sample = sample.reshape(batch_size, channels * height * width)
-
- abs_sample = sample.abs() # "a certain percentile absolute pixel value"
-
- s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
- s = torch.clamp(
- s, min=1, max=self.config.sample_max_value
- ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
-
- s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
- sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
-
- sample = sample.reshape(batch_size, channels, height, width)
- sample = sample.to(dtype)
-
- return sample
-
- def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):
- """
- Sets the discrete timesteps used for the diffusion chain (to be run before inference).
- Args:
- num_inference_steps (`int`):
- The number of diffusion steps used when generating samples with a pre-trained model.
- """
-
- if num_inference_steps > self.config.num_train_timesteps:
- raise ValueError(
- f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
- f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
- f" maximal {self.config.num_train_timesteps} timesteps."
- )
-
- self.num_inference_steps = num_inference_steps
-
- # LCM Timesteps Setting: # Linear Spacing
- c = self.config.num_train_timesteps // lcm_origin_steps
- lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1 # LCM Training Steps Schedule
- skipping_step = len(lcm_origin_timesteps) // num_inference_steps
- timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
-
- self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
-
- def get_scalings_for_boundary_condition_discrete(self, t):
- self.sigma_data = 0.5 # Default: 0.5
-
- # By dividing 0.1: This is almost a delta function at t=0.
- c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
- c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
- return c_skip, c_out
-
- def step(
- self,
- model_output: torch.FloatTensor,
- timeindex: int,
- timestep: int,
- sample: torch.FloatTensor,
- eta: float = 0.0,
- use_clipped_model_output: bool = False,
- generator=None,
- variance_noise: Optional[torch.FloatTensor] = None,
- return_dict: bool = True,
- ) -> Union[LCMSchedulerOutput, Tuple]:
- """
- Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
- process from the learned model outputs (most often the predicted noise).
- Args:
- model_output (`torch.FloatTensor`):
- The direct output from learned diffusion model.
- timestep (`float`):
- The current discrete timestep in the diffusion chain.
- sample (`torch.FloatTensor`):
- A current instance of a sample created by the diffusion process.
- eta (`float`):
- The weight of noise for added noise in diffusion step.
- use_clipped_model_output (`bool`, defaults to `False`):
- If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
- because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
- clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
- `use_clipped_model_output` has no effect.
- generator (`torch.Generator`, *optional*):
- A random number generator.
- variance_noise (`torch.FloatTensor`):
- Alternative to generating noise with `generator` by directly providing the noise for the variance
- itself. Useful for methods such as [`CycleDiffusion`].
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
- Returns:
- [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
- If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
- tuple is returned where the first element is the sample tensor.
- """
- if self.num_inference_steps is None:
- raise ValueError(
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
- )
-
- # 1. get previous step value
- prev_timeindex = timeindex + 1
- if prev_timeindex < len(self.timesteps):
- prev_timestep = self.timesteps[prev_timeindex]
- else:
- prev_timestep = timestep
-
- # 2. compute alphas, betas
- alpha_prod_t = self.alphas_cumprod[timestep]
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
-
- beta_prod_t = 1 - alpha_prod_t
- beta_prod_t_prev = 1 - alpha_prod_t_prev
-
- # 3. Get scalings for boundary conditions
- c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
-
- # 4. Different Parameterization:
- parameterization = self.config.prediction_type
-
- if parameterization == "epsilon": # noise-prediction
- pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
-
- elif parameterization == "sample": # x-prediction
- pred_x0 = model_output
-
- elif parameterization == "v_prediction": # v-prediction
- pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
-
- # 4. Denoise model output using boundary conditions
- denoised = c_out * pred_x0 + c_skip * sample
-
- # 5. Sample z ~ N(0, I), For MultiStep Inference
- # Noise is not used for one-step sampling.
- if len(self.timesteps) > 1:
- noise = torch.randn(model_output.shape).to(model_output.device)
- prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
- else:
- prev_sample = denoised
-
- if not return_dict:
- return (prev_sample, denoised)
-
- return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
-
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
- def add_noise(
- self,
- original_samples: torch.FloatTensor,
- noise: torch.FloatTensor,
- timesteps: torch.IntTensor,
- ) -> torch.FloatTensor:
- # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
- alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
- timesteps = timesteps.to(original_samples.device)
-
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
- while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
-
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
- while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
-
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
- return noisy_samples
-
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
- def get_velocity(
- self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
- ) -> torch.FloatTensor:
- # Make sure alphas_cumprod and timestep have same device and dtype as sample
- alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
- timesteps = timesteps.to(sample.device)
-
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
- while len(sqrt_alpha_prod.shape) < len(sample.shape):
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
-
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
- while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
-
- velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
- return velocity
-
- def __len__(self):
- return self.config.num_train_timesteps
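
As a quick check on the LCM timestep spacing implemented by `set_timesteps` in the deleted scheduler above, the following standalone sketch reproduces the schedule with plain NumPy. The constants mirror the deleted pipeline's defaults (1000 training steps, 50 LCM origin steps, 4 inference steps); they are illustrative values, not read from any config.

import numpy as np

num_train_timesteps = 1000   # scheduler default
lcm_origin_steps = 50        # pipeline __call__ default
num_inference_steps = 4      # pipeline __call__ default

c = num_train_timesteps // lcm_origin_steps                         # 20
lcm_origin_timesteps = np.arange(1, lcm_origin_steps + 1) * c - 1   # 19, 39, ..., 999
skipping_step = len(lcm_origin_timesteps) // num_inference_steps    # 12
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]

print(timesteps)  # [999 759 519 279]
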
diff --git a/spaces/aijack/jojo/e4e_projection.py b/spaces/aijack/jojo/e4e_projection.py
deleted file mode 100644
index 900916fb1114eb169d1d88283b9b81a1ef180793..0000000000000000000000000000000000000000
--- a/spaces/aijack/jojo/e4e_projection.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-import sys
-import numpy as np
-from PIL import Image
-import torch
-import torchvision.transforms as transforms
-from argparse import Namespace
-from e4e.models.psp import pSp
-from util import *
-
-
-
-@torch.no_grad()
-def projection(img, name, device='cuda'):
-
-    model_path = 'e4e.pt'
-    ckpt = torch.load(model_path, map_location='cpu')
-    opts = ckpt['opts']
-    opts['checkpoint_path'] = model_path
-    opts = Namespace(**opts)
-    net = pSp(opts, device).eval().to(device)
-
- transform = transforms.Compose(
- [
- transforms.Resize(256),
- transforms.CenterCrop(256),
- transforms.ToTensor(),
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
- ]
- )
-
- img = transform(img).unsqueeze(0).to(device)
- images, w_plus = net(img, randomize_noise=False, return_latents=True)
- result_file = {}
- result_file['latent'] = w_plus[0]
- torch.save(result_file, name)
- return w_plus[0]
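
A hypothetical usage sketch for the `projection` helper above: load a face crop with PIL and obtain its W+ latent code. The paths 'face.png' and 'face_w_plus.pt' are placeholders, and the function assumes the 'e4e.pt' checkpoint referenced above is present in the working directory.

from PIL import Image

img = Image.open('face.png').convert('RGB')
w_plus = projection(img, 'face_w_plus.pt', device='cuda')  # also saves the latent to 'face_w_plus.pt'
print(w_plus.shape)  # typically (18, 512) for the FFHQ e4e encoder
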
diff --git a/spaces/ak0601/news_sentiment_analysis/Dockerfile b/spaces/ak0601/news_sentiment_analysis/Dockerfile
deleted file mode 100644
index c3b917fbdd2eb5fca49a70d6753cb34a3d007422..0000000000000000000000000000000000000000
--- a/spaces/ak0601/news_sentiment_analysis/Dockerfile
+++ /dev/null
@@ -1,20 +0,0 @@
-FROM python:3.9
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-RUN useradd -m -u 1000 user
-
-USER user
-
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-WORKDIR $HOME/app
-
-COPY --chown=user . $HOME/app
-
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
\ No newline at end of file
diff --git a/spaces/akashagarwal/ASRGenerateStory/app.py b/spaces/akashagarwal/ASRGenerateStory/app.py
deleted file mode 100644
index 802d78aff8e7fa6fc5ed4494c961c6cf4b75cebb..0000000000000000000000000000000000000000
--- a/spaces/akashagarwal/ASRGenerateStory/app.py
+++ /dev/null
@@ -1,174 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-import io, base64
-from PIL import Image
-import numpy as np
-import tensorflow as tf
-import mediapy
-import os
-import sys
-from huggingface_hub import snapshot_download
-
-import streamlit as st
-import firebase_admin
-from firebase_admin import credentials
-from firebase_admin import firestore
-import datetime
-import tempfile
-from typing import Optional
-import numpy as np
-from TTS.utils.manage import ModelManager
-from TTS.utils.synthesizer import Synthesizer
-
-
-# firestore singleton is a cached multiuser instance to persist shared crowdsource memory
-@st.experimental_singleton
-def get_db_firestore():
- cred = credentials.Certificate('test.json')
- firebase_admin.initialize_app(cred, {'projectId': u'clinical-nlp-b9117',})
- db = firestore.client()
- return db
-
-#start firestore singleton
-db = get_db_firestore()
-
-# create ASR ML pipeline
-asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
-
-# create Text Classification pipeline
-classifier = pipeline("text-classification")
-
-# create text generator pipeline
-story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
-
-# transcribe function
-def transcribe(audio):
- text = asr(audio)["text"]
- return text
-
-def speech_to_text(speech):
- text = asr(speech)["text"]
- return text
-
-def text_to_sentiment(text):
- sentiment = classifier(text)[0]["label"]
- return sentiment
-
-def upsert(text):
- date_time =str(datetime.datetime.today())
- doc_ref = db.collection('Text2SpeechSentimentSave').document(date_time)
- doc_ref.set({u'firefield': 'Recognize Speech', u'first': 'https://huggingface.co/spaces/awacke1/Text2SpeechSentimentSave', u'last': text, u'born': date_time,})
- saved = select('Text2SpeechSentimentSave', date_time)
- # check it here: https://console.firebase.google.com/u/0/project/clinical-nlp-b9117/firestore/data/~2FStreamlitSpaces
- return saved
-
-def select(collection, document):
- doc_ref = db.collection(collection).document(document)
- doc = doc_ref.get()
- docid = ("The id is: ", doc.id)
- contents = ("The contents are: ", doc.to_dict())
- return contents
-
-def selectall(text):
- docs = db.collection('Text2SpeechSentimentSave').stream()
- doclist=''
- for doc in docs:
- r=(f'{doc.id} => {doc.to_dict()}')
- doclist += r
- return doclist
-
-# story gen
-def generate_story(choice, input_text):
- query = " <{0}> {1}".format(choice, input_text)
- generated_text = story_gen(query)
- generated_text = generated_text[0]['generated_text']
- generated_text = generated_text.split('> ')[2]
- return generated_text
-
-# images gen
-def generate_images(text):
- steps=50
- width=256
- height=256
- num_images=4
- diversity=6
- image_bytes = image_gen(text, steps, width, height, num_images, diversity)
- generated_images = []
- for image in image_bytes[1]:
- image_str = image[0]
- image_str = image_str.replace("data:image/png;base64,","")
- decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
- img = Image.open(io.BytesIO(decoded_bytes))
- generated_images.append(img)
- return generated_images
-
-# reductionism - interpolate 4 images - todo - unhardcode the pattern
-def generate_interpolation(gallery):
- times_to_interpolate = 4
- generated_images = []
- for image_str in gallery:
- image_str = image_str.replace("data:image/png;base64,","")
- decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
- img = Image.open(io.BytesIO(decoded_bytes))
- generated_images.append(img)
- generated_images[0].save('frame_0.png')
- generated_images[1].save('frame_1.png')
- generated_images[2].save('frame_2.png')
- generated_images[3].save('frame_3.png')
- input_frames = ["frame_0.png", "frame_1.png", "frame_2.png", "frame_3.png"]
- frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator))
- mediapy.write_video("out.mp4", frames, fps=15)
- return "out.mp4"
-
-# image generator
-image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
-
-# video generator
-os.system("git clone https://github.com/google-research/frame-interpolation")
-sys.path.append("frame-interpolation")
-from eval import interpolator, util
-
-ffmpeg_path = util.get_ffmpeg_path()
-mediapy.set_ffmpeg(ffmpeg_path)
-model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
-interpolator = interpolator.Interpolator(model, None)
-
-demo = gr.Blocks()
-with demo:
-
- audio_file = gr.inputs.Audio(source="microphone", type="filepath")
- text = gr.Textbox()
- label = gr.Label()
- saved = gr.Textbox()
- savedAll = gr.Textbox()
- audio = gr.Audio(label="Output", interactive=False)
-
- b1 = gr.Button("Recognize Speech")
- b2 = gr.Button("Classify Sentiment")
- b3 = gr.Button("Save Speech to Text")
- b4 = gr.Button("Retrieve All")
-
- input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
- input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text")
-
- gr.Markdown("1. Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!")
- button_gen_story = gr.Button("Generate Story")
- gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)")
- button_gen_images = gr.Button("Generate Images")
- gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!")
- button_gen_video = gr.Button("Generate Video")
- output_generated_story = gr.Textbox(label="Generated Story")
- output_gallery = gr.Gallery(label="Generated Story Images")
- output_interpolation = gr.Video(label="Generated Video")
-
- # Bind functions to buttons
- button_gen_story.click(fn=generate_story, inputs=[input_story_type , input_start_text], outputs=output_generated_story)
- button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery)
- button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation)
-
- b1.click(speech_to_text, inputs=audio_file, outputs=input_start_text )
- b2.click(text_to_sentiment, inputs=text, outputs=label)
- b3.click(upsert, inputs=text, outputs=saved)
- b4.click(selectall, inputs=text, outputs=savedAll)
-
-demo.launch(debug=True, enable_queue=True)
\ No newline at end of file
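
For reference, a minimal, UI-free sketch of the data flow the buttons above wire together: speech to text, then text to a genre-conditioned story. The model names mirror the pipelines created in the file; 'sample.wav' is a placeholder path, and the genre-tag prompt format follows `generate_story` above.

from transformers import pipeline

asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")

start_text = asr("sample.wav")["text"]
story = story_gen(" <sci_fi> " + start_text)[0]["generated_text"]
print(story)
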
diff --git a/spaces/akhaliq/Mask2Former/mask2former/data/dataset_mappers/__init__.py b/spaces/akhaliq/Mask2Former/mask2former/data/dataset_mappers/__init__.py
deleted file mode 100644
index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Mask2Former/mask2former/data/dataset_mappers/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
diff --git a/spaces/akhaliq/paint-by-example/app.py b/spaces/akhaliq/paint-by-example/app.py
deleted file mode 100644
index e6ed87f42bbec4380598f14dc790b28504345f67..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/paint-by-example/app.py
+++ /dev/null
@@ -1,160 +0,0 @@
-import gradio as gr
-
-from io import BytesIO
-import requests
-import PIL
-from PIL import Image
-import numpy as np
-import os
-import uuid
-import torch
-from torch import autocast
-import cv2
-from matplotlib import pyplot as plt
-from torchvision import transforms
-from diffusers import DiffusionPipeline
-from diffusers.utils import torch_device
-pipe = DiffusionPipeline.from_pretrained(
- "patrickvonplaten/new_inpaint_test",
- torch_dtype=torch.float16,
-)
-pipe = pipe.to("cuda")
-
-from share_btn import community_icon_html, loading_icon_html, share_js
-
-def read_content(file_path: str) -> str:
- """read the content of target file
- """
- with open(file_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- return content
-
-def predict(dict, reference, scale, seed, step):
-    width, height = dict["image"].size
-    ...  # body truncated
-
-css = '''
-div{min-height: 400px}
-#mask_radio .gr-form{background:transparent; border: none}
-#word_mask{margin-top: .75em !important}
-#word_mask textarea:disabled{opacity: 0.3}
-.footer {margin-bottom: 45px;margin-top: 35px;text-align: center;border-bottom: 1px solid #e5e5e5}
-.footer>p {font-size: .8rem; display: inline-block; padding: 0 10px;transform: translateY(10px);background: white}
-.dark .footer {border-color: #303030}
-.dark .footer>p {background: #0b0f19}
-.acknowledgments h4{margin: 1.25em 0 .25em 0;font-weight: bold;font-size: 115%}
-#image_upload .touch-none{display: flex}
-@keyframes spin {
- from {
- transform: rotate(0deg);
- }
- to {
- transform: rotate(360deg);
- }
-}
-#share-btn-container {
- display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
-}
-#share-btn {
- all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
-}
-#share-btn * {
- all: unset;
-}
-#share-btn-container div:nth-child(-n+2){
- width: auto !important;
- min-height: 0px !important;
-}
-#share-btn-container .wrap {
- display: none !important;
-}
-'''
-example={}
-for i in range(1,4):
- ex_image_path='examples/image/example_'+str(i)+'.png'
- ex_mask_path='examples/mask/example_'+str(i)+'.png'
- ex_reference_path='examples/reference/example_'+str(i)+'.jpg'
- ex_image=Image.open(ex_image_path)
- ex_mask=Image.open(ex_mask_path)
- ex_reference=Image.open(ex_reference_path)
- example[i]={'image':{'image':ex_image,'mask':ex_mask},'reference':ex_reference}
-
-
-image_blocks = gr.Blocks(css=css)
-with image_blocks as demo:
- gr.HTML(read_content("header.html"))
- with gr.Group():
- with gr.Box():
- with gr.Row():
- with gr.Column():
- image = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="pil", label="Source Image")
- reference = gr.Image(source='upload', elem_id="image_upload", type="pil", label="Reference Image")
-
- with gr.Column():
- image_out = gr.Image(label="Output", elem_id="output-img").style(height=400)
- guidance = gr.Slider(label="Guidance scale", value=5, maximum=15,interactive=True)
- steps = gr.Slider(label="Steps", value=50, minimum=2, maximum=75, step=1,interactive=True)
-
- seed = gr.Slider(0, 10000, label='Seed (0 = random)', value=0, step=1)
-
- with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
- btn = gr.Button("Paint!").style(
- margin=False,
- rounded=(False, True, True, False),
- full_width=True,
- )
- with gr.Group(elem_id="share-btn-container"):
- community_icon = gr.HTML(community_icon_html, visible=True)
- loading_icon = gr.HTML(loading_icon_html, visible=True)
- share_button = gr.Button("Share to community", elem_id="share-btn", visible=True)
-
-
- with gr.Row():
- gr.Examples([
- ['examples/image/example_2.png', 'examples/reference/example_2.jpg',5,50],
- ['examples/image/example_3.png', 'examples/reference/example_3.jpg',5,50],
- ['examples/image/example_1.png', 'examples/reference/example_1.jpg',5,50],
- ], inputs=[image, reference, guidance, steps])
-
- btn.click(fn=predict, inputs=[image, reference, guidance, seed, steps], outputs=[image_out, community_icon, loading_icon, share_button])
- share_button.click(None, [], [], _js=share_js)
-
-
-
-    gr.HTML(
-        """
-        LICENSE: The model is licensed with a CreativeML Open RAIL-M license. The authors claim no rights on
-        the outputs you generate, you are free to use them and are accountable for their use which must not go
-        against the provisions set in this license. The license forbids you from sharing any content that
-        violates any laws, produce any harm to a person, disseminate any personal information that would be
-        meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions
-        please read the license.
-        """
-    )
-
-image_blocks.launch()
\ No newline at end of file
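
A hedged sketch of how a Paint-by-Example style edit is typically invoked with the released diffusers `PaintByExamplePipeline` and the 'Fantasy-Studio/Paint-by-Example' checkpoint. This follows the public diffusers API, not necessarily what the app's `predict` did with its 'patrickvonplaten/new_inpaint_test' checkpoint above; the example image paths are taken from the example list in the file and stand in as placeholders.

import torch
from PIL import Image
from diffusers import PaintByExamplePipeline

# Source image, binary mask of the region to replace, and a reference image
# whose subject is painted into the masked region.
init_image = Image.open("examples/image/example_1.png").convert("RGB").resize((512, 512))
mask_image = Image.open("examples/mask/example_1.png").resize((512, 512))
example_image = Image.open("examples/reference/example_1.jpg").convert("RGB").resize((512, 512))

pipe = PaintByExamplePipeline.from_pretrained(
    "Fantasy-Studio/Paint-by-Example", torch_dtype=torch.float16
).to("cuda")

result = pipe(
    image=init_image,
    mask_image=mask_image,
    example_image=example_image,
    guidance_scale=5,
    num_inference_steps=50,
).images[0]
result.save("output.png")
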
diff --git a/spaces/akhaliq/stylegan3_clip/dataset_tool.py b/spaces/akhaliq/stylegan3_clip/dataset_tool.py
deleted file mode 100644
index e9382fb1265489053eaed0166385a10ef67965c2..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/stylegan3_clip/dataset_tool.py
+++ /dev/null
@@ -1,455 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Tool for creating ZIP/PNG based datasets."""
-
-import functools
-import gzip
-import io
-import json
-import os
-import pickle
-import re
-import sys
-import tarfile
-import zipfile
-from pathlib import Path
-from typing import Callable, Optional, Tuple, Union
-
-import click
-import numpy as np
-import PIL.Image
-from tqdm import tqdm
-
-#----------------------------------------------------------------------------
-
-def error(msg):
- print('Error: ' + msg)
- sys.exit(1)
-
-#----------------------------------------------------------------------------
-
-def parse_tuple(s: str) -> Tuple[int, int]:
- '''Parse a 'M,N' or 'MxN' integer tuple.
-
- Example:
- '4x2' returns (4,2)
- '0,1' returns (0,1)
- '''
- if m := re.match(r'^(\d+)[x,](\d+)$', s):
- return (int(m.group(1)), int(m.group(2)))
- raise ValueError(f'cannot parse tuple {s}')
-
-#----------------------------------------------------------------------------
-
-def maybe_min(a: int, b: Optional[int]) -> int:
- if b is not None:
- return min(a, b)
- return a
-
-#----------------------------------------------------------------------------
-
-def file_ext(name: Union[str, Path]) -> str:
- return str(name).split('.')[-1]
-
-#----------------------------------------------------------------------------
-
-def is_image_ext(fname: Union[str, Path]) -> bool:
- ext = file_ext(fname).lower()
- return f'.{ext}' in PIL.Image.EXTENSION # type: ignore
-
-#----------------------------------------------------------------------------
-
-def open_image_folder(source_dir, *, max_images: Optional[int]):
- input_images = [str(f) for f in sorted(Path(source_dir).rglob('*')) if is_image_ext(f) and os.path.isfile(f)]
-
- # Load labels.
- labels = {}
- meta_fname = os.path.join(source_dir, 'dataset.json')
- if os.path.isfile(meta_fname):
- with open(meta_fname, 'r') as file:
- labels = json.load(file)['labels']
- if labels is not None:
- labels = { x[0]: x[1] for x in labels }
- else:
- labels = {}
-
- max_idx = maybe_min(len(input_images), max_images)
-
- def iterate_images():
- for idx, fname in enumerate(input_images):
- arch_fname = os.path.relpath(fname, source_dir)
- arch_fname = arch_fname.replace('\\', '/')
- img = np.array(PIL.Image.open(fname))
- yield dict(img=img, label=labels.get(arch_fname))
- if idx >= max_idx-1:
- break
- return max_idx, iterate_images()
-
-#----------------------------------------------------------------------------
-
-def open_image_zip(source, *, max_images: Optional[int]):
- with zipfile.ZipFile(source, mode='r') as z:
- input_images = [str(f) for f in sorted(z.namelist()) if is_image_ext(f)]
-
- # Load labels.
- labels = {}
- if 'dataset.json' in z.namelist():
- with z.open('dataset.json', 'r') as file:
- labels = json.load(file)['labels']
- if labels is not None:
- labels = { x[0]: x[1] for x in labels }
- else:
- labels = {}
-
- max_idx = maybe_min(len(input_images), max_images)
-
- def iterate_images():
- with zipfile.ZipFile(source, mode='r') as z:
- for idx, fname in enumerate(input_images):
- with z.open(fname, 'r') as file:
- img = PIL.Image.open(file) # type: ignore
- img = np.array(img)
- yield dict(img=img, label=labels.get(fname))
- if idx >= max_idx-1:
- break
- return max_idx, iterate_images()
-
-#----------------------------------------------------------------------------
-
-def open_lmdb(lmdb_dir: str, *, max_images: Optional[int]):
- import cv2 # pip install opencv-python # pylint: disable=import-error
- import lmdb # pip install lmdb # pylint: disable=import-error
-
- with lmdb.open(lmdb_dir, readonly=True, lock=False).begin(write=False) as txn:
- max_idx = maybe_min(txn.stat()['entries'], max_images)
-
- def iterate_images():
- with lmdb.open(lmdb_dir, readonly=True, lock=False).begin(write=False) as txn:
- for idx, (_key, value) in enumerate(txn.cursor()):
- try:
- try:
- img = cv2.imdecode(np.frombuffer(value, dtype=np.uint8), 1)
- if img is None:
- raise IOError('cv2.imdecode failed')
- img = img[:, :, ::-1] # BGR => RGB
- except IOError:
- img = np.array(PIL.Image.open(io.BytesIO(value)))
- yield dict(img=img, label=None)
- if idx >= max_idx-1:
- break
- except:
- print(sys.exc_info()[1])
-
- return max_idx, iterate_images()
-
-#----------------------------------------------------------------------------
-
-def open_cifar10(tarball: str, *, max_images: Optional[int]):
- images = []
- labels = []
-
- with tarfile.open(tarball, 'r:gz') as tar:
- for batch in range(1, 6):
- member = tar.getmember(f'cifar-10-batches-py/data_batch_{batch}')
- with tar.extractfile(member) as file:
- data = pickle.load(file, encoding='latin1')
- images.append(data['data'].reshape(-1, 3, 32, 32))
- labels.append(data['labels'])
-
- images = np.concatenate(images)
- labels = np.concatenate(labels)
- images = images.transpose([0, 2, 3, 1]) # NCHW -> NHWC
- assert images.shape == (50000, 32, 32, 3) and images.dtype == np.uint8
- assert labels.shape == (50000,) and labels.dtype in [np.int32, np.int64]
- assert np.min(images) == 0 and np.max(images) == 255
- assert np.min(labels) == 0 and np.max(labels) == 9
-
- max_idx = maybe_min(len(images), max_images)
-
- def iterate_images():
- for idx, img in enumerate(images):
- yield dict(img=img, label=int(labels[idx]))
- if idx >= max_idx-1:
- break
-
- return max_idx, iterate_images()
-
-#----------------------------------------------------------------------------
-
-def open_mnist(images_gz: str, *, max_images: Optional[int]):
- labels_gz = images_gz.replace('-images-idx3-ubyte.gz', '-labels-idx1-ubyte.gz')
- assert labels_gz != images_gz
- images = []
- labels = []
-
- with gzip.open(images_gz, 'rb') as f:
- images = np.frombuffer(f.read(), np.uint8, offset=16)
- with gzip.open(labels_gz, 'rb') as f:
- labels = np.frombuffer(f.read(), np.uint8, offset=8)
-
- images = images.reshape(-1, 28, 28)
- images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
- assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
- assert labels.shape == (60000,) and labels.dtype == np.uint8
- assert np.min(images) == 0 and np.max(images) == 255
- assert np.min(labels) == 0 and np.max(labels) == 9
-
- max_idx = maybe_min(len(images), max_images)
-
- def iterate_images():
- for idx, img in enumerate(images):
- yield dict(img=img, label=int(labels[idx]))
- if idx >= max_idx-1:
- break
-
- return max_idx, iterate_images()
-
-#----------------------------------------------------------------------------
-
-def make_transform(
- transform: Optional[str],
- output_width: Optional[int],
- output_height: Optional[int]
-) -> Callable[[np.ndarray], Optional[np.ndarray]]:
- def scale(width, height, img):
- w = img.shape[1]
- h = img.shape[0]
- if width == w and height == h:
- return img
- img = PIL.Image.fromarray(img)
- ww = width if width is not None else w
- hh = height if height is not None else h
- img = img.resize((ww, hh), PIL.Image.LANCZOS)
- return np.array(img)
-
- def center_crop(width, height, img):
- crop = np.min(img.shape[:2])
- img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
- img = PIL.Image.fromarray(img, 'RGB')
- img = img.resize((width, height), PIL.Image.LANCZOS)
- return np.array(img)
-
- def center_crop_wide(width, height, img):
- ch = int(np.round(width * img.shape[0] / img.shape[1]))
- if img.shape[1] < width or ch < height:
- return None
-
- img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2]
- img = PIL.Image.fromarray(img, 'RGB')
- img = img.resize((width, height), PIL.Image.LANCZOS)
- img = np.array(img)
-
- canvas = np.zeros([width, width, 3], dtype=np.uint8)
- canvas[(width - height) // 2 : (width + height) // 2, :] = img
- return canvas
-
- if transform is None:
- return functools.partial(scale, output_width, output_height)
- if transform == 'center-crop':
- if (output_width is None) or (output_height is None):
-            error('must specify --resolution=WxH when using ' + transform + ' transform')
- return functools.partial(center_crop, output_width, output_height)
- if transform == 'center-crop-wide':
- if (output_width is None) or (output_height is None):
-            error('must specify --resolution=WxH when using ' + transform + ' transform')
- return functools.partial(center_crop_wide, output_width, output_height)
- assert False, 'unknown transform'
-
-#----------------------------------------------------------------------------
-
-def open_dataset(source, *, max_images: Optional[int]):
- if os.path.isdir(source):
- if source.rstrip('/').endswith('_lmdb'):
- return open_lmdb(source, max_images=max_images)
- else:
- return open_image_folder(source, max_images=max_images)
- elif os.path.isfile(source):
- if os.path.basename(source) == 'cifar-10-python.tar.gz':
- return open_cifar10(source, max_images=max_images)
- elif os.path.basename(source) == 'train-images-idx3-ubyte.gz':
- return open_mnist(source, max_images=max_images)
- elif file_ext(source) == 'zip':
- return open_image_zip(source, max_images=max_images)
- else:
- assert False, 'unknown archive type'
- else:
- error(f'Missing input file or directory: {source}')
-
-#----------------------------------------------------------------------------
-
-def open_dest(dest: str) -> Tuple[str, Callable[[str, Union[bytes, str]], None], Callable[[], None]]:
- dest_ext = file_ext(dest)
-
- if dest_ext == 'zip':
- if os.path.dirname(dest) != '':
- os.makedirs(os.path.dirname(dest), exist_ok=True)
- zf = zipfile.ZipFile(file=dest, mode='w', compression=zipfile.ZIP_STORED)
- def zip_write_bytes(fname: str, data: Union[bytes, str]):
- zf.writestr(fname, data)
- return '', zip_write_bytes, zf.close
- else:
-        # If the output folder already exists, check that it is
-        # empty.
- #
- # Note: creating the output directory is not strictly
- # necessary as folder_write_bytes() also mkdirs, but it's better
- # to give an error message earlier in case the dest folder
- # somehow cannot be created.
- if os.path.isdir(dest) and len(os.listdir(dest)) != 0:
- error('--dest folder must be empty')
- os.makedirs(dest, exist_ok=True)
-
- def folder_write_bytes(fname: str, data: Union[bytes, str]):
- os.makedirs(os.path.dirname(fname), exist_ok=True)
- with open(fname, 'wb') as fout:
- if isinstance(data, str):
- data = data.encode('utf8')
- fout.write(data)
- return dest, folder_write_bytes, lambda: None
-
-#----------------------------------------------------------------------------
-
-@click.command()
-@click.pass_context
-@click.option('--source', help='Directory or archive name for input dataset', required=True, metavar='PATH')
-@click.option('--dest', help='Output directory or archive name for output dataset', required=True, metavar='PATH')
-@click.option('--max-images', help='Output only up to `max-images` images', type=int, default=None)
-@click.option('--transform', help='Input crop/resize mode', type=click.Choice(['center-crop', 'center-crop-wide']))
-@click.option('--resolution', help='Output resolution (e.g., \'512x512\')', metavar='WxH', type=parse_tuple)
-def convert_dataset(
- ctx: click.Context,
- source: str,
- dest: str,
- max_images: Optional[int],
- transform: Optional[str],
- resolution: Optional[Tuple[int, int]]
-):
- """Convert an image dataset into a dataset archive usable with StyleGAN2 ADA PyTorch.
-
- The input dataset format is guessed from the --source argument:
-
- \b
- --source *_lmdb/ Load LSUN dataset
- --source cifar-10-python.tar.gz Load CIFAR-10 dataset
- --source train-images-idx3-ubyte.gz Load MNIST dataset
- --source path/ Recursively load all images from path/
- --source dataset.zip Recursively load all images from dataset.zip
-
- Specifying the output format and path:
-
- \b
- --dest /path/to/dir Save output files under /path/to/dir
- --dest /path/to/dataset.zip Save output files into /path/to/dataset.zip
-
- The output dataset format can be either an image folder or an uncompressed zip archive.
-    Zip archives make it easier to move datasets around file servers and clusters, and may
- offer better training performance on network file systems.
-
- Images within the dataset archive will be stored as uncompressed PNG.
-    Uncompressed PNGs can be efficiently decoded in the training loop.
-
- Class labels are stored in a file called 'dataset.json' that is stored at the
- dataset root folder. This file has the following structure:
-
- \b
- {
- "labels": [
- ["00000/img00000000.png",6],
- ["00000/img00000001.png",9],
-            ... repeated for every image in the dataset
- ["00049/img00049999.png",1]
- ]
- }
-
- If the 'dataset.json' file cannot be found, the dataset is interpreted as
- not containing class labels.
-
- Image scale/crop and resolution requirements:
-
- Output images must be square-shaped and they must all have the same power-of-two
- dimensions.
-
- To scale arbitrary input image size to a specific width and height, use the
- --resolution option. Output resolution will be either the original
- input resolution (if resolution was not specified) or the one specified with
- --resolution option.
-
- Use the --transform=center-crop or --transform=center-crop-wide options to apply a
- center crop transform on the input image. These options should be used with the
- --resolution option. For example:
-
- \b
- python dataset_tool.py --source LSUN/raw/cat_lmdb --dest /tmp/lsun_cat \\
- --transform=center-crop-wide --resolution=512x384
- """
-
- PIL.Image.init() # type: ignore
-
- if dest == '':
- ctx.fail('--dest output filename or directory must not be an empty string')
-
- num_files, input_iter = open_dataset(source, max_images=max_images)
- archive_root_dir, save_bytes, close_dest = open_dest(dest)
-
- if resolution is None: resolution = (None, None)
- transform_image = make_transform(transform, *resolution)
-
- dataset_attrs = None
-
- labels = []
- for idx, image in tqdm(enumerate(input_iter), total=num_files):
- idx_str = f'{idx:08d}'
- archive_fname = f'{idx_str[:5]}/img{idx_str}.png'
-
- # Apply crop and resize.
- img = transform_image(image['img'])
-
- # Transform may drop images.
- if img is None:
- continue
-
- # Error check to require uniform image attributes across
- # the whole dataset.
- channels = img.shape[2] if img.ndim == 3 else 1
- cur_image_attrs = {
- 'width': img.shape[1],
- 'height': img.shape[0],
- 'channels': channels
- }
- if dataset_attrs is None:
- dataset_attrs = cur_image_attrs
- width = dataset_attrs['width']
- height = dataset_attrs['height']
- if width != height:
- error(f'Image dimensions after scale and crop are required to be square. Got {width}x{height}')
- if dataset_attrs['channels'] not in [1, 3]:
- error('Input images must be stored as RGB or grayscale')
- if width != 2 ** int(np.floor(np.log2(width))):
- error('Image width/height after scale and crop are required to be power-of-two')
- elif dataset_attrs != cur_image_attrs:
- err = [f' dataset {k}/cur image {k}: {dataset_attrs[k]}/{cur_image_attrs[k]}' for k in dataset_attrs.keys()] # pylint: disable=unsubscriptable-object
- error(f'Image {archive_fname} attributes must be equal across all images of the dataset. Got:\n' + '\n'.join(err))
-
- # Save the image as an uncompressed PNG.
- img = PIL.Image.fromarray(img, { 1: 'L', 3: 'RGB' }[channels])
- image_bits = io.BytesIO()
- img.save(image_bits, format='png', compress_level=0, optimize=False)
- save_bytes(os.path.join(archive_root_dir, archive_fname), image_bits.getbuffer())
- labels.append([archive_fname, image['label']] if image['label'] is not None else None)
-
- metadata = {
- 'labels': labels if all(x is not None for x in labels) else None
- }
- save_bytes(os.path.join(archive_root_dir, 'dataset.json'), json.dumps(metadata))
- close_dest()
-
-#----------------------------------------------------------------------------
-
-if __name__ == "__main__":
- convert_dataset() # pylint: disable=no-value-for-parameter
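The `dataset.json` layout described in the docstring above is only written by this tool. On the consumer side it can be read back with a few lines; the sketch below assumes an archive named `dataset.zip` (an illustrative path, not an output of any particular run) and shows how the label list maps file names to class indices:

```python
import json
import zipfile

# Sketch: read the labels that convert_dataset() wrote into a dataset archive.
# "dataset.zip" is a hypothetical output path.
with zipfile.ZipFile("dataset.zip") as zf:
    with zf.open("dataset.json") as f:
        metadata = json.load(f)

# metadata["labels"] is either None (no class labels) or a list of
# [filename, class_index] pairs, as described in the docstring above.
labels = dict(metadata["labels"]) if metadata.get("labels") else {}
print(labels.get("00000/img00000000.png"))
```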
diff --git a/spaces/alexiserodriguez/whisper-transcription-app/app.py b/spaces/alexiserodriguez/whisper-transcription-app/app.py
deleted file mode 100644
index b3d5361937749f6e8ebeff4ed06a605e5835968e..0000000000000000000000000000000000000000
--- a/spaces/alexiserodriguez/whisper-transcription-app/app.py
+++ /dev/null
@@ -1,149 +0,0 @@
-import whisper
-import gradio as gr
-import datetime
-
-import subprocess
-
-import torch
-import pyannote.audio
-from pyannote.audio.pipelines.speaker_verification import PretrainedSpeakerEmbedding
-
-from pyannote.audio import Audio
-from pyannote.core import Segment
-
-import wave
-import contextlib
-
-from sklearn.cluster import AgglomerativeClustering
-import numpy as np
-
-# model = whisper.load_model("large-v2")
-embedding_model = PretrainedSpeakerEmbedding(
- "speechbrain/spkrec-ecapa-voxceleb",
- device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
-)
-
-# Transcribe a batch of audio files
-def bulk_transcribe(files, model):
- chosen_model = whisper.load_model(model)
- output = ""
- for i in files:
- output += (
- '--Archivo "'
- + get_file_name(i.name)
- + '"'
- + "\n\n"
- + transcribe(i.name, chosen_model)
- + "\n\n"
- )
-
- with open("Transcripción.txt", "w") as file:
- file.write(output)
-
- return "Transcripción.txt", output
-
-# Get the file name from a path
-def get_file_name(file):
- file_path = file.split("/")
- file_name = file_path[-1]
- return file_name
-
-# Main function that transcribes a single audio file
-def transcribe(audio, model):
- num_speakers = 3
- path, error = convert_to_wav(audio)
- if error is not None:
- return error
-
- duration = get_duration(path)
- if duration > 4 * 60 * 60:
- return "La duración del audio es muy larga"
-
- result = model.transcribe(path)
-
- segments = result["segments"]
-
- num_speakers = min(max(round(num_speakers), 1), len(segments))
- if len(segments) == 1:
- segments[0]["speaker"] = "HABLANTE 1"
- else:
- embeddings = make_embeddings(path, segments, duration)
- add_speaker_labels(segments, embeddings, num_speakers)
- output = get_output(segments)
- return output
-
-
-def convert_to_wav(path):
-    if path[-3:] != "wav":
-        new_path = ".".join(path.split(".")[:-1]) + ".wav"
-        try:
-            # subprocess.call only raises if ffmpeg itself cannot be started,
-            # so the exit code must be checked explicitly as well.
-            returncode = subprocess.call(["ffmpeg", "-i", path, new_path, "-y"])
-        except OSError:
-            return path, "Error: No se pudo convertir archivo a .wav"
-        if returncode != 0:
-            return path, "Error: No se pudo convertir archivo a .wav"
-        path = new_path
-    return path, None
-
-
-def get_duration(path):
- with contextlib.closing(wave.open(path, "r")) as f:
- frames = f.getnframes()
- rate = f.getframerate()
- return frames / float(rate)
-
-
-def make_embeddings(path, segments, duration):
- embeddings = np.zeros(shape=(len(segments), 192))
- for i, segment in enumerate(segments):
- embeddings[i] = segment_embedding(path, segment, duration)
- return np.nan_to_num(embeddings)
-
-
-audio = Audio()
-
-
-def segment_embedding(path, segment, duration):
- start = segment["start"]
-
- end = min(duration, segment["end"])
- clip = Segment(start, end)
- waveform, sample_rate = audio.crop(path, clip)
- return embedding_model(waveform[None])
-
-
-def add_speaker_labels(segments, embeddings, num_speakers):
- clustering = AgglomerativeClustering(num_speakers).fit(embeddings)
-
- labels = clustering.labels_
- for i in range(len(segments)):
- segments[i]["speaker"] = "HABLANTE " + str(labels[i] + 1)
-
-
-def time(secs):
- return datetime.timedelta(seconds=round(secs))
-
-
-def get_output(segments):
- output = ""
- for i, segment in enumerate(segments):
- if i == 0 or segments[i - 1]["speaker"] != segment["speaker"]:
- if i != 0:
- output += "\n\n"
- output += segment["speaker"] + " " + str(time(segment["start"])) + "\n\n"
- output += segment["text"][1:] + " "
- return output
-
-
-gr.Interface(
- title="Reconocimiento de hablantes con Whisper en Español",
- description="La interfaz permite la transcripción de audios individuales y en conjunto a texto a través de los modelos de Whisper, para archivos donde existen específicamente tres hablantes. Por defecto, está seleccionado el modelo 'large-v2' que presenta el mejor rendimiento y requiere mayor procesamiento. Sin embargo, es posible seleccionar el modelo a aplicar sobre los archivos a través del dropdown que ha sido desarrollado. De igual forma, se genera una transcripción directa y un archivo .txt descargable que contiene el texto correspondiente al grupo de archivos seleccionados.",
- fn=bulk_transcribe,
- inputs=[
- gr.File(file_count="multiple", file_types=["audio"], label="Archivos de audio"),
- gr.Dropdown(
- label="Modelo de Whisper",
- choices=["tiny", "base", "small", "medium", "large", "large-v2"],
- value="large-v2",
- ),
- ],
- outputs=[gr.File(label="Archivo TXT con transcripción"), gr.Textbox(label="Transcripción de archivos de audio")],
-).launch()
diff --git a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/4.html b/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/4.html
deleted file mode 100644
index edf9a3cb539c122ba04a53f6248346832b75442e..0000000000000000000000000000000000000000
--- a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/4.html
+++ /dev/null
@@ -1,48 +0,0 @@
-brax visualizer
diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/download.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/download.py
deleted file mode 100644
index 233b7e983facad4aa50f24572ae6016cfb7a1bc8..0000000000000000000000000000000000000000
--- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/commands/download.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import logging
-import os
-from optparse import Values
-from typing import List
-
-from pip._internal.cli import cmdoptions
-from pip._internal.cli.cmdoptions import make_target_python
-from pip._internal.cli.req_command import RequirementCommand, with_cleanup
-from pip._internal.cli.status_codes import SUCCESS
-from pip._internal.req.req_tracker import get_requirement_tracker
-from pip._internal.utils.misc import ensure_dir, normalize_path, write_output
-from pip._internal.utils.temp_dir import TempDirectory
-
-logger = logging.getLogger(__name__)
-
-
-class DownloadCommand(RequirementCommand):
- """
- Download packages from:
-
- - PyPI (and other indexes) using requirement specifiers.
- - VCS project urls.
- - Local project directories.
- - Local or remote source archives.
-
- pip also supports downloading from "requirements files", which provide
- an easy way to specify a whole environment to be downloaded.
- """
-
- usage = """
- %prog [options]
[package-index-options] ...
- %prog [options] -r [package-index-options] ...
- %prog [options] ...
- %prog [options] ...
- %prog [options] ..."""
-
- def add_options(self) -> None:
- self.cmd_opts.add_option(cmdoptions.constraints())
- self.cmd_opts.add_option(cmdoptions.requirements())
- self.cmd_opts.add_option(cmdoptions.no_deps())
- self.cmd_opts.add_option(cmdoptions.global_options())
- self.cmd_opts.add_option(cmdoptions.no_binary())
- self.cmd_opts.add_option(cmdoptions.only_binary())
- self.cmd_opts.add_option(cmdoptions.prefer_binary())
- self.cmd_opts.add_option(cmdoptions.src())
- self.cmd_opts.add_option(cmdoptions.pre())
- self.cmd_opts.add_option(cmdoptions.require_hashes())
- self.cmd_opts.add_option(cmdoptions.progress_bar())
- self.cmd_opts.add_option(cmdoptions.no_build_isolation())
- self.cmd_opts.add_option(cmdoptions.use_pep517())
- self.cmd_opts.add_option(cmdoptions.no_use_pep517())
- self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
-
- self.cmd_opts.add_option(
- "-d",
- "--dest",
- "--destination-dir",
- "--destination-directory",
- dest="download_dir",
- metavar="dir",
- default=os.curdir,
- help="Download packages into .",
- )
-
- cmdoptions.add_target_python_options(self.cmd_opts)
-
- index_opts = cmdoptions.make_option_group(
- cmdoptions.index_group,
- self.parser,
- )
-
- self.parser.insert_option_group(0, index_opts)
- self.parser.insert_option_group(0, self.cmd_opts)
-
- @with_cleanup
- def run(self, options: Values, args: List[str]) -> int:
-
- options.ignore_installed = True
- # editable doesn't really make sense for `pip download`, but the bowels
- # of the RequirementSet code require that property.
- options.editables = []
-
- cmdoptions.check_dist_restriction(options)
-
- options.download_dir = normalize_path(options.download_dir)
- ensure_dir(options.download_dir)
-
- session = self.get_default_session(options)
-
- target_python = make_target_python(options)
- finder = self._build_package_finder(
- options=options,
- session=session,
- target_python=target_python,
- ignore_requires_python=options.ignore_requires_python,
- )
-
- req_tracker = self.enter_context(get_requirement_tracker())
-
- directory = TempDirectory(
- delete=not options.no_clean,
- kind="download",
- globally_managed=True,
- )
-
- reqs = self.get_requirements(args, options, finder, session)
-
- preparer = self.make_requirement_preparer(
- temp_build_dir=directory,
- options=options,
- req_tracker=req_tracker,
- session=session,
- finder=finder,
- download_dir=options.download_dir,
- use_user_site=False,
- verbosity=self.verbosity,
- )
-
- resolver = self.make_resolver(
- preparer=preparer,
- finder=finder,
- options=options,
- ignore_requires_python=options.ignore_requires_python,
- py_version_info=options.python_version,
- )
-
- self.trace_basic_info(finder)
-
- requirement_set = resolver.resolve(reqs, check_supported_wheels=True)
-
- downloaded: List[str] = []
- for req in requirement_set.requirements.values():
- if req.satisfied_by is None:
- assert req.name is not None
- preparer.save_linked_requirement(req)
- downloaded.append(req.name)
- if downloaded:
- write_output("Successfully downloaded %s", " ".join(downloaded))
-
- return SUCCESS
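The command above is normally driven from the command line rather than imported directly. A rough sketch of invoking it from Python by shelling out to `python -m pip` (the package name `requests` and the `wheels` destination directory are arbitrary examples):

```python
import subprocess
import sys

# Download "requests" and its dependencies into ./wheels without installing them.
# The -d/--dest flag corresponds to the download_dir option defined above.
subprocess.run(
    [sys.executable, "-m", "pip", "download", "requests", "-d", "wheels"],
    check=True,
)
```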
diff --git a/spaces/ali-ghamdan/realesrgan-models/docs/Training.md b/spaces/ali-ghamdan/realesrgan-models/docs/Training.md
deleted file mode 100644
index f6494f1db0b425eeec9b1dca5af317e1bd7f1e93..0000000000000000000000000000000000000000
--- a/spaces/ali-ghamdan/realesrgan-models/docs/Training.md
+++ /dev/null
@@ -1,271 +0,0 @@
-# :computer: How to Train/Finetune Real-ESRGAN
-
-- [Train Real-ESRGAN](#train-real-esrgan)
- - [Overview](#overview)
- - [Dataset Preparation](#dataset-preparation)
- - [Train Real-ESRNet](#Train-Real-ESRNet)
- - [Train Real-ESRGAN](#Train-Real-ESRGAN)
-- [Finetune Real-ESRGAN on your own dataset](#Finetune-Real-ESRGAN-on-your-own-dataset)
- - [Generate degraded images on the fly](#Generate-degraded-images-on-the-fly)
- - [Use paired training data](#use-your-own-paired-data)
-
-[English](Training.md) **|** [简体中文](Training_CN.md)
-
-## Train Real-ESRGAN
-
-### Overview
-
-The training has been divided into two stages. These two stages have the same data synthesis process and training pipeline, except for the loss functions. Specifically,
-
-1. We first train Real-ESRNet with L1 loss from the pre-trained model ESRGAN.
-1. We then use the trained Real-ESRNet model as an initialization of the generator, and train the Real-ESRGAN with a combination of L1 loss, perceptual loss and GAN loss.
-
-### Dataset Preparation
-
-We use DF2K (DIV2K and Flickr2K) + OST datasets for our training. Only HR images are required.
-You can download from :
-
-1. DIV2K: http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip
-2. Flickr2K: https://cv.snu.ac.kr/research/EDSR/Flickr2K.tar
-3. OST: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/datasets/OST_dataset.zip
-
-Here are steps for data preparation.
-
-#### Step 1: [Optional] Generate multi-scale images
-
-For the DF2K dataset, we use a multi-scale strategy, *i.e.*, we downsample HR images to obtain several Ground-Truth images with different scales.
-You can use the [scripts/generate_multiscale_DF2K.py](scripts/generate_multiscale_DF2K.py) script to generate multi-scale images.
-Note that this step can be omitted if you just want to have a fast try.
-
-```bash
-python scripts/generate_multiscale_DF2K.py --input datasets/DF2K/DF2K_HR --output datasets/DF2K/DF2K_multiscale
-```
-
-#### Step 2: [Optional] Crop to sub-images
-
-We then crop DF2K images into sub-images for faster IO and processing.
-You can skip this step if your IO is fast enough or if your disk space is limited.
-
-You can use the [scripts/extract_subimages.py](scripts/extract_subimages.py) script. Here is the example:
-
-```bash
- python scripts/extract_subimages.py --input datasets/DF2K/DF2K_multiscale --output datasets/DF2K/DF2K_multiscale_sub --crop_size 400 --step 200
-```
-
-#### Step 3: Prepare a txt for meta information
-
-You need to prepare a txt file containing the image paths. The following are some examples in `meta_info_DF2Kmultiscale+OST_sub.txt` (as different users may partition the sub-images differently, this file will not match your data, so you need to prepare your own txt file):
-
-```txt
-DF2K_HR_sub/000001_s001.png
-DF2K_HR_sub/000001_s002.png
-DF2K_HR_sub/000001_s003.png
-...
-```
-
-You can use the [scripts/generate_meta_info.py](scripts/generate_meta_info.py) script to generate the txt file.
-You can merge several folders into one meta_info txt. Here is the example:
-
-```bash
- python scripts/generate_meta_info.py --input datasets/DF2K/DF2K_HR datasets/DF2K/DF2K_multiscale --root datasets/DF2K datasets/DF2K --meta_info datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt
-```
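If you want to sanity-check the generated meta_info file before training, a small verification sketch along these lines can help; the root and meta_info paths below follow the examples above and should be adjusted to your own layout:

```python
import os

# Check that every image listed in the meta_info txt exists under the dataset root.
root = "datasets/DF2K"
meta_info = "datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt"

with open(meta_info) as f:
    paths = [line.strip() for line in f if line.strip()]

missing = [p for p in paths if not os.path.isfile(os.path.join(root, p))]
print(f"{len(paths)} images listed, {len(missing)} missing")
```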
-
-### Train Real-ESRNet
-
-1. Download pre-trained model [ESRGAN](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth) into `experiments/pretrained_models`.
- ```bash
- wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth -P experiments/pretrained_models
- ```
-1. Modify the content in the option file `options/train_realesrnet_x4plus.yml` accordingly:
- ```yml
- train:
- name: DF2K+OST
- type: RealESRGANDataset
- dataroot_gt: datasets/DF2K # modify to the root path of your folder
- meta_info: realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt # modify to your own generate meta info txt
- io_backend:
- type: disk
- ```
-1. If you want to perform validation during training, uncomment those lines and modify accordingly:
- ```yml
- # Uncomment these for validation
- # val:
- # name: validation
- # type: PairedImageDataset
- # dataroot_gt: path_to_gt
- # dataroot_lq: path_to_lq
- # io_backend:
- # type: disk
-
- ...
-
- # Uncomment these for validation
- # validation settings
- # val:
- # val_freq: !!float 5e3
- # save_img: True
-
- # metrics:
- # psnr: # metric name, can be arbitrary
- # type: calculate_psnr
- # crop_border: 4
- # test_y_channel: false
- ```
-1. Before the formal training, you may run in the `--debug` mode to see whether everything is OK. We use four GPUs for training:
- ```bash
- CUDA_VISIBLE_DEVICES=0,1,2,3 \
- python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --debug
- ```
-
- Train with **a single GPU** in the *debug* mode:
- ```bash
- python realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --debug
- ```
-1. The formal training. We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary.
- ```bash
- CUDA_VISIBLE_DEVICES=0,1,2,3 \
- python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --auto_resume
- ```
-
- Train with **a single GPU**:
- ```bash
- python realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --auto_resume
- ```
-
-### Train Real-ESRGAN
-
-1. After the training of Real-ESRNet, you now have the file `experiments/train_RealESRNetx4plus_1000k_B12G4_fromESRGAN/model/net_g_1000000.pth`. If you need to specify the pre-trained path to other files, modify the `pretrain_network_g` value in the option file `train_realesrgan_x4plus.yml`.
-1. Modify the option file `train_realesrgan_x4plus.yml` accordingly. Most modifications are similar to those listed above.
-1. Before the formal training, you may run in the `--debug` mode to see whether everything is OK. We use four GPUs for training:
- ```bash
- CUDA_VISIBLE_DEVICES=0,1,2,3 \
- python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --debug
- ```
-
- Train with **a single GPU** in the *debug* mode:
- ```bash
- python realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --debug
- ```
-1. The formal training. We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary.
- ```bash
- CUDA_VISIBLE_DEVICES=0,1,2,3 \
- python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --auto_resume
- ```
-
- Train with **a single GPU**:
- ```bash
- python realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --auto_resume
- ```
-
-## Finetune Real-ESRGAN on your own dataset
-
-You can finetune Real-ESRGAN on your own dataset. Typically, the fine-tuning process can be divided into two cases:
-
-1. [Generate degraded images on the fly](#Generate-degraded-images-on-the-fly)
-1. [Use your own **paired** data](#Use-paired-training-data)
-
-### Generate degraded images on the fly
-
-Only high-resolution images are required. The low-quality images are generated on the fly during training with the degradation process described in Real-ESRGAN.
-
-**1. Prepare dataset**
-
-See [this section](#dataset-preparation) for more details.
-
-**2. Download pre-trained models**
-
-Download pre-trained models into `experiments/pretrained_models`.
-
-- *RealESRGAN_x4plus.pth*:
- ```bash
- wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P experiments/pretrained_models
- ```
-
-- *RealESRGAN_x4plus_netD.pth*:
- ```bash
- wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x4plus_netD.pth -P experiments/pretrained_models
- ```
-
-**3. Finetune**
-
-Modify [options/finetune_realesrgan_x4plus.yml](options/finetune_realesrgan_x4plus.yml) accordingly, especially the `datasets` part:
-
-```yml
-train:
- name: DF2K+OST
- type: RealESRGANDataset
- dataroot_gt: datasets/DF2K # modify to the root path of your folder
- meta_info: realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt # modify to your own generate meta info txt
- io_backend:
- type: disk
-```
-
-We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary.
-
-```bash
-CUDA_VISIBLE_DEVICES=0,1,2,3 \
-python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/finetune_realesrgan_x4plus.yml --launcher pytorch --auto_resume
-```
-
-Finetune with **a single GPU**:
-```bash
-python realesrgan/train.py -opt options/finetune_realesrgan_x4plus.yml --auto_resume
-```
-
-### Use your own paired data
-
-You can also finetune RealESRGAN with your own paired data. It is more similar to fine-tuning ESRGAN.
-
-**1. Prepare dataset**
-
-Assume that you already have two folders:
-
-- **gt folder** (Ground-truth, high-resolution images): *datasets/DF2K/DIV2K_train_HR_sub*
-- **lq folder** (Low quality, low-resolution images): *datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub*
-
-Then, you can prepare the meta_info txt file using the script [scripts/generate_meta_info_pairdata.py](scripts/generate_meta_info_pairdata.py):
-
-```bash
-python scripts/generate_meta_info_pairdata.py --input datasets/DF2K/DIV2K_train_HR_sub datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub --meta_info datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt
-```
-
-**2. Download pre-trained models**
-
-Download pre-trained models into `experiments/pretrained_models`.
-
-- *RealESRGAN_x4plus.pth*
- ```bash
- wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P experiments/pretrained_models
- ```
-
-- *RealESRGAN_x4plus_netD.pth*
- ```bash
- wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x4plus_netD.pth -P experiments/pretrained_models
- ```
-
-**3. Finetune**
-
-Modify [options/finetune_realesrgan_x4plus_pairdata.yml](options/finetune_realesrgan_x4plus_pairdata.yml) accordingly, especially the `datasets` part:
-
-```yml
-train:
- name: DIV2K
- type: RealESRGANPairedDataset
- dataroot_gt: datasets/DF2K # modify to the root path of your folder
- dataroot_lq: datasets/DF2K # modify to the root path of your folder
- meta_info: datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt # modify to your own generate meta info txt
- io_backend:
- type: disk
-```
-
-We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary.
-
-```bash
-CUDA_VISIBLE_DEVICES=0,1,2,3 \
-python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/finetune_realesrgan_x4plus_pairdata.yml --launcher pytorch --auto_resume
-```
-
-Finetune with **a single GPU**:
-```bash
-python realesrgan/train.py -opt options/finetune_realesrgan_x4plus_pairdata.yml --auto_resume
-```
diff --git a/spaces/aliabid94/AutoGPT/run.bat b/spaces/aliabid94/AutoGPT/run.bat
deleted file mode 100644
index afbab57a0603a126b04845ec754d1ecf3fdea18d..0000000000000000000000000000000000000000
--- a/spaces/aliabid94/AutoGPT/run.bat
+++ /dev/null
@@ -1,8 +0,0 @@
-@echo off
-python scripts/check_requirements.py requirements.txt
-if errorlevel 1 (
- echo Installing missing packages...
- pip install -r requirements.txt
-)
-python -m autogpt %*
-pause
diff --git a/spaces/amin2809/rvc-models2023/infer_pack/attentions.py b/spaces/amin2809/rvc-models2023/infer_pack/attentions.py
deleted file mode 100644
index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000
--- a/spaces/amin2809/rvc-models2023/infer_pack/attentions.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from infer_pack import commons
-from infer_pack import modules
-from infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- window_size=10,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- window_size=window_size,
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- proximal_bias=False,
- proximal_init=True,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- proximal_bias=proximal_bias,
- proximal_init=proximal_init,
- )
- )
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(
- MultiHeadAttention(
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- causal=True,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
- device=x.device, dtype=x.dtype
- )
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(
- self,
- channels,
- out_channels,
- n_heads,
- p_dropout=0.0,
- window_size=None,
- heads_share=True,
- block_length=None,
- proximal_bias=False,
- proximal_init=False,
- ):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
- self.emb_rel_v = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert (
- t_s == t_t
- ), "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(
- query / math.sqrt(self.k_channels), key_relative_embeddings
- )
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(
- device=scores.device, dtype=scores.dtype
- )
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert (
- t_s == t_t
- ), "Local attention is only available for self-attention."
- block_mask = (
- torch.ones_like(scores)
- .triu(-self.block_length)
- .tril(self.block_length)
- )
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(
- self.emb_rel_v, t_s
- )
- output = output + self._matmul_with_relative_values(
- relative_weights, value_relative_embeddings
- )
- output = (
- output.transpose(2, 3).contiguous().view(b, d, t_t)
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
- )
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[
- :, slice_start_position:slice_end_position
- ]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
- )
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
- :, :, :length, length - 1 :
- ]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
-        # pad along columns
- x = F.pad(
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
- )
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- filter_channels,
- kernel_size,
- p_dropout=0.0,
- activation=None,
- causal=False,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
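A quick smoke-test sketch for the `Encoder` defined above; the shapes and hyper-parameters are illustrative, and the import assumes the repository root is on `PYTHONPATH` so that `infer_pack` is importable:

```python
import torch

from infer_pack.attentions import Encoder  # assumes this repo's infer_pack package is importable

enc = Encoder(hidden_channels=192, filter_channels=768, n_heads=2,
              n_layers=6, kernel_size=3, p_dropout=0.1)
x = torch.randn(1, 192, 100)    # [batch, hidden_channels, frames]
x_mask = torch.ones(1, 1, 100)  # all frames are valid
out = enc(x, x_mask)            # output keeps the input shape
print(out.shape)                # torch.Size([1, 192, 100])
```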
diff --git a/spaces/anakin87/who-killed-laura-palmer/app_utils/README.md b/spaces/anakin87/who-killed-laura-palmer/app_utils/README.md
deleted file mode 100644
index 53e54c732f70dfe80c29579def6b4914d3e99d51..0000000000000000000000000000000000000000
--- a/spaces/anakin87/who-killed-laura-palmer/app_utils/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# App utils 🧰
-Python modules used in the [web app](../app.py).
-
-- [backend_utils.py](./backend_utils.py): backend functions to load the pipeline, answer a question and load random questions; *appropriate Streamlit caching*.
-
-- [frontend_utils.py](./frontend_utils.py): functions to manage the Streamlit web app appearance.
-
-- ⚙️ [config.py](./config.py): configurations, including score thresholds to accept answers and Hugging Face model names
\ No newline at end of file
diff --git a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/g4f/Provider/Providers/Yqcloud.py b/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/g4f/Provider/Providers/Yqcloud.py
deleted file mode 100644
index ad5c3a4326c68ceb7ee012fbf5bc072da72a7e40..0000000000000000000000000000000000000000
--- a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/g4f/Provider/Providers/Yqcloud.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-import time
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-url = 'https://chat9.yqcloud.top/'
-model = [
- 'gpt-3.5-turbo',
-]
-supports_stream = True
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, chatId: str, **kwargs):
-
- headers = {
- 'authority': 'api.aichatos.cloud',
- 'origin': 'https://chat9.yqcloud.top',
- 'referer': 'https://chat9.yqcloud.top/',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'prompt': str(messages),
- 'userId': f'#/chat/{chatId}',
- 'network': True,
- 'apikey': '',
- 'system': '',
- 'withoutContext': False,
- }
- response = requests.post('https://api.aichatos.cloud/api/generateStream',
- headers=headers, json=json_data, stream=True)
- for token in response.iter_content(chunk_size=2046):
- yield (token.decode('utf-8'))
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/aphenx/bingo/src/components/markdown.tsx b/spaces/aphenx/bingo/src/components/markdown.tsx
deleted file mode 100644
index d4491467a1f14d1d72e535caac9c40636054e5df..0000000000000000000000000000000000000000
--- a/spaces/aphenx/bingo/src/components/markdown.tsx
+++ /dev/null
@@ -1,9 +0,0 @@
-import { FC, memo } from 'react'
-import ReactMarkdown, { Options } from 'react-markdown'
-
-export const MemoizedReactMarkdown: FC = memo(
- ReactMarkdown,
- (prevProps, nextProps) =>
- prevProps.children === nextProps.children &&
- prevProps.className === nextProps.className
-)
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Hash/KangarooTwelve.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Hash/KangarooTwelve.py
deleted file mode 100644
index f5358d44ae140284df0d1d82e79ec5a5c3278cc7..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Hash/KangarooTwelve.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# ===================================================================
-#
-# Copyright (c) 2021, Legrandin
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-# ===================================================================
-
-from Crypto.Util._raw_api import (VoidPointer, SmartPointer,
- create_string_buffer,
- get_raw_buffer, c_size_t,
- c_uint8_ptr, c_ubyte)
-
-from Crypto.Util.number import long_to_bytes
-from Crypto.Util.py3compat import bchr
-
-from .keccak import _raw_keccak_lib
-
-
-def _length_encode(x):
- if x == 0:
- return b'\x00'
-
- S = long_to_bytes(x)
- return S + bchr(len(S))
-
-
-# Possible states for a KangarooTwelve instance, which depend on the amount of data processed so far.
-SHORT_MSG = 1 # Still within the first 8192 bytes, but it is not certain we will exceed them.
-LONG_MSG_S0 = 2 # Still within the first 8192 bytes, and it is certain we will exceed them.
-LONG_MSG_SX = 3 # Beyond the first 8192 bytes.
-SQUEEZING = 4 # No more data to process.
-
-
-class K12_XOF(object):
- """A KangarooTwelve hash object.
- Do not instantiate directly.
- Use the :func:`new` function.
- """
-
- def __init__(self, data, custom):
-
-        if custom is None:
- custom = b''
-
- self._custom = custom + _length_encode(len(custom))
- self._state = SHORT_MSG
- self._padding = None # Final padding is only decided in read()
-
- # Internal hash that consumes FinalNode
- self._hash1 = self._create_keccak()
- self._length1 = 0
-
- # Internal hash that produces CV_i (reset each time)
- self._hash2 = None
- self._length2 = 0
-
- # Incremented by one for each 8192-byte block
- self._ctr = 0
-
- if data:
- self.update(data)
-
- def _create_keccak(self):
- state = VoidPointer()
- result = _raw_keccak_lib.keccak_init(state.address_of(),
- c_size_t(32), # 32 bytes of capacity (256 bits)
- c_ubyte(12)) # Reduced number of rounds
- if result:
- raise ValueError("Error %d while instantiating KangarooTwelve"
- % result)
- return SmartPointer(state.get(), _raw_keccak_lib.keccak_destroy)
-
- def _update(self, data, hash_obj):
- result = _raw_keccak_lib.keccak_absorb(hash_obj.get(),
- c_uint8_ptr(data),
- c_size_t(len(data)))
- if result:
- raise ValueError("Error %d while updating KangarooTwelve state"
- % result)
-
- def _squeeze(self, hash_obj, length, padding):
- bfr = create_string_buffer(length)
- result = _raw_keccak_lib.keccak_squeeze(hash_obj.get(),
- bfr,
- c_size_t(length),
- c_ubyte(padding))
- if result:
- raise ValueError("Error %d while extracting from KangarooTwelve"
- % result)
-
- return get_raw_buffer(bfr)
-
- def _reset(self, hash_obj):
- result = _raw_keccak_lib.keccak_reset(hash_obj.get())
- if result:
- raise ValueError("Error %d while resetting KangarooTwelve state"
- % result)
-
- def update(self, data):
- """Hash the next piece of data.
-
- .. note::
- For better performance, submit chunks with a length multiple of 8192 bytes.
-
- Args:
- data (byte string/byte array/memoryview): The next chunk of the
- message to hash.
- """
-
- if self._state == SQUEEZING:
- raise TypeError("You cannot call 'update' after the first 'read'")
-
- if self._state == SHORT_MSG:
- next_length = self._length1 + len(data)
-
- if next_length + len(self._custom) <= 8192:
- self._length1 = next_length
- self._update(data, self._hash1)
- return self
-
- # Switch to tree hashing
- self._state = LONG_MSG_S0
-
- if self._state == LONG_MSG_S0:
- data_mem = memoryview(data)
- assert(self._length1 < 8192)
- dtc = min(len(data), 8192 - self._length1)
- self._update(data_mem[:dtc], self._hash1)
- self._length1 += dtc
-
- if self._length1 < 8192:
- return self
-
- # Finish hashing S_0 and start S_1
- assert(self._length1 == 8192)
-
- divider = b'\x03' + b'\x00' * 7
- self._update(divider, self._hash1)
- self._length1 += 8
-
- self._hash2 = self._create_keccak()
- self._length2 = 0
- self._ctr = 1
-
- self._state = LONG_MSG_SX
- return self.update(data_mem[dtc:])
-
- # LONG_MSG_SX
- assert(self._state == LONG_MSG_SX)
- index = 0
- len_data = len(data)
-
-        # All iterations could actually run in parallel
- data_mem = memoryview(data)
- while index < len_data:
-
- new_index = min(index + 8192 - self._length2, len_data)
- self._update(data_mem[index:new_index], self._hash2)
- self._length2 += new_index - index
- index = new_index
-
- if self._length2 == 8192:
- cv_i = self._squeeze(self._hash2, 32, 0x0B)
- self._update(cv_i, self._hash1)
- self._length1 += 32
- self._reset(self._hash2)
- self._length2 = 0
- self._ctr += 1
-
- return self
-
- def read(self, length):
- """
- Produce more bytes of the digest.
-
- .. note::
- You cannot use :meth:`update` anymore after the first call to
- :meth:`read`.
-
- Args:
- length (integer): the amount of bytes this method must return
-
- :return: the next piece of XOF output (of the given length)
- :rtype: byte string
- """
-
- custom_was_consumed = False
-
- if self._state == SHORT_MSG:
- self._update(self._custom, self._hash1)
- self._padding = 0x07
- self._state = SQUEEZING
-
- if self._state == LONG_MSG_S0:
- self.update(self._custom)
- custom_was_consumed = True
- assert(self._state == LONG_MSG_SX)
-
- if self._state == LONG_MSG_SX:
- if not custom_was_consumed:
- self.update(self._custom)
-
- # Is there still some leftover data in hash2?
- if self._length2 > 0:
- cv_i = self._squeeze(self._hash2, 32, 0x0B)
- self._update(cv_i, self._hash1)
- self._length1 += 32
- self._reset(self._hash2)
- self._length2 = 0
- self._ctr += 1
-
- trailer = _length_encode(self._ctr - 1) + b'\xFF\xFF'
- self._update(trailer, self._hash1)
-
- self._padding = 0x06
- self._state = SQUEEZING
-
- return self._squeeze(self._hash1, length, self._padding)
-
- def new(self, data=None, custom=b''):
- return type(self)(data, custom)
-
-
-def new(data=None, custom=None):
- """Return a fresh instance of a KangarooTwelve object.
-
- Args:
- data (bytes/bytearray/memoryview):
- Optional.
- The very first chunk of the message to hash.
- It is equivalent to an early call to :meth:`update`.
- custom (bytes):
- Optional.
- A customization byte string.
-
- :Return: A :class:`K12_XOF` object
- """
-
- return K12_XOF(data, custom)
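A brief usage sketch for the XOF defined above; the message and customization string are arbitrary example bytes:

```python
from Crypto.Hash import KangarooTwelve

# One-shot: absorb a message plus a customization string, then squeeze 32 bytes.
k12 = KangarooTwelve.new(data=b"some message", custom=b"example customization")
digest = k12.read(32)
print(digest.hex())

# Incremental absorption gives the same result, as long as read() is only called at the end.
k12 = KangarooTwelve.new(custom=b"example customization")
k12.update(b"some ").update(b"message")
assert k12.read(32) == digest
```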
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/multiple_marks.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/multiple_marks.py
deleted file mode 100644
index e262dd306481044009faa815c5637017da8fe41e..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/multiple_marks.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-Multiple Marks
-==============
-This example demonstrates creating a single chart with multiple markers
-representing the same data.
-"""
-# category: other charts
-import altair as alt
-from vega_datasets import data
-
-source = data.stocks()
-
-alt.Chart(source).mark_line(point=True).encode(
- x='date:T',
- y='price:Q',
- color='symbol:N'
-)
diff --git a/spaces/arxnov/anotest/mel_processing.py b/spaces/arxnov/anotest/mel_processing.py
deleted file mode 100644
index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000
--- a/spaces/arxnov/anotest/mel_processing.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
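A small usage sketch for the helpers above; the waveform is random noise and the STFT/mel parameters are typical VITS-style values chosen purely for illustration (the import assumes this module is importable as `mel_processing`):

```python
import torch

from mel_processing import spec_to_mel_torch, spectrogram_torch  # assumes this module is importable

# One second of fake mono audio in [-1, 1] at 22.05 kHz (illustrative only).
y = torch.rand(1, 22050) * 2 - 1

# Linear spectrogram, then projection onto an 80-band mel basis.
spec = spectrogram_torch(y, n_fft=1024, sampling_rate=22050,
                         hop_size=256, win_size=1024)
mel = spec_to_mel_torch(spec, n_fft=1024, num_mels=80,
                        sampling_rate=22050, fmin=0.0, fmax=None)
print(spec.shape, mel.shape)  # roughly [1, 513, frames] and [1, 80, frames]
```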
diff --git a/spaces/aseifert/ExplaiNER/src/subpages/lossy_samples.py b/spaces/aseifert/ExplaiNER/src/subpages/lossy_samples.py
deleted file mode 100644
index 0987e60d8fb5d6b71c22d76fc9431264b59c8e37..0000000000000000000000000000000000000000
--- a/spaces/aseifert/ExplaiNER/src/subpages/lossy_samples.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""Show every example sorted by loss (descending) for close inspection."""
-import pandas as pd
-import streamlit as st
-
-from src.subpages.page import Context, Page
-from src.utils import (
- colorize_classes,
- get_bg_color,
- get_fg_color,
- htmlify_labeled_example,
-)
-
-
-class LossySamplesPage(Page):
- name = "Samples by Loss"
- icon = "sort-numeric-down-alt"
-
- def _get_widget_defaults(self):
- return {
- "skip_correct": True,
- "samples_by_loss_show_df": True,
- }
-
- def render(self, context: Context):
- st.title(self.name)
- with st.expander("💡", expanded=True):
- st.write("Show every example sorted by loss (descending) for close inspection.")
- st.write(
- "The **dataframe** is mostly self-explanatory. The cells are color-coded by label, a lighter color signifies a continuation label. Cells in the loss row are filled red from left to right relative to the top loss."
- )
- st.write(
- "The **numbers to the left**: Top (black background) are sample number (listed here) and sample index (from the dataset). Below on yellow background is the total loss for the given sample."
- )
- st.write(
- "The **annotated sample**: Every predicted entity (every token, really) gets a black border. The text color signifies the predicted label, with the first token of a sequence of token also showing the label's icon. If (and only if) the prediction is wrong, a small little box after the entity (token) contains the correct target class, with a background color corresponding to that class."
- )
-
- st.subheader("💥 Samples ⬇loss")
- skip_correct = st.checkbox("Skip correct examples", value=True, key="skip_correct")
- show_df = st.checkbox("Show dataframes", key="samples_by_loss_show_df")
-
- st.write(
- """""",
- unsafe_allow_html=True,
- )
-
- top_indices = (
- context.df.sort_values(by="total_loss", ascending=False)
- .query("total_loss > 0.5")
- .index
- )
-
- cnt = 0
- for idx in top_indices:
- sample = context.df_tokens_merged.loc[idx]
-
- if isinstance(sample, pd.Series):
- continue
-
- if skip_correct and sum(sample.labels != sample.preds) == 0:
- continue
-
- if show_df:
-
- def colorize_col(col):
- if col.name == "labels" or col.name == "preds":
- bgs = []
- fgs = []
- ops = []
- for v in col.values:
- bgs.append(get_bg_color(v.split("-")[1]) if "-" in v else "#ffffff")
- fgs.append(get_fg_color(bgs[-1]))
- ops.append("1" if v.split("-")[0] == "B" or v == "O" else "0.5")
- return [
- f"background-color: {bg}; color: {fg}; opacity: {op};"
- for bg, fg, op in zip(bgs, fgs, ops)
- ]
- return [""] * len(col)
-
- df = sample.reset_index().drop(["index", "hidden_states", "ids"], axis=1).round(3)
- losses_slice = pd.IndexSlice["losses", :]
- # x = df.T.astype(str)
- # st.dataframe(x)
- # st.dataframe(x.loc[losses_slice])
- styler = (
- df.T.style.apply(colorize_col, axis=1)
- .bar(subset=losses_slice, axis=1)
- .format(precision=3)
- )
- # styler.data = styler.data.astype(str)
- st.write(styler.to_html(), unsafe_allow_html=True)
- st.write("")
- # st.dataframe(colorize_classes(sample.drop("hidden_states", axis=1)))#.bar(subset='losses')) # type: ignore
- # st.write(
- # colorize_errors(sample.round(3).drop("hidden_states", axis=1).astype(str))
- # )
-
- col1, _, col2 = st.columns([3.5 / 32, 0.5 / 32, 28 / 32])
-
- cnt += 1
- counter = f"[{cnt} | {idx}] "
- loss = f"𝐿 {sample.losses.sum():.3f} "
- col1.write(f"{counter}{loss}", unsafe_allow_html=True)
- col1.write("")
-
- col2.write(htmlify_labeled_example(sample), unsafe_allow_html=True)
- # st.write(f"[{i};{idx}] " + htmlify_corr_sample(sample), unsafe_allow_html=True)
diff --git a/spaces/awacke1/QRCodeAI/app.py b/spaces/awacke1/QRCodeAI/app.py
deleted file mode 100644
index bd06b41b2c800425562a600fc1377b789335bef4..0000000000000000000000000000000000000000
--- a/spaces/awacke1/QRCodeAI/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/DionTimmer/controlnet_qrcode-control_v1p_sd15").launch()
\ No newline at end of file
diff --git a/spaces/awacke1/WVW-WhisperVoiceWriter/README.md b/spaces/awacke1/WVW-WhisperVoiceWriter/README.md
deleted file mode 100644
index e187dfaa4501917dc650a6bfb71e01e8651e6d3d..0000000000000000000000000000000000000000
--- a/spaces/awacke1/WVW-WhisperVoiceWriter/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: WVW WhisperVoiceWriter
-emoji: 📊
-colorFrom: yellow
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.26.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awinml/api_vicuna-openblas/app.py b/spaces/awinml/api_vicuna-openblas/app.py
deleted file mode 100644
index 48530e44da8abcb818155e5bad12562892d80e15..0000000000000000000000000000000000000000
--- a/spaces/awinml/api_vicuna-openblas/app.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import os
-os.system('CMAKE_ARGS="-DLLAMA_OPENBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python')
-
-import gradio as gr
-from llama_cpp import Llama
-
-
-llm = Llama(model_path="eachadea_ggml-vic7b-q4_0.bin", n_ctx=2048, n_batch=126)
-
-def generate_text(prompt):
- output = llm(prompt, max_tokens=468, temperature=0.1, top_p=0.5, echo=False, stop=["#"])
- output_text = output['choices'][0]['text']
- return output_text
-
-description = "Vicuna-7B-GPTQ-4bit-128g.GGML, max_tokens=468, temperature=0.1, top_p=0.5"
-
-examples = [
- ["What is the capital of France? ", "The capital of France is Paris."],
- ["Who wrote the novel 'Pride and Prejudice'?", "The novel 'Pride and Prejudice' was written by Jane Austen."],
- ["What is the square root of 64?", "The square root of 64 is 8."]
-]
-
-gradio_interface = gr.Interface(
- fn=generate_text,
- inputs="text",
- outputs="text",
- title="Vicuna API",
-)
-gradio_interface.launch()
\ No newline at end of file
diff --git a/spaces/bahman/labequip/README.md b/spaces/bahman/labequip/README.md
deleted file mode 100644
index ec5704935a5cf4adb1aa7a06ef73b39ed0e4d143..0000000000000000000000000000000000000000
--- a/spaces/bahman/labequip/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Labequip
-emoji: 🏢
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.1.4
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/exporters/PLYExporter.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/exporters/PLYExporter.js
deleted file mode 100644
index 45602189ab3ec71e3b7c2f1972afbef6a5723ed0..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/exporters/PLYExporter.js
+++ /dev/null
@@ -1,549 +0,0 @@
-/**
- * @author Garrett Johnson / http://gkjohnson.github.io/
- * https://github.com/gkjohnson/ply-exporter-js
- *
- * Usage:
- * var exporter = new THREE.PLYExporter();
- *
- * // the third argument is a list of options
- * exporter.parse(mesh, data => console.log(data), { binary: true, excludeAttributes: [ 'color' ] });
- *
- * Format Definition:
- * http://paulbourke.net/dataformats/ply/
- */
-
-THREE.PLYExporter = function () {};
-
-THREE.PLYExporter.prototype = {
-
- constructor: THREE.PLYExporter,
-
- parse: function ( object, onDone, options ) {
-
- if ( onDone && typeof onDone === 'object' ) {
-
- console.warn( 'THREE.PLYExporter: The options parameter is now the third argument to the "parse" function. See the documentation for the new API.' );
- options = onDone;
- onDone = undefined;
-
- }
-
- // Iterate over the valid meshes in the object
- function traverseMeshes( cb ) {
-
- object.traverse( function ( child ) {
-
- if ( child.isMesh === true ) {
-
- var mesh = child;
- var geometry = mesh.geometry;
-
- if ( geometry.isGeometry === true ) {
-
- geometry = geomToBufferGeom.get( geometry );
-
- }
-
- if ( geometry.isBufferGeometry === true ) {
-
- if ( geometry.getAttribute( 'position' ) !== undefined ) {
-
- cb( mesh, geometry );
-
- }
-
- }
-
- }
-
- } );
-
- }
-
- // Default options
- var defaultOptions = {
- binary: false,
- excludeAttributes: [] // normal, uv, color, index
- };
-
- options = Object.assign( defaultOptions, options );
-
- var excludeAttributes = options.excludeAttributes;
- var geomToBufferGeom = new WeakMap();
- var includeNormals = false;
- var includeColors = false;
- var includeUVs = false;
-
- // count the vertices, check which properties are used,
- // and cache the BufferGeometry
- var vertexCount = 0;
- var faceCount = 0;
- object.traverse( function ( child ) {
-
- if ( child.isMesh === true ) {
-
- var mesh = child;
- var geometry = mesh.geometry;
-
- if ( geometry.isGeometry === true ) {
-
- var bufferGeometry = geomToBufferGeom.get( geometry ) || new THREE.BufferGeometry().setFromObject( mesh );
- geomToBufferGeom.set( geometry, bufferGeometry );
- geometry = bufferGeometry;
-
- }
-
- if ( geometry.isBufferGeometry === true ) {
-
- var vertices = geometry.getAttribute( 'position' );
- var normals = geometry.getAttribute( 'normal' );
- var uvs = geometry.getAttribute( 'uv' );
- var colors = geometry.getAttribute( 'color' );
- var indices = geometry.getIndex();
-
- if ( vertices === undefined ) {
-
- return;
-
- }
-
- vertexCount += vertices.count;
- faceCount += indices ? indices.count / 3 : vertices.count / 3;
-
- if ( normals !== undefined ) includeNormals = true;
-
- if ( uvs !== undefined ) includeUVs = true;
-
- if ( colors !== undefined ) includeColors = true;
-
- }
-
- }
-
- } );
-
- var includeIndices = excludeAttributes.indexOf( 'index' ) === - 1;
- includeNormals = includeNormals && excludeAttributes.indexOf( 'normal' ) === - 1;
- includeColors = includeColors && excludeAttributes.indexOf( 'color' ) === - 1;
- includeUVs = includeUVs && excludeAttributes.indexOf( 'uv' ) === - 1;
-
-
- if ( includeIndices && faceCount !== Math.floor( faceCount ) ) {
-
- // point cloud meshes will not have an index array and may not have a
- // number of vertices that is divisible by 3 (and therefore representable
- // as triangles)
- console.error(
-
- 'PLYExporter: Failed to generate a valid PLY file with triangle indices because the ' +
- 'number of indices is not divisible by 3.'
-
- );
-
- return null;
-
- }
-
- // get how many bytes will be needed to save out the faces
- // so we can use a minimal amount of memory / data
- var indexByteCount = 1;
-
- if ( vertexCount > 256 ) { // 2^8 bits
-
- indexByteCount = 2;
-
- }
-
- if ( vertexCount > 65536 ) { // 2^16 bits
-
- indexByteCount = 4;
-
- }
-
-
- var header =
- 'ply\n' +
- `format ${ options.binary ? 'binary_big_endian' : 'ascii' } 1.0\n` +
- `element vertex ${vertexCount}\n` +
-
- // position
- 'property float x\n' +
- 'property float y\n' +
- 'property float z\n';
-
- if ( includeNormals === true ) {
-
- // normal
- header +=
- 'property float nx\n' +
- 'property float ny\n' +
- 'property float nz\n';
-
- }
-
- if ( includeUVs === true ) {
-
- // uvs
- header +=
- 'property float s\n' +
- 'property float t\n';
-
- }
-
- if ( includeColors === true ) {
-
- // colors
- header +=
- 'property uchar red\n' +
- 'property uchar green\n' +
- 'property uchar blue\n';
-
- }
-
- if ( includeIndices === true ) {
-
- // faces
- header +=
- `element face ${faceCount}\n` +
- `property list uchar uint${ indexByteCount * 8 } vertex_index\n`;
-
- }
-
- header += 'end_header\n';
-
-
- // Generate attribute data
- var vertex = new THREE.Vector3();
- var normalMatrixWorld = new THREE.Matrix3();
- var result = null;
-
- if ( options.binary === true ) {
-
- // Binary File Generation
- var headerBin = new TextEncoder().encode( header );
-
- // 3 position values at 4 bytes
- // 3 normal values at 4 bytes
- // 3 color channels with 1 byte
- // 2 uv values at 4 bytes
- var vertexListLength = vertexCount * ( 4 * 3 + ( includeNormals ? 4 * 3 : 0 ) + ( includeColors ? 3 : 0 ) + ( includeUVs ? 4 * 2 : 0 ) );
-
- // 1 byte shape descriptor
- // 3 vertex indices at ${indexByteCount} bytes
- var faceListLength = includeIndices ? faceCount * ( indexByteCount * 3 + 1 ) : 0;
- var output = new DataView( new ArrayBuffer( headerBin.length + vertexListLength + faceListLength ) );
- new Uint8Array( output.buffer ).set( headerBin, 0 );
-
-
- var vOffset = headerBin.length;
- var fOffset = headerBin.length + vertexListLength;
- var writtenVertices = 0;
- traverseMeshes( function ( mesh, geometry ) {
-
- var vertices = geometry.getAttribute( 'position' );
- var normals = geometry.getAttribute( 'normal' );
- var uvs = geometry.getAttribute( 'uv' );
- var colors = geometry.getAttribute( 'color' );
- var indices = geometry.getIndex();
-
- normalMatrixWorld.getNormalMatrix( mesh.matrixWorld );
-
- for ( var i = 0, l = vertices.count; i < l; i ++ ) {
-
- vertex.x = vertices.getX( i );
- vertex.y = vertices.getY( i );
- vertex.z = vertices.getZ( i );
-
- vertex.applyMatrix4( mesh.matrixWorld );
-
-
- // Position information
- output.setFloat32( vOffset, vertex.x );
- vOffset += 4;
-
- output.setFloat32( vOffset, vertex.y );
- vOffset += 4;
-
- output.setFloat32( vOffset, vertex.z );
- vOffset += 4;
-
- // Normal information
- if ( includeNormals === true ) {
-
- if ( normals != null ) {
-
- vertex.x = normals.getX( i );
- vertex.y = normals.getY( i );
- vertex.z = normals.getZ( i );
-
- vertex.applyMatrix3( normalMatrixWorld );
-
- output.setFloat32( vOffset, vertex.x );
- vOffset += 4;
-
- output.setFloat32( vOffset, vertex.y );
- vOffset += 4;
-
- output.setFloat32( vOffset, vertex.z );
- vOffset += 4;
-
- } else {
-
- output.setFloat32( vOffset, 0 );
- vOffset += 4;
-
- output.setFloat32( vOffset, 0 );
- vOffset += 4;
-
- output.setFloat32( vOffset, 0 );
- vOffset += 4;
-
- }
-
- }
-
- // UV information
- if ( includeUVs === true ) {
-
- if ( uvs != null ) {
-
- output.setFloat32( vOffset, uvs.getX( i ) );
- vOffset += 4;
-
- output.setFloat32( vOffset, uvs.getY( i ) );
- vOffset += 4;
-
- } else if ( includeUVs !== false ) {
-
- output.setFloat32( vOffset, 0 );
- vOffset += 4;
-
- output.setFloat32( vOffset, 0 );
- vOffset += 4;
-
- }
-
- }
-
- // Color information
- if ( includeColors === true ) {
-
- if ( colors != null ) {
-
- output.setUint8( vOffset, Math.floor( colors.getX( i ) * 255 ) );
- vOffset += 1;
-
- output.setUint8( vOffset, Math.floor( colors.getY( i ) * 255 ) );
- vOffset += 1;
-
- output.setUint8( vOffset, Math.floor( colors.getZ( i ) * 255 ) );
- vOffset += 1;
-
- } else {
-
- output.setUint8( vOffset, 255 );
- vOffset += 1;
-
- output.setUint8( vOffset, 255 );
- vOffset += 1;
-
- output.setUint8( vOffset, 255 );
- vOffset += 1;
-
- }
-
- }
-
- }
-
- if ( includeIndices === true ) {
-
- // Create the face list
- var faceIndexFunc = `setUint${indexByteCount * 8}`;
- if ( indices !== null ) {
-
- for ( var i = 0, l = indices.count; i < l; i += 3 ) {
-
- output.setUint8( fOffset, 3 );
- fOffset += 1;
-
- output[ faceIndexFunc ]( fOffset, indices.getX( i + 0 ) + writtenVertices );
- fOffset += indexByteCount;
-
- output[ faceIndexFunc ]( fOffset, indices.getX( i + 1 ) + writtenVertices );
- fOffset += indexByteCount;
-
- output[ faceIndexFunc ]( fOffset, indices.getX( i + 2 ) + writtenVertices );
- fOffset += indexByteCount;
-
- }
-
- } else {
-
- for ( var i = 0, l = vertices.count; i < l; i += 3 ) {
-
- output.setUint8( fOffset, 3 );
- fOffset += 1;
-
- output[ faceIndexFunc ]( fOffset, writtenVertices + i );
- fOffset += indexByteCount;
-
- output[ faceIndexFunc ]( fOffset, writtenVertices + i + 1 );
- fOffset += indexByteCount;
-
- output[ faceIndexFunc ]( fOffset, writtenVertices + i + 2 );
- fOffset += indexByteCount;
-
- }
-
- }
-
- }
-
-
- // Save the amount of verts we've already written so we can offset
- // the face index on the next mesh
- writtenVertices += vertices.count;
-
- } );
-
- result = output.buffer;
-
- } else {
-
- // Ascii File Generation
- // count the number of vertices
- var writtenVertices = 0;
- var vertexList = '';
- var faceList = '';
-
- traverseMeshes( function ( mesh, geometry ) {
-
- var vertices = geometry.getAttribute( 'position' );
- var normals = geometry.getAttribute( 'normal' );
- var uvs = geometry.getAttribute( 'uv' );
- var colors = geometry.getAttribute( 'color' );
- var indices = geometry.getIndex();
-
- normalMatrixWorld.getNormalMatrix( mesh.matrixWorld );
-
- // form each line
- for ( var i = 0, l = vertices.count; i < l; i ++ ) {
-
- vertex.x = vertices.getX( i );
- vertex.y = vertices.getY( i );
- vertex.z = vertices.getZ( i );
-
- vertex.applyMatrix4( mesh.matrixWorld );
-
-
- // Position information
- var line =
- vertex.x + ' ' +
- vertex.y + ' ' +
- vertex.z;
-
- // Normal information
- if ( includeNormals === true ) {
-
- if ( normals != null ) {
-
- vertex.x = normals.getX( i );
- vertex.y = normals.getY( i );
- vertex.z = normals.getZ( i );
-
- vertex.applyMatrix3( normalMatrixWorld );
-
- line += ' ' +
- vertex.x + ' ' +
- vertex.y + ' ' +
- vertex.z;
-
- } else {
-
- line += ' 0 0 0';
-
- }
-
- }
-
- // UV information
- if ( includeUVs === true ) {
-
- if ( uvs != null ) {
-
- line += ' ' +
- uvs.getX( i ) + ' ' +
- uvs.getY( i );
-
- } else if ( includeUVs !== false ) {
-
- line += ' 0 0';
-
- }
-
- }
-
- // Color information
- if ( includeColors === true ) {
-
- if ( colors != null ) {
-
- line += ' ' +
- Math.floor( colors.getX( i ) * 255 ) + ' ' +
- Math.floor( colors.getY( i ) * 255 ) + ' ' +
- Math.floor( colors.getZ( i ) * 255 );
-
- } else {
-
- line += ' 255 255 255';
-
- }
-
- }
-
- vertexList += line + '\n';
-
- }
-
- // Create the face list
- if ( includeIndices === true ) {
-
- if ( indices !== null ) {
-
- for ( var i = 0, l = indices.count; i < l; i += 3 ) {
-
- faceList += `3 ${ indices.getX( i + 0 ) + writtenVertices }`;
- faceList += ` ${ indices.getX( i + 1 ) + writtenVertices }`;
- faceList += ` ${ indices.getX( i + 2 ) + writtenVertices }\n`;
-
- }
-
- } else {
-
- for ( var i = 0, l = vertices.count; i < l; i += 3 ) {
-
- faceList += `3 ${ writtenVertices + i } ${ writtenVertices + i + 1 } ${ writtenVertices + i + 2 }\n`;
-
- }
-
- }
-
- faceCount += indices ? indices.count / 3 : vertices.count / 3;
-
- }
-
- writtenVertices += vertices.count;
-
- } );
-
- result = `${ header }${vertexList}\n${ includeIndices ? `${faceList}\n` : '' }`;
-
- }
-
- if ( typeof onDone === 'function' ) requestAnimationFrame( () => onDone( result ) );
- return result;
-
- }
-
-};
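To make the output format above concrete, the sketch below writes the kind of minimal ASCII PLY file this exporter emits for a single triangle, using only the position and face properties from the header-building code; the coordinates are arbitrary example values and the snippet is illustrative, not part of three.js.

```python
# Minimal ASCII PLY for one triangle, mirroring the header layout built above
# (positions only, plus a face element). Coordinates are arbitrary examples.
vertices = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
faces = [(0, 1, 2)]

lines = [
    "ply",
    "format ascii 1.0",
    f"element vertex {len(vertices)}",
    "property float x",
    "property float y",
    "property float z",
    f"element face {len(faces)}",
    "property list uchar uint8 vertex_index",  # uint8 since all indices fit in one byte
    "end_header",
]
lines += [f"{x} {y} {z}" for x, y, z in vertices]   # one vertex per line
lines += [f"3 {a} {b} {c}" for a, b, c in faces]    # leading "3" = vertices per face

with open("triangle.ply", "w") as f:
    f.write("\n".join(lines) + "\n")
```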
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/geometries/PlaneGeometry.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/geometries/PlaneGeometry.d.ts
deleted file mode 100644
index f6dd7e02c4b88f394e7b5e183b3b945797ca071c..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/geometries/PlaneGeometry.d.ts
+++ /dev/null
@@ -1,34 +0,0 @@
-import { Geometry } from './../core/Geometry';
-import { BufferGeometry } from './../core/BufferGeometry';
-
-export class PlaneBufferGeometry extends BufferGeometry {
- constructor(
- width?: number,
- height?: number,
- widthSegments?: number,
- heightSegments?: number
- );
-
- parameters: {
- width: number;
- height: number;
- widthSegments: number;
- heightSegments: number;
- };
-}
-
-export class PlaneGeometry extends Geometry {
- constructor(
- width?: number,
- height?: number,
- widthSegments?: number,
- heightSegments?: number
- );
-
- parameters: {
- width: number;
- height: number;
- widthSegments: number;
- heightSegments: number;
- };
-}
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/geometries/TubeGeometry.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/geometries/TubeGeometry.d.ts
deleted file mode 100644
index 1da3bd37f893d38b21d863526e00e2f0ca2d3678..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/geometries/TubeGeometry.d.ts
+++ /dev/null
@@ -1,46 +0,0 @@
-import { Curve } from './../extras/core/Curve';
-import { Vector3 } from './../math/Vector3';
-import { Geometry } from './../core/Geometry';
-import { BufferGeometry } from './../core/BufferGeometry';
-
-export class TubeBufferGeometry extends BufferGeometry {
- constructor(
- path: Curve,
- tubularSegments?: number,
- radius?: number,
- radiusSegments?: number,
- closed?: boolean
- );
-
- parameters: {
- path: Curve;
- tubularSegments: number;
- radius: number;
- radialSegments: number;
- closed: boolean;
- };
- tangents: Vector3[];
- normals: Vector3[];
- binormals: Vector3[];
-}
-
-export class TubeGeometry extends Geometry {
- constructor(
- path: Curve,
- tubularSegments?: number,
- radius?: number,
- radiusSegments?: number,
- closed?: boolean
- );
-
- parameters: {
- path: Curve;
- tubularSegments: number;
- radius: number;
- radialSegments: number;
- closed: boolean;
- };
- tangents: Vector3[];
- normals: Vector3[];
- binormals: Vector3[];
-}
diff --git a/spaces/bigPear/digitalWDF/data/example_dataset/example_dataset.py b/spaces/bigPear/digitalWDF/data/example_dataset/example_dataset.py
deleted file mode 100644
index 2dd53daed3a0336cf46b1048bb4b31f932f435e7..0000000000000000000000000000000000000000
--- a/spaces/bigPear/digitalWDF/data/example_dataset/example_dataset.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import json
-import datasets
-from typing import Any, Dict, List
-
-
-_DESCRIPTION = "An example of dataset for ChatGLM."
-_CITATION = ""
-_HOMEPAGE = ""
-_LICENSE = ""
-_URL = "examples.json"
-
-
-class ExampleDataset(datasets.GeneratorBasedBuilder):
-
- VERSION = datasets.Version("0.0.0")
-
- def _info(self) -> datasets.DatasetInfo:
- features = datasets.Features({
- "instruction": datasets.Value("string"),
- "input": datasets.Value("string"),
- "output": datasets.Value("string"),
- "history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
- })
- return datasets.DatasetInfo(
- description=_DESCRIPTION,
- features=features,
- homepage=_HOMEPAGE,
- license=_LICENSE,
- citation=_CITATION
- )
-
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
- file_path = dl_manager.download(_URL)
- return [
- datasets.SplitGenerator(
- name=datasets.Split.TRAIN,
- gen_kwargs={
- "filepath": file_path
- }
- )
- ]
-
- def _generate_examples(self, filepath: str) -> Dict[int, Dict[str, Any]]:
- example_dataset = json.load(open(filepath, "r", encoding="utf-8"))
- for key, example in enumerate(example_dataset):
- yield key, example
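The loader above expects an `examples.json` shaped like the declared features (instruction, input, output, and a history of [user, assistant] turns). That file is not part of this diff, so the record below is purely illustrative.

```python
# Write a hypothetical examples.json matching the features declared above.
# The contents are invented for illustration; the real dataset is not shown here.
import json

examples = [
    {
        "instruction": "Translate the sentence to French.",
        "input": "Good morning",
        "output": "Bonjour",
        "history": [
            ["Hello", "Hi, how can I help you?"],  # each turn: [user, assistant]
        ],
    }
]

with open("examples.json", "w", encoding="utf-8") as f:
    json.dump(examples, f, ensure_ascii=False, indent=2)
```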
diff --git a/spaces/bigcode/santacoder-endpoint/README.md b/spaces/bigcode/santacoder-endpoint/README.md
deleted file mode 100644
index b136d27a71f963b9d0423dfb9ef9cfc1f6c3a0b8..0000000000000000000000000000000000000000
--- a/spaces/bigcode/santacoder-endpoint/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Santacoder Endpoint
-emoji: 🐨
-colorFrom: red
-colorTo: red
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/extras.py b/spaces/bigjoker/stable-diffusion-webui/modules/extras.py
deleted file mode 100644
index 6a9af2d8e641fdf1ebd29045078d29b5aeae3d6f..0000000000000000000000000000000000000000
--- a/spaces/bigjoker/stable-diffusion-webui/modules/extras.py
+++ /dev/null
@@ -1,258 +0,0 @@
-import os
-import re
-import shutil
-
-
-import torch
-import tqdm
-
-from modules import shared, images, sd_models, sd_vae, sd_models_config
-from modules.ui_common import plaintext_to_html
-import gradio as gr
-import safetensors.torch
-
-
-def run_pnginfo(image):
- if image is None:
- return '', '', ''
-
- geninfo, items = images.read_info_from_image(image)
- items = {**{'parameters': geninfo}, **items}
-
- info = ''
- for key, text in items.items():
- info += f"""
-<div>
-<p><b>{plaintext_to_html(str(key))}</b></p>
-<p>{plaintext_to_html(str(text))}</p>
-</div>
-""".strip()+"\n"
-
- if len(info) == 0:
- message = "Nothing found in the image."
- info = f"<div><p>{message}</p></div>"
-
- return '', geninfo, info
-
-
-def create_config(ckpt_result, config_source, a, b, c):
- def config(x):
- res = sd_models_config.find_checkpoint_config_near_filename(x) if x else None
- return res if res != shared.sd_default_config else None
-
- if config_source == 0:
- cfg = config(a) or config(b) or config(c)
- elif config_source == 1:
- cfg = config(b)
- elif config_source == 2:
- cfg = config(c)
- else:
- cfg = None
-
- if cfg is None:
- return
-
- filename, _ = os.path.splitext(ckpt_result)
- checkpoint_filename = filename + ".yaml"
-
- print("Copying config:")
- print(" from:", cfg)
- print(" to:", checkpoint_filename)
- shutil.copyfile(cfg, checkpoint_filename)
-
-
-checkpoint_dict_skip_on_merge = ["cond_stage_model.transformer.text_model.embeddings.position_ids"]
-
-
-def to_half(tensor, enable):
- if enable and tensor.dtype == torch.float:
- return tensor.half()
-
- return tensor
-
-
-def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights):
- shared.state.begin()
- shared.state.job = 'model-merge'
-
- def fail(message):
- shared.state.textinfo = message
- shared.state.end()
- return [*[gr.update() for _ in range(4)], message]
-
- def weighted_sum(theta0, theta1, alpha):
- return ((1 - alpha) * theta0) + (alpha * theta1)
-
- def get_difference(theta1, theta2):
- return theta1 - theta2
-
- def add_difference(theta0, theta1_2_diff, alpha):
- return theta0 + (alpha * theta1_2_diff)
-
- def filename_weighted_sum():
- a = primary_model_info.model_name
- b = secondary_model_info.model_name
- Ma = round(1 - multiplier, 2)
- Mb = round(multiplier, 2)
-
- return f"{Ma}({a}) + {Mb}({b})"
-
- def filename_add_difference():
- a = primary_model_info.model_name
- b = secondary_model_info.model_name
- c = tertiary_model_info.model_name
- M = round(multiplier, 2)
-
- return f"{a} + {M}({b} - {c})"
-
- def filename_nothing():
- return primary_model_info.model_name
-
- theta_funcs = {
- "Weighted sum": (filename_weighted_sum, None, weighted_sum),
- "Add difference": (filename_add_difference, get_difference, add_difference),
- "No interpolation": (filename_nothing, None, None),
- }
- filename_generator, theta_func1, theta_func2 = theta_funcs[interp_method]
- shared.state.job_count = (1 if theta_func1 else 0) + (1 if theta_func2 else 0)
-
- if not primary_model_name:
- return fail("Failed: Merging requires a primary model.")
-
- primary_model_info = sd_models.checkpoints_list[primary_model_name]
-
- if theta_func2 and not secondary_model_name:
- return fail("Failed: Merging requires a secondary model.")
-
- secondary_model_info = sd_models.checkpoints_list[secondary_model_name] if theta_func2 else None
-
- if theta_func1 and not tertiary_model_name:
- return fail(f"Failed: Interpolation method ({interp_method}) requires a tertiary model.")
-
- tertiary_model_info = sd_models.checkpoints_list[tertiary_model_name] if theta_func1 else None
-
- result_is_inpainting_model = False
- result_is_instruct_pix2pix_model = False
-
- if theta_func2:
- shared.state.textinfo = f"Loading B"
- print(f"Loading {secondary_model_info.filename}...")
- theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
- else:
- theta_1 = None
-
- if theta_func1:
- shared.state.textinfo = f"Loading C"
- print(f"Loading {tertiary_model_info.filename}...")
- theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu')
-
- shared.state.textinfo = 'Merging B and C'
- shared.state.sampling_steps = len(theta_1.keys())
- for key in tqdm.tqdm(theta_1.keys()):
- if key in checkpoint_dict_skip_on_merge:
- continue
-
- if 'model' in key:
- if key in theta_2:
- t2 = theta_2.get(key, torch.zeros_like(theta_1[key]))
- theta_1[key] = theta_func1(theta_1[key], t2)
- else:
- theta_1[key] = torch.zeros_like(theta_1[key])
-
- shared.state.sampling_step += 1
- del theta_2
-
- shared.state.nextjob()
-
- shared.state.textinfo = f"Loading {primary_model_info.filename}..."
- print(f"Loading {primary_model_info.filename}...")
- theta_0 = sd_models.read_state_dict(primary_model_info.filename, map_location='cpu')
-
- print("Merging...")
- shared.state.textinfo = 'Merging A and B'
- shared.state.sampling_steps = len(theta_0.keys())
- for key in tqdm.tqdm(theta_0.keys()):
- if theta_1 and 'model' in key and key in theta_1:
-
- if key in checkpoint_dict_skip_on_merge:
- continue
-
- a = theta_0[key]
- b = theta_1[key]
-
- # this enables merging an inpainting model (A) with another one (B);
- # where a normal model would have 4 channels for the latent space, an inpainting model would
- # have another 4 channels for the unmasked picture's latent space, plus one channel for the mask, for a total of 9
- if a.shape != b.shape and a.shape[0:1] + a.shape[2:] == b.shape[0:1] + b.shape[2:]:
- if a.shape[1] == 4 and b.shape[1] == 9:
- raise RuntimeError("When merging inpainting model with a normal one, A must be the inpainting model.")
- if a.shape[1] == 4 and b.shape[1] == 8:
- raise RuntimeError("When merging instruct-pix2pix model with a normal one, A must be the instruct-pix2pix model.")
-
- if a.shape[1] == 8 and b.shape[1] == 4:#If we have an Instruct-Pix2Pix model...
- theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)#Merge only the vectors the models have in common. Otherwise we get an error due to dimension mismatch.
- result_is_instruct_pix2pix_model = True
- else:
- assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
- theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
- result_is_inpainting_model = True
- else:
- theta_0[key] = theta_func2(a, b, multiplier)
-
- theta_0[key] = to_half(theta_0[key], save_as_half)
-
- shared.state.sampling_step += 1
-
- del theta_1
-
- bake_in_vae_filename = sd_vae.vae_dict.get(bake_in_vae, None)
- if bake_in_vae_filename is not None:
- print(f"Baking in VAE from {bake_in_vae_filename}")
- shared.state.textinfo = 'Baking in VAE'
- vae_dict = sd_vae.load_vae_dict(bake_in_vae_filename, map_location='cpu')
-
- for key in vae_dict.keys():
- theta_0_key = 'first_stage_model.' + key
- if theta_0_key in theta_0:
- theta_0[theta_0_key] = to_half(vae_dict[key], save_as_half)
-
- del vae_dict
-
- if save_as_half and not theta_func2:
- for key in theta_0.keys():
- theta_0[key] = to_half(theta_0[key], save_as_half)
-
- if discard_weights:
- regex = re.compile(discard_weights)
- for key in list(theta_0):
- if re.search(regex, key):
- theta_0.pop(key, None)
-
- ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path
-
- filename = filename_generator() if custom_name == '' else custom_name
- filename += ".inpainting" if result_is_inpainting_model else ""
- filename += ".instruct-pix2pix" if result_is_instruct_pix2pix_model else ""
- filename += "." + checkpoint_format
-
- output_modelname = os.path.join(ckpt_dir, filename)
-
- shared.state.nextjob()
- shared.state.textinfo = "Saving"
- print(f"Saving to {output_modelname}...")
-
- _, extension = os.path.splitext(output_modelname)
- if extension.lower() == ".safetensors":
- safetensors.torch.save_file(theta_0, output_modelname, metadata={"format": "pt"})
- else:
- torch.save(theta_0, output_modelname)
-
- sd_models.list_models()
-
- create_config(output_modelname, config_source, primary_model_info, secondary_model_info, tertiary_model_info)
-
- print(f"Checkpoint saved to {output_modelname}.")
- shared.state.textinfo = "Checkpoint saved"
- shared.state.end()
-
- return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], "Checkpoint saved to " + output_modelname]
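The merge arithmetic above reduces to the two helpers defined inside `run_modelmerger`; here is a standalone toy demonstration of them on dummy tensors (not real checkpoints):

```python
# Toy demo of the two interpolation rules from run_modelmerger, on dummy tensors.
import torch

def weighted_sum(theta0, theta1, alpha):
    return ((1 - alpha) * theta0) + (alpha * theta1)

def add_difference(theta0, theta1_2_diff, alpha):
    return theta0 + (alpha * theta1_2_diff)

a = torch.tensor([1.0, 2.0, 3.0])  # stands in for a tensor from model A
b = torch.tensor([3.0, 2.0, 1.0])  # model B
c = torch.tensor([0.0, 1.0, 0.0])  # model C (only used by "Add difference")

print(weighted_sum(a, b, 0.25))        # 0.75*A + 0.25*B -> tensor([1.50, 2.00, 2.50])
print(add_difference(a, b - c, 0.25))  # A + 0.25*(B - C) -> tensor([1.75, 2.25, 3.25])
```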
diff --git a/spaces/bioriAsaeru/text-to-voice/Download the Mp4 Video Songs from Hindi Begum Jaan Movie The Film Narrated by Amitabh Bachchan.md b/spaces/bioriAsaeru/text-to-voice/Download the Mp4 Video Songs from Hindi Begum Jaan Movie The Film Narrated by Amitabh Bachchan.md
deleted file mode 100644
index b59e4856691c19df5794a5752353f1a3400d3cf6..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Download the Mp4 Video Songs from Hindi Begum Jaan Movie The Film Narrated by Amitabh Bachchan.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-download Songs Film Chhoti Begum 1956 unlimited Movies and videos Download Here.Songs Film Chhoti Begum 1956 Hd,3gp. mp4 320p and More Videos You Can Download Easyly. tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.
-download Azaadiyaan Song 3 Storeys unlimited Movies and videos Download Here.Azaadiyaan Song 3 Storeys Hd,3gp. mp4 320p and More Videos You Can Download Easyly. tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.
-Hindi Begum Jaan Movie Mp4 Video Songs Download Download File ⇔ https://urloso.com/2uyRXx
-aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/How to Download Step Up 4 Movie Soundtrack Mp3 13 Ensemble Imagier Inv for Free.md b/spaces/bioriAsaeru/text-to-voice/How to Download Step Up 4 Movie Soundtrack Mp3 13 Ensemble Imagier Inv for Free.md
deleted file mode 100644
index 19e263fe5240f829c727d8b115717252dced3629..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/How to Download Step Up 4 Movie Soundtrack Mp3 13 Ensemble Imagier Inv for Free.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Step Up 4 Movie Soundtrack Mp3 13 ensemble imagier inv Download ✵ https://urloso.com/2uyPBn
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/bzd4576/sovits-sin/text/symbols.py b/spaces/bzd4576/sovits-sin/text/symbols.py
deleted file mode 100644
index 869a53e763ae825bc02921842280ac9efe7f85dd..0000000000000000000000000000000000000000
--- a/spaces/bzd4576/sovits-sin/text/symbols.py
+++ /dev/null
@@ -1,16 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-'''
-Defines the set of symbols used in text input to the model.
-'''
-_pad = '_'
-_punctuation = ';:,.!?¡¿—…"«»“” '
-_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
-_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
-
-
-# Export all symbols:
-symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
-
-# Special symbol ids
-SPACE_ID = symbols.index(" ")
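A symbol table like this is typically used to map cleaned text onto integer ids for the model. The project's own text-processing code is not shown in this diff, so the lookup below is only an illustrative sketch of that step.

```python
# Illustrative sketch only: map characters of already-cleaned text to symbol ids.
# Assumes `symbols` from the module above is in scope.
_symbol_to_id = {s: i for i, s in enumerate(symbols)}

def text_to_sequence(text):
    # Characters not present in the table are silently skipped.
    return [_symbol_to_id[ch] for ch in text if ch in _symbol_to_id]

print(text_to_sequence("hello, world!"))
```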
diff --git a/spaces/cbg342/GPT-4-To-Midi/app.py b/spaces/cbg342/GPT-4-To-Midi/app.py
deleted file mode 100644
index 3feebedfb7361872369cf68d44eccac8e0870520..0000000000000000000000000000000000000000
--- a/spaces/cbg342/GPT-4-To-Midi/app.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import os, random, re
-from fractions import Fraction
-from midiutil.MidiFile import MIDIFile
-import streamlit as st
-import mido, openai
-
-if 'path' not in st.session_state:
- st.session_state['path'] = os.path.realpath(os.path.dirname(__file__))
-if 'sessionID' not in st.session_state:
- st.session_state['sessionID'] = random.randint(0,99999999)
-if 'history' not in st.session_state:
- st.session_state['history'] = []
-if 'downloadable' not in st.session_state:
- st.session_state['downloadable'] = False
-notes = [['C'], ['Db', 'C#'], ['D'], ['Eb', 'D#'], ['E'], ['F'], ['Gb', 'F#'], ['G'], ['Ab', 'G#'], ['A'], ['Bb', 'A#'], ['B']]
-monsters = [r'(?=6 else noteDur
- if nIndex:
- midOut.append('-'.join([notes[msg.note%12][0]+str(msg.note//12-1), noteDur, str(round(noteTime,3))]))
- else:
- midOut.append('-'.join([notes[msg.note%12][0]+str(msg.note//12-1), noteDur]))
- del opens[msg.note]
- if msg.type == 'note_on':
- opens[msg.note] = globalT
- return ', '.join(midOut)
-
-st.markdown('# GPT-4 2 Midi\n#### AI Generated Polyphonic Music\n##### plus conversion tools for use with Chat-GPT\napp by [d3nt](https://github.com/d3n7/)')
-notation = st.selectbox('Notation', ('Polyphonic', 'Monophonic'))
-main, m2t, t2m = st.tabs(['GPT4-To-Midi', 'Midi-2-Text', 'Text-2-Midi'])
-
-with main:
- userPrompt = st.text_input('Prompt', 'Full piece of sad music with multiple parts. Plan out the structure beforehand, including chords, parts (soprano, alto, tenor, bass), meter, etc.')
- with st.expander('System Prompt'):
- sysPrompt = st.text_input('', 'You are MusicGPT, a music creation and completion chat bot. When a user gives you a prompt, you return a song showing the notes, durations, and times at which they occur. Respond with just the music.')
- openaikey = st.text_input('OpenAI API Key', type='password')
- modelV = st.selectbox('Model', ('GPT-4', 'GPT-3.5-Turbo'))
- col1, col2 = st.columns(2)
- with col1:
- newSession = st.checkbox('New Session', True)
- with col2:
- showOutput = st.checkbox('Show Output', True)
- uploadMidi = st.file_uploader('Upload a midi file (OPTIONAL)')
- col3, col4 = st.columns(2)
- with col3:
- if st.button('Ask GPT'):
- if userPrompt != '' and sysPrompt != '' and openaikey != '':
- notationIndex = int(notation=='Polyphonic')
-
- if newSession:
- st.session_state['history'] = [{'role': 'system', 'content': sysPrompt+examples[notationIndex]}]
-
- prompt = userPrompt
- if uploadMidi:
- filename = ''.join(uploadMidi.name.split('.')[:-1])+str(st.session_state['sessionID'])+'.'+''.join(uploadMidi.name.split('.')[-1])
- midiPath = os.path.join(st.session_state['path'], filename)
- with open(midiPath, 'wb') as f:
- f.write(uploadMidi.getbuffer())
- prompt += '\n'+midiToStr(midiPath, notationIndex)
- os.remove(midiPath)
- st.session_state['history'].append({'role': 'user', 'content': prompt})
-
- openai.api_key = openaikey
- with st.spinner('Talking to OpenAI...'):
- r = openai.ChatCompletion.create(
- model=modelV.lower(),
- messages=st.session_state['history']
- )
- response = r['choices'][0]['message']['content']
- st.session_state['history'].append({'role': 'assistant', 'content': response})
-
- noteInfo = []
- for i in re.findall(monsters[notationIndex], response):
- n = i.split('-')
- if notationIndex:
- noteInfo.append([noteToInt(n[0]), float(Fraction(n[1]))*4, float(n[2])]) #note, duration, time
- else:
- noteInfo.append([noteToInt(n[0]), float(Fraction(n[1]))*4]) # note, duration
-
- song = MIDIFile(1, deinterleave=False)
- time = 0
- for i in noteInfo:
- if notationIndex:
- pitch, dur, time = i
- else:
- pitch, dur = i
- song.addNote(0, 0, pitch, time, dur, 100)
- if not notationIndex:
- time += dur
- with open(os.path.join(st.session_state['path'], 'out.mid'), 'wb') as f:
- song.writeFile(f)
- if not st.session_state['downloadable']:
- st.session_state['downloadable'] = True
- else:
- st.warning('Make sure OpenAI key, prompt, and system prompt are entered', icon='⚠️')
- with col4:
- if st.session_state['downloadable']:
- with open(os.path.join(st.session_state['path'], 'out.mid'), 'rb') as f:
- st.download_button('Download Midi', f, file_name='song.mid', key='main')
- if showOutput:
- with st.container():
- for i in st.session_state['history']:
- st.text(i['role']+': '+i['content']+'\n')
-
-with m2t:
- inMidi = st.file_uploader('Input')
- if st.button('Convert', key='1'):
- if inMidi:
- filename = ''.join(inMidi.name.split('.')[:-1]) + str(st.session_state['sessionID']) + '.' + ''.join(inMidi.name.split('.')[-1])
- midiPath = os.path.join(st.session_state['path'], filename)
- with open(midiPath, 'wb') as f:
- f.write(inMidi.getbuffer())
- st.text_area('Output', midiToStr(midiPath, notation=='Polyphonic'))
- os.remove(midiPath)
-
-with t2m:
- inText = st.text_input('Input')
- if st.button('Convert', key='2'):
- notationIndex = int(notation=='Polyphonic')
- noteInfo = []
- for i in re.findall(monsters[notationIndex], inText):
- n = i.split('-')
- if notationIndex:
- noteInfo.append([noteToInt(n[0]), float(Fraction(n[1])) * 4, float(n[2])]) # note, duration, time
- else:
- noteInfo.append([noteToInt(n[0]), float(Fraction(n[1])) * 4]) # note, duration
- song = MIDIFile(1, deinterleave=False)
- time = 0
- for i in noteInfo:
- if notationIndex:
- pitch, dur, time = i
- else:
- pitch, dur = i
- song.addNote(0, 0, pitch, time, dur, 100)
- if not notationIndex:
- time += dur
- with open(os.path.join(st.session_state['path'], 't2m.mid'), 'wb') as f:
- song.writeFile(f)
- with open(os.path.join(st.session_state['path'], 't2m.mid'), 'rb') as f:
- st.download_button('Download Midi', f, file_name='song.mid', key='t2m')
\ No newline at end of file
diff --git a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/libJPG/jpgd.h b/spaces/cfwef/gpt/crazy_functions/test_project/cpp/libJPG/jpgd.h
deleted file mode 100644
index a1c0cac61839a6f66a42c341f50d5e36faad9a93..0000000000000000000000000000000000000000
--- a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/libJPG/jpgd.h
+++ /dev/null
@@ -1,316 +0,0 @@
-// jpgd.h - C++ class for JPEG decompression.
-// Public domain, Rich Geldreich
-#ifndef JPEG_DECODER_H
-#define JPEG_DECODER_H
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <setjmp.h>
-
-namespace jpgd
-{
- typedef unsigned char uint8;
- typedef signed short int16;
- typedef unsigned short uint16;
- typedef unsigned int uint;
- typedef signed int int32;
-
- // Loads a JPEG image from a memory buffer or a file.
- // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA).
- // On return, width/height will be set to the image's dimensions, and actual_comps will be set to either 1 (grayscale) or 3 (RGB).
- // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly.
- // Requesting an 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp.
-// BEGIN EPIC MOD
-//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps);
- unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format);
-// END EPIC MOD
- unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps);
-
- // Success/failure error codes.
- enum jpgd_status
- {
- JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1,
- JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE,
- JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS,
- JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH,
- JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER,
- JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS,
- JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE,
- JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR,
- JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM
- };
-
- // Input stream interface.
- // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available.
- // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set.
- // If the input stream contains data after the JPEG stream's EOI (end of image) marker, it will probably be pulled into the internal buffer.
- // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding.
- class jpeg_decoder_stream
- {
- public:
- jpeg_decoder_stream() { }
- virtual ~jpeg_decoder_stream() { }
-
- // The read() method is called when the internal input buffer is empty.
- // Parameters:
- // pBuf - input buffer
- // max_bytes_to_read - maximum bytes that can be written to pBuf
- // pEOF_flag - set this to true if at end of stream (no more bytes remaining)
- // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0).
- // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full.
- virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0;
- };
-
- // stdio FILE stream class.
- class jpeg_decoder_file_stream : public jpeg_decoder_stream
- {
- jpeg_decoder_file_stream(const jpeg_decoder_file_stream &);
- jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &);
-
- FILE *m_pFile;
- bool m_eof_flag, m_error_flag;
-
- public:
- jpeg_decoder_file_stream();
- virtual ~jpeg_decoder_file_stream();
-
- bool open(const char *Pfilename);
- void close();
-
- virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
- };
-
- // Memory stream class.
- class jpeg_decoder_mem_stream : public jpeg_decoder_stream
- {
- const uint8 *m_pSrc_data;
- uint m_ofs, m_size;
-
- public:
- jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { }
- jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { }
-
- virtual ~jpeg_decoder_mem_stream() { }
-
- bool open(const uint8 *pSrc_data, uint size);
- void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; }
-
- virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
- };
-
- // Loads JPEG file from a jpeg_decoder_stream.
- unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps);
-
- enum
- {
- JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4,
- JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384
- };
-
- typedef int16 jpgd_quant_t;
- typedef int16 jpgd_block_t;
-
- class jpeg_decoder
- {
- public:
- // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc.
- // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline.
- jpeg_decoder(jpeg_decoder_stream *pStream);
-
- ~jpeg_decoder();
-
- // Call this method after constructing the object to begin decompression.
- // If JPGD_SUCCESS is returned you may then call decode() on each scanline.
- int begin_decoding();
-
- // Returns the next scan line.
- // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1).
- // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4).
- // Returns JPGD_SUCCESS if a scan line has been returned.
- // Returns JPGD_DONE if all scan lines have been returned.
- // Returns JPGD_FAILED if an error occurred. Call get_error_code() for a more info.
- int decode(const void** pScan_line, uint* pScan_line_len);
-
- inline jpgd_status get_error_code() const { return m_error_code; }
-
- inline int get_width() const { return m_image_x_size; }
- inline int get_height() const { return m_image_y_size; }
-
- inline int get_num_components() const { return m_comps_in_frame; }
-
- inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; }
- inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); }
-
- // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file).
- inline int get_total_bytes_read() const { return m_total_bytes_read; }
-
- private:
- jpeg_decoder(const jpeg_decoder &);
- jpeg_decoder &operator =(const jpeg_decoder &);
-
- typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int);
-
- struct huff_tables
- {
- bool ac_table;
- uint look_up[256];
- uint look_up2[256];
- uint8 code_size[256];
- uint tree[512];
- };
-
- struct coeff_buf
- {
- uint8 *pData;
- int block_num_x, block_num_y;
- int block_len_x, block_len_y;
- int block_size;
- };
-
- struct mem_block
- {
- mem_block *m_pNext;
- size_t m_used_count;
- size_t m_size;
- char m_data[1];
- };
-
- jmp_buf m_jmp_state;
- mem_block *m_pMem_blocks;
- int m_image_x_size;
- int m_image_y_size;
- jpeg_decoder_stream *m_pStream;
- int m_progressive_flag;
- uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES];
- uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size
- uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size
- jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables
- int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported)
- int m_comps_in_frame; // # of components in frame
- int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor
- int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor
- int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector
- int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID
- int m_comp_h_blocks[JPGD_MAX_COMPONENTS];
- int m_comp_v_blocks[JPGD_MAX_COMPONENTS];
- int m_comps_in_scan; // # of components in scan
- int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan
- int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector
- int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector
- int m_spectral_start; // spectral selection start
- int m_spectral_end; // spectral selection end
- int m_successive_low; // successive approximation low
- int m_successive_high; // successive approximation high
- int m_max_mcu_x_size; // MCU's max. X size in pixels
- int m_max_mcu_y_size; // MCU's max. Y size in pixels
- int m_blocks_per_mcu;
- int m_max_blocks_per_row;
- int m_mcus_per_row, m_mcus_per_col;
- int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU];
- int m_total_lines_left; // total # lines left in image
- int m_mcu_lines_left; // total # lines left in this MCU
- int m_real_dest_bytes_per_scan_line;
- int m_dest_bytes_per_scan_line; // rounded up
- int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y)
- huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES];
- coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS];
- coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS];
- int m_eob_run;
- int m_block_y_mcu[JPGD_MAX_COMPONENTS];
- uint8* m_pIn_buf_ofs;
- int m_in_buf_left;
- int m_tem_flag;
- bool m_eof_flag;
- uint8 m_in_buf_pad_start[128];
- uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128];
- uint8 m_in_buf_pad_end[128];
- int m_bits_left;
- uint m_bit_buf;
- int m_restart_interval;
- int m_restarts_left;
- int m_next_restart_num;
- int m_max_mcus_per_row;
- int m_max_blocks_per_mcu;
- int m_expanded_blocks_per_mcu;
- int m_expanded_blocks_per_row;
- int m_expanded_blocks_per_component;
- bool m_freq_domain_chroma_upsample;
- int m_max_mcus_per_col;
- uint m_last_dc_val[JPGD_MAX_COMPONENTS];
- jpgd_block_t* m_pMCU_coefficients;
- int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU];
- uint8* m_pSample_buf;
- int m_crr[256];
- int m_cbb[256];
- int m_crg[256];
- int m_cbg[256];
- uint8* m_pScan_line_0;
- uint8* m_pScan_line_1;
- jpgd_status m_error_code;
- bool m_ready_flag;
- int m_total_bytes_read;
-
- void free_all_blocks();
- // BEGIN EPIC MOD
- UE_NORETURN void stop_decoding(jpgd_status status);
- // END EPIC MOD
- void *alloc(size_t n, bool zero = false);
- void word_clear(void *p, uint16 c, uint n);
- void prep_in_buffer();
- void read_dht_marker();
- void read_dqt_marker();
- void read_sof_marker();
- void skip_variable_marker();
- void read_dri_marker();
- void read_sos_marker();
- int next_marker();
- int process_markers();
- void locate_soi_marker();
- void locate_sof_marker();
- int locate_sos_marker();
- void init(jpeg_decoder_stream * pStream);
- void create_look_ups();
- void fix_in_buffer();
- void transform_mcu(int mcu_row);
- void transform_mcu_expand(int mcu_row);
- coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y);
- inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y);
- void load_next_row();
- void decode_next_row();
- void make_huff_table(int index, huff_tables *pH);
- void check_quant_tables();
- void check_huff_tables();
- void calc_mcu_block_order();
- int init_scan();
- void init_frame();
- void process_restart();
- void decode_scan(pDecode_block_func decode_block_func);
- void init_progressive();
- void init_sequential();
- void decode_start();
- void decode_init(jpeg_decoder_stream * pStream);
- void H2V2Convert();
- void H2V1Convert();
- void H1V2Convert();
- void H1V1Convert();
- void gray_convert();
- void expanded_convert();
- void find_eoi();
- inline uint get_char();
- inline uint get_char(bool *pPadding_flag);
- inline void stuff_char(uint8 q);
- inline uint8 get_octet();
- inline uint get_bits(int num_bits);
- inline uint get_bits_no_markers(int numbits);
- inline int huff_decode(huff_tables *pH);
- inline int huff_decode(huff_tables *pH, int& extrabits);
- static inline uint8 clamp(int i);
- static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
- static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
- static void decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
- static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
- };
-
-} // namespace jpgd
-
-#endif // JPEG_DECODER_H
diff --git a/spaces/chasemcdo/hf_localai/api/config.go b/spaces/chasemcdo/hf_localai/api/config.go
deleted file mode 100644
index ba84e0dfaa84d5a65ff5170cef4e64f4280fcd1a..0000000000000000000000000000000000000000
--- a/spaces/chasemcdo/hf_localai/api/config.go
+++ /dev/null
@@ -1,368 +0,0 @@
-package api
-
-import (
- "encoding/json"
- "fmt"
- "io/fs"
- "os"
- "path/filepath"
- "strings"
- "sync"
-
- model "github.com/go-skynet/LocalAI/pkg/model"
- "github.com/gofiber/fiber/v2"
- "github.com/rs/zerolog/log"
- "gopkg.in/yaml.v3"
-)
-
-type Config struct {
- OpenAIRequest `yaml:"parameters"`
- Name string `yaml:"name"`
- StopWords []string `yaml:"stopwords"`
- Cutstrings []string `yaml:"cutstrings"`
- TrimSpace []string `yaml:"trimspace"`
- ContextSize int `yaml:"context_size"`
- F16 bool `yaml:"f16"`
- NUMA bool `yaml:"numa"`
- Threads int `yaml:"threads"`
- Debug bool `yaml:"debug"`
- Roles map[string]string `yaml:"roles"`
- Embeddings bool `yaml:"embeddings"`
- Backend string `yaml:"backend"`
- TemplateConfig TemplateConfig `yaml:"template"`
- MirostatETA float64 `yaml:"mirostat_eta"`
- MirostatTAU float64 `yaml:"mirostat_tau"`
- Mirostat int `yaml:"mirostat"`
- NGPULayers int `yaml:"gpu_layers"`
- MMap bool `yaml:"mmap"`
- MMlock bool `yaml:"mmlock"`
- LowVRAM bool `yaml:"low_vram"`
-
- TensorSplit string `yaml:"tensor_split"`
- MainGPU string `yaml:"main_gpu"`
- ImageGenerationAssets string `yaml:"asset_dir"`
-
- PromptCachePath string `yaml:"prompt_cache_path"`
- PromptCacheAll bool `yaml:"prompt_cache_all"`
- PromptCacheRO bool `yaml:"prompt_cache_ro"`
-
- PromptStrings, InputStrings []string
- InputToken [][]int
-}
-
-type TemplateConfig struct {
- Completion string `yaml:"completion"`
- Chat string `yaml:"chat"`
- Edit string `yaml:"edit"`
-}
-
-type ConfigMerger struct {
- configs map[string]Config
- sync.Mutex
-}
-
-func defaultConfig(modelFile string) *Config {
- return &Config{
- OpenAIRequest: defaultRequest(modelFile),
- }
-}
-
-func NewConfigMerger() *ConfigMerger {
- return &ConfigMerger{
- configs: make(map[string]Config),
- }
-}
-func ReadConfigFile(file string) ([]*Config, error) {
- c := &[]*Config{}
- f, err := os.ReadFile(file)
- if err != nil {
- return nil, fmt.Errorf("cannot read config file: %w", err)
- }
- if err := yaml.Unmarshal(f, c); err != nil {
- return nil, fmt.Errorf("cannot unmarshal config file: %w", err)
- }
-
- return *c, nil
-}
-
-func ReadConfig(file string) (*Config, error) {
- c := &Config{}
- f, err := os.ReadFile(file)
- if err != nil {
- return nil, fmt.Errorf("cannot read config file: %w", err)
- }
- if err := yaml.Unmarshal(f, c); err != nil {
- return nil, fmt.Errorf("cannot unmarshal config file: %w", err)
- }
-
- return c, nil
-}
-
-func (cm *ConfigMerger) LoadConfigFile(file string) error {
- cm.Lock()
- defer cm.Unlock()
- c, err := ReadConfigFile(file)
- if err != nil {
- return fmt.Errorf("cannot load config file: %w", err)
- }
-
- for _, cc := range c {
- cm.configs[cc.Name] = *cc
- }
- return nil
-}
-
-func (cm *ConfigMerger) LoadConfig(file string) error {
- cm.Lock()
- defer cm.Unlock()
- c, err := ReadConfig(file)
- if err != nil {
- return fmt.Errorf("cannot read config file: %w", err)
- }
-
- cm.configs[c.Name] = *c
- return nil
-}
-
-func (cm *ConfigMerger) GetConfig(m string) (Config, bool) {
- cm.Lock()
- defer cm.Unlock()
- v, exists := cm.configs[m]
- return v, exists
-}
-
-func (cm *ConfigMerger) ListConfigs() []string {
- cm.Lock()
- defer cm.Unlock()
- var res []string
- for k := range cm.configs {
- res = append(res, k)
- }
- return res
-}
-
-func (cm *ConfigMerger) LoadConfigs(path string) error {
- cm.Lock()
- defer cm.Unlock()
- entries, err := os.ReadDir(path)
- if err != nil {
- return err
- }
- files := make([]fs.FileInfo, 0, len(entries))
- for _, entry := range entries {
- info, err := entry.Info()
- if err != nil {
- return err
- }
- files = append(files, info)
- }
- for _, file := range files {
- // Skip anything that is not a YAML config file (templates, .keep files, etc.)
- if !strings.Contains(file.Name(), ".yaml") {
- continue
- }
- c, err := ReadConfig(filepath.Join(path, file.Name()))
- if err == nil {
- cm.configs[c.Name] = *c
- }
- }
-
- return nil
-}
-
-func updateConfig(config *Config, input *OpenAIRequest) {
- if input.Echo {
- config.Echo = input.Echo
- }
- if input.TopK != 0 {
- config.TopK = input.TopK
- }
- if input.TopP != 0 {
- config.TopP = input.TopP
- }
-
- if input.Temperature != 0 {
- config.Temperature = input.Temperature
- }
-
- if input.Maxtokens != 0 {
- config.Maxtokens = input.Maxtokens
- }
-
- switch stop := input.Stop.(type) {
- case string:
- if stop != "" {
- config.StopWords = append(config.StopWords, stop)
- }
- case []interface{}:
- for _, pp := range stop {
- if s, ok := pp.(string); ok {
- config.StopWords = append(config.StopWords, s)
- }
- }
- }
-
- if input.RepeatPenalty != 0 {
- config.RepeatPenalty = input.RepeatPenalty
- }
-
- if input.Keep != 0 {
- config.Keep = input.Keep
- }
-
- if input.Batch != 0 {
- config.Batch = input.Batch
- }
-
- if input.F16 {
- config.F16 = input.F16
- }
-
- if input.IgnoreEOS {
- config.IgnoreEOS = input.IgnoreEOS
- }
-
- if input.Seed != 0 {
- config.Seed = input.Seed
- }
-
- if input.Mirostat != 0 {
- config.Mirostat = input.Mirostat
- }
-
- if input.MirostatETA != 0 {
- config.MirostatETA = input.MirostatETA
- }
-
- if input.MirostatTAU != 0 {
- config.MirostatTAU = input.MirostatTAU
- }
-
- if input.TypicalP != 0 {
- config.TypicalP = input.TypicalP
- }
-
- switch inputs := input.Input.(type) {
- case string:
- if inputs != "" {
- config.InputStrings = append(config.InputStrings, inputs)
- }
- case []interface{}:
- for _, pp := range inputs {
- switch i := pp.(type) {
- case string:
- config.InputStrings = append(config.InputStrings, i)
- case []interface{}:
- tokens := []int{}
- for _, ii := range i {
- tokens = append(tokens, int(ii.(float64)))
- }
- config.InputToken = append(config.InputToken, tokens)
- }
- }
- }
-
- switch p := input.Prompt.(type) {
- case string:
- config.PromptStrings = append(config.PromptStrings, p)
- case []interface{}:
- for _, pp := range p {
- if s, ok := pp.(string); ok {
- config.PromptStrings = append(config.PromptStrings, s)
- }
- }
- }
-}
-func readInput(c *fiber.Ctx, loader *model.ModelLoader, randomModel bool) (string, *OpenAIRequest, error) {
- input := new(OpenAIRequest)
- // Get input data from the request body
- if err := c.BodyParser(input); err != nil {
- return "", nil, err
- }
-
- modelFile := input.Model
-
- if c.Params("model") != "" {
- modelFile = c.Params("model")
- }
-
- received, _ := json.Marshal(input)
-
- log.Debug().Msgf("Request received: %s", string(received))
-
- // Set model from bearer token, if available
- bearer := strings.TrimPrefix(c.Get("authorization"), "Bearer ")
- bearerExists := bearer != "" && loader.ExistsInModelPath(bearer)
-
- // If no model was specified, take the first available
- if modelFile == "" && !bearerExists && randomModel {
- models, _ := loader.ListModels()
- if len(models) > 0 {
- modelFile = models[0]
- log.Debug().Msgf("No model specified, using: %s", modelFile)
- } else {
- log.Debug().Msgf("No model specified, returning error")
- return "", nil, fmt.Errorf("no model specified")
- }
- }
-
- // If a model is found in bearer token takes precedence
- if bearerExists {
- log.Debug().Msgf("Using model from bearer token: %s", bearer)
- modelFile = bearer
- }
- return modelFile, input, nil
-}
-
-func readConfig(modelFile string, input *OpenAIRequest, cm *ConfigMerger, loader *model.ModelLoader, debug bool, threads, ctx int, f16 bool) (*Config, *OpenAIRequest, error) {
- // Load a config file if present after the model name
- modelConfig := filepath.Join(loader.ModelPath, modelFile+".yaml")
-
- var config *Config
-
- defaults := func() {
- config = defaultConfig(modelFile)
- config.ContextSize = ctx
- config.Threads = threads
- config.F16 = f16
- config.Debug = debug
- }
-
- cfg, exists := cm.GetConfig(modelFile)
- if !exists {
- if _, err := os.Stat(modelConfig); err == nil {
- if err := cm.LoadConfig(modelConfig); err != nil {
- return nil, nil, fmt.Errorf("failed loading model config (%s) %s", modelConfig, err.Error())
- }
- cfg, exists = cm.GetConfig(modelFile)
- if exists {
- config = &cfg
- } else {
- defaults()
- }
- } else {
- defaults()
- }
- } else {
- config = &cfg
- }
-
- // Set the parameters for the language model prediction
- updateConfig(config, input)
-
- // Don't allow 0 as setting
- if config.Threads == 0 {
- if threads != 0 {
- config.Threads = threads
- } else {
- config.Threads = 4
- }
- }
-
- // Enforce debug flag if passed from CLI
- if debug {
- config.Debug = true
- }
-
- return config, input, nil
-}
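For orientation, a per-model YAML file of the kind `ReadConfig` parses might look roughly like the sketch below. The top-level keys come from the struct tags above; the keys inside `parameters` are assumptions, since `OpenAIRequest` is defined elsewhere and not shown in this diff.

```python
# Write a hypothetical model config of the shape ReadConfig above expects.
# Top-level keys mirror the Go struct tags; the `parameters` keys are assumed.
import yaml  # pip install pyyaml

model_config = {
    "name": "my-model",
    "backend": "llama",
    "context_size": 2048,
    "threads": 4,
    "f16": True,
    "stopwords": ["###"],
    "template": {"completion": "completion", "chat": "chat"},
    "parameters": {"model": "my-model.bin", "temperature": 0.7, "top_p": 0.9},
}

with open("my-model.yaml", "w") as f:
    yaml.safe_dump(model_config, f, sort_keys=False)
```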
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/docs/manipulate_training_image_size.md b/spaces/chendl/compositional_test/multimodal/YOLOX/docs/manipulate_training_image_size.md
deleted file mode 100644
index 7a4e8560b672b931f3f64a2f502ea0e863ae2338..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/docs/manipulate_training_image_size.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Manipulating Your Training Image Size
-
-This tutorial explains how to control your image size when training on your own data.
-
-## 1. Introduction
-
-There are 3 hyperparameters that control the training size:
-
-- self.input_size = (640, 640) #(height, width)
-- self.multiscale_range = 5
-- self.random_size = (14, 26)
-
-There is 1 hyperparameter that controls the testing size:
-
-- self.test_size = (640, 640)
-
-It is suggested to set self.input_size to the same value as self.test_size. By default, both are set to (640, 640) for most models and to (416, 416) for yolox-tiny and yolox-nano.
-
-## 2. Multi Scale Training
-
-When training on your custom dataset, you can use multiscale training in 2 ways:
-
-1. **【Default】Only specify self.input_size and leave the others unchanged.**
-
- If so, the actual multiscale range is:
-
- [self.input_size[0] - self.multiscale_range\*32, self.input_size[0] + self.multiscale_range\*32]
-
- For example, if you only set:
-
- ```python
- self.input_size = (640, 640)
- ```
-
- the actual multiscale range is [640 - 5\*32, 640 + 5\*32], i.e., [480, 800].
-
- You can modify self.multiscale_range to change the multiscale range.
-
-2. **Simultaneously specify self.input_size and self.random_size**
-
- ```python
- self.input_size = (416, 416)
- self.random_size = (10, 20)
- ```
-
- In this case, the actual multiscale range is [self.random_size[0]\*32, self.random_size[1]\*32], i.e., [320, 640] (see the sketch after this list).
-
- **Note: You must still specify self.input_size because it is used to initialize the resize augmentation in the dataset.**
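-
-The following minimal sketch (not part of YOLOX; the helper name is made up) reproduces the arithmetic behind the two configurations above, plus the single-scale case from the next section:
-
-```python
-def effective_multiscale_range(input_size, multiscale_range=5, random_size=None):
-    """Return (min_size, max_size) in pixels for the training resize."""
-    if random_size is not None:
-        # An explicit random_size takes precedence; sizes are multiples of 32.
-        return random_size[0] * 32, random_size[1] * 32
-    base = input_size[0]
-    return base - multiscale_range * 32, base + multiscale_range * 32
-
-print(effective_multiscale_range((640, 640)))                        # (480, 800)
-print(effective_multiscale_range((416, 416), random_size=(10, 20)))  # (320, 640)
-print(effective_multiscale_range((416, 416), multiscale_range=0))    # (416, 416)
-```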
-
-## 3. Single Scale Training
-
-If you want to train at a single scale, you need to specify self.input_size and set self.multiscale_range = 0:
-
-```python
-self.input_size = (416, 416)
-self.multiscale_range = 0
-```
-
-**DO NOT** set self.random_size in this case.
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ufoLib/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ufoLib/__init__.py
deleted file mode 100644
index 1a456a206f815ffdf624e4c420539a9eaf1903ca..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ufoLib/__init__.py
+++ /dev/null
@@ -1,2464 +0,0 @@
-import os
-from copy import deepcopy
-from os import fsdecode
-import logging
-import zipfile
-import enum
-from collections import OrderedDict
-import fs
-import fs.base
-import fs.subfs
-import fs.errors
-import fs.copy
-import fs.osfs
-import fs.zipfs
-import fs.tempfs
-import fs.tools
-from fontTools.misc import plistlib
-from fontTools.ufoLib.validators import *
-from fontTools.ufoLib.filenames import userNameToFileName
-from fontTools.ufoLib.converters import convertUFO1OrUFO2KerningToUFO3Kerning
-from fontTools.ufoLib.errors import UFOLibError
-from fontTools.ufoLib.utils import numberTypes, _VersionTupleEnumMixin
-
-"""
-A library for importing .ufo files and their descendants.
-Refer to http://unifiedfontobject.org for the UFO specification.
-
-The UFOReader and UFOWriter classes support versions 1, 2 and 3
-of the specification.
-
-Sets that list the font info attribute names for the fontinfo.plist
-formats are available for external use. These are:
- fontInfoAttributesVersion1
- fontInfoAttributesVersion2
- fontInfoAttributesVersion3
-
-A set listing the fontinfo.plist attributes that were deprecated
-in version 2 is available for external use:
- deprecatedFontInfoAttributesVersion2
-
-Functions that do basic validation on values for fontinfo.plist
-are available for external use. These are
- validateFontInfoVersion2ValueForAttribute
- validateFontInfoVersion3ValueForAttribute
-
-Value conversion functions are available for converting
-fontinfo.plist values between the possible format versions.
- convertFontInfoValueForAttributeFromVersion1ToVersion2
- convertFontInfoValueForAttributeFromVersion2ToVersion1
- convertFontInfoValueForAttributeFromVersion2ToVersion3
- convertFontInfoValueForAttributeFromVersion3ToVersion2
-"""
-
-__all__ = [
- "makeUFOPath",
- "UFOLibError",
- "UFOReader",
- "UFOWriter",
- "UFOReaderWriter",
- "UFOFileStructure",
- "fontInfoAttributesVersion1",
- "fontInfoAttributesVersion2",
- "fontInfoAttributesVersion3",
- "deprecatedFontInfoAttributesVersion2",
- "validateFontInfoVersion2ValueForAttribute",
- "validateFontInfoVersion3ValueForAttribute",
- "convertFontInfoValueForAttributeFromVersion1ToVersion2",
- "convertFontInfoValueForAttributeFromVersion2ToVersion1",
-]
-
-__version__ = "3.0.0"
-
-
-logger = logging.getLogger(__name__)
-
-
-# ---------
-# Constants
-# ---------
-
-DEFAULT_GLYPHS_DIRNAME = "glyphs"
-DATA_DIRNAME = "data"
-IMAGES_DIRNAME = "images"
-METAINFO_FILENAME = "metainfo.plist"
-FONTINFO_FILENAME = "fontinfo.plist"
-LIB_FILENAME = "lib.plist"
-GROUPS_FILENAME = "groups.plist"
-KERNING_FILENAME = "kerning.plist"
-FEATURES_FILENAME = "features.fea"
-LAYERCONTENTS_FILENAME = "layercontents.plist"
-LAYERINFO_FILENAME = "layerinfo.plist"
-
-DEFAULT_LAYER_NAME = "public.default"
-
-
-class UFOFormatVersion(tuple, _VersionTupleEnumMixin, enum.Enum):
- FORMAT_1_0 = (1, 0)
- FORMAT_2_0 = (2, 0)
- FORMAT_3_0 = (3, 0)
-
-
-# Python 3.11 doesn't like it when a mixin overrides a dunder method like __str__;
-# for some reason it keeps using Enum.__str__, see
-# https://github.com/fonttools/fonttools/pull/2655
-UFOFormatVersion.__str__ = _VersionTupleEnumMixin.__str__
-
-
-class UFOFileStructure(enum.Enum):
- ZIP = "zip"
- PACKAGE = "package"
-
-
-# --------------
-# Shared Methods
-# --------------
-
-
-class _UFOBaseIO:
- def getFileModificationTime(self, path):
- """
- Returns the modification time for the file at the given path, as a
- floating point number giving the number of seconds since the epoch.
- The path must be relative to the UFO path.
- Returns None if the file does not exist.
- """
- try:
- dt = self.fs.getinfo(fsdecode(path), namespaces=["details"]).modified
- except (fs.errors.MissingInfoNamespace, fs.errors.ResourceNotFound):
- return None
- else:
- return dt.timestamp()
-
- def _getPlist(self, fileName, default=None):
- """
- Read a property list relative to the UFO filesystem's root.
- Raises UFOLibError if the file is missing and default is None,
- otherwise default is returned.
-
- The errors that could be raised during the reading of a plist are
- unpredictable and/or too numerous to list, so a blind try/except
- is done. If an exception occurs, a UFOLibError will be raised.
- """
- try:
- with self.fs.open(fileName, "rb") as f:
- return plistlib.load(f)
- except fs.errors.ResourceNotFound:
- if default is None:
- raise UFOLibError(
- "'%s' is missing on %s. This file is required" % (fileName, self.fs)
- )
- else:
- return default
- except Exception as e:
- # TODO(anthrotype): try to narrow this down a little
- raise UFOLibError(f"'{fileName}' could not be read on {self.fs}: {e}")
-
- def _writePlist(self, fileName, obj):
- """
- Write a property list to a file relative to the UFO filesystem's root.
-
- Do this sort of atomically, making it harder to corrupt existing files,
- for example when plistlib encounters an error halfway during write.
- This also checks to see if text matches the text that is already in the
- file at path. If so, the file is not rewritten so that the modification
- date is preserved.
-
- The errors that could be raised during the writing of a plist are
- unpredictable and/or too numerous to list, so a blind try/except is done.
- If an exception occurs, a UFOLibError will be raised.
- """
- if self._havePreviousFile:
- try:
- data = plistlib.dumps(obj)
- except Exception as e:
- raise UFOLibError(
- "'%s' could not be written on %s because "
- "the data is not properly formatted: %s" % (fileName, self.fs, e)
- )
- if self.fs.exists(fileName) and data == self.fs.readbytes(fileName):
- return
- self.fs.writebytes(fileName, data)
- else:
- with self.fs.openbin(fileName, mode="w") as fp:
- try:
- plistlib.dump(obj, fp)
- except Exception as e:
- raise UFOLibError(
- "'%s' could not be written on %s because "
- "the data is not properly formatted: %s"
- % (fileName, self.fs, e)
- )
-
-
-# ----------
-# UFO Reader
-# ----------
-
-
-class UFOReader(_UFOBaseIO):
-
- """
- Read the various components of the .ufo.
-
- By default read data is validated. Set ``validate`` to
- ``False`` to not validate the data.
- """
-
- def __init__(self, path, validate=True):
- if hasattr(path, "__fspath__"): # support os.PathLike objects
- path = path.__fspath__()
-
- if isinstance(path, str):
- structure = _sniffFileStructure(path)
- try:
- if structure is UFOFileStructure.ZIP:
- parentFS = fs.zipfs.ZipFS(path, write=False, encoding="utf-8")
- else:
- parentFS = fs.osfs.OSFS(path)
- except fs.errors.CreateFailed as e:
- raise UFOLibError(f"unable to open '{path}': {e}")
-
- if structure is UFOFileStructure.ZIP:
- # .ufoz zip files must contain a single root directory, with arbitrary
- # name, containing all the UFO files
- rootDirs = [
- p.name
- for p in parentFS.scandir("/")
- # exclude macOS metadata contained in zip file
- if p.is_dir and p.name != "__MACOSX"
- ]
- if len(rootDirs) == 1:
- # 'ClosingSubFS' ensures that the parent zip file is closed when
- # its root subdirectory is closed
- self.fs = parentFS.opendir(
- rootDirs[0], factory=fs.subfs.ClosingSubFS
- )
- else:
- raise UFOLibError(
- "Expected exactly 1 root directory, found %d" % len(rootDirs)
- )
- else:
- # normal UFO 'packages' are just a single folder
- self.fs = parentFS
- # when passed a path string, we make sure we close the newly opened fs
- # upon calling UFOReader.close method or context manager's __exit__
- self._shouldClose = True
- self._fileStructure = structure
- elif isinstance(path, fs.base.FS):
- filesystem = path
- try:
- filesystem.check()
- except fs.errors.FilesystemClosed:
- raise UFOLibError("the filesystem '%s' is closed" % path)
- else:
- self.fs = filesystem
- try:
- path = filesystem.getsyspath("/")
- except fs.errors.NoSysPath:
- # network or in-memory FS may not map to the local one
- path = str(filesystem)
- # when the user passes an already initialized fs instance, it is their
- # responsibility to close it, thus UFOReader.close/__exit__ are no-ops
- self._shouldClose = False
- # default to a 'package' structure
- self._fileStructure = UFOFileStructure.PACKAGE
- else:
- raise TypeError(
- "Expected a path string or fs.base.FS object, found '%s'"
- % type(path).__name__
- )
- self._path = fsdecode(path)
- self._validate = validate
- self._upConvertedKerningData = None
-
- try:
- self.readMetaInfo(validate=validate)
- except UFOLibError:
- self.close()
- raise
-
- # properties
-
- def _get_path(self):
- import warnings
-
- warnings.warn(
- "The 'path' attribute is deprecated; use the 'fs' attribute instead",
- DeprecationWarning,
- stacklevel=2,
- )
- return self._path
-
- path = property(_get_path, doc="The path of the UFO (DEPRECATED).")
-
- def _get_formatVersion(self):
- import warnings
-
- warnings.warn(
- "The 'formatVersion' attribute is deprecated; use the 'formatVersionTuple'",
- DeprecationWarning,
- stacklevel=2,
- )
- return self._formatVersion.major
-
- formatVersion = property(
- _get_formatVersion,
- doc="The (major) format version of the UFO. DEPRECATED: Use formatVersionTuple",
- )
-
- @property
- def formatVersionTuple(self):
- """The (major, minor) format version of the UFO.
- This is determined by reading metainfo.plist during __init__.
- """
- return self._formatVersion
-
- def _get_fileStructure(self):
- return self._fileStructure
-
- fileStructure = property(
- _get_fileStructure,
- doc=(
- "The file structure of the UFO: "
- "either UFOFileStructure.ZIP or UFOFileStructure.PACKAGE"
- ),
- )
-
- # up conversion
-
- def _upConvertKerning(self, validate):
- """
- Up convert kerning and groups in UFO 1 and 2.
- The data will be held internally until each bit of data
- has been retrieved. The conversion of both must be done
- at once, so the raw data is cached and an error is raised
- if one bit of data becomes obsolete before it is called.
-
- ``validate`` will validate the data.
- """
- if self._upConvertedKerningData:
- testKerning = self._readKerning()
- if testKerning != self._upConvertedKerningData["originalKerning"]:
- raise UFOLibError(
- "The data in kerning.plist has been modified since it was converted to UFO 3 format."
- )
- testGroups = self._readGroups()
- if testGroups != self._upConvertedKerningData["originalGroups"]:
- raise UFOLibError(
- "The data in groups.plist has been modified since it was converted to UFO 3 format."
- )
- else:
- groups = self._readGroups()
- if validate:
- invalidFormatMessage = "groups.plist is not properly formatted."
- if not isinstance(groups, dict):
- raise UFOLibError(invalidFormatMessage)
- for groupName, glyphList in groups.items():
- if not isinstance(groupName, str):
- raise UFOLibError(invalidFormatMessage)
- elif not isinstance(glyphList, list):
- raise UFOLibError(invalidFormatMessage)
- for glyphName in glyphList:
- if not isinstance(glyphName, str):
- raise UFOLibError(invalidFormatMessage)
- self._upConvertedKerningData = dict(
- kerning={},
- originalKerning=self._readKerning(),
- groups={},
- originalGroups=groups,
- )
- # convert kerning and groups
- kerning, groups, conversionMaps = convertUFO1OrUFO2KerningToUFO3Kerning(
- self._upConvertedKerningData["originalKerning"],
- deepcopy(self._upConvertedKerningData["originalGroups"]),
- self.getGlyphSet(),
- )
- # store
- self._upConvertedKerningData["kerning"] = kerning
- self._upConvertedKerningData["groups"] = groups
- self._upConvertedKerningData["groupRenameMaps"] = conversionMaps
-
- # support methods
-
- def readBytesFromPath(self, path):
- """
- Returns the bytes in the file at the given path.
- The path must be relative to the UFO's filesystem root.
- Returns None if the file does not exist.
- """
- try:
- return self.fs.readbytes(fsdecode(path))
- except fs.errors.ResourceNotFound:
- return None
-
- def getReadFileForPath(self, path, encoding=None):
- """
- Returns a file (or file-like) object for the file at the given path.
- The path must be relative to the UFO path.
- Returns None if the file does not exist.
- By default the file is opened in binary mode (reads bytes).
- If encoding is passed, the file is opened in text mode (reads str).
-
- Note: The caller is responsible for closing the open file.
- """
- path = fsdecode(path)
- try:
- if encoding is None:
- return self.fs.openbin(path)
- else:
- return self.fs.open(path, mode="r", encoding=encoding)
- except fs.errors.ResourceNotFound:
- return None
-
- # metainfo.plist
-
- def _readMetaInfo(self, validate=None):
- """
- Read metainfo.plist and return raw data. Only used for internal operations.
-
- ``validate`` will validate the read data, by default it is set
- to the class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- data = self._getPlist(METAINFO_FILENAME)
- if validate and not isinstance(data, dict):
- raise UFOLibError("metainfo.plist is not properly formatted.")
- try:
- formatVersionMajor = data["formatVersion"]
- except KeyError:
- raise UFOLibError(
- f"Missing required formatVersion in '{METAINFO_FILENAME}' on {self.fs}"
- )
- formatVersionMinor = data.setdefault("formatVersionMinor", 0)
-
- try:
- formatVersion = UFOFormatVersion((formatVersionMajor, formatVersionMinor))
- except ValueError as e:
- unsupportedMsg = (
- f"Unsupported UFO format ({formatVersionMajor}.{formatVersionMinor}) "
- f"in '{METAINFO_FILENAME}' on {self.fs}"
- )
- if validate:
- from fontTools.ufoLib.errors import UnsupportedUFOFormat
-
- raise UnsupportedUFOFormat(unsupportedMsg) from e
-
- formatVersion = UFOFormatVersion.default()
- logger.warning(
- "%s. Assuming the latest supported version (%s). "
- "Some data may be skipped or parsed incorrectly",
- unsupportedMsg,
- formatVersion,
- )
- data["formatVersionTuple"] = formatVersion
- return data
-
- def readMetaInfo(self, validate=None):
- """
- Read metainfo.plist and set formatVersion. Only used for internal operations.
-
- ``validate`` will validate the read data, by default it is set
- to the class's validate value, can be overridden.
- """
- data = self._readMetaInfo(validate=validate)
- self._formatVersion = data["formatVersionTuple"]
-
- # groups.plist
-
- def _readGroups(self):
- groups = self._getPlist(GROUPS_FILENAME, {})
- # remove any duplicate glyphs in a kerning group
- for groupName, glyphList in groups.items():
- if groupName.startswith(("public.kern1.", "public.kern2.")):
- groups[groupName] = list(OrderedDict.fromkeys(glyphList))
- return groups
-
- def readGroups(self, validate=None):
- """
- Read groups.plist. Returns a dict.
- ``validate`` will validate the read data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # handle up conversion
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- self._upConvertKerning(validate)
- groups = self._upConvertedKerningData["groups"]
- # normal
- else:
- groups = self._readGroups()
- if validate:
- valid, message = groupsValidator(groups)
- if not valid:
- raise UFOLibError(message)
- return groups
-
- def getKerningGroupConversionRenameMaps(self, validate=None):
- """
- Get maps defining the renaming that was done during any
- needed kerning group conversion. This method returns a
- dictionary of this form::
-
- {
- "side1" : {"old group name" : "new group name"},
- "side2" : {"old group name" : "new group name"}
- }
-
- When no conversion has been performed, the side1 and side2
- dictionaries will be empty.
-
- ``validate`` will validate the groups, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion >= UFOFormatVersion.FORMAT_3_0:
- return dict(side1={}, side2={})
- # use the public group reader to force the load and
- # conversion of the data if it hasn't happened yet.
- self.readGroups(validate=validate)
- return self._upConvertedKerningData["groupRenameMaps"]
-
- # fontinfo.plist
-
- def _readInfo(self, validate):
- data = self._getPlist(FONTINFO_FILENAME, {})
- if validate and not isinstance(data, dict):
- raise UFOLibError("fontinfo.plist is not properly formatted.")
- return data
-
- def readInfo(self, info, validate=None):
- """
- Read fontinfo.plist. It requires an object that allows
- setting attributes with names that follow the fontinfo.plist
- version 3 specification. This will write the attributes
- defined in the file into the object.
-
- ``validate`` will validate the read data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- infoDict = self._readInfo(validate)
- infoDataToSet = {}
- # version 1
- if self._formatVersion == UFOFormatVersion.FORMAT_1_0:
- for attr in fontInfoAttributesVersion1:
- value = infoDict.get(attr)
- if value is not None:
- infoDataToSet[attr] = value
- infoDataToSet = _convertFontInfoDataVersion1ToVersion2(infoDataToSet)
- infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet)
- # version 2
- elif self._formatVersion == UFOFormatVersion.FORMAT_2_0:
- for attr, dataValidationDict in list(
- fontInfoAttributesVersion2ValueData.items()
- ):
- value = infoDict.get(attr)
- if value is None:
- continue
- infoDataToSet[attr] = value
- infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet)
- # version 3.x
- elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major:
- for attr, dataValidationDict in list(
- fontInfoAttributesVersion3ValueData.items()
- ):
- value = infoDict.get(attr)
- if value is None:
- continue
- infoDataToSet[attr] = value
- # unsupported version
- else:
- raise NotImplementedError(self._formatVersion)
- # validate data
- if validate:
- infoDataToSet = validateInfoVersion3Data(infoDataToSet)
- # populate the object
- for attr, value in list(infoDataToSet.items()):
- try:
- setattr(info, attr, value)
- except AttributeError:
- raise UFOLibError(
- "The supplied info object does not support setting a necessary attribute (%s)."
- % attr
- )
-
- # kerning.plist
-
- def _readKerning(self):
- data = self._getPlist(KERNING_FILENAME, {})
- return data
-
- def readKerning(self, validate=None):
- """
- Read kerning.plist. Returns a dict.
-
- ``validate`` will validate the kerning data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # handle up conversion
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- self._upConvertKerning(validate)
- kerningNested = self._upConvertedKerningData["kerning"]
- # normal
- else:
- kerningNested = self._readKerning()
- if validate:
- valid, message = kerningValidator(kerningNested)
- if not valid:
- raise UFOLibError(message)
- # flatten nested {left: {right: value}} into {(left, right): value}
- kerning = {}
- for left in kerningNested:
- for right in kerningNested[left]:
- value = kerningNested[left][right]
- kerning[left, right] = value
- return kerning
-
- # lib.plist
-
- def readLib(self, validate=None):
- """
- Read lib.plist. Returns a dict.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- data = self._getPlist(LIB_FILENAME, {})
- if validate:
- valid, message = fontLibValidator(data)
- if not valid:
- raise UFOLibError(message)
- return data
-
- # features.fea
-
- def readFeatures(self):
- """
- Read features.fea. Return a string.
- The returned string is empty if the file is missing.
- """
- try:
- with self.fs.open(FEATURES_FILENAME, "r", encoding="utf-8") as f:
- return f.read()
- except fs.errors.ResourceNotFound:
- return ""
-
- # glyph sets & layers
-
- def _readLayerContents(self, validate):
- """
- Rebuild the layer contents list by checking what glyphsets
- are available on disk.
-
- ``validate`` will validate the layer contents.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- return [(DEFAULT_LAYER_NAME, DEFAULT_GLYPHS_DIRNAME)]
- contents = self._getPlist(LAYERCONTENTS_FILENAME)
- if validate:
- valid, error = layerContentsValidator(contents, self.fs)
- if not valid:
- raise UFOLibError(error)
- return contents
-
- def getLayerNames(self, validate=None):
- """
- Get the ordered layer names from layercontents.plist.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- layerContents = self._readLayerContents(validate)
- layerNames = [layerName for layerName, directoryName in layerContents]
- return layerNames
-
- def getDefaultLayerName(self, validate=None):
- """
- Get the default layer name from layercontents.plist.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- layerContents = self._readLayerContents(validate)
- for layerName, layerDirectory in layerContents:
- if layerDirectory == DEFAULT_GLYPHS_DIRNAME:
- return layerName
- # this will already have been raised during __init__
- raise UFOLibError("The default layer is not defined in layercontents.plist.")
-
- def getGlyphSet(self, layerName=None, validateRead=None, validateWrite=None):
- """
- Return the GlyphSet associated with the
- glyphs directory mapped to layerName
- in the UFO. If layerName is not provided,
- the name retrieved with getDefaultLayerName
- will be used.
-
- ``validateRead`` will validate the read data, by default it is set to the
- class's validate value, can be overridden.
- ``validateWrite`` will validate the written data, by default it is set to the
- class's validate value, can be overridden.
- """
- from fontTools.ufoLib.glifLib import GlyphSet
-
- if validateRead is None:
- validateRead = self._validate
- if validateWrite is None:
- validateWrite = self._validate
- if layerName is None:
- layerName = self.getDefaultLayerName(validate=validateRead)
- directory = None
- layerContents = self._readLayerContents(validateRead)
- for storedLayerName, storedLayerDirectory in layerContents:
- if layerName == storedLayerName:
- directory = storedLayerDirectory
- break
- if directory is None:
- raise UFOLibError('No glyphs directory is mapped to "%s".' % layerName)
- try:
- glyphSubFS = self.fs.opendir(directory)
- except fs.errors.ResourceNotFound:
- raise UFOLibError(f"No '{directory}' directory for layer '{layerName}'")
- return GlyphSet(
- glyphSubFS,
- ufoFormatVersion=self._formatVersion,
- validateRead=validateRead,
- validateWrite=validateWrite,
- expectContentsFile=True,
- )
-
- def getCharacterMapping(self, layerName=None, validate=None):
- """
- Return a dictionary that maps unicode values (ints) to
- lists of glyph names.
- """
- if validate is None:
- validate = self._validate
- glyphSet = self.getGlyphSet(
- layerName, validateRead=validate, validateWrite=True
- )
- allUnicodes = glyphSet.getUnicodes()
- cmap = {}
- for glyphName, unicodes in allUnicodes.items():
- for code in unicodes:
- if code in cmap:
- cmap[code].append(glyphName)
- else:
- cmap[code] = [glyphName]
- return cmap
-
- # /data
-
- def getDataDirectoryListing(self):
- """
- Returns a list of all files in the data directory.
- The returned paths will be relative to the UFO.
- This will not list directory names, only file names.
- Thus, empty directories will be skipped.
- """
- try:
- self._dataFS = self.fs.opendir(DATA_DIRNAME)
- except fs.errors.ResourceNotFound:
- return []
- except fs.errors.DirectoryExpected:
- raise UFOLibError('The UFO contains a "data" file instead of a directory.')
- try:
- # fs Walker.files method returns "absolute" paths (in terms of the
- # root of the 'data' SubFS), so we strip the leading '/' to make
- # them relative
- return [p.lstrip("/") for p in self._dataFS.walk.files()]
- except fs.errors.ResourceError:
- return []
-
- def getImageDirectoryListing(self, validate=None):
- """
- Returns a list of all image file names in
- the images directory. Each of the images will
- have been verified to have the PNG signature.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- return []
- if validate is None:
- validate = self._validate
- try:
- self._imagesFS = imagesFS = self.fs.opendir(IMAGES_DIRNAME)
- except fs.errors.ResourceNotFound:
- return []
- except fs.errors.DirectoryExpected:
- raise UFOLibError(
- 'The UFO contains an "images" file instead of a directory.'
- )
- result = []
- for path in imagesFS.scandir("/"):
- if path.is_dir:
- # silently skip this as version control
- # systems often have hidden directories
- continue
- if validate:
- with imagesFS.openbin(path.name) as fp:
- valid, error = pngValidator(fileObj=fp)
- if valid:
- result.append(path.name)
- else:
- result.append(path.name)
- return result
-
- def readData(self, fileName):
- """
- Return bytes for the file named 'fileName' inside the 'data/' directory.
- """
- fileName = fsdecode(fileName)
- try:
- try:
- dataFS = self._dataFS
- except AttributeError:
- # in case readData is called before getDataDirectoryListing
- dataFS = self.fs.opendir(DATA_DIRNAME)
- data = dataFS.readbytes(fileName)
- except fs.errors.ResourceNotFound:
- raise UFOLibError(f"No data file named '{fileName}' on {self.fs}")
- return data
-
- def readImage(self, fileName, validate=None):
- """
- Return image data for the file named fileName.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- raise UFOLibError(
- f"Reading images is not allowed in UFO {self._formatVersion.major}."
- )
- fileName = fsdecode(fileName)
- try:
- try:
- imagesFS = self._imagesFS
- except AttributeError:
- # in case readImage is called before getImageDirectoryListing
- imagesFS = self.fs.opendir(IMAGES_DIRNAME)
- data = imagesFS.readbytes(fileName)
- except fs.errors.ResourceNotFound:
- raise UFOLibError(f"No image file named '{fileName}' on {self.fs}")
- if validate:
- valid, error = pngValidator(data=data)
- if not valid:
- raise UFOLibError(error)
- return data
-
- def close(self):
- if self._shouldClose:
- self.fs.close()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, exc_tb):
- self.close()
-
-
-# ----------
-# UFO Writer
-# ----------
-
-
-class UFOWriter(UFOReader):
-
- """
- Write the various components of the .ufo.
-
- By default, the written data will be validated before writing. Set ``validate`` to
- ``False`` if you do not want to validate the data. Validation can also be overridden
- on a per-method level if desired.
-
- The ``formatVersion`` argument allows you to specify the UFO format version as a tuple
- of integers (major, minor), or as a single integer for the major digit only (minor
- is implied as 0). By default the latest formatVersion will be used; currently it's
- 3.0, which is equivalent to formatVersion=(3, 0).
-
- An UnsupportedUFOFormat exception is raised if the requested UFO formatVersion is
- not supported.
- """
-
- def __init__(
- self,
- path,
- formatVersion=None,
- fileCreator="com.github.fonttools.ufoLib",
- structure=None,
- validate=True,
- ):
- try:
- formatVersion = UFOFormatVersion(formatVersion)
- except ValueError as e:
- from fontTools.ufoLib.errors import UnsupportedUFOFormat
-
- raise UnsupportedUFOFormat(
- f"Unsupported UFO format: {formatVersion!r}"
- ) from e
-
- if hasattr(path, "__fspath__"): # support os.PathLike objects
- path = path.__fspath__()
-
- if isinstance(path, str):
- # normalize path by removing trailing or double slashes
- path = os.path.normpath(path)
- havePreviousFile = os.path.exists(path)
- if havePreviousFile:
- # ensure we use the same structure as the destination
- existingStructure = _sniffFileStructure(path)
- if structure is not None:
- try:
- structure = UFOFileStructure(structure)
- except ValueError:
- raise UFOLibError(
- "Invalid or unsupported structure: '%s'" % structure
- )
- if structure is not existingStructure:
- raise UFOLibError(
- "A UFO with a different structure (%s) already exists "
- "at the given path: '%s'" % (existingStructure, path)
- )
- else:
- structure = existingStructure
- else:
- # if not exists, default to 'package' structure
- if structure is None:
- structure = UFOFileStructure.PACKAGE
- dirName = os.path.dirname(path)
- if dirName and not os.path.isdir(dirName):
- raise UFOLibError(
- "Cannot write to '%s': directory does not exist" % path
- )
- if structure is UFOFileStructure.ZIP:
- if havePreviousFile:
- # we can't write a zip in-place, so we have to copy its
- # contents to a temporary location and work from there, then
- # upon closing UFOWriter we create the final zip file
- parentFS = fs.tempfs.TempFS()
- with fs.zipfs.ZipFS(path, encoding="utf-8") as origFS:
- fs.copy.copy_fs(origFS, parentFS)
- # if output path is an existing zip, we require that it contains
- # one, and only one, root directory (with arbitrary name), in turn
- # containing all the existing UFO contents
- rootDirs = [
- p.name
- for p in parentFS.scandir("/")
- # exclude macOS metadata contained in zip file
- if p.is_dir and p.name != "__MACOSX"
- ]
- if len(rootDirs) != 1:
- raise UFOLibError(
- "Expected exactly 1 root directory, found %d"
- % len(rootDirs)
- )
- else:
- # 'ClosingSubFS' ensures that the parent filesystem is closed
- # when its root subdirectory is closed
- self.fs = parentFS.opendir(
- rootDirs[0], factory=fs.subfs.ClosingSubFS
- )
- else:
- # if the output zip file didn't exist, we create the root folder;
- # we name it the same as input 'path', but with '.ufo' extension
- rootDir = os.path.splitext(os.path.basename(path))[0] + ".ufo"
- parentFS = fs.zipfs.ZipFS(path, write=True, encoding="utf-8")
- parentFS.makedir(rootDir)
- self.fs = parentFS.opendir(rootDir, factory=fs.subfs.ClosingSubFS)
- else:
- self.fs = fs.osfs.OSFS(path, create=True)
- self._fileStructure = structure
- self._havePreviousFile = havePreviousFile
- self._shouldClose = True
- elif isinstance(path, fs.base.FS):
- filesystem = path
- try:
- filesystem.check()
- except fs.errors.FilesystemClosed:
- raise UFOLibError("the filesystem '%s' is closed" % path)
- else:
- self.fs = filesystem
- try:
- path = filesystem.getsyspath("/")
- except fs.errors.NoSysPath:
- # network or in-memory FS may not map to the local one
- path = str(filesystem)
- # if passed an FS object, always use 'package' structure
- if structure and structure is not UFOFileStructure.PACKAGE:
- import warnings
-
- warnings.warn(
- "The 'structure' argument is not used when input is an FS object",
- UserWarning,
- stacklevel=2,
- )
- self._fileStructure = UFOFileStructure.PACKAGE
- # if FS contains a "metainfo.plist", we consider it non-empty
- self._havePreviousFile = filesystem.exists(METAINFO_FILENAME)
- # the user is responsible for closing the FS object
- self._shouldClose = False
- else:
- raise TypeError(
- "Expected a path string or fs object, found %s" % type(path).__name__
- )
-
- # establish some basic stuff
- self._path = fsdecode(path)
- self._formatVersion = formatVersion
- self._fileCreator = fileCreator
- self._downConversionKerningData = None
- self._validate = validate
- # if the file already exists, get the format version.
- # this will be needed for up and down conversion.
- previousFormatVersion = None
- if self._havePreviousFile:
- metaInfo = self._readMetaInfo(validate=validate)
- previousFormatVersion = metaInfo["formatVersionTuple"]
- # catch down conversion
- if previousFormatVersion > formatVersion:
- from fontTools.ufoLib.errors import UnsupportedUFOFormat
-
- raise UnsupportedUFOFormat(
- "The UFO located at this path is a higher version "
- f"({previousFormatVersion}) than the version ({formatVersion}) "
- "that is trying to be written. This is not supported."
- )
- # handle the layer contents
- self.layerContents = {}
- if previousFormatVersion is not None and previousFormatVersion.major >= 3:
- # already exists
- self.layerContents = OrderedDict(self._readLayerContents(validate))
- else:
- # previous < 3
- # imply the layer contents
- if self.fs.exists(DEFAULT_GLYPHS_DIRNAME):
- self.layerContents = {DEFAULT_LAYER_NAME: DEFAULT_GLYPHS_DIRNAME}
- # write the new metainfo
- self._writeMetaInfo()
-
- # properties
-
- def _get_fileCreator(self):
- return self._fileCreator
-
- fileCreator = property(
- _get_fileCreator,
- doc="The file creator of the UFO. This is set into metainfo.plist during __init__.",
- )
-
- # support methods for file system interaction
-
- def copyFromReader(self, reader, sourcePath, destPath):
- """
- Copy the sourcePath in the provided UFOReader to destPath
- in this writer. The paths must be relative. This works with
- both individual files and directories.
- """
- if not isinstance(reader, UFOReader):
- raise UFOLibError("The reader must be an instance of UFOReader.")
- sourcePath = fsdecode(sourcePath)
- destPath = fsdecode(destPath)
- if not reader.fs.exists(sourcePath):
- raise UFOLibError(
- 'The reader does not have data located at "%s".' % sourcePath
- )
- if self.fs.exists(destPath):
- raise UFOLibError('A file named "%s" already exists.' % destPath)
- # create the destination directory if it doesn't exist
- self.fs.makedirs(fs.path.dirname(destPath), recreate=True)
- if reader.fs.isdir(sourcePath):
- fs.copy.copy_dir(reader.fs, sourcePath, self.fs, destPath)
- else:
- fs.copy.copy_file(reader.fs, sourcePath, self.fs, destPath)
-
- def writeBytesToPath(self, path, data):
- """
- Write bytes to a path relative to the UFO filesystem's root.
- If writing to an existing UFO, check to see if data matches the data
- that is already in the file at path; if so, the file is not rewritten
- so that the modification date is preserved.
- If needed, the directory tree for the given path will be built.
- """
- path = fsdecode(path)
- if self._havePreviousFile:
- if self.fs.isfile(path) and data == self.fs.readbytes(path):
- return
- try:
- self.fs.writebytes(path, data)
- except fs.errors.FileExpected:
- raise UFOLibError("A directory exists at '%s'" % path)
- except fs.errors.ResourceNotFound:
- self.fs.makedirs(fs.path.dirname(path), recreate=True)
- self.fs.writebytes(path, data)
-
- def getFileObjectForPath(self, path, mode="w", encoding=None):
- """
- Returns a file (or file-like) object for the
- file at the given path. The path must be relative
- to the UFO path. Returns None if the file does
- not exist and the mode is "r" or "rb".
- An encoding may be passed if the file is opened in text mode.
-
- Note: The caller is responsible for closing the open file.
- """
- path = fsdecode(path)
- try:
- return self.fs.open(path, mode=mode, encoding=encoding)
- except fs.errors.ResourceNotFound as e:
- m = mode[0]
- if m == "r":
- # XXX I think we should just let it raise. The docstring,
- # however, says that this returns None if mode is 'r'
- return None
- elif m == "w" or m == "a" or m == "x":
- self.fs.makedirs(fs.path.dirname(path), recreate=True)
- return self.fs.open(path, mode=mode, encoding=encoding)
- except fs.errors.ResourceError as e:
- raise UFOLibError(f"unable to open '{path}' on {self.fs}: {e}")
-
- def removePath(self, path, force=False, removeEmptyParents=True):
- """
- Remove the file (or directory) at path. The path
- must be relative to the UFO.
- Raises UFOLibError if the path doesn't exist.
- If force=True, ignore non-existent paths.
- If the directory where 'path' is located becomes empty, it will
- be automatically removed, unless 'removeEmptyParents' is False.
- """
- path = fsdecode(path)
- try:
- self.fs.remove(path)
- except fs.errors.FileExpected:
- self.fs.removetree(path)
- except fs.errors.ResourceNotFound:
- if not force:
- raise UFOLibError(f"'{path}' does not exist on {self.fs}")
- if removeEmptyParents:
- parent = fs.path.dirname(path)
- if parent:
- fs.tools.remove_empty(self.fs, parent)
-
- # alias kept for backward compatibility with old API
- removeFileForPath = removePath
-
- # UFO mod time
-
- def setModificationTime(self):
- """
- Set the UFO modification time to the current time.
- This is never called automatically. It is up to the
- caller to call this when finished working on the UFO.
- """
- path = self._path
- if path is not None and os.path.exists(path):
- try:
- # this may fail on some filesystems (e.g. SMB servers)
- os.utime(path, None)
- except OSError as e:
- logger.warning("Failed to set modified time: %s", e)
-
- # metainfo.plist
-
- def _writeMetaInfo(self):
- metaInfo = dict(
- creator=self._fileCreator,
- formatVersion=self._formatVersion.major,
- )
- if self._formatVersion.minor != 0:
- metaInfo["formatVersionMinor"] = self._formatVersion.minor
- self._writePlist(METAINFO_FILENAME, metaInfo)
-
- # groups.plist
-
- def setKerningGroupConversionRenameMaps(self, maps):
- """
- Set maps defining the renaming that should be done
- when writing groups and kerning in UFO 1 and UFO 2.
- This will effectively undo the conversion done when
- UFOReader reads this data. The dictionary should have
- this form::
-
- {
- "side1" : {"group name to use when writing" : "group name in data"},
- "side2" : {"group name to use when writing" : "group name in data"}
- }
-
- This is the same form returned by UFOReader's
- getKerningGroupConversionRenameMaps method.
- """
- if self._formatVersion >= UFOFormatVersion.FORMAT_3_0:
- return # XXX raise an error here
- # flip the dictionaries
- remap = {}
- for side in ("side1", "side2"):
- for writeName, dataName in list(maps[side].items()):
- remap[dataName] = writeName
- self._downConversionKerningData = dict(groupRenameMap=remap)
-
- def writeGroups(self, groups, validate=None):
- """
- Write groups.plist. This method requires a
- dict of glyph groups as an argument.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # validate the data structure
- if validate:
- valid, message = groupsValidator(groups)
- if not valid:
- raise UFOLibError(message)
- # down convert
- if (
- self._formatVersion < UFOFormatVersion.FORMAT_3_0
- and self._downConversionKerningData is not None
- ):
- remap = self._downConversionKerningData["groupRenameMap"]
- remappedGroups = {}
- # there are some edge cases here that are ignored:
- # 1. if a group is being renamed to a name that
- # already exists, the existing group is always
- # overwritten. (this is why there are two loops
- # below.) there doesn't seem to be a logical
- # solution to groups mismatching and overwriting
- # with the specified group seems like a better
- # solution than throwing an error.
- # 2. if side 1 and side 2 groups are being renamed
- # to the same group name there is no check to
- # ensure that the contents are identical. that
- # is left up to the caller.
- for name, contents in list(groups.items()):
- if name in remap:
- continue
- remappedGroups[name] = contents
- for name, contents in list(groups.items()):
- if name not in remap:
- continue
- name = remap[name]
- remappedGroups[name] = contents
- groups = remappedGroups
- # pack and write
- groupsNew = {}
- for key, value in groups.items():
- groupsNew[key] = list(value)
- if groupsNew:
- self._writePlist(GROUPS_FILENAME, groupsNew)
- elif self._havePreviousFile:
- self.removePath(GROUPS_FILENAME, force=True, removeEmptyParents=False)
-
- # fontinfo.plist
-
- def writeInfo(self, info, validate=None):
- """
- Write fontinfo.plist. This method requires an object
- that supports getting attributes that follow the
- fontinfo.plist version 3 specification. Attributes
- will be taken from the given object and written
- into the file.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # gather version 3 data
- infoData = {}
- for attr in list(fontInfoAttributesVersion3ValueData.keys()):
- if hasattr(info, attr):
- try:
- value = getattr(info, attr)
- except AttributeError:
- raise UFOLibError(
- "The supplied info object does not support getting a necessary attribute (%s)."
- % attr
- )
- if value is None:
- continue
- infoData[attr] = value
- # down convert data if necessary and validate
- if self._formatVersion == UFOFormatVersion.FORMAT_3_0:
- if validate:
- infoData = validateInfoVersion3Data(infoData)
- elif self._formatVersion == UFOFormatVersion.FORMAT_2_0:
- infoData = _convertFontInfoDataVersion3ToVersion2(infoData)
- if validate:
- infoData = validateInfoVersion2Data(infoData)
- elif self._formatVersion == UFOFormatVersion.FORMAT_1_0:
- infoData = _convertFontInfoDataVersion3ToVersion2(infoData)
- if validate:
- infoData = validateInfoVersion2Data(infoData)
- infoData = _convertFontInfoDataVersion2ToVersion1(infoData)
- # write file if there is anything to write
- if infoData:
- self._writePlist(FONTINFO_FILENAME, infoData)
-
- # kerning.plist
-
- def writeKerning(self, kerning, validate=None):
- """
- Write kerning.plist. This method requires a
- dict of kerning pairs as an argument.
-
- This performs basic structural validation of the kerning,
- but it does not check for compliance with the spec in
- regards to conflicting pairs. The assumption is that the
- kerning data being passed is standards compliant.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- # validate the data structure
- if validate:
- invalidFormatMessage = "The kerning is not properly formatted."
- if not isDictEnough(kerning):
- raise UFOLibError(invalidFormatMessage)
- for pair, value in list(kerning.items()):
- if not isinstance(pair, (list, tuple)):
- raise UFOLibError(invalidFormatMessage)
- if not len(pair) == 2:
- raise UFOLibError(invalidFormatMessage)
- if not isinstance(pair[0], str):
- raise UFOLibError(invalidFormatMessage)
- if not isinstance(pair[1], str):
- raise UFOLibError(invalidFormatMessage)
- if not isinstance(value, numberTypes):
- raise UFOLibError(invalidFormatMessage)
- # down convert
- if (
- self._formatVersion < UFOFormatVersion.FORMAT_3_0
- and self._downConversionKerningData is not None
- ):
- remap = self._downConversionKerningData["groupRenameMap"]
- remappedKerning = {}
- for (side1, side2), value in list(kerning.items()):
- side1 = remap.get(side1, side1)
- side2 = remap.get(side2, side2)
- remappedKerning[side1, side2] = value
- kerning = remappedKerning
- # pack and write
- kerningDict = {}
- for left, right in kerning.keys():
- value = kerning[left, right]
- if left not in kerningDict:
- kerningDict[left] = {}
- kerningDict[left][right] = value
- if kerningDict:
- self._writePlist(KERNING_FILENAME, kerningDict)
- elif self._havePreviousFile:
- self.removePath(KERNING_FILENAME, force=True, removeEmptyParents=False)
-
- # lib.plist
-
- def writeLib(self, libDict, validate=None):
- """
- Write lib.plist. This method requires a
- lib dict as an argument.
-
- ``validate`` will validate the data, by default it is set to the
- class's validate value, can be overridden.
- """
- if validate is None:
- validate = self._validate
- if validate:
- valid, message = fontLibValidator(libDict)
- if not valid:
- raise UFOLibError(message)
- if libDict:
- self._writePlist(LIB_FILENAME, libDict)
- elif self._havePreviousFile:
- self.removePath(LIB_FILENAME, force=True, removeEmptyParents=False)
-
- # features.fea
-
- def writeFeatures(self, features, validate=None):
- """
- Write features.fea. This method requires a
- features string as an argument.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion == UFOFormatVersion.FORMAT_1_0:
- raise UFOLibError("features.fea is not allowed in UFO Format Version 1.")
- if validate:
- if not isinstance(features, str):
- raise UFOLibError("The features are not text.")
- if features:
- self.writeBytesToPath(FEATURES_FILENAME, features.encode("utf8"))
- elif self._havePreviousFile:
- self.removePath(FEATURES_FILENAME, force=True, removeEmptyParents=False)
-
- # glyph sets & layers
-
- def writeLayerContents(self, layerOrder=None, validate=None):
- """
- Write the layercontents.plist file. This method *must* be called
- after all glyph sets have been written.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- return
- if layerOrder is not None:
- newOrder = []
- for layerName in layerOrder:
- if layerName is None:
- layerName = DEFAULT_LAYER_NAME
- newOrder.append(layerName)
- layerOrder = newOrder
- else:
- layerOrder = list(self.layerContents.keys())
- if validate and set(layerOrder) != set(self.layerContents.keys()):
- raise UFOLibError(
- "The layer order content does not match the glyph sets that have been created."
- )
- layerContents = [
- (layerName, self.layerContents[layerName]) for layerName in layerOrder
- ]
- self._writePlist(LAYERCONTENTS_FILENAME, layerContents)
-
- def _findDirectoryForLayerName(self, layerName):
- foundDirectory = None
- for existingLayerName, directoryName in list(self.layerContents.items()):
- if layerName is None and directoryName == DEFAULT_GLYPHS_DIRNAME:
- foundDirectory = directoryName
- break
- elif existingLayerName == layerName:
- foundDirectory = directoryName
- break
- if not foundDirectory:
- raise UFOLibError(
- "Could not locate a glyph set directory for the layer named %s."
- % layerName
- )
- return foundDirectory
-
- def getGlyphSet(
- self,
- layerName=None,
- defaultLayer=True,
- glyphNameToFileNameFunc=None,
- validateRead=None,
- validateWrite=None,
- expectContentsFile=False,
- ):
- """
- Return the GlyphSet object associated with the
- appropriate glyph directory in the .ufo.
- If layerName is None, the default glyph set
- will be used. The defaultLayer flag indicates
- that the layer should be saved into the default
- glyphs directory.
-
- ``validateRead`` will validate the read data, by default it is set to the
- class's validate value, can be overridden.
- ``validateWrite`` will validate the written data, by default it is set to the
- class's validate value, can be overridden.
- ``expectContentsFile`` will raise a GlifLibError if a contents.plist file is
- not found on the glyph set file system. This should be set to ``True`` if you
- are reading an existing UFO and ``False`` if you use ``getGlyphSet`` to create
- a fresh glyph set.
- """
- if validateRead is None:
- validateRead = self._validate
- if validateWrite is None:
- validateWrite = self._validate
- # only default can be written in < 3
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0 and (
- not defaultLayer or layerName is not None
- ):
- raise UFOLibError(
- f"Only the default layer can be written in UFO {self._formatVersion.major}."
- )
- # locate a layer name when None has been given
- if layerName is None and defaultLayer:
- for existingLayerName, directory in self.layerContents.items():
- if directory == DEFAULT_GLYPHS_DIRNAME:
- layerName = existingLayerName
- if layerName is None:
- layerName = DEFAULT_LAYER_NAME
- elif layerName is None and not defaultLayer:
- raise UFOLibError("A layer name must be provided for non-default layers.")
- # move along to format specific writing
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- return self._getDefaultGlyphSet(
- validateRead,
- validateWrite,
- glyphNameToFileNameFunc=glyphNameToFileNameFunc,
- expectContentsFile=expectContentsFile,
- )
- elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major:
- return self._getGlyphSetFormatVersion3(
- validateRead,
- validateWrite,
- layerName=layerName,
- defaultLayer=defaultLayer,
- glyphNameToFileNameFunc=glyphNameToFileNameFunc,
- expectContentsFile=expectContentsFile,
- )
- else:
- raise NotImplementedError(self._formatVersion)
-
- def _getDefaultGlyphSet(
- self,
- validateRead,
- validateWrite,
- glyphNameToFileNameFunc=None,
- expectContentsFile=False,
- ):
- from fontTools.ufoLib.glifLib import GlyphSet
-
- glyphSubFS = self.fs.makedir(DEFAULT_GLYPHS_DIRNAME, recreate=True)
- return GlyphSet(
- glyphSubFS,
- glyphNameToFileNameFunc=glyphNameToFileNameFunc,
- ufoFormatVersion=self._formatVersion,
- validateRead=validateRead,
- validateWrite=validateWrite,
- expectContentsFile=expectContentsFile,
- )
-
- def _getGlyphSetFormatVersion3(
- self,
- validateRead,
- validateWrite,
- layerName=None,
- defaultLayer=True,
- glyphNameToFileNameFunc=None,
- expectContentsFile=False,
- ):
- from fontTools.ufoLib.glifLib import GlyphSet
-
- # if the default flag is on, make sure that the default in the file
- # matches the default being written. also make sure that this layer
- # name is not already linked to a non-default layer.
- if defaultLayer:
- for existingLayerName, directory in self.layerContents.items():
- if directory == DEFAULT_GLYPHS_DIRNAME:
- if existingLayerName != layerName:
- raise UFOLibError(
- "Another layer ('%s') is already mapped to the default directory."
- % existingLayerName
- )
- elif existingLayerName == layerName:
- raise UFOLibError(
- "The layer name is already mapped to a non-default layer."
- )
- # get an existing directory name
- if layerName in self.layerContents:
- directory = self.layerContents[layerName]
- # get a new directory name
- else:
- if defaultLayer:
- directory = DEFAULT_GLYPHS_DIRNAME
- else:
- # not caching this could be slightly expensive,
- # but caching it will be cumbersome
- existing = {d.lower() for d in self.layerContents.values()}
- directory = userNameToFileName(
- layerName, existing=existing, prefix="glyphs."
- )
- # make the directory
- glyphSubFS = self.fs.makedir(directory, recreate=True)
- # store the mapping
- self.layerContents[layerName] = directory
- # load the glyph set
- return GlyphSet(
- glyphSubFS,
- glyphNameToFileNameFunc=glyphNameToFileNameFunc,
- ufoFormatVersion=self._formatVersion,
- validateRead=validateRead,
- validateWrite=validateWrite,
- expectContentsFile=expectContentsFile,
- )
-
- def renameGlyphSet(self, layerName, newLayerName, defaultLayer=False):
- """
- Rename a glyph set.
-
- Note: if a GlyphSet object has already been retrieved for
- layerName, it is up to the caller to inform that object that
- the directory it represents has changed.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- # ignore renaming glyph sets for UFO1 UFO2
- # just write the data from the default layer
- return
- # the new and old names can be the same
- # as long as the default is being switched
- if layerName == newLayerName:
- # if the default is off and the layer is already not the default, skip
- if (
- self.layerContents[layerName] != DEFAULT_GLYPHS_DIRNAME
- and not defaultLayer
- ):
- return
- # if the default is on and the layer is already the default, skip
- if self.layerContents[layerName] == DEFAULT_GLYPHS_DIRNAME and defaultLayer:
- return
- else:
- # make sure the new layer name doesn't already exist
- if newLayerName is None:
- newLayerName = DEFAULT_LAYER_NAME
- if newLayerName in self.layerContents:
- raise UFOLibError("A layer named %s already exists." % newLayerName)
- # make sure the default layer doesn't already exist
- if defaultLayer and DEFAULT_GLYPHS_DIRNAME in self.layerContents.values():
- raise UFOLibError("A default layer already exists.")
- # get the paths
- oldDirectory = self._findDirectoryForLayerName(layerName)
- if defaultLayer:
- newDirectory = DEFAULT_GLYPHS_DIRNAME
- else:
- existing = {name.lower() for name in self.layerContents.values()}
- newDirectory = userNameToFileName(
- newLayerName, existing=existing, prefix="glyphs."
- )
- # update the internal mapping
- del self.layerContents[layerName]
- self.layerContents[newLayerName] = newDirectory
- # do the file system copy
- self.fs.movedir(oldDirectory, newDirectory, create=True)
-
- def deleteGlyphSet(self, layerName):
- """
- Remove the glyph set matching layerName.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- # ignore deleting glyph sets for UFO1 UFO2 as there are no layers
- # just write the data from the default layer
- return
- foundDirectory = self._findDirectoryForLayerName(layerName)
- self.removePath(foundDirectory, removeEmptyParents=False)
- del self.layerContents[layerName]
-
- def writeData(self, fileName, data):
- """
- Write data to fileName in the 'data' directory.
- The data must be a bytes string.
- """
- self.writeBytesToPath(f"{DATA_DIRNAME}/{fsdecode(fileName)}", data)
-
- def removeData(self, fileName):
- """
- Remove the file named fileName from the data directory.
- """
- self.removePath(f"{DATA_DIRNAME}/{fsdecode(fileName)}")
-
- # /images
-
- def writeImage(self, fileName, data, validate=None):
- """
- Write data to fileName in the images directory.
- The data must be a valid PNG.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- raise UFOLibError(
- f"Images are not allowed in UFO {self._formatVersion.major}."
- )
- fileName = fsdecode(fileName)
- if validate:
- valid, error = pngValidator(data=data)
- if not valid:
- raise UFOLibError(error)
- self.writeBytesToPath(f"{IMAGES_DIRNAME}/{fileName}", data)
-
- def removeImage(self, fileName, validate=None): # XXX remove unused 'validate'?
- """
- Remove the file named fileName from the
- images directory.
- """
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- raise UFOLibError(
- f"Images are not allowed in UFO {self._formatVersion.major}."
- )
- self.removePath(f"{IMAGES_DIRNAME}/{fsdecode(fileName)}")
-
- def copyImageFromReader(self, reader, sourceFileName, destFileName, validate=None):
- """
- Copy the sourceFileName in the provided UFOReader to destFileName
- in this writer. This uses the most memory-efficient method available
- for copying the data.
- """
- if validate is None:
- validate = self._validate
- if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
- raise UFOLibError(
- f"Images are not allowed in UFO {self._formatVersion.major}."
- )
- sourcePath = f"{IMAGES_DIRNAME}/{fsdecode(sourceFileName)}"
- destPath = f"{IMAGES_DIRNAME}/{fsdecode(destFileName)}"
- self.copyFromReader(reader, sourcePath, destPath)
-
- def close(self):
- if self._havePreviousFile and self._fileStructure is UFOFileStructure.ZIP:
- # if we are updating an existing zip file, we can now compress the
- # contents of the temporary filesystem in the destination path
- rootDir = os.path.splitext(os.path.basename(self._path))[0] + ".ufo"
- with fs.zipfs.ZipFS(self._path, write=True, encoding="utf-8") as destFS:
- fs.copy.copy_fs(self.fs, destFS.makedir(rootDir))
- super().close()
-
-
-# just an alias, makes it more explicit
-UFOReaderWriter = UFOWriter
-
-
-# ----------------
-# Helper Functions
-# ----------------
-
-
-def _sniffFileStructure(ufo_path):
- """Return UFOFileStructure.ZIP if the UFO at path 'ufo_path' (str)
- is a zip file, else return UFOFileStructure.PACKAGE if 'ufo_path' is a
- directory.
- Raise UFOLibError if it is a file with unknown structure, or if the path
- does not exist.
- """
- if zipfile.is_zipfile(ufo_path):
- return UFOFileStructure.ZIP
- elif os.path.isdir(ufo_path):
- return UFOFileStructure.PACKAGE
- elif os.path.isfile(ufo_path):
- raise UFOLibError(
- "The specified UFO does not have a known structure: '%s'" % ufo_path
- )
- else:
- raise UFOLibError("No such file or directory: '%s'" % ufo_path)
-
-
-def makeUFOPath(path):
- """
- Return a .ufo pathname.
-
- >>> makeUFOPath("directory/something.ext") == (
- ... os.path.join('directory', 'something.ufo'))
- True
- >>> makeUFOPath("directory/something.another.thing.ext") == (
- ... os.path.join('directory', 'something.another.thing.ufo'))
- True
- """
- dir, name = os.path.split(path)
- name = ".".join([".".join(name.split(".")[:-1]), "ufo"])
- return os.path.join(dir, name)
-
-
-# ----------------------
-# fontinfo.plist Support
-# ----------------------
-
-# Version Validators
-
-# There is no version 1 validator and there shouldn't be.
-# The version 1 spec was very loose and there were numerous
-# cases of invalid values.
-
-
-def validateFontInfoVersion2ValueForAttribute(attr, value):
- """
- This performs very basic validation of the value for attribute
- following the UFO 2 fontinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the value
- is of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- dataValidationDict = fontInfoAttributesVersion2ValueData[attr]
- valueType = dataValidationDict.get("type")
- validator = dataValidationDict.get("valueValidator")
- valueOptions = dataValidationDict.get("valueOptions")
- # have specific options for the validator
- if valueOptions is not None:
- isValidValue = validator(value, valueOptions)
- # no specific options
- else:
- if validator == genericTypeValidator:
- isValidValue = validator(value, valueType)
- else:
- isValidValue = validator(value)
- return isValidValue
-
-
-def validateInfoVersion2Data(infoData):
- """
- This performs very basic validation of the value for infoData
- following the UFO 2 fontinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the values
- are of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- validInfoData = {}
- for attr, value in list(infoData.items()):
- isValidValue = validateFontInfoVersion2ValueForAttribute(attr, value)
- if not isValidValue:
- raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")
- else:
- validInfoData[attr] = value
- return validInfoData
-
-
-def validateFontInfoVersion3ValueForAttribute(attr, value):
- """
- This performs very basic validation of the value for attribute
- following the UFO 3 fontinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the value
- is of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- dataValidationDict = fontInfoAttributesVersion3ValueData[attr]
- valueType = dataValidationDict.get("type")
- validator = dataValidationDict.get("valueValidator")
- valueOptions = dataValidationDict.get("valueOptions")
- # have specific options for the validator
- if valueOptions is not None:
- isValidValue = validator(value, valueOptions)
- # no specific options
- else:
- if validator == genericTypeValidator:
- isValidValue = validator(value, valueType)
- else:
- isValidValue = validator(value)
- return isValidValue
-
-
-def validateInfoVersion3Data(infoData):
- """
- This performs very basic validation of the value for infoData
- following the UFO 3 fontinfo.plist specification. The results
- of this should not be interpretted as *correct* for the font
- that they are part of. This merely indicates that the values
- are of the proper type and, where the specification defines
- a set range of possible values for an attribute, that the
- value is in the accepted range.
- """
- validInfoData = {}
- for attr, value in list(infoData.items()):
- isValidValue = validateFontInfoVersion3ValueForAttribute(attr, value)
- if not isValidValue:
- raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")
- else:
- validInfoData[attr] = value
- return validInfoData
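
A minimal usage sketch of the validators above (assuming the names are importable from `fontTools.ufoLib`; the attribute values are illustrative):

```python
from fontTools.ufoLib import UFOLibError, validateInfoVersion3Data

info = {"unitsPerEm": 1000, "versionMajor": 1, "openTypeOS2WeightClass": 400}
try:
    # returns a dict of the validated attribute/value pairs
    validated = validateInfoVersion3Data(info)
except UFOLibError as exc:
    print("rejected fontinfo.plist value:", exc)
```
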
-
-
-# Value Options
-
-fontInfoOpenTypeHeadFlagsOptions = list(range(0, 15))
-fontInfoOpenTypeOS2SelectionOptions = [1, 2, 3, 4, 7, 8, 9]
-fontInfoOpenTypeOS2UnicodeRangesOptions = list(range(0, 128))
-fontInfoOpenTypeOS2CodePageRangesOptions = list(range(0, 64))
-fontInfoOpenTypeOS2TypeOptions = [0, 1, 2, 3, 8, 9]
-
-# Version Attribute Definitions
-# This defines the attributes, types and, in some
-# cases, the possible values that can exist in
-# fontinfo.plist.
-
-fontInfoAttributesVersion1 = {
- "familyName",
- "styleName",
- "fullName",
- "fontName",
- "menuName",
- "fontStyle",
- "note",
- "versionMajor",
- "versionMinor",
- "year",
- "copyright",
- "notice",
- "trademark",
- "license",
- "licenseURL",
- "createdBy",
- "designer",
- "designerURL",
- "vendorURL",
- "unitsPerEm",
- "ascender",
- "descender",
- "capHeight",
- "xHeight",
- "defaultWidth",
- "slantAngle",
- "italicAngle",
- "widthName",
- "weightName",
- "weightValue",
- "fondName",
- "otFamilyName",
- "otStyleName",
- "otMacName",
- "msCharSet",
- "fondID",
- "uniqueID",
- "ttVendor",
- "ttUniqueID",
- "ttVersion",
-}
-
-fontInfoAttributesVersion2ValueData = {
- "familyName": dict(type=str),
- "styleName": dict(type=str),
- "styleMapFamilyName": dict(type=str),
- "styleMapStyleName": dict(
- type=str, valueValidator=fontInfoStyleMapStyleNameValidator
- ),
- "versionMajor": dict(type=int),
- "versionMinor": dict(type=int),
- "year": dict(type=int),
- "copyright": dict(type=str),
- "trademark": dict(type=str),
- "unitsPerEm": dict(type=(int, float)),
- "descender": dict(type=(int, float)),
- "xHeight": dict(type=(int, float)),
- "capHeight": dict(type=(int, float)),
- "ascender": dict(type=(int, float)),
- "italicAngle": dict(type=(float, int)),
- "note": dict(type=str),
- "openTypeHeadCreated": dict(
- type=str, valueValidator=fontInfoOpenTypeHeadCreatedValidator
- ),
- "openTypeHeadLowestRecPPEM": dict(type=(int, float)),
- "openTypeHeadFlags": dict(
- type="integerList",
- valueValidator=genericIntListValidator,
- valueOptions=fontInfoOpenTypeHeadFlagsOptions,
- ),
- "openTypeHheaAscender": dict(type=(int, float)),
- "openTypeHheaDescender": dict(type=(int, float)),
- "openTypeHheaLineGap": dict(type=(int, float)),
- "openTypeHheaCaretSlopeRise": dict(type=int),
- "openTypeHheaCaretSlopeRun": dict(type=int),
- "openTypeHheaCaretOffset": dict(type=(int, float)),
- "openTypeNameDesigner": dict(type=str),
- "openTypeNameDesignerURL": dict(type=str),
- "openTypeNameManufacturer": dict(type=str),
- "openTypeNameManufacturerURL": dict(type=str),
- "openTypeNameLicense": dict(type=str),
- "openTypeNameLicenseURL": dict(type=str),
- "openTypeNameVersion": dict(type=str),
- "openTypeNameUniqueID": dict(type=str),
- "openTypeNameDescription": dict(type=str),
- "openTypeNamePreferredFamilyName": dict(type=str),
- "openTypeNamePreferredSubfamilyName": dict(type=str),
- "openTypeNameCompatibleFullName": dict(type=str),
- "openTypeNameSampleText": dict(type=str),
- "openTypeNameWWSFamilyName": dict(type=str),
- "openTypeNameWWSSubfamilyName": dict(type=str),
- "openTypeOS2WidthClass": dict(
- type=int, valueValidator=fontInfoOpenTypeOS2WidthClassValidator
- ),
- "openTypeOS2WeightClass": dict(
- type=int, valueValidator=fontInfoOpenTypeOS2WeightClassValidator
- ),
- "openTypeOS2Selection": dict(
- type="integerList",
- valueValidator=genericIntListValidator,
- valueOptions=fontInfoOpenTypeOS2SelectionOptions,
- ),
- "openTypeOS2VendorID": dict(type=str),
- "openTypeOS2Panose": dict(
- type="integerList", valueValidator=fontInfoVersion2OpenTypeOS2PanoseValidator
- ),
- "openTypeOS2FamilyClass": dict(
- type="integerList", valueValidator=fontInfoOpenTypeOS2FamilyClassValidator
- ),
- "openTypeOS2UnicodeRanges": dict(
- type="integerList",
- valueValidator=genericIntListValidator,
- valueOptions=fontInfoOpenTypeOS2UnicodeRangesOptions,
- ),
- "openTypeOS2CodePageRanges": dict(
- type="integerList",
- valueValidator=genericIntListValidator,
- valueOptions=fontInfoOpenTypeOS2CodePageRangesOptions,
- ),
- "openTypeOS2TypoAscender": dict(type=(int, float)),
- "openTypeOS2TypoDescender": dict(type=(int, float)),
- "openTypeOS2TypoLineGap": dict(type=(int, float)),
- "openTypeOS2WinAscent": dict(type=(int, float)),
- "openTypeOS2WinDescent": dict(type=(int, float)),
- "openTypeOS2Type": dict(
- type="integerList",
- valueValidator=genericIntListValidator,
- valueOptions=fontInfoOpenTypeOS2TypeOptions,
- ),
- "openTypeOS2SubscriptXSize": dict(type=(int, float)),
- "openTypeOS2SubscriptYSize": dict(type=(int, float)),
- "openTypeOS2SubscriptXOffset": dict(type=(int, float)),
- "openTypeOS2SubscriptYOffset": dict(type=(int, float)),
- "openTypeOS2SuperscriptXSize": dict(type=(int, float)),
- "openTypeOS2SuperscriptYSize": dict(type=(int, float)),
- "openTypeOS2SuperscriptXOffset": dict(type=(int, float)),
- "openTypeOS2SuperscriptYOffset": dict(type=(int, float)),
- "openTypeOS2StrikeoutSize": dict(type=(int, float)),
- "openTypeOS2StrikeoutPosition": dict(type=(int, float)),
- "openTypeVheaVertTypoAscender": dict(type=(int, float)),
- "openTypeVheaVertTypoDescender": dict(type=(int, float)),
- "openTypeVheaVertTypoLineGap": dict(type=(int, float)),
- "openTypeVheaCaretSlopeRise": dict(type=int),
- "openTypeVheaCaretSlopeRun": dict(type=int),
- "openTypeVheaCaretOffset": dict(type=(int, float)),
- "postscriptFontName": dict(type=str),
- "postscriptFullName": dict(type=str),
- "postscriptSlantAngle": dict(type=(float, int)),
- "postscriptUniqueID": dict(type=int),
- "postscriptUnderlineThickness": dict(type=(int, float)),
- "postscriptUnderlinePosition": dict(type=(int, float)),
- "postscriptIsFixedPitch": dict(type=bool),
- "postscriptBlueValues": dict(
- type="integerList", valueValidator=fontInfoPostscriptBluesValidator
- ),
- "postscriptOtherBlues": dict(
- type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator
- ),
- "postscriptFamilyBlues": dict(
- type="integerList", valueValidator=fontInfoPostscriptBluesValidator
- ),
- "postscriptFamilyOtherBlues": dict(
- type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator
- ),
- "postscriptStemSnapH": dict(
- type="integerList", valueValidator=fontInfoPostscriptStemsValidator
- ),
- "postscriptStemSnapV": dict(
- type="integerList", valueValidator=fontInfoPostscriptStemsValidator
- ),
- "postscriptBlueFuzz": dict(type=(int, float)),
- "postscriptBlueShift": dict(type=(int, float)),
- "postscriptBlueScale": dict(type=(float, int)),
- "postscriptForceBold": dict(type=bool),
- "postscriptDefaultWidthX": dict(type=(int, float)),
- "postscriptNominalWidthX": dict(type=(int, float)),
- "postscriptWeightName": dict(type=str),
- "postscriptDefaultCharacter": dict(type=str),
- "postscriptWindowsCharacterSet": dict(
- type=int, valueValidator=fontInfoPostscriptWindowsCharacterSetValidator
- ),
- "macintoshFONDFamilyID": dict(type=int),
- "macintoshFONDName": dict(type=str),
-}
-fontInfoAttributesVersion2 = set(fontInfoAttributesVersion2ValueData.keys())
-
-fontInfoAttributesVersion3ValueData = deepcopy(fontInfoAttributesVersion2ValueData)
-fontInfoAttributesVersion3ValueData.update(
- {
- "versionMinor": dict(type=int, valueValidator=genericNonNegativeIntValidator),
- "unitsPerEm": dict(
- type=(int, float), valueValidator=genericNonNegativeNumberValidator
- ),
- "openTypeHeadLowestRecPPEM": dict(
- type=int, valueValidator=genericNonNegativeNumberValidator
- ),
- "openTypeHheaAscender": dict(type=int),
- "openTypeHheaDescender": dict(type=int),
- "openTypeHheaLineGap": dict(type=int),
- "openTypeHheaCaretOffset": dict(type=int),
- "openTypeOS2Panose": dict(
- type="integerList",
- valueValidator=fontInfoVersion3OpenTypeOS2PanoseValidator,
- ),
- "openTypeOS2TypoAscender": dict(type=int),
- "openTypeOS2TypoDescender": dict(type=int),
- "openTypeOS2TypoLineGap": dict(type=int),
- "openTypeOS2WinAscent": dict(
- type=int, valueValidator=genericNonNegativeNumberValidator
- ),
- "openTypeOS2WinDescent": dict(
- type=int, valueValidator=genericNonNegativeNumberValidator
- ),
- "openTypeOS2SubscriptXSize": dict(type=int),
- "openTypeOS2SubscriptYSize": dict(type=int),
- "openTypeOS2SubscriptXOffset": dict(type=int),
- "openTypeOS2SubscriptYOffset": dict(type=int),
- "openTypeOS2SuperscriptXSize": dict(type=int),
- "openTypeOS2SuperscriptYSize": dict(type=int),
- "openTypeOS2SuperscriptXOffset": dict(type=int),
- "openTypeOS2SuperscriptYOffset": dict(type=int),
- "openTypeOS2StrikeoutSize": dict(type=int),
- "openTypeOS2StrikeoutPosition": dict(type=int),
- "openTypeGaspRangeRecords": dict(
- type="dictList", valueValidator=fontInfoOpenTypeGaspRangeRecordsValidator
- ),
- "openTypeNameRecords": dict(
- type="dictList", valueValidator=fontInfoOpenTypeNameRecordsValidator
- ),
- "openTypeVheaVertTypoAscender": dict(type=int),
- "openTypeVheaVertTypoDescender": dict(type=int),
- "openTypeVheaVertTypoLineGap": dict(type=int),
- "openTypeVheaCaretOffset": dict(type=int),
- "woffMajorVersion": dict(
- type=int, valueValidator=genericNonNegativeIntValidator
- ),
- "woffMinorVersion": dict(
- type=int, valueValidator=genericNonNegativeIntValidator
- ),
- "woffMetadataUniqueID": dict(
- type=dict, valueValidator=fontInfoWOFFMetadataUniqueIDValidator
- ),
- "woffMetadataVendor": dict(
- type=dict, valueValidator=fontInfoWOFFMetadataVendorValidator
- ),
- "woffMetadataCredits": dict(
- type=dict, valueValidator=fontInfoWOFFMetadataCreditsValidator
- ),
- "woffMetadataDescription": dict(
- type=dict, valueValidator=fontInfoWOFFMetadataDescriptionValidator
- ),
- "woffMetadataLicense": dict(
- type=dict, valueValidator=fontInfoWOFFMetadataLicenseValidator
- ),
- "woffMetadataCopyright": dict(
- type=dict, valueValidator=fontInfoWOFFMetadataCopyrightValidator
- ),
- "woffMetadataTrademark": dict(
- type=dict, valueValidator=fontInfoWOFFMetadataTrademarkValidator
- ),
- "woffMetadataLicensee": dict(
- type=dict, valueValidator=fontInfoWOFFMetadataLicenseeValidator
- ),
- "woffMetadataExtensions": dict(
- type=list, valueValidator=fontInfoWOFFMetadataExtensionsValidator
- ),
- "guidelines": dict(type=list, valueValidator=guidelinesValidator),
- }
-)
-fontInfoAttributesVersion3 = set(fontInfoAttributesVersion3ValueData.keys())
-
-# insert the type validator for all attrs that
-# have no defined validator.
-for attr, dataDict in list(fontInfoAttributesVersion2ValueData.items()):
- if "valueValidator" not in dataDict:
- dataDict["valueValidator"] = genericTypeValidator
-
-for attr, dataDict in list(fontInfoAttributesVersion3ValueData.items()):
- if "valueValidator" not in dataDict:
- dataDict["valueValidator"] = genericTypeValidator
-
-# Version Conversion Support
-# These are used for converting from version 1
-# to version 2, or vice versa.
-
-
-def _flipDict(d):
- flipped = {}
- for key, value in list(d.items()):
- flipped[value] = key
- return flipped
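
The helper simply inverts a one-to-one mapping, so each conversion table below only has to be written once; a quick sketch of its behaviour:

```python
# _flipDict assumes the values are unique and hashable
assert _flipDict({64: "regular", 1: "italic"}) == {"regular": 64, "italic": 1}
```
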
-
-
-fontInfoAttributesVersion1To2 = {
- "menuName": "styleMapFamilyName",
- "designer": "openTypeNameDesigner",
- "designerURL": "openTypeNameDesignerURL",
- "createdBy": "openTypeNameManufacturer",
- "vendorURL": "openTypeNameManufacturerURL",
- "license": "openTypeNameLicense",
- "licenseURL": "openTypeNameLicenseURL",
- "ttVersion": "openTypeNameVersion",
- "ttUniqueID": "openTypeNameUniqueID",
- "notice": "openTypeNameDescription",
- "otFamilyName": "openTypeNamePreferredFamilyName",
- "otStyleName": "openTypeNamePreferredSubfamilyName",
- "otMacName": "openTypeNameCompatibleFullName",
- "weightName": "postscriptWeightName",
- "weightValue": "openTypeOS2WeightClass",
- "ttVendor": "openTypeOS2VendorID",
- "uniqueID": "postscriptUniqueID",
- "fontName": "postscriptFontName",
- "fondID": "macintoshFONDFamilyID",
- "fondName": "macintoshFONDName",
- "defaultWidth": "postscriptDefaultWidthX",
- "slantAngle": "postscriptSlantAngle",
- "fullName": "postscriptFullName",
- # require special value conversion
- "fontStyle": "styleMapStyleName",
- "widthName": "openTypeOS2WidthClass",
- "msCharSet": "postscriptWindowsCharacterSet",
-}
-fontInfoAttributesVersion2To1 = _flipDict(fontInfoAttributesVersion1To2)
-deprecatedFontInfoAttributesVersion2 = set(fontInfoAttributesVersion1To2.keys())
-
-_fontStyle1To2 = {64: "regular", 1: "italic", 32: "bold", 33: "bold italic"}
-_fontStyle2To1 = _flipDict(_fontStyle1To2)
-# Some UFO 1 files have 0
-_fontStyle1To2[0] = "regular"
-
-_widthName1To2 = {
- "Ultra-condensed": 1,
- "Extra-condensed": 2,
- "Condensed": 3,
- "Semi-condensed": 4,
- "Medium (normal)": 5,
- "Semi-expanded": 6,
- "Expanded": 7,
- "Extra-expanded": 8,
- "Ultra-expanded": 9,
-}
-_widthName2To1 = _flipDict(_widthName1To2)
-# FontLab's default width value is "Normal".
-# Many format version 1 UFOs will have this.
-_widthName1To2["Normal"] = 5
-# FontLab has an "All" width value. In UFO 1
-# move this up to "Normal".
-_widthName1To2["All"] = 5
-# "medium" appears in a lot of UFO 1 files.
-_widthName1To2["medium"] = 5
-# "Medium" appears in a lot of UFO 1 files.
-_widthName1To2["Medium"] = 5
-
-_msCharSet1To2 = {
- 0: 1,
- 1: 2,
- 2: 3,
- 77: 4,
- 128: 5,
- 129: 6,
- 130: 7,
- 134: 8,
- 136: 9,
- 161: 10,
- 162: 11,
- 163: 12,
- 177: 13,
- 178: 14,
- 186: 15,
- 200: 16,
- 204: 17,
- 222: 18,
- 238: 19,
- 255: 20,
-}
-_msCharSet2To1 = _flipDict(_msCharSet1To2)
-
-# 1 <-> 2
-
-
-def convertFontInfoValueForAttributeFromVersion1ToVersion2(attr, value):
- """
- Convert value from version 1 to version 2 format.
- Returns the new attribute name and the converted value.
- If the value is None, None will be returned for the new value.
- """
- # convert floats to ints if possible
- if isinstance(value, float):
- if int(value) == value:
- value = int(value)
- if value is not None:
- if attr == "fontStyle":
- v = _fontStyle1To2.get(value)
- if v is None:
- raise UFOLibError(
- f"Cannot convert value ({value!r}) for attribute {attr}."
- )
- value = v
- elif attr == "widthName":
- v = _widthName1To2.get(value)
- if v is None:
- raise UFOLibError(
- f"Cannot convert value ({value!r}) for attribute {attr}."
- )
- value = v
- elif attr == "msCharSet":
- v = _msCharSet1To2.get(value)
- if v is None:
- raise UFOLibError(
- f"Cannot convert value ({value!r}) for attribute {attr}."
- )
- value = v
- attr = fontInfoAttributesVersion1To2.get(attr, attr)
- return attr, value
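
Two worked examples of this conversion (the values are chosen from the tables above):

```python
# "fontStyle" needs a value conversion as well as a rename: value 1 maps to
# "italic" via _fontStyle1To2, and the attribute becomes "styleMapStyleName".
assert convertFontInfoValueForAttributeFromVersion1ToVersion2("fontStyle", 1) == (
    "styleMapStyleName",
    "italic",
)
# Attributes without a special value mapping keep their value and are only renamed.
assert convertFontInfoValueForAttributeFromVersion1ToVersion2("designer", "Jane Doe") == (
    "openTypeNameDesigner",
    "Jane Doe",
)
```
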
-
-
-def convertFontInfoValueForAttributeFromVersion2ToVersion1(attr, value):
- """
- Convert value from version 2 to version 1 format.
- Returns the new attribute name and the converted value.
- If the value is None, None will be returned for the new value.
- """
- if value is not None:
- if attr == "styleMapStyleName":
- value = _fontStyle2To1.get(value)
- elif attr == "openTypeOS2WidthClass":
- value = _widthName2To1.get(value)
- elif attr == "postscriptWindowsCharacterSet":
- value = _msCharSet2To1.get(value)
- attr = fontInfoAttributesVersion2To1.get(attr, attr)
- return attr, value
-
-
-def _convertFontInfoDataVersion1ToVersion2(data):
- converted = {}
- for attr, value in list(data.items()):
- # FontLab gives -1 for the weightValue
- # of fonts with no defined value. Many
- # format version 1 UFOs will have this.
- if attr == "weightValue" and value == -1:
- continue
- newAttr, newValue = convertFontInfoValueForAttributeFromVersion1ToVersion2(
- attr, value
- )
- # skip if the attribute is not part of version 2
- if newAttr not in fontInfoAttributesVersion2:
- continue
- # catch values that can't be converted
- if value is None:
- raise UFOLibError(
- f"Cannot convert value ({value!r}) for attribute {newAttr}."
- )
- # store
- converted[newAttr] = newValue
- return converted
-
-
-def _convertFontInfoDataVersion2ToVersion1(data):
- converted = {}
- for attr, value in list(data.items()):
- newAttr, newValue = convertFontInfoValueForAttributeFromVersion2ToVersion1(
- attr, value
- )
- # only take attributes that are registered for version 1
- if newAttr not in fontInfoAttributesVersion1:
- continue
- # catch values that can't be converted
- if value is None:
- raise UFOLibError(
- f"Cannot convert value ({value!r}) for attribute {newAttr}."
- )
- # store
- converted[newAttr] = newValue
- return converted
-
-
-# 2 <-> 3
-
-_ufo2To3NonNegativeInt = {
- "versionMinor",
- "openTypeHeadLowestRecPPEM",
- "openTypeOS2WinAscent",
- "openTypeOS2WinDescent",
-}
-_ufo2To3NonNegativeIntOrFloat = {
- "unitsPerEm",
-}
-_ufo2To3FloatToInt = {
- "openTypeHeadLowestRecPPEM",
- "openTypeHheaAscender",
- "openTypeHheaDescender",
- "openTypeHheaLineGap",
- "openTypeHheaCaretOffset",
- "openTypeOS2TypoAscender",
- "openTypeOS2TypoDescender",
- "openTypeOS2TypoLineGap",
- "openTypeOS2WinAscent",
- "openTypeOS2WinDescent",
- "openTypeOS2SubscriptXSize",
- "openTypeOS2SubscriptYSize",
- "openTypeOS2SubscriptXOffset",
- "openTypeOS2SubscriptYOffset",
- "openTypeOS2SuperscriptXSize",
- "openTypeOS2SuperscriptYSize",
- "openTypeOS2SuperscriptXOffset",
- "openTypeOS2SuperscriptYOffset",
- "openTypeOS2StrikeoutSize",
- "openTypeOS2StrikeoutPosition",
- "openTypeVheaVertTypoAscender",
- "openTypeVheaVertTypoDescender",
- "openTypeVheaVertTypoLineGap",
- "openTypeVheaCaretOffset",
-}
-
-
-def convertFontInfoValueForAttributeFromVersion2ToVersion3(attr, value):
- """
- Convert value from version 2 to version 3 format.
- Returns the new attribute name and the converted value.
- If the value is None, None will be returned for the new value.
- """
- if attr in _ufo2To3FloatToInt:
- try:
- value = round(value)
- except (ValueError, TypeError):
- raise UFOLibError("Could not convert value for %s." % attr)
- if attr in _ufo2To3NonNegativeInt:
- try:
- value = int(abs(value))
- except (ValueError, TypeError):
- raise UFOLibError("Could not convert value for %s." % attr)
- elif attr in _ufo2To3NonNegativeIntOrFloat:
- try:
- v = float(abs(value))
- except (ValueError, TypeError):
- raise UFOLibError("Could not convert value for %s." % attr)
- if v == int(v):
- v = int(v)
- if v != value:
- value = v
- return attr, value
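
A worked example of the coercion above (the input value is made up for illustration):

```python
# openTypeOS2WinDescent is both float-to-int and non-negative in UFO 3:
# round(-200.6) gives -201, and taking the absolute value yields 201.
assert convertFontInfoValueForAttributeFromVersion2ToVersion3(
    "openTypeOS2WinDescent", -200.6
) == ("openTypeOS2WinDescent", 201)
```
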
-
-
-def convertFontInfoValueForAttributeFromVersion3ToVersion2(attr, value):
- """
- Convert value from version 3 to version 2 format.
- Returns the new attribute name and the converted value.
- If the value is None, None will be returned for the new value.
- """
- return attr, value
-
-
-def _convertFontInfoDataVersion3ToVersion2(data):
- converted = {}
- for attr, value in list(data.items()):
- newAttr, newValue = convertFontInfoValueForAttributeFromVersion3ToVersion2(
- attr, value
- )
- if newAttr not in fontInfoAttributesVersion2:
- continue
- converted[newAttr] = newValue
- return converted
-
-
-def _convertFontInfoDataVersion2ToVersion3(data):
- converted = {}
- for attr, value in list(data.items()):
- attr, value = convertFontInfoValueForAttributeFromVersion2ToVersion3(
- attr, value
- )
- converted[attr] = value
- return converted
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()
diff --git a/spaces/cihyFjudo/fairness-paper-search/JetBrains PhpStorm 2018.3.0 Key Free Download Learn or Teach Coding with the Top-Rated PHP IDE.md b/spaces/cihyFjudo/fairness-paper-search/JetBrains PhpStorm 2018.3.0 Key Free Download Learn or Teach Coding with the Top-Rated PHP IDE.md
deleted file mode 100644
index 46406d531518fbd7f8ceb82118cf9e6ddb9934a6..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/JetBrains PhpStorm 2018.3.0 Key Free Download Learn or Teach Coding with the Top-Rated PHP IDE.md
+++ /dev/null
@@ -1,6 +0,0 @@
-JetBrains PhpStorm 2018.3.0 Key Free Download Download File 🔗 https://tinurli.com/2uwiMp
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Mathilukal-Novelpdf-LINK.md b/spaces/cihyFjudo/fairness-paper-search/Mathilukal-Novelpdf-LINK.md
deleted file mode 100644
index d5d9a3a4728f04ccab8dc183db9df134d505c69d..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Mathilukal-Novelpdf-LINK.md
+++ /dev/null
@@ -1,70 +0,0 @@
-## Mathilukal Novel.pdf
-
-
-
-
-
- 
-
-
-
-
-
-**Click Here >> [https://venemena.blogspot.com/?download=2txRfx](https://venemena.blogspot.com/?download=2txRfx)**
-
-
-
-
-
-
-
-
-
-
-
-# Mathilukal Novel.pdf: A Review of Vaikom Muhammad Basheer's Classic Malayalam Love Story
-
-
-
-Mathilukal Novel.pdf is a digital version of the Malayalam novel Mathilukal, written by Vaikom Muhammad Basheer in 1965. Mathilukal, which means Walls in English, is one of the most cherished and well-known love stories in Malayalam literature. It is based on Basheer's own life incident at a jail in Thiruvananthapuram, where he fell in love with a female prisoner named Narayani, whom he never saw but only heard through a wall that separated the male and female sections of the jail.
-
-
-
-The novel is a masterpiece of Basheer's unique style of writing, which blends humor, sarcasm, satire, and realism. He uses colloquial language and simple sentences to convey profound emotions and insights. He also breaks the conventional rules of grammar, punctuation, and narration to create a direct and intimate connection with the reader. The novel is a testament to Basheer's courage and creativity as a writer, who dared to challenge the feudalistic and orthodox society of his time with his unconventional love story.
-
-
-
-Mathilukal Novel.pdf is available for free download from various online sources, such as PDFRoom.com[^1^], Goodreads.com[^2^], and Scribd.com[^4^]. The novel has also been adapted into a film by Adoor Gopalakrishnan in 1990, starring Mammootty as Basheer and KPAC Lalitha as Narayani. The film won several national and international awards, including the FIPRESCI Prize at the Venice Film Festival.
-
-
-
-If you are looking for a short and simple novel that will touch your heart and make you laugh and cry at the same time, Mathilukal Novel.pdf is the perfect choice for you. It is a timeless classic that will appeal to readers of all ages and backgrounds. Download Mathilukal Novel.pdf today and enjoy the magic of Basheer's writing.
-
-## Who was Vaikom Muhammad Basheer?
-
-
-
-Vaikom Muhammad Basheer was a writer of Malayalam literature, who was born on 19 or 21 January 1908 in Thalayolaparambu, a village in the erstwhile princely state of Travancore (now in Kerala). He was the eldest son of Kayi Abdurahman and Kunjathumma. He had a formal education only up to the seventh grade, but he was an avid reader and learner of various languages, including Arabic, Urdu, Hindi, Tamil, and English.
-
-
-
-Basheer was not only a writer, but also a humanist, a freedom fighter, a journalist, and a political activist. He participated in the Salt Satyagraha and the Quit India Movement against the British colonial rule. He was arrested and imprisoned several times for his involvement in the nationalist struggle. He also worked as a newspaper editor, a school teacher, a sports coach, a book salesman, and a cook. He traveled extensively across India and abroad, meeting various people and experiencing different cultures.
-
-
-
-Basheer began his literary career by writing short stories for magazines. His first story, Ente Thankam (My Darling), was published in 1937. He wrote more than 50 short stories and 15 novels, as well as essays, memoirs, letters, and plays. His works are known for their realism, humor, satire, social criticism, and humanism. Some of his famous works include Balyakalasakhi (Childhood Companion), Pathummayude Aadu (Pathumma's Goat), Shabdangal (Voices), Mathilukal (Walls), Ntuppuppakkoranendarnnu (My Grandad Had an Elephant), and Anargha Nimisham (Invaluable Moment).
-
-
-
-Basheer died on 5 July 1994 at the age of 86. He was awarded the Padma Shri in 1982 and the Sahitya Akademi Award in 1970 for his contributions to Malayalam literature. He is widely regarded as one of the greatest writers in Malayalam and Indian literature. His works have been translated into many languages and adapted into films, television shows, and stage plays.
-
- dfd1c89656
-
-
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Nitro Pdf Professional 64bit 7.0.1.5 Final Crack Full Download Serial.rar How to Install and Activate Nitro PDF Pro.md b/spaces/cihyFjudo/fairness-paper-search/Nitro Pdf Professional 64bit 7.0.1.5 Final Crack Full Download Serial.rar How to Install and Activate Nitro PDF Pro.md
deleted file mode 100644
index 2c758628dd72bfd11df6d4dfa00f8885744eaaec..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Nitro Pdf Professional 64bit 7.0.1.5 Final Crack Full Download Serial.rar How to Install and Activate Nitro PDF Pro.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Nitro Pdf Professional 64bit 7.0.1.5 Final Crack Full Download Serial.rar Download Zip ··· https://tinurli.com/2uwjK8
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Prototype Download Experience the Thrill of Shapeshifting and Consuming Enemies.md b/spaces/cihyFjudo/fairness-paper-search/Prototype Download Experience the Thrill of Shapeshifting and Consuming Enemies.md
deleted file mode 100644
index 1db9820244d8e35cd41f0553175f58c178b5ba6a..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Prototype Download Experience the Thrill of Shapeshifting and Consuming Enemies.md
+++ /dev/null
@@ -1,35 +0,0 @@
-
-People love free steam games, no doubt. But what many people hate is downloading so many parts and trying to install them on their own. This is why we are the only site that pre-installs every game for you. We have many categories like shooters, action, racing, simulators and even VR games! We strive to satisfy our users and ask for nothing in return. We revolutionized the downloading scene and will continue being your #1 site for free games.
-Prototype download Download ★ https://tinurli.com/2uwki8
-Create from clickable prototypes to fully-functional simulations, without typing a single line of code. We provide a full range of tools, so that you can focus on creating delightful user experiences. Testing through simulations will reduce rework and boost user adoption.
-Create prototypes for websites and web apps that adapt to multiple screen resolutions for desktop and mobile. The UI elements in your screens will also adapt automatically. One free prototyping tool to rule all devices!
-Visualize your web and mobile prototypes, while you design them. Simulate prototype behavior while you design. Use our Emulator and Viewer App to test your prototype live on any iPhone, iPad or Android device.
-Browser settings can affect the rendering of design specs and prototypes. Learn how to change the browser settings, and find other browser-related fixes in Published Adobe XD prototypes do not appear in browsers.
-
-After publishing prototype link in Share mode, click the Behance icon in the Property Inspector to open a new Behance project, and then publish the project. For more information on sharing to Behance, see Publish design from Adobe XD to Behance.
-You can download your app from the Creative Cloud website. If prompted, sign in to your Adobe account, then click either Download or Install for your app. For more information, see Download your Creative Cloud apps.
-For solutions to a "failed to install" error, see Error: "Failed to install" Creative Cloud desktop app. To resolve other download, installation, and update issues, see this download and install troubleshooting guide.
-Tables 1 and 2 contain a scorecard for each prototype building. The scorecard is a spreadsheet (Microsoft® Excel®, .xls, format) that summarizes the building descriptions, thermal zone internal loads, schedules, and other key modeling input information. The suite of prototype models is available for download (compressed, .zip, format) for the respective edition of Standard 90.1 and IECC. Each file includes EnergyPlus model input files (.idf) and corresponding output files (.html) across all climate locations.
-Files may be downloaded either as complete packages, containing all building types, or by individual building type, either by specific Standard 90.1 or IECC editions or as complete sets from the tables below.
-The energy models for the 2015, 2018 and 2021 versions of the IECC are listed in Table 4 and can be downloaded either by specific IECC edition or as complete sets by climate zone. The complete sets contain prototypes with earlier versions of the IECC. The idf files may be opened and modified in EnergyPlus.
-The single family prototypes are now complete EnergyPlus files utilizing the airflow network for duct leakage modeling. Previous single family prototype models posted on the Energy Codes website did not contain duct leakage specifications. Calculating loads for duct leakage required multiple EnergyPlus simulations with and without duct leakage and post processing the results for both single family and multifamily buildings. As a result, there may be large differences in energy consumption when comparing the latest single family prototypes results to older prototype results downloaded from this website. The multifamily prototype models do not contain duct leakage specifications, and the duct leakage adjustment are applied during the post-processing. We are working on updating the MF models to incorporate the airflow network with duct leakage loops.
-The energy models for the HUD and the final rule are listed in Table 6 and can be downloaded either by specific code edition (i.e., HUD or Final Rule) or as complete sets by either each of the climate zone (all rows beside the last row of Table 6) or all the climate zones (last row of Table 6). The idf files may be opened and modified in EnergyPlus.
-I shared my prototype with a colleague who was going to present it, but he will not have internet access and therefore cannot share the prototype as an active project - only static images of my artboards which does not give the full experience of the prototype.
-We would like to be able to share/publish prototypes on a private/corporate cloud, not Adobe Cloud. We cannot publish to Adobe cloud for security reasons. Please create a way for us to be able to publish and share prototypes on a private network.
-The game assets you've just downloaded is available for free, check the license included in the download for more information. Consider a donation (not required) or share to support the creation of more game assets!
-Choose a device preset or enter a custom Artboard size to design for your favorite platform. Hover events and mouse wheel scrolling make it easy to make web and desktop prototypes that feel like the real thing.
-This prototype build is very incomplete, with a number of incomplete Zones. Attempting to play the game normally, only the entirety of Green Hill Zone and Marble Zone, as well as the first Acts of Spring Yard Zone (Sparkling Zone) and Star Light Zone are playable. Most other levels in the game can only be accessed via the level select, and neither of them can be completed due to the lack of signposts and bosses, with Scrap Brain Zone (Clock Work Zone) Act 3 and Final Zone being nonexistent.
-Among the Zones present in this prototype, there are many things that are different from the final build, such as different backgrounds, very different level layouts, and objects that would be deleted or dummied out. The existence of many of these differences has been known for years via early magazine scans and video footage, and their presence in this prototype reveals some insight on the game's development.
-An EPROM cartridge of the prototype was found by Buckaroo, who dumped the cartridge to be released by drx of Hidden Palace. The prototype was streamed on Twitch on December 31, 2020, with the ROM being released on January 1, 2021.
-In a lab test, a full duplex DOCSIS 4.0 system-on-chip (SoC) cable modem built by Broadcom delivered upload and download speeds faster than 4 gigabits per second (Gbps) powered by 10G network technology.
-That announcement followed major 10G milestone announcements in April 2021 of the first-ever live lab test of a 10G system-on-chip (SOC) and October 2020, of a trial delivering 1.25 gigabit-per-second (Gbps) upload and download speeds over a live production network using Network Function Virtualization (NFV) combined with the latest DOCSIS technology.
-A key component of 10G, DOCSIS 4.0 is an evolutionary leap forward in the ability to deliver multigigabit upload and download speeds over the connections already installed in hundreds of millions of homes worldwide.
-In addition to providing a path to multigigabit upload and download speeds at scale, 10G updates will deliver near-term benefits to customers in the form of increased reliability, performance, and lower latency, the company said.
-Apunka Games is a very trusted website. It provides all the games available around the world. I downloaded over 1500 games from this website, and almost all of them work properly. Thank you so much, apunkagames.
-Considering the heavy download related to the full S2 prototype LC 20m map of Africa 2016, a web interface was developed to mainly visualize and interact with data. A left click, anywhere on the layer, highlights the LC-Map label of the selected pixel in the legend description of the left panel.
-In this section the user can download the full S2 prototype LC 20m map of Africa 2016 product in GeoTIFF format with a size of approximatively 6GB and the colour legend. Please ensure that you have the appropriate internet connection and computer capacity. Please be aware that this prototype map is the first step of the production of a land cover map at 20 m for Africa. The prototype is made available in order to collect user feedbacks for the necessary improvements.
-PVS has had many contributors over the years, but would probably not even exist without the continuing support and encouragement from the NASA Langley Formal Methods Research Program. They have been with us from the beginning, and have put together a huge library of PVS theories that are freely available. It is highly recommended that you download and use these, both as libraries and as examples of complex PVS specifications and proofs. Many of the tools currently in PVS were developed by this group. Check out the link for details on PVS related research sponsored by NASA. There you can even learn about how the NASA PVS Library was featured in the movie "The Martian"
-Publisher 505 Games has released a working prototype for Indivisible, offering a proof of concept the game's crowdfunding efforts. Indivisible follows a girl name Ajna, who has developed mysterious powers. She ventures across the world to learn the origins of her abilities, picking up a party of heroes along the way and taking on powerful enemies. The hand-drawn Skullgirls art style is on full display, set against a soundtrack from Secret of Mana composer Hiroki Kiruta.
-Prototype 1 Ocean Of Games (stylized as [PROTOTYPE]) is a 2009 open-world action-adventure video game created by Radical Entertainment and published by Activision. The game was released in North America on June 9, 2009, in Oceania on June 10, and in Europe on June 12. Versions for PlayStation 4 and Xbox One were published on July 14, 2015, alongside the sequel as the Prototype Biohazard Bundle. Separate versions of those games became available on August 12, 2015.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Reikan Focal Pro Cracked.48 A Review of the Best Lens Calibration Tool for Canon and Nikon.md b/spaces/cihyFjudo/fairness-paper-search/Reikan Focal Pro Cracked.48 A Review of the Best Lens Calibration Tool for Canon and Nikon.md
deleted file mode 100644
index 6962297df0e9fd4c1befe1be9d21d0f56a370d6d..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Reikan Focal Pro Cracked.48 A Review of the Best Lens Calibration Tool for Canon and Nikon.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-I was quite happy and drove up there later that afternoon to pick up my equipment. I talked to the owner who has been running the repair shop for 45 years. He explained to me that both bodies and both lenses were misaligned and that it took them two days to adjust every focal length and then pick an average that would give me good focus on any focal length. We took a couple of test shots, and all of them were perfectly in focus.
-Reikan Focal Pro Cracked.48 DOWNLOAD >>> https://tinurli.com/2uwjV0
-Furthermore, we can clearly see some focus breathing in the racking examples, as if the camera is hunting to confirm focus in addition to the evident focal length magnification due to refocusing. And if a subject isn't already reasonably in focus to begin with (the equivalent aperture is now F6.8 on the Pro models), it won't get very sharp if you choose to refocus onto it. Furthermore, if focus is already racking in a clip, we imagine it'd be difficult if not impossible to change or reverse it!
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Train Simulator 2013 Pc Iso Torrent.md b/spaces/cihyFjudo/fairness-paper-search/Train Simulator 2013 Pc Iso Torrent.md
deleted file mode 100644
index ae81cbcda49fb6c369d973c0f42568c46ad00ae2..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Train Simulator 2013 Pc Iso Torrent.md
+++ /dev/null
@@ -1,6 +0,0 @@
-train simulator 2013 pc iso torrent DOWNLOAD ::: https://tinurli.com/2uwjCh
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Xforce Keygen 32bits Or 64bits Version Showcase 2014 Free Download Tips and Tricks for Successful Installation.md b/spaces/cihyFjudo/fairness-paper-search/Xforce Keygen 32bits Or 64bits Version Showcase 2014 Free Download Tips and Tricks for Successful Installation.md
deleted file mode 100644
index c0b2c246a8abadca580e1ec1f5328e1da58a7dd5..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Xforce Keygen 32bits Or 64bits Version Showcase 2014 Free Download Tips and Tricks for Successful Installation.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Xforce Keygen 32bits Or 64bits Version Showcase 2014 Free Download DOWNLOAD ››› https://tinurli.com/2uwksC
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/cu2qu/cli.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/cu2qu/cli.py
deleted file mode 100644
index 9144043ff176fb956cf075b5db38fcca88258430..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/cu2qu/cli.py
+++ /dev/null
@@ -1,198 +0,0 @@
-import os
-import argparse
-import logging
-import shutil
-import multiprocessing as mp
-from contextlib import closing
-from functools import partial
-
-import fontTools
-from .ufo import font_to_quadratic, fonts_to_quadratic
-
-ufo_module = None
-try:
- import ufoLib2 as ufo_module
-except ImportError:
- try:
- import defcon as ufo_module
- except ImportError as e:
- pass
-
-
-logger = logging.getLogger("fontTools.cu2qu")
-
-
-def _cpu_count():
- try:
- return mp.cpu_count()
- except NotImplementedError: # pragma: no cover
- return 1
-
-
-def open_ufo(path):
- if hasattr(ufo_module.Font, "open"): # ufoLib2
- return ufo_module.Font.open(path)
- return ufo_module.Font(path) # defcon
-
-
-def _font_to_quadratic(input_path, output_path=None, **kwargs):
- ufo = open_ufo(input_path)
- logger.info("Converting curves for %s", input_path)
- if font_to_quadratic(ufo, **kwargs):
- logger.info("Saving %s", output_path)
- if output_path:
- ufo.save(output_path)
- else:
- ufo.save() # save in-place
- elif output_path:
- _copytree(input_path, output_path)
-
-
-def _samepath(path1, path2):
- # TODO on python3+, there's os.path.samefile
- path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1)))
- path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2)))
- return path1 == path2
-
-
-def _copytree(input_path, output_path):
- if _samepath(input_path, output_path):
- logger.debug("input and output paths are the same file; skipped copy")
- return
- if os.path.exists(output_path):
- shutil.rmtree(output_path)
- shutil.copytree(input_path, output_path)
-
-
-def main(args=None):
- """Convert a UFO font from cubic to quadratic curves"""
- parser = argparse.ArgumentParser(prog="cu2qu")
- parser.add_argument("--version", action="version", version=fontTools.__version__)
- parser.add_argument(
- "infiles",
- nargs="+",
- metavar="INPUT",
- help="one or more input UFO source file(s).",
- )
- parser.add_argument("-v", "--verbose", action="count", default=0)
- parser.add_argument(
- "-e",
- "--conversion-error",
- type=float,
- metavar="ERROR",
- default=None,
- help="maxiumum approximation error measured in EM (default: 0.001)",
- )
- parser.add_argument(
- "-m",
- "--mixed",
- default=False,
- action="store_true",
- help="whether to used mixed quadratic and cubic curves",
- )
- parser.add_argument(
- "--keep-direction",
- dest="reverse_direction",
- action="store_false",
- help="do not reverse the contour direction",
- )
-
- mode_parser = parser.add_mutually_exclusive_group()
- mode_parser.add_argument(
- "-i",
- "--interpolatable",
- action="store_true",
- help="whether curve conversion should keep interpolation compatibility",
- )
- mode_parser.add_argument(
- "-j",
- "--jobs",
- type=int,
- nargs="?",
- default=1,
- const=_cpu_count(),
- metavar="N",
- help="Convert using N multiple processes (default: %(default)s)",
- )
-
- output_parser = parser.add_mutually_exclusive_group()
- output_parser.add_argument(
- "-o",
- "--output-file",
- default=None,
- metavar="OUTPUT",
- help=(
- "output filename for the converted UFO. By default fonts are "
- "modified in place. This only works with a single input."
- ),
- )
- output_parser.add_argument(
- "-d",
- "--output-dir",
- default=None,
- metavar="DIRECTORY",
- help="output directory where to save converted UFOs",
- )
-
- options = parser.parse_args(args)
-
- if ufo_module is None:
- parser.error("Either ufoLib2 or defcon are required to run this script.")
-
- if not options.verbose:
- level = "WARNING"
- elif options.verbose == 1:
- level = "INFO"
- else:
- level = "DEBUG"
- logging.basicConfig(level=level)
-
- if len(options.infiles) > 1 and options.output_file:
- parser.error("-o/--output-file can't be used with multile inputs")
-
- if options.output_dir:
- output_dir = options.output_dir
- if not os.path.exists(output_dir):
- os.mkdir(output_dir)
- elif not os.path.isdir(output_dir):
- parser.error("'%s' is not a directory" % output_dir)
- output_paths = [
- os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
- ]
- elif options.output_file:
- output_paths = [options.output_file]
- else:
- # save in-place
- output_paths = [None] * len(options.infiles)
-
- kwargs = dict(
- dump_stats=options.verbose > 0,
- max_err_em=options.conversion_error,
- reverse_direction=options.reverse_direction,
- all_quadratic=False if options.mixed else True,
- )
-
- if options.interpolatable:
- logger.info("Converting curves compatibly")
- ufos = [open_ufo(infile) for infile in options.infiles]
- if fonts_to_quadratic(ufos, **kwargs):
- for ufo, output_path in zip(ufos, output_paths):
- logger.info("Saving %s", output_path)
- if output_path:
- ufo.save(output_path)
- else:
- ufo.save()
- else:
- for input_path, output_path in zip(options.infiles, output_paths):
- if output_path:
- _copytree(input_path, output_path)
- else:
- jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
- if jobs > 1:
- func = partial(_font_to_quadratic, **kwargs)
- logger.info("Running %d parallel processes", jobs)
- with closing(mp.Pool(jobs)) as pool:
- pool.starmap(func, zip(options.infiles, output_paths))
- else:
- for input_path, output_path in zip(options.infiles, output_paths):
- _font_to_quadratic(input_path, output_path, **kwargs)
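
The converter is normally driven from the command line, but since `main()` takes an optional argument list it can also be called programmatically. A hedged sketch (the UFO paths are placeholders, and either `ufoLib2` or `defcon` must be installed):

```python
from fontTools.cu2qu.cli import main

# Convert two masters compatibly and write the results to a separate directory.
main(
    [
        "MasterThin.ufo",
        "MasterBold.ufo",
        "--interpolatable",
        "--conversion-error", "0.001",
        "-d", "build/quadratic",
    ]
)
```
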
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/otlLib/optimize/__init__.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/otlLib/optimize/__init__.py
deleted file mode 100644
index 25bce9cd2cdaa51338c83b7ecb9059b592b5574f..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/otlLib/optimize/__init__.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from argparse import RawTextHelpFormatter
-from fontTools.otlLib.optimize.gpos import COMPRESSION_LEVEL, compact
-from fontTools.ttLib import TTFont
-
-
-def main(args=None):
- """Optimize the layout tables of an existing font"""
- from argparse import ArgumentParser
-
- from fontTools import configLogger
-
- parser = ArgumentParser(
- prog="otlLib.optimize",
- description=main.__doc__,
- formatter_class=RawTextHelpFormatter,
- )
- parser.add_argument("font")
- parser.add_argument(
- "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file"
- )
- parser.add_argument(
- "--gpos-compression-level",
- help=COMPRESSION_LEVEL.help,
- default=COMPRESSION_LEVEL.default,
- choices=list(range(10)),
- type=int,
- )
- logging_group = parser.add_mutually_exclusive_group(required=False)
- logging_group.add_argument(
- "-v", "--verbose", action="store_true", help="Run more verbosely."
- )
- logging_group.add_argument(
- "-q", "--quiet", action="store_true", help="Turn verbosity off."
- )
- options = parser.parse_args(args)
-
- configLogger(
- level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
- )
-
- font = TTFont(options.font)
- compact(font, options.gpos_compression_level)
- font.save(options.outfile or options.font)
-
-
-if __name__ == "__main__":
- import sys
-
- if len(sys.argv) > 1:
- sys.exit(main())
- import doctest
-
- sys.exit(doctest.testmod().failed)
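
A usage sketch for this entry point (the font filename is a placeholder; level 9 is the highest GPOS compression level the option accepts):

```python
from fontTools.otlLib.optimize import main

# Compact the GPOS table and write the optimized font to a new file.
main(["MyFont.ttf", "--gpos-compression-level", "9", "-o", "MyFont.compact.ttf"])
```
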
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/vc1dsp_init_loongarch.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/vc1dsp_init_loongarch.c
deleted file mode 100644
index e72a4a32031b91c489e9ca5271c711d851b7bd72..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/loongarch/vc1dsp_init_loongarch.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2021 Loongson Technology Corporation Limited
- * Contributed by Hao Chen
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/loongarch/cpu.h"
-#include "libavutil/attributes.h"
-#include "libavcodec/vc1dsp.h"
-#include "vc1dsp_loongarch.h"
-
-#define FN_ASSIGN(OP, X, Y, INSN) \
- dsp->OP##vc1_mspel_pixels_tab[1][X+4*Y] = ff_##OP##vc1_mspel_mc##X##Y##INSN; \
- dsp->OP##vc1_mspel_pixels_tab[0][X+4*Y] = ff_##OP##vc1_mspel_mc##X##Y##_16##INSN
-
-#define FN_ASSIGN_V(OP, Y, INSN) \
- dsp->OP##vc1_mspel_pixels_tab[0][4*Y] = ff_##OP##vc1_mspel_mc0##Y##_16##INSN
-
-#define FN_ASSIGN_H(OP, X, INSN) \
- dsp->OP##vc1_mspel_pixels_tab[0][X] = ff_##OP##vc1_mspel_mc##X##0_16##INSN
-
-av_cold void ff_vc1dsp_init_loongarch(VC1DSPContext *dsp)
-{
- int cpu_flags = av_get_cpu_flags();
-
- if (have_lasx(cpu_flags)) {
- dsp->vc1_inv_trans_8x8 = ff_vc1_inv_trans_8x8_lasx;
- dsp->vc1_inv_trans_4x8 = ff_vc1_inv_trans_4x8_lasx;
- dsp->vc1_inv_trans_8x4 = ff_vc1_inv_trans_8x4_lasx;
- dsp->vc1_inv_trans_4x4 = ff_vc1_inv_trans_4x4_lasx;
- dsp->vc1_inv_trans_8x8_dc = ff_vc1_inv_trans_8x8_dc_lasx;
- dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_lasx;
- dsp->vc1_inv_trans_8x4_dc = ff_vc1_inv_trans_8x4_dc_lasx;
- dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_lasx;
- FN_ASSIGN(put_, 1, 1, _lasx);
- FN_ASSIGN(put_, 1, 2, _lasx);
- FN_ASSIGN(put_, 1, 3, _lasx);
- FN_ASSIGN(put_, 2, 1, _lasx);
- FN_ASSIGN(put_, 2, 2, _lasx);
- FN_ASSIGN(put_, 2, 3, _lasx);
- FN_ASSIGN(put_, 3, 1, _lasx);
- FN_ASSIGN(put_, 3, 2, _lasx);
- FN_ASSIGN(put_, 3, 3, _lasx);
- FN_ASSIGN_V(put_, 1, _lasx);
- FN_ASSIGN_V(put_, 2, _lasx);
- FN_ASSIGN_V(put_, 3, _lasx);
- FN_ASSIGN_H(put_, 1, _lasx);
- FN_ASSIGN_H(put_, 2, _lasx);
- FN_ASSIGN_H(put_, 3, _lasx);
- dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_no_rnd_vc1_chroma_mc8_lasx;
- }
-}
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Bingo Journey Free Ticket and Power-ups in a Classic Bingo Game.md b/spaces/congsaPfin/Manga-OCR/logs/Bingo Journey Free Ticket and Power-ups in a Classic Bingo Game.md
deleted file mode 100644
index 58cd5c28840137b33c58a49e19645f006d454cc0..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Bingo Journey Free Ticket and Power-ups in a Classic Bingo Game.md
+++ /dev/null
@@ -1,111 +0,0 @@
-
-Bingo Journey APK: A Free Classic Bingo Game with Lots of Fun and Rewards
-If you are looking for a fun and relaxing way to spend your free time, you might want to try Bingo Journey APK, a free classic bingo game with lots of free tickets and power-ups to claim every day. In this article, we will tell you what Bingo Journey APK is, why you should play it, and how to play it better. Let's get started!
-bingo journey apk Download ··· https://urlca.com/2uO8q7
- What is Bingo Journey APK?
-A brief introduction to the game and its features
-Bingo Journey APK is a free classic bingo game developed by QY Games. It is available for Android devices and can be downloaded from Google Play Store or APKCombo. The game allows you to take a tour in Bingo Journey with your friends and family to join a free bingo world and enjoy an odyssey to places of interest around the world. You can play against thousands of real players in real time, join a club, complete quests, send gifts, and win rewards. The game also features various bingo rooms and themes, such as Halloween, Christmas, Egypt, Greece, etc. The game is updated frequently and adds seasonal and featured bingo games.
-How to download and install the game on your Android device
-To download and install Bingo Journey APK on your Android device, you need to follow these steps:
-
-Go to Google Play Store or APKCombo and search for Bingo Journey APK.
-Tap on the install button and wait for the download to finish.
-Open the game and grant the necessary permissions.
-Enjoy playing Bingo Journey APK!
-
- Why Play Bingo Journey APK?
-The benefits of playing bingo journey apk
-There are many reasons why you should play bingo journey apk. Here are some of them:
-Free tickets and power-ups every day
-You will get 500 tickets & 50 power-ups to start the bingo journey. You can also claim more free tickets and power-ups every day by logging in, completing tasks, opening treasure boxes, etc. You can use these tickets and power-ups to play more bingo games and win more rewards.
-bingo journey apk free download
-bingo journey apk mod
-bingo journey apk latest version
-bingo journey apk for pc
-bingo journey apk hack
-bingo journey apk old version
-bingo journey apk unlimited tickets
-bingo journey apk offline
-bingo journey apk update
-bingo journey apk 2.2.33
-bingo journey lucky casino apk
-bingo journey free classic game apk
-bingo journey super casino board games apk
-bingo journey free ticket and power-ups apk
-bingo journey free bingo world apk
-bingo journey qy games apk
-bingo journey android game apk
-bingo journey app download apk
-bingo journey online game apk
-bingo journey play store apk
-bingo journey modded apk
-bingo journey hacked apk
-bingo journey premium apk
-bingo journey pro apk
-bingo journey full version apk
-bingo journey cracked apk
-bingo journey unlocked apk
-bingo journey cheat apk
-bingo journey unlimited money apk
-bingo journey no ads apk
-download bingo journey - lucky casino 2.2.33 apk xapk
-download bingo journey - lucky casino 2.2.29 apk xapk
-download bingo journey - lucky casino 2.2.26 apk xapk
-download bingo journey - lucky casino 2.2.17 apk xapk
-download bingo journey - lucky casino 2.2.15 apk xapk
-download bingo journey - lucky casino 2.2.13 apk xapk
-download bingo journey - lucky casino 2.2.11 apk xapk
-download bingo journey - lucky casino 2.2.9 apk xapk
-download bingo journey - lucky casino 2.2.7 apk xapk
-download bingo journey - lucky casino 2.2.5 apk xapk
-how to install bingo journey - lucky casino xapk file on android device
-how to play bingo journey - lucky casino on pc with emulator
-how to update bingo journey - lucky casino to the latest version
-how to join a club in bingo journey - lucky casino
-how to get free ticket and power-ups in bingo journey - lucky casino
-how to complete quests in groups in bingo journey - lucky casino
-how to boost your cards for better rewards in bingo journey - lucky casino
-how to use power-ups to help you win more in bingo journey - lucky casino
-how to claim daily bonus and treasure boxes in bingo journey - lucky casino
-how to contact the developer of bingo journey - lucky casino for support or feedback
-Daily bonus and quests to complete
-You can also collect a daily bonus of 180+ tickets every day; the higher your level, the bigger the daily bonus. You can also complete quests, in groups or individually, to earn more tickets, power-ups, coins, gems, and other rewards. The quests are varied and challenging, such as daubing numbers, winning bingos, and playing in different rooms.
-Various bingo rooms and themes to explore
-Bingo Journey APK offers a variety of bingo rooms and themes to choose from: rooms set in different countries (USA, UK, France, Italy, and more), different seasons (spring, summer, autumn, winter), and different festivals (Halloween, Christmas, Valentine's Day, and so on). Each room has its own design, music, sound effects, and special features, so you can enjoy the scenery and culture of different places while playing bingo.
-Social features and club activities
-Bingo Journey APK is not only a game, but also a social platform. You can chat with other players in real time, send and receive gifts, make friends, and join clubs. You can also participate in club activities, such as club bingo, club collection, club tournament, etc. You can cooperate with your club members to win more rewards and rank higher in the leaderboard. You can also compete with other clubs and show your bingo skills.
-Net energy gain and fusion experiments
-Bingo Journey APK is also a game that supports the development of nuclear fusion technology. The game has partnered with the Korea Superconducting Tokamak Advanced Research (KSTAR) facility, which is one of the leading fusion research centers in the world. The game allows you to donate your tickets and power-ups to support the KSTAR's net energy gain and fusion experiments. You can also learn more about the fusion science and technology through the game's information center. By playing Bingo Journey APK, you are not only having fun, but also contributing to the future of clean and sustainable energy.
-The drawbacks of playing Bingo Journey APK
-Of course, no game is perfect, and Bingo Journey APK has some drawbacks as well. Here are some of them:
-The game is not for real money gambling
-Bingo Journey APK is a free classic bingo game for entertainment purposes only. It does not offer real money gambling or an opportunity to win real money or prizes. The game is intended for adult audiences only and does not imply future success at real money gambling. If you are looking for a real money bingo game, you might want to look elsewhere.
-The game may require internet connection and storage space
-Bingo Journey APK is an online game that requires internet connection to play. If you have a poor or unstable network connection, you may experience lagging or crashing issues. The game also requires storage space on your device to download and update. If you have a low-end device or limited storage space, you may encounter performance or compatibility problems. You can try to clear cache or uninstall unnecessary apps to free up some space.
- Tips and Tricks for Playing Bingo Journey APK
-How to boost your cards and get more rewards
-One of the ways to improve your chances of winning bingo games is to boost your cards. You can boost your cards by using gems or watching ads. Boosting your cards will increase the number of bingos you can get on each card, as well as the rewards you can earn from each bingo. For example, if you boost your card by 100%, you can get 2 bingos on each card instead of 1, and double the rewards from each bingo. You can boost your cards up to 500%, depending on the room you are playing in.
-How to use power-ups and treasure boxes wisely
-Another way to enhance your bingo experience is to use power-ups and treasure boxes. Power-ups are special items that can help you daub numbers faster, get more bingos, or win more rewards. You can get power-ups by buying them with coins or gems, or by opening treasure boxes. Treasure boxes are randomly dropped on your cards during the game, and they contain various prizes, such as tickets, power-ups, coins, gems, etc. You can open treasure boxes by daubing them or by using keys.
-However, you should use power-ups and treasure boxes wisely, as they are limited and valuable resources. You should not waste them on low-level rooms or easy games, but save them for high-level rooms or challenging games. You should also use them strategically, depending on the situation and the goal of the game. For example, if you want to get more bingos, you can use power-ups like double daub or instant bingo. If you want to get more rewards, you can use power-ups like double reward or lucky reward.
-How to join a club and cooperate with other players
-The last tip we want to share with you is how to join a club and cooperate with other players. Joining a club is one of the best ways to enjoy Bingo Journey APK more fully. You can join a club by applying for one or creating your own. Once you join a club, you can chat with other club members, send and receive gifts, participate in club activities, and win more rewards.
-To cooperate with other players, you can use the chat feature to communicate with them during the game. You can also use the gift feature to send them tickets or power-ups to help them out. You can also join forces with them in club activities, such as club bingo or club tournament. In club bingo, you can play with your club members in a special room and share the rewards. In club tournament, you can compete with other clubs and win more prizes. You can also earn club points by playing bingo games and contribute to your club's ranking and reputation.
- Conclusion
-Bingo Journey APK is a free classic bingo game that offers you a lot of fun and rewards. You can play in various bingo rooms and themes, collect free tickets and power-ups, complete daily bonus and quests, join a club and cooperate with other players, and support the fusion research. The game is easy to play and suitable for all ages. However, the game is not for real money gambling and may require internet connection and storage space. If you are interested in playing Bingo Journey APK, you can download it from Google Play Store or APKCombo and start your bingo journey today!
- FAQs
-Here are some frequently asked questions about Bingo Journey APK:
-
-Q: How can I get more gems in Bingo Journey APK?
-A: You can get more gems by completing quests, opening treasure boxes, watching ads, or buying them with real money.
-Q: How can I change my avatar or name in Bingo Journey APK?
-A: You can change your avatar or name by tapping on the profile icon on the top left corner of the screen and then choosing the edit option.
-Q: How can I contact the customer service or report a problem in Bingo Journey APK?
-A: You can contact the customer service or report a problem by tapping on the settings icon on the top right corner of the screen and then choosing the feedback option.
-Q: How can I update Bingo Journey APK to the latest version?
-A: You can update Bingo Journey APK to the latest version by going to Google Play Store or APKCombo and checking for updates.
-Q: How can I uninstall Bingo Journey APK from my device?
-A: You can uninstall Bingo Journey APK from your device by going to your device's settings and then choosing the apps option. Then, find Bingo Journey APK and tap on the uninstall option.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Cara Mudah Download Video TikTok Tanpa Watermark Menggunakan TikTok Lite Mod APK.md b/spaces/congsaPfin/Manga-OCR/logs/Cara Mudah Download Video TikTok Tanpa Watermark Menggunakan TikTok Lite Mod APK.md
deleted file mode 100644
index 8d8544ebf517644d595397e7172cba13a628a602..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Cara Mudah Download Video TikTok Tanpa Watermark Menggunakan TikTok Lite Mod APK.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-How to Download the TikTok Lite Mod APK Without a Watermark
-TikTok is one of the most popular social media platforms, letting you create and share short videos with millions of users. However, if you want to enjoy TikTok without ads, watermarks, or other limitations, you might want to try TikTok Lite Mod APK. In this article, we will explain what TikTok Lite Mod APK is, how to remove the watermark from TikTok videos, and how to download the TikTok Lite Mod APK without a watermark ("tanpa watermark").
- What is TikTok Lite Mod APK?
-TikTok Lite Mod APK is a modified version of the official TikTok app that offers some extra features and benefits. It is faster and smaller than the original app, which means it can run smoothly on low-end devices and save data usage. It also removes all the annoying ads and watermarks that appear on the videos, giving you a better viewing experience. Moreover, it allows you to access all the premium features of TikTok for free, such as filters, stickers, effects, music, and more.
-Download link: https://urlca.com/2uObQ3
- Features of TikTok Lite Mod APK
-
-No ads or watermarks on videos
-All premium features unlocked
-Unlimited likes, followers, and comments
-High-quality video resolution
-Customizable interface and settings
-Supports multiple languages
-
- Benefits of TikTok Lite Mod APK
-
-It saves your device storage and data consumption
-It enhances your video editing and sharing skills
-It boosts your social media presence and popularity
-It entertains you with endless content and fun
-It respects your privacy and security
-
- How to Remove Watermark from TikTok Videos
-If you want to download or share TikTok videos without the watermark, you have several options. Here are some methods that you can try:
- Method 1: Crop it out of the video
-This is the simplest approach, but it will change the video's aspect ratio and may cut off some important parts of the video. You can use any video editing app that has a cropping feature, such as InShot or VivaVideo. Just import the video to the app, select the cropping tool, and adjust the frame to exclude the watermark. Then save and export the video.
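-The article suggests mobile editing apps; if you would rather do the same crop on a computer, the following is a minimal Python/OpenCV sketch of the idea (an illustration only, not part of the original guide: the file names and the 120-pixel bottom crop are assumptions, and OpenCV writes video without audio, so the soundtrack would need to be re-attached with another tool).
-```python
-import cv2
-
-# Hypothetical paths -- replace with your own files.
-src = cv2.VideoCapture("input.mp4")
-fps = src.get(cv2.CAP_PROP_FPS)
-width = int(src.get(cv2.CAP_PROP_FRAME_WIDTH))
-height = int(src.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-# Assume the watermark sits in the bottom 120 pixels; keep everything above it.
-crop_h = height - 120
-dst = cv2.VideoWriter("cropped.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, crop_h))
-
-while True:
-    ok, frame = src.read()
-    if not ok:
-        break
-    dst.write(frame[:crop_h, :, :])  # drop the bottom strip of each frame
-
-src.release()
-dst.release()
-```
-Cropping permanently discards part of every frame, which is why the next two methods try to paint over the mark instead of cutting it off.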
- Method 2: Use an app to remove the watermark
-There are some apps that are designed to remove watermarks from videos automatically, such as Video Eraser or Remove & Add Watermark. You just need to download the app from Google Play or App Store, open it, and select the video that you want to edit. Then choose the option to remove watermark or erase logo, and highlight the area with the watermark. The app will process the video and remove the watermark for you.
- Method 3: Use a video editing tool to remove it
-If you want more control over the editing process, you can use a more advanced video editing tool, such as Kinemaster or FilmoraGo. These tools have more features and options that can help you remove watermarks from videos effectively. You can use tools like clone stamp to copy and paste the pixels from the surrounding area to cover the watermark, or tools like blur or mosaic to make the watermark less visible. You can also add some text or stickers to hide the watermark. After editing, save and export the video.
- Method 4: Save your video without a watermark in the first place
-The best way to avoid watermarks on your TikTok videos is to save them without a watermark in the first place. This can be done by using TikTok Lite Mod APK, which allows you to download and share videos without any watermarks. You can also use some third-party websites or apps that can download TikTok videos without watermarks, such as MusicallyDown or SnapTik. You just need to copy and paste the video link to the website or app, and download the video without watermark.
- How to Download the TikTok Lite Mod APK Without a Watermark
-Now that you know how to remove watermarks from TikTok videos, you might want to download the TikTok Lite Mod APK itself. This will give you access to all the features and benefits of TikTok Lite Mod APK and let you save and share videos without watermarks. Here are the steps:
- Step 1: Go to a trusted website that offers the mod apk
-The first step is to find a reliable and safe website that provides the mod apk file for TikTok Lite Mod APK. You can search for it on Google or ask other users for recommendations.
-
-Make sure that the website is secure and has positive reviews from other users. Avoid any websites that ask for your personal information or require you to complete surveys or offers.
- Step 2: Download the mod apk file to your device
-Once you have found a suitable website, click on the download button and wait for the mod apk file to be downloaded to your device. The file size may vary depending on the website, but it should be around 30 MB. You can check the progress of the download in your notification bar or in your file manager.
- Step 3: Enable unknown sources in your settings
-Before you can install the mod apk file, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to your settings, then security, then unknown sources, and toggle it on. You may see a warning message, but just ignore it and confirm your choice.
- Step 4: Install the mod apk file and launch the app
-The final step is to install the mod apk file and launch the app. To do this, go to your file manager and locate the mod apk file that you downloaded. Tap on it and follow the instructions on the screen to install it. Once it is installed, you can open it and enjoy TikTok Lite Mod APK without any watermarks.
- Conclusion
-TikTok Lite Mod APK is a way to enjoy TikTok without ads, watermarks, or limitations. It offers many features and benefits that can enhance your video creation and sharing experience, and it makes removing watermarks from TikTok videos straightforward. To download the TikTok Lite Mod APK without a watermark, just follow the steps above and install it on your device. We hope this article was helpful and informative. If you have any questions or feedback, feel free to leave a comment below.
- FAQs
-
-Is TikTok Lite Mod APK safe?
-TikTok Lite Mod APK is safe as long as you download it from a trusted website that does not contain any viruses or malware. However, you should always be careful when installing apps from unknown sources and scan them with an antivirus app before opening them.
-Is TikTok Lite Mod APK legal?
-TikTok Lite Mod APK is not legal as it violates the terms and conditions of TikTok. It also infringes on the intellectual property rights of TikTok and its content creators. Therefore, we do not recommend using it for any illegal or unethical purposes.
-Can I use TikTok Lite Mod APK with my existing TikTok account?
-Yes, you can use TikTok Lite Mod APK with your existing TikTok account. You just need to log in with your username and password as usual. However, you should be aware that using TikTok Lite Mod APK may result in your account being banned or suspended by TikTok if they detect any suspicious activity.
-What are the differences between TikTok Lite and TikTok Lite Mod APK?
-TikTok Lite is the official version of TikTok that is designed for low-end devices and low data consumption. It has fewer features and options than the original TikTok app, but it still allows you to create and watch videos. TikTok Lite Mod APK is a modified version of TikTok Lite that adds more features and benefits, such as no ads, no watermarks, all premium features unlocked, unlimited likes, followers, and comments, and more.
-How can I update TikTok Lite Mod APK?
-TikTok Lite Mod APK does not have an automatic update feature, so you need to manually update it whenever there is a new version available. To do this, you need to uninstall the old version of the mod apk file and download and install the new version from the same website that you used before. Alternatively, you can check for updates from within the app and follow the instructions on the screen.
-How can I contact the developer of TikTok Lite Mod APK?
-TikTok Lite Mod APK is not developed by TikTok or any official entity, but by independent developers who modify the original app for their own purposes. Therefore, there is no official way to contact the developer of TikTok Lite Mod APK. However, you may find some contact information or feedback forms on the website that you downloaded the mod apk file from.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Fly GPS for Pokemon GO and Enjoy the Game Like Never Before.md b/spaces/congsaPfin/Manga-OCR/logs/Download Fly GPS for Pokemon GO and Enjoy the Game Like Never Before.md
deleted file mode 100644
index 3aeb25e1176f3558d82839e68611eeb4713fa652..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download Fly GPS for Pokemon GO and Enjoy the Game Like Never Before.md
+++ /dev/null
@@ -1,115 +0,0 @@
-
-Download Fly GPS Pokemon Go: A Complete Guide
-Pokemon Go is a popular mobile game that lets you catch, battle, and trade virtual creatures in the real world. However, not everyone has the time or opportunity to explore different locations and find rare Pokemon. That's why some players use a tool called fly gps pokemon go, which allows them to spoof their location and access any area in the game.
-But what is fly gps pokemon go exactly, and how can you use it? In this article, we will explain everything you need to know about this app, including how to download it, how to use it on Android and iOS devices, and what are the benefits and risks of using it. Let's get started!
-Download link: https://urlca.com/2uOb7l
- How to Use Fly GPS Pokemon Go on Android Devices
-If you have an Android device, you can use fly gps pokemon go to change your location in Pokemon Go with a few simple steps. Here's how:
-
-Delete the official Pokemon Go app on your Android device.
-Download the PGSharp app from its official website or from an apk file online. This is a modified version of Pokemon Go that has a built-in spoofing feature.
-Log in to your account on the PGSharp app.
-Use the teleport feature to choose any location you want to spoof. You can also use the joystick to move around.
-Launch Pokemon Go from the PGSharp app and enjoy catching Pokemon from anywhere in the world.
-
- How to Use Fly GPS Pokemon Go on iOS Devices
-If you have an iOS device, you can also use fly gps pokemon go to spoof your location in Pokemon Go, but you will need a different method. Here's how:
-
-Download and install iMoveGo on your computer. This is a software that can change your iPhone's GPS location with one click.
-Connect your iPhone to your computer with a USB cable and launch iMoveGo.
-Select the Teleport Mode on iMoveGo and enter any location you want to spoof.
-Click on Move and confirm the change on your iPhone.
-Launch Pokemon Go on your iPhone and enjoy catching Pokemon from anywhere in the world.
-
- The Benefits and Risks of Using Fly GPS Pokemon Go
-Using fly gps pokemon go can have some benefits and risks for your gaming experience. Here are some of them:
- The Benefits
-
-You can catch more Pokemon, especially rare ones that are not available in your region.
-You can participate in more events, raids, and battles that are happening in other locations.
-You can save time and money by not having to travel to different places.
-You can have more fun and challenge by exploring new areas and discovering new Pokemon.
-
- The Risks
-
-You may violate the game's terms of service and get banned or suspended by Niantic, the game's developer.
-You may encounter bugs, glitches, or errors that may affect your gameplay or damage your device.
-You may lose the thrill and excitement of playing Pokemon Go as intended, which is to explore the real world and interact with other players.
-You may face ethical or legal issues if you spoof your location in restricted or sensitive areas.
-
- Conclusion
-Fly gps pokemon go is a tool that can help you spoof your location in Pokemon Go and access any area in the game. However, it also comes with some drawbacks and dangers that you should be aware of before using it. Ultimately, it is up to you to decide whether you want to use it or not, but we recommend that you play Pokemon Go responsibly and respect the game's rules and spirit.
- If you want to download fly gps pokemon go, you can follow the steps we provided above for Android or iOS devices. Alternatively, you can also use other apps or methods that can spoof your location for Pokemon Go, such as VPNs or mock location apps. However, make sure that you use them safely and wisely, and that you do not abuse them or harm others.
- FAQs
-Here are some frequently asked questions and answers about fly gps pokemon go:
- Q: Is fly gps pokemon go legal?
-A: Fly gps pokemon go is not illegal, but it is against the game's terms of service and may result in a ban or suspension from Niantic. Therefore, we do not recommend using it or any other spoofing tool for Pokemon Go.
- Q: How can I avoid getting banned by using fly gps pokemon go?
-A: There is no guarantee that you will not get banned by using fly gps pokemon go, but you can reduce the risk by following some tips, such as:
-
-Do not spoof your location too far or too frequently.
-Do not use multiple accounts or devices for spoofing.
-Do not participate in activities that require verification, such as gym battles or trading.
-Do not brag or share your spoofing activities with others.
-
- Q: What are some alternatives to fly gps pokemon go?
-A: Some alternatives to fly gps pokemon go are:
-
-VPNs: These are apps that can change your IP address and make it appear that you are in a different location. However, they may not work well with Pokemon Go and may slow down your internet connection.
-Mock location apps: These are apps that can fake your GPS location and make it appear that you are in a different location. However, they may require root access or developer options on your device and may be detected by Niantic.
-Hardware devices: These are devices that can spoof your GPS signal and make it appear that you are in a different location. However, they may be expensive, complicated, or risky to use and may damage your device.
-
- Q: Can I use fly gps pokemon go with other apps or games?
-A: Yes, you can use fly gps pokemon go with other apps or games that use your location data, such as Google Maps, Snapchat, or Ingress. However, you should be careful not to violate their terms of service or cause any trouble for yourself or others.
- Q: Can I use fly gps pokemon go without internet connection?
-A: No, you cannot use fly gps pokemon go without internet connection. You need to have a stable and reliable internet connection to use this app and to play Pokemon Go. Otherwise, you may experience errors or failures in spoofing your location or accessing the game.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Free Fire Hack Zip File Download - Unlimited Diamonds and Coins for Free.md b/spaces/congsaPfin/Manga-OCR/logs/Free Fire Hack Zip File Download - Unlimited Diamonds and Coins for Free.md
deleted file mode 100644
index a27bde04d02005820620ed7e3aa4c436937ebc3c..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Free Fire Hack Zip File Download - Unlimited Diamonds and Coins for Free.md
+++ /dev/null
@@ -1,92 +0,0 @@
-
-How to Get Unlimited Diamonds in Free Fire with Zip File Download
-If you are a fan of Free Fire, the popular battle royale game, you probably know how important diamonds are. Diamonds are the premium currency in Free Fire, which you can use to buy various items, such as skins, characters, pets, weapons, emotes, and more. Diamonds can also help you unlock elite passes, which give you access to exclusive rewards and challenges.
-However, diamonds are not easy to come by. You have to spend real money to buy them from the in-game store or other platforms. This can be quite expensive and not everyone can afford it. That's why many players look for ways to get unlimited diamonds in Free Fire without spending a dime.
-Download link: https://urlca.com/2uOcyL
-One of the methods that some players use is zip file download. This is a technique that involves downloading and installing a zip file that contains modified game data that can give you unlimited diamonds in Free Fire. Sounds too good to be true, right? Well, there are some benefits and risks of using this method, which we will discuss in this article.
- What is a Zip File Download and How Does It Work?
-A zip file is a compressed file that contains one or more files or folders. Zip files are commonly used to reduce the size of files and make them easier to share or store. Zip files can also be encrypted or password-protected for security reasons.
-A zip file download for Free Fire is a zip file that contains modified game data that can alter the game's features or functions. For example, some zip files claim to give you unlimited diamonds in Free Fire by changing the value of your diamond balance or bypassing the verification process. Some zip files may also offer other hacks or cheats, such as aimbot, wallhack, auto headshot, etc.
-To use a zip file download for Free Fire, you need to follow these steps:
-
-Find a reliable source that offers a zip file download for Free Fire. You can search online or ask other players for recommendations.
-Download the zip file to your device. Make sure you have enough storage space and a stable internet connection.
-Extract the zip file using a file manager app or a zip extractor app. You may need to enter a password if the zip file is encrypted.
-Copy or move the extracted files or folders to the Free Fire game folder. You may need to overwrite or delete some existing files or folders.
-Launch the game and enjoy your unlimited diamonds in Free Fire.
-
-Many zip files circulating online claim to offer unlimited diamonds in Free Fire.
-
- How to Verify the Legitimacy and Safety of a Zip File Download
-While zip file download may sound tempting, it is not without risks and drawbacks. There are many fake, malicious, or outdated zip files that can harm your device, your game, or your account. Here are some tips to verify the legitimacy and safety of a zip file download before using it:
-
-Check the source of the zip file. Is it from a reputable website, a trusted developer, or a verified user? Avoid downloading zip files from unknown, suspicious, or shady sources.
-Check the reviews and ratings of the zip file. What do other users say about it? Are they positive, negative, or mixed? Look for honest feedback and testimonials from real users who have tried the zip file.
-Check the date and version of the zip file. Is it compatible with the latest version of Free Fire? Does it have any bugs or glitches? Avoid using outdated or incompatible zip files that can cause errors or crashes.
-Scan the zip file for viruses and malware. Use a reliable antivirus or anti-malware app to scan the zip file before opening or extracting it, and delete any zip file that contains harmful or unwanted programs. If the download page publishes a checksum, you can also compare it against the file you actually received (see the sketch after this list).
-Backup your game data and account before using the zip file. Make sure you have a copy of your game data and account information in case something goes wrong. You can use a cloud service or an external storage device to backup your data.
-
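-The checksum comparison mentioned above is not something the original article spells out, so treat the following as an optional, hypothetical extra check: a short Python sketch that computes a file's SHA-256 digest so you can compare it with a value published by the download source (the file name and the expected digest below are placeholders).
-```python
-import hashlib
-
-def sha256_of(path, chunk_size=1 << 20):
-    """Return the SHA-256 hex digest of a file, read in chunks."""
-    digest = hashlib.sha256()
-    with open(path, "rb") as f:
-        while chunk := f.read(chunk_size):
-            digest.update(chunk)
-    return digest.hexdigest()
-
-# Placeholder values -- use your downloaded file and the checksum the site lists.
-if sha256_of("modpack.zip") == "expected-sha256-digest-goes-here":
-    print("Checksum matches the published value.")
-else:
-    print("Checksum mismatch -- do not install this file.")
-```
-A matching digest only proves the file was not altered after the checksum was published; it says nothing about whether the original upload is safe, so the antivirus scan above still applies.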
- Alternatives to Zip File Download for Getting Unlimited Diamonds in Free Fire
-If you are not comfortable with using zip file download for getting unlimited diamonds in Free Fire, there are other alternatives that you can try. These are legal and safe ways to earn diamonds in Free Fire without risking your device, your game, or your account. Here are some of them:
-
-Earn diamonds in Free Fire by completing tasks, watching ads, and participating in events. Free Fire offers various opportunities for players to earn free diamonds by playing the game. You can complete daily or weekly tasks, watch video ads, or join special events to get diamonds as rewards.
-Earn diamonds in Free Fire by using third-party apps and websites. There are some apps and websites that offer free diamonds in exchange for completing surveys, referrals, or other activities. For example, you can use Booyah!, Poll Pay, or Google Opinion Rewards to earn free diamonds in Free Fire. However, be careful when using these apps and websites and make sure they are legitimate and secure.
-Earn diamonds in Free Fire by using promo codes, gift cards, and vouchers. Free Fire occasionally releases promo codes that can be redeemed for free diamonds or other items. You can find these promo codes on the official social media pages or websites of Free Fire. You can also use gift cards or vouchers from various platforms, such as Google Play Store, Amazon, or Paytm, to buy diamonds in Free Fire at discounted prices.
-
- Conclusion and FAQs
-In conclusion, zip file download is one of the methods that some players use to get unlimited diamonds in Free Fire. However, this method is not recommended as it can be risky and illegal. There are many fake, malicious, or outdated zip files that can harm your device, your game, or your account. Therefore, it is better to use legal and safe alternatives to earn diamonds in Free Fire, such as completing tasks, watching ads, participating in events, using third-party apps and websites, or using promo codes, gift cards, and vouchers.
-If you have any questions about unlimited diamonds free fire zip file download, here are some FAQs that may help you:
-
-Q: Is zip file download for Free Fire legal? A: No, zip file download for Free Fire is not legal. It is considered as cheating or hacking, which violates the terms of service and policies of Free Fire. If you use zip file download for Free Fire, you may face consequences such as account suspension or ban.
-Q: Is zip file download for Free Fire safe? A: No, zip file download for Free Fire is not safe. There are many fake, malicious, or outdated zip files that can harm your device, your game, or your account. You may also expose your personal or financial information to hackers or scammers. Therefore, it is advisable to scan the zip file for viruses and malware, and backup your game data and account before using it.
-Q: How can I get unlimited diamonds in Free Fire legally and safely? A: There are several legal and safe ways to get unlimited diamonds in Free Fire, such as completing tasks, watching ads, participating in events, using third-party apps and websites, or using promo codes, gift cards, and vouchers. These methods may take some time and effort, but they are more reliable and rewarding than zip file download.
-Q: Where can I find the best zip file download for Free Fire? A: There is no definitive answer to this question, as different zip files may have different features, functions, and quality. However, some general tips to find the best zip file download for Free Fire are to check the source, reviews, ratings, date, and version of the zip file before downloading it. You can also ask other players for recommendations or suggestions.
-Q: What are the benefits of having unlimited diamonds in Free Fire? A: Having unlimited diamonds in Free Fire can give you many benefits, such as buying various items, unlocking elite passes, customizing your character, enhancing your gameplay, and more. However, these benefits may come at a cost if you use zip file download for Free Fire. You may lose your device, your game, or your account if you use a fake, malicious, or outdated zip file.
-
- I hope you enjoyed reading this article and learned something new about unlimited diamonds free fire zip file download. If you have any comments or feedback, please feel free to share them with me. Thank you for your time and attention.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/GB WhatsApp APK Download The Ultimate Guide to the Green Colour Theme.md b/spaces/congsaPfin/Manga-OCR/logs/GB WhatsApp APK Download The Ultimate Guide to the Green Colour Theme.md
deleted file mode 100644
index 665b2f158ee20879cced8c0ca0bb0ef7f123955a..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/GB WhatsApp APK Download The Ultimate Guide to the Green Colour Theme.md
+++ /dev/null
@@ -1,123 +0,0 @@
-
-GB WhatsApp APK Download Green Colour: What You Need to Know
-WhatsApp is one of the most popular and widely used messaging apps in the world. It allows you to send text messages, voice messages, photos, videos, documents, and more to your contacts for free. However, some users may want more features and customization options than the official WhatsApp app offers. That's where GB WhatsApp comes in.
-Download link: https://urlca.com/2uO8Q1
-GB WhatsApp is a modified version of WhatsApp that has many additional features and functions that are not available in the original app. For example, you can use two WhatsApp accounts on the same device, hide your online status, send larger files, schedule messages, and more. One of the most attractive features of GB WhatsApp is that you can change the theme and colour of the app according to your preference. In this article, we will show you how to download and install GB WhatsApp APK green colour, which is one of the most popular colours among users. We will also compare GB WhatsApp and official WhatsApp to help you decide which one is better for you.
- What is GB WhatsApp?
-GB WhatsApp is a modified version of WhatsApp that has been created by third-party developers who wanted to add more features and customization options to the original app. GB WhatsApp breaks the limitations set by the official WhatsApp app and also offers improved features. The most notable feature GB WhatsApp offers is the use of two Whatsapp accounts on the same device.
-GB WhatsApp Features
-Some of the features that GB WhatsApp offers are:
-
-You can send up to 90 images at once, instead of 30 in the original app.
-You can send voice messages up to 100 MB, instead of 16 MB in the original app.
-You can send media files or videos up to 50 MB, instead of 16 MB in the original app.
-You can set a group name with up to 35 characters, instead of 25 in the original app.
-You can create a broadcast list with up to 600 contacts, instead of 250 in the original app.
-You can hide your online status, last seen, blue ticks, double ticks, typing notification, and recording notification from specific contacts or groups.
-You can set an auto-reply message for incoming messages, like in WhatsApp Business.
-You can schedule messages to be sent at a specific time or date.
-You can download other users' status and copy their status text to your clipboard.
-You can customize the theme and colour of the app according to your preference. You can also use themes created by other users for free.
-You can lock your chats with a password or fingerprint.
-You can use multiple languages, including English, Spanish, Chinese, Arabic, Hindi, and more.
-You can use more emojis and stickers than the original app.
-
- GB WhatsApp Advantages
-Some of the advantages of using GB WhatsApp are:
-
-You can enjoy more features and customization options than the original app.
-You can have more control over your privacy and security settings.
-You can use two WhatsApp accounts on the same device without any hassle.
-You can send larger files and more media than the original app.
-You can have more fun and creativity with your chats and status.
-GB WhatsApp Disadvantages
-Some of the disadvantages of using GB WhatsApp are:
-
-You may face some bugs and glitches in the app, as it is not an official version of WhatsApp.
-You may risk losing your data or getting banned from WhatsApp, as using a modified version of WhatsApp violates their terms of service.
-You may not get the latest updates and features from the official WhatsApp app, as GB WhatsApp may take some time to update their version.
-You may compromise your security and privacy, as GB WhatsApp is not verified by Google Play Store or any other trusted source. You may also expose your device to malware or viruses by downloading GB WhatsApp from unknown sources.
-
- How to Download and Install GB WhatsApp APK Green Colour?
-If you want to download and install GB WhatsApp APK green colour on your device, you need to follow these steps:
-Step 1: Enable Unknown Sources
-Before you can install GB WhatsApp APK green colour, you need to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store or any other trusted source. To do this, go to your device settings and look for security or privacy options. Then, find the option that says unknown sources or allow installation from unknown sources and turn it on. You may see a warning message that says installing from unknown sources may harm your device, but you can ignore it if you trust the source of GB WhatsApp APK green colour.
-Step 2: Download GB WhatsApp APK File
-Next, you need to download the GB WhatsApp APK file from a reliable source. You can search for GB WhatsApp APK green colour on Google or any other search engine and find a link that leads you to the download page. Make sure that the link is from a reputable website and that the file size matches the expected size of GB WhatsApp APK green colour. You can also scan the file with an antivirus app before downloading it to ensure that it is safe and free from malware or viruses. Once you have downloaded the file, save it in a location that you can easily access on your device.
-Step 3: Install and Launch GB WhatsApp
-Finally, you need to install and launch GB WhatsApp on your device. To do this, go to the location where you saved the GB WhatsApp APK file and tap on it. You may see a pop-up message that asks you to confirm the installation of GB WhatsApp. Tap on install and wait for the installation process to complete. Once it is done, you will see a message that says app installed. Tap on open and launch GB WhatsApp on your device. You will see a green colour theme on the app and you can start using it as you would use the official WhatsApp app. You can also verify your phone number and restore your chat backup if you have one.
- Comparison of GB WhatsApp and Official WhatsApp
-To help you decide which one is better for you, here is a comparison of GB WhatsApp and official WhatsApp based on some key aspects:
-
-| Aspect | GB WhatsApp | Official WhatsApp |
-| --- | --- | --- |
-| Messages and Media | You can send more messages and media than the original app. You can also send larger files and schedule messages. | You can send text messages, voice messages, photos, videos, documents, and more to your contacts for free. However, there are some limitations on the number and size of files you can send. |
-| Status | You can download other users' status and copy their status text to your clipboard. You can also set a longer status text than the original app. | You can share your status with your contacts for 24 hours. You can also view other users' status and reply to them. |
-| Chats and Themes | You can customize the theme and colour of the app according to your preference. You can also use themes created by other users for free. You can also lock your chats with a password or fingerprint. | You can choose between light and dark modes for the app. You can also change the wallpaper of your chats. However, you cannot change the theme or colour of the app. |
-| Privacy | You can hide your online status, last seen, blue ticks, double ticks, typing notification, and recording notification from specific contacts or groups. You can also set an auto-reply message for incoming messages, like in WhatsApp Business. | You can block or mute contacts or groups that you don't want to receive messages from. You can also report spam or abuse. However, you cannot hide your online status, last seen, or read receipts from specific contacts or groups. |
-
- Conclusion
-GB WhatsApp is a modified version of WhatsApp that offers more features and customization options than the original app. You can use two WhatsApp accounts on the same device, change the theme and colour of the app, send larger files and more media, hide your online status and read receipts, and more. However, GB WhatsApp also has some disadvantages, such as being prone to bugs and glitches, risking your data and account security, not getting the latest updates and features from the official app, and violating the terms of service of WhatsApp. Therefore, you should weigh the pros and cons of using GB WhatsApp before downloading and installing it on your device.
- FAQs
-Here are some frequently asked questions about GB WhatsApp:
-
-Is GB WhatsApp safe to use?
-GB WhatsApp is not an official version of WhatsApp and it is not verified by Google Play Store or any other trusted source. Therefore, it may not be safe to use, as it may expose your device to malware or viruses, compromise your data or account security, or violate the terms of service of WhatsApp. You should only download GB WhatsApp from a reliable source and scan it with an antivirus app before installing it.
-Can I use GB WhatsApp and official WhatsApp on the same device?
-Yes, you can use GB WhatsApp and official WhatsApp on the same device without any problem. You just need to use different phone numbers for each app and install them in different folders on your device.
-How can I update GB WhatsApp?
-GB WhatsApp does not update automatically like the official WhatsApp app. You need to check for updates manually by visiting the website of GB WhatsApp or any other source where you downloaded it from. You can also enable the auto-update option in the settings of GB WhatsApp to get notified when a new version is available.
-How can I change the theme and colour of GB WhatsApp?
-You can change the theme and colour of GB WhatsApp by going to the settings of the app and selecting themes. You can choose from a variety of themes and colours that are available for free. You can also download themes created by other users or create your own theme using the theme maker option.
-How can I backup my chats on GB WhatsApp?
-You can backup your chats on GB WhatsApp by going to the settings of the app and selecting chats. You can choose to backup your chats to Google Drive or your device storage. You can also set a frequency for your backup, such as daily, weekly, or monthly.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Get Dragon Ball Legends MOD APK for iOS and Dominate the Game with Critical Hits No Ads and More.md b/spaces/congsaPfin/Manga-OCR/logs/Get Dragon Ball Legends MOD APK for iOS and Dominate the Game with Critical Hits No Ads and More.md
deleted file mode 100644
index 440df9c6a0826eaf132ca1512fc46173068ac6eb..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Get Dragon Ball Legends MOD APK for iOS and Dominate the Game with Critical Hits No Ads and More.md
+++ /dev/null
@@ -1,116 +0,0 @@
-
-Dragon Ball Legends Mod APK iOS: How to Download and Play
-If you are a fan of the Dragon Ball franchise, you might have heard of Dragon Ball Legends, a popular mobile game that lets you fight with your favorite characters from the anime series. But did you know that you can also play the game with a mod apk on your iOS device? In this article, we will tell you everything you need to know about Dragon Ball Legends mod apk ios, including what it is, why you should use it, how to download and install it, and how to play it. Let's get started!
-Download link: https://urlca.com/2uOa2G
-What is Dragon Ball Legends?
-Dragon Ball Legends is a 3D action RPG game that was released in 2018 by Bandai Namco Entertainment. The game features an original story that involves a new character named Shallot, who is a Saiyan from the past. You can also play as other iconic characters from the Dragon Ball universe, such as Goku, Vegeta, Frieza, Cell, and more. The game has various modes, such as story mode, PvP mode, co-op mode, and events mode. You can also customize your characters with different outfits, skills, and equipment.
-What is a mod apk?
-A mod apk is a modified version of an original app that has been altered by someone to add or remove some features. For example, a mod apk for Dragon Ball Legends might have unlimited crystals, coins, and energy, which are the in-game currencies that you need to unlock new characters, items, and upgrades. A mod apk might also have other features, such as unlocked characters, unlimited health, damage boost, and more.
-However, a mod apk is not the same as the original app. It might not work properly on your device or have some bugs or errors. It might also be detected by the game developers or other players and result in a ban or suspension from the game. Moreover, a mod apk might contain viruses or malware that can harm your device or steal your personal information. Therefore, you should be careful when downloading and installing a mod apk from an unknown source.
-Why use a mod apk for Dragon Ball Legends?
-There are some reasons why you might want to use a mod apk for Dragon Ball Legends. Here are some of them:
-
-You can enjoy the game without spending any money. A mod apk can give you unlimited crystals, coins, and energy that you can use to unlock new characters, items, and upgrades. You can also skip the ads and surveys that might interrupt your gameplay.
-You can have more fun and challenge in the game. A mod apk can give you access to all the characters in the game, including the ones that are exclusive or limited. You can also try different skills and equipment combinations that might not be possible in the original app. You can also increase or decrease the difficulty level of the game according to your preference.
-You can explore the game more freely and creatively. A mod apk can let you play the game in different modes and scenarios that might not be available in the original app. You can also experiment with different settings and options that might not be allowed in the original app. You can also create your own scenarios and stories with the characters and events in the game.
-
-However, there are also some drawbacks of using a mod apk for Dragon Ball Legends. Here are some of them:
-
-You might get banned or suspended from the game. A mod apk might be detected by the game developers or other players and result in a ban or suspension from the game. You might lose your progress, achievements, and rewards in the game. You might also face legal consequences for violating the terms and conditions of the game.
-You might get viruses or malware on your device. A mod apk might contain viruses or malware that can harm your device or steal your personal information. You might also expose your device to hackers or cybercriminals who might access your data or accounts. You might also damage your device or cause it to malfunction.
-You might miss out on the updates and features of the original app. A mod apk might not be compatible with the latest version of the game or the iOS system. You might not be able to enjoy the new updates and features that the game developers release regularly. You might also encounter bugs or errors that might affect your gameplay.
-
-How to download and install a mod apk for Dragon Ball Legends on iOS?
-If you still want to try a mod apk for Dragon Ball Legends on your iOS device, you will need to follow these steps:
-
-Find a trusted source that offers a mod apk for Dragon Ball Legends. You can search online for reviews, ratings, and feedback from other users who have tried the mod apk. You can also check the security and quality of the mod apk by using antivirus software or online tools.
-Download the mod apk file from the source. You will need to allow unknown sources on your device settings to download the file. You will also need to have enough storage space on your device to save the file.
-Install the mod apk file on your device. You will need to use a third-party app installer, such as TutuApp, AppValley, or Panda Helper, to install the mod apk file on your device. You will also need to trust the app installer and the mod apk on your device settings.
-Launch the mod apk app on your device. You will need to grant some permissions and accept some terms and conditions to use the app. You will also need to have a stable internet connection to play the game.
-
-However, you should also be aware of some requirements and precautions to use a mod apk on your iOS device:
-
-You will need to have a jailbroken device or use a jailbreak alternative, such as AltStore, Cydia Impactor, or iOSEmus, to use a mod apk on your iOS device. Jailbreaking is a process that removes some restrictions and limitations imposed by Apple on iOS devices. However, jailbreaking can also void your warranty, expose your device to security risks, and cause some issues with your device performance.
-You will need to backup your data and files before using a mod apk on your iOS device. Using a mod apk can cause some data loss or corruption on your device. You should also avoid using your main account or linking it with the mod apk app, as it can result in a ban or suspension from the game.
-You will need to update your mod apk app regularly to keep up with the original app updates and features. However, updating your mod apk app might also cause some compatibility issues or errors with your device or the game. You should also check for new versions of the mod apk from the source you downloaded it from.
- How to play Dragon Ball Legends with a mod apk on iOS?
-Once you have downloaded and installed a mod apk for Dragon Ball Legends on your iOS device, you can start playing the game with some advantages and disadvantages. Here are some tips and tricks to enjoy the game with a mod apk:
-
-Use different modes, characters, and strategies. A mod apk can let you play the game in different modes, such as story mode, PvP mode, co-op mode, and events mode. You can also use different characters, such as Shallot, Goku, Vegeta, Frieza, Cell, and more. You can also try different skills and equipment combinations that might give you an edge over your opponents.
-Be careful of online features and anti-cheat measures. A mod apk might not work well with some online features of the game, such as leaderboards, rankings, tournaments, and social features. You might also face some anti-cheat measures from the game developers or other players, such as detection, reporting, or banning. You should avoid using obvious or excessive cheats that might ruin the game experience for yourself or others.
-Update your mod apk app regularly and check for new versions. A mod apk might not be compatible with the latest version of the game or the iOS system. You might not be able to enjoy the new updates and features that the game developers release regularly. You might also encounter bugs or errors that might affect your gameplay. You should update your mod apk app regularly and check for new versions from the source you downloaded it from.
-
-However, you should also be aware of some limitations and challenges of playing with a mod apk:
-
-You might lose your progress, achievements, and rewards in the game. A mod apk might cause some data loss or corruption on your device or the game. You might also lose your progress, achievements, and rewards in the game if you get banned or suspended from the game. You should backup your data and files before using a mod apk on your iOS device.
-You might miss out on the original game experience and challenge. A mod apk might make the game too easy or too hard for you. You might not be able to enjoy the original game experience and challenge that the game developers intended for you. You might also miss out on some fun and exciting aspects of the game that are only available in the original app.
-You might face some ethical and legal issues for using a mod apk. A mod apk might violate the terms and conditions of the game and infringe on the intellectual property rights of the game developers. You might face some ethical and legal issues for using a mod apk on your iOS device. You should respect the rights and efforts of the game developers and support them by using the original app.
-
-Conclusion
-In conclusion, Dragon Ball Legends is a 3D action RPG game that lets you fight with your favorite characters from the Dragon Ball franchise. You can also play the game with a mod apk on your iOS device, which can give you some benefits and risks. You can download and install a mod apk from a trusted source and use it with some tips and tricks. However, you should also be careful of some requirements and precautions to use a mod apk on your iOS device. You should also be aware of some limitations and challenges of playing with a mod apk. Finally, you should consider the ethical and legal implications of using a mod apk and support the original app if you like the game.
-We hope this article has helped you learn more about Dragon Ball Legends mod apk ios. If you have any questions or comments, please feel free to leave them below. Thank you for reading!
- FAQs
-Here are some frequently asked questions about Dragon Ball Legends mod apk ios:
-
-What is Dragon Ball Legends?
-Dragon Ball Legends is a 3D action RPG game that was released in 2018 by Bandai Namco Entertainment. The game features an original story that involves a new character named Shallot, who is a Saiyan from the past. You can also play as other iconic characters from the Dragon Ball universe, such as Goku, Vegeta, Frieza, Cell, and more.
-What is a mod apk?
-A mod apk is a modified version of an original app that has been altered by someone to add or remove some features. For example, a mod apk for Dragon Ball Legends might have unlimited crystals, coins, and energy, which are the in-game currencies that you need to unlock new characters, items, and upgrades.
-How to download and install a mod apk for Dragon Ball Legends on iOS?
-You will need to follow these steps to download and install a mod apk for Dragon Ball Legends on your iOS device:
-Find a trusted source that offers a mod apk for Dragon Ball Legends. You can search online for reviews, ratings, and feedback from other users who have tried the mod apk. You can also check the security and quality of the mod apk by using antivirus software or online tools.
-Download the mod apk file from the source. You will need to allow unknown sources on your device settings to download the file. You will also need to have enough storage space on your device to save the file.
-Install the mod apk file on your device. You will need to use a third-party app installer, such as TutuApp, AppValley, or Panda Helper, to install the mod apk file on your device. You will also need to trust the app installer and the mod apk on your device settings.
-Launch the mod apk app on your device. You will need to grant some permissions and accept some terms and conditions to use the app. You will also need to have a stable internet connection to play the game.
-
- Is it safe to use a mod apk for Dragon Ball Legends on iOS?
-It depends on the source and quality of the mod apk you use. Some mod apks might be safe and reliable, while others might be risky and harmful. You should always do some research and check the security and quality of the mod apk before downloading and installing it on your iOS device. You should also backup your data and files before using a mod apk on your iOS device.
-What are some alternatives to using a mod apk for Dragon Ball Legends on iOS?
-If you don't want to use a mod apk for Dragon Ball Legends on iOS, you can try some alternatives, such as:
-
-Using cheats or hacks that don't require downloading or installing anything on your device. For example, you can use online generators or tools that can give you free crystals, coins, and energy in the game.
-Using tips or guides that can help you improve your skills and strategies in the game. For example, you can watch videos or read articles that can teach you how to play better and win more battles in the game.
-Using official or legal ways to get more crystals, coins, and energy in the game. For example, you can complete missions, achievements, and events in the game that can reward you with in-game currencies. You can also buy them with real money if you want to support the game developers.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Download Fluxus for Roblox Mobile in 3 Easy Steps.md b/spaces/congsaPfin/Manga-OCR/logs/How to Download Fluxus for Roblox Mobile in 3 Easy Steps.md
deleted file mode 100644
index bab8452c3e281fbd5ed5bd3a1f965fbf80ac4711..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/How to Download Fluxus for Roblox Mobile in 3 Easy Steps.md
+++ /dev/null
@@ -1,97 +0,0 @@
-
-How to Download and Use Fluxus on Roblox Mobile
-Roblox is one of the most popular online gaming platforms in the world, with millions of players creating and exploring various games every day. However, sometimes you may want to enhance your gaming experience by using scripts that can give you extra abilities, items, or advantages. That's where Fluxus comes in.
-What is Fluxus and why you should use it
-Fluxus is a powerful executor for Roblox scripts
-Fluxus is an app that allows you to run scripts on Roblox games. Scripts are pieces of code that can modify or add new features to a game, such as flying, teleporting, spawning items, changing your appearance, and more. Scripts can be written by anyone, and there are thousands of them available online for free.
-Fluxus allows you to run any script on any game
-Unlike some other executors, Fluxus does not limit you to specific games or scripts. You can use Fluxus on any game that supports scripting, and you can run any script that works with Roblox. This means that you have unlimited possibilities to customize your gameplay and have more fun.
-Fluxus has a user-friendly interface and a key system
-Fluxus has a simple and intuitive interface that lets you easily access and manage your scripts. You can browse through different categories of scripts, such as admin, GUI, hub, jailbreak, simulator, etc. You can also save your favorite scripts for quick access. To use Fluxus, you need to generate a key every time you open the app. The key system is designed to prevent abuse and ensure that Fluxus remains safe and reliable.
-How to download Fluxus on your Android device
-Visit the official Fluxus website and download the APK file
-To get started with Fluxus, you need to download the APK file from the official website. The APK file is the installer for Android apps that are not available on the Google Play Store. You can find the link to the website at the end of this article.
-Install the APK file and grant the necessary permissions
-Once you have downloaded the APK file, you need to install it on your device. To do this, you may need to enable unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store. After that, you can tap on the APK file and follow the instructions to install it. You may also need to grant some permissions to Fluxus, such as storage, overlay, and accessibility.
-Open the Fluxus app and generate a key
-After installing Fluxus, you can open it from your app drawer or home screen. You will see a screen with a button that says "Generate Key". Tap on it and wait for a few seconds until a key appears. Copy the key and paste it in the box below. Then tap on "Submit" and wait for Fluxus to load.
-How to download Roblox on your Android device
-Visit the Google Play Store and search for Roblox
-Roblox is the app that lets you play the games created by other users on the Roblox platform. You can also create your own games using the Roblox Studio app on your computer. To download Roblox on your Android device, you need to visit the Google Play Store and search for Roblox. You can find the link to the store at the end of this article.
-Install the Roblox app and sign in with your account
-Once you have found the Roblox app on the Google Play Store, you can tap on "Install" and wait for it to download and install on your device. You may need to grant some permissions to Roblox, such as camera, microphone, and location. After that, you can open the Roblox app and sign in with your account. If you don't have an account, you can create one for free by tapping on "Sign up".
-Open the Roblox app and choose a game to play
-After signing in, you can access the Roblox app's main menu, where you can see different categories of games, such as popular, featured, recommended, etc. You can also search for a specific game by tapping on the magnifying glass icon. To play a game, just tap on it and wait for it to load. You can also join your friends' games by tapping on their profile pictures.
-How to use Fluxus on Roblox mobile
-Open the Fluxus app and select a script from the menu
-To use Fluxus on Roblox mobile, you need to open the Fluxus app first. You will see a menu with different options, such as scripts, console, settings, etc. Tap on "Scripts" and you will see a list of scripts that you can use on Roblox games. You can scroll through the list or use the search bar to find a script that suits your needs.
-Copy the script and paste it in the Fluxus console
-Once you have selected a script, tap on it and you will see a screen with the script code. Tap on "Copy" and then go back to the main menu. Tap on "Console" and you will see a screen with a text box. Paste the script code in the text box by tapping on it and then tapping on "Paste".
-Tap on execute and enjoy the script features
-After pasting the script code in the console, tap on "Execute" and wait for a few seconds until the script runs. You will see a message that says "Script executed successfully" or something similar. Then you can switch to the Roblox app and enjoy the script features. Depending on the script, you may see a new GUI or menu that lets you control the script options. For example, if you use a fly script, you may see a button that says "Fly" or "Toggle Fly". Tap on it and you will be able to fly in the game.
- Conclusion
-In this article, we have shown you how to download and use Fluxus on Roblox mobile. Fluxus is an amazing app that lets you run scripts on any Roblox game and enhance your gaming experience. You can download Fluxus from its official website and install it on your Android device. You can also download Roblox from the Google Play Store and sign in with your account. To use Fluxus on Roblox mobile, you need to select a script from the Fluxus menu, copy it, paste it in the Fluxus console, and execute it. Then you can switch to the Roblox app and enjoy the script features.
- FAQs
-Q: Is Fluxus safe to use?
-A: Fluxus is safe to use as long as you download it from its official website and use scripts that are not malicious or harmful. However, there is always a risk of getting banned by Roblox if you use scripts that violate their terms of service or give you an unfair advantage over other players. Therefore, use Fluxus at your own risk and discretion.
-Q: Can I use Fluxus on iOS devices?
-A: No, Fluxus is only available for Android devices at the moment. There is no official version of Fluxus for iOS devices.
-Q: Can I create my own scripts for Fluxus?
-A: Yes, you can create your own scripts for Fluxus using Lua programming language. You can learn more about Lua scripting from online tutorials or forums. You can also edit or modify existing scripts to suit your preferences.
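-As a rough illustration (this sketch is not taken from Fluxus or Roblox documentation, and the speed value is only an example), a very simple Roblox Lua script could look like this:
-```lua
---- Minimal example: raise the local player's walking speed.
---- Intended to be run client-side by an executor such as Fluxus.
-local player = game.Players.LocalPlayer
-local character = player.Character or player.CharacterAdded:Wait()
-local humanoid = character:WaitForChild("Humanoid")
-humanoid.WalkSpeed = 50 -- default is 16; 50 is an arbitrary example value
-```
-You could paste a script like this into the Fluxus console just like any downloaded script, then adjust the values to see how the game reacts.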
-Q: Where can I find more scripts for Fluxus?
-A: You can find more scripts for Fluxus from various sources online, such as websites, forums, blogs, YouTube videos, etc. However, be careful when downloading scripts from unknown or untrusted sources, as they may contain viruses, malware, or spyware. Always scan the scripts with an antivirus or anti-malware program before using them.
-Q: How can I contact the Fluxus developers or support team?
-A: You can contact the Fluxus developers or support team by visiting their Discord server. There you can chat with other Fluxus users, report bugs or issues, request new features or scripts, and get help from the staff members.
- I hope you found this article helpful and informative. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!
- : [Fluxus - The best free Roblox executor](https://fluxteam.xyz/) : [Fluxus Discord Server](https://discord.gg/fluxus)
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Moped kredit ilkin odenissiz - i yeri olmadan - srclk visqsi tlb olunmur - Bak.md b/spaces/congsaPfin/Manga-OCR/logs/Moped kredit ilkin odenissiz - i yeri olmadan - srclk visqsi tlb olunmur - Bak.md
deleted file mode 100644
index ce329dfa2a2c3eea8361d2f4ef7f8a8d1d11fc8b..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Moped kredit ilkin odenissiz - i yeri olmadan - srclk visqsi tlb olunmur - Bak.md
+++ /dev/null
@@ -1,171 +0,0 @@
-
-Ilkin Odenissiz Moped: A Convenient and Affordable Way to Travel
- If you are looking for a new way to get around town, you might want to consider buying a moped. A moped is a two-wheeled vehicle that has a small engine and pedals. It is similar to a scooter, but usually has lower power and speed. Mopeds are popular in many countries because they are easy to ride, cheap to maintain, and eco-friendly. In this article, we will tell you everything you need to know about mopeds, including the benefits, the types, the legal aspects, and the safety tips. We will also introduce you to ilkin odenissiz moped, a special offer that allows you to buy a moped without paying any upfront fees. Read on to find out more!
- What is a moped and why should you consider one?
- A moped is a type of motorized bicycle that has an engine size of 50cc or less and a maximum speed of 28 mph or less. Mopeds are different from motorcycles, which have larger engines and higher speeds. Mopeds are also different from scooters, which do not have pedals and usually have higher power and speed than mopeds. However, some people use the terms interchangeably, so it is important to check the technical specifications before buying one.
- Mopeds are great vehicles for commuting, especially in urban areas where traffic and parking are problems. They are also ideal for short trips and errands, as they can easily navigate through narrow streets and crowded places. Here are some of the benefits of owning a moped:
- The benefits of owning a moped
- Fuel efficiency and low maintenance costs
- One of the main advantages of mopeds is that they are very fuel efficient. Some electric mopeds can even get 100+ mpg, which is much better than cars or motorcycles. This means that you can save a lot of money on gas and reduce your carbon footprint. Mopeds also have low maintenance costs, as they do not require oil changes, tune-ups, or expensive repairs. You only need to check the tire pressure, battery level, brake fluid, and lights regularly.
- Easy to ride and park
- Another benefit of mopeds is that they are easy to ride and park. You do not need a lot of experience or skill to operate a moped, as most of them have automatic or semi-automatic transmission. You just need to twist the throttle and go. You also do not need a lot of space to park a moped, as they can fit in small spots or even on sidewalks. You can also avoid paying parking fees or fines by using designated parking areas for mopeds.
- Eco-friendly and fun
- A third benefit of mopeds is that they are eco-friendly and fun. Mopeds produce fewer emissions than cars or motorcycles, which helps protect the environment and improve air quality. They also make less noise than other vehicles, which reduces noise pollution and disturbance. Moreover, mopeds are fun to ride, as they offer a sense of freedom and adventure. You can enjoy the scenery, the breeze, and the thrill of riding a moped. You can also customize your moped with accessories, stickers, or paint to express your personality and style.
- How to choose the right moped for your needs
- Now that you know the benefits of owning a moped, you might be wondering how to choose the right one for your needs. There are many factors to consider, such as the type, the size, the price, and the quality of the moped. Here are some tips to help you make a smart decision:
- The different types and sizes of mopeds
- The first thing to consider is the type and size of the moped. There are different types of mopeds, such as electric, gas, or hybrid. Electric mopeds run on batteries that need to be charged regularly. They are quiet, clean, and cheap to operate, but they have limited range and speed. Gas mopeds run on gasoline that needs to be refilled frequently. They are noisy, polluting, and expensive to operate, but they have longer range and speed. Hybrid mopeds combine both electric and gas power sources. They are more efficient and versatile than either type alone, but they are also more complex and costly.
- The size of the moped depends on the engine capacity and the wheel diameter. The engine capacity is measured in cubic centimeters (cc) and determines the power and speed of the moped. The wheel diameter is measured in inches and determines the stability and maneuverability of the moped. Generally, smaller mopeds have lower engine capacity and smaller wheels, while larger mopeds have higher engine capacity and larger wheels. Smaller mopeds are easier to handle and park, but they are also slower and less comfortable. Larger mopeds are faster and more comfortable, but they are also harder to handle and park.
- The legal requirements and safety tips for moped riders
- The second thing to consider is the legal requirements and safety tips for moped riders. Depending on where you live, you may need a license, registration, insurance, or a helmet to ride a moped legally. You should check the local laws and regulations before buying or riding a moped. You should also follow some basic safety tips to avoid accidents and injuries, such as:
-
-Wear a helmet, gloves, goggles, and protective clothing when riding a moped.
-Obey the traffic rules and signals when riding a moped.
-Use the lights, horn, and mirrors when riding a moped.
-Keep a safe distance from other vehicles when riding a moped.
-Avoid riding a moped in bad weather or at night.
-Maintain your moped regularly and check for any defects or damages.
-
- The best places to buy a moped in Azerbaijan
- The third thing to consider is where to buy a moped in Azerbaijan. There are many places where you can find mopeds for sale, such as online platforms, dealerships, or private sellers. However, not all of them are reliable or trustworthy. You should do some research before buying a moped from any source. Here are some of the best places to buy a moped in Azerbaijan:
-
-| Place | Description | Pros | Cons |
-| --- | --- | --- | --- |
-| Online platforms | Websites or apps that connect buyers and sellers of mopeds. | You can browse through many options and compare prices and features. | You may not be able to inspect or test the moped before buying it. |
-| Dealerships | Stores that sell new or used mopeds from different brands. | You can get professional advice and service from experts. | You may have to pay higher prices or fees than other sources. |
-| Private sellers | Individuals who sell their own mopeds directly to buyers. | You can negotiate the price and terms with the seller. | You may encounter scams or frauds from dishonest sellers. |
-
- Ilkin Odenissiz Moped: A popular option for moped buyers
- If you are interested in buying a moped but do not have enough money to pay upfront, you might want to check out ilkin odenissiz moped. Ilkin odenissiz moped is a special offer that allows you to buy a moped without paying any initial fees. You only need to pay monthly installments for a fixed period of time until you own the moped. Ilkin odenissiz moped is a popular option for moped buyers in Azerbaijan, as it offers many benefits and advantages. Here are some of the details and features of ilkin odenissiz moped:
- What is ilkin odenissiz moped and how does it work?
- The meaning and advantages of ilkin odenissiz moped
- Ilkin odenissiz moped roughly means "moped with no initial payment" in Azerbaijani. It is a type of financing scheme that allows you to buy a moped without paying any down payment, deposit, or interest. You only need to pay a monthly fee that covers the cost of the moped and the service. The monthly fee depends on the type and size of the moped, as well as the duration of the contract. You can choose from 6, 12, 18, or 24 months of payment terms. Once you complete all the payments, you become the owner of the moped.
- Ilkin odenissiz moped has many advantages over other financing options, such as:
-
-You do not need to have a good credit score or a guarantor to qualify for ilkin odenissiz moped.
-You do not need to pay any extra fees or charges for ilkin odenissiz moped.
-You do not need to worry about losing your moped if you miss a payment, as ilkin odenissiz moped does not have any penalties or repossession clauses.
-You can enjoy free maintenance and repair services for your moped during the contract period.
-You can exchange or upgrade your moped at any time during the contract period.
-
- The terms and conditions of ilkin odenissiz moped
- Ilkin odenissiz moped is a simple and straightforward process that does not require a lot of paperwork or hassle. You only need to follow these steps to get your ilkin odenissiz moped:
-
-Visit one of the authorized dealerships that offer ilkin odenissiz moped and choose your preferred moped model and size.
-Fill out a short application form with your personal and contact information.
-Provide a copy of your ID card and proof of income (such as a bank statement or a salary slip).
-Sign the contract agreement that specifies the monthly fee, the payment term, and the service conditions.
-Pay the first monthly fee and receive your moped on the spot.
-Pay the remaining monthly fees on time until you complete the contract term.
-
- There are some terms and conditions that you need to comply with when you use ilkin odenissiz moped, such as:
-
-You must be at least 18 years old and have a valid driver's license to apply for ilkin odenissiz moped.
-You must use the moped for personal use only and not for commercial or illegal purposes.
-You must keep the moped in good condition and follow the manufacturer's guidelines for maintenance and care.
-You must inform the dealership if you change your address, phone number, or email address during the contract period.
-You must not sell, rent, lend, or transfer your moped to anyone else without the dealership's permission.
-
- The testimonials and reviews of ilkin odenissiz moped customers
- Ilkin odenissiz moped has received many positive testimonials and reviews from satisfied customers who have enjoyed its benefits and features. Here are some of the comments that ilkin odenissiz moped customers have shared online:
- "I bought my first moped with ilkin odenissiz moped and I am very happy with it. It is easy to ride, fuel efficient, and eco-friendly. I do not have to worry about paying any upfront fees or interest rates. I just pay a small monthly fee and I get free service and repair. I recommend ilkin odenissiz moped to anyone who wants to buy a moped."
- "Ilkin odenissiz moped is a great option for students like me who do not have enough money to buy a new vehicle. I can travel to school, work, and anywhere else with my ilkin odenissiz moped. It is very convenient and affordable. I do not have to pay any down payment or interest. I just pay a fixed monthly fee and I get to own the moped after a few months. I love ilkin odenissiz moped."
- "Ilkin odenissiz moped is the best way to buy a moped in Azerbaijan. You can choose from a wide range of models and sizes, and you can get them without paying any upfront fees. You also get free maintenance and repair services, and you can exchange or upgrade your moped anytime. Ilkin odenissiz moped is a reliable and trustworthy company that provides excellent customer service and support."
- Conclusion: Is ilkin odenissiz moped worth it?
- In conclusion, ilkin odenissiz moped is a convenient and affordable way to travel with a moped. It offers many benefits and advantages over other financing options, such as no down payment, no interest, no penalties, free service, and flexible terms. It also has many positive testimonials and reviews from satisfied customers who have enjoyed its features and benefits. If you are looking for a new way to get around town, you might want to consider buying a moped with ilkin odenissiz moped. You can visit their website or contact them for more information and offers.
- A summary of the main points of the article
- Here are the main points of the article:
-
-A moped is a two-wheeled vehicle that has a small engine and pedals. It is similar to a scooter, but usually has lower power and speed.
-Mopeds are popular in many countries because they are easy to ride, cheap to maintain, and eco-friendly.
-Mopeds have many benefits, such as fuel efficiency, low maintenance costs, easy parking, and fun riding.
-Mopeds come in different types and sizes, such as electric, gas, or hybrid. You should choose the one that suits your needs and preferences.
-Mopeds have some legal requirements and safety tips that you should follow before buying or riding one.
-Ilkin odenissiz moped is a special offer that allows you to buy a moped without paying any upfront fees. You only need to pay monthly installments for a fixed period of time until you own the moped.
-Ilkin odenissiz moped has many advantages over other financing options, such as no down payment, no interest, no penalties, free service, and flexible terms.
-Ilkin odenissiz moped has many positive testimonials and reviews from satisfied customers who have enjoyed its features and benefits.
-
- A call to action for the readers to check out ilkin odenissiz moped offers
- If you are interested in buying a moped with ilkin odenissiz moped, you should not hesitate to check out their offers. They have a wide range of models and sizes that you can choose from, and they have a simple and straightforward process that does not require a lot of paperwork or hassle. You can visit their website or contact them for more information and offers. You can also follow them on social media platforms to get the latest news and updates about their products and services. Do not miss this opportunity to get your own ilkin odenissiz moped today!
- FAQs
- Here are some of the frequently asked questions about ilkin odenissiz moped:
-
-What is the minimum income requirement for ilkin odenissiz moped?
-There is no minimum income requirement for ilkin odenissiz moped. You only need to provide proof of income (such as a bank statement or a salary slip) to show that you can afford the monthly fee.
-What if I want to cancel or terminate my contract with ilkin odenissiz moped?
-You can cancel or terminate your contract with ilkin odenissiz moped at any time without paying any penalties or fees. You just need to return the moped in good condition and pay any outstanding balance.
-What if I damage or lose my moped during the contract period?
-You are responsible for taking care of your moped during the contract period. If you damage or lose your moped, you will have to pay for the repair or replacement costs.
-Can I buy more than one moped with ilkin odenissiz moped?
-Yes, you can buy more than one moped with ilkin odenissiz moped. You just need to fill out separate application forms and sign separate contract agreements for each moped.
-What are the best brands and models of mopeds that ilkin odenissiz moped offers?
-Ilkin odenissiz moped offers a variety of brands and models of mopeds that suit different tastes and preferences. Some of the best brands and models are:
-
-Vespa: A classic and stylish brand of Italian mopeds that have a retro design and a smooth performance.
-Honda: A reliable and durable brand of Japanese mopeds that have a modern design and a powerful performance.
-Yamaha: A popular and versatile brand of Japanese mopeds that have a sporty design and a fast performance.
-Kymco: A budget-friendly and efficient brand of Taiwanese mopeds that have a simple design and a low fuel consumption.
-
- You can check out the full list of brands and models on their website or visit their showroom to see them in person.
- I hope this article has answered your questions about ilkin odenissiz moped and helped you decide whether it is the right option for you. If you have any more questions or comments, please feel free to contact me or leave them below. Thank you for reading and happy riding!
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/SAKURA School Simulator - The Ultimate Japanese School Game with Ichal Korg - APK Free Download.md b/spaces/congsaPfin/Manga-OCR/logs/SAKURA School Simulator - The Ultimate Japanese School Game with Ichal Korg - APK Free Download.md
deleted file mode 100644
index d07773cc0c073118d242a6857526b7fae1734013..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/SAKURA School Simulator - The Ultimate Japanese School Game with Ichal Korg - APK Free Download.md
+++ /dev/null
@@ -1,122 +0,0 @@
-
-Sakura School Simulator: A Fun and Crazy Game for Android
- If you are looking for a game that offers a unique and immersive experience, you might want to check out Sakura School Simulator. This game is a simulation game that lets you explore a fictional town in Japan, where you can make friends, enemies, lovers, or go on a rampage with weapons. You can also customize your character, dress up, fly, shrink, and do many other crazy things. In this article, we will tell you more about this game, how to download it, and how to play it.
- What is Sakura School Simulator?
- Sakura School Simulator is a game developed by Garusoft Development Inc., a Japanese company that specializes in creating simulation games. The game was released in 2019 and has since gained over 10 million downloads on the Google Play Store alone. It has also received mostly positive reviews from users who praised its graphics, gameplay, and humor.
- A simulation game with two ways to enjoy
- One of the main features of Sakura School Simulator is that it gives you two ways to enjoy the game. You can either make friends and lovers as you like, and enjoy a brilliant school life, or you can go on the rampage as you like, and cause chaos in the town. Before you do that, though, you need to borrow weapons from the Yakuza office, which you can easily get if you don't stop flying. You can also switch between four different characters in the same stage, each with their own personality and story.
- A huge open world with many features and activities
- Another feature of Sakura School Simulator is that it has a huge open world that you can freely explore. The town of Sakura has many places to visit, such as the school, the park, the mall, the castle, the amusement park, and more. You can also interact with various NPCs, such as students, teachers, shopkeepers, gangsters, animals, and even a giant girl named Alice who is on a rampage. You can also find and use various items, such as clothes, accessories, vehicles, jetpacks, shrink rays, bazookas, and more.
- A game with no concept of death or blood
- One thing that makes Sakura School Simulator different from other simulation games is that it has no concept of death or blood. The game is meant to be fun and humorous, so even if you fight or shoot someone, they will not die or bleed. They will only be stunned and wake up the next day. They will also hate you and try to attack you back. The game also has no end or goal, so you can make your own situations and stories as you like.
- How to Download Sakura School Simulator APK
- If you are interested in playing Sakura School Simulator on your Android device, there are two ways to download it. You can either download it from the official sources or from alternative sources.
- The official sources from Google Play Store and App Store
- The easiest way to download Sakura School Simulator is to get it from the Google Play Store or the App Store. These are the official sources that provide the latest version of the game and ensure its safety and compatibility. To download the game from these sources, you just need to follow these steps:
-
-Open the Google Play Store or the App Store on your device.
-Search for "Sakura School Simulator" in the search bar.
-Tap on the game icon and then tap on "Install" or "Get".
-Wait for the download to finish and then enjoy the game.
-
-This method is recommended for most users, as it is simple and secure. However, if you have any issues with the Google Play Store or the App Store, such as region restrictions, device compatibility, or storage space, you might want to try the alternative method.
- The alternative sources from APK websites
- Another way to download Sakura School Simulator is to get it from APK websites. These are websites that provide APK files, which are the installation files for Android applications. By downloading the APK file, you can install the game manually on your device without using the Google Play Store or the App Store. To download the game from these sources, you need to follow these steps:
-
-Find a reliable and trustworthy APK website that offers Sakura School Simulator APK. Some examples are APKPure, APKMirror, and Uptodown.
-Search for "Sakura School Simulator" on the website and choose the version that you want to download.
-Tap on the download button and wait for the APK file to be downloaded on your device.
-Before you install the APK file, you need to enable the "Unknown Sources" option on your device. This option allows you to install applications from sources other than the Google Play Store or the App Store. To enable this option, go to Settings > Security > Unknown Sources and toggle it on.
-Locate the downloaded APK file on your device and tap on it to start the installation process.
-Follow the instructions on the screen and wait for the installation to finish.
-Once the installation is done, you can launch the game and enjoy it.
-
-This method is suitable for users who have problems with the official sources or who want to try different versions of the game. However, this method also has some risks and drawbacks, such as malware infection, data theft, or legal issues. Therefore, you should only use this method if you trust the APK website and if you know what you are doing.
- How to Play Sakura School Simulator
- Now that you have downloaded Sakura School Simulator on your device, you might be wondering how to play it. The game is quite easy to play, but it also has many features and options that you can explore. Here are some of the basics that you need to know:
- The basic controls and options
- The game has a simple interface that shows you various buttons and icons on the screen. You can use these buttons and icons to control your character and access different options. Here are some of the main ones:
-
-The joystick on the left side of the screen lets you move your character around.
-The buttons on the right side of the screen let you perform various actions, such as jumping, flying, attacking, interacting, changing clothes, using items, and more.
-The menu button on the top left corner of the screen lets you access various menus, such as settings, save/load, character switch, map, inventory, shop, and more.
-The status bar on the top right corner of the screen shows you your character's name, health, hunger, love meter, money, and time.
-The chat button on the bottom right corner of the screen lets you chat with other players online if you enable the online mode in the settings.
-
- The missions and tasks to complete
- The game does not have a specific goal or storyline that you need to follow. You can create your own situations and stories as you like. However, if you want some guidance or challenges, you can try to complete some of the missions and tasks that are available in the game. These missions and tasks are optional and can be found in different places in the town. Some examples are:
-
-The school missions that are given by your teachers or classmates. These missions involve attending classes, taking tests, joining clubs, making friends, or causing trouble.
-The town missions that are given by various NPCs in different locations. These missions involve helping people with their problems, delivering goods, fighting enemies, or finding secrets.
-The special missions that are given by the Yakuza boss or Alice. These missions involve borrowing weapons, causing chaos, or stopping Alice's rampage.
-
-By completing these missions and tasks, you can earn money, items, reputation, and love points. You can also unlock new features and options in the game.
- The tips and tricks to have more fun
- Sakura School Simulator is a game that offers a lot of freedom and creativity for the players. You can do whatever you want and have fun in your own way. However, if you want some tips and tricks to make the game more enjoyable, here are some of them:
-
-Try to explore every corner of the town and discover its secrets. You might find hidden items, easter eggs, or references to other games or anime.
-Try to interact with different NPCs and see their reactions. You might make friends, enemies, or lovers. You can also change their outfits, hairstyles, or accessories in the shop.
-Try to customize your character and make them look cool or cute. You can change their clothes, accessories, hair color, eye color, skin color, and more in the dressing room.
-Try to use different items and see their effects. You can use weapons, vehicles, jetpacks, shrink rays, bazookas, and more to spice up your gameplay.
-Try to play online and chat with other players. You can join or create rooms with different themes and rules. You can also cooperate or compete with other players.
-
- Conclusion
- Sakura School Simulator is a fun and crazy game for Android that lets you experience a fictional town in Japan. You can make friends, enemies, lovers, or go on a rampage with weapons. You can also customize your character, dress up, fly, shrink, and do many other crazy things. The game has no concept of death or blood, so you can enjoy it without any worries. The game also has no end or goal, so you can make your own situations and stories as you like.
- If you want to download Sakura School Simulator APK on your device, you can either get it from the official sources or from alternative sources. The official sources are the Google Play Store and the App Store, which provide the latest version of the game and ensure its safety and compatibility. The alternative sources are the APK websites, which provide APK files that you can install manually on your device without using the official sources. However, this method also has some risks and drawbacks, such as malware infection, data theft, or legal issues.
- If you want to play Sakura School Simulator on your device, you just need to know the basic controls and options that are available on the screen. You can also try to complete some of the missions and tasks that are given by various NPCs in different locations. These missions and tasks are optional and can help you earn money, items, reputation, and love points. You can also unlock new features and options in the game. You can also use some tips and tricks to make the game more enjoyable, such as exploring every corner of the town, interacting with different NPCs, customizing your character, using different items, and playing online.
- Sakura School Simulator is a game that offers a lot of freedom and creativity for the players. You can do whatever you want and have fun in your own way. The game is also updated regularly with new content and improvements. If you are looking for a game that is fun and crazy, you should definitely try Sakura School Simulator.
- FAQs
- Here are some of the frequently asked questions about Sakura School Simulator:
- Q: Is Sakura School Simulator free to play?
-A: Yes, Sakura School Simulator is free to play. However, the game also has some in-app purchases that you can buy with real money. These in-app purchases include coins, gems, and ad removal. You can use coins and gems to buy items, clothes, accessories, and more in the game. You can also earn coins and gems by playing the game or watching ads.
- Q: Is Sakura School Simulator safe to play?
-A: Sakura School Simulator is safe to play as long as you download it from the official sources or from reliable APK websites. The game does not contain any harmful or inappropriate content, such as violence, blood, or nudity. The game is also rated 12+ on the Google Play Store and 9+ on the App Store, which means it is suitable for most users.
- Q: Is Sakura School Simulator online or offline?
-A: Sakura School Simulator can be played both online and offline. You can play the game offline without any internet connection. However, if you want to play online and chat with other players, you need to enable the online mode in the settings. You also need to have a stable internet connection and a Google account to play online.
- Q: How to update Sakura School Simulator?
-A: If you download Sakura School Simulator from the official sources, you can update it automatically or manually through the Google Play Store or the App Store. You just need to check for updates and install them when they are available. If you download Sakura School Simulator from alternative sources, you need to download and install the latest version of the APK file from the same website that you used before.
- Q: How to contact the developer of Sakura School Simulator?
-A: If you have any questions, feedback, or suggestions for Sakura School Simulator, you can contact the developer of the game through their email address: garusoft@gmail.com. You can also follow their Twitter account: @garusoft for more information and updates about the game.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Why You Need to Try Kick the Buddy 2 Mod Apk The Best Way to Relieve Stress and Have Fun.md b/spaces/congsaPfin/Manga-OCR/logs/Why You Need to Try Kick the Buddy 2 Mod Apk The Best Way to Relieve Stress and Have Fun.md
deleted file mode 100644
index b34347ce9f2657afa5bd19244b28e81ee99cab57..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Why You Need to Try Kick the Buddy 2 Mod Apk The Best Way to Relieve Stress and Have Fun.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-Kick the Buddy 2 Mod Apk: A Fun and Stress-Relieving Game
-Do you ever feel stressed, angry, or bored? Do you want to have some fun and unleash your creativity? If so, you should try Kick the Buddy 2 Mod Apk, a remastered version of the popular anti-stress game on Android. In this game, you can do anything you want to a rag doll buddy using various weapons and elements, and this modded version unlocks all weapons and skins with unlimited money. In this article, we cover everything you need to know about Kick the Buddy 2 Mod Apk: its features, how to download and install it, how to play it, tips and tricks, pros and cons, alternatives, and FAQs.
-What is Kick the Buddy 2 Mod Apk?
-Kick the Buddy 2 Mod Apk is a modified version of Kick the Buddy: Second Kick, a remaster of the iconic anti-stress toy on Android. The developers have improved the graphics and music, but the plot remains the same as in the original game. You are given a rag doll that you can play with and do anything to: throw it against the walls, hit it, or use various weapons and elements such as guns, knives, bombs, fire, and electricity. You can also customize your buddy with different skins and outfits. The game is designed to help you relieve stress and have fun in a creative way.
-Download Kick the Buddy 2 Mod Apk: https://urlca.com/2uObnF
-Features of Kick the Buddy 2 Mod Apk
-Kick the Buddy 2 Mod Apk has many features that make it more enjoyable than the original game. Here are some of them:
-Unlimited money
-With this mod apk, you will have unlimited coins and diamonds that you can use to buy more weapons and skins. You don't have to worry about running out of money or watching ads to get more. You can enjoy the game without any limitations.
-Unlock all weapons and skins
-This mod apk also allows you to unlock all weapons and skins that are available in the game. You can choose from hundreds of weapons and elements that range from simple to crazy. You can also change your buddy's appearance with different skins and outfits. You can experiment with different combinations and see how your buddy reacts.
-Improved graphics and music
-The developers have improved the graphics and music of this remastered version. The game has more colorful and detailed graphics that make it more appealing, and the music is more upbeat and catchy, matching the mood of the game.
-How to download and install Kick the Buddy 2 Mod Apk?
-If you want to download and install Kick the Buddy 2 Mod Apk on your Android device, you need to follow these simple steps:
-Step 1: Download the mod apk file from a trusted source
-You can download the mod apk file from this link. Make sure you have enough storage space on your device before downloading; the file size is about 100 MB.
-Step 2: Enable unknown sources on your device settings
-Before installing the mod apk file, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-Step 3: Install the mod apk file and launch the game
-After enabling unknown sources, locate the mod apk file on your device and tap on it to install it. Wait for the installation process to finish and then launch the game. You can now enjoy Kick the Buddy 2 Mod Apk with unlimited money and all weapons and skins unlocked.
-How to play Kick the Buddy 2 Mod Apk?
-Kick the Buddy 2 Mod Apk is very easy and fun to play. Here are some basic instructions on how to play it:
-Choose your buddy and your weapon
-When you start the game, you can choose your buddy from different options, such as a teddy bear, a robot, a zombie, etc. You can also customize your buddy with different skins and outfits. Then, you can choose your weapon from various categories, such as firearms, melee, explosives, objects, animals, etc. You can also use elements, such as fire, water, electricity, etc.
-Tap, swipe, drag, and drop to interact with your buddy
-Once you have chosen your buddy and your weapon, you can start interacting with your buddy by tapping, swiping, dragging, and dropping on the screen. You can make your buddy fly, bounce, explode, burn, freeze, etc. You can also use gestures to zoom in and out, rotate, and move the camera. You can see your buddy's reactions and hear his funny comments as you play.
-Earn coins and diamonds to buy more weapons and skins
-As you play with your buddy, you will earn coins and diamonds that you can use to buy more weapons and skins. You can also get free coins and diamonds by watching ads or completing tasks. With unlimited money in this mod apk, you don't have to worry about spending too much or running out of resources.
-Tips and tricks for Kick the Buddy 2 Mod Apk
-Kick the Buddy 2 Mod Apk is a game that allows you to be creative and have fun. However, if you want to make the most out of it, you can follow these tips and tricks:
-Use different combinations of weapons and elements
-One of the best things about this game is that you can use different combinations of weapons and elements to interact with your buddy. For example, you can use a chainsaw and a flamethrower, or a grenade and a water hose, or a cactus and a lightning bolt, etc. You can see how your buddy reacts differently to each combination and discover new effects and animations.
-Watch ads to get free coins and diamonds
-If you want to get more coins and diamonds without spending real money, you can watch ads to get them for free. You can watch ads by tapping on the video icon on the top right corner of the screen. You can also watch ads to get free spins on the wheel of fortune, which can give you more coins, diamonds, or weapons.
-Try different modes and challenges for more fun
-The game also offers different modes and challenges that you can try for more fun and variety. You can access them by tapping on the menu icon on the top left corner of the screen. You can choose from modes such as Sandbox, Kick the Alien, Kick the Zombie, etc. You can also complete challenges such as Kick the Buddy 100 times, Use 10 different weapons, etc. You can earn more coins and diamonds by completing these modes and challenges.
-Pros and cons of Kick the Buddy 2 Mod Apk
-Kick the Buddy 2 Mod Apk is a game that has many pros and cons. Here are some of them:
-Pros: Fun, stress-relieving, creative, addictive, free
-The game is very fun and stress-relieving, as you can do anything you want with your buddy without any consequences. You can also be creative and experiment with different weapons and elements. The game is very addictive, as you will want to try more weapons and skins and see how your buddy reacts. The game is also free to download and play, and with this mod apk, you can enjoy unlimited money and all weapons and skins unlocked.
-Cons: Violent, repetitive, annoying ads, requires internet connection
-The game is very violent, as you are basically torturing your buddy with various weapons and elements. Some people may find this disturbing or offensive. The game is also repetitive, as there is no real goal or story in the game. You may get bored after playing for a while. The game also has annoying ads that pop up frequently and interrupt your gameplay. You can skip them by watching other ads or paying real money. The game also requires an internet connection to play, which may be inconvenient for some users.
-Alternatives to Kick the Buddy 2 Mod Apk
-If you like Kick the Buddy 2 Mod Apk, you may also like some other games that are similar to it. Here are some alternatives that you can try:
-Kick the Buddy: Forever
-This is another game in the Kick the Buddy series that offers more weapons and elements to play with your buddy. You can also upgrade your weapons and elements to make them more powerful and effective. You can also collect stickers and unlock new buddies and backgrounds. The game has more realistic physics and graphics than the previous versions.
-Beat the Boss 4
-This is a game that lets you take revenge on your annoying boss by using various weapons and elements. You can customize your boss with different outfits and accessories. You can also explore different locations and find hidden secrets and treasures. The game has over 200 weapons and elements to choose from, such as guns, knives, hammers, rockets, etc.
-Smash Dude
-This is a game that lets you smash a dummy with various weapons and elements. You can use guns, swords, bombs, fire, lasers, etc. to destroy the dummy. You can also customize the dummy with different clothes and accessories. The game has realistic physics and graphics that make it more fun and satisfying.
-Conclusion
-Kick the Buddy 2 Mod Apk is a fun and stress-relieving game that lets you do anything you want with a rag doll buddy using various weapons and elements. You can also unlock all weapons and skins with unlimited money in this modded version. The game has improved graphics and music that make it more appealing. It is easy and fun to play, but it also has some drawbacks, such as violence, repetition, ads, and the need for an internet connection. If you like this game, you may also enjoy similar titles such as Kick the Buddy: Forever, Beat the Boss 4, and Smash Dude.
-FAQs
-Here are some frequently asked questions about Kick the Buddy 2 Mod Apk:
-
-Is Kick the Buddy 2 Mod Apk safe to download and install?
-Yes, Kick the Buddy 2 Mod Apk is safe to download and install if you get it from a trusted source. However, you should always be careful when downloading and installing any mod apk file from unknown sources, as they may contain viruses or malware that can harm your device or steal your data.
-Is Kick the Buddy 2 Mod Apk legal to use?
-Kick the Buddy 2 Mod Apk is not legal to use, as it violates the terms and conditions of the original game. By using this mod apk, you are bypassing the in-app purchases and ads that support the game's developers, and you may face legal consequences if the developers or the authorities catch you using it.
-Can I play Kick the Buddy 2 Mod Apk offline?
-No, you cannot play Kick the Buddy 2 Mod Apk offline, as it requires an internet connection to run. You need an internet connection to access all the features and content of the game, such as weapons, skins, modes, challenges, etc. You also need an internet connection to save your progress and sync your data across different devices.
-Can I play Kick the Buddy 2 Mod Apk with friends?
-No, you cannot play Kick the Buddy 2 Mod Apk with friends, as it is a single-player game. You can only play with your buddy on your own device. However, you can share your screenshots and videos of your gameplay with your friends on social media platforms, such as Facebook, Instagram, Twitter, etc.
-What are some other games like Kick the Buddy 2 Mod Apk?
-Some other games like Kick the Buddy 2 Mod Apk are Kick the Buddy: Forever, Beat the Boss 4, Smash Dude, Happy Room: Log, Ragdoll Achievement 2, etc.
-
-
\ No newline at end of file
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/__init__.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/__init__.py
deleted file mode 100644
index 210a2989138380559f23045b568d0fbbeb918c03..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-# flake8: noqa
-from .arraymisc import *
-from .fileio import *
-from .image import *
-from .utils import *
-from .version import *
-from .video import *
-from .visualization import *
-
-# The following modules are not imported to this level, so mmcv may be used
-# without PyTorch.
-# - runner
-# - parallel
-# - op
diff --git a/spaces/csuhan/opendet2/opendet2/data/__init__.py b/spaces/csuhan/opendet2/opendet2/data/__init__.py
deleted file mode 100644
index 75e3253a410b3792e0cb63f41a577f2a362e8bd6..0000000000000000000000000000000000000000
--- a/spaces/csuhan/opendet2/opendet2/data/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .build import *
-from . import builtin
-
-__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/generate_batch.py b/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/generate_batch.py
deleted file mode 100644
index f7628ccc2e1607e0c28dd6ca0cbf8143e1330b9b..0000000000000000000000000000000000000000
--- a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/generate_batch.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import os
-
-from tqdm import tqdm
-import torch
-import numpy as np
-import random
-import scipy.io as scio
-from Demo_TFR_Pirenderer.src.utils import audio as audio
-
-def crop_pad_audio(wav, audio_length):
- if len(wav) > audio_length:
- wav = wav[:audio_length]
- elif len(wav) < audio_length:
- wav = np.pad(wav, [0, audio_length - len(wav)], mode='constant', constant_values=0)
- return wav
-
-def parse_audio_length(audio_length, sr, fps):
- bit_per_frames = sr / fps
-
- num_frames = int(audio_length / bit_per_frames)
- audio_length = int(num_frames * bit_per_frames)
-
- return audio_length, num_frames
-
-def generate_blink_seq(num_frames):
- ratio = np.zeros((num_frames,1))
- frame_id = 0
-    while frame_id < num_frames:
- start = 80
- if frame_id+start+9<=num_frames - 1:
- ratio[frame_id+start:frame_id+start+9, 0] = [0.5,0.6,0.7,0.9,1, 0.9, 0.7,0.6,0.5]
- frame_id = frame_id+start+9
- else:
- break
- return ratio
-
-def generate_blink_seq_randomly(num_frames):
- ratio = np.zeros((num_frames,1))
- if num_frames<=20:
- return ratio
- frame_id = 0
-    while frame_id < num_frames:
- start = random.choice(range(min(10,num_frames), min(int(num_frames/2), 70)))
- if frame_id+start+5<=num_frames - 1:
- ratio[frame_id+start:frame_id+start+5, 0] = [0.5, 0.9, 1.0, 0.9, 0.5]
- frame_id = frame_id+start+5
- else:
- break
- return ratio
-
-def get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=False):
-
- syncnet_mel_step_size = 16
- fps = 25
-
- pic_name = os.path.splitext(os.path.split(first_coeff_path)[-1])[0]
- audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]
-
- wav = audio.load_wav(audio_path, 16000)
- wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)
- wav = crop_pad_audio(wav, wav_length)
- orig_mel = audio.melspectrogram(wav).T
- spec = orig_mel.copy() # nframes 80
- indiv_mels = []
-
- for i in tqdm(range(num_frames), 'mel:'):
- start_frame_num = i-2
- start_idx = int(80. * (start_frame_num / float(fps)))
- end_idx = start_idx + syncnet_mel_step_size
- seq = list(range(start_idx, end_idx))
- seq = [ min(max(item, 0), orig_mel.shape[0]-1) for item in seq ]
- m = spec[seq, :]
- indiv_mels.append(m.T)
- indiv_mels = np.asarray(indiv_mels) # T 80 16
-
- ratio = generate_blink_seq_randomly(num_frames) # T
- source_semantics_path = first_coeff_path
- source_semantics_dict = scio.loadmat(source_semantics_path)
- ref_coeff = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70
- ref_coeff = np.repeat(ref_coeff, num_frames, axis=0)
-
- if ref_eyeblink_coeff_path is not None:
- ratio[:num_frames] = 0
- refeyeblink_coeff_dict = scio.loadmat(ref_eyeblink_coeff_path)
- refeyeblink_coeff = refeyeblink_coeff_dict['coeff_3dmm'][:,:64]
- refeyeblink_num_frames = refeyeblink_coeff.shape[0]
- if refeyeblink_num_frames threshold:
- action_name1 = action_name1
- else:
- action_name1 = 'no_gesture'
-
- if action_name != action_name1:
- # times += 1
- # ACTION_MODEL_MAX_FRAMES = 15
- action_name = action_name1
- # break
- # action_name ='no_gesture'
- else:
- # ACTION_MODEL_MAX_FRAMES = 15
- # times -= 1
- action_name = action_name1
-
-
- elif len(joints_list) == 0:
- action_name = 'no_gesture'
-
- cv.putText(debug_image, ' '.join(action_name), (3, 30),
- cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv.LINE_AA)
- cv.imshow('Hand Gesture Recognition', debug_image)
-        # Key handling (ESC: exit) #################################################
- key = cv.waitKey(1)
- if key == 27: # ESC
- break
- cap.release()
- cv.destroyAllWindows()
-
-
-
-
-
-
-
-def calc_bounding_rect(image, landmarks):
- image_width, image_height = image.shape[1], image.shape[0]
-
- landmark_array = np.empty((0, 2), int)
-
- for _, landmark in enumerate(landmarks.landmark):
- landmark_x = min(int(landmark.x * image_width), image_width - 1)
- landmark_y = min(int(landmark.y * image_height), image_height - 1)
-
- landmark_point = [np.array((landmark_x, landmark_y))]
-
- landmark_array = np.append(landmark_array, landmark_point, axis=0)
-
- x, y, w, h = cv.boundingRect(landmark_array)
-
- return [x, y, x + w, y + h]
-
-
-def calc_landmark_list(image, landmarks):
- image_width, image_height = image.shape[1], image.shape[0]
-
- landmark_point = []
- joint = []
-    # Keypoints
- for _, landmark in enumerate(landmarks.landmark):
- landmark_x = min(int(landmark.x * image_width), image_width - 1)
- landmark_y = min(int(landmark.y * image_height), image_height - 1)
- landmark_z = landmark.z
- landmark_v = landmark.visibility
- # np.array(landmark_x)
- landmark_point.append([landmark_x, landmark_y])
- joint.append([landmark_x, landmark_y,landmark_v])
- joint.append([0,0,0])
- # landmark_point.append([landmark_x, landmark_y,landmark_z])
- return landmark_point,joint
-
-
-
-def pre_process_landmark(landmark_list):
- temp_landmark_list = copy.deepcopy(landmark_list)
-
-    # Convert to relative coordinates
- base_x, base_y = 0, 0
- for index, landmark_point in enumerate(temp_landmark_list):
- if index == 0:
- base_x, base_y = landmark_point[0], landmark_point[1]
-
- temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x
- temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y
-
-    # Flatten to a one-dimensional list
- temp_landmark_list = list(
- itertools.chain.from_iterable(temp_landmark_list))
-
-    # Normalize
- max_value = max(list(map(abs, temp_landmark_list)))
-
- def normalize_(n):
- return n / max_value
-
- temp_landmark_list = list(map(normalize_, temp_landmark_list))
-
- return temp_landmark_list
-
-
-def pre_process_point_history(image, point_history):
- image_width, image_height = image.shape[1], image.shape[0]
-
- temp_point_history = copy.deepcopy(point_history)
-
-    # Convert to relative coordinates
- base_x, base_y = 0, 0
- for index, point in enumerate(temp_point_history):
- if index == 0:
- base_x, base_y = point[0], point[1]
-
- temp_point_history[index][0] = (temp_point_history[index][0] -
- base_x) / image_width
- temp_point_history[index][1] = (temp_point_history[index][1] -
- base_y) / image_height
-
-    # Flatten to a one-dimensional list
- temp_point_history = list(
- itertools.chain.from_iterable(temp_point_history))
-
- return temp_point_history
-
-
-def logging_csv(number, mode, landmark_list, point_history_list):
- if mode == 0:
- pass
- if mode == 1 and (0 <= number <= 9):
- csv_path = 'model/keypoint_classifier/keypoint.csv'
- with open(csv_path, 'a', newline="") as f:
- writer = csv.writer(f)
- writer.writerow([number, *landmark_list])
- if mode == 2 and (0 <= number <= 9):
- csv_path = 'model/point_history_classifier/point_history.csv'
- with open(csv_path, 'a', newline="") as f:
- writer = csv.writer(f)
- writer.writerow([number, *point_history_list])
- return
-
-
-def draw_landmarks(image, landmark_point):
-    # Connection lines
- if len(landmark_point) > 0:
-        # Thumb
- cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),
- (255, 255, 255), 2)
-
-        # Index finger
- cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),
- (255, 255, 255), 2)
-
-        # Middle finger
- cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),
- (255, 255, 255), 2)
-
-        # Ring finger
- cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),
- (255, 255, 255), 2)
-
-        # Little finger
- cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),
- (255, 255, 255), 2)
-
-        # Palm
- cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),
- (255, 255, 255), 2)
- cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),
- (0, 0, 0), 6)
- cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),
- (255, 255, 255), 2)
-
-    # Keypoints
- for index, landmark in enumerate(landmark_point):
-        if index == 0:  # Wrist 1
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 1:  # Wrist 2
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 2:  # Thumb: base
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 3:  # Thumb: first joint
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 4:  # Thumb: fingertip
- cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
-        if index == 5:  # Index finger: base
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 6:  # Index finger: second joint
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 7:  # Index finger: first joint
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 8:  # Index finger: fingertip
- cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
-        if index == 9:  # Middle finger: base
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 10:  # Middle finger: second joint
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 11:  # Middle finger: first joint
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 12:  # Middle finger: fingertip
- cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
-        if index == 13:  # Ring finger: base
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 14:  # Ring finger: second joint
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 15:  # Ring finger: first joint
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 16:  # Ring finger: fingertip
- cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
-        if index == 17:  # Little finger: base
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 18:  # Little finger: second joint
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 19:  # Little finger: first joint
- cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
-        if index == 20:  # Little finger: fingertip
- cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
- -1)
- cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
-
- return image
-
-
-def draw_bounding_rect(use_brect, image, brect):
- if use_brect:
-        # Bounding rectangle
- cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
- (0, 0, 0), 1)
-
- return image
-
-
-def draw_info_text(image, brect, hand_sign_text, conf):
- cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[1] - 22),
- (0, 0, 0), -1)
-
- # info_text = handedness.classification[0].label[0:]
- if hand_sign_text != "":
- # info_text = info_text + ':' + hand_sign_text
- info_text = hand_sign_text + ':' + conf
-
- cv.putText(image, info_text, (brect[0] + 5, brect[1] - 4),
- cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv.LINE_AA)
-
- # if finger_gesture_text != "":
- # cv.putText(image, "Finger Gesture:" + finger_gesture_text, (10, 60),
- # cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), 4, cv.LINE_AA)
- # cv.putText(image, "Finger Gesture:" + finger_gesture_text, (10, 60),
- # cv.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2,
- # cv.LINE_AA)
-
- return image
-
-
-def draw_point_history(image, point_history):
- for index, point in enumerate(point_history):
- if point[0] != 0 and point[1] != 0:
- cv.circle(image, (point[0], point[1]), 1 + int(index / 2),
- (152, 251, 152), 2)
-
- return image
-
-
-def draw_info(image, fps, mode, number):
- cv.putText(image, "FPS:" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,
- 1.0, (0, 0, 0), 4, cv.LINE_AA)
- cv.putText(image, "FPS:" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,
- 1.0, (255, 255, 255), 2, cv.LINE_AA)
-
- mode_string = ['Logging Key Point', 'Logging Point History']
- if 1 <= mode <= 2:
- cv.putText(image, "MODE:" + mode_string[mode - 1], (10, 90),
- cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1,
- cv.LINE_AA)
- if 0 <= number <= 9:
- cv.putText(image, "NUM:" + str(number), (10, 110),
- cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1,
- cv.LINE_AA)
- return image
-
-
-if __name__ == '__main__':
- main()
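The keypoint preprocessing in pre_process_landmark above (re-basing every point on the first landmark, flattening, then max-abs scaling) is the core feature transform of this app. Below is a minimal, self-contained sketch of the same idea on toy data; the function name and sample coordinates are illustrative only, and a small guard against an all-zero input is added that the original does not have.

import copy
import itertools

def preprocess_landmarks(landmark_list):
    # Make every point relative to the first landmark (the wrist).
    points = copy.deepcopy(landmark_list)
    base_x, base_y = points[0]
    points = [[x - base_x, y - base_y] for x, y in points]
    # Flatten to one feature vector and scale by the largest absolute value.
    flat = list(itertools.chain.from_iterable(points))
    max_value = max(map(abs, flat)) or 1.0  # guard against an all-zero vector
    return [v / max_value for v in flat]

# Toy example: three landmarks in pixel coordinates.
print(preprocess_landmarks([[100, 200], [110, 180], [90, 250]]))
# -> [0.0, 0.0, 0.2, -0.4, -0.2, 1.0]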
diff --git a/spaces/dawdqd/ChuanhuChatGPT/web_assets/html/billing_info.html b/spaces/dawdqd/ChuanhuChatGPT/web_assets/html/billing_info.html
deleted file mode 100644
index 71abcc802da3c70716919c1a4738ac077c47bf01..0000000000000000000000000000000000000000
--- a/spaces/dawdqd/ChuanhuChatGPT/web_assets/html/billing_info.html
+++ /dev/null
@@ -1,9 +0,0 @@
-{label}
-
-
- ${rounded_usage} ${usage_limit}
-
\ No newline at end of file
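The deleted template mixes a {label} placeholder with ${rounded_usage} and ${usage_limit}, where the dollar sign appears to be a literal currency symbol and the braces are format fields. As a rough illustration only (the Space's own rendering code is not part of this hunk), Python's str.format fills both in one pass:

template = "{label}\n${rounded_usage} ${usage_limit}"
print(template.format(label="This month", rounded_usage=1.23, usage_limit=120))
# This month
# $1.23 $120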
diff --git a/spaces/declare-lab/tango/diffusers/examples/textual_inversion/textual_inversion_flax.py b/spaces/declare-lab/tango/diffusers/examples/textual_inversion/textual_inversion_flax.py
deleted file mode 100644
index 988b67866fe9667d47c66a13eb402af5d9986a14..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/examples/textual_inversion/textual_inversion_flax.py
+++ /dev/null
@@ -1,681 +0,0 @@
-import argparse
-import logging
-import math
-import os
-import random
-from pathlib import Path
-
-import jax
-import jax.numpy as jnp
-import numpy as np
-import optax
-import PIL
-import torch
-import torch.utils.checkpoint
-import transformers
-from flax import jax_utils
-from flax.training import train_state
-from flax.training.common_utils import shard
-from huggingface_hub import create_repo, upload_folder
-
-# TODO: remove and import from diffusers.utils when the new version of diffusers is released
-from packaging import version
-from PIL import Image
-from torch.utils.data import Dataset
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed
-
-from diffusers import (
- FlaxAutoencoderKL,
- FlaxDDPMScheduler,
- FlaxPNDMScheduler,
- FlaxStableDiffusionPipeline,
- FlaxUNet2DConditionModel,
-)
-from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker
-from diffusers.utils import check_min_version
-
-
-if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
- PIL_INTERPOLATION = {
- "linear": PIL.Image.Resampling.BILINEAR,
- "bilinear": PIL.Image.Resampling.BILINEAR,
- "bicubic": PIL.Image.Resampling.BICUBIC,
- "lanczos": PIL.Image.Resampling.LANCZOS,
- "nearest": PIL.Image.Resampling.NEAREST,
- }
-else:
- PIL_INTERPOLATION = {
- "linear": PIL.Image.LINEAR,
- "bilinear": PIL.Image.BILINEAR,
- "bicubic": PIL.Image.BICUBIC,
- "lanczos": PIL.Image.LANCZOS,
- "nearest": PIL.Image.NEAREST,
- }
-# ------------------------------------------------------------------------------
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
-check_min_version("0.15.0.dev0")
-
-logger = logging.getLogger(__name__)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
- )
- parser.add_argument(
- "--placeholder_token",
- type=str,
- default=None,
- required=True,
- help="A token to use as a placeholder for the concept.",
- )
- parser.add_argument(
- "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
- )
- parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
- parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
- parser.add_argument(
- "--output_dir",
- type=str,
- default="text-inversion-model",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
- )
- parser.add_argument(
- "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument("--num_train_epochs", type=int, default=100)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=5000,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--save_steps",
- type=int,
- default=500,
- help="Save learned_embeds.bin every X updates steps.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=1e-4,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=True,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--revision",
- type=str,
- default=None,
- required=False,
- help="Revision of pretrained model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument(
- "--use_auth_token",
- action="store_true",
- help=(
- "Will use the token generated when running `huggingface-cli login` (necessary to use this script with"
- " private models)."
- ),
- )
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-
- args = parser.parse_args()
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- if args.train_data_dir is None:
- raise ValueError("You must specify a train data directory.")
-
- return args
-
-
-imagenet_templates_small = [
- "a photo of a {}",
- "a rendering of a {}",
- "a cropped photo of the {}",
- "the photo of a {}",
- "a photo of a clean {}",
- "a photo of a dirty {}",
- "a dark photo of the {}",
- "a photo of my {}",
- "a photo of the cool {}",
- "a close-up photo of a {}",
- "a bright photo of the {}",
- "a cropped photo of a {}",
- "a photo of the {}",
- "a good photo of the {}",
- "a photo of one {}",
- "a close-up photo of the {}",
- "a rendition of the {}",
- "a photo of the clean {}",
- "a rendition of a {}",
- "a photo of a nice {}",
- "a good photo of a {}",
- "a photo of the nice {}",
- "a photo of the small {}",
- "a photo of the weird {}",
- "a photo of the large {}",
- "a photo of a cool {}",
- "a photo of a small {}",
-]
-
-imagenet_style_templates_small = [
- "a painting in the style of {}",
- "a rendering in the style of {}",
- "a cropped painting in the style of {}",
- "the painting in the style of {}",
- "a clean painting in the style of {}",
- "a dirty painting in the style of {}",
- "a dark painting in the style of {}",
- "a picture in the style of {}",
- "a cool painting in the style of {}",
- "a close-up painting in the style of {}",
- "a bright painting in the style of {}",
- "a cropped painting in the style of {}",
- "a good painting in the style of {}",
- "a close-up painting in the style of {}",
- "a rendition in the style of {}",
- "a nice painting in the style of {}",
- "a small painting in the style of {}",
- "a weird painting in the style of {}",
- "a large painting in the style of {}",
-]
-
-
-class TextualInversionDataset(Dataset):
- def __init__(
- self,
- data_root,
- tokenizer,
- learnable_property="object", # [object, style]
- size=512,
- repeats=100,
- interpolation="bicubic",
- flip_p=0.5,
- set="train",
- placeholder_token="*",
- center_crop=False,
- ):
- self.data_root = data_root
- self.tokenizer = tokenizer
- self.learnable_property = learnable_property
- self.size = size
- self.placeholder_token = placeholder_token
- self.center_crop = center_crop
- self.flip_p = flip_p
-
- self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
-
- self.num_images = len(self.image_paths)
- self._length = self.num_images
-
- if set == "train":
- self._length = self.num_images * repeats
-
- self.interpolation = {
- "linear": PIL_INTERPOLATION["linear"],
- "bilinear": PIL_INTERPOLATION["bilinear"],
- "bicubic": PIL_INTERPOLATION["bicubic"],
- "lanczos": PIL_INTERPOLATION["lanczos"],
- }[interpolation]
-
- self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
- self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, i):
- example = {}
- image = Image.open(self.image_paths[i % self.num_images])
-
- if not image.mode == "RGB":
- image = image.convert("RGB")
-
- placeholder_string = self.placeholder_token
- text = random.choice(self.templates).format(placeholder_string)
-
- example["input_ids"] = self.tokenizer(
- text,
- padding="max_length",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- return_tensors="pt",
- ).input_ids[0]
-
- # default to score-sde preprocessing
- img = np.array(image).astype(np.uint8)
-
- if self.center_crop:
- crop = min(img.shape[0], img.shape[1])
- (
- h,
- w,
- ) = (
- img.shape[0],
- img.shape[1],
- )
- img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
-
- image = Image.fromarray(img)
- image = image.resize((self.size, self.size), resample=self.interpolation)
-
- image = self.flip_transform(image)
- image = np.array(image).astype(np.uint8)
- image = (image / 127.5 - 1.0).astype(np.float32)
-
- example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
- return example
-
-
-def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng):
- if model.config.vocab_size == new_num_tokens or new_num_tokens is None:
- return
- model.config.vocab_size = new_num_tokens
-
- params = model.params
- old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"]
- old_num_tokens, emb_dim = old_embeddings.shape
-
- initializer = jax.nn.initializers.normal()
-
- new_embeddings = initializer(rng, (new_num_tokens, emb_dim))
- new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings)
- new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id])
- params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings
-
- model.params = params
- return model
-
-
-def get_params_to_save(params):
- return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))
-
-
-def main():
- args = parse_args()
-
- if args.seed is not None:
- set_seed(args.seed)
-
- if jax.process_index() == 0:
- if args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- if args.push_to_hub:
- repo_id = create_repo(
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
- ).repo_id
-
- # Make one log on every process with the configuration for debugging.
- logging.basicConfig(
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
- datefmt="%m/%d/%Y %H:%M:%S",
- level=logging.INFO,
- )
- # Setup logging, we only want one process per machine to log things on the screen.
- logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
- if jax.process_index() == 0:
- transformers.utils.logging.set_verbosity_info()
- else:
- transformers.utils.logging.set_verbosity_error()
-
-    # Load the tokenizer and add the placeholder token as an additional special token
- if args.tokenizer_name:
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
- elif args.pretrained_model_name_or_path:
- tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
- # Add the placeholder token in tokenizer
- num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
- if num_added_tokens == 0:
- raise ValueError(
- f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
- " `placeholder_token` that is not already in the tokenizer."
- )
-
- # Convert the initializer_token, placeholder_token to ids
- token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
- # Check if initializer_token is a single token or a sequence of tokens
- if len(token_ids) > 1:
- raise ValueError("The initializer token must be a single token.")
-
- initializer_token_id = token_ids[0]
- placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
-
- # Load models and create wrapper for stable diffusion
- text_encoder = FlaxCLIPTextModel.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
- )
- vae, vae_params = FlaxAutoencoderKL.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
- )
- unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
- )
-
- # Create sampling rng
- rng = jax.random.PRNGKey(args.seed)
- rng, _ = jax.random.split(rng)
- # Resize the token embeddings as we are adding new special tokens to the tokenizer
- text_encoder = resize_token_embeddings(
- text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng
- )
- original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"]
-
- train_dataset = TextualInversionDataset(
- data_root=args.train_data_dir,
- tokenizer=tokenizer,
- size=args.resolution,
- placeholder_token=args.placeholder_token,
- repeats=args.repeats,
- learnable_property=args.learnable_property,
- center_crop=args.center_crop,
- set="train",
- )
-
- def collate_fn(examples):
- pixel_values = torch.stack([example["pixel_values"] for example in examples])
- input_ids = torch.stack([example["input_ids"] for example in examples])
-
- batch = {"pixel_values": pixel_values, "input_ids": input_ids}
- batch = {k: v.numpy() for k, v in batch.items()}
-
- return batch
-
- total_train_batch_size = args.train_batch_size * jax.local_device_count()
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn
- )
-
- # Optimization
- if args.scale_lr:
- args.learning_rate = args.learning_rate * total_train_batch_size
-
- constant_scheduler = optax.constant_schedule(args.learning_rate)
-
- optimizer = optax.adamw(
- learning_rate=constant_scheduler,
- b1=args.adam_beta1,
- b2=args.adam_beta2,
- eps=args.adam_epsilon,
- weight_decay=args.adam_weight_decay,
- )
-
- def create_mask(params, label_fn):
- def _map(params, mask, label_fn):
- for k in params:
- if label_fn(k):
- mask[k] = "token_embedding"
- else:
- if isinstance(params[k], dict):
- mask[k] = {}
- _map(params[k], mask[k], label_fn)
- else:
- mask[k] = "zero"
-
- mask = {}
- _map(params, mask, label_fn)
- return mask
-
- def zero_grads():
- # from https://github.com/deepmind/optax/issues/159#issuecomment-896459491
- def init_fn(_):
- return ()
-
- def update_fn(updates, state, params=None):
- return jax.tree_util.tree_map(jnp.zeros_like, updates), ()
-
- return optax.GradientTransformation(init_fn, update_fn)
-
- # Zero out gradients of layers other than the token embedding layer
- tx = optax.multi_transform(
- {"token_embedding": optimizer, "zero": zero_grads()},
- create_mask(text_encoder.params, lambda s: s == "token_embedding"),
- )
-
- state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx)
-
- noise_scheduler = FlaxDDPMScheduler(
- beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
- )
- noise_scheduler_state = noise_scheduler.create_state()
-
- # Initialize our training
- train_rngs = jax.random.split(rng, jax.local_device_count())
-
- # Define gradient train step fn
- def train_step(state, vae_params, unet_params, batch, train_rng):
- dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3)
-
- def compute_loss(params):
- vae_outputs = vae.apply(
- {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode
- )
- latents = vae_outputs.latent_dist.sample(sample_rng)
- # (NHWC) -> (NCHW)
- latents = jnp.transpose(latents, (0, 3, 1, 2))
- latents = latents * vae.config.scaling_factor
-
- noise_rng, timestep_rng = jax.random.split(sample_rng)
- noise = jax.random.normal(noise_rng, latents.shape)
- bsz = latents.shape[0]
- timesteps = jax.random.randint(
- timestep_rng,
- (bsz,),
- 0,
- noise_scheduler.config.num_train_timesteps,
- )
- noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps)
- encoder_hidden_states = state.apply_fn(
- batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True
- )[0]
- # Predict the noise residual and compute loss
- model_pred = unet.apply(
- {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False
- ).sample
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
- target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps)
- else:
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
- loss = (target - model_pred) ** 2
- loss = loss.mean()
-
- return loss
-
- grad_fn = jax.value_and_grad(compute_loss)
- loss, grad = grad_fn(state.params)
- grad = jax.lax.pmean(grad, "batch")
- new_state = state.apply_gradients(grads=grad)
-
- # Keep the token embeddings fixed except the newly added embeddings for the concept,
- # as we only want to optimize the concept embeddings
- token_embeds = original_token_embeds.at[placeholder_token_id].set(
- new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id]
- )
- new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds
-
- metrics = {"loss": loss}
- metrics = jax.lax.pmean(metrics, axis_name="batch")
- return new_state, metrics, new_train_rng
-
- # Create parallel version of the train and eval step
- p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
-
- # Replicate the train state on each device
- state = jax_utils.replicate(state)
- vae_params = jax_utils.replicate(vae_params)
- unet_params = jax_utils.replicate(unet_params)
-
- # Train!
- num_update_steps_per_epoch = math.ceil(len(train_dataloader))
-
- # Scheduler and math around the number of training steps.
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
-
- global_step = 0
-
- epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... (1/{args.num_train_epochs})", position=0)
- for epoch in epochs:
- # ======================== Training ================================
-
- train_metrics = []
-
- steps_per_epoch = len(train_dataset) // total_train_batch_size
- train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False)
- # train
- for batch in train_dataloader:
- batch = shard(batch)
- state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs)
- train_metrics.append(train_metric)
-
- train_step_progress_bar.update(1)
- global_step += 1
-
- if global_step >= args.max_train_steps:
- break
- if global_step % args.save_steps == 0:
- learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"][
- "embedding"
- ][placeholder_token_id]
- learned_embeds_dict = {args.placeholder_token: learned_embeds}
- jnp.save(
- os.path.join(args.output_dir, "learned_embeds-" + str(global_step) + ".npy"), learned_embeds_dict
- )
-
- train_metric = jax_utils.unreplicate(train_metric)
-
- train_step_progress_bar.close()
- epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")
-
-    # Create the pipeline using the trained modules and save it.
- if jax.process_index() == 0:
- scheduler = FlaxPNDMScheduler(
- beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
- )
- safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
- "CompVis/stable-diffusion-safety-checker", from_pt=True
- )
- pipeline = FlaxStableDiffusionPipeline(
- text_encoder=text_encoder,
- vae=vae,
- unet=unet,
- tokenizer=tokenizer,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
- )
-
- pipeline.save_pretrained(
- args.output_dir,
- params={
- "text_encoder": get_params_to_save(state.params),
- "vae": get_params_to_save(vae_params),
- "unet": get_params_to_save(unet_params),
- "safety_checker": safety_checker.params,
- },
- )
-
- # Also save the newly trained embeddings
- learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][
- placeholder_token_id
- ]
- learned_embeds_dict = {args.placeholder_token: learned_embeds}
- jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict)
-
- if args.push_to_hub:
- upload_folder(
- repo_id=repo_id,
- folder_path=args.output_dir,
- commit_message="End of training",
- ignore_patterns=["step_*", "epoch_*"],
- )
-
-
-if __name__ == "__main__":
- main()
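After training, the script saves the learned concept vector to learned_embeds.npy (and periodic learned_embeds-<step>.npy files) as a {placeholder_token: embedding} dict. Below is a minimal sketch for inspecting such a file afterwards in the same Python environment (the pickled values are JAX arrays); the output directory is the script's default and the printed shape is only an example.

import numpy as np

# np.save/jnp.save pickle a Python dict into a 0-d object array, so
# allow_pickle=True plus .item() is needed to recover the dict.
learned = np.load("text-inversion-model/learned_embeds.npy", allow_pickle=True).item()
for token, embedding in learned.items():
    print(token, np.asarray(embedding).shape)  # e.g. <my-concept> (768,)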
diff --git a/spaces/deep-learning-analytics/Title_Generation/README.md b/spaces/deep-learning-analytics/Title_Generation/README.md
deleted file mode 100644
index ad732209f5d9d21ce31622cb0be0d4788da19991..0000000000000000000000000000000000000000
--- a/spaces/deep-learning-analytics/Title_Generation/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Title_Generation
-emoji: 📊
-colorFrom: gray
-colorTo: red
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/deepwisdom/MetaGPT/metagpt/learn/text_to_embedding.py b/spaces/deepwisdom/MetaGPT/metagpt/learn/text_to_embedding.py
deleted file mode 100644
index 26dab0419508ada2a281f1df95a4362ac8465aa9..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/metagpt/learn/text_to_embedding.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/8/18
-@Author : mashenquan
-@File : text_to_embedding.py
-@Desc : Text-to-Embedding skill, which provides text-to-embedding functionality.
-"""
-
-from metagpt.config import CONFIG
-from metagpt.tools.openai_text_to_embedding import oas3_openai_text_to_embedding
-
-
-async def text_to_embedding(text, model="text-embedding-ada-002", openai_api_key="", **kwargs):
- """Text to embedding
-
- :param text: The text used for embedding.
-    :param model: ID of the model to use; one of ['text-embedding-ada-002']. For more details, check out `https://api.openai.com/v1/models`.
-    :param openai_api_key: OpenAI API key. For more details, check out `https://platform.openai.com/account/api-keys`.
-    :return: A JSON object of the :class:`ResultEmbedding` class if successful, otherwise `{}`. Raises `EnvironmentError` if no API key is configured.
- """
- if CONFIG.OPENAI_API_KEY or openai_api_key:
- return await oas3_openai_text_to_embedding(text, model=model, openai_api_key=openai_api_key)
-    raise EnvironmentError("No OpenAI API key provided. Set CONFIG.OPENAI_API_KEY or pass openai_api_key.")
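-
-# A minimal usage sketch (assumes CONFIG.OPENAI_API_KEY is configured; the input text is illustrative):
-#   import asyncio
-#   embedding = asyncio.run(text_to_embedding("The quick brown fox"))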
diff --git a/spaces/dejavusss/philschmid-flan-t5-base-samsum/app.py b/spaces/dejavusss/philschmid-flan-t5-base-samsum/app.py
deleted file mode 100644
index 20350f67504440ec74b5febd2d7069f04065f4aa..0000000000000000000000000000000000000000
--- a/spaces/dejavusss/philschmid-flan-t5-base-samsum/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/philschmid/flan-t5-base-samsum").launch()
\ No newline at end of file
diff --git a/spaces/devoworm-group/nucleus_segmentor/app.py b/spaces/devoworm-group/nucleus_segmentor/app.py
deleted file mode 100644
index 4c46f2625315630c7fc26c40b1d076284f4b9c8d..0000000000000000000000000000000000000000
--- a/spaces/devoworm-group/nucleus_segmentor/app.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import functions
-import streamlit as st
-import numpy as np
-import pandas as pd
-from PIL import Image
-from pathlib import Path
-import joblib
-
-import numpy as np
-import cv2
-import onnxruntime as ort
-import imutils
-# import matplotlib.pyplot as plt
-import pandas as pd
-import plotly.express as px
-
-
-functions.nucleus_segmentation()
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Camtasia Studio 9 Key Crack Activator _HOT_ Keygen Download.md b/spaces/diacanFperku/AutoGPT/Camtasia Studio 9 Key Crack Activator _HOT_ Keygen Download.md
deleted file mode 100644
index aada9a33ca53208133685a423cb61f69726f0844..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Camtasia Studio 9 Key Crack Activator _HOT_ Keygen Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Camtasia Studio 9 Key {Crack Activator} Keygen Download Download Zip → https://gohhs.com/2uFVtA
-
-Improvements in video standards and quality are coming fast. Great news: for the first time in the history of Camtasia Studio 9 Serial Key ... for free. It's just an activation key that you can use to activate the software in the Windows Store. Now, if you have a Windows account, you can install and use Camtasia Studio for free. So this is not only a good fix, but also a very good introduction to Camtasia Studio. It's also worth noting that you can change your license requirements to activate Camtasia Studio for free. 8a78ff9644
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/HD Online Player (20 Twenty Malayalam Movie !!INSTALL!! Free 167).md b/spaces/diacanFperku/AutoGPT/HD Online Player (20 Twenty Malayalam Movie !!INSTALL!! Free 167).md
deleted file mode 100644
index 3d7aba3db0315fca07103e8ee2c0d334715bd6fa..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/HD Online Player (20 Twenty Malayalam Movie !!INSTALL!! Free 167).md
+++ /dev/null
@@ -1,11 +0,0 @@
-HD Online Player (20 twenty malayalam movie free 167) Download File ✫ https://gohhs.com/2uFVkI
-
-Vinnaithaandi Varuvaayaa is a 2010 Indian Tamil romantic drama film written and directed by Gautam Vasudev Menon, starring Silambarasan and ... Wikipedia
-Vinniathe Varuvaayaa — Vinnaithaandi Varuvaayaa is a 2010 Hindi drama film written and directed by Gautam Vasudev Menon, starring Silambarasana and ...
-Wikipedia
-Sri Chaitanya Saraswat Math - Sri Chaitanya Saraswat Math, or the Society for Krishna Consciousness in India, is one of the largest Hindu organizations in the world.
-As of October 2011, there are more than 10 million members of Sri Chaitanya Saraswat Math in the world... ...
-Wikipedia 8a78ff9644
-
-
-
diff --git a/spaces/digitalxingtong/Lixiang-Bert-Vits2/data_utils.py b/spaces/digitalxingtong/Lixiang-Bert-Vits2/data_utils.py
deleted file mode 100644
index 2c98d3dc8b9572bd05859033a74d155425a2a2ab..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Lixiang-Bert-Vits2/data_utils.py
+++ /dev/null
@@ -1,332 +0,0 @@
-import time
-import os
-import random
-import numpy as np
-import torch
-import torch.utils.data
-import torchaudio
-import commons
-from mel_processing import spectrogram_torch, mel_spectrogram_torch, spec_to_mel_torch
-from utils import load_wav_to_torch, load_filepaths_and_text
-from text import cleaned_text_to_sequence, get_bert
-
-"""Multi speaker version"""
-
-
-class TextAudioSpeakerLoader(torch.utils.data.Dataset):
- """
- 1) loads audio, speaker_id, text pairs
- 2) normalizes text and converts them to sequences of integers
- 3) computes spectrograms from audio files.
- """
-
- def __init__(self, audiopaths_sid_text, hparams):
- self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
- self.max_wav_value = hparams.max_wav_value
- self.sampling_rate = hparams.sampling_rate
- self.filter_length = hparams.filter_length
- self.hop_length = hparams.hop_length
- self.win_length = hparams.win_length
- self.sampling_rate = hparams.sampling_rate
- self.spk_map = hparams.spk2id
- self.hparams = hparams
-
- self.use_mel_spec_posterior = getattr(hparams, "use_mel_posterior_encoder", False)
- if self.use_mel_spec_posterior:
- self.n_mel_channels = getattr(hparams, "n_mel_channels", 80)
-
- self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
- self.add_blank = hparams.add_blank
- self.min_text_len = getattr(hparams, "min_text_len", 1)
- self.max_text_len = getattr(hparams, "max_text_len", 300)
-
- random.seed(1234)
- random.shuffle(self.audiopaths_sid_text)
- self._filter()
-
- def _filter(self):
- """
- Filter text & store spec lengths
- """
- # Store spectrogram lengths for Bucketing
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
- # spec_length = wav_length // hop_length
-
- audiopaths_sid_text_new = []
- lengths = []
- skipped = 0
- for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text:
- audiopath = f'{_id}'
- if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:
- phones = phones.split(" ")
- tone = [int(i) for i in tone.split(" ")]
- word2ph = [int(i) for i in word2ph.split(" ")]
- audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph])
- lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
- else:
- skipped += 1
- print("skipped: ", skipped, ", total: ", len(self.audiopaths_sid_text))
- self.audiopaths_sid_text = audiopaths_sid_text_new
- self.lengths = lengths
-
- def get_audio_text_speaker_pair(self, audiopath_sid_text):
- # separate filename, speaker_id and text
- audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text
-
- bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath)
-
- spec, wav = self.get_audio(audiopath)
- sid = torch.LongTensor([int(self.spk_map[sid])])
- return (phones, spec, wav, sid, tone, language, bert)
-
- def get_audio(self, filename):
- audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)
- '''
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
-            raise ValueError("{} SR doesn't match target {} SR".format(
-                sampling_rate, self.sampling_rate))
- audio_norm = audio / self.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
- '''
- spec_filename = filename.replace(".wav", ".spec.pt")
- if self.use_mel_spec_posterior:
- spec_filename = spec_filename.replace(".spec.pt", ".mel.pt")
- if os.path.exists(spec_filename):
- spec = torch.load(spec_filename)
- else:
- if self.use_mel_spec_posterior:
- # if os.path.exists(filename.replace(".wav", ".spec.pt")):
- # # spec, n_fft, num_mels, sampling_rate, fmin, fmax
- # spec = spec_to_mel_torch(
- # torch.load(filename.replace(".wav", ".spec.pt")),
- # self.filter_length, self.n_mel_channels, self.sampling_rate,
- # self.hparams.mel_fmin, self.hparams.mel_fmax)
- spec = mel_spectrogram_torch(audio_norm, self.filter_length,
- self.n_mel_channels, self.sampling_rate, self.hop_length,
- self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False)
- else:
- spec = spectrogram_torch(audio_norm, self.filter_length,
- self.sampling_rate, self.hop_length, self.win_length,
- center=False)
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename)
- return spec, audio_norm
-
- def get_text(self, text, word2ph, phone, tone, language_str, wav_path):
- # print(text, word2ph,phone, tone, language_str)
- pold = phone
- w2pho = [i for i in word2ph]
- word2ph = [i for i in word2ph]
- phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
- pold2 = phone
-
- if self.add_blank:
- p1 = len(phone)
- phone = commons.intersperse(phone, 0)
- p2 = len(phone)
- t1 = len(tone)
- tone = commons.intersperse(tone, 0)
- t2 = len(tone)
- language = commons.intersperse(language, 0)
- for i in range(len(word2ph)):
- word2ph[i] = word2ph[i] * 2
- word2ph[0] += 1
- bert_path = wav_path.replace(".wav", ".bert.pt")
- try:
- bert = torch.load(bert_path)
- assert bert.shape[-1] == len(phone)
-        except Exception:
- bert = get_bert(text, word2ph, language_str)
- torch.save(bert, bert_path)
- #print(bert.shape[-1], bert_path, text, pold)
- assert bert.shape[-1] == len(phone)
-
- assert bert.shape[-1] == len(phone), (
- bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho)
- phone = torch.LongTensor(phone)
- tone = torch.LongTensor(tone)
- language = torch.LongTensor(language)
- return bert, phone, tone, language
-
- def get_sid(self, sid):
- sid = torch.LongTensor([int(sid)])
- return sid
-
- def __getitem__(self, index):
- return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
-
- def __len__(self):
- return len(self.audiopaths_sid_text)
-
-
-class TextAudioSpeakerCollate():
- """ Zero-pads model inputs and targets
- """
-
- def __init__(self, return_ids=False):
- self.return_ids = return_ids
-
- def __call__(self, batch):
- """Collate's training batch from normalized text, audio and speaker identities
- PARAMS
- ------
- batch: [text_normalized, spec_normalized, wav_normalized, sid]
- """
- # Right zero-pad all one-hot text sequences to max input length
- _, ids_sorted_decreasing = torch.sort(
- torch.LongTensor([x[1].size(1) for x in batch]),
- dim=0, descending=True)
-
- max_text_len = max([len(x[0]) for x in batch])
- max_spec_len = max([x[1].size(1) for x in batch])
- max_wav_len = max([x[2].size(1) for x in batch])
-
- text_lengths = torch.LongTensor(len(batch))
- spec_lengths = torch.LongTensor(len(batch))
- wav_lengths = torch.LongTensor(len(batch))
- sid = torch.LongTensor(len(batch))
-
- text_padded = torch.LongTensor(len(batch), max_text_len)
- tone_padded = torch.LongTensor(len(batch), max_text_len)
- language_padded = torch.LongTensor(len(batch), max_text_len)
- bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)
-
- spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
- wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
- text_padded.zero_()
- tone_padded.zero_()
- language_padded.zero_()
- spec_padded.zero_()
- wav_padded.zero_()
- bert_padded.zero_()
- for i in range(len(ids_sorted_decreasing)):
- row = batch[ids_sorted_decreasing[i]]
-
- text = row[0]
- text_padded[i, :text.size(0)] = text
- text_lengths[i] = text.size(0)
-
- spec = row[1]
- spec_padded[i, :, :spec.size(1)] = spec
- spec_lengths[i] = spec.size(1)
-
- wav = row[2]
- wav_padded[i, :, :wav.size(1)] = wav
- wav_lengths[i] = wav.size(1)
-
- sid[i] = row[3]
-
- tone = row[4]
- tone_padded[i, :tone.size(0)] = tone
-
- language = row[5]
- language_padded[i, :language.size(0)] = language
-
- bert = row[6]
- bert_padded[i, :, :bert.size(1)] = bert
-
- return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded
-
-
-class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
- """
- Maintain similar input lengths in a batch.
- Length groups are specified by boundaries.
-    Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
-
- It removes samples which are not included in the boundaries.
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
- """
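-
-    # Concrete illustration (hypothetical numbers): with boundaries = [32, 300, 400],
-    # samples of length 33..300 fall into bucket 0 and lengths 301..400 into bucket 1;
-    # samples with length <= 32 or > 400 return -1 from _bisect() and are discarded.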
-
- def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
- super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
- self.lengths = dataset.lengths
- self.batch_size = batch_size
- self.boundaries = boundaries
-
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
- self.total_size = sum(self.num_samples_per_bucket)
- self.num_samples = self.total_size // self.num_replicas
-
- def _create_buckets(self):
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
- for i in range(len(self.lengths)):
- length = self.lengths[i]
- idx_bucket = self._bisect(length)
- if idx_bucket != -1:
- buckets[idx_bucket].append(i)
-
- for i in range(len(buckets) - 1, 0, -1):
- if len(buckets[i]) == 0:
- buckets.pop(i)
- self.boundaries.pop(i + 1)
-
- num_samples_per_bucket = []
- for i in range(len(buckets)):
- len_bucket = len(buckets[i])
- total_batch_size = self.num_replicas * self.batch_size
- rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
- num_samples_per_bucket.append(len_bucket + rem)
- return buckets, num_samples_per_bucket
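-
-    # Example for _create_buckets above (hypothetical numbers): with num_replicas=2 and
-    # batch_size=8 the total batch size is 16, so a bucket of 45 samples gets rem=3 and
-    # is padded to 48; __iter__ fills the remainder by cycling through the bucket's indices.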
-
- def __iter__(self):
- # deterministically shuffle based on epoch
- g = torch.Generator()
- g.manual_seed(self.epoch)
-
- indices = []
- if self.shuffle:
- for bucket in self.buckets:
- indices.append(torch.randperm(len(bucket), generator=g).tolist())
- else:
- for bucket in self.buckets:
- indices.append(list(range(len(bucket))))
-
- batches = []
- for i in range(len(self.buckets)):
- bucket = self.buckets[i]
- len_bucket = len(bucket)
- if (len_bucket == 0):
- continue
- ids_bucket = indices[i]
- num_samples_bucket = self.num_samples_per_bucket[i]
-
- # add extra samples to make it evenly divisible
- rem = num_samples_bucket - len_bucket
- ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
-
- # subsample
- ids_bucket = ids_bucket[self.rank::self.num_replicas]
-
- # batching
- for j in range(len(ids_bucket) // self.batch_size):
- batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
- batches.append(batch)
-
- if self.shuffle:
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
- batches = [batches[i] for i in batch_ids]
- self.batches = batches
-
- assert len(self.batches) * self.batch_size == self.num_samples
- return iter(self.batches)
-
- def _bisect(self, x, lo=0, hi=None):
- if hi is None:
- hi = len(self.boundaries) - 1
-
- if hi > lo:
- mid = (hi + lo) // 2
- if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
- return mid
- elif x <= self.boundaries[mid]:
- return self._bisect(x, lo, mid)
- else:
- return self._bisect(x, mid + 1, hi)
- else:
- return -1
-
- def __len__(self):
- return self.num_samples // self.batch_size
diff --git a/spaces/dineshreddy/WALT/mmdet/models/backbones/ssd_vgg.py b/spaces/dineshreddy/WALT/mmdet/models/backbones/ssd_vgg.py
deleted file mode 100644
index cbc4fbb2301afc002f47abb9ed133a500d6cf23f..0000000000000000000000000000000000000000
--- a/spaces/dineshreddy/WALT/mmdet/models/backbones/ssd_vgg.py
+++ /dev/null
@@ -1,169 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import VGG, constant_init, kaiming_init, normal_init, xavier_init
-from mmcv.runner import load_checkpoint
-
-from mmdet.utils import get_root_logger
-from ..builder import BACKBONES
-
-
-@BACKBONES.register_module()
-class SSDVGG(VGG):
- """VGG Backbone network for single-shot-detection.
-
- Args:
- input_size (int): width and height of input, from {300, 512}.
- depth (int): Depth of vgg, from {11, 13, 16, 19}.
- out_indices (Sequence[int]): Output from which stages.
-
- Example:
- >>> self = SSDVGG(input_size=300, depth=11)
- >>> self.eval()
- >>> inputs = torch.rand(1, 3, 300, 300)
- >>> level_outputs = self.forward(inputs)
- >>> for level_out in level_outputs:
- ... print(tuple(level_out.shape))
- (1, 1024, 19, 19)
- (1, 512, 10, 10)
- (1, 256, 5, 5)
- (1, 256, 3, 3)
- (1, 256, 1, 1)
- """
- extra_setting = {
- 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
- 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
- }
-
- def __init__(self,
- input_size,
- depth,
- with_last_pool=False,
- ceil_mode=True,
- out_indices=(3, 4),
- out_feature_indices=(22, 34),
- l2_norm_scale=20.):
- # TODO: in_channels for mmcv.VGG
- super(SSDVGG, self).__init__(
- depth,
- with_last_pool=with_last_pool,
- ceil_mode=ceil_mode,
- out_indices=out_indices)
- assert input_size in (300, 512)
- self.input_size = input_size
-
- self.features.add_module(
- str(len(self.features)),
- nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
- self.features.add_module(
- str(len(self.features)),
- nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
- self.features.add_module(
- str(len(self.features)), nn.ReLU(inplace=True))
- self.features.add_module(
- str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
- self.features.add_module(
- str(len(self.features)), nn.ReLU(inplace=True))
- self.out_feature_indices = out_feature_indices
-
- self.inplanes = 1024
- self.extra = self._make_extra_layers(self.extra_setting[input_size])
- self.l2_norm = L2Norm(
- self.features[out_feature_indices[0] - 1].out_channels,
- l2_norm_scale)
-
- def init_weights(self, pretrained=None):
- """Initialize the weights in backbone.
-
- Args:
- pretrained (str, optional): Path to pre-trained weights.
- Defaults to None.
- """
- if isinstance(pretrained, str):
- logger = get_root_logger()
- load_checkpoint(self, pretrained, strict=False, logger=logger)
- elif pretrained is None:
- for m in self.features.modules():
- if isinstance(m, nn.Conv2d):
- kaiming_init(m)
- elif isinstance(m, nn.BatchNorm2d):
- constant_init(m, 1)
- elif isinstance(m, nn.Linear):
- normal_init(m, std=0.01)
- else:
- raise TypeError('pretrained must be a str or None')
-
- for m in self.extra.modules():
- if isinstance(m, nn.Conv2d):
- xavier_init(m, distribution='uniform')
-
- constant_init(self.l2_norm, self.l2_norm.scale)
-
- def forward(self, x):
- """Forward function."""
- outs = []
- for i, layer in enumerate(self.features):
- x = layer(x)
- if i in self.out_feature_indices:
- outs.append(x)
- for i, layer in enumerate(self.extra):
- x = F.relu(layer(x), inplace=True)
- if i % 2 == 1:
- outs.append(x)
- outs[0] = self.l2_norm(outs[0])
- if len(outs) == 1:
- return outs[0]
- else:
- return tuple(outs)
-
- def _make_extra_layers(self, outplanes):
- layers = []
- kernel_sizes = (1, 3)
- num_layers = 0
- outplane = None
- for i in range(len(outplanes)):
- if self.inplanes == 'S':
- self.inplanes = outplane
- continue
- k = kernel_sizes[num_layers % 2]
- if outplanes[i] == 'S':
- outplane = outplanes[i + 1]
- conv = nn.Conv2d(
- self.inplanes, outplane, k, stride=2, padding=1)
- else:
- outplane = outplanes[i]
- conv = nn.Conv2d(
- self.inplanes, outplane, k, stride=1, padding=0)
- layers.append(conv)
- self.inplanes = outplanes[i]
- num_layers += 1
- if self.input_size == 512:
- layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))
-
- return nn.Sequential(*layers)
-
-
-class L2Norm(nn.Module):
-
- def __init__(self, n_dims, scale=20., eps=1e-10):
- """L2 normalization layer.
-
- Args:
- n_dims (int): Number of dimensions to be normalized
- scale (float, optional): Defaults to 20..
- eps (float, optional): Used to avoid division by zero.
- Defaults to 1e-10.
- """
- super(L2Norm, self).__init__()
- self.n_dims = n_dims
- self.weight = nn.Parameter(torch.Tensor(self.n_dims))
- self.eps = eps
- self.scale = scale
-
- def forward(self, x):
- """Forward function."""
- # normalization layer convert to FP32 in FP16 training
- x_float = x.float()
- norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
- return (self.weight[None, :, None, None].float().expand_as(x_float) *
- x_float / norm).type_as(x)
diff --git a/spaces/divyahansg/text-generation-webui-space/extensions/google_translate/script.py b/spaces/divyahansg/text-generation-webui-space/extensions/google_translate/script.py
deleted file mode 100644
index 68bc54b293086bed1a070a310d276060ee939d44..0000000000000000000000000000000000000000
--- a/spaces/divyahansg/text-generation-webui-space/extensions/google_translate/script.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import gradio as gr
-from deep_translator import GoogleTranslator
-
-params = {
- "language string": "ja",
-}
-
-language_codes = {'Afrikaans': 'af', 'Albanian': 'sq', 'Amharic': 'am', 'Arabic': 'ar', 'Armenian': 'hy', 'Azerbaijani': 'az', 'Basque': 'eu', 'Belarusian': 'be', 'Bengali': 'bn', 'Bosnian': 'bs', 'Bulgarian': 'bg', 'Catalan': 'ca', 'Cebuano': 'ceb', 'Chinese (Simplified)': 'zh-CN', 'Chinese (Traditional)': 'zh-TW', 'Corsican': 'co', 'Croatian': 'hr', 'Czech': 'cs', 'Danish': 'da', 'Dutch': 'nl', 'English': 'en', 'Esperanto': 'eo', 'Estonian': 'et', 'Finnish': 'fi', 'French': 'fr', 'Frisian': 'fy', 'Galician': 'gl', 'Georgian': 'ka', 'German': 'de', 'Greek': 'el', 'Gujarati': 'gu', 'Haitian Creole': 'ht', 'Hausa': 'ha', 'Hawaiian': 'haw', 'Hebrew': 'iw', 'Hindi': 'hi', 'Hmong': 'hmn', 'Hungarian': 'hu', 'Icelandic': 'is', 'Igbo': 'ig', 'Indonesian': 'id', 'Irish': 'ga', 'Italian': 'it', 'Japanese': 'ja', 'Javanese': 'jw', 'Kannada': 'kn', 'Kazakh': 'kk', 'Khmer': 'km', 'Korean': 'ko', 'Kurdish': 'ku', 'Kyrgyz': 'ky', 'Lao': 'lo', 'Latin': 'la', 'Latvian': 'lv', 'Lithuanian': 'lt', 'Luxembourgish': 'lb', 'Macedonian': 'mk', 'Malagasy': 'mg', 'Malay': 'ms', 'Malayalam': 'ml', 'Maltese': 'mt', 'Maori': 'mi', 'Marathi': 'mr', 'Mongolian': 'mn', 'Myanmar (Burmese)': 'my', 'Nepali': 'ne', 'Norwegian': 'no', 'Nyanja (Chichewa)': 'ny', 'Pashto': 'ps', 'Persian': 'fa', 'Polish': 'pl', 'Portuguese (Portugal, Brazil)': 'pt', 'Punjabi': 'pa', 'Romanian': 'ro', 'Russian': 'ru', 'Samoan': 'sm', 'Scots Gaelic': 'gd', 'Serbian': 'sr', 'Sesotho': 'st', 'Shona': 'sn', 'Sindhi': 'sd', 'Sinhala (Sinhalese)': 'si', 'Slovak': 'sk', 'Slovenian': 'sl', 'Somali': 'so', 'Spanish': 'es', 'Sundanese': 'su', 'Swahili': 'sw', 'Swedish': 'sv', 'Tagalog (Filipino)': 'tl', 'Tajik': 'tg', 'Tamil': 'ta', 'Telugu': 'te', 'Thai': 'th', 'Turkish': 'tr', 'Ukrainian': 'uk', 'Urdu': 'ur', 'Uzbek': 'uz', 'Vietnamese': 'vi', 'Welsh': 'cy', 'Xhosa': 'xh', 'Yiddish': 'yi', 'Yoruba': 'yo', 'Zulu': 'zu'}
-
-def input_modifier(string):
- """
- This function is applied to your text inputs before
- they are fed into the model.
- """
-
- return GoogleTranslator(source=params['language string'], target='en').translate(string)
-
-def output_modifier(string):
- """
- This function is applied to the model outputs.
- """
-
- return GoogleTranslator(source='en', target=params['language string']).translate(string)
-
-def bot_prefix_modifier(string):
- """
- This function is only applied in chat mode. It modifies
- the prefix text for the Bot and can be used to bias its
- behavior.
- """
-
- return string
-
-def ui():
- # Finding the language name from the language code to use as the default value
- language_name = list(language_codes.keys())[list(language_codes.values()).index(params['language string'])]
-
- # Gradio elements
- language = gr.Dropdown(value=language_name, choices=[k for k in language_codes], label='Language')
-
- # Event functions to update the parameters in the backend
- language.change(lambda x: params.update({"language string": language_codes[x]}), language, None)
diff --git a/spaces/dma123/gpt-js/js/utils.js b/spaces/dma123/gpt-js/js/utils.js
deleted file mode 100644
index 5ed5c8d45e0fdcdcf62c242126dcadf7957c9bf4..0000000000000000000000000000000000000000
--- a/spaces/dma123/gpt-js/js/utils.js
+++ /dev/null
@@ -1,291 +0,0 @@
-(function (globals) {
- "use strict";
-
-
- // Interact with OpenAI API
- async function openaiChat(message, chatlog, model, temperature, top_p, user_role, ui) {
- if (!regenerateLastAnswer && !message) return;
- if (receiving) return;
- receiving = true;
-
- if (user_role === 'assistant') {
- const prompt_msg = {
- role: user_role,
- content: message
- };
- chatlog.addMessage(prompt_msg);
- ui.chatlogEl.update();
- receiving = false;
- return;
- }
-
- ui.submitBtn.innerHTML = message_stop;
- let entryCreated = false;
- try {
- if (!regenerateLastAnswer) {
- message = message.trim();
- const prompt_msg = {
- role: user_role,
- content: message
- };
- chatlog.addMessage(prompt_msg);
- chatlog.addMessage(null);
- }
- regenerateLastAnswer = false;
- ui.chatlogEl.update();
- chatlog.getFirstMessage().value.content = first_prompt + getDatePrompt();
- const payload = {
- model,
- messages: chatlog.getActiveMessageValues(),
- temperature,
- top_p,
- stream: true,
- };
- const response = await fetch('https://api.openai.com/v1/chat/completions', {
- signal: controller.signal,
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- 'Authorization': `Bearer ${api_key}`
- },
- body: JSON.stringify(payload)
- });
- const reader = response.body.getReader();
- while (true) {
- const { done, value } = await reader.read();
- if (done) break;
- const value_str = new TextDecoder().decode(value);
- if (value_str.startsWith("{")) {
- const data = JSON.parse(value_str);
- if ('error' in data) throw new Error(data.error.message);
- }
- const chunks = value_str.split('\n');
- let content = '';
- chunks.forEach(chunk => {
- if (chunk.startsWith("data: ")) chunk = chunk.substring(6)
- if (chunk === '' || chunk === '[DONE]') return;
- const data = JSON.parse(chunk);
- if ('error' in data) throw new Error(data.error.message);
- content += data.choices[0].delta.content || '';
- });
-
- if (!entryCreated) {
- const lastMessage = chatlog.addMessage({ role: 'assistant', content });
- entryCreated = true;
- lastMessage.metadata = { model, temperature, top_p };
- } else {
- const lastMessage = chatlog.getLastMessage();
- lastMessage.value.content += content;
- lastMessage.cache = null;
- }
- ui.chatlogEl.update();
- }
- } catch (error) {
- console.error(error);
- if (('' + error).startsWith('AbortError: ')) {
- controller = new AbortController();
- return;
- }
- if (('' + error).startsWith("Error: You didn't provide an API key.") || ('' + error).startsWith("Error: Incorrect API key provided:")) {
- getApiKey();
- }
-
- if (!entryCreated) {
- chatlog.addMessage({ role: 'assistant', content: '' + error });
- entryCreated = true;
- } else {
- chatlog.getLastMessage().value.content += `\n\n${error}`;
- }
- } finally {
- receiving = false;
- ui.submitBtn.innerHTML = message_submit;
- if (entryCreated) {
- chatlog.getLastMessage().metadata = { model, temperature, top_p };
- }
-
- ui.chatlogEl.update();
- }
- }
-
-
- // Returns the current date and time as prompt part
- function getDatePrompt() {
- const now = new Date();
- const year = now.getFullYear();
- const month = String(now.getMonth() + 1).padStart(2, '0');
- const day = String(now.getDate()).padStart(2, '0');
- const hours = String(now.getHours()).padStart(2, '0');
- const minutes = String(now.getMinutes()).padStart(2, '0');
- const datePrompt = `\nKnowledge cutoff: none\nCurrent date: ${year}-${month}-${day}\nCurrent time: ${hours}:${minutes}`;
- return datePrompt;
- }
-
- // Sets up event listeners for the chat interface
- // ChatApp.prototype.setUpEventListeners = () => {
- globals.setUpEventListeners = (chatlog, ui) => {
-
- ui.submitBtn.addEventListener("click", () => {
- if (receiving) {
- controller.abort();
- return;
- }
- openaiChat(ui.messageEl.value, chatlog, document.querySelector('input[name="model"]:checked').value, Number(ui.temperatureEl.value), Number(ui.topPEl.value), document.querySelector('input[name="user_role"]:checked').value, ui);
- ui.messageEl.value = "";
- ui.messageEl.style.height = "auto";
- });
-
- ui.messageEl.addEventListener("keydown", (event) => {
- if (event.keyCode === 13 && (event.shiftKey || event.ctrlKey || event.altKey)) {
- event.preventDefault();
- ui.submitBtn.click();
- }
- });
-
- ui.messageEl.addEventListener("input", function () {
- this.style.height = "auto";
- let height = this.scrollHeight - parseInt(getComputedStyle(this).paddingTop) - parseInt(getComputedStyle(this).paddingBottom);
- if (height > window.innerHeight / 2) {
- height = window.innerHeight / 2;
- this.style.overflowY = "scroll";
- } else {
- this.style.overflowY = "hidden";
- }
- if (height > this.clientHeight) this.style.height = `${height}px`;
- });
-
- document.addEventListener('keydown', function (event) {
- if (event.key === "Escape") {
- controller.abort();
- }
- });
-
- ui.newChatBtn.addEventListener("click", () => {
- if (receiving) {
- controller.abort();
- return;
- }
- ui.messageEl.value = start_message;
- ui.messageEl.style.height = "auto";
- chatlog.rootAlternatives = null;
- chatlog.addMessage({ role: "system", content: first_prompt });
- ui.chatlogEl.update();
- });
-
- ui.saveChatBtn.addEventListener("click", () => {
- const jsonData = JSON.stringify(chatlog);
- const blob = new Blob([jsonData], { type: 'application/json' });
- const url = URL.createObjectURL(blob);
- const a = document.createElement('a');
- a.href = url;
- a.download = 'chatlog.json';
- a.style.display = 'none';
- document.body.appendChild(a);
- a.click();
- document.body.removeChild(a);
- });
-
- ui.loadChatBtn.addEventListener("click", () => {
- const input = document.createElement('input');
- input.type = 'file';
- input.accept = 'application/json';
- input.style.display = 'none';
- document.body.appendChild(input);
-
- input.addEventListener('change', () => {
- const file = input.files[0];
- const reader = new FileReader();
-
- reader.addEventListener('load', () => {
- const jsonData = reader.result;
- const data = JSON.parse(jsonData);
- chatlog.load(data.rootAlternatives);
- ui.chatlogEl.update();
- });
-
- reader.readAsText(file);
- document.body.removeChild(input);
- });
-
- input.click();
- });
-
- ui.temperatureValueEl.textContent = ui.temperatureEl.value;
- ui.temperatureEl.addEventListener('input', () => {
- ui.temperatureValueEl.textContent = ui.temperatureEl.value;
- });
-
- ui.topPValueEl.textContent = ui.topPEl.value;
- ui.topPEl.addEventListener('input', () => {
- ui.topPValueEl.textContent = ui.topPEl.value;
- });
-
- ui.settingsBtn.addEventListener('click', () => {
- ui.settingsEl.classList.toggle('open');
- });
-
- ui.loginBtn.addEventListener('click', () => {
- getApiKey();
- });
-
- ui.logoutBtn.addEventListener('click', () => {
- try {
- localStorage.removeItem("api_key");
- } catch (error) {
- console.error(error);
- }
- location.reload();
- });
-
- }
-
-
- const showLoginButton = () => {
- const login = document.getElementById('session-login');
- const logout = document.getElementById('session-logout');
- login.style.display = 'block';
- logout.style.display = 'none';
- }
-
-
- const showLogoutButton = () => {
- const login = document.getElementById('session-login');
- const logout = document.getElementById('session-logout');
- login.style.display = 'none';
- logout.style.display = 'block';
- }
-
-
- globals.getApiKey = () => {
- // If no or an empty API key has been set, then try to get one from localStorage
- if (typeof api_key == 'undefined' || api_key == '') {
- try {
- globals.api_key = localStorage.api_key;
- } catch (error) {
- console.error(error);
- }
- if (typeof api_key != 'undefined' && api_key != '') {
- showLogoutButton();
- return;
- }
- }
- showLoginButton();
-
- // If any API key has been set, or localStorage was empty, ask the user for a new API key
- setTimeout(() => {
- globals.api_key = prompt('Enter an OpenAI API key:');
- if (globals.api_key == null) globals.api_key = '';
- try {
- localStorage.api_key = api_key;
- } catch (error) {
- console.error(error);
- }
- if (typeof api_key == 'undefined' || api_key == '') {
- showLoginButton();
- } else {
- showLogoutButton();
- }
- }, 0);
- }
-
-
-}(this));
\ No newline at end of file
diff --git a/spaces/doevent/FullSubNet-plus/app.py b/spaces/doevent/FullSubNet-plus/app.py
deleted file mode 100644
index 4a228b82ea40d198d397291e394c76944880fa67..0000000000000000000000000000000000000000
--- a/spaces/doevent/FullSubNet-plus/app.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import os
-import gradio as gr
-import subprocess
-os.system("git clone https://github.com/doevent/FullSubNet-plus")
-os.system("mv FullSubNet-plus/speech_enhance .")
-os.system("mv FullSubNet-plus/config .")
-os.system("gdown https://drive.google.com/uc?id=1UJSt1G0P_aXry-u79LLU_l9tCnNa2u7C -O best_model.tar")
-from speech_enhance.tools.denoise_hf_clone_voice import start
-
-
-# If the file is too duration to inference
-def duration(input_audio) -> int:
- command = f"ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 -i {input_audio}"
- result = subprocess.run(command, shell=True, stdout=subprocess.PIPE)
- data = result.stdout.decode('ascii').rstrip()
- return int(float(data))
-
-
-def inference(audio):
- try:
- if audio.find("audio") < 0:
- if duration(audio) >= 150:
- return "error.wav"
- result = start(to_list_files=[audio])
- return result[0]
- except Exception as e:
-        raise gr.Error(f"Maximum duration 150 sec\n{str(e)}")
-
-
-title = """DeNoise Speech Enhancement """
-description = """
-This is an unofficial demo for FullSubNet-plus: DeNoise Speech Enhancement. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below.
-Link to GitHub:
-- [FullSubNet +](https://github.com/hit-thusz-RookieCJ/FullSubNet-plus)
-"""
-twitter_link = "[](https://twitter.com/DoEvent)"
-css = '''
-h1#title {
- text-align: center;
-}
-'''
-
-with gr.Blocks(css=css) as demo:
- gr.Markdown(title)
- gr.Markdown(description)
- gr.Markdown(twitter_link)
-
-
- with gr.Tab("Upload audio"):
- u_audio = gr.Audio(type="filepath", source="upload", label="Input audio")
- u_output = gr.Audio(type="filepath", label="Output audio")
- u_button = gr.Button("Submit")
-
- with gr.Tab("Record your voice"):
-        m_audio = gr.Audio(type="filepath", source="microphone", label="Record yourself reading something out loud")
- m_output = gr.Audio(type="filepath", label="Output audio")
- m_button = gr.Button("Submit")
-
- gr.Examples(examples=["man.wav", "woman.wav"], inputs=u_audio, outputs=u_output, fn=inference, cache_examples=True)
- u_button.click(inference, inputs=u_audio, outputs=u_output)
- m_button.click(inference, inputs=m_audio, outputs=m_output)
- gr.Markdown("")
-
-
-demo.queue(concurrency_count=1, api_open=False).launch(show_api=False, show_error=True)
diff --git a/spaces/drdata/ArtNovelComicBookComposer/app.py b/spaces/drdata/ArtNovelComicBookComposer/app.py
deleted file mode 100644
index 4da39753084a6e536ef4d94f8099acabe5deb69e..0000000000000000000000000000000000000000
--- a/spaces/drdata/ArtNovelComicBookComposer/app.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import os
-import random
-
-import gradio as gr
-
-#links
-Article = ""
-Article = Article + " Gallery UI documentation: https://gradio.app/docs/"
-Article = Article + " Unsplash free image source requires 5MP or larger images: https://unsplash.com/"
-Article = Article + " Github should also be a worthy alternative with free repos. "
-Article = Article + " Raw git content can be accessed by URL like so: https://raw.github.com/AaronCWacker/Yggdrasil/images/"
-Article = Article + " Originals go here: https://github.com/AaronCWacker/Yggdrasil/tree/main/images"
-# Aaron_Wacker_health_and_medical_icon_set_on_white_background_bba24b60-9fcf-411b-9c00-dd1ba1e3553c.png
-
-
-import os
-import csv
-import gradio as gr
-from gradio import inputs, outputs
-import huggingface_hub
-from huggingface_hub import Repository
-from datetime import datetime
-
-HF_TOKEN = os.environ.get("HF_TOKEN")
-print("is none?", HF_TOKEN is None)
-DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/BookComposer"
-DATA_FILENAME = "BookComposer.csv"
-DATA_FILE=os.path.join("data", DATA_FILENAME)
-HF_TOKEN=os.environ.get("HF_TOKEN")
-
-def gan():
- images = [
- (random.choice(
- [
- "Comectress.png",
- "Comic Plate 1 - Amnesia.png",
- "Comic Plate 1 - Depths.png",
- "Comic Plate 1 - Helion.png",
- "Comic Plate 1 - Kitsune.png",
- "Comic Plate 1 - Sinbad.png",
- "Comic Plate 1 - Vampiress.png",
- "Comic Plate 2 - Amnesia.png",
- "Comic Plate 2 - Depths.png",
- "Comic Plate 2 - Helion.png",
- "Comic Plate 2 - Kitsune.png",
- "Comic Plate 2 - Sinbad.png",
- "Comic Plate 2 - Vampiress.png",
- "Comic Plate 3 - Amnesia.png",
- "Comic Plate 3 - Depths.png",
- "Comic Plate 3 - Helion.png",
- "Comic Plate 3 - Kitsune.png",
- "Comic Plate 3 - Vampiress.png",
- "Comic Plate 4 - Amnesia.png",
- "Comic Plate 4 - Depths.png",
- "Comic Plate 4 - Helion.png",
- "Comic Plate 4 - Kitsune.png",
- "Comic Plate 4 - Sinbad.png",
- "Comic Plate 4 - Vampiress.png",
- "Comic Plate 5 - Vampiress.png",
- "Gold Suit.png",
- "Heavens.png",
- "Red Dot Sight.png",
- "Starfire.png",
- "Vamp 1.png",
- "Vamp 2.png",
- "Vamp 3.png",
- "Vamp 4.png",
- "Whirlwind.png",
- "Zyphoria.png",
- ]
- ), f"label {i}" if i != 0 else "label" * 50)
- for i in range(10)
- ]
- return images
-
-def gan2():
- images = [
- (random.choice(
- [
- "Comectress.png",
- "Comic Plate 1 - Amnesia.png",
- "Comic Plate 1 - Depths.png",
- "Comic Plate 1 - Helion.png",
- "Comic Plate 1 - Kitsune.png",
- "Comic Plate 1 - Sinbad.png",
- "Comic Plate 1 - Vampiress.png",
- "Comic Plate 2 - Amnesia.png",
- "Comic Plate 2 - Depths.png",
- "Comic Plate 2 - Helion.png",
- "Comic Plate 2 - Kitsune.png",
- "Comic Plate 2 - Sinbad.png",
- "Comic Plate 2 - Vampiress.png",
- "Comic Plate 3 - Amnesia.png",
- "Comic Plate 3 - Depths.png",
- "Comic Plate 3 - Helion.png",
- "Comic Plate 3 - Kitsune.png",
- "Comic Plate 3 - Vampiress.png",
- "Comic Plate 4 - Amnesia.png",
- "Comic Plate 4 - Depths.png",
- "Comic Plate 4 - Helion.png",
- "Comic Plate 4 - Kitsune.png",
- "Comic Plate 4 - Sinbad.png",
- "Comic Plate 4 - Vampiress.png",
- "Comic Plate 5 - Vampiress.png",
- "Gold Suit.png",
- "Heavens.png",
- "Red Dot Sight.png",
- "Starfire.png",
- "Vamp 1.png",
- "Vamp 2.png",
- "Vamp 3.png",
- "Vamp 4.png",
- "Whirlwind.png",
- "Zyphoria.png",
- ]
- ), f"label {i}" if i != 0 else "label" * 50)
- for i in range(10)
- ]
- return images
-
-
-
-with gr.Blocks() as demo:
- with gr.Column(variant="panel"):
- with gr.Row(variant="compact"):
- text = gr.Textbox(
- label="Health and Medical Icon Sets",
- show_label=False,
- max_lines=1,
- placeholder="Enter your prompt",
- ).style(
- container=False,
- )
- btn = gr.Button("Generate image").style(full_width=False)
- btn2 = gr.Button("Generate story").style(full_width=False)
-
- gallery = gr.Gallery(
- label="Generated images", show_label=False, elem_id="gallery"
- ).style(grid=[2], height="auto")
-
- btn.click(gan, None, gallery)
- btn2.click(gan2, None, gallery)
-
-if __name__ == "__main__":
- demo.launch()
\ No newline at end of file
diff --git a/spaces/dsaigc/trans_for_sd/README.md b/spaces/dsaigc/trans_for_sd/README.md
deleted file mode 100644
index 79e53486110061d7416874c637bc205fa0e09b27..0000000000000000000000000000000000000000
--- a/spaces/dsaigc/trans_for_sd/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Trans For Sd
-emoji: 🏃
-colorFrom: gray
-colorTo: gray
-sdk: gradio
-sdk_version: 3.28.3
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/elonmuskceo/sparknlp/app.py b/spaces/elonmuskceo/sparknlp/app.py
deleted file mode 100644
index e1db3c261f31285675562c9abd5bf5eb101458a3..0000000000000000000000000000000000000000
--- a/spaces/elonmuskceo/sparknlp/app.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import gradio as gr
-from sparknlp.base import *
-from sparknlp.annotator import *
-from sparknlp.pretrained import PretrainedPipeline
-import sparknlp
-
-print("Sparknlp Version: " + sparknlp.version())
-
-# Start SparkSession with Spark NLP
-# The start() function has 4 parameters: gpu, spark23, spark24, and memory
-# sparknlp.start(gpu=True) will start the session with GPU support
-# sparknlp.start(spark23=True) is when you have Apache Spark 2.3.x installed
-# sparknlp.start(spark24=True) is when you have Apache Spark 2.4.x installed
-# sparknlp.start(memory="16G") to change the default driver memory in SparkSession
-spark = sparknlp.start()
-
-# Download a pre-trained pipeline
-pipeline = PretrainedPipeline('explain_document_dl', lang='en')
-
-# Your testing dataset
-placeholder = """
-The Mona Lisa is a 16th century oil painting created by Leonardo.
-It's held at the Louvre in Paris.
-"""
-
-print(spark)
-
-def fn(text: str):
- result = pipeline.annotate(text)
- return result
-
-iface = gr.Interface(
- fn=fn,
- inputs="text",
- outputs="json",
- title="Spark NLP explain_document_dl pipeline",
- description=f"Spark object: {spark}",
- examples=[placeholder],
-)
-
-if __name__ == "__main__":
- iface.launch()
diff --git a/spaces/eson/tokenizer-arena/vocab/gpt_nexo_20b/tokenizer/tokenizer.py b/spaces/eson/tokenizer-arena/vocab/gpt_nexo_20b/tokenizer/tokenizer.py
deleted file mode 100644
index 7c81d0ff43331a9fc1d1a54f8b7637b549d082fd..0000000000000000000000000000000000000000
--- a/spaces/eson/tokenizer-arena/vocab/gpt_nexo_20b/tokenizer/tokenizer.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# Copyright (c) 2021, EleutherAI
-# This file is based on code by the authors denoted below and has been modified from its original version.
-#
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Megatron tokenizers."""
-
-from abc import ABC
-from abc import abstractmethod
-
-from tokenizers import Tokenizer
-from transformers import GPT2Tokenizer, GPT2TokenizerFast
-import numpy as np
-import sentencepiece as spm
-from typing import List, Union
-from .gpt2_tokenization import GPT2Tokenizer
-
-
-def build_tokenizer(args):
- """Initialize tokenizer."""
- if args.rank == 0:
- print("> building {} tokenizer ...".format(args.tokenizer_type), flush=True)
-
- # Select and instantiate the tokenizer.
- if args.tokenizer_type.lower() == "GPT2BPETokenizer".lower():
- assert args.vocab_file is not None
- assert args.merge_file is not None
- tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)
- elif args.tokenizer_type.lower() == "SPMTokenizer".lower():
- assert args.vocab_file is not None
- tokenizer = SentencePieceTokenizer(args.vocab_file)
- elif args.tokenizer_type.lower() == "HFTokenizer".lower():
- assert args.vocab_file is not None
- tokenizer = HFTokenizer(args.vocab_file)
- elif args.tokenizer_type.lower() == "HFGPT2Tokenizer".lower():
- if args.vocab_file is None:
- print(
- "WARNING: No vocab file found, loading Huggingface's pretrained GPT2Tokenizer"
- )
- tokenizer = HFGPT2Tokenizer(args.vocab_file)
- elif args.tokenizer_type.lower() == "CharLevelTokenizer".lower():
- tokenizer = CharLevelTokenizer(vocab_size=512)
- elif args.tokenizer_type.lower() == "TiktokenTokenizer".lower():
- assert args.vocab_file is not None
- tokenizer = TiktokenTokenizer(args.vocab_file)
- else:
- raise NotImplementedError(
- "{} tokenizer is not " "implemented.".format(args.tokenizer_type)
- )
-
- # Add vocab size.
- args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)
-
- return tokenizer
-
-
-def _vocab_size_with_padding(orig_vocab_size, args):
- """Pad vocab size so it is divisible by model parallel size and
- still having GPU friendly size."""
-
- after = orig_vocab_size
- multiple = args.make_vocab_size_divisible_by * args.model_parallel_size
- while (after % multiple) != 0:
- after += 1
- if args.rank == 0:
- print(
- " > padded vocab (size: {}) with {} dummy tokens "
- "(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after),
- flush=True,
- )
- return after
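-
-# Worked example for _vocab_size_with_padding (hypothetical numbers): with
-# make_vocab_size_divisible_by=128 and model_parallel_size=2 the multiple is 256,
-# so an original vocab size of 50257 is padded up to 50432, the next multiple of 256.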
-
-
-class AbstractTokenizer(ABC):
- """Abstract class for tokenizer."""
-
- def __init__(self, name):
- self.name = name
- super().__init__()
-
- @property
- @abstractmethod
- def vocab_size(self):
- pass
-
- @property
- @abstractmethod
- def vocab(self):
- """Dictionary from vocab text token to id token."""
- pass
-
- @property
- @abstractmethod
- def inv_vocab(self):
- """Dictionary from vocab id token to text token."""
- pass
-
- @abstractmethod
- def tokenize(self, text):
- pass
-
- def detokenize(self, token_ids):
- raise NotImplementedError(
- "detokenizer is not implemented for {} " "tokenizer".format(self.name)
- )
-
- @property
- def cls(self):
- raise NotImplementedError(
- "CLS is not provided for {} " "tokenizer".format(self.name)
- )
-
- @property
- def sep(self):
- raise NotImplementedError(
- "SEP is not provided for {} " "tokenizer".format(self.name)
- )
-
- @property
- def pad(self):
- raise NotImplementedError(
- "PAD is not provided for {} " "tokenizer".format(self.name)
- )
-
- @property
- def eod(self):
- raise NotImplementedError(
- "EOD is not provided for {} " "tokenizer".format(self.name)
- )
-
- @property
- def mask(self):
- raise NotImplementedError(
- "MASK is not provided for {} " "tokenizer".format(self.name)
- )
-
-
-class _GPT2BPETokenizer(AbstractTokenizer):
- """Original GPT2 BPE tokenizer."""
-
- def __init__(self, vocab_file, merge_file):
- name = "GPT2 BPE"
- super().__init__(name)
-
- self.tokenizer = GPT2Tokenizer(
- vocab_file, merge_file, errors="replace", special_tokens=[], max_len=None
- )
- self.eod_id = self.tokenizer.encoder["<|endoftext|>"]
-
- @property
- def vocab_size(self):
- return len(self.tokenizer.encoder)
-
- @property
- def vocab(self):
- return self.tokenizer.encoder
-
- @property
- def inv_vocab(self):
- return self.tokenizer.decoder
-
- def tokenize(self, text):
- return self.tokenizer.encode(text)
-
- def detokenize(self, token_ids):
- return self.tokenizer.decode(token_ids)
-
- @property
- def eod(self):
- return self.eod_id
-
-
-class SentencePieceTokenizer(AbstractTokenizer):
- """Designed to Integrate SP's Tokenizer."""
-
- def __init__(self, vocab_file):
- name = "SPM"
- super().__init__(name)
-
- self.tokenizer = spm.SentencePieceProcessor(model_file=vocab_file)
- self.eod_id = self.tokenizer.piece_to_id("<|endoftext|>")
-
- @property
- def vocab_size(self):
- return self.tokenizer.get_piece_size()
-
- @property
- def vocab(self):
- return {
- self.tokenizer.id_to_piece(idx): idx
- for idx in range(self.tokenizer.get_piece_size())
- }
-
- @property
- def inv_vocab(self):
- return {
- idx: self.tokenizer.id_to_piece(idx)
- for idx in range(self.tokenizer.get_piece_size())
- }
-
- def tokenize(self, text):
- return self.tokenizer.encode(text)
-
- def detokenize(self, token_ids):
- return self.tokenizer.decode(token_ids)
-
- @property
- def eod(self):
- return self.eod_id
-
-
-class HFTokenizer(AbstractTokenizer):
- """Designed to Integrate HF's Tokenizer library."""
-
- def __init__(self, vocab_file):
- name = "HFTokenizer"
- super().__init__(name)
-
- self.tokenizer = Tokenizer.from_file(vocab_file)
- self.eod_id = self.tokenizer.token_to_id("<|endoftext|>")
- self.pad_id = self.tokenizer.token_to_id("<|padding|>")
-
- @property
- def vocab_size(self):
- return self.tokenizer.get_vocab_size()
-
- @property
- def vocab(self):
- return self.tokenizer.get_vocab()
-
- @property
- def inv_vocab(self):
- return self.tokenizer.decoder
-
- def tokenize(self, text: str):
- return self.tokenizer.encode(text).ids
-
- def tokenize_batch(self, text_batch: Union[List[str], str]):
- return self.tokenizer.encode_batch(text_batch)
-
- def detokenize(self, token_ids):
- return self.tokenizer.decode(token_ids)
-
- @property
- def eod(self):
- return self.eod_id
-
-
-class HFGPT2Tokenizer(AbstractTokenizer):
- """Designed to Integrate the pretrained OpenAI GPT2 Tokenizers from HF"""
-
- def __init__(self, vocab_file=None, fast=True):
- name = "HFGPT2Tokenizer"
- if fast:
- name += "Fast"
- super().__init__(name)
- if vocab_file is None:
- vocab_file = "gpt2"
- if fast:
- self.tokenizer = GPT2TokenizerFast.from_pretrained(vocab_file)
- else:
- self.tokenizer = GPT2Tokenizer.from_pretrained(vocab_file)
-
- self.tokenizer.add_special_tokens({"pad_token": "<|padding|>"})
- self.eod_id = self.tokenizer.eos_token_id
- self.pad_id = self.tokenizer.pad_token_id
-
- @property
- def vocab_size(self):
- return len(self.tokenizer)
-
- @property
- def vocab(self):
- return self.tokenizer.get_vocab()
-
- @property
- def inv_vocab(self):
- return self.tokenizer._tokenizer.decoder
-
- def tokenize(self, text: str):
- return self.tokenizer.encode(text)
-
- def tokenize_batch(self, text_batch: Union[List[str], str]):
- if isinstance(text_batch, str):
- text_batch = [text_batch]
- return [self.tokenize(t) for t in text_batch]
-
- def detokenize(self, token_ids):
- return self.tokenizer.decode(token_ids)
-
- @property
- def eod(self):
- return self.eod_id
-
-
-class CharLevelTokenizer(AbstractTokenizer):
- """Character Level Tokenizer"""
-
- def __init__(self, vocab_size):
- name = "CharLevelTokenizer"
- super().__init__(name)
- self._vocab_size = vocab_size
- self.eod_id = 0
- self.pad_id = 1
-
- def clamp(self, n):
- return max(32, min(n, self.vocab_size))
-
- @property
- def vocab_size(self):
- return self._vocab_size
-
- @property
- def vocab(self):
- raise NotImplementedError
-
- @property
- def inv_vocab(self):
- raise NotImplementedError
-
- def decode_token(self, token: int):
- return str(chr(self.clamp(token)))
-
- def tokenize(self, text: str):
-        return list(np.frombuffer(text.encode("utf-8"), dtype=np.uint8))
-
- def tokenize_batch(self, text_batch: Union[List[str], str]):
- if isinstance(text_batch, list):
- return [self.tokenize(s) for s in text_batch]
- else:
- return self.tokenize(text_batch)
-
- def detokenize(self, token_ids):
- return "".join(list(map(self.decode_token, token_ids)))
-
- @property
- def eod(self):
- return self.eod_id
-
-
-class TiktokenTokenizer(AbstractTokenizer):
- """Tokenizer from OpenAI's tiktoken implementation"""
-
- def __init__(self, vocab_file):
- try:
- import tiktoken
- except ModuleNotFoundError:
-            print("Please install tiktoken (https://github.com/openai/tiktoken).")
-            raise
-
- name = "TiktokenTokenizer"
- super().__init__(name)
-
- self.tokenizer = tiktoken.get_encoding(vocab_file)
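-        # Note: despite its name, vocab_file here is a tiktoken encoding name
-        # (e.g. "gpt2" or "cl100k_base"), not a path to a vocabulary file on disk.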
- self.eod_id = self.tokenizer.eot_token
- self.pad_id = None
-
- @property
- def vocab_size(self):
- return self.tokenizer.n_vocab
-
- @property
- def vocab(self):
- raise NotImplementedError(
- "TiktokenTokenizer does not implement vocabulary access."
- )
-
- @property
- def inv_vocab(self):
- raise NotImplementedError(
- "TiktokenTokenizer does not implement vocabulary access. \
- To get the idx-th token in vocabulary, use tokenizer.decode([idx]) ."
- )
-
- def tokenize(self, text: str):
- return self.tokenizer.encode(text) # , allowed_special="all")
-
- def tokenize_batch(self, text_batch: List[str]):
- return self.tokenizer.encode_batch(text_batch, allowed_special="all")
-
- def detokenize(self, token_ids):
- return self.tokenizer.decode(tokens=token_ids, errors="strict")
-
- @property
- def eod(self):
- return self.eod_id
-
- @property
- def pad(self):
- raise NotImplementedError
diff --git a/spaces/evaluate-metric/seqeval/README.md b/spaces/evaluate-metric/seqeval/README.md
deleted file mode 100644
index e4442315f079863e802dfc7413e0166ac7d01071..0000000000000000000000000000000000000000
--- a/spaces/evaluate-metric/seqeval/README.md
+++ /dev/null
@@ -1,164 +0,0 @@
----
-title: seqeval
-emoji: 🤗
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-tags:
-- evaluate
-- metric
-description: >-
- seqeval is a Python framework for sequence labeling evaluation.
- seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.
-
- This is well-tested by using the Perl script conlleval, which can be used for
- measuring the performance of a system that has processed the CoNLL-2000 shared task data.
-
-  seqeval supports the following formats:
- IOB1
- IOB2
- IOE1
- IOE2
- IOBES
-
- See the [README.md] file at https://github.com/chakki-works/seqeval for more information.
----
-
-# Metric Card for seqeval
-
-## Metric description
-
-seqeval is a Python framework for sequence labeling evaluation. seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.
-
-
-## How to use
-
-Seqeval produces labelling scores along with their sufficient statistics by comparing predicted label sequences against reference label sequences.
-
-It takes two mandatory arguments:
-
-`predictions`: a list of lists of predicted labels, i.e. estimated targets as returned by a tagger.
-
-`references`: a list of lists of reference labels, i.e. the ground truth/target values.
-
-It can also take several optional arguments:
-
-`suffix` (boolean): `True` if the IOB tag is a suffix (after type) instead of a prefix (before type), `False` otherwise. The default value is `False`, i.e. the IOB tag is a prefix (before type).
-
-`scheme`: the target tagging scheme, which can be one of [`IOB1`, `IOB2`, `IOE1`, `IOE2`, `IOBES`, `BILOU`]. The default value is `None`.
-
-`mode`: whether to count correct entity labels with incorrect I/B tags as true positives or not. If you want to only count exact matches, pass `mode="strict"` and a specific `scheme` value. The default is `None`.
-
-`sample_weight`: An array-like of shape (n_samples,) that provides weights for individual samples. The default is `None`.
-
-`zero_division`: The value to substitute as a metric value when encountering zero division. Should be one of [`0`, `1`, `"warn"`]. `"warn"` acts like `0`, but a warning is also raised.
-
-
-```python
->>> seqeval = evaluate.load('seqeval')
->>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
->>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
->>> results = seqeval.compute(predictions=predictions, references=references)
-```
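-
-To only count exact entity matches under a given tagging scheme, the same call can be made in strict mode (a sketch reusing the inputs above; the scheme name is illustrative):
-
-```python
->>> results = seqeval.compute(predictions=predictions, references=references, scheme="IOB2", mode="strict")
-```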
-
-## Output values
-
-This metric returns a dictionary with a summary of scores for overall and per type:
-
-Overall:
-
-`accuracy`: the average [accuracy](https://huggingface.co/metrics/accuracy), on a scale between 0.0 and 1.0.
-
-`precision`: the average [precision](https://huggingface.co/metrics/precision), on a scale between 0.0 and 1.0.
-
-`recall`: the average [recall](https://huggingface.co/metrics/recall), on a scale between 0.0 and 1.0.
-
-`f1`: the average [F1 score](https://huggingface.co/metrics/f1), which is the harmonic mean of the precision and recall. It also has a scale of 0.0 to 1.0.
-
-Per type (e.g. `MISC`, `PER`, `LOC`,...):
-
-`precision`: the average [precision](https://huggingface.co/metrics/precision), on a scale between 0.0 and 1.0.
-
-`recall`: the average [recall](https://huggingface.co/metrics/recall), on a scale between 0.0 and 1.0.
-
-`f1`: the average [F1 score](https://huggingface.co/metrics/f1), on a scale between 0.0 and 1.0.
-
-
-### Values from popular papers
-The 1995 "Text Chunking using Transformation-Based Learning" [paper](https://aclanthology.org/W95-0107) reported a baseline recall of 81.9% and a precision of 78.2% using non Deep Learning-based methods.
-
-More recently, seqeval continues being used for reporting performance on tasks such as [named entity detection](https://www.mdpi.com/2306-5729/6/8/84/htm) and [information extraction](https://ieeexplore.ieee.org/abstract/document/9697942/).
-
-
-## Examples
-
-Maximal values (full match) :
-
-```python
->>> seqeval = evaluate.load('seqeval')
->>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
->>> references = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
->>> results = seqeval.compute(predictions=predictions, references=references)
->>> print(results)
-{'MISC': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1}, 'PER': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1}, 'overall_precision': 1.0, 'overall_recall': 1.0, 'overall_f1': 1.0, 'overall_accuracy': 1.0}
-
-```
-
-Minimal values (no match):
-
-```python
->>> seqeval = evaluate.load('seqeval')
->>> predictions = [['O', 'B-MISC', 'I-MISC'], ['B-PER', 'I-PER', 'O']]
->>> references = [['B-MISC', 'O', 'O'], ['I-PER', '0', 'I-PER']]
->>> results = seqeval.compute(predictions=predictions, references=references)
->>> print(results)
-{'MISC': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1}, 'PER': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 2}, '_': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1}, 'overall_precision': 0.0, 'overall_recall': 0.0, 'overall_f1': 0.0, 'overall_accuracy': 0.0}
-```
-
-Partial match:
-
-```python
->>> seqeval = evaluate.load('seqeval')
->>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
->>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
->>> results = seqeval.compute(predictions=predictions, references=references)
->>> print(results)
-{'MISC': {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1}, 'PER': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1}, 'overall_precision': 0.5, 'overall_recall': 0.5, 'overall_f1': 0.5, 'overall_accuracy': 0.8}
-```
-
-## Limitations and bias
-
-seqeval supports the following IOB formats (short for inside, outside, beginning): `IOB1`, `IOB2`, `IOE1`, `IOE2`, `IOBES`, and `BILOU` (the latter only in strict mode).
-
-For more information about IOB formats, refer to the [Wikipedia page](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)) and the description of the [CoNLL-2000 shared task](https://aclanthology.org/W02-2024).
-
-
-## Citation
-
-```bibtex
-@inproceedings{ramshaw-marcus-1995-text,
- title = "Text Chunking using Transformation-Based Learning",
- author = "Ramshaw, Lance and
- Marcus, Mitch",
- booktitle = "Third Workshop on Very Large Corpora",
- year = "1995",
- url = "https://www.aclweb.org/anthology/W95-0107",
-}
-```
-
-```bibtex
-@misc{seqeval,
- title={{seqeval}: A Python framework for sequence labeling evaluation},
- url={https://github.com/chakki-works/seqeval},
- note={Software available from https://github.com/chakki-works/seqeval},
- author={Hiroki Nakayama},
- year={2018},
-}
-```
-
-## Further References
-- [README for seqeval at GitHub](https://github.com/chakki-works/seqeval)
-- [CoNLL-2000 shared task](https://www.clips.uantwerpen.be/conll2002/ner/bin/conlleval.txt)
diff --git a/spaces/facebook/MusicGen/audiocraft/losses/sisnr.py b/spaces/facebook/MusicGen/audiocraft/losses/sisnr.py
deleted file mode 100644
index a1b8ee03507dccf0327b1f2f57298b56f38827fe..0000000000000000000000000000000000000000
--- a/spaces/facebook/MusicGen/audiocraft/losses/sisnr.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-import typing as tp
-
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def _unfold(a: torch.Tensor, kernel_size: int, stride: int) -> torch.Tensor:
- """Given input of size [*OT, T], output Tensor of size [*OT, F, K]
- with K the kernel size, by extracting frames with the given stride.
-    This will pad the input so that `F = ceil(T / stride)`.
- see https://github.com/pytorch/pytorch/issues/60466
- """
- *shape, length = a.shape
- n_frames = math.ceil(length / stride)
- tgt_length = (n_frames - 1) * stride + kernel_size
- a = F.pad(a, (0, tgt_length - length))
- strides = list(a.stride())
- assert strides[-1] == 1, "data should be contiguous"
- strides = strides[:-1] + [stride, 1]
- return a.as_strided([*shape, n_frames, kernel_size], strides)
-
-
-def _center(x: torch.Tensor) -> torch.Tensor:
- return x - x.mean(-1, True)
-
-
-def _norm2(x: torch.Tensor) -> torch.Tensor:
- return x.pow(2).sum(-1, True)
-
-
-class SISNR(nn.Module):
- """SISNR loss.
-
- Input should be [B, C, T], output is scalar.
-
-    ..Warning:: This function returns the opposite of the SI-SNR (i.e. `-1 * regular_SI_SNR`).
-        Consequently, lower scores are better in terms of reconstruction quality,
-        and in particular the loss should be negative if training goes well. It is done this
-        way so that this module can also be used as a loss function for training a model.
-
- Args:
- sample_rate (int): Sample rate.
- segment (float or None): Evaluate on chunks of that many seconds. If None, evaluate on
- entire audio only.
- overlap (float): Overlap between chunks, i.e. 0.5 = 50 % overlap.
- epsilon (float): Epsilon value for numerical stability.
- """
- def __init__(
- self,
- sample_rate: int = 16000,
- segment: tp.Optional[float] = 20,
- overlap: float = 0.5,
- epsilon: float = torch.finfo(torch.float32).eps,
- ):
- super().__init__()
- self.sample_rate = sample_rate
- self.segment = segment
- self.overlap = overlap
- self.epsilon = epsilon
-
- def forward(self, out_sig: torch.Tensor, ref_sig: torch.Tensor) -> torch.Tensor:
- B, C, T = ref_sig.shape
- assert ref_sig.shape == out_sig.shape
-
- if self.segment is None:
- frame = T
- stride = T
- else:
- frame = int(self.segment * self.sample_rate)
- stride = int(frame * (1 - self.overlap))
-
- epsilon = self.epsilon * frame # make epsilon prop to frame size.
-
- gt = _unfold(ref_sig, frame, stride)
- est = _unfold(out_sig, frame, stride)
- if self.segment is None:
- assert gt.shape[-1] == 1
-
- gt = _center(gt)
- est = _center(est)
- dot = torch.einsum("bcft,bcft->bcf", gt, est)
-
- proj = dot[:, :, :, None] * gt / (epsilon + _norm2(gt))
- noise = est - proj
-
- sisnr = 10 * (
- torch.log10(epsilon + _norm2(proj)) - torch.log10(epsilon + _norm2(noise))
- )
- return -1 * sisnr[..., 0].mean()
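-
-
-# A minimal usage sketch (illustrative, not part of the original module): compute the
-# negated SI-SNR between a reference batch and a lightly corrupted copy of it.
-# The shapes, sample rate and noise level below are placeholder values.
-if __name__ == "__main__":
-    loss_fn = SISNR(sample_rate=16000)
-    ref = torch.randn(2, 1, 4 * 16000)          # [B, C, T]: 4 seconds of audio
-    out = ref + 0.05 * torch.randn_like(ref)    # reconstruction to evaluate
-    loss = loss_fn(out, ref)                    # scalar; lower (more negative) is better
-    print(loss.item())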
diff --git a/spaces/falterWliame/Face_Mask_Detection/Gta San Andreas Underground Download Torrent.md b/spaces/falterWliame/Face_Mask_Detection/Gta San Andreas Underground Download Torrent.md
deleted file mode 100644
index 0ec7a6e173b710a5e18358b4268997d4877ff86e..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Gta San Andreas Underground Download Torrent.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-It's currently the hardest map to get into in all of San Andreas; I'll explain why below. If you still wish to try going over the mountains, here is a picture of the profile. There is one tiny hole in the top left-hand corner. This isn't an actual hole: if you push on the left edge of the small dirt mound, you will be able to go into the mountain.
-I wish you all the best in conquering San Andreas for GTA V. I know that it seems impossible, but I'm sure that you'll get there one day. After all, it's a beautiful place, with its carefully crafted hills and valleys. Even if you don't, I'm sure that the game's map editor will keep you busy and entertained for a long time.
-gta san andreas underground download torrent Download File >> https://urlca.com/2uDdxz
-You can access the mountains through the tunnels underneath them. You'll need to get an ambulance and a sidecar (with the siren option in your car) and drive around the mountains until you find an opening. There's a small path off of the main path that you can follow (it's in the mountains) until you can get to the tunnel. Enter it and go down to either San Fierro or Los Santos. This works for Liberty City as well; just get the STP sidecar or ambulance. Hope this helps!
-I've got Grand Theft Auto San Andreas Underground version 0.4.0 for PC for free and am posting it here in full with all of my mods; maybe you will like it. I hope you enjoy it, but if you don't, please write me back or try to give me suggestions. I never in my life thought I would do an open-source mod, but I'm glad I did! I have no idea what to call it, so it's just Grand Theft Auto San Andreas Underground. I made all the sound packs and was trying to make my first mod. Wangaray out to all.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/falterWliame/Face_Mask_Detection/Presenter Media Login Password Free HOT.md b/spaces/falterWliame/Face_Mask_Detection/Presenter Media Login Password Free HOT.md
deleted file mode 100644
index 795fbe0dcd5cbda956e9a0a93b95c641b6e57521..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Presenter Media Login Password Free HOT.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-Adobe Presenter is a tool for creating high-quality online education presentations, ideal for corporate training, e-learning, and online courses. With Presenter, you can create presentations like a pro without any coding or scripting.
-Most, if not all, of the PowerPoint files that you create on your Windows PC are not compatible with mobile devices. When you have finished preparing content for a presentation, you need a way to convert the PowerPoint files for use on mobile devices. Presenter enables you to do this. A free Google Sheet is also included to track your published presentations.
-Presenter Media Login Password Free Download Zip ::: https://urlca.com/2uDdEt
-All of the digital classroom media that are beamed to the digital classroom sites via Dedicated IP Connections (DPs) should be using the IP Stream. This is done so that the screens will always remain in a constant state of muting. DPs monitor the VOD Media on the IP Stream to determine whether to enable the screens or not. When no stream media is received by a DP in a specific time frame, the screens are muted.
-Blackmagic Web Presenter is a self-contained server for sending video to streaming sites. It has an SDI input for video from a camera or live encoder. You can use it with a Windows PC with a USB or Ethernet port for computer output. It has an optional Ethernet connection, or you can use a mobile broadband connection to use the system anywhere. Alternatively, you can use the USB webcam output to connect to a PC. You can optionally use the remote control to launch any of the software applications. When connected, Blackmagic Web Presenter will automatically detect the streaming keys you need. Once that has been done, you can just plug in the SDI input and start using the system!
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Become a Predator in Hungry Shark Evolution - Free APK for Android.md b/spaces/fatiXbelha/sd/Become a Predator in Hungry Shark Evolution - Free APK for Android.md
deleted file mode 100644
index a0456ece8a9d8adf82e1e1265cf739869ff9b68d..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Become a Predator in Hungry Shark Evolution - Free APK for Android.md
+++ /dev/null
@@ -1,108 +0,0 @@
-
-Hungry Shark Evolution APK AN1: A Shark Game You Can't Miss
-Do you love sharks and want to experience life as one of them? Do you want to explore a beautiful underwater world and devour everything in your way? Do you want to evolve your shark from a tiny reef shark to a mighty megalodon? If you answered yes to any of these questions, then you should download hungry shark evolution apk an1 right now.
-hungry shark evolution apk an1 Download File >>> https://urllie.com/2uNyhG
-Hungry shark evolution apk an1 is a modified version of the popular arcade game Hungry Shark Evolution by Ubisoft. It allows you to play the game with unlimited coins and gems, which means you can unlock all the sharks and accessories without spending any real money. You can also play the game offline, so you don't need an internet connection to enjoy it.
-In this article, we will tell you everything you need to know about hungry shark evolution apk an1, including its main features, tips and tricks, review, and FAQs. Read on to find out why this is one of the best shark games ever made.
- Main Features of Hungry Shark Evolution APK AN1
-Hungry shark evolution apk an1 has many features that make it a fun and addictive game. Here are some of them:
-
-More than a dozen unique sharks and other creatures to evolve. You can choose from different types of sharks, such as the great white, the hammerhead, the tiger shark, and even the prehistoric megalodon. You can also evolve other creatures, such as a crab, a turtle, a penguin, and a dragon. Each shark has its own abilities and stats, so you can find the one that suits your play style.
-An open world both above and below the waves. You can explore a vast ocean full of fish, humans, ships, submarines, mines, volcanoes, islands, caves, and more. You can also jump out of the water and fly in the air, eating birds, planes, helicopters, and even satellites. There are many secrets and surprises waiting for you in this game.
-Awesome accessories like lasers, jetpacks, and top hats. You can equip your shark with various items that enhance its performance or appearance. For example, you can use a laser to zap your enemies, a jetpack to fly faster, or a top hat to look classy. There are also sunglasses, headphones, umbrellas, crowns, and more.
-Baby sharks to boost your predatory powers. You can recruit baby sharks that swim next to you and help you eat more. There are different types of baby sharks, such as a baby mako shark, a baby hammerhead shark, or even a baby megalodon shark. You can also merge different types of sharks together to create new hybrids.
-Tons of bonus objects and missions. You can find and collect sunken bonus objects that give you extra coins or gems. You can also complete missions that challenge you to eat certain types of creatures or perform certain actions. Completing missions and achievements will reward you with more coins or gems.
-
- Tips and Tricks for Hungry Shark Evolution APK AN1
-Hungry shark evolution apk an1 is not a hard game to play, but it can be challenging at times. Here are some tips and tricks to help you become a better shark and unlock more sharks:
-
-Understand the basics of survival. The most important thing you need to do in this game is to stay alive. Your shark has a hunger meter that depletes over time. If it reaches zero, your shark will starve and die. To prevent this, you need to eat constantly. Eat anything and everything you can find, such as fish, humans, crabs, turtles, jellyfish, and more. Some creatures are more filling than others, so try to eat the ones that give you more hunger points. You can also eat gold creatures that give you more coins or gems.
-Avoid dangerous enemies and obstacles. Not everything in the ocean is edible or friendly. There are some enemies and obstacles that can harm or kill you, such as bigger sharks, mines, torpedoes, harpoons, electric eels, and more. You can tell if something is dangerous by the red outline around it. Avoid these things or try to destroy them with your boost or accessories. You can also use your map to see where they are located.
-Use your boost wisely. Your shark has a boost meter that fills up when you eat. You can use your boost to swim faster, jump higher, or attack harder. Boosting can help you escape from danger, catch prey, or explore new areas. However, boosting also consumes more hunger points, so don't overuse it. You can upgrade your boost by spending coins or gems.
-Upgrade and unlock new sharks and accessories. As you play the game, you will earn coins and gems that you can use to upgrade and unlock new sharks and accessories. Upgrading your shark will increase its bite, speed, and boost stats. Unlocking new sharks will give you access to different abilities and stats. Unlocking and equipping accessories will enhance your shark's performance or appearance. You can also use coins or gems to revive your shark if it dies.
-Complete missions and achievements. The game has many missions and achievements that challenge you to do various things, such as eating a certain number of creatures, surviving for a certain time, or reaching a certain score. Completing missions and achievements will reward you with more coins or gems. You can also earn more coins or gems by watching ads or completing offers.
-
- Review of Hungry Shark Evolution APK AN1
-Hungry shark evolution apk an1 is a great game for anyone who loves sharks and arcade games. It has many positive aspects, such as:
-
-Fun and addictive gameplay. The game is easy to play but hard to master. You can control your shark with simple touch or tilt controls. You can enjoy eating everything in sight and evolving your shark into different forms. You can also discover new areas and secrets in the ocean. The game is never boring because there is always something new to do or see.
-Amazing graphics and sound effects. The game has stunning 3D graphics that make the ocean look realistic and beautiful. You can see the details of the creatures, the water effects, the lighting effects, and more. The game also has awesome sound effects that make you feel like you are in the ocean. You can hear the sounds of your shark biting, roaring, jumping, and more. You can also hear the sounds of the other creatures, the waves, the explosions, and more.
-Variety of content and customization. The game has a lot of content and customization options that make it more interesting and enjoyable. You can choose from different types of sharks and other creatures to evolve. You can also equip your shark with various accessories that change its look or abilities. You can also collect bonus objects and complete missions and achievements that give you more rewards.
-
-However, the game also has some negative aspects, such as:
-
-Repetitive gameplay. The game can get repetitive after a while because there is not much variation in the gameplay. You basically do the same thing over and over again: eat, avoid danger, upgrade, repeat. The game does not have a story mode or a multiplayer mode that could add some diversity to the gameplay.
-Limited offline mode. The game can be played offline, but you will miss out on some features that require an internet connection. For example, you will not be able to watch ads or complete offers that give you more coins or gems. You will also not be able to access some events or updates that may be available online.
-Potential bugs or glitches. The game is not perfect and may have some bugs or glitches that affect the gameplay. For example, some users have reported that their sharks get stuck in walls or objects, their progress gets lost or reset, their coins or gems disappear or get deducted without reason, their game crashes or freezes randomly, and more. These issues may vary depending on your device or version of the game.
-
-In conclusion, hungry shark evolution apk an1 is a fun and addictive game that lets you experience life as a shark in a beautiful and dangerous underwater world. It has many features, tips and tricks, and pros and cons that make it worth playing. However, it also has some drawbacks that may affect your enjoyment of the game. If you are looking for a shark game that is fun, easy, and free, then you should download hungry shark evolution apk an1 and give it a try.
- FAQs about Hungry Shark Evolution APK AN1
-Here are some frequently asked questions about hungry shark evolution apk an1 and their answers:
-hungry shark evolution mod apk an1
-hungry shark evolution game download an1
-hungry shark evolution hack apk an1
-hungry shark evolution unlimited coins and gems apk an1
-hungry shark evolution latest version apk an1
-hungry shark evolution apk free download an1
-hungry shark evolution offline apk an1
-hungry shark evolution cheats apk an1
-hungry shark evolution mega mod apk an1
-hungry shark evolution rexdl apk an1
-hungry shark evolution apk obb an1
-hungry shark evolution apk pure an1
-hungry shark evolution apk revdl an1
-hungry shark evolution apk uptodown an1
-hungry shark evolution apk android 1 an1
-hungry shark evolution all sharks unlocked apk an1
-hungry shark evolution apk data an1
-hungry shark evolution full version apk an1
-hungry shark evolution premium apk an1
-hungry shark evolution pro apk an1
-hungry shark evolution cracked apk an1
-hungry shark evolution vip mod apk an1
-hungry shark evolution unlimited money and gems apk an1
-hungry shark evolution no ads apk an1
-hungry shark evolution old version apk an1
-hungry shark evolution mod menu apk an1
-hungry shark evolution god mode apk an1
-hungry shark evolution original apk an1
-hungry shark evolution modded apk an1
-hungry shark evolution unlimited everything apk an1
-hungry shark evolution 10.0.0 mod apk an1
-hungry shark evolution 2023 mod apk an1
-hungry shark evolution hack version download an1
-hungry shark evolution mod unlimited money and gems download an1
-hungry shark evolution modded game download for android an1
-hungry shark evolution hack tool download no survey no password an1
-hungry shark evolution cheat codes for android free download an1
-hungry shark evolution hack online generator no human verification or survey 2023 an1
-how to install and play hungry shark evolution on pc with bluestacks emulator and apkpure app store an1
-how to get free gems and coins in hungry shark evolution without root or jailbreak using lucky patcher app and game guardian tool on android devices or ios devices with cydia impactor and ifunbox software tools on windows or mac computers respectively an1
-
-
-Question
-Answer
-
-
-How do I download hungry shark evolution apk an1?
-You can download hungry shark evolution apk an1 from various websites that offer modded apk files. However, you should be careful and only download from trusted sources, as some files may contain viruses or malware. You should also check the compatibility of the file with your device and the version of the game.
-
-
-Is hungry shark evolution apk an1 safe to use?
-Hungry shark evolution apk an1 is generally safe to use, as long as you download it from a reliable source and follow the installation instructions. However, there is always a risk of getting banned or losing your progress when using modded apk files, so you should use them at your own discretion and backup your data regularly.
-
-
-What is the difference between hungry shark evolution apk an1 and the original game?
-The main difference between hungry shark evolution apk an1 and the original game is that the former gives you unlimited coins and gems, which means you can unlock all the sharks and accessories without spending any real money. You can also play the game offline, which means you don't need an internet connection to enjoy it.
-
-
-How do I update hungry shark evolution apk an1?
-To update hungry shark evolution apk an1, you need to download the latest version of the modded apk file from the same source where you downloaded the previous one. You should also uninstall the old version of the game before installing the new one. You may lose your progress or settings if you don't backup your data before updating.
-
-
-How do I contact the developers of hungry shark evolution apk an1?
-Hungry shark evolution apk an1 is not developed by Ubisoft, the original creators of Hungry Shark Evolution. It is developed by independent modders who modify the game files to create new features or functions. Therefore, you cannot contact them directly through the official channels of Ubisoft. You may try to contact them through their websites or social media accounts, but they may not respond or provide support.
-
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download Stumble Guys Infinito and Enjoy 17 Unique Obstacle Courses with Up to 32 Players.md b/spaces/fatiXbelha/sd/Download Stumble Guys Infinito and Enjoy 17 Unique Obstacle Courses with Up to 32 Players.md
deleted file mode 100644
index a941edb4ba9236358373ecf5b2fad4dd83a13e8d..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download Stumble Guys Infinito and Enjoy 17 Unique Obstacle Courses with Up to 32 Players.md
+++ /dev/null
@@ -1,122 +0,0 @@
-
-How to Download Stumble Guys Infinito for Free
-Do you love playing party games online with your friends? Do you enjoy racing through obstacle courses and stumbling over hilarious challenges? If so, you might have heard of Stumble Guys, a massively popular multiplayer knockout game that has taken the gaming world by storm. But did you know that there is a way to make this game even more fun and exciting? Yes, we are talking about Stumble Guys Infinito, a modified version of the game that gives you unlimited access to all the features, skins, emotes, and gadgets. In this article, we will show you how to download Stumble Guys Infinito for free on your Android device or PC, and share some tips and tricks for playing this awesome game.
- What is Stumble Guys?
-Stumble Guys is an online battle royale party game that was released in 2021 by Kitka Games. The game is inspired by popular TV shows like Wipeout and Takeshi's Castle, where contestants have to overcome various obstacles and challenges to reach the finish line. In Stumble Guys, you can join up to 32 players online in a series of rounds that get more chaotic and difficult as you progress. You can run, jump, dash, slide, and stumble through different levels until one victor is crowned. You can also customize your character with different outfits and emotes, and invite your friends to join your party. The game is available for free on Google Play Store and Steam, and has received over 10 million downloads and positive reviews from players.
-download stumble guys infinito Download File ••• https://urllie.com/2uNBso
- What is Stumble Guys Infinito?
-Stumble Guys Infinito is a modified version of Stumble Guys that gives you unlimited access to all the features, skins, emotes, and gadgets that are otherwise locked or require in-game currency. With Stumble Guys Infinito, you can enjoy playing the game without any restrictions or limitations. You can unlock all the outfits and emotes that suit your style and mood, and use all the gadgets that can help you win the game faster. You can also play on any level and mode that you want, without having to wait for the game to update or load. Stumble Guys Infinito is a great way to experience the game in a new and exciting way, and to have more fun with your friends online.
- How to Download Stumble Guys Infinito for Android
-If you want to download Stumble Guys Infinito for free on your Android device, you will need to follow some simple steps. However, before you do that, you should be aware of some risks and precautions. First of all, Stumble Guys Infinito is not an official version of the game, and it is not endorsed or supported by Kitka Games. Therefore, downloading and installing it may violate the terms and conditions of the original game, and may result in your account being banned or suspended. Secondly, Stumble Guys Infinito is not available on Google Play Store, and you will need to download it from a third-party source. This means that you may expose your device to malware or viruses that can harm your data or privacy. Therefore, you should only download Stumble Guys Infinito from a trusted and reliable source, and scan it with an antivirus before installing it. Thirdly, you will need to enable the installation of apps from unknown sources on your device settings, which may compromise your device security. Therefore, you should only do this temporarily, and disable it after installing Stumble Guys Infinito. Finally, you should always backup your data before downloading and installing any app, in case something goes wrong or you want to revert to the original version.
- Requirements for Downloading Stumble Guys Infinito
-Before you download Stumble Guys Infinito for free on your Android device, you should make sure that your device meets the minimum requirements for running the app. These are:
-
-An Android device with version 5.0 or higher
-At least 1 GB of RAM
-At least 100 MB of free storage space
-A stable internet connection
-A compatible emulator if you want to play on PC (more on that later)
-
- Steps for Downloading Stumble Guys Infinito
-Now that you have checked the requirements and taken the precautions, you are ready to download Stumble Guys Infinito for free on your Android device. Here are the steps that you need to follow:
-
-Go to a trusted and reliable source that offers the download link for Stumble Guys Infinito. For example, you can use this link: Stumble Guys Infinito APK Download .
-Click on the download button and wait for the file to be downloaded on your device. The file size is about 90 MB.
-Once the file is downloaded, go to your device settings and enable the installation of apps from unknown sources. You can do this by going to Settings > Security > Unknown Sources and toggling it on.
-Locate the downloaded file on your device storage and tap on it to start the installation process.
-Follow the instructions on the screen and wait for the app to be installed on your device.
-Once the app is installed, disable the installation of apps from unknown sources on your device settings.
-Launch the app and enjoy playing Stumble Guys Infinito for free!
-
- How to Download Stumble Guys Infinito for PC
-If you want to download Stumble Guys Infinito for free on your PC, you will need to use an emulator. An emulator is a software that allows you to run Android apps on your PC by simulating an Android environment. There are many emulators available online, but we recommend using BlueStacks , as it is one of the most popular and reliable ones. Here are the steps that you need to follow:
- Requirements for Downloading Stumble Guys Infinito on PC
-Before you download Stumble Guys Infinito for free on your PC using an emulator, you should make sure that your PC meets the minimum requirements for running the emulator. These are:
-
-A Windows PC with version 7 or higher or a Mac PC with version 10 or higher
-At least 2 GB of RAM
-At least 5 GB of free disk space
-A stable internet connection
-A Google account (optional but recommended)
-
- Steps for Downloading Stumble Guys Infinito on PC
-Now that you have checked the requirements and downloaded the emulator, you are ready to download Stumble Guys Infinito for free on your PC using an emulator. Here are the steps that you need to follow:
-
-Install and launch the emulator on your PC. You can download BlueStacks from this link: BlueStacks Download .
-Sign in with your Google account or create a new one if you don't have one. This will allow you to access the Google Play Store and other Google services on the emulator.
-Go to the Google Play Store and search for Stumble Guys. Alternatively, you can use this link: Stumble Guys on Google Play Store .
-Download and install Stumble Guys on the emulator. This will be the original version of the game, not the infinito version.
-Go to a trusted and reliable source that offers the download link for Stumble Guys Infinito. For example, you can use this link: Stumble Guys Infinito APK Download .
-Click on the download button and wait for the file to be downloaded on your PC. The file size is about 90 MB.
-Locate the downloaded file on your PC storage and drag and drop it onto the emulator window. This will start the installation process of Stumble Guys Infinito on the emulator.
-Follow the instructions on the screen and wait for the app to be installed on the emulator.
-Launch the app and enjoy playing Stumble Guys Infinito for free on your PC!
-
- Tips and Tricks for Playing Stumble Guys Infinito
-Now that you have downloaded Stumble Guys Infinito for free on your Android device or PC, you might be wondering how to play it like a pro. Well, don't worry, we have got you covered. Here are some tips and tricks that can help you win more games and have more fun with Stumble Guys Infinito:
-download stumble guys block dash infinito
-download stumble guys multiplayer party knockout infinito
-download stumble guys online game with 32 players infinito
-download stumble guys apk mod infinito
-download stumble guys for android infinito
-download stumble guys live stream infinito
-download stumble guys with friends list infinito
-download stumble guys 3v3 mode infinito
-download stumble guys latest version infinito
-download stumble guys hack infinito
-download stumble guys free gems infinito
-download stumble guys unlimited coins infinito
-download stumble guys no ads infinito
-download stumble guys offline mode infinito
-download stumble guys custom skins infinito
-download stumble guys new levels infinito
-download stumble guys tips and tricks infinito
-download stumble guys gameplay video infinito
-download stumble guys review and rating infinito
-download stumble guys best strategies infinito
-download stumble guys cheats and glitches infinito
-download stumble guys fun and funny moments infinito
-download stumble guys how to win every round infinito
-download stumble guys challenges and tournaments infinito
-download stumble guys updates and news infinito
- How to Use Shortcuts in Stumble Guys Infinito
-One of the best ways to gain an advantage over your opponents in Stumble Guys Infinito is to use shortcuts. Shortcuts are hidden or alternative paths that can help you skip some obstacles or reach the finish line faster. However, not all shortcuts are easy to find or use, and some may even backfire if you are not careful. Here are some examples of shortcuts that you can use in Stumble Guys Infinito:
-
-In the level Crazy Castle , you can jump over the wall at the start of the level instead of going through the gate. This will save you some time and avoid some traps.
-In the level Frozen Frenzy , you can slide down the ice slope at the end of the level instead of going around it. This will give you a boost of speed and help you reach the finish line faster.
-In the level Lava Land , you can jump over the lava pits instead of going through them. This will prevent you from getting burned and losing health.
-
- How to Use Gadgets in Stumble Guys Infinito
-Another way to spice up your gameplay in Stumble Guys Infinito is to use gadgets. Gadgets are special items that can give you various effects, such as speed, shield, magnet, or bomb. You can unlock all the gadgets in Stumble Guys Infinito, and use them whenever you want. However, you should also be aware of their drawbacks and limitations, as some gadgets may have side effects or cooldowns. Here are some examples of gadgets that you can use in Stumble Guys Infinito:
-
-The Speed Boost gadget gives you a burst of speed for a few seconds, which can help you outrun your opponents or overcome some obstacles. However, it also makes you more prone to stumbling or falling off edges, so use it wisely.
-The Shield gadget gives you a protective barrier that blocks any damage or knockback from other players or traps. However, it also makes you slower and heavier, so use it sparingly.
-The Magnet gadget attracts nearby coins and gems to you, which can help you increase your score or buy more items. However, it also attracts nearby players to you, which can make you more vulnerable to attacks or interference, so use it cautiously.
-The Bomb gadget allows you to throw a bomb at a target location, which can damage or knock back anyone or anything in its radius. However, it also has a delay and a cooldown, so use it strategically.
-
- How to Use Emotes in Stumble Guys Infinito
-A final way to make your gameplay more fun and expressive in Stumble Guys Infinito is to use emotes. Emotes are gestures or animations that can show your emotions or personality, such as laughing, dancing, waving, or crying. You can unlock all the emotes in Stumble Guys Infinito, and use them whenever you want. However, you should also be careful not to use them at the wrong time or place, as some emotes may be seen as rude or disrespectful by other players or the game rules. Here are some examples of emotes that you can use in Stumble Guys Infinito:
-
-The Laugh emote makes your character laugh out loud, which can show your joy or amusement. However, it can also be seen as mocking or taunting by other players, especially if you use it after winning or eliminating someone, so use it with respect.
-The Dance emote makes your character perform a dance move, which can show your style or flair. However, it can also be seen as distracting or annoying by other players, especially if you use it during the game or near the finish line, so use it with caution.
-The Wave emote makes your character wave their hand, which can show your friendliness or gratitude. However, it can also be seen as sarcastic or ironic by other players, especially if you use it after losing or being eliminated, so use it with sincerity.
-The Cry emote makes your character cry out loud, which can show your sadness or disappointment. However, it can also be seen as whiny or pathetic by other players, especially if you use it too often or for no reason, so use it with moderation.
-
- Conclusion
-Stumble Guys Infinito is a modified version of Stumble Guys that gives you unlimited access to all the features, skins, emotes, and gadgets that are otherwise locked or require in-game currency. It is a great way to experience the game in a new and exciting way, and to have more fun with your friends online. However, you should also be aware of the risks and precautions involved in downloading and installing Stumble Guys Infinito on your Android device or PC using an emulator. You should only download Stumble Guys Infinito from a trusted and reliable source, scan it with an antivirus before installing it, enable and disable the installation of apps from unknown sources on your device settings, backup your data before downloading and installing any app, and respect the terms and conditions of the original game and other players. If you follow these steps and tips, you will be able to enjoy playing Stumble Guys Infinito for free without any problems. So what are you waiting for? Download Stumble Guys Infinito today and join the ultimate online party game!
- FAQs
-Here are some frequently asked questions and their answers about Stumble Guys Infinito:
-
-Q: Is Stumble Guys Infinito safe to download and install?
-A: Stumble Guys Infinito is not an official version of the game, and it is not endorsed or supported by Kitka Games. Therefore, downloading and installing it may violate the terms and conditions of the original game, and may result in your account being banned or suspended. Moreover, Stumble Guys Infinito is not available on Google Play Store or Steam, and you will need to download it from a third-party source. This means that you may expose your device to malware or viruses that can harm your data or privacy. Therefore, you should only download Stumble Guys Infinito from a trusted and reliable source, and scan it with an antivirus before installing it.
-Q: How do I update Stumble Guys Infinito?
-A: Since Stumble Guys Infinito is not an official version of the game, it may not receive regular updates from Kitka Games. Therefore, you may need to download and install a new version of Stumble Guys Infinito whenever there is a major update for the original game. You can check for updates on the source that you downloaded Stumble Guys Infinito from, or on other websites that offer the download link for Stumble Guys Infinito.
-Q: Can I play Stumble Guys Infinito with my friends who have the original version of the game?
-A: Yes, you can play Stumble Guys Infinito with your friends who have the original version of the game, as long as you are on the same server and platform. However, you should be careful not to use any features, skins, emotes, or gadgets that are exclusive to Stumble Guys Infinito, as this may cause some glitches or errors in the game, or may be detected by the game's anti-cheat system. Moreover, you should also respect your friends and other players who have the original version of the game, and not use Stumble Guys Infinito to gain an unfair advantage or to ruin their gameplay experience.
-Q: Can I play Stumble Guys Infinito offline?
-A: No, you cannot play Stumble Guys Infinito offline, as it is an online multiplayer game that requires a stable internet connection to run. You can only play Stumble Guys Infinito online with other players from around the world, or with your friends in a private party. If you want to play Stumble Guys offline, you will need to download and install the original version of the game from Google Play Store or Steam.
-Q: Is Stumble Guys Infinito legal?
-A: Stumble Guys Infinito is not legal, as it is a modified version of Stumble Guys that violates the intellectual property rights and terms and conditions of Kitka Games. Therefore, downloading and installing Stumble Guys Infinito may result in legal action from Kitka Games or other authorities. Moreover, Stumble Guys Infinito may also contain malware or viruses that can harm your device or data. Therefore, we do not recommend downloading or installing Stumble Guys Infinito, and we advise you to use the original version of the game instead.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download Ultimate Car Driving Simulator for Free and Experience the Thrill of Driving.md b/spaces/fatiXbelha/sd/Download Ultimate Car Driving Simulator for Free and Experience the Thrill of Driving.md
deleted file mode 100644
index 2eaa2ce3ae7c6647d5a95e91ba0caba1aaa9be04..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download Ultimate Car Driving Simulator for Free and Experience the Thrill of Driving.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
-Ultimate Car Driving Simulator Free Download: How to Enjoy the Best Driving Game of 2020 on Your PC or Mobile Device
- Do you love driving cars and exploring new places? Do you want to experience the most realistic and fun driving physics on your PC or mobile device? If yes, then you should try Ultimate Car Driving Simulator, the best car driving simulator game of 2020. In this article, we will tell you what Ultimate Car Driving Simulator is, how to download it for free on your PC or mobile device, and some tips and tricks for playing it.
-ultimate car driving simulator free download Download File ->>> https://urllie.com/2uNyii
- What is Ultimate Car Driving Simulator?
- Ultimate Car Driving Simulator is a racing game developed by Sir Studios that combines realism and fun to create the best car driving simulator on mobile. It has the following features and gameplay:
- Features and gameplay of Ultimate Car Driving Simulator
-
-You can choose from a variety of cars, from racing cars to off-road vehicles, SUVs, tuner cars, muscle cars, 4WD trucks, and more. Each car has its own unique characteristics and performance.
-You can customize your car with countless vinyls, parts, and accessories. You can change the color, wheels, spoilers, bumpers, lights, exhausts, and more. You can also upgrade your car's engine, turbo, brakes, suspension, and tires.
-You can drive your car in a huge open world map that has different environments, such as cities, deserts, mountains, forests, beaches, and more. You can explore the map freely and find hidden locations and secrets.
-You can enjoy realistic driving physics that simulate the behavior of real cars. You can feel the acceleration, braking, steering, drifting, skidding, crashing, and damage of your car. You can also switch between different camera views and control modes.
-You can experience realistic sound effects that are recorded from real cars. You can hear the engine roar, the tires screech, the wind blow, and the collision impact. You can also adjust the volume and sound quality.
-
- Why is Ultimate Car Driving Simulator the best driving game of 2020?
- Ultimate Car Driving Simulator is the best driving game of 2020 because it offers you an immersive and addictive gameplay that will keep you entertained for hours. Here are some reasons why you should play Ultimate Car Driving Simulator:
-
-It has stunning graphics and 3D effects that make you feel like you are actually in the driver's seat. The game uses advanced graphics engine to provide the most realistic graphics and deepest 3D ever on mobile.
-It has endless fun and challenges that will test your driving skills and creativity. You can drive as fast as you want, perform stunts and tricks, crash into other cars and objects, or just relax and enjoy the scenery.
-It has a user-friendly interface and easy controls that make it suitable for anyone. You can play the game with simple touch or tilt controls, or use a keyboard or a gamepad if you play on PC. You can also customize the settings according to your preference.
-It has regular updates and new content that will keep you engaged. The game is constantly updated with new cars, maps, features, improvements, and bug fixes. You can also follow the developer on social media platforms to get the latest news and updates.
-
- How to download Ultimate Car Driving Simulator for free on your PC or mobile device?
- If you want to download Ultimate Car Driving Simulator for free on your PC or mobile device, you have three options:
- Download Ultimate Car Driving Simulator from Google Play Store
- If you have an Android device, you can download Ultimate Car Driving Simulator from the Google Play Store. Here are the steps to do so:
-
-Open the Google Play Store app on your device.
-Search for Ultimate Car Driving Simulator in the search bar.
-Select the game from the list of results and tap on Install.
-Wait for the game to download and install on your device.
-Launch the game and enjoy.
-
- Download Ultimate Car Driving Simulator from BlueStacks
- If you have a PC, you can download Ultimate Car Driving Simulator from BlueStacks, an Android emulator that allows you to run Android apps and games on your PC. Here are the steps to do so:
-
-Download and install BlueStacks on your PC from https://www.bluestacks.com/ .
-Launch BlueStacks and sign in with your Google account.
-Search for Ultimate Car Driving Simulator in the search bar.
-Select the game from the list of results and click on Install.
-Wait for the game to download and install on your PC.
-Launch the game and enjoy.
-
- Download Ultimate Car Driving Simulator APK file
- If you want to download Ultimate Car Driving Simulator APK file, which is the installer file for Android apps and games, you can do so from various websites that offer APK downloads. Here are the steps to do so:
-ultimate car driving simulator apk download
-ultimate car driving simulator mod apk free download
-ultimate car driving simulator pc download
-ultimate car driving simulator game download
-ultimate car driving simulator hack download
-ultimate car driving simulator online free no download
-ultimate car driving simulator latest version free download
-ultimate car driving simulator for android free download
-ultimate car driving simulator unlimited money free download
-ultimate car driving simulator app download
-ultimate car driving simulator cheats free download
-ultimate car driving simulator windows 10 free download
-ultimate car driving simulator offline free download
-ultimate car driving simulator play store free download
-ultimate car driving simulator bluestacks free download
-ultimate car driving simulator 2020 free download
-ultimate car driving simulator ios free download
-ultimate car driving simulator mac free download
-ultimate car driving simulator 3d free download
-ultimate car driving simulator mod menu free download
-ultimate car driving simulator old version free download
-ultimate car driving simulator uptodown free download
-ultimate car driving simulator revdl free download
-ultimate car driving simulator rexdl free download
-ultimate car driving simulator laptop free download
-ultimate car driving simulator premium apk free download
-ultimate car driving simulator obb file free download
-ultimate car driving simulator mod apk all cars unlocked free download
-ultimate car driving simulator hack version free download
-ultimate car driving simulator mod apk android 1 free download
-ultimate car driving simulator mod apk unlimited gems and money free download
-ultimate car driving simulator mod apk latest version 2020 free download
-ultimate car driving simulator mod apk happymod free download
-ultimate car driving simulator mod apk an1 free download
-ultimate car driving simulator mod apk techylist free download
-ultimate car driving simulator mod apk unlimited everything free download
-ultimate car driving simulator mod apk 4.7.2 free download
-ultimate car driving simulator mod apk 4.7.1 free download
-ultimate car driving simulator mod apk 4.7.0 free download
-ultimate car driving simulator mod apk 4.6.9 free download
-ultimate car driving simulator mod apk 4.6.8 free download
-ultimate car driving simulator mod apk 4.6.7 free download
-ultimate car driving simulator mod apk 4.6.6 free download
-ultimate car driving simulator mod apk 4.6.5 free download
-ultimate car driving simulator mod apk 4.6.4 free download
-ultimate car driving simulator mod apk 4.6.3 free download
-ultimate car driving simulator mod apk 4.6.2 free download
-ultimate car driving simulator mod apk 4.6.1 free download
-ultimate car driving simulator mod apk 4.6.0 free download
-
-Go to a website that offers APK downloads, such as https://apkpure.com/ .
-Search for Ultimate Car Driving Simulator in the search bar.
-Select the game from the list of results and click on Download APK.
-Wait for the APK file to download on your device.
-Locate the APK file on your device and tap on it to install it.
-Launch the game and enjoy.
-
- Tips and tricks for playing Ultimate Car Driving Simulator
- Now that you have downloaded Ultimate Car Driving Simulator for free on your PC or mobile device, you might want some tips and tricks to make the most out of it. Here are some of them:
- Customize your car and show off your style
- One of the best features of Ultimate Car Driving Simulator is that you can customize your car with various parts and accessories. You can change the color, wheels, spoilers, bumpers, lights, exhausts, and more. You can also upgrade your car's engine, turbo, brakes, suspension, and tires. This will not only improve your car's performance, but also make it look more stylish and unique. You can show off your car to other players online or take screenshots and share them on social media platforms.
- Explore the huge open world map and discover different terrains
- Another great feature of Ultimate Car Driving Simulator is that you can drive your car in a huge open world map that has different environments, such as cities, deserts, mountains, forests, beaches, and more. You can explore the map freely and find hidden locations and secrets. You can also drive on different terrains, such as asphalt, dirt, sand, snow, grass, and more. Each terrain has its own challenges and effects on your car's handling and speed. You can also switch between day and night modes and experience different weather conditions.
- Use realistic driving physics and sound effects to enhance your experience
- The last but not least feature of Ultimate Car Driving Simulator is that it uses realistic driving physics and sound effects that simulate the behavior of real cars. You can feel the acceleration, braking, steering, drifting, skidding, crashing, and damage of your car. You can also hear the engine roar, the tires screech, the wind blow, and the collision impact. These features make the game more immersive and fun to play. You can also adjust the settings according to your preference.
- Conclusion
- In conclusion, Ultimate Car Driving Simulator is a racing game that combines realism and fun to create the best car driving simulator on mobile. You can choose from a variety of cars, customize them with countless parts and accessories, drive them in a huge open world map with different environments and terrains, and enjoy realistic driving physics and sound effects. You can download Ultimate Car Driving Simulator for free on your PC or mobile device by following one of the three methods we have explained above. You can also check out some tips and tricks we have shared to make the most out of the game. Ultimate Car Driving Simulator is a game that will satisfy your driving passion and give you hours of fun and excitement. Download it now and enjoy the ultimate car driving experience.
- FAQs
- Here are some frequently asked questions about Ultimate Car Driving Simulator:
-
-Is Ultimate Car Driving Simulator free to play?
-Yes, Ultimate Car Driving Simulator is free to play. However, it contains ads and in-app purchases that can enhance your gameplay or remove ads. You can also disable ads by turning off your internet connection while playing.
-Can I play Ultimate Car Driving Simulator offline?
-Yes, you can play Ultimate Car Driving Simulator offline. However, some features and content may not be available or updated when you play offline. You also need an internet connection to download the game and access some online features, such as multiplayer mode, leaderboards, and social media integration.
-Can I play Ultimate Car Driving Simulator on PC?
-Yes, you can play Ultimate Car Driving Simulator on PC by using an Android emulator, such as BlueStacks. This will allow you to run the game on your PC and enjoy it on a bigger screen and with better controls. However, you may need a powerful PC to run the game smoothly and without lag.
-How can I get more coins and gems in Ultimate Car Driving Simulator?
-You can get more coins and gems in Ultimate Car Driving Simulator by playing the game regularly and completing various tasks and challenges. You can also watch ads or make in-app purchases to get more coins and gems. Coins and gems are used to buy and upgrade cars, parts, and accessories.
-How can I contact the developer of Ultimate Car Driving Simulator?
-You can contact the developer of Ultimate Car Driving Simulator by sending an email to support@sirstudios.com . You can also follow them on Facebook, Instagram, Twitter, YouTube, and Discord to get the latest news and updates about the game.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/tools/data/align_images.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/tools/data/align_images.py
deleted file mode 100644
index 0ea56d222cde6888002fe82f1f9b34312d4dd48f..0000000000000000000000000000000000000000
--- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/tools/data/align_images.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import argparse
-import json
-import os
-from os.path import join as pjoin
-import sys
-import bz2
-import numpy as np
-import cv2
-from tqdm import tqdm
-from tensorflow.keras.utils import get_file
-from utils.ffhq_dataset.face_alignment import image_align
-from utils.ffhq_dataset.landmarks_detector import LandmarksDetector
-
-LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
-
-
-def unpack_bz2(src_path):
- data = bz2.BZ2File(src_path).read()
- dst_path = src_path[:-4]
- with open(dst_path, 'wb') as fp:
- fp.write(data)
- return dst_path
-
-
-class SizePathMap(dict):
- """{size: {aligned_face_path0, aligned_face_path1, ...}, ...}"""
- def add_item(self, size, path):
- if size not in self:
- self[size] = set()
- self[size].add(path)
-
- def get_sizes(self):
- sizes = []
- for key, paths in self.items():
- sizes.extend([key,]*len(paths))
- return sizes
-
- def serialize(self):
- result = {}
- for key, paths in self.items():
- result[key] = list(paths)
- return result
-
-
-def main(args):
- landmarks_model_path = unpack_bz2(get_file('shape_predictor_68_face_landmarks.dat.bz2',
- LANDMARKS_MODEL_URL, cache_subdir='temp'))
-
- landmarks_detector = LandmarksDetector(landmarks_model_path)
- face_sizes = SizePathMap()
- raw_img_dir = args.raw_image_dir
- img_names = [n for n in os.listdir(raw_img_dir) if os.path.isfile(pjoin(raw_img_dir, n))]
- aligned_image_dir = args.aligned_image_dir
- os.makedirs(aligned_image_dir, exist_ok=True)
- pbar = tqdm(img_names)
- for img_name in pbar:
- pbar.set_description(img_name)
- if os.path.splitext(img_name)[-1] == '.txt':
- continue
- raw_img_path = os.path.join(raw_img_dir, img_name)
- try:
- for i, face_landmarks in enumerate(landmarks_detector.get_landmarks(raw_img_path), start=1):
- face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
- aligned_face_path = os.path.join(aligned_image_dir, face_img_name)
-
- face_size = image_align(
- raw_img_path, aligned_face_path, face_landmarks, resize=args.resize
- )
- face_sizes.add_item(face_size, aligned_face_path)
- pbar.set_description(f"{img_name}: {face_size}")
-
- if args.draw:
- visual = LandmarksDetector.draw(cv2.imread(raw_img_path), face_landmarks)
- cv2.imwrite(
- pjoin(args.aligned_image_dir, os.path.splitext(face_img_name)[0] + "_landmarks.png"),
- visual
- )
- except Exception as e:
- print('[Error]', e, 'error happened when processing', raw_img_path)
-
- print(args.raw_image_dir, ':')
- sizes = face_sizes.get_sizes()
- results = {
- 'mean_size': np.mean(sizes),
- 'num_faces_detected': len(sizes),
- 'num_images': len(img_names),
- 'sizes': sizes,
- 'size_path_dict': face_sizes.serialize(),
- }
- print('\t', results)
- if args.out_stats is not None:
- os.makedirs(os.path.dirname(args.out_stats), exist_ok=True)
- with open(args.out_stats, 'w') as f:
- json.dump(results, f)
-
-
-def parse_args(args=None, namespace=None):
- parser = argparse.ArgumentParser(description="""
- Extracts and aligns all faces from images using DLib and a function from the original FFHQ dataset preparation step, e.g.:
- python align_images.py /raw_images /aligned_images
- """
- )
- parser.add_argument('raw_image_dir')
- parser.add_argument('aligned_image_dir')
- parser.add_argument('--resize',
- help="Resize the aligned output to 1024",
- action='store_true')
- parser.add_argument('--draw',
- help="Also save a visualization of the detected landmarks",
- action='store_true')
- parser.add_argument('--out_stats',
- help="Output JSON file for face-size statistics", default=None)
- return parser.parse_args(args=args, namespace=namespace)
-
-
-if __name__ == "__main__":
- main(parse_args())
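
For reference, a minimal sketch of driving the aligner above programmatically rather than from the shell; the directory and stats paths are placeholders, not paths from this repository.

```python
# Hedged usage sketch for the align_images helper above; paths are placeholders.
args = parse_args([
    "raw_images",                       # raw_image_dir with unaligned photos
    "aligned_images",                   # aligned_image_dir for cropped outputs
    "--resize",                         # resize aligned faces (see --resize help)
    "--out_stats", "stats/align.json",  # dump face-size statistics as JSON
])
main(args)
```
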
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/APKPure The Best Way to Get Roblox APK for Android.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/APKPure The Best Way to Get Roblox APK for Android.md
deleted file mode 100644
index d69630216bd7cbad1ba35d54d42dd05289f14c5e..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/APKPure The Best Way to Get Roblox APK for Android.md
+++ /dev/null
@@ -1,166 +0,0 @@
-
-What is apk pure roblox and why you should try it
-If you are a fan of roblox, the online platform where you can create and play millions of games with other players, you might be interested in apk pure roblox, a free app that lets you access and enjoy roblox on your Android device.
-Apk pure roblox is not an official app from roblox, but rather a third-party app that allows you to download and install roblox games without any restrictions or limitations. You can also use apk pure roblox to update your existing roblox games, as well as discover new ones that are not available on the Google Play Store.
-Some of the features and benefits of apk pure roblox are:
-
-It is easy to use and has a user-friendly interface.
-It is safe and secure, as it only provides verified and original apk files from the developers.
-It is fast and reliable, as it uses advanced download technology to speed up the download process.
-It is compatible with most Android devices, as it supports different versions and architectures.
-It is free and does not require any registration or subscription.
-
-If you want to experience roblox in a new and exciting way, you should definitely give apk pure roblox a try. You will be able to access and play a variety of roblox games, from adventure and action to simulation and role-playing. You will also be able to create your own roblox games using the app, and share them with other players around the world.
- How to download and install apk pure roblox on your device
-Downloading and installing apk pure roblox on your device is very simple and straightforward. Just follow these steps:
-
-Go to the official website of apk pure roblox at https://www.apkpure.com/robloxpure/com.apkpure.robloxpure.
-Click on the green "Download APK" button to start downloading the app.
-Once the download is complete, open the downloaded file and tap on "Install".
-If you see a message that says "For your security, your phone is not allowed to install unknown apps from this source", go to your device settings and enable the option to allow installation from unknown sources.
-Wait for the installation to finish, then tap on "Open" to launch the app.
-
-Congratulations, you have successfully downloaded and installed apk pure roblox on your device. You can now start exploring and playing roblox games using the app.
- How to play roblox games using apk pure roblox
-Playing roblox games using apk pure roblox is very easy and fun. Here are some tips and tricks for finding and playing roblox games on apk pure roblox:
-
-To find a game, you can either browse through the categories or use the search function. You can also check out the featured, popular, or new games on the home page.
-To play a game, just tap on it and then tap on "Play". The game will start downloading automatically. You can also tap on "Details" to see more information about the game, such as its description, screenshots, ratings, reviews, etc.
-To update a game, just tap on it and then tap on "Update". The game will start updating automatically. You can also tap on "Details" to see the latest version and changelog of the game.
-To manage your games, just tap on the menu icon on the top left corner and then tap on "My Games". You can see all the games you have downloaded, installed, or updated. You can also delete or uninstall any game you don't want anymore.
-To play with other players, just tap on the menu icon on the top left corner and then tap on "Friends". You can see all the players who are online or offline. You can also add, chat, or join any player you want.
-
-Playing roblox games using apk pure roblox is a great way to enjoy roblox on your Android device. You can play a variety of games, from casual and fun to challenging and competitive. You can also interact with other players and make new friends.
- How to create your own roblox games using apk pure roblox
-Creating your own roblox games using apk pure roblox is not only possible, but also rewarding and fun. You can unleash your creativity and imagination, and share your games with other players. Here are some benefits and challenges of creating your own roblox games on apk pure roblox:
-
-
-| Benefits | Challenges |
-| --- | --- |
-| You can use the same tools and features as the official roblox app, such as the Studio, the Toolbox, the Script Editor, etc. | You need to have some basic knowledge and skills in programming, designing, and testing. |
-| You can access and use thousands of free assets and models from the roblox library or other users. | You need to follow the roblox terms of service and community guidelines, and respect the intellectual property rights of others. |
-| You can publish and update your games easily and quickly using apk pure roblox. | You need to monitor and maintain your games regularly, and respond to feedback and issues from users. |
-| You can earn robux, the virtual currency of roblox, by creating popular and engaging games. | You need to invest time, effort, and resources in creating and improving your games. |
-
-
- If you are interested in creating your own roblox games using apk pure roblox, here are some steps you can follow:
-
-Tap on the menu icon on the top left corner and then tap on "Create". You will see a list of templates you can use to start your game.
-Choose a template that suits your game idea and genre. You can also choose a blank template if you want to start from scratch.
-Edit your game using the Studio. You can add, remove, or modify any elements you want. You can also use the Toolbox to search and insert assets and models from the library or other users.
-Write your game logic using the Script Editor. You can use Lua, a simple and powerful programming language, to control how your game works. You can also use the Command Bar to execute commands or test your code.
-Test your game using the Play button. You can see how your game looks and feels on your device. You can also use the Test button to see how your game performs on different devices and platforms.
-Publish your game using the Publish button. You can give your game a name, a description, an icon, a genre, etc. You can also choose who can play your game: everyone, friends only, or no one.
-Update your game using the Update button. You can make any changes or improvements you want to your game. You can also see how many downloads, likes, dislikes, comments, etc. your game has received.
-
- How to fix common issues and problems with apk pure roblox
- Apk pure roblox is a reliable and stable app that works well on most Android devices. However, sometimes you might encounter some issues or problems with apk pure roblox that affect your experience. Here are some solutions for some of the most common issues and problems with apk pure roblox:
- The app does not download or install properly
- If you have trouble downloading or installing apk pure roblox on your device, you might want to check the following:
-
- Make sure you have enough storage space on your device. Apk pure roblox requires about 100 MB of free space to download and install.
- Make sure you have a stable internet connection. Apk pure roblox requires a fast and reliable internet connection to download and install.
- Make sure you have enabled the option to allow installation from unknown sources. You can do this by going to your device settings and finding the security or privacy option.
- Make sure you have downloaded the app from the official website of apk pure roblox. You can find the link in the first section of this article.
- If none of the above works, you might want to try uninstalling and reinstalling the app, or clearing the app cache and data.
-
- The app crashes or freezes frequently
- If you experience frequent crashes or freezes with apk pure roblox on your device, you might want to check the following:
-
-Make sure your device meets the minimum requirements for apk pure roblox. Apk pure roblox requires Android 4.4 or higher, and at least 2 GB of RAM.
- Make sure your device is updated to the latest version of Android. Apk pure roblox works best on the latest version of Android, as it has more features and bug fixes.
- Make sure your app is updated to the latest version of apk pure roblox. Apk pure roblox releases regular updates that improve the app performance and stability, as well as add new features and games.
- Make sure you have closed any other apps or background processes that might be using up your device resources. Apk pure roblox requires a lot of memory and CPU power to run smoothly, so having too many apps or processes running at the same time might cause conflicts or slowdowns.
- If none of the above works, you might want to try restarting your device, or reinstalling the app.
-
- The app does not load or play roblox games properly
- If you have trouble loading or playing roblox games using apk pure roblox on your device, you might want to check the following:
-
-Make sure you have a stable internet connection. Roblox games require a fast and reliable internet connection to load and play, as they are online multiplayer games.
- Make sure you have enough storage space on your device. Roblox games require a lot of storage space to download and install, as they have high-quality graphics and sounds.
- Make sure you have allowed the app to access your device permissions. Apk pure roblox needs to access your device permissions, such as camera, microphone, location, etc., to enable some features and functions of roblox games.
- Make sure you have adjusted the app settings according to your device specifications. Apk pure roblox allows you to change some settings, such as graphics quality, sound volume, etc., to optimize the app performance and compatibility with your device.
- If none of the above works, you might want to try clearing the game cache and data, or reinstalling the game.
-
- Conclusion
- Apk pure roblox is a wonderful app that lets you access and enjoy roblox on your Android device. You can download and install roblox games without any restrictions or limitations, as well as update them easily and quickly. You can also play a variety of roblox games, from adventure and action to simulation and role-playing. You can also create your own roblox games using the app, and share them with other players around the world.
- If you are looking for a new and exciting way to experience roblox on your Android device, you should definitely give apk pure roblox a try. You will be amazed by how much fun and creativity you can have with apk pure roblox.
- So what are you waiting for? Download apk pure roblox today and start exploring and playing roblox games on your Android device!
- Frequently Asked Questions
-
-Q: Is apk pure roblox safe?
-A: Yes, apk pure roblox is safe and secure. It only provides verified and original apk files from the developers. It does not contain any viruses, malware, or spyware. It also does not collect any personal information from users.
-Q: Is apk pure roblox legal?
-A: Yes, apk pure roblox is legal. It does not violate any laws or regulations regarding downloading or installing apps from third-party sources. However, it is not affiliated with or endorsed by roblox, so use it at your own risk.
-Q: Is apk pure roblox free?
-A: Yes, apk pure roblox is free. It does not require any registration or subscription. However, some features or functions of roblox games might require in-app purchases or premium memberships.
-Q: How can I contact apk pure roblox?
-A: You can contact apk pure roblox by visiting their website at https://www.apkpure.com/robloxpure/com.apkpure.robloxpure and clicking on the "Contact Us" button. You can also follow them on their social media accounts, such as Facebook, Twitter, Instagram, etc.
-Q: How can I support apk pure roblox?
-A: You can support apk pure roblox by rating and reviewing the app on the website or the Google Play Store. You can also share the app with your friends and family, and invite them to join you in playing roblox games using apk pure roblox.
-
-
\ No newline at end of file
diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/mime/mime.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/mime/mime.js
deleted file mode 100644
index d7efbde70b8e95fb7f67da9c8cfed11ce8ce4133..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/mime/mime.js
+++ /dev/null
@@ -1,108 +0,0 @@
-var path = require('path');
-var fs = require('fs');
-
-function Mime() {
- // Map of extension -> mime type
- this.types = Object.create(null);
-
- // Map of mime type -> extension
- this.extensions = Object.create(null);
-}
-
-/**
- * Define mimetype -> extension mappings. Each key is a mime-type that maps
- * to an array of extensions associated with the type. The first extension is
- * used as the default extension for the type.
- *
- * e.g. mime.define({'audio/ogg': ['oga', 'ogg', 'spx']});
- *
- * @param map (Object) type definitions
- */
-Mime.prototype.define = function (map) {
- for (var type in map) {
- var exts = map[type];
- for (var i = 0; i < exts.length; i++) {
- if (process.env.DEBUG_MIME && this.types[exts[i]]) {
- console.warn((this._loading || "define()").replace(/.*\//, ''), 'changes "' + exts[i] + '" extension type from ' +
- this.types[exts[i]] + ' to ' + type);
- }
-
- this.types[exts[i]] = type;
- }
-
- // Default extension is the first one we encounter
- if (!this.extensions[type]) {
- this.extensions[type] = exts[0];
- }
- }
-};
-
-/**
- * Load an Apache2-style ".types" file
- *
- * This may be called multiple times (it's expected). Where files declare
- * overlapping types/extensions, the last file wins.
- *
- * @param file (String) path of file to load.
- */
-Mime.prototype.load = function(file) {
- this._loading = file;
- // Read file and split into lines
- var map = {},
- content = fs.readFileSync(file, 'ascii'),
- lines = content.split(/[\r\n]+/);
-
- lines.forEach(function(line) {
- // Clean up whitespace/comments, and split into fields
- var fields = line.replace(/\s*#.*|^\s*|\s*$/g, '').split(/\s+/);
- map[fields.shift()] = fields;
- });
-
- this.define(map);
-
- this._loading = null;
-};
-
-/**
- * Lookup a mime type based on extension
- */
-Mime.prototype.lookup = function(path, fallback) {
- var ext = path.replace(/^.*[\.\/\\]/, '').toLowerCase();
-
- return this.types[ext] || fallback || this.default_type;
-};
-
-/**
- * Return file extension associated with a mime type
- */
-Mime.prototype.extension = function(mimeType) {
- var type = mimeType.match(/^\s*([^;\s]*)(?:;|\s|$)/)[1].toLowerCase();
- return this.extensions[type];
-};
-
-// Default instance
-var mime = new Mime();
-
-// Define built-in types
-mime.define(require('./types.json'));
-
-// Default type
-mime.default_type = mime.lookup('bin');
-
-//
-// Additional API specific to the default instance
-//
-
-mime.Mime = Mime;
-
-/**
- * Lookup a charset based on mime type.
- */
-mime.charsets = {
- lookup: function(mimeType, fallback) {
- // Assume text types are utf8
- return (/^text\/|^application\/(javascript|json)/).test(mimeType) ? 'UTF-8' : fallback;
- }
-};
-
-module.exports = mime;
diff --git a/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/evaluation/masks/mask.py b/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/evaluation/masks/mask.py
deleted file mode 100644
index 3e34d0675a781fba983cb542f18390255aaf2609..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/lama-video-watermark-remover/saicinpainting/evaluation/masks/mask.py
+++ /dev/null
@@ -1,429 +0,0 @@
-import enum
-from copy import deepcopy
-
-import numpy as np
-from skimage import img_as_ubyte
-from skimage.transform import rescale, resize
-try:
- from detectron2 import model_zoo
- from detectron2.config import get_cfg
- from detectron2.engine import DefaultPredictor
- DETECTRON_INSTALLED = True
-except ImportError:
- print("Detectron v2 is not installed")
- DETECTRON_INSTALLED = False
-
-from .countless.countless2d import zero_corrected_countless
-
-
-class ObjectMask():
- def __init__(self, mask):
- self.height, self.width = mask.shape
- (self.up, self.down), (self.left, self.right) = self._get_limits(mask)
- self.mask = mask[self.up:self.down, self.left:self.right].copy()
-
- @staticmethod
- def _get_limits(mask):
- def indicator_limits(indicator):
- lower = indicator.argmax()
- upper = len(indicator) - indicator[::-1].argmax()
- return lower, upper
-
- vertical_indicator = mask.any(axis=1)
- vertical_limits = indicator_limits(vertical_indicator)
-
- horizontal_indicator = mask.any(axis=0)
- horizontal_limits = indicator_limits(horizontal_indicator)
-
- return vertical_limits, horizontal_limits
-
- def _clean(self):
- self.up, self.down, self.left, self.right = 0, 0, 0, 0
- self.mask = np.empty((0, 0))
-
- def horizontal_flip(self, inplace=False):
- if not inplace:
- flipped = deepcopy(self)
- return flipped.horizontal_flip(inplace=True)
-
- self.mask = self.mask[:, ::-1]
- return self
-
- def vertical_flip(self, inplace=False):
- if not inplace:
- flipped = deepcopy(self)
- return flipped.vertical_flip(inplace=True)
-
- self.mask = self.mask[::-1, :]
- return self
-
- def image_center(self):
- y_center = self.up + (self.down - self.up) / 2
- x_center = self.left + (self.right - self.left) / 2
- return y_center, x_center
-
- def rescale(self, scaling_factor, inplace=False):
- if not inplace:
- scaled = deepcopy(self)
- return scaled.rescale(scaling_factor, inplace=True)
-
- scaled_mask = rescale(self.mask.astype(float), scaling_factor, order=0) > 0.5
- (up, down), (left, right) = self._get_limits(scaled_mask)
- self.mask = scaled_mask[up:down, left:right]
-
- y_center, x_center = self.image_center()
- mask_height, mask_width = self.mask.shape
- self.up = int(round(y_center - mask_height / 2))
- self.down = self.up + mask_height
- self.left = int(round(x_center - mask_width / 2))
- self.right = self.left + mask_width
- return self
-
- def crop_to_canvas(self, vertical=True, horizontal=True, inplace=False):
- if not inplace:
- cropped = deepcopy(self)
- cropped.crop_to_canvas(vertical=vertical, horizontal=horizontal, inplace=True)
- return cropped
-
- if vertical:
- if self.up >= self.height or self.down <= 0:
- self._clean()
- else:
- cut_up, cut_down = max(-self.up, 0), max(self.down - self.height, 0)
- if cut_up != 0:
- self.mask = self.mask[cut_up:]
- self.up = 0
- if cut_down != 0:
- self.mask = self.mask[:-cut_down]
- self.down = self.height
-
- if horizontal:
- if self.left >= self.width or self.right <= 0:
- self._clean()
- else:
- cut_left, cut_right = max(-self.left, 0), max(self.right - self.width, 0)
- if cut_left != 0:
- self.mask = self.mask[:, cut_left:]
- self.left = 0
- if cut_right != 0:
- self.mask = self.mask[:, :-cut_right]
- self.right = self.width
-
- return self
-
- def restore_full_mask(self, allow_crop=False):
- cropped = self.crop_to_canvas(inplace=allow_crop)
- mask = np.zeros((cropped.height, cropped.width), dtype=bool)
- mask[cropped.up:cropped.down, cropped.left:cropped.right] = cropped.mask
- return mask
-
- def shift(self, vertical=0, horizontal=0, inplace=False):
- if not inplace:
- shifted = deepcopy(self)
- return shifted.shift(vertical=vertical, horizontal=horizontal, inplace=True)
-
- self.up += vertical
- self.down += vertical
- self.left += horizontal
- self.right += horizontal
- return self
-
- def area(self):
- return self.mask.sum()
-
-
-class RigidnessMode(enum.Enum):
- soft = 0
- rigid = 1
-
-
-class SegmentationMask:
- def __init__(self, confidence_threshold=0.5, rigidness_mode=RigidnessMode.rigid,
- max_object_area=0.3, min_mask_area=0.02, downsample_levels=6, num_variants_per_mask=4,
- max_mask_intersection=0.5, max_foreground_coverage=0.5, max_foreground_intersection=0.5,
- max_hidden_area=0.2, max_scale_change=0.25, horizontal_flip=True,
- max_vertical_shift=0.1, position_shuffle=True):
- """
- :param confidence_threshold: float; confidence threshold the panoptic segmentation model must exceed
- for an instance to be kept.
- :param rigidness_mode: RigidnessMode object
- when soft, checks intersection only with the object from which the mask_object was produced
- when rigid, checks intersection with any foreground class object
- :param max_object_area: float; upper bound on an object's area, as a fraction of the image, for it to be considered as mask_object.
- :param min_mask_area: float; lower bound for mask to be considered valid
- :param downsample_levels: int; defines width of the resized segmentation to obtain shifted masks;
- :param num_variants_per_mask: int; maximal number of the masks for the same object;
- :param max_mask_intersection: float; maximum allowed area fraction of intersection for 2 masks
- produced by horizontal shift of the same mask_object; higher value -> more diversity
- :param max_foreground_coverage: float; maximum allowed area fraction of intersection for foreground object to be
- covered by mask; lower value -> less the objects are covered
- :param max_foreground_intersection: float; maximum allowed area of intersection for the mask with foreground
- object; lower value -> mask is more on the background than on the objects
- :param max_hidden_area: upper bound on part of the object hidden by shifting object outside the screen area;
- :param max_scale_change: allowed scale change for the mask_object;
- :param horizontal_flip: if horizontal flips are allowed;
- :param max_vertical_shift: amount of vertical movement allowed;
- :param position_shuffle: bool; whether to shuffle the candidate horizontal shift positions.
- """
-
- assert DETECTRON_INSTALLED, 'Cannot use SegmentationMask without detectron2'
- self.cfg = get_cfg()
- self.cfg.merge_from_file(model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"))
- self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")
- self.cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold
- self.predictor = DefaultPredictor(self.cfg)
-
- self.rigidness_mode = RigidnessMode(rigidness_mode)
- self.max_object_area = max_object_area
- self.min_mask_area = min_mask_area
- self.downsample_levels = downsample_levels
- self.num_variants_per_mask = num_variants_per_mask
- self.max_mask_intersection = max_mask_intersection
- self.max_foreground_coverage = max_foreground_coverage
- self.max_foreground_intersection = max_foreground_intersection
- self.max_hidden_area = max_hidden_area
- self.position_shuffle = position_shuffle
-
- self.max_scale_change = max_scale_change
- self.horizontal_flip = horizontal_flip
- self.max_vertical_shift = max_vertical_shift
-
- def get_segmentation(self, img):
- im = img_as_ubyte(img)
- panoptic_seg, segment_info = self.predictor(im)["panoptic_seg"]
- return panoptic_seg, segment_info
-
- @staticmethod
- def _is_power_of_two(n):
- return (n != 0) and (n & (n-1) == 0)
-
- def identify_candidates(self, panoptic_seg, segments_info):
- potential_mask_ids = []
- for segment in segments_info:
- if not segment["isthing"]:
- continue
- mask = (panoptic_seg == segment["id"]).int().detach().cpu().numpy()
- area = mask.sum().item() / np.prod(panoptic_seg.shape)
- if area >= self.max_object_area:
- continue
- potential_mask_ids.append(segment["id"])
- return potential_mask_ids
-
- def downsample_mask(self, mask):
- height, width = mask.shape
- if not (self._is_power_of_two(height) and self._is_power_of_two(width)):
- raise ValueError("Image sides are not power of 2.")
-
- num_iterations = width.bit_length() - 1 - self.downsample_levels
- if num_iterations < 0:
- raise ValueError(f"Width is lower than 2^{self.downsample_levels}.")
-
- if height.bit_length() - 1 < num_iterations:
- raise ValueError("Height is too low to perform downsampling")
-
- downsampled = mask
- for _ in range(num_iterations):
- downsampled = zero_corrected_countless(downsampled)
-
- return downsampled
-
- def _augmentation_params(self):
- scaling_factor = np.random.uniform(1 - self.max_scale_change, 1 + self.max_scale_change)
- if self.horizontal_flip:
- horizontal_flip = bool(np.random.choice(2))
- else:
- horizontal_flip = False
- vertical_shift = np.random.uniform(-self.max_vertical_shift, self.max_vertical_shift)
-
- return {
- "scaling_factor": scaling_factor,
- "horizontal_flip": horizontal_flip,
- "vertical_shift": vertical_shift
- }
-
- def _get_intersection(self, mask_array, mask_object):
- intersection = mask_array[
- mask_object.up:mask_object.down, mask_object.left:mask_object.right
- ] & mask_object.mask
- return intersection
-
- def _check_masks_intersection(self, aug_mask, total_mask_area, prev_masks):
- for existing_mask in prev_masks:
- intersection_area = self._get_intersection(existing_mask, aug_mask).sum()
- intersection_existing = intersection_area / existing_mask.sum()
- intersection_current = 1 - (aug_mask.area() - intersection_area) / total_mask_area
- if (intersection_existing > self.max_mask_intersection) or \
- (intersection_current > self.max_mask_intersection):
- return False
- return True
-
- def _check_foreground_intersection(self, aug_mask, foreground):
- for existing_mask in foreground:
- intersection_area = self._get_intersection(existing_mask, aug_mask).sum()
- intersection_existing = intersection_area / existing_mask.sum()
- if intersection_existing > self.max_foreground_coverage:
- return False
- intersection_mask = intersection_area / aug_mask.area()
- if intersection_mask > self.max_foreground_intersection:
- return False
- return True
-
- def _move_mask(self, mask, foreground):
- # Obtaining properties of the original mask_object:
- orig_mask = ObjectMask(mask)
-
- chosen_masks = []
- chosen_parameters = []
- # to fix the case when resizing gives mask_object consisting only of False
- scaling_factor_lower_bound = 0.
-
- for var_idx in range(self.num_variants_per_mask):
- # Obtaining augmentation parameters and applying them to the downscaled mask_object
- augmentation_params = self._augmentation_params()
- augmentation_params["scaling_factor"] = min([
- augmentation_params["scaling_factor"],
- 2 * min(orig_mask.up, orig_mask.height - orig_mask.down) / orig_mask.height + 1.,
- 2 * min(orig_mask.left, orig_mask.width - orig_mask.right) / orig_mask.width + 1.
- ])
- augmentation_params["scaling_factor"] = max([
- augmentation_params["scaling_factor"], scaling_factor_lower_bound
- ])
-
- aug_mask = deepcopy(orig_mask)
- aug_mask.rescale(augmentation_params["scaling_factor"], inplace=True)
- if augmentation_params["horizontal_flip"]:
- aug_mask.horizontal_flip(inplace=True)
- total_aug_area = aug_mask.area()
- if total_aug_area == 0:
- scaling_factor_lower_bound = 1.
- continue
-
- # Fix if the element vertical shift is too strong and shown area is too small:
- vertical_area = aug_mask.mask.sum(axis=1) / total_aug_area # share of area taken by rows
- # number of rows which are allowed to be hidden from upper and lower parts of image respectively
- max_hidden_up = np.searchsorted(vertical_area.cumsum(), self.max_hidden_area)
- max_hidden_down = np.searchsorted(vertical_area[::-1].cumsum(), self.max_hidden_area)
- # correcting vertical shift, so not too much area will be hidden
- augmentation_params["vertical_shift"] = np.clip(
- augmentation_params["vertical_shift"],
- -(aug_mask.up + max_hidden_up) / aug_mask.height,
- (aug_mask.height - aug_mask.down + max_hidden_down) / aug_mask.height
- )
- # Applying vertical shift:
- vertical_shift = int(round(aug_mask.height * augmentation_params["vertical_shift"]))
- aug_mask.shift(vertical=vertical_shift, inplace=True)
- aug_mask.crop_to_canvas(vertical=True, horizontal=False, inplace=True)
-
- # Choosing horizontal shift:
- max_hidden_area = self.max_hidden_area - (1 - aug_mask.area() / total_aug_area)
- horizontal_area = aug_mask.mask.sum(axis=0) / total_aug_area
- max_hidden_left = np.searchsorted(horizontal_area.cumsum(), max_hidden_area)
- max_hidden_right = np.searchsorted(horizontal_area[::-1].cumsum(), max_hidden_area)
- allowed_shifts = np.arange(-max_hidden_left, aug_mask.width -
- (aug_mask.right - aug_mask.left) + max_hidden_right + 1)
- allowed_shifts = - (aug_mask.left - allowed_shifts)
-
- if self.position_shuffle:
- np.random.shuffle(allowed_shifts)
-
- mask_is_found = False
- for horizontal_shift in allowed_shifts:
- aug_mask_left = deepcopy(aug_mask)
- aug_mask_left.shift(horizontal=horizontal_shift, inplace=True)
- aug_mask_left.crop_to_canvas(inplace=True)
-
- prev_masks = [mask] + chosen_masks
- is_mask_suitable = self._check_masks_intersection(aug_mask_left, total_aug_area, prev_masks) & \
- self._check_foreground_intersection(aug_mask_left, foreground)
- if is_mask_suitable:
- aug_draw = aug_mask_left.restore_full_mask()
- chosen_masks.append(aug_draw)
- augmentation_params["horizontal_shift"] = horizontal_shift / aug_mask_left.width
- chosen_parameters.append(augmentation_params)
- mask_is_found = True
- break
-
- if not mask_is_found:
- break
-
- return chosen_parameters
-
- def _prepare_mask(self, mask):
- height, width = mask.shape
- target_width = width if self._is_power_of_two(width) else (1 << width.bit_length())
- target_height = height if self._is_power_of_two(height) else (1 << height.bit_length())
-
- return resize(mask.astype('float32'), (target_height, target_width), order=0, mode='edge').round().astype('int32')
-
- def get_masks(self, im, return_panoptic=False):
- panoptic_seg, segments_info = self.get_segmentation(im)
- potential_mask_ids = self.identify_candidates(panoptic_seg, segments_info)
-
- panoptic_seg_scaled = self._prepare_mask(panoptic_seg.detach().cpu().numpy())
- downsampled = self.downsample_mask(panoptic_seg_scaled)
- scene_objects = []
- for segment in segments_info:
- if not segment["isthing"]:
- continue
- mask = downsampled == segment["id"]
- if not np.any(mask):
- continue
- scene_objects.append(mask)
-
- mask_set = []
- for mask_id in potential_mask_ids:
- mask = downsampled == mask_id
- if not np.any(mask):
- continue
-
- if self.rigidness_mode is RigidnessMode.soft:
- foreground = [mask]
- elif self.rigidness_mode is RigidnessMode.rigid:
- foreground = scene_objects
- else:
- raise ValueError(f'Unexpected rigidness_mode: {self.rigidness_mode}')
-
- masks_params = self._move_mask(mask, foreground)
-
- full_mask = ObjectMask((panoptic_seg == mask_id).detach().cpu().numpy())
-
- for params in masks_params:
- aug_mask = deepcopy(full_mask)
- aug_mask.rescale(params["scaling_factor"], inplace=True)
- if params["horizontal_flip"]:
- aug_mask.horizontal_flip(inplace=True)
-
- vertical_shift = int(round(aug_mask.height * params["vertical_shift"]))
- horizontal_shift = int(round(aug_mask.width * params["horizontal_shift"]))
- aug_mask.shift(vertical=vertical_shift, horizontal=horizontal_shift, inplace=True)
- aug_mask = aug_mask.restore_full_mask().astype('uint8')
- if aug_mask.mean() <= self.min_mask_area:
- continue
- mask_set.append(aug_mask)
-
- if return_panoptic:
- return mask_set, panoptic_seg.detach().cpu().numpy()
- else:
- return mask_set
-
-
-def propose_random_square_crop(mask, min_overlap=0.5):
- height, width = mask.shape
- mask_ys, mask_xs = np.where(mask > 0.5) # mask==0 is known fragment and mask==1 is missing
-
- if height < width:
- crop_size = height
- obj_left, obj_right = mask_xs.min(), mask_xs.max()
- obj_width = obj_right - obj_left
- left_border = max(0, min(width - crop_size - 1, obj_left + obj_width * min_overlap - crop_size))
- right_border = max(left_border + 1, min(width - crop_size, obj_left + obj_width * min_overlap))
- start_x = np.random.randint(left_border, right_border)
- return start_x, 0, start_x + crop_size, height
- else:
- crop_size = width
- obj_top, obj_bottom = mask_ys.min(), mask_ys.max()
- obj_height = obj_bottom - obj_top
- top_border = max(0, min(height - crop_size - 1, obj_top + obj_height * min_overlap - crop_size))
- bottom_border = max(top_border + 1, min(height - crop_size, obj_top + obj_height * min_overlap))
- start_y = np.random.randint(top_border, bottom_border)
- return 0, start_y, width, start_y + crop_size
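
A brief, hedged sketch of how the mask-proposal pipeline above might be driven end to end; it assumes detectron2 is installed (the constructor asserts otherwise) and the image path is a placeholder.

```python
# Hedged usage sketch; requires detectron2 and a real image path.
from skimage.io import imread

mask_gen = SegmentationMask(rigidness_mode=RigidnessMode.rigid, num_variants_per_mask=2)
img = imread("example.jpg") / 255.0              # float RGB image in [0, 1]
masks = mask_gen.get_masks(img)                  # list of uint8 mask proposals
print(f"{len(masks)} mask proposals")
if masks:
    x0, y0, x1, y1 = propose_random_square_crop(masks[0], min_overlap=0.5)
    square = masks[0][y0:y1, x0:x1]              # square crop overlapping the mask
```
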
diff --git a/spaces/fuckyoudeki/AutoGPT/autogpt/memory/redismem.py b/spaces/fuckyoudeki/AutoGPT/autogpt/memory/redismem.py
deleted file mode 100644
index 082a812c5362cc9f19e35bf1bb10269b558f7724..0000000000000000000000000000000000000000
--- a/spaces/fuckyoudeki/AutoGPT/autogpt/memory/redismem.py
+++ /dev/null
@@ -1,156 +0,0 @@
-"""Redis memory provider."""
-from __future__ import annotations
-
-from typing import Any
-
-import numpy as np
-import redis
-from colorama import Fore, Style
-from redis.commands.search.field import TextField, VectorField
-from redis.commands.search.indexDefinition import IndexDefinition, IndexType
-from redis.commands.search.query import Query
-
-from autogpt.llm_utils import create_embedding_with_ada
-from autogpt.logs import logger
-from autogpt.memory.base import MemoryProviderSingleton
-
-SCHEMA = [
- TextField("data"),
- VectorField(
- "embedding",
- "HNSW",
- {"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"},
- ),
-]
-
-
-class RedisMemory(MemoryProviderSingleton):
- def __init__(self, cfg):
- """
- Initializes the Redis memory provider.
-
- Args:
- cfg: The config object.
-
- Returns: None
- """
- redis_host = cfg.redis_host
- redis_port = cfg.redis_port
- redis_password = cfg.redis_password
- self.dimension = 1536
- self.redis = redis.Redis(
- host=redis_host,
- port=redis_port,
- password=redis_password,
- db=0, # Cannot be changed
- )
- self.cfg = cfg
-
- # Check redis connection
- try:
- self.redis.ping()
- except redis.ConnectionError as e:
- logger.typewriter_log(
- "FAILED TO CONNECT TO REDIS",
- Fore.RED,
- Style.BRIGHT + str(e) + Style.RESET_ALL,
- )
- logger.double_check(
- "Please ensure you have setup and configured Redis properly for use. "
- + f"You can check out {Fore.CYAN + Style.BRIGHT}"
- f"https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL}"
- " to ensure you've set up everything correctly."
- )
- exit(1)
-
- if cfg.wipe_redis_on_start:
- self.redis.flushall()
- try:
- self.redis.ft(f"{cfg.memory_index}").create_index(
- fields=SCHEMA,
- definition=IndexDefinition(
- prefix=[f"{cfg.memory_index}:"], index_type=IndexType.HASH
- ),
- )
- except Exception as e:
- print("Error creating Redis search index: ", e)
- existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
- self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0
-
- def add(self, data: str) -> str:
- """
- Adds a data point to the memory.
-
- Args:
- data: The data to add.
-
- Returns: Message indicating that the data has been added.
- """
- if "Command Error:" in data:
- return ""
- vector = create_embedding_with_ada(data)
- vector = np.array(vector).astype(np.float32).tobytes()
- data_dict = {b"data": data, "embedding": vector}
- pipe = self.redis.pipeline()
- pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
- _text = (
- f"Inserting data into memory at index: {self.vec_num}:\n" f"data: {data}"
- )
- self.vec_num += 1
- pipe.set(f"{self.cfg.memory_index}-vec_num", self.vec_num)
- pipe.execute()
- return _text
-
- def get(self, data: str) -> list[Any] | None:
- """
- Gets the data from the memory that is most relevant to the given data.
-
- Args:
- data: The data to compare to.
-
- Returns: The most relevant data.
- """
- return self.get_relevant(data, 1)
-
- def clear(self) -> str:
- """
- Clears the redis server.
-
- Returns: A message indicating that the memory has been cleared.
- """
- self.redis.flushall()
- return "Obliviated"
-
- def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
- """
- Returns all the data in the memory that is relevant to the given data.
- Args:
- data: The data to compare to.
- num_relevant: The number of relevant data to return.
-
- Returns: A list of the most relevant data.
- """
- query_embedding = create_embedding_with_ada(data)
- base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
- query = (
- Query(base_query)
- .return_fields("data", "vector_score")
- .sort_by("vector_score")
- .dialect(2)
- )
- query_vector = np.array(query_embedding).astype(np.float32).tobytes()
-
- try:
- results = self.redis.ft(f"{self.cfg.memory_index}").search(
- query, query_params={"vector": query_vector}
- )
- except Exception as e:
- print("Error calling Redis search: ", e)
- return None
- return [result.data for result in results.docs]
-
- def get_stats(self):
- """
- Returns: The stats of the memory index.
- """
- return self.redis.ft(f"{self.cfg.memory_index}").info()
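
A minimal usage sketch for the provider above; the config attributes mirror what `__init__` reads, and it assumes a local Redis Stack instance with RediSearch enabled plus a valid OpenAI key for the ada embeddings.

```python
# Hedged usage sketch; cfg fields match what RedisMemory.__init__ reads above.
from types import SimpleNamespace

cfg = SimpleNamespace(
    redis_host="localhost",
    redis_port=6379,
    redis_password=None,
    wipe_redis_on_start=True,
    memory_index="auto-gpt",
)
memory = RedisMemory(cfg)
memory.add("Paris is the capital of France.")
print(memory.get_relevant("What is the capital of France?", num_relevant=1))
```
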
diff --git a/spaces/gaodrew/constellation/app.py b/spaces/gaodrew/constellation/app.py
deleted file mode 100644
index 60d1a7560165a8ebc3f9549b73ad5f071d7345b7..0000000000000000000000000000000000000000
--- a/spaces/gaodrew/constellation/app.py
+++ /dev/null
@@ -1,290 +0,0 @@
-import streamlit as st
-import numpy as np
-import pandas as pd
-import warnings
-warnings.filterwarnings('ignore')
-import math
-from scipy.cluster.hierarchy import dendrogram, linkage
-from sklearn.feature_extraction.text import TfidfVectorizer
-import itertools
-import plotly.figure_factory as ff
-from community import community_louvain
-import networkx as nx
-from sklearn.metrics.pairwise import cosine_distances
-from sklearn.metrics.pairwise import cosine_similarity
-from sklearn.feature_extraction.text import CountVectorizer
-from sklearn.cluster import AgglomerativeClustering
-from wordcloud import WordCloud
-import plotly.graph_objects as go
-
-
-def create_dendrogram(X, labels):
- Z = linkage(X.toarray(), "single")
- fig = ff.create_dendrogram(Z, orientation='left', labels=labels)
- return fig
-
-@st.cache_data
-def load_data():
- data = pd.read_csv("HuggingFaceLLMsWithParamsAndReadmeLinks.csv")
- return data
-
-df = pd.read_csv("HuggingFaceLLMsWithParamsAndReadmeLinks.csv")
-st.title("Constellation: An Atlas of 15,000 Large Language Models")
-st.write("15,821 to be precise. Scraped from Hugging Face on July 18, 2023.")
-st.write("Please cite: Gao, S., & Gao, A. K. (2023, July 19). On the Origin of LLMs: An Evolutionary Tree and Graph for 15,821 Large Language Models. ArXiv.org; ArXiv. https://doi.org/10.48550/arXiv.2307.09793")
-threshold = st.number_input("Enter the minimum number of downloads an LLM must have to be considered.", value=10000)
-numClusters = st.number_input("Number of clusters to group into.", value=20, min_value=2, max_value=50)
-wordClouds = st.checkbox("Show word clouds?")
-
-def create_downloads_vs_likes_scatter(dataframe):
- # Convert 'likes' column to numeric values
- dataframe['likes'] = pd.to_numeric(dataframe['likes'], errors='coerce')
-
- # Filter out the outlier point at 14M likes
- dataframe_filtered = dataframe[dataframe['likes'] != 14000000]
-
- fig = go.Figure()
- fig.add_trace(go.Scatter(x=dataframe_filtered['downloads'], y=dataframe_filtered['likes'], mode='markers',
- marker=dict(color='blue', size=7, opacity=0.7),
- text=dataframe_filtered['model_name'],
- hovertemplate="Model Name: %{text} Downloads: %{x} Likes: %{y} "))
- fig.update_layout(title='Downloads vs Likes',
- xaxis_title='Downloads',
- #xaxis_range=[0,300000],
- yaxis_title='Likes')
- #yaxis_range=[0, 800]) # Set custom y-axis range
- return fig
-
-
-if st.button("Run Clustering"):
- df_filtered = df[df['downloads'] > threshold]
- df_extra_filtered = df_filtered.drop_duplicates(subset='model_name', keep='first')
-
- # Convert the model names into a matrix of TF-IDF features
- vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(2, 8))
- X = vectorizer.fit_transform(df_extra_filtered['model_name'].tolist()).toarray()
-
- # Function to compute the pairwise cosine distances
- def distfun(X):
- return cosine_distances(X)
-
- # Function to compute the linkage matrix
- def linkagefun(dist_array):
- return linkage(dist_array, "single")
-
- # Create dendrogram
- fig = ff.create_dendrogram(X, orientation='bottom', labels=df_extra_filtered['model_name'].tolist(), distfun=distfun, linkagefun=linkagefun)
- #fig.update_layout(width=800, height=500)
- st.plotly_chart(fig, use_container_width=True)
-
- # Group by cluster
- # Convert the model names into a matrix of token counts
- vectorizer = CountVectorizer(analyzer='char', ngram_range=(3, 6))
- X = vectorizer.fit_transform(df_extra_filtered['model_name'])
- # Use clustering to group model names
- clustering = AgglomerativeClustering(n_clusters=numClusters).fit(X.toarray())  # use the cluster count chosen in the UI
-
- # Add cluster labels to the filtered DataFrame
- df_extra_filtered['cluster'] = clustering.labels_
-
- # Count the number of models in each cluster
- cluster_counts = df_extra_filtered['cluster'].value_counts()
-
- # Create a bar chart
- fig = go.Figure([go.Bar(x=cluster_counts.index, y=cluster_counts.values)])
- fig.update_layout(title='Number of Models per Cluster', xaxis_title='Cluster', yaxis_title='Number of Models')
- st.plotly_chart(fig)
-
- # graphing!
-
- # Convert the model names into a matrix of TF-IDF features
- vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(2, 8))
- X = vectorizer.fit_transform(df_extra_filtered['model_name'])
-
- # Compute the pairwise cosine similarities
- sim_matrix = cosine_similarity(X)
-
- # Create a graph
- G = nx.Graph()
-
- # Add nodes to the graph
- for i in range(len(df_extra_filtered)):
- G.add_node(i, label=df_extra_filtered['model_name'].iloc[i])
-
- # Add edges to the graph
- for i in range(len(df_extra_filtered)):
- for j in range(i+1, len(df_extra_filtered)):
- # If the similarity is above a certain threshold
- if sim_matrix[i, j] > 0.2:
- G.add_edge(i, j, weight=sim_matrix[i, j])
-
- # Compute the layout positions
- pos = nx.spring_layout(G)
-
- # Detect communities
- partition = community_louvain.best_partition(G)
- # Create a figure
- # Compute the layout for each community
- layouts = {}
- for community in set(partition.values()):
- nodes_in_community = [node for node, comm in partition.items() if comm == community]
- subgraph = G.subgraph(nodes_in_community)
- layouts[community] = nx.spring_layout(subgraph)
-
- # Combine the layouts, spreading them out on a grid
- grid_size = math.ceil(math.sqrt(len(layouts))) # Size of the grid
- grid = np.array(list(itertools.product(range(grid_size), repeat=2))) # Coordinates for the grid
- scale = 2 # Scale factor for spreading out the communities
- offsets = dict(zip(layouts, grid*scale)) # Map communities to grid coordinates
-
- combined_layout = {}
- for community, layout in layouts.items():
- for node, position in layout.items():
- combined_layout[node] = position + offsets[community]
-
- # Prepare data for plotly
- x = [combined_layout[node][0] for node in range(len(df_extra_filtered))]
- y = [combined_layout[node][1] for node in range(len(df_extra_filtered))]
-
- # Create a figure
- fig = go.Figure()
-
- # Prepare lists for node positions, labels, ranks, downloads, likes, and params
- x, y, labels, ranks, downloads, likes, params = [], [], [], [], [], [], []
-
- # Prepare the node attributes
- for node, community in partition.items():
- # Get model info
- model_info = df_extra_filtered.iloc[node]
-
- # Node position
- x.append(pos[node][0])
- y.append(pos[node][1])
-
- # Node attributes
- labels.append(model_info['model_name'])
- ranks.append(model_info['rank'])
- downloads.append(model_info['downloads'])
- likes.append(model_info['likes'])
- params.append(model_info['params_millions'] if pd.notnull(model_info['params_millions']) else 'N/A')
-
- # Compute the centroid of each cluster for background coloring
- centroids = dict()
- community_sizes = dict() # Create a dict to store the sizes of each community
- for community in set(partition.values()):
- nodes_in_community = [node for node, comm in partition.items() if comm == community]
- if len(nodes_in_community) > 1: # Only consider communities with more than one node
- centroid_x = np.mean([pos[node][0] for node in nodes_in_community])
- centroid_y = np.mean([pos[node][1] for node in nodes_in_community])
- centroids[community] = (centroid_x, centroid_y)
- community_sizes[community] = len(nodes_in_community)
-
- # Add background coloring for each cluster
- for community, centroid in centroids.items():
- fig.add_trace(go.Scatter(
- x=[centroid[0]], y=[centroid[1]],
- mode='markers',
- marker=dict(
- size=community_sizes[community]*5, # Adjust size by multiplying the community size by a factor
- color=community,
- opacity=0.1
- ),
- hoverinfo='none',
- showlegend=False
- ))
-
- # Add nodes to the figure
- fig.add_trace(go.Scatter(
- x=x, y=y,
- mode='markers',
- marker=dict(size=3, color=community),
- text=labels,
- customdata=np.stack((ranks, downloads, likes, params), axis=-1),
- hovertemplate=(
- "Model Name: %{text} "
- "Rank: %{customdata[0]} "
- "Downloads: %{customdata[1]} "
- "Likes: %{customdata[2]} "
- "Params (millions): %{customdata[3]}"
- " "
- )
- ))
-
- # Add edges to the figure
- for edge in G.edges():
- # Calculate edge weight for line width, normalize it for better visibility
- line_width = G.edges[edge]['weight'] / np.max(list(nx.get_edge_attributes(G, 'weight').values()))
-
- fig.add_trace(go.Scatter(
- x=[pos[edge[0]][0], pos[edge[1]][0]],
- y=[pos[edge[0]][1], pos[edge[1]][1]],
- mode='lines',
- line=dict(width=line_width), # Multiply by a factor for better visibility
- hoverinfo='none'
- ))
-
- # Set the figure layout
- fig.update_layout(showlegend=False, hovermode='closest')
-
- st.plotly_chart(fig)
-
- # Calculate degree of each node
- degrees = dict(G.degree())
-
- # Sort nodes by degree in descending order and get top 20
- top_20_models = sorted(degrees.items(), key=lambda x: x[1], reverse=True)[:20]
-
- # Prepare data for display
- models = [df_extra_filtered.iloc[node]['model_name'] for node, degree in top_20_models]
- connections = [degree for node, degree in top_20_models]
-
- st.subheader("Top 20 Models by Number of Connections")
- for model, connections in zip(models, connections):
- st.write(f"{model}: {connections} connections")
-
-
- # Find the representative model for each community
- representatives = dict()
- for community in set(partition.values()):
- nodes_in_community = [node for node, comm in partition.items() if comm == community]
- # Select the node with the highest degree within the community as representative
- representative = max(nodes_in_community, key=lambda node: degrees[node])
- representatives[community] = df_extra_filtered.iloc[representative]['model_name']
-
- # Prepare data for display
- communities = list(representatives.keys())
- community_sizes = [community_sizes.get(comm, 1) for comm in communities] # Use a default size of 1 for communities not in the dictionary
- representatives = list(representatives.values())
-
- # Create a DataFrame to hold the data
- df_reps = pd.DataFrame({
- 'Community ID': communities,
- 'Size': community_sizes,
- 'Representative Model': representatives
- })
-
- # Sort the DataFrame by community size in descending order
- df_reps.sort_values(by='Size', ascending=False, inplace=True)
-
- # Display in Streamlit
- st.subheader("Representative for each community, sorted by community size.")
- st.dataframe(df_reps)
- if wordClouds:
- groups = df_extra_filtered.groupby('cluster')
-
- for name, group in groups:
- # Join all model names in the cluster into a single string
- text = ' '.join(group['model_name'])
-
- # Generate a word cloud
- wordcloud = WordCloud().generate(text)
-
- # Convert WordCloud to Image
- image = wordcloud.to_image()
-
- # Display the word cloud
- st.image(image, use_column_width=True)
- st.write(f'Word Cloud for Cluster {name}')
-
- scatter_plot = create_downloads_vs_likes_scatter(df_extra_filtered)
- st.plotly_chart(scatter_plot, use_container_width=True)
\ No newline at end of file
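
A self-contained illustration of the similarity measure the app builds its graph from: character 2-8 gram TF-IDF vectors of model names compared with cosine similarity, with edges added above a 0.2 threshold. The model names below are made up for the example.

```python
# Standalone sketch of the name-similarity measure used above; names are illustrative.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

names = ["llama-2-7b-chat", "llama-2-13b-chat", "falcon-7b-instruct"]
X = TfidfVectorizer(analyzer="char", ngram_range=(2, 8)).fit_transform(names)
print(cosine_similarity(X).round(2))  # the two llama variants score well above the 0.2 edge threshold
```
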
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/bricks/swish.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/bricks/swish.py
deleted file mode 100644
index e2ca8ed7b749413f011ae54aac0cab27e6f0b51f..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/bricks/swish.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-
-from .registry import ACTIVATION_LAYERS
-
-
-@ACTIVATION_LAYERS.register_module()
-class Swish(nn.Module):
- """Swish Module.
-
- This module applies the swish function:
-
- .. math::
- Swish(x) = x * Sigmoid(x)
-
- Returns:
- Tensor: The output tensor.
- """
-
- def __init__(self):
- super(Swish, self).__init__()
-
- def forward(self, x):
- return x * torch.sigmoid(x)
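
A quick numeric sanity check of the activation defined above: Swish is zero at zero and approaches the identity for large positive inputs.

```python
# swish(x) = x * sigmoid(x); these values match the registered module above.
import torch

x = torch.tensor([-2.0, 0.0, 2.0])
print(x * torch.sigmoid(x))  # tensor([-0.2384,  0.0000,  1.7616])
```
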
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/visualization/optflow.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/visualization/optflow.py
deleted file mode 100644
index c3870c700f7c946177ee5d536ce3f6c814a77ce7..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/visualization/optflow.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from __future__ import division
-
-import numpy as np
-
-from annotator.uniformer.mmcv.image import rgb2bgr
-from annotator.uniformer.mmcv.video import flowread
-from .image import imshow
-
-
-def flowshow(flow, win_name='', wait_time=0):
- """Show optical flow.
-
- Args:
- flow (ndarray or str): The optical flow to be displayed.
- win_name (str): The window name.
- wait_time (int): Value of waitKey param.
- """
- flow = flowread(flow)
- flow_img = flow2rgb(flow)
- imshow(rgb2bgr(flow_img), win_name, wait_time)
-
-
-def flow2rgb(flow, color_wheel=None, unknown_thr=1e6):
- """Convert flow map to RGB image.
-
- Args:
- flow (ndarray): Array of optical flow.
- color_wheel (ndarray or None): Color wheel used to map flow field to
- RGB colorspace. Default color wheel will be used if not specified.
- unknown_thr (float): Values above this threshold will be marked as
- unknown and thus ignored.
-
- Returns:
- ndarray: RGB image that can be visualized.
- """
- assert flow.ndim == 3 and flow.shape[-1] == 2
- if color_wheel is None:
- color_wheel = make_color_wheel()
- assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
- num_bins = color_wheel.shape[0]
-
- dx = flow[:, :, 0].copy()
- dy = flow[:, :, 1].copy()
-
- ignore_inds = (
- np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |
- (np.abs(dy) > unknown_thr))
- dx[ignore_inds] = 0
- dy[ignore_inds] = 0
-
- rad = np.sqrt(dx**2 + dy**2)
- if np.any(rad > np.finfo(float).eps):
- max_rad = np.max(rad)
- dx /= max_rad
- dy /= max_rad
-
- rad = np.sqrt(dx**2 + dy**2)
- angle = np.arctan2(-dy, -dx) / np.pi
-
- bin_real = (angle + 1) / 2 * (num_bins - 1)
- bin_left = np.floor(bin_real).astype(int)
- bin_right = (bin_left + 1) % num_bins
- w = (bin_real - bin_left.astype(np.float32))[..., None]
- flow_img = (1 -
- w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
- small_ind = rad <= 1
- flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
- flow_img[np.logical_not(small_ind)] *= 0.75
-
- flow_img[ignore_inds, :] = 0
-
- return flow_img
-
-
-def make_color_wheel(bins=None):
- """Build a color wheel.
-
- Args:
- bins(list or tuple, optional): Specify the number of bins for each
- color range, corresponding to six ranges: red -> yellow,
- yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,
- magenta -> red. [15, 6, 4, 11, 13, 6] is used for default
- (see Middlebury).
-
- Returns:
- ndarray: Color wheel of shape (total_bins, 3).
- """
- if bins is None:
- bins = [15, 6, 4, 11, 13, 6]
- assert len(bins) == 6
-
- RY, YG, GC, CB, BM, MR = tuple(bins)
-
- ry = [1, np.arange(RY) / RY, 0]
- yg = [1 - np.arange(YG) / YG, 1, 0]
- gc = [0, 1, np.arange(GC) / GC]
- cb = [0, 1 - np.arange(CB) / CB, 1]
- bm = [np.arange(BM) / BM, 0, 1]
- mr = [1, 0, 1 - np.arange(MR) / MR]
-
- num_bins = RY + YG + GC + CB + BM + MR
-
- color_wheel = np.zeros((3, num_bins), dtype=np.float32)
-
- col = 0
- for i, color in enumerate([ry, yg, gc, cb, bm, mr]):
- for j in range(3):
- color_wheel[j, col:col + bins[i]] = color[j]
- col += bins[i]
-
- return color_wheel.T
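
A small check of the helper above, assuming this module is importable: with the default Middlebury bins [15, 6, 4, 11, 13, 6] the wheel has 55 rows of RGB values in [0, 1].

```python
# Assumes make_color_wheel from the module above is importable.
wheel = make_color_wheel()       # default bins [15, 6, 4, 11, 13, 6]
print(wheel.shape)               # (55, 3)
print(wheel.min(), wheel.max())  # 0.0 1.0
```
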
diff --git a/spaces/glyszt/vt/vtoonify/model/raft/core/utils/flow_viz.py b/spaces/glyszt/vt/vtoonify/model/raft/core/utils/flow_viz.py
deleted file mode 100644
index dcee65e89b91b07ee0496aeb4c7e7436abf99641..0000000000000000000000000000000000000000
--- a/spaces/glyszt/vt/vtoonify/model/raft/core/utils/flow_viz.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization
-
-
-# MIT License
-#
-# Copyright (c) 2018 Tom Runia
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to conditions.
-#
-# Author: Tom Runia
-# Date Created: 2018-08-03
-
-import numpy as np
-
-def make_colorwheel():
- """
- Generates a color wheel for optical flow visualization as presented in:
- Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
- URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
-
- Code follows the original C++ source code of Daniel Scharstein.
- Code follows the Matlab source code of Deqing Sun.
-
- Returns:
- np.ndarray: Color wheel
- """
-
- RY = 15
- YG = 6
- GC = 4
- CB = 11
- BM = 13
- MR = 6
-
- ncols = RY + YG + GC + CB + BM + MR
- colorwheel = np.zeros((ncols, 3))
- col = 0
-
- # RY
- colorwheel[0:RY, 0] = 255
- colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
- col = col+RY
- # YG
- colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
- colorwheel[col:col+YG, 1] = 255
- col = col+YG
- # GC
- colorwheel[col:col+GC, 1] = 255
- colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
- col = col+GC
- # CB
- colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
- colorwheel[col:col+CB, 2] = 255
- col = col+CB
- # BM
- colorwheel[col:col+BM, 2] = 255
- colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
- col = col+BM
- # MR
- colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
- colorwheel[col:col+MR, 0] = 255
- return colorwheel
-
-
-def flow_uv_to_colors(u, v, convert_to_bgr=False):
- """
- Applies the flow color wheel to (possibly clipped) flow components u and v.
-
- According to the C++ source code of Daniel Scharstein
- According to the Matlab source code of Deqing Sun
-
- Args:
- u (np.ndarray): Input horizontal flow of shape [H,W]
- v (np.ndarray): Input vertical flow of shape [H,W]
- convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
-
- Returns:
- np.ndarray: Flow visualization image of shape [H,W,3]
- """
- flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
- colorwheel = make_colorwheel() # shape [55x3]
- ncols = colorwheel.shape[0]
- rad = np.sqrt(np.square(u) + np.square(v))
- a = np.arctan2(-v, -u)/np.pi
- fk = (a+1) / 2*(ncols-1)
- k0 = np.floor(fk).astype(np.int32)
- k1 = k0 + 1
- k1[k1 == ncols] = 0
- f = fk - k0
- for i in range(colorwheel.shape[1]):
- tmp = colorwheel[:,i]
- col0 = tmp[k0] / 255.0
- col1 = tmp[k1] / 255.0
- col = (1-f)*col0 + f*col1
- idx = (rad <= 1)
- col[idx] = 1 - rad[idx] * (1-col[idx])
- col[~idx] = col[~idx] * 0.75 # out of range
- # Note the 2-i => BGR instead of RGB
- ch_idx = 2-i if convert_to_bgr else i
- flow_image[:,:,ch_idx] = np.floor(255 * col)
- return flow_image
-
-
-def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
- """
- Expects a two dimensional flow image of shape [H,W,2].
-
- Args:
- flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
- clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
- convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
-
- Returns:
- np.ndarray: Flow visualization image of shape [H,W,3]
- """
- assert flow_uv.ndim == 3, 'input flow must have three dimensions'
- assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
- if clip_flow is not None:
- flow_uv = np.clip(flow_uv, 0, clip_flow)
- u = flow_uv[:,:,0]
- v = flow_uv[:,:,1]
- rad = np.sqrt(np.square(u) + np.square(v))
- rad_max = np.max(rad)
- epsilon = 1e-5
- u = u / (rad_max + epsilon)
- v = v / (rad_max + epsilon)
- return flow_uv_to_colors(u, v, convert_to_bgr)
\ No newline at end of file
diff --git a/spaces/gradio/HuBERT/README.md b/spaces/gradio/HuBERT/README.md
deleted file mode 100644
index d2fcd88ac2ddb25290093f6443e72b9c1dcffa96..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: HuBERT
-emoji: 🦀
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/gradio/HuBERT/fairseq/data/audio/feature_transforms/global_cmvn.py b/spaces/gradio/HuBERT/fairseq/data/audio/feature_transforms/global_cmvn.py
deleted file mode 100644
index e457ff176fee3b996da11f47e7dc61b81c445ba3..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/fairseq/data/audio/feature_transforms/global_cmvn.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import numpy as np
-from fairseq.data.audio.feature_transforms import (
- AudioFeatureTransform,
- register_audio_feature_transform,
-)
-
-
-@register_audio_feature_transform("global_cmvn")
-class GlobalCMVN(AudioFeatureTransform):
- """Global CMVN (cepstral mean and variance normalization). The global mean
- and variance need to be pre-computed and stored in NumPy format (.npz)."""
-
- @classmethod
- def from_config_dict(cls, config=None):
- _config = {} if config is None else config
- return GlobalCMVN(_config.get("stats_npz_path"))
-
- def __init__(self, stats_npz_path):
- self.stats_npz_path = stats_npz_path
- stats = np.load(stats_npz_path)
- self.mean, self.std = stats["mean"], stats["std"]
-
- def __repr__(self):
- return self.__class__.__name__ + f'(stats_npz_path="{self.stats_npz_path}")'
-
- def __call__(self, x):
- x = np.subtract(x, self.mean)
- x = np.divide(x, self.std)
- return x
diff --git a/spaces/gradio/HuBERT/fairseq/modules/sparse_transformer_sentence_encoder.py b/spaces/gradio/HuBERT/fairseq/modules/sparse_transformer_sentence_encoder.py
deleted file mode 100644
index f41ec09327fe80b50d20674e7482794ce45c531c..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/fairseq/modules/sparse_transformer_sentence_encoder.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch.nn as nn
-from fairseq.modules import TransformerSentenceEncoder
-from fairseq.modules.sparse_transformer_sentence_encoder_layer import (
- SparseTransformerSentenceEncoderLayer,
-)
-
-
-class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
- """
- Sparse implementation of the TransformerSentenceEncoder
- - see SparseMultiheadAttention
- """
-
- def __init__(
- self,
- padding_idx: int,
- vocab_size: int,
- num_encoder_layers: int = 6,
- embedding_dim: int = 768,
- ffn_embedding_dim: int = 3072,
- num_attention_heads: int = 8,
- dropout: float = 0.1,
- attention_dropout: float = 0.1,
- activation_dropout: float = 0.1,
- max_seq_len: int = 256,
- num_segments: int = 2,
- use_position_embeddings: bool = True,
- offset_positions_by_padding: bool = True,
- encoder_normalize_before: bool = False,
- apply_bert_init: bool = False,
- activation_fn: str = "relu",
- learned_pos_embedding: bool = True,
- embed_scale: float = None,
- freeze_embeddings: bool = False,
- n_trans_layers_to_freeze: int = 0,
- export: bool = False,
- is_bidirectional: bool = True,
- stride: int = 32,
- expressivity: int = 8,
- ) -> None:
-
- super().__init__(
- padding_idx,
- vocab_size,
- num_encoder_layers,
- embedding_dim,
- ffn_embedding_dim,
- num_attention_heads,
- dropout,
- attention_dropout,
- activation_dropout,
- max_seq_len,
- num_segments,
- use_position_embeddings,
- offset_positions_by_padding,
- encoder_normalize_before,
- apply_bert_init,
- activation_fn,
- learned_pos_embedding,
- embed_scale,
- freeze_embeddings,
- n_trans_layers_to_freeze,
- export,
- )
-
- self.layers = nn.ModuleList(
- [
- SparseTransformerSentenceEncoderLayer(
- embedding_dim=self.embedding_dim,
- ffn_embedding_dim=ffn_embedding_dim,
- num_attention_heads=num_attention_heads,
- dropout=dropout,
- attention_dropout=attention_dropout,
- activation_dropout=activation_dropout,
- activation_fn=activation_fn,
- export=export,
- is_bidirectional=is_bidirectional,
- stride=stride,
- expressivity=expressivity,
- )
- for _ in range(num_encoder_layers)
- ]
- )
-
- def freeze_module_params(m):
- if m is not None:
- for p in m.parameters():
- p.requires_grad = False
-
- for layer in range(n_trans_layers_to_freeze):
- freeze_module_params(self.layers[layer])
diff --git a/spaces/gradio/HuBERT/scripts/shard_docs.py b/spaces/gradio/HuBERT/scripts/shard_docs.py
deleted file mode 100644
index 97232c3c845ee01dc5ab627388934cc0f9588280..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/scripts/shard_docs.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-"""
-Split a large file into shards while respecting document boundaries. Documents
-should be separated by a single empty line.
-"""
-
-import argparse
-import contextlib
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument("input")
- parser.add_argument("--num-shards", type=int)
- args = parser.parse_args()
-
- assert args.num_shards is not None and args.num_shards > 1
-
- with open(args.input, "r", encoding="utf-8") as h:
- with contextlib.ExitStack() as stack:
- outputs = [
- stack.enter_context(
- open(args.input + ".shard" + str(i), "w", encoding="utf-8")
- )
- for i in range(args.num_shards)
- ]
-
- doc = []
- first_doc = [True] * args.num_shards
-
- def output_doc(i):
- if not first_doc[i]:
- outputs[i].write("\n")
- first_doc[i] = False
- for line in doc:
- outputs[i].write(line)
- doc.clear()
-
- num_docs = 0
- for line in h:
- if line.strip() == "": # empty line indicates new document
- output_doc(num_docs % args.num_shards)
- num_docs += 1
- else:
- doc.append(line)
- output_doc(num_docs % args.num_shards)
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/gradio/video_identity/README.md b/spaces/gradio/video_identity/README.md
deleted file mode 100644
index 45bc2d80298a589e5bd94200c3d4530d409b4f74..0000000000000000000000000000000000000000
--- a/spaces/gradio/video_identity/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
----
-title: video_identity
-emoji: 🔥
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-sdk_version: 4.1.2
-app_file: run.py
-pinned: false
-hf_oauth: true
----
diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/models/arcface_torch/torch2onnx.py b/spaces/gwang-kim/DATID-3D/pose_estimation/models/arcface_torch/torch2onnx.py
deleted file mode 100644
index fc26ab82e552331bc8d75b34e81000418f4d38ec..0000000000000000000000000000000000000000
--- a/spaces/gwang-kim/DATID-3D/pose_estimation/models/arcface_torch/torch2onnx.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import numpy as np
-import onnx
-import torch
-
-
-def convert_onnx(net, path_module, output, opset=11, simplify=False):
- assert isinstance(net, torch.nn.Module)
- img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
- img = img.astype(np.float32)  # np.float was removed in newer NumPy versions
- img = (img / 255. - 0.5) / 0.5 # torch style norm
- img = img.transpose((2, 0, 1))
- img = torch.from_numpy(img).unsqueeze(0).float()
-
- weight = torch.load(path_module)
- net.load_state_dict(weight)
- net.eval()
- torch.onnx.export(net, img, output, keep_initializers_as_inputs=False, verbose=False, opset_version=opset)
- model = onnx.load(output)
- graph = model.graph
- graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
- if simplify:
- from onnxsim import simplify
- model, check = simplify(model)
- assert check, "Simplified ONNX model could not be validated"
- onnx.save(model, output)
-
-
-if __name__ == '__main__':
- import os
- import argparse
- from backbones import get_model
-
- parser = argparse.ArgumentParser(description='ArcFace PyTorch to onnx')
- parser.add_argument('input', type=str, help='input backbone.pth file or path')
- parser.add_argument('--output', type=str, default=None, help='output onnx path')
- parser.add_argument('--network', type=str, default=None, help='backbone network')
- parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify')
- args = parser.parse_args()
- input_file = args.input
- if os.path.isdir(input_file):
- input_file = os.path.join(input_file, "backbone.pth")
- assert os.path.exists(input_file)
- model_name = os.path.basename(os.path.dirname(input_file)).lower()
- params = model_name.split("_")
- if len(params) >= 3 and params[1] in ('arcface', 'cosface'):
- if args.network is None:
- args.network = params[2]
- assert args.network is not None
- print(args)
- backbone_onnx = get_model(args.network, dropout=0)
-
- output_path = args.output
- if output_path is None:
- output_path = os.path.join(os.path.dirname(__file__), 'onnx')
- if not os.path.exists(output_path):
- os.makedirs(output_path)
- assert os.path.isdir(output_path)
- output_file = os.path.join(output_path, "%s.onnx" % model_name)
- convert_onnx(backbone_onnx, input_file, output_file, simplify=args.simplify)
diff --git a/spaces/haakohu/deep_privacy2_face/dp2/loss/r1_regularization.py b/spaces/haakohu/deep_privacy2_face/dp2/loss/r1_regularization.py
deleted file mode 100644
index f974c5542bf49ed36b54b46cfc7c9c9bfaff9ce3..0000000000000000000000000000000000000000
--- a/spaces/haakohu/deep_privacy2_face/dp2/loss/r1_regularization.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import torch
-import tops
-
-
-def r1_regularization(
- real_img, real_score, mask, lambd: float, lazy_reg_interval: int,
- lazy_regularization: bool,
- scaler: torch.cuda.amp.GradScaler, mask_out: bool,
- mask_out_scale: bool,
- **kwargs
-):
- grad = torch.autograd.grad(
- outputs=scaler.scale(real_score),
- inputs=real_img,
- grad_outputs=torch.ones_like(real_score),
- create_graph=True,
- only_inputs=True,
- )[0]
- inv_scale = 1.0 / scaler.get_scale()
- grad = grad * inv_scale
- with torch.cuda.amp.autocast(tops.AMP()):
- if mask_out:
- grad = grad * (1 - mask)
- grad = grad.square().sum(dim=[1, 2, 3])
- if mask_out and mask_out_scale:
- total_pixels = real_img.shape[1] * real_img.shape[2] * real_img.shape[3]
- n_fake = (1-mask).sum(dim=[1, 2, 3])
- scaling = total_pixels / n_fake
- grad = grad * scaling
- if lazy_regularization:
- lambd_ = lambd * lazy_reg_interval / 2  # From stylegan2, lazy regularization
- else:
- lambd_ = lambd
- return grad * lambd_, grad.detach()
diff --git a/spaces/hackathon-pln-es/es_nlp_gender_neutralizer/README.md b/spaces/hackathon-pln-es/es_nlp_gender_neutralizer/README.md
deleted file mode 100644
index 1475f8c2fd29761d41e80fc8aebf116942d48a0a..0000000000000000000000000000000000000000
--- a/spaces/hackathon-pln-es/es_nlp_gender_neutralizer/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Es_nlp_gender_neutralizer
-emoji: ☯️
-colorFrom: blue
-colorTo: purple
-sdk: gradio
-sdk_version: 2.8.14
-app_file: app.py
-pinned: false
-datasets: hackathon-pln-es/neutral-es
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/hamacojr/CAT-Seg/cat_seg/data/datasets/register_coco_stuff.py b/spaces/hamacojr/CAT-Seg/cat_seg/data/datasets/register_coco_stuff.py
deleted file mode 100644
index 35c823dee37b1657dc61d1f5beab8c0ecaa98855..0000000000000000000000000000000000000000
--- a/spaces/hamacojr/CAT-Seg/cat_seg/data/datasets/register_coco_stuff.py
+++ /dev/null
@@ -1,216 +0,0 @@
-import os
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.data.datasets import load_sem_seg
-
-COCO_CATEGORIES = [
- {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
- {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
- {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
- {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
- {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
- {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
- {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
- {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
- {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
- {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
- {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
- {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
- {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
- {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
- {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
- {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
- {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
- {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
- {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
- {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
- {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
- {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
- {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
- {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
- {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
- {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
- {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
- {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
- {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
- {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
- {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
- {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
- {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
- {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
- {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
- {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
- {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
- {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
- {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
- {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
- {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
- {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
- {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
- {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
- {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
- {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
- {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
- {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
- {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
- {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
- {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
- {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
- {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
- {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
- {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
- {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
- {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
- {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
- {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
- {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
- {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
- {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
- {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
- {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
- {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
- {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
- {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
- {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
- {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
- {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
- {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
- {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
- {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
- {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
- {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
- {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
- {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
- {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
- {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
- {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
- {"id": 92, "name": "banner", "supercategory": "textile"},
- {"id": 93, "name": "blanket", "supercategory": "textile"},
- {"id": 94, "name": "branch", "supercategory": "plant"},
- {"id": 95, "name": "bridge", "supercategory": "building"},
- {"id": 96, "name": "building-other", "supercategory": "building"},
- {"id": 97, "name": "bush", "supercategory": "plant"},
- {"id": 98, "name": "cabinet", "supercategory": "furniture-stuff"},
- {"id": 99, "name": "cage", "supercategory": "structural"},
- {"id": 100, "name": "cardboard", "supercategory": "raw-material"},
- {"id": 101, "name": "carpet", "supercategory": "floor"},
- {"id": 102, "name": "ceiling-other", "supercategory": "ceiling"},
- {"id": 103, "name": "ceiling-tile", "supercategory": "ceiling"},
- {"id": 104, "name": "cloth", "supercategory": "textile"},
- {"id": 105, "name": "clothes", "supercategory": "textile"},
- {"id": 106, "name": "clouds", "supercategory": "sky"},
- {"id": 107, "name": "counter", "supercategory": "furniture-stuff"},
- {"id": 108, "name": "cupboard", "supercategory": "furniture-stuff"},
- {"id": 109, "name": "curtain", "supercategory": "textile"},
- {"id": 110, "name": "desk-stuff", "supercategory": "furniture-stuff"},
- {"id": 111, "name": "dirt", "supercategory": "ground"},
- {"id": 112, "name": "door-stuff", "supercategory": "furniture-stuff"},
- {"id": 113, "name": "fence", "supercategory": "structural"},
- {"id": 114, "name": "floor-marble", "supercategory": "floor"},
- {"id": 115, "name": "floor-other", "supercategory": "floor"},
- {"id": 116, "name": "floor-stone", "supercategory": "floor"},
- {"id": 117, "name": "floor-tile", "supercategory": "floor"},
- {"id": 118, "name": "floor-wood", "supercategory": "floor"},
- {"id": 119, "name": "flower", "supercategory": "plant"},
- {"id": 120, "name": "fog", "supercategory": "water"},
- {"id": 121, "name": "food-other", "supercategory": "food-stuff"},
- {"id": 122, "name": "fruit", "supercategory": "food-stuff"},
- {"id": 123, "name": "furniture-other", "supercategory": "furniture-stuff"},
- {"id": 124, "name": "grass", "supercategory": "plant"},
- {"id": 125, "name": "gravel", "supercategory": "ground"},
- {"id": 126, "name": "ground-other", "supercategory": "ground"},
- {"id": 127, "name": "hill", "supercategory": "solid"},
- {"id": 128, "name": "house", "supercategory": "building"},
- {"id": 129, "name": "leaves", "supercategory": "plant"},
- {"id": 130, "name": "light", "supercategory": "furniture-stuff"},
- {"id": 131, "name": "mat", "supercategory": "textile"},
- {"id": 132, "name": "metal", "supercategory": "raw-material"},
- {"id": 133, "name": "mirror-stuff", "supercategory": "furniture-stuff"},
- {"id": 134, "name": "moss", "supercategory": "plant"},
- {"id": 135, "name": "mountain", "supercategory": "solid"},
- {"id": 136, "name": "mud", "supercategory": "ground"},
- {"id": 137, "name": "napkin", "supercategory": "textile"},
- {"id": 138, "name": "net", "supercategory": "structural"},
- {"id": 139, "name": "paper", "supercategory": "raw-material"},
- {"id": 140, "name": "pavement", "supercategory": "ground"},
- {"id": 141, "name": "pillow", "supercategory": "textile"},
- {"id": 142, "name": "plant-other", "supercategory": "plant"},
- {"id": 143, "name": "plastic", "supercategory": "raw-material"},
- {"id": 144, "name": "platform", "supercategory": "ground"},
- {"id": 145, "name": "playingfield", "supercategory": "ground"},
- {"id": 146, "name": "railing", "supercategory": "structural"},
- {"id": 147, "name": "railroad", "supercategory": "ground"},
- {"id": 148, "name": "river", "supercategory": "water"},
- {"id": 149, "name": "road", "supercategory": "ground"},
- {"id": 150, "name": "rock", "supercategory": "solid"},
- {"id": 151, "name": "roof", "supercategory": "building"},
- {"id": 152, "name": "rug", "supercategory": "textile"},
- {"id": 153, "name": "salad", "supercategory": "food-stuff"},
- {"id": 154, "name": "sand", "supercategory": "ground"},
- {"id": 155, "name": "sea", "supercategory": "water"},
- {"id": 156, "name": "shelf", "supercategory": "furniture-stuff"},
- {"id": 157, "name": "sky-other", "supercategory": "sky"},
- {"id": 158, "name": "skyscraper", "supercategory": "building"},
- {"id": 159, "name": "snow", "supercategory": "ground"},
- {"id": 160, "name": "solid-other", "supercategory": "solid"},
- {"id": 161, "name": "stairs", "supercategory": "furniture-stuff"},
- {"id": 162, "name": "stone", "supercategory": "solid"},
- {"id": 163, "name": "straw", "supercategory": "plant"},
- {"id": 164, "name": "structural-other", "supercategory": "structural"},
- {"id": 165, "name": "table", "supercategory": "furniture-stuff"},
- {"id": 166, "name": "tent", "supercategory": "building"},
- {"id": 167, "name": "textile-other", "supercategory": "textile"},
- {"id": 168, "name": "towel", "supercategory": "textile"},
- {"id": 169, "name": "tree", "supercategory": "plant"},
- {"id": 170, "name": "vegetable", "supercategory": "food-stuff"},
- {"id": 171, "name": "wall-brick", "supercategory": "wall"},
- {"id": 172, "name": "wall-concrete", "supercategory": "wall"},
- {"id": 173, "name": "wall-other", "supercategory": "wall"},
- {"id": 174, "name": "wall-panel", "supercategory": "wall"},
- {"id": 175, "name": "wall-stone", "supercategory": "wall"},
- {"id": 176, "name": "wall-tile", "supercategory": "wall"},
- {"id": 177, "name": "wall-wood", "supercategory": "wall"},
- {"id": 178, "name": "water-other", "supercategory": "water"},
- {"id": 179, "name": "waterdrops", "supercategory": "water"},
- {"id": 180, "name": "window-blind", "supercategory": "window"},
- {"id": 181, "name": "window-other", "supercategory": "window"},
- {"id": 182, "name": "wood", "supercategory": "solid"},
-]
-
-
-def _get_coco_stuff_meta():
- stuff_ids = [k["id"] for k in COCO_CATEGORIES]
- assert len(stuff_ids) == 171, len(stuff_ids)
-
- stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)}
- stuff_classes = [k["name"] for k in COCO_CATEGORIES]
-
- ret = {
- "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
- "stuff_classes": stuff_classes,
- }
- return ret
-
-def register_all_coco_stuff_10k(root):
- root = os.path.join(root, "coco-stuff")
- meta = _get_coco_stuff_meta()
- for name, image_dirname, sem_seg_dirname in [
- ("train", "images/train2017", "annotations_detectron2/train2017"),
- ("test", "images/val2017", "annotations_detectron2/val2017"),
- ]:
- image_dir = os.path.join(root, image_dirname)
- gt_dir = os.path.join(root, sem_seg_dirname)
- name = f"coco_2017_{name}_stuff_all_sem_seg"
- DatasetCatalog.register(
- name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
- )
- MetadataCatalog.get(name).set(
- image_root=image_dir,
- sem_seg_root=gt_dir,
- evaluator_type="sem_seg",
- ignore_label=255,
- **meta,
- )
-
-_root = os.getenv("DETECTRON2_DATASETS", "datasets")
-register_all_coco_stuff_10k(_root)
diff --git a/spaces/hamacojr/CAT-Seg/cat_seg/modeling/__init__.py b/spaces/hamacojr/CAT-Seg/cat_seg/modeling/__init__.py
deleted file mode 100644
index b788ab8d314e5401c06df8cfc405f0571801487f..0000000000000000000000000000000000000000
--- a/spaces/hamacojr/CAT-Seg/cat_seg/modeling/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from .backbone.swin import D2SwinTransformer
-from .heads.cat_seg_head import CATSegHead
\ No newline at end of file
diff --git a/spaces/hamacojr/SAM-CAT-Seg/open_clip/src/training/zero_shot.py b/spaces/hamacojr/SAM-CAT-Seg/open_clip/src/training/zero_shot.py
deleted file mode 100644
index e5768b4a3ce26f0a9a12d8ee3a6d9490e778a78a..0000000000000000000000000000000000000000
--- a/spaces/hamacojr/SAM-CAT-Seg/open_clip/src/training/zero_shot.py
+++ /dev/null
@@ -1,93 +0,0 @@
-import logging
-
-import torch
-import torch.nn.functional as F
-from tqdm import tqdm
-
-from open_clip import get_cast_dtype, get_tokenizer
-from .precision import get_autocast
-from .imagenet_zeroshot_data import imagenet_classnames, openai_imagenet_template
-
-
-def zero_shot_classifier(model, classnames, templates, args):
- tokenizer = get_tokenizer(args.model)
- with torch.no_grad():
- zeroshot_weights = []
- for classname in tqdm(classnames):
- texts = [template(classname) for template in templates] # format with class
- texts = tokenizer(texts).to(args.device) # tokenize
- if args.distributed and not args.horovod:
- class_embeddings = model.module.encode_text(texts)
- else:
- class_embeddings = model.encode_text(texts)
- class_embedding = F.normalize(class_embeddings, dim=-1).mean(dim=0)
- class_embedding /= class_embedding.norm()
- zeroshot_weights.append(class_embedding)
- zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(args.device)
- return zeroshot_weights
-
-
-def accuracy(output, target, topk=(1,)):
- pred = output.topk(max(topk), 1, True, True)[1].t()
- correct = pred.eq(target.view(1, -1).expand_as(pred))
- return [float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk]
-
-
-def run(model, classifier, dataloader, args):
- autocast = get_autocast(args.precision)
- cast_dtype = get_cast_dtype(args.precision)
- with torch.no_grad():
- top1, top5, n = 0., 0., 0.
- for images, target in tqdm(dataloader, unit_scale=args.batch_size):
- images = images.to(args.device)
- if cast_dtype is not None:
- images = images.to(dtype=cast_dtype)
- target = target.to(args.device)
-
- with autocast():
- # predict
- if args.distributed and not args.horovod:
- image_features = model.module.encode_image(images)
- else:
- image_features = model.encode_image(images)
- image_features = F.normalize(image_features, dim=-1)
- logits = 100. * image_features @ classifier
-
- # measure accuracy
- acc1, acc5 = accuracy(logits, target, topk=(1, 5))
- top1 += acc1
- top5 += acc5
- n += images.size(0)
-
- top1 = (top1 / n)
- top5 = (top5 / n)
- return top1, top5
-
-
-def zero_shot_eval(model, data, epoch, args):
- if 'imagenet-val' not in data and 'imagenet-v2' not in data:
- return {}
- if args.zeroshot_frequency == 0:
- return {}
- if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
- return {}
-
- logging.info('Starting zero-shot imagenet.')
-
- logging.info('Building zero-shot classifier')
- classifier = zero_shot_classifier(model, imagenet_classnames, openai_imagenet_template, args)
-
- logging.info('Using classifier')
- results = {}
- if 'imagenet-val' in data:
- top1, top5 = run(model, classifier, data['imagenet-val'].dataloader, args)
- results['imagenet-zeroshot-val-top1'] = top1
- results['imagenet-zeroshot-val-top5'] = top5
- if 'imagenet-v2' in data:
- top1, top5 = run(model, classifier, data['imagenet-v2'].dataloader, args)
- results['imagenetv2-zeroshot-val-top1'] = top1
- results['imagenetv2-zeroshot-val-top5'] = top5
-
- logging.info('Finished zero-shot imagenet.')
-
- return results
diff --git a/spaces/haoqi7/images/app.py b/spaces/haoqi7/images/app.py
deleted file mode 100644
index 7ddd4bbf3b9c6ace2d4ab07ebbe74d00e4b6a00c..0000000000000000000000000000000000000000
--- a/spaces/haoqi7/images/app.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import requests
-import shutil
-from PIL import Image
-from io import BytesIO
-import numpy as np
-import matplotlib.pyplot as plt
-import pandas as pd
-import random
-import gradio as gr
-
-design='india'
-def lexica(design,n):
-
- request=requests.get(f'https://lexica.art/api/v1/search?q={design}')
- request.json()
- data = request.json()
- data_items = list(data.items())
-
- random.shuffle(data_items)
-
- data = dict(data_items)
-
- image_urls = []
- image_prompts = []
-
- for key, value in data.items():
- for i in range(n):
- image_url = value[i]['src']
- if isinstance(image_url, list):
- image_url = image_url[0]
- image_urls.append(image_url)
-
-
- image_prompts.append(value[i]['prompt'])
-
- images = []
-
- # Loop through the image URLs
- for url in image_urls:
- # Download the image from the URL
- response = requests.get(url)
-
- # Load the image data into PIL format
- image = Image.open(BytesIO(response.content))
-
- # Add the image to the list
- images.append(image)
-
-
- df = pd.DataFrame(image_prompts, columns=["Lexica Prompt"], index=range(1, len(image_prompts)+1))
-
-
- df.index.name = "Sr. No."
-
-
- for image in images:
-
- array = np.array(image)
-
-
- return images , df
-design='india'
-# lexica(design)
-
-inputs =[ gr.Textbox(label = 'Enter prompt to search'),
- gr.Slider(label='Number of images ', minimum = 4, maximum = 20, step = 1, value = 4)]
-
-outputs= [gr.Gallery(label='Output gallery').style(grid=1),
- gr.Dataframe(label='prompts for corresponding images')]
-
-# Create and launch the interface
-interface = gr.Interface(lexica,
- inputs=inputs,
- outputs=outputs,
- examples =[ ['trending digital art', 5],
- ['beautiful home', 5],
- ['interior design of living room', 5]]
- ,
- title = "" +' A Search Engine for Generative Art Prompts and Works '+ "",
- description=" Find more powerful tools at [AI Beast](https://aibeast.net) and follow for updates . [Follow](https://www.facebook.com/abbas143office) ❤️")
-
-interface.launch(debug=True)
\ No newline at end of file
diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/docs/tutorials/install.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/docs/tutorials/install.md
deleted file mode 100644
index 3985f8ae4f5ecde26b310b4ab01c49b922f742e9..0000000000000000000000000000000000000000
--- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/docs/tutorials/install.md
+++ /dev/null
@@ -1,184 +0,0 @@
-## Installation
-
-Our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
-has step-by-step instructions that install detectron2.
-The [Dockerfile](docker)
-also installs detectron2 with a few simple commands.
-
-### Requirements
-- Linux or macOS with Python ≥ 3.6
-- PyTorch ≥ 1.4
-- [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation.
- You can install them together at [pytorch.org](https://pytorch.org) to make sure of this.
-- OpenCV, optional, needed by demo and visualization
-- pycocotools: `pip install cython; pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'`
-
-
-### Build Detectron2 from Source
-
-gcc & g++ ≥ 5 are required. [ninja](https://ninja-build.org/) is recommended for faster build.
-After having them, run:
-```
-python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
-# (add --user if you don't have permission)
-
-# Or, to install it from a local clone:
-git clone https://github.com/facebookresearch/detectron2.git
-python -m pip install -e detectron2
-
-# Or if you are on macOS
-# CC=clang CXX=clang++ python -m pip install -e .
-```
-
-To __rebuild__ detectron2 that's built from a local clone, use `rm -rf build/ **/*.so` to clean the
-old build first. You often need to rebuild detectron2 after reinstalling PyTorch.
-
-### Install Pre-Built Detectron2 (Linux only)
-```
-# for CUDA 10.1:
-python -m pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/index.html
-```
-You can replace cu101 with "cu{100,92}" or "cpu".
-
-Note that:
-1. Such installation has to be used with certain version of official PyTorch release.
- See [releases](https://github.com/facebookresearch/detectron2/releases) for requirements.
- It will not work with a different version of PyTorch or a non-official build of PyTorch.
-2. Such installation is out-of-date w.r.t. master branch of detectron2. It may not be
- compatible with the master branch of a research project that uses detectron2 (e.g. those in
- [projects](projects) or [meshrcnn](https://github.com/facebookresearch/meshrcnn/)).
-
-### Common Installation Issues
-
-If you met issues using the pre-built detectron2, please uninstall it and try building it from source.
-
-Each issue is listed below together with its solutions:
-
-
-
-Undefined torch/aten/caffe2 symbols, or segmentation fault immediately when running the library.
-
-
-
-This usually happens when detectron2 or torchvision is not
-compiled with the version of PyTorch you're running.
-
-Pre-built torchvision or detectron2 has to work with the corresponding official release of pytorch.
-If the error comes from a pre-built torchvision, uninstall torchvision and pytorch and reinstall them
-following [pytorch.org](http://pytorch.org). So the versions will match.
-
-If the error comes from a pre-built detectron2, check [release notes](https://github.com/facebookresearch/detectron2/releases)
-to see the corresponding pytorch version required for each pre-built detectron2.
-
-If the error comes from detectron2 or torchvision that you built manually from source,
-remove files you built (`build/`, `**/*.so`) and rebuild it so it can pick up the version of pytorch currently in your environment.
-
-If you cannot resolve this problem, please include the output of `gdb -ex "r" -ex "bt" -ex "quit" --args python -m detectron2.utils.collect_env`
-in your issue.
-
-
-
-
-Undefined C++ symbols (e.g. `GLIBCXX`) or C++ symbols not found.
-
-
-Usually it's because the library is compiled with a newer C++ compiler but run with an old C++ runtime.
-
-This often happens with old anaconda.
-Try `conda update libgcc`. Then rebuild detectron2.
-
-The fundamental solution is to run the code with proper C++ runtime.
-One way is to use `LD_PRELOAD=/path/to/libstdc++.so`.
-
-
-
-
-
-"Not compiled with GPU support" or "Detectron2 CUDA Compiler: not available".
-
-
-CUDA is not found when building detectron2.
-You should make sure
-
-```
-python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)'
-```
-
-print valid outputs at the time you build detectron2.
-
-Most models can run inference (but not training) without GPU support. To use CPUs, set `MODEL.DEVICE='cpu'` in the config.
-
-
-
-
-"invalid device function" or "no kernel image is available for execution".
-
-
-Two possibilities:
-
-* You build detectron2 with one version of CUDA but run it with a different version.
-
- To check whether it is the case,
- use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions.
- In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA"
- to contain cuda libraries of the same version.
-
- When they are inconsistent,
- you need to either install a different build of PyTorch (or build by yourself)
- to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
-
-* Detectron2 or PyTorch/torchvision is not built for the correct GPU architecture (compute compatibility).
-
- The GPU architecture for PyTorch/detectron2/torchvision is available in the "architecture flags" in
- `python -m detectron2.utils.collect_env`.
-
- The GPU architecture flags of detectron2/torchvision by default matches the GPU model detected
- during compilation. This means the compiled code may not work on a different GPU model.
- To overwrite the GPU architecture for detectron2/torchvision, use `TORCH_CUDA_ARCH_LIST` environment variable during compilation.
-
- For example, `export TORCH_CUDA_ARCH_LIST=6.0,7.0` makes it compile for both P100s and V100s.
- Visit [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus) to find out
- the correct compute compatibility number for your device.
-
-
-
-
-
-Undefined CUDA symbols; cannot open libcudart.so; other nvcc failures.
-
-
-The version of NVCC you use to build detectron2 or torchvision does
-not match the version of CUDA you are running with.
-This often happens when using anaconda's CUDA runtime.
-
-Use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions.
-In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA"
-to contain cuda libraries of the same version.
-
-When they are inconsistent,
-you need to either install a different build of PyTorch (or build by yourself)
-to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
-
-
-
-
-
-"ImportError: cannot import name '_C'".
-
-
-Please build and install detectron2 following the instructions above.
-
-If you are running code from detectron2's root directory, `cd` to a different one.
-Otherwise you may not import the code that you installed.
-
-
-
-
-ONNX conversion segfault after some "TraceWarning".
-
-
-The ONNX package is compiled with too old compiler.
-
-Please build and install ONNX from its source code using a compiler
-whose version is closer to what's used by PyTorch (available in `torch.__config__.show()`).
-
diff --git a/spaces/hdhzk/bingo/src/components/user-menu.tsx b/spaces/hdhzk/bingo/src/components/user-menu.tsx
deleted file mode 100644
index 9bd1edc9cf9f39b63629b021f0c1186b1a7c1341..0000000000000000000000000000000000000000
--- a/spaces/hdhzk/bingo/src/components/user-menu.tsx
+++ /dev/null
@@ -1,113 +0,0 @@
-'use client'
-
-import { useEffect, useState } from 'react'
-import Image from 'next/image'
-import { toast } from 'react-hot-toast'
-import { Button } from '@/components/ui/button'
-import pkg from '../../package.json'
-import {
- DropdownMenu,
- DropdownMenuContent,
- DropdownMenuItem,
- DropdownMenuSeparator,
- DropdownMenuTrigger
-} from '@/components/ui/dropdown-menu'
-import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons'
-import SettingIcon from '@/assets/images/settings.svg'
-import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
-
-export function UserMenu() {
- const [host, setHost] = useState('')
- const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })
- useEffect(() => {
- setHost(location.host)
- }, [])
-
- useEffect(() => {
- if (isCopied) {
- toast.success('复制成功')
- }
- }, [isCopied])
- return (
-
-
-
-
-
-
-
- 设置
-
-
-
-
- location.href='#dialog="settings"'
- }
- className="cursor-pointer"
- >
- 设置用户
-
-
-
- location.href='#dialog="voice"'
- }
- className="cursor-pointer"
- >
- 语音设置
-
-
-
-
- 开源地址
-
-
-
-
-
-
-
- 托管地址
- 🤗
-
-
-
-
-
-
- 复制站点
-
-
-
-
-
- 版本信息 {pkg.version}
-
-
-
- 站点域名
- copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer">
- {host}
-
-
-
-
-
- )
-}
diff --git a/spaces/hekbobo/bingo/src/lib/bots/bing/utils.ts b/spaces/hekbobo/bingo/src/lib/bots/bing/utils.ts
deleted file mode 100644
index 6bbbc5e463ad55bc1219b63cf78013f5360fc908..0000000000000000000000000000000000000000
--- a/spaces/hekbobo/bingo/src/lib/bots/bing/utils.ts
+++ /dev/null
@@ -1,87 +0,0 @@
-import { ChatResponseMessage, BingChatResponse } from './types'
-
-export function convertMessageToMarkdown(message: ChatResponseMessage): string {
- if (message.messageType === 'InternalSearchQuery') {
- return message.text
- }
- for (const card of message.adaptiveCards??[]) {
- for (const block of card.body) {
- if (block.type === 'TextBlock') {
- return block.text
- }
- }
- }
- return ''
-}
-
-const RecordSeparator = String.fromCharCode(30)
-
-export const websocketUtils = {
- packMessage(data: any) {
- return `${JSON.stringify(data)}${RecordSeparator}`
- },
- unpackMessage(data: string | ArrayBuffer | Blob) {
- if (!data) return {}
- return data
- .toString()
- .split(RecordSeparator)
- .filter(Boolean)
- .map((s) => {
- try {
- return JSON.parse(s)
- } catch (e) {
- return {}
- }
- })
- },
-}
-
-export async function createImage(prompt: string, id: string, headers: HeadersInit): Promise<string | undefined> {
- const { headers: responseHeaders } = await fetch(`https://www.bing.com/images/create?partner=sydney&re=1&showselective=1&sude=1&kseed=7000&SFX=&q=${encodeURIComponent(prompt)}&iframeid=${id}`,
- {
- method: 'HEAD',
- headers,
- redirect: 'manual'
- },
- );
-
- if (!/&id=([^&]+)$/.test(responseHeaders.get('location') || '')) {
- throw new Error('请求异常,请检查身份信息是否有效')
- }
-
- const resultId = RegExp.$1;
- let count = 0
- const imageThumbUrl = `https://www.bing.com/images/create/async/results/${resultId}?q=${encodeURIComponent(prompt)}&partner=sydney&showselective=1&IID=images.as`;
-
- do {
- await sleep(3000);
- const content = await fetch(imageThumbUrl, { headers, method: 'GET' })
-
- // @ts-ignore
- if (content.headers.get('content-length') > 1) {
- const text = await content.text()
- return (text?.match(/ target?.split('src="').pop()?.replace(/&/g, '&'))
- .map(img => ``).join(' ')
- }
- } while(count ++ < 10);
-}
-
-
-export async function* streamAsyncIterable(stream: ReadableStream) {
- const reader = stream.getReader()
- try {
- while (true) {
- const { done, value } = await reader.read()
- if (done) {
- return
- }
- yield value
- }
- } finally {
- reader.releaseLock()
- }
-}
-
-export const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms))
-
diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/readme.md b/spaces/ho11laqe/nnUNet_calvingfront_detection/readme.md
deleted file mode 100644
index 8be8e943e8804005a0cf90e0b7ca5b75beed18b2..0000000000000000000000000000000000000000
--- a/spaces/ho11laqe/nnUNet_calvingfront_detection/readme.md
+++ /dev/null
@@ -1,855 +0,0 @@
-# Multi-Task Learning for Glacier Segmentation and Calving Front Prediction with the nnU-Net.
-
-This project contains the scripts for the experiments described in the paper "Out-of-the-box calving front detection method using deep-learning" by Herrmann et al. https://tc.copernicus.org/preprints/tc-2023-34/
-The project was built on top of the nnU-Net project by Isensee, F., Jaeger, P. F. (2020) https://github.com/MIC-DKFZ/nnUNet. The folders that are new to the project are marked as "xx_new". I tried to change a minimum number of original files and create new ones instead, but it was not always feasible.
-
-## Out-of-the-box calving front detection
-To apply the trained nnU-Net to a set of SAR images for calving front detection, follow the steps below:
-
-1. Download this repository and extract the files: https://github.com/ho11laqe/nnUNet_calvingfront_detection.git
-2. Download the pretrained model from Zenodo and extract the zip-file https://zenodo.org/record/7837300#.ZD1OI9IzbUA.
-3. Install the repository
- - Create a new virtual environment with `python3 -m venv /path/to/venv/nnunet` and replace the path with the location,
- where the virtual environment should be installed.
- - Activate the environment with `source /path/to/venv/nnunet/bin/activate`.
- - Install the repository by entering `pip install -e /path/to/extracted/repository/nnunet_calvingfront` and replace the path.
-4. Run the calving front prediction with `bash RUN_CALBINGFRONT_DETECTION.sh -d /path/to/SARimages/ -m /path/to/pretrained/model/` and replace the paths
-with the path to the folder containing the SAR images and path to the pretrained model.
-
-## 1. Dataset
-
-The dataset is provided by Gourmelon et al. and can be found [here](https://doi.pangaea.de/10.1594/PANGAEA.940950).
-It contains 681 SAR images of seven glaciers taken by seven different satellites. Two glaciers are located in the
-northern hemisphere and five in the southern hemisphere. The two glaciers in the northern hemisphere are the Columbia
-Glacier in Alaska and the Jacobshavn in Greenland. Both glaciers are
-famous representatives of their regions because they are two of the largest tidewater glaciers in the world. The
-Columbia Glacier has a length of 51 km and a thickness of 550 m. The glacier has been retreating at a rate of
-approximately 0.6 km per year since 1982. Jacobshavn has a length of 65 km, a thickness of
-2000 m, and retreated 27.4 km between 1902 and 2010. The five glaciers in the southern
-hemisphere are all located on the Antarctic Peninsula.
-
-
-Properties of the dataset, including the list of captured glaciers, the train-test split,
-the number of images per glacier, and the covered area in km².
-
-The dataset contains two labels for each glacier image. One is a mask of the different
-zones of the glacier (ocean, glacier, radar shadow, rock). The other label contains a 1 pixel
-wide line representing the calving front. A sample of each glacier in the training set with
-its corresponding labels is shown in Figure 2. Predicting the zone mask can be seen as a
-classic segmentation problem. The only special property is that every pixel is associated with a
-specific class, so there is no general ’background’ class for unclassifiable pixels. Because
-of the high class imbalance, the calving front delineation is a more difficult task: fewer
-than 1 % of the pixels are labeled as the front. Additionally, the front class does not form a
-compact region but a thin, elongated line.
-
-Figure 2: Sample images of every glacier in the train set and their corresponding labels.
-The first row shows the front label with black background and a 1 pixel wide white line
-representing the calving front. The second row contains the zone labels with four classes:
-ocean (white), glacier (light gray), rock (dark gray), radar shadow (black).
-
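-A minimal sketch of how this class imbalance can be inspected from the PNG labels; the file
-names are placeholders, and the exact gray-value encoding is an assumption based on the
-label description above:
-
-```python
-import numpy as np
-from PIL import Image
-
-# Hypothetical file names for one sample of the dataset.
-front = np.array(Image.open("front_label.png"))
-zones = np.array(Image.open("zone_label.png"))
-
-# Fraction of pixels labeled as calving front (illustrates the class imbalance).
-front_fraction = (front > 0).mean()
-print(f"front pixels: {front_fraction:.4%}")
-
-# Pixel share per zone class (assumed: one gray value per zone).
-values, counts = np.unique(zones, return_counts=True)
-for value, count in zip(values, counts):
-    print(f"zone value {value}: {count / zones.size:.2%} of pixels")
-```
-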
-Every glacier is captured by multiple satellites for a higher temporal resolution. This means
-that recordings of one glacier are captured by different SAR systems with different
-resolutions. In Figure 3, a timeline of the images of each glacier visualizes the observation
-time and frequency of the images. The first two rows show the glaciers of the test set.
-
-Figure 3: Distribution of the dataset’s images over time. The samples are grouped by the
-seven glaciers, and colored according to the capturing satellite.
-
-## 2. nnU-Net
-The nnU-Net by Fabian Isensee et al. [Ise+21] reduces the hyperparameter
-search by taking a fingerprint of the dataset and adjusting hyperparameters accordingly.
-Additionally, there are fixed parameters. These parameters are based on the authors’
-experience and generalize well across a variety of tasks. The structure of the nnU-Net is
-visualized in Figure 4.
-
-Figure 4: Illustration of the nnU-Net framework created by Isensee et al. [Ise+21]
-
-I retraced the pipeline of the nnU-Net and created the following visualizations. Figure 5 shows the whole pipeline
-including the added Python scripts. The data samples and the labels have to be in the Neuroimaging Informatics Technology
-Initiative (NIfTI) file format, separated into test and training samples. The NIfTI file format
-was developed for neuroimaging. The files store 3D scans of brains or other organs. The
-format stores additional information about the orientation of the data, distances between
-the individual pixels/voxels, and layers. Because the nnU-Net was developed for medical
-imaging, it uses this file format.
-
-Figure 5: Scripts for conversion between PNG and NIfTI (blue), nnU-Net scripts (purple),
-evaluation scripts (green).
-
-### 2.1 Preprocessing
-
-The nnU-Net crops borders before the
-dataset fingerprint is created. While the dataset is scanned for black borders, properties of
-every image, including size and spacing, are stored. After the cropping, every sample and
-its corresponding label is stacked into one NIfTI file. Finally, the dataset’s fingerprint is
-created by analyzing the dataset. The fingerprint includes the size and spacing of every
-sample, the number of classes, the imaging modality, and the intensity distribution.
-Based on the dataset’s fingerprint and the available Random Access Memory (RAM)
-of the Graphics Processing Unit (GPU), a plan for the training and architecture of the
-U-Net is created. The hyperparameters concerning the architecture are the patch size and
-the number of layers. Most often, using the whole data sample as the input for the U-Net
-results in a massive number of parameters and will not fit on traditional GPUs. Therefore,
-the image is divided into smaller parts called patches. Their segmentation masks are stitched
-together afterwards to get a segmentation of the whole image. The patch size is initialized
-with the median image shape and iteratively reduced until at least two images can be
-processed in parallel. The number of images passed through the network in parallel is called
-the batch size; a larger batch size provides more stable training. Here a larger patch size is preferred over a
-larger batch size to provide more contextual information for the segmentation. The patch
-size also represents the size of the first and last layer of the U-Net.
-
-Figure 6: Plan and preprocessing pipeline including generated files. The python scripts
-are reduced to the important functions.
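-
-A minimal sketch of the patch-size heuristic described above; `fits_in_memory` is a hypothetical
-stand-in for the nnU-Net's VRAM estimate, and the shrinking factor is only illustrative:
-
-```python
-import numpy as np
-
-def plan_patch_and_batch(median_shape, fits_in_memory, min_batch=2):
-    # Start from the median image shape and shrink the patch until a batch of
-    # at least `min_batch` patches fits into GPU memory.
-    patch = np.array(median_shape, dtype=int)
-    while not fits_in_memory(tuple(patch), min_batch) and patch.max() > 32:
-        axis = int(np.argmax(patch))            # shrink the largest axis first
-        patch[axis] = max(patch[axis] * 3 // 4, 32)
-    return tuple(int(v) for v in patch), min_batch
-
-# Toy memory model (pure assumption): the GPU holds at most 1e6 pixels per batch.
-fits = lambda patch, batch: batch * np.prod(patch) <= 1e6
-print(plan_patch_and_batch((1024, 1024), fits))   # e.g. ((576, 768), 2)
-```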
-
-### 2.2. Training
-Before the training starts, the network trainer and the network have to be initialized with
-the parameters generated by the previous step. The trainer’s parameters are learning rate,
-loss function, the maximum number of epochs, optimizer, and dataloader. The dataloader
-is responsible for creating the patches, batches, and augmentation of the samples. There
-are 11 augmentation steps in the nnU-Net listed in the Table below.
-
-
-In the next step, the network is created based on the generated parameters. The U-Net
-consists of multiple blocks (in this work: nine encoder blocks and eight decoder blocks).
-The encoder block and the decoder block are illustrated in Figure 7. The encoder block
-contains two convolutional layers. Each convolution is followed by instance normalization and
-the activation function (leaky rectified linear unit). For instance normalization, the mean
-and variance are calculated for every feature map. Afterwards, the feature map is shifted by the mean
-and divided by the standard deviation. The decoder block takes as input the output of the previous
-block and the output of the corresponding encoder block. The output of the previous block
-is scaled up with a transpose convolution and then concatenated with the encoder output.
-Afterwards, the decoder block has the same structure as the encoder block. The output is used
-by the next block and by the deep supervision branch.
-
-Figure 7: Illustration of the encoder and decoder blocks that make up the architecture of
-the nnU-Net. The encoder and the decoder contain multiple blocks.
-
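-A simplified PyTorch sketch of the block structure described above (2D case; not the exact
-nnU-Net implementation):
-
-```python
-import torch
-import torch.nn as nn
-
-class EncoderBlock(nn.Module):
-    # Two convolutions, each followed by instance normalization and leaky ReLU.
-    def __init__(self, in_ch, out_ch):
-        super().__init__()
-        self.block = nn.Sequential(
-            nn.Conv2d(in_ch, out_ch, 3, padding=1),
-            nn.InstanceNorm2d(out_ch), nn.LeakyReLU(inplace=True),
-            nn.Conv2d(out_ch, out_ch, 3, padding=1),
-            nn.InstanceNorm2d(out_ch), nn.LeakyReLU(inplace=True),
-        )
-
-    def forward(self, x):
-        return self.block(x)
-
-class DecoderBlock(nn.Module):
-    # Upscale the previous output, concatenate the skip connection from the
-    # corresponding encoder block, then apply the same conv structure.
-    def __init__(self, in_ch, skip_ch, out_ch):
-        super().__init__()
-        self.up = nn.ConvTranspose2d(in_ch, out_ch, kernel_size=2, stride=2)
-        self.conv = EncoderBlock(out_ch + skip_ch, out_ch)
-
-    def forward(self, x, skip):
-        return self.conv(torch.cat([self.up(x), skip], dim=1))
-
-# Toy usage: one encoder stage (3 -> 32) and a matching decoder stage.
-enc = EncoderBlock(3, 32)
-dec = DecoderBlock(in_ch=64, skip_ch=32, out_ch=32)
-x = torch.randn(1, 3, 64, 64)
-skip = enc(x)                           # (1, 32, 64, 64)
-deeper = torch.randn(1, 64, 32, 32)     # stands in for the output of a deeper stage
-print(dec(deeper, skip).shape)          # torch.Size([1, 32, 64, 64])
-```
-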
-After everything is initialized, the network is trained to minimize the loss function.
-The loss function of the nnU-Net is the sum of the cross-entropy loss and the Dice loss. Typically, one epoch
-corresponds to feeding every dataset sample to the network once. The nnU-Net instead sets a fixed
-number of iterations (250) to be one epoch. Because the dataloader ensures that at least
-one-third of the patches in a batch contain a randomly chosen foreground class, this oversampling is especially helpful
-for the class imbalance of the front label, where most of the patches do not contain the front
-class at all.
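-
-A minimal sketch of such a combined loss; the exact Dice formulation of the nnU-Net differs in
-details (e.g., batch Dice and deep supervision weighting):
-
-```python
-import torch
-import torch.nn.functional as F
-
-def soft_dice_loss(logits, target, eps=1e-5):
-    # Soft Dice averaged over all classes; `target` holds integer class ids.
-    num_classes = logits.shape[1]
-    probs = torch.softmax(logits, dim=1)
-    onehot = F.one_hot(target, num_classes).permute(0, 3, 1, 2).float()
-    dims = (0, 2, 3)
-    intersection = (probs * onehot).sum(dims)
-    denominator = probs.sum(dims) + onehot.sum(dims)
-    dice = (2 * intersection + eps) / (denominator + eps)
-    return 1 - dice.mean()
-
-def ce_plus_dice(logits, target):
-    # Equally weighted sum of cross-entropy and soft Dice, as described above.
-    return F.cross_entropy(logits, target) + soft_dice_loss(logits, target)
-
-# Toy usage: a batch of two 64x64 patches with five classes.
-logits = torch.randn(2, 5, 64, 64)
-target = torch.randint(0, 5, (2, 64, 64))
-print(ce_plus_dice(logits, target).item())
-```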
-
-
-### 2.3 Post-processing
-The trained model can be used to detect the target in unseen data. First, the model files
-are loaded from the specified fold. Afterwards, the pipeline preprocesses the hold-out test
-set. The test samples are divided into patches similar to the training samples. For a robust
-result, the patches are additionally rotated three times, and the resulting predictions are then
-combined by averaging the pixel values. The network accuracy decreases towards the
-borders of patches; therefore, the predictions are weighted by a Gaussian bell. Finally, the
-patches overlap by half of the patch size to get a smoother final result, and the predictions are stored as NIfTI
-files in the specified folder. The inference script and its steps are illustrated in Figure 3.11.
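-
-A simplified sketch of the Gaussian-weighted stitching of overlapping patch predictions (single
-prediction channel; the actual inference code differs in details):
-
-```python
-import numpy as np
-
-def gaussian_importance(patch_shape, sigma_scale=1 / 8):
-    # Gaussian bell that down-weights predictions towards the patch borders.
-    grids = np.meshgrid(*[np.arange(s) for s in patch_shape], indexing="ij")
-    center = [(s - 1) / 2 for s in patch_shape]
-    sigmas = [s * sigma_scale for s in patch_shape]
-    dist = sum(((g - c) / sg) ** 2 for g, c, sg in zip(grids, center, sigmas))
-    weight = np.exp(-0.5 * dist)
-    return weight / weight.max()
-
-def stitch(pred_patches, positions, image_shape, patch_shape):
-    # Accumulate Gaussian-weighted patch predictions (patches overlap by half a
-    # patch) and normalize by the accumulated weights.
-    accum = np.zeros(image_shape, dtype=np.float32)
-    weights = np.zeros(image_shape, dtype=np.float32)
-    g = gaussian_importance(patch_shape)
-    for patch, (y, x) in zip(pred_patches, positions):
-        accum[y:y + patch_shape[0], x:x + patch_shape[1]] += patch * g
-        weights[y:y + patch_shape[0], x:x + patch_shape[1]] += g
-    return accum / np.maximum(weights, 1e-8)
-```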
-
-
-
-## 3. Adjustments of the nnU-Net Pipeline
-There are mainly two approaches that can
-be distinguished. The approach that requires minimal changes to the vanilla U-Net is
-created by adding the second label as a second channel to the last layer (late-branching).
-Only the parameters of one additional kernel in the last layer have to be trained, so the
-total number of trainable parameters changes insignificantly. The second
-approach uses one decoder for every label (early-branching).
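-
-The difference between the two strategies can be illustrated with the output layers alone; the
-channel counts and the `build_decoder` stand-in below are illustrative and do not reproduce the
-actual Generic_UNet code.
-
-```python
-import torch.nn as nn
-
-n_classes_front, n_classes_zone, feat = 2, 4, 32  # illustrative channel counts
-
-# Late branching: one shared decoder; only the last 1x1 convolution grows to produce
-# the channels of both labels, so the number of additional parameters is negligible.
-late_head = nn.Conv2d(feat, n_classes_front + n_classes_zone, kernel_size=1)
-
-# Early branching: the whole decoder is duplicated, one per label,
-# which roughly doubles the number of decoder parameters.
-def build_decoder():
-    # stand-in for the full decoder path of the U-Net
-    return nn.Sequential(nn.ConvTranspose2d(64, feat, 2, stride=2),
-                         nn.Conv2d(feat, feat, 3, padding=1), nn.LeakyReLU())
-
-decoder_front = nn.Sequential(build_decoder(), nn.Conv2d(feat, n_classes_front, 1))
-decoder_zone = nn.Sequential(build_decoder(), nn.Conv2d(feat, n_classes_zone, 1))
-```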
-
-
-The PNGs of glaciers had to be converted to the NIfTI file format. Because the glacier
-labels are 2D, the two labels were stacked in one label file with the label of the front located
-at z = 0 and the zone masks at z = 1. In the dataset.json, which contains the dataset’s
-metadata, the label entry contains a list of multiple labels with multiple classes instead of a
-single list of classes. After the dataset is in the desired directory and format, the nnU-Net
-scripts can be executed.
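-
-A minimal sketch of this conversion, assuming nibabel and Pillow are available; the file names and
-the identity affine are illustrative.
-
-```python
-import numpy as np
-import nibabel as nib
-from PIL import Image
-
-def pngs_to_nifti(front_png, zone_png, out_path):
-    """Stack the 2D front and zone labels along z: front at z = 0, zone masks at z = 1."""
-    front = np.array(Image.open(front_png))
-    zones = np.array(Image.open(zone_png))
-    stacked = np.stack([front, zones], axis=-1).astype(np.uint8)  # shape (H, W, 2)
-    nib.save(nib.Nifti1Image(stacked, affine=np.eye(4)), out_path)
-
-pngs_to_nifti("glacier_front.png", "glacier_zones.png", "glacier_0001.nii.gz")
-```
-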
-Changes in the preprocessing mainly concern the added dimension of the labels in
-dataset.json: there are now multiple labels, each with multiple classes, instead of
-one label with multiple classes. During the planning of the experiment, the es-
-timated size of the network architecture is requested. This work implements a new
-class Generic_UNet_MTLearly, which returns the network’s size in a method (com-
-pute_approx_vram_consumption). For comparability, this value is also used for the late-
-branching network, even though its actual size is smaller. Otherwise, early- and late-branching networks
-would be trained on different patch sizes. Generic_UNet_MTLearly is derived from the
-given class Generic_UNet, which was included in the framework and is used in this work
-for the single task segmentation. The Generic_UNet_MTLearly contains a second decoder, which is
-created in the initialization of every instance of the class and used in the forward function.
-The outputs of both decoders are concatenated before they are returned.
-Another class is responsible for the training of the network. The given nnUNetTrainerV2
-was used for the single task segmentation. For the MTL a new nnUNetTrainerMTLearly
-and nnUNetTrainerMTLlate were derived from the single task trainer. These trainer
-classes contain hyperparameters, e.g., a maximum number of epochs and deep supervision
-scales. They also trigger the initialization of the network, run the forward pass, compute the loss,
-and trigger the update of the weights. The initialization of the network is done in the
-aforementioned Generic_UNet classes. For late-branching, the last layer and the layer
-for deep supervision are modified to output the channels of both labels. For early-branching, the
-decoder is duplicated, and the results of the decoders are concatenated before the return of
-the forward pass. After every iteration, the error of both labels is calculated as described in
-Section 3.6.2 and summed up with an equal weighting (unitary scalarization).
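-
-A schematic sketch of the early-branching forward pass and the unitary scalarization of the two
-losses; the encoder and decoder interfaces are hypothetical and only illustrate the structure
-described above.
-
-```python
-import torch
-
-def forward_mtl_early(encoder, decoder_front, decoder_zone, x):
-    """Shared encoder, one decoder per label; the outputs are concatenated on return."""
-    features, skips = encoder(x)                # hypothetical encoder interface
-    out_front = decoder_front(features, skips)  # channels of the front label
-    out_zone = decoder_zone(features, skips)    # channels of the zone label
-    return torch.cat([out_front, out_zone], dim=1)
-
-def mtl_loss(output, target_front, target_zone, loss_fn, n_front_ch):
-    """Equal weighting of both task losses (unitary scalarization)."""
-    loss_front = loss_fn(output[:, :n_front_ch], target_front)
-    loss_zone = loss_fn(output[:, n_front_ch:], target_zone)
-    return loss_front + loss_zone
-```
-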
-Only minor changes had to be made in the inference script (predict_simple.py). After
-the test samples are divided into patches and fed through the network, the multiple channels
-of the network's output have to be separated, and the patch predictions are composed into
-the prediction of the whole image. A list of the edited and added scripts is provided in the following table.
-
-
-
-## 4. Experiments
-The first research goal is to apply the out-of-the-box nnU-Net, as it is intended to
-be used, to the glacier front detection and to the glacier zone segmentation, which is
-represented by the first two columns in Figure 5.1. 5-fold cross-validation is used to
-reduce the bias of the weight initialization and the bias of the split of the data into training
-and validation sets. Every column in Figure 5.1 represents five newly trained U-Nets,
-with common hyperparameters but different weight initialization. The evaluations of the
-individual models are averaged to get a robust measure independent of weight initialization
-and data split.
-
-Training the nnU-Net directly on the front labels is the most straightforward approach to
-obtain a method for calving front detection. The label of the calving front is dilated to a width
-of five pixels. In preliminary experiments, the dilation has been shown to make the predictions
-more robust. For the training with zone labels, the evaluation script includes extraction of
-the boundary between ocean and glacier, which is described in more detail in Section 5.2.
-The following approach is to train the U-Net on the zone and front label simultaneously.
-Two architectures are compared. The early-branching and the late-branching network are
-described in Section 4.1. The fifth experiment of this work extracts the boundaries of the
-glacier zone to all other zones as a third segmentation task for the U-Net (see Figure 5.2),
-in contrast to the calving front segmentation, which covers only a particular part of the boundary
-between glacier and ocean. The label of the glacier boundaries was extracted from the zone
-label: all glacier pixels with a neighbouring rock or shadow pixel are assigned to the glacier
-boundary, as sketched below. The hypothesis is that providing more information about the same
-domain benefits the performance of the U-Net on the individual tasks. The last experiment fuses
-the two labels by creating a fourth class in the zone label associated with the glacier front.
-Because the front line has a width of only five pixels, the other zone classes are barely impaired.
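-
-A sketch of this neighbourhood check using scipy; the class indices of the zone label are
-illustrative.
-
-```python
-import numpy as np
-from scipy.ndimage import binary_dilation
-
-GLACIER, ROCK, SHADOW = 1, 2, 3  # illustrative class indices of the zone label
-
-def glacier_boundary(zone_label):
-    """Mark every glacier pixel that touches a rock or shadow pixel."""
-    glacier = zone_label == GLACIER
-    rock_or_shadow = np.isin(zone_label, [ROCK, SHADOW])
-    # grow the rock/shadow mask by one pixel (full 3x3 neighbourhood)
-    touches = binary_dilation(rock_or_shadow, structure=np.ones((3, 3), dtype=bool))
-    return glacier & touches
-```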
-
-After the samples are converted and stored in the correct directory, the first nnU-Net
-script preprocesses the data, takes a fingerprint of the dataset, and generates a corresponding
-plan for the training. The training plan contains the number of layers, the kernel size of every
-convolutional layer, the patch size, and the batch size. For the glacier dataset and an NVIDIA RTX 3080
-with 12GB memory, the resulting network architecture has nine encoder blocks and eight
-decoder blocks (see Figure 3.8). Considering that every block has two convolutional layers,
-the network architecture is relatively deep compared to the U-Net presented in [Ron+15].
-Deep networks usually suffer from vanishing gradients. Vanishing gradients are avoided
-in this U-Net with 34 convolutional layers by using deep supervision. Deep supervision is
-explained in more detail in Section 3.6.2. The kernels of all convolutional layers have a
-size of 3x3. During training, one batch contains two patches, each with a size
-of 1024 x 896 pixels. The second nnU-Net script trains the network and stores the trained
-models. The U-Net is trained with an SGD optimizer with an initial learning rate of 0.01,
-a Nesterov momentum of 0.99, and a weight decay of 3e-5. Training of one epoch took
-between 100 s and 160 s. The nnU-Net uses early stopping, but due to limited resources
-the maximum number of epochs (500) is reached in every training. The common way to
-define one epoch is to iterate over every sample of the training set; the nnU-Net instead uses a fixed
-number of iterations (250) to define one epoch. In each iteration, the batch is sampled depending
-on the class distribution of the samples to counteract the class imbalance. After the training,
-the final model is used to predict the test set. The third and last nnU-Net script executes the
-inference and stores the predictions of the test set as NIfTI files. After the test predictions are
-converted back to PNG, the results are evaluated.
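-
-The reported settings correspond to the following optimizer configuration; this is a sketch that
-omits the learning-rate schedule and the rest of the nnU-Net trainer, and `model` is a stand-in
-for the generated U-Net.
-
-```python
-import torch
-import torch.nn as nn
-
-model = nn.Conv2d(3, 3, 3)  # stand-in for the generated U-Net
-optimizer = torch.optim.SGD(
-    model.parameters(),
-    lr=0.01,           # initial learning rate
-    momentum=0.99,     # Nesterov momentum
-    nesterov=True,
-    weight_decay=3e-5,
-)
-```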
-
-A visualization of the training progress of the third experiment with a late-branching
-architecture is shown in the gif below. The gif shows a random sample of the training set.
-The predictions of the nnU-Net after different numbers of epochs are superimposed on the
-input image. In epoch 0, the classes are randomly assigned to the pixels, which leads to a
-noisy pattern in which all classes are equally distributed. After a few epochs, the class
-distribution of the prediction is already close to the target distribution: a small number of
-pixels is classified as the glacier front and a large number of pixels is classified as the glacier.
-The ocean classifications form large clusters, but some of them are falsely located in the
-glacier zone. In the end, the calving front and the ocean are classified correctly, and only
-some parts of the glacier are classified as rock and vice versa. Visually, the predictions
-are similar to the target.
-
-
-
-The evaluation metric measures how accurately, precisely, and robustly the method detects the
-position of the calving front. Additionally, the precision of the glacier zone segmentation is
-meaningful information. The mean distance between the front pixels of the label and the
-front pixels of the prediction is used to evaluate the calving front detection. For every pixel
-in the label front Y, the distance to the closest pixel in the predicted front X is determined.
-Likewise, for every pixel in the predicted front X, the distance to its closest pixel in the label
-front Y is determined. Both sets of distances are averaged and taken as the mean distance between
-the two lines.
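-
-A minimal sketch of this distance using Euclidean distance transforms; it assumes binary masks of
-the predicted front and the label front and averages the distances of both directions in one pool.
-
-```python
-import numpy as np
-from scipy.ndimage import distance_transform_edt
-
-def mean_front_distance(pred_front, label_front):
-    """Symmetric mean distance (in pixels) between two binary front masks."""
-    pred = pred_front.astype(bool)
-    label = label_front.astype(bool)
-    # distance of every pixel to the nearest front pixel of the respective mask
-    dist_to_pred = distance_transform_edt(~pred)
-    dist_to_label = distance_transform_edt(~label)
-    d_label_to_pred = dist_to_pred[label]   # label pixels -> closest predicted pixel
-    d_pred_to_label = dist_to_label[pred]   # predicted pixels -> closest label pixel
-    return np.concatenate([d_label_to_pred, d_pred_to_label]).mean()
-```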
-
-## 5. Results
-The evaluation metrics described above show that both tasks achieve higher
-accuracy with MTL compared to Single-Task Learning (STL). In Figure 6.1, the front
-delineation error of every experiment is compared. The STL approach that is trained on the
-front label has a front delineation error of 1103 ± 72 m, and the STL approach that is trained
-on the zone label has a front delineation error of 1184 ± 225 m. The main difference between the
-STL experiments is that the variance of the performance of the trained models is higher
-when trained on the zone labels.
-
-
-
-The distribution of the test set prediction is plotted in Figure 6.4. In the first row, all
-122 test samples are drawn as dots. The median is the middle line in the orange rectangle,
-and the dashed line represents the mean. The x-axis has a logarithmic scale. Otherwise, the
-outliers would dominate the plot. The rectangle reaches from the first quartile to the third
-quartile. Each quartile contains 25 % of the data points. The rows below represent the
-samples captured during different seasons. The test set contains two glaciers: Mapple and
-COL. The glaciers are located on different hemispheres, therefore the winter and summer
-months are different for each glacier. Winter in the northern hemisphere is from October
-to March, and winter in the southern hemisphere is from April to August. The mean front
-delineation error of the samples captured during the summer months (458 ± 1060 m) is lower
-than that of the samples captured during the winter months (996 ± 1683 m). However, the medians
-are more similar, with 133 m in the summer months and 185 m in the winter months.
-
-
-In this figure, the distribution of the predictions is divided between the two glaciers. The
-front delineation error for the calving front of Mapple is, on average, 127 ± 107 m, while
-the mean error of COL is 1184 ± 1761 m. This is caused by a group of predictions with an
-error > 2000 m. The median value is 275 m for COL and 97 m for Mapple.
-
-
-In this figure, the front delineation error is grouped by satellite. The predictions of
-samples created by ERS, ENVISAT, PALSAR, and TDX have a similar average error
-between 150 m and 300 m. The predictions of samples created by TSX are more precise,
-with 68 ± 59 m, while the predictions of samples created by S1 are less precise, with 2060 ± 2139 m.
-Most test samples are captured by TSX, TDX and S1. TSX and TDX have a resolution of
-6 − 7 m, while S1 has a resolution of 20 m.
-
-
-Calving front prediction of COL on 3.9.2011, 22.6.2014, and 11.2.2016 taken by
-TDX with 7 m2/pixel resolution; label (blue), prediction (yellow), overlap (magenta).
-
-
-(a) Glacier images taken by ERS (20 m2/pixel)
-on 5.2.2007, 20.3.2010, and 8.9.2017.
-(b) Glacier images taken by TSX (7 m2/pixel)
-on 4.11.2008, 2.11.2009, and 2.8.2013.
-Figure 6.9: Calving front prediction of Mapple Glacier; label (blue), prediction (yellow),
-overlap (magenta), bounding box (cyan).
-
-All plots are generated by the files in the directory create_plots_new or by hand.
-
-# vvv Readme of the original git project vvv
-
-**[2020_10_21] Update:** We now have documentation for [common questions](documentation/common_questions.md) and
-[common issues](documentation/common_problems_and_solutions.md). We now also provide [reference epoch times for
-several datasets and tips on how to identify bottlenecks](documentation/expected_epoch_times.md).
-
-Please read these documents before opening a new issue!
-
-# nnU-Net
-
-In 3D biomedical image segmentation, dataset properties like imaging modality, image sizes, voxel spacings, class
-ratios etc vary drastically.
-For example, images in
-the [Liver and Liver Tumor Segmentation Challenge dataset](https://competitions.codalab.org/competitions/17094)
-are computed tomography (CT) scans, about 512x512x512 voxels large, have isotropic voxel spacings and their
-intensity values are quantitative (Hounsfield Units).
-The [Automated Cardiac Diagnosis Challenge dataset](https://acdc.creatis.insa-lyon.fr/) on the other hand shows cardiac
-structures in cine MRI with a typical image shape of 10x320x320 voxels, highly anisotropic voxel spacings and
-qualitative intensity values. In addition, the ACDC dataset suffers from slice misalignments and a heterogeneity of
-out-of-plane spacings which can cause severe interpolation artifacts if not handled properly.
-
-In current research practice, segmentation pipelines are designed manually and with one specific dataset in mind.
-Hereby, many pipeline settings depend directly or indirectly on the properties of the dataset
-and display a complex co-dependence: image size, for example, affects the patch size, which in
-turn affects the required receptive field of the network, a factor that itself influences several other
-hyperparameters in the pipeline. As a result, pipelines that were developed on one (type of) dataset are inherently
-incompatible with other datasets in the domain.
-
-**nnU-Net is the first segmentation method that is designed to deal with the dataset diversity found in the domain. It
-condenses and automates the key decisions for designing a successful segmentation pipeline for any given dataset.**
-
-nnU-Net makes the following contributions to the field:
-
-1. **Standardized baseline:** nnU-Net is the first standardized deep learning benchmark in biomedical segmentation.
- Without manual effort, researchers can compare their algorithms against nnU-Net on an arbitrary number of datasets
- to provide meaningful evidence for proposed improvements.
-2. **Out-of-the-box segmentation method:** nnU-Net is the first plug-and-play tool for state-of-the-art biomedical
- segmentation. Inexperienced users can use nnU-Net out of the box for their custom 3D segmentation problem without
- need for manual intervention.
-3. **Framework:** nnU-Net is a framework for fast and effective development of segmentation methods. Due to its modular
- structure, new architectures and methods can easily be integrated into nnU-Net. Researchers can then benefit from its
- generic nature to roll out and evaluate their modifications on an arbitrary number of datasets in a
- standardized environment.
-
-For more information about nnU-Net, please read the following paper:
-
- Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2020). nnU-Net: a self-configuring method
- for deep learning-based biomedical image segmentation. Nature Methods, 1-9.
-
-Please also cite this paper if you are using nnU-Net for your research!
-
-# Table of Contents
-
-- [Installation](#installation)
-- [Usage](#usage)
- * [How to run nnU-Net on a new dataset](#how-to-run-nnu-net-on-a-new-dataset)
- + [Dataset conversion](#dataset-conversion)
- + [Experiment planning and preprocessing](#experiment-planning-and-preprocessing)
- + [Model training](#model-training)
- - [2D U-Net](#2d-u-net)
- - [3D full resolution U-Net](#3d-full-resolution-u-net)
- - [3D U-Net cascade](#3d-u-net-cascade)
- * [3D low resolution U-Net](#3d-low-resolution-u-net)
- * [3D full resolution U-Net](#3d-full-resolution-u-net-1)
- - [Multi GPU training](#multi-gpu-training)
- + [Identifying the best U-Net configuration](#identifying-the-best-u-net-configuration)
- + [Run inference](#run-inference)
- * [How to run inference with pretrained models](#how-to-run-inference-with-pretrained-models)
- * [Examples](#examples)
-- [Extending/Changing nnU-Net](#extending-or-changing-nnu-net)
-- [Information on run time and potential performance bottlenecks.](#information-on-run-time-and-potential-performance-bottlenecks)
-- [Common questions and issues](#common-questions-and-issues)
-
-# Installation
-
-nnU-Net has been tested on Linux (Ubuntu 16, 18 and 20; centOS, RHEL). We do not provide support for other operating
-systems.
-
-nnU-Net requires a GPU! For inference, the GPU should have 4 GB of VRAM. For training nnU-Net models, the GPU
-should have at least 10 GB (popular non-datacenter options are the RTX 2080ti, RTX 3080 or RTX 3090). Due to the use of automated mixed
-precision, fastest training times are achieved with the Volta architecture (Titan V, V100 GPUs) when installing pytorch
-the easy way. Since pytorch comes with cuDNN 7.6.5 and tensor core acceleration on Turing GPUs is not supported for 3D
-convolutions in this version, you will not get the best training speeds on Turing GPUs. You can remedy that by compiling
-pytorch from source
-(see [here](https://github.com/pytorch/pytorch#from-source)) using cuDNN 8.0.2 or newer. This will unlock Turing GPUs
-(RTX 2080ti, RTX 6000) for automated mixed precision training with 3D convolutions and make the training blistering
-fast as well. Note that future versions of pytorch may include cuDNN 8.0.2 or newer by default and
-compiling from source will not be necessary.
-We don't know the speed of Ampere GPUs with vanilla vs self-compiled pytorch yet - this section will be updated as
-soon as we know.
-
-For training, we recommend a strong CPU to go along with the GPU. At least 6 CPU cores (12 threads) are recommended. CPU
-requirements are mostly related to data augmentation and scale with the number of input channels. They are thus higher
-for datasets like BraTS which use 4 image modalities and lower for datasets like LiTS which only uses CT images.
-
-We very strongly recommend you install nnU-Net in a virtual environment.
-[Here is a quick how-to for Ubuntu.](https://linoxide.com/linux-how-to/setup-python-virtual-environment-ubuntu/)
-If you choose to compile pytorch from source, you will need to use conda instead of pip. In that case, please set the
-environment variable OMP_NUM_THREADS=1 (preferably in your bashrc using `export OMP_NUM_THREADS=1`). This is important!
-
-Python 2 is deprecated and not supported. Please make sure you are using Python 3.
-
-1) Install [PyTorch](https://pytorch.org/get-started/locally/). You need at least version 1.6
-2) Install nnU-Net depending on your use case:
- 1) For use as **standardized baseline**, **out-of-the-box segmentation algorithm** or for running **inference with
- pretrained models**:
-
- ```pip install nnunet```
-
- 2) For use as integrative **framework** (this will create a copy of the nnU-Net code on your computer so that you
- can modify it as needed):
- ```bash
- git clone https://github.com/MIC-DKFZ/nnUNet.git
- cd nnUNet
- pip install -e .
- ```
-3) nnU-Net needs to know where you intend to save raw data, preprocessed data and trained models. For this you need to
-   set a few environment variables. Please follow the instructions [here](documentation/setting_up_paths.md).
-4) (OPTIONAL) Install [hiddenlayer](https://github.com/waleedka/hiddenlayer). hiddenlayer enables nnU-net to generate
- plots of the network topologies it generates (see [Model training](#model-training)). To install hiddenlayer,
- run the following commands:
- ```bash
- pip install --upgrade git+https://github.com/FabianIsensee/hiddenlayer.git@more_plotted_details#egg=hiddenlayer
- ```
-
-Installing nnU-Net will add several new commands to your terminal. These commands are used to run the entire nnU-Net
-pipeline. You can execute them from any location on your system. All nnU-Net commands have the prefix `nnUNet_` for
-easy identification.
-
-Note that these commands simply execute python scripts. If you installed nnU-Net in a virtual environment, this
-environment must be activated when executing the commands.
-
-All nnU-Net commands have a `-h` option which gives information on how to use them.
-
-A typical installation of nnU-Net can be completed in less than 5 minutes. If pytorch needs to be compiled from source
-(which is what we currently recommend when using Turing GPUs), this can extend to more than an hour.
-
-# Usage
-
-To familiarize yourself with nnU-Net we recommend you have a look at the [Examples](#Examples) before you start with
-your own dataset.
-
-## How to run nnU-Net on a new dataset
-
-Given some dataset, nnU-Net fully automatically configures an entire segmentation pipeline that matches its properties.
-nnU-Net covers the entire pipeline, from preprocessing to model configuration, model training, postprocessing
-all the way to ensembling. After running nnU-Net, the trained model(s) can be applied to the test cases for inference.
-
-### Dataset conversion
-
-nnU-Net expects datasets in a structured format. This format closely (but not entirely) follows the data structure of
-the [Medical Segmentation Decthlon](http://medicaldecathlon.com/). Please read
-[this](documentation/dataset_conversion.md) for information on how to convert datasets to be compatible with nnU-Net.
-
-### Experiment planning and preprocessing
-
-As a first step, nnU-Net extracts a dataset fingerprint (a set of dataset-specific properties such as
-image sizes, voxel spacings, intensity information etc). This information is used to create three U-Net configurations:
-a 2D U-Net, a 3D U-Net that operates on full resolution images as well as a 3D U-Net cascade where the first U-Net
-creates a coarse segmentation map in downsampled images which is then refined by the second U-Net.
-
-Provided that the requested raw dataset is located in the correct
-folder (`nnUNet_raw_data_base/nnUNet_raw_data/TaskXXX_MYTASK`,
-also see [here](documentation/dataset_conversion.md)), you can run this step with the following command:
-
-```bash
-nnUNet_plan_and_preprocess -t XXX --verify_dataset_integrity
-```
-
-`XXX` is the integer identifier associated with your Task name `TaskXXX_MYTASK`. You can pass several task IDs at once.
-
-Running `nnUNet_plan_and_preprocess` will populate your folder with preprocessed data. You will find the output in
-nnUNet_preprocessed/TaskXXX_MYTASK. `nnUNet_plan_and_preprocess` creates subfolders with preprocessed data for the 2D
-U-Net as well as all applicable 3D U-Nets. It will also create 'plans' files (with the ending .pkl) for the 2D and
-3D configurations. These files contain the generated segmentation pipeline configuration and will be read by the
-nnUNetTrainer (see below). Note that the preprocessed data folder only contains the training cases.
-The test images are not preprocessed (they are not looked at at all!). Their preprocessing happens on the fly during
-inference.
-
-`--verify_dataset_integrity` should be run at least for the first time the command is run on a given dataset. This will
-execute some
-checks on the dataset to ensure that it is compatible with nnU-Net. If this check has passed once, it can be
-omitted in future runs. If you adhere to the dataset conversion guide (see above) then this should pass without issues :-)
-
-Note that `nnUNet_plan_and_preprocess` accepts several additional input arguments. Running `-h` will list all of them
-along with a description. If you run out of RAM during preprocessing, you may want to adapt the number of processes
-used with the `-tl` and `-tf` options.
-
-After `nnUNet_plan_and_preprocess` is completed, the U-Net configurations have been created and a preprocessed copy
-of the data will be located at nnUNet_preprocessed/TaskXXX_MYTASK.
-
-Extraction of the dataset fingerprint can take from a couple of seconds to several minutes depending on the properties
-of the segmentation task. Pipeline configuration given the extracted fingerprint is nearly instantaneous (a couple
-of seconds). Preprocessing depends on image size and how powerful the CPU is. It can take between seconds and several
-tens of minutes.
-
-### Model training
-
-nnU-Net trains all U-Net configurations in a 5-fold cross-validation. This enables nnU-Net to determine the
-postprocessing and ensembling (see next step) on the training dataset. Per default, all U-Net configurations need to
-be run on a given dataset. There are, however, situations in which only some configurations (and maybe even without
-running the cross-validation) are desired. See [FAQ](documentation/common_questions.md) for more information.
-
-Note that not all U-Net configurations are created for all datasets. In datasets with small image sizes, the U-Net
-cascade is omitted because the patch size of the full resolution U-Net already covers a large part of the input images.
-
-Training models is done with the `nnUNet_train` command. The general structure of the command is:
-
-```bash
-nnUNet_train CONFIGURATION TRAINER_CLASS_NAME TASK_NAME_OR_ID FOLD --npz (additional options)
-```
-
-CONFIGURATION is a string that identifies the requested U-Net configuration. TRAINER_CLASS_NAME is the name of the
-model trainer. If you implement custom trainers (nnU-Net as a framework) you can specify your custom trainer here.
-TASK_NAME_OR_ID specifies what dataset should be trained on and FOLD specifies which fold of the 5-fold-cross-validaton
-is trained.
-
-nnU-Net stores a checkpoint every 50 epochs. If you need to continue a previous training, just add a `-c` to the
-training command.
-
-IMPORTANT: `--npz` makes the models save the softmax outputs during the final validation. It should only be used for
-trainings where you plan to run `nnUNet_find_best_configuration` afterwards
-(this is nnU-Net's automated selection of the best performing (ensemble of) configuration(s), see below). If you are
-developing new trainer classes you may not need the softmax predictions and should therefore omit the `--npz` flag.
-Exported softmax predictions are very large and therefore can take up a lot of disk space.
-If you ran initially without the `--npz` flag but now require the softmax predictions, simply run
-
-```bash
-nnUNet_train CONFIGURATION TRAINER_CLASS_NAME TASK_NAME_OR_ID FOLD -val --npz
-```
-
-to generate them. This will only rerun the validation, not the training.
-
-See `nnUNet_train -h` for additional options.
-
-#### 2D U-Net
-
-For FOLD in [0, 1, 2, 3, 4], run:
-
-```bash
-nnUNet_train 2d nnUNetTrainerV2 TaskXXX_MYTASK FOLD --npz
-```
-
-#### 3D full resolution U-Net
-
-For FOLD in [0, 1, 2, 3, 4], run:
-
-```bash
-nnUNet_train 3d_fullres nnUNetTrainerV2 TaskXXX_MYTASK FOLD --npz
-```
-
-#### 3D U-Net cascade
-
-##### 3D low resolution U-Net
-
-For FOLD in [0, 1, 2, 3, 4], run:
-
-```bash
-nnUNet_train 3d_lowres nnUNetTrainerV2 TaskXXX_MYTASK FOLD --npz
-```
-
-##### 3D full resolution U-Net
-
-For FOLD in [0, 1, 2, 3, 4], run:
-
-```bash
-nnUNet_train 3d_cascade_fullres nnUNetTrainerV2CascadeFullRes TaskXXX_MYTASK FOLD --npz
-```
-
-Note that the 3D full resolution U-Net of the cascade requires the five folds of the low resolution U-Net to be
-completed beforehand!
-
-The trained models will be written to the RESULTS_FOLDER/nnUNet folder. Each training obtains an automatically generated
-output folder name:
-
-RESULTS_FOLDER/nnUNet/CONFIGURATION/TaskXXX_MYTASKNAME/TRAINER_CLASS_NAME__PLANS_FILE_NAME/FOLD
-
-For Task002_Heart (from the MSD), for example, this looks like this:
-
- RESULTS_FOLDER/nnUNet/
- ├── 2d
- │ └── Task02_Heart
- │ └── nnUNetTrainerV2__nnUNetPlansv2.1
- │ ├── fold_0
- │ ├── fold_1
- │ ├── fold_2
- │ ├── fold_3
- │ └── fold_4
- ├── 3d_cascade_fullres
- ├── 3d_fullres
- │ └── Task02_Heart
- │ └── nnUNetTrainerV2__nnUNetPlansv2.1
- │ ├── fold_0
- │ │ ├── debug.json
- │ │ ├── model_best.model
- │ │ ├── model_best.model.pkl
- │ │ ├── model_final_checkpoint.model
- │ │ ├── model_final_checkpoint.model.pkl
- │ │ ├── network_architecture.pdf
- │ │ ├── progress.png
- │ │ └── validation_raw
- │ │ ├── la_007.nii.gz
- │ │ ├── la_007.pkl
- │ │ ├── la_016.nii.gz
- │ │ ├── la_016.pkl
- │ │ ├── la_021.nii.gz
- │ │ ├── la_021.pkl
- │ │ ├── la_024.nii.gz
- │ │ ├── la_024.pkl
- │ │ ├── summary.json
- │ │ └── validation_args.json
- │ ├── fold_1
- │ ├── fold_2
- │ ├── fold_3
- │ └── fold_4
- └── 3d_lowres
-
-Note that 3d_lowres and 3d_cascade_fullres are not populated because this dataset did not trigger the cascade. In each
-model training output folder (each of the fold_x folders, 10 in total here), the following files will be created (only
-shown for one folder above for brevity):
-
-- debug.json: Contains a summary of blueprint and inferred parameters used for training this model. Not easy to read,
- but very useful for debugging ;-)
-- model_best.model / model_best.model.pkl: checkpoint files of the best model identified during training. Not used right
- now.
-- model_final_checkpoint.model / model_final_checkpoint.model.pkl: checkpoint files of the final model (after training
- has ended). This is what is used for both validation and inference.
-- network_architecture.pdf (only if hiddenlayer is installed!): a pdf document with a figure of the network architecture
- in it.
-- progress.png: A plot of the training (blue) and validation (red) loss during training. Also shows an approximation of
-  the evaluation metric (green). This approximation is the average Dice score of the foreground classes. It should,
-  however, only be taken with a grain of salt because it is computed on randomly drawn patches from the validation
-  data at the end of each epoch, and the aggregation of TP, FP and FN for the Dice computation treats the patches as if
-  they all originate from the same volume ('global Dice'; we do not compute a Dice for each validation case and then
-  average over all cases but pretend that there is only one validation case from which we sample patches). The reason
-  for this is that the 'global Dice' is easy to compute during training and is still quite useful to evaluate whether a
-  model is training at all or not. A proper validation is run at the end of the training.
-- validation_raw: in this folder are the predicted validation cases after the training has finished. The summary.json
- contains the validation metrics (a mean over all cases is provided at the end of the file).
-
-During training it is often useful to watch the progress. We therefore recommend that you have a look at the generated
-progress.png when running the first training. It will be updated after each epoch.
-
-Training times largely depend on the GPU. The smallest GPU we recommend for training is the Nvidia RTX 2080ti. With
-this GPU (and pytorch compiled with cuDNN 8.0.2), all network trainings take less than 2 days.
-
-#### Multi GPU training
-
-**Multi GPU training is experimental and NOT RECOMMENDED!**
-
-nnU-Net supports two different multi-GPU implementations: DataParallel (DP) and Distributed Data Parallel (DDP)
-(but currently only on one host!). DDP is faster than DP and should be preferred if possible. However, if you did not
-install nnunet as a framework (meaning you used the `pip install nnunet` variant), DDP is not available. It requires a
-different way of calling the correct python script (see below) which we cannot support from our terminal commands.
-
-Distributed training currently only works for the basic trainers (2D, 3D full resolution and 3D low resolution) and not
-for the second, high resolution U-Net of the cascade. The reason for this is that distributed training requires some
-changes to the network and loss function, requiring a new nnUNet trainer class. This is, as of now, simply not
-implemented for the cascade, but may be added in the future.
-
-To run distributed training (DP), use the following command:
-
-```bash
-CUDA_VISIBLE_DEVICES=0,1,2... nnUNet_train_DP CONFIGURATION nnUNetTrainerV2_DP TASK_NAME_OR_ID FOLD -gpus GPUS --dbs
-```
-
-Note that nnUNetTrainerV2 was replaced with nnUNetTrainerV2_DP. Just like before, CONFIGURATION can be 2d, 3d_lowres or
-3d_fullres. TASK_NAME_OR_ID refers to the task you would like to train and FOLD is the fold of the cross-validation.
-GPUS (integer value) specifies the number of GPUs you wish to train on. To specify which GPUs you want to use, please
-make use of the CUDA_VISIBLE_DEVICES environment variable to specify the GPU ids (specify as many as you configure with -gpus GPUS).
---dbs, if set, will distribute the batch size across GPUs. So if nnUNet configures a batch size of 2 and you run on 2
-GPUs, each GPU will run with a batch size of 1. If you omit --dbs, each GPU will run with the full batch size (2 for each
-GPU in this example, for a total batch size of 4).
-
-To run the DDP training you must have nnU-Net installed as a framework. Your current working directory must be the
-nnunet folder (the one that has the dataset_conversion, evaluation, experiment_planning, ... subfolders!). You can then
-run
-the DDP training with the following command:
-
-```bash
-CUDA_VISIBLE_DEVICES=0,1,2... python -m torch.distributed.launch --master_port=XXXX --nproc_per_node=Y run/run_training_DDP.py CONFIGURATION nnUNetTrainerV2_DDP TASK_NAME_OR_ID FOLD --dbs
-```
-
-XXXX must be an open port for process-process communication (something like 4321 will do on most systems). Y is the
-number of GPUs you wish to use. Remember that we do not (yet) support distributed training across compute nodes. This
-all happens on the same system. Again, you can use CUDA_VISIBLE_DEVICES=0,1,2 to control what GPUs are used.
-If you run more than one DDP training on the same system (say you have 4 GPUs and you run two trainings with 2 GPUs each)
-you need to specify a different --master_port for each training!
-
-*IMPORTANT!*
-Multi-GPU training results in models that cannot be used for inference easily (as said above, all of this is
-experimental ;-) ).
-After finishing the training of all folds, run `nnUNet_change_trainer_class` on the folder where the trained model is
-(see `nnUNet_change_trainer_class -h` for instructions). After that you can run inference.
-
-### Identifying the best U-Net configuration
-
-Once all models are trained, use the following
-command to automatically determine what U-Net configuration(s) to use for test set prediction:
-
-```bash
-nnUNet_find_best_configuration -m 2d 3d_fullres 3d_lowres 3d_cascade_fullres -t XXX --strict
-```
-
-(all 5 folds need to be completed for all specified configurations!)
-
-On datasets for which the cascade was not configured, use `-m 2d 3d_fullres` instead. If you wish to only explore some
-subset of the configurations, you can specify that with the `-m` command. We recommend setting the
-`--strict` (crash if one of the requested configurations is
-missing) flag. Additional options are available (use `-h` for help).
-
-### Run inference
-
-Remember that the data located in the input folder must adhere to the format specified
-[here](documentation/data_format_inference.md).
-
-`nnUNet_find_best_configuration` will print a string to the terminal with the inference commands you need to use.
-The easiest way to run inference is to simply use these commands.
-
-If you wish to manually specify the configuration(s) used for inference, use the following commands:
-
-For each of the desired configurations, run:
-
-```
-nnUNet_predict -i INPUT_FOLDER -o OUTPUT_FOLDER -t TASK_NAME_OR_ID -m CONFIGURATION --save_npz
-```
-
-Only specify `--save_npz` if you intend to use ensembling. `--save_npz` will make the command save the softmax
-probabilities alongside the predicted segmentation masks, which requires a lot of disk space.
-
-Please select a separate `OUTPUT_FOLDER` for each configuration!
-
-If you wish to run ensembling, you can ensemble the predictions from several configurations with the following command:
-
-```bash
-nnUNet_ensemble -f FOLDER1 FOLDER2 ... -o OUTPUT_FOLDER -pp POSTPROCESSING_FILE
-```
-
-You can specify an arbitrary number of folders, but remember that each folder needs to contain npz files that were
-generated by `nnUNet_predict`. For ensembling you can also specify a file that tells the command how to postprocess.
-These files are created when running `nnUNet_find_best_configuration` and are located in the respective trained model
-directory (
-RESULTS_FOLDER/nnUNet/CONFIGURATION/TaskXXX_MYTASK/TRAINER_CLASS_NAME__PLANS_FILE_IDENTIFIER/postprocessing.json or
-RESULTS_FOLDER/nnUNet/ensembles/TaskXXX_MYTASK/ensemble_X__Y__Z--X__Y__Z/postprocessing.json). You can also choose to
-not provide a file (simply omit -pp) and nnU-Net will not run postprocessing.
-
-Note that per default, inference will be done with all available folds. We very strongly recommend you use all 5 folds.
-Thus, all 5 folds must have been trained prior to running inference. The list of available folds nnU-Net found will be
-printed at the start of the inference.
-
-## How to run inference with pretrained models
-
-Trained models for all challenges we participated in are publicly available. They can be downloaded and installed
-directly with nnU-Net. Note that downloading a pretrained model will overwrite other models that were trained with
-exactly the same configuration (2d, 3d_fullres, ...), trainer (nnUNetTrainerV2) and plans.
-
-To obtain a list of available models, as well as a short description, run
-
-```bash
-nnUNet_print_available_pretrained_models
-```
-
-You can then download models by specifying their task name. For the Liver and Liver Tumor Segmentation Challenge,
-for example, this would be:
-
-```bash
-nnUNet_download_pretrained_model Task029_LiTS
-```
-
-After downloading is complete, you can use this model to run [inference](#run-inference). Keep in mind that each of
-these models has specific data requirements (Task029_LiTS runs on abdominal CT scans, others require several image
-modalities as input in a specific order).
-
-When using the pretrained models you must adhere to the license of the dataset they are trained on! If you run
-`nnUNet_download_pretrained_model` you will find a link where you can find the license for each dataset.
-
-## Examples
-
-To get you started we compiled two simple to follow examples:
-
-- run a training with the 3d full resolution U-Net on the Hippocampus dataset.
- See [here](documentation/training_example_Hippocampus.md).
-- run inference with nnU-Net's pretrained models on the Prostate dataset.
- See [here](documentation/inference_example_Prostate.md).
-
-Usability not good enough? Let us know!
-
-# Extending or Changing nnU-Net
-
-Please refer to [this](documentation/extending_nnunet.md) guide.
-
-# Information on run time and potential performance bottlenecks.
-
-We have compiled a list of expected epoch times on standardized datasets across many different GPUs. You can use them
-to verify that your system is performing as expected. There are also tips on how to identify bottlenecks and what
-to do about them.
-
-Click [here](documentation/expected_epoch_times.md).
-
-# Common questions and issues
-
-We have collected solutions to common [questions](documentation/common_questions.md) and
-[problems](documentation/common_problems_and_solutions.md). Please consult these documents before you open a new issue.
-
---------------------
-
-
-
-nnU-Net is developed and maintained by the Applied Computer Vision Lab (ACVL) of
-the [Helmholtz Imaging Platform](http://helmholtz-imaging.de).
diff --git a/spaces/huaiji3y/bingo-Public/src/components/theme-toggle.tsx b/spaces/huaiji3y/bingo-Public/src/components/theme-toggle.tsx
deleted file mode 100644
index 67d3f1a2c163ccbeb52c40a7e42f107190237154..0000000000000000000000000000000000000000
--- a/spaces/huaiji3y/bingo-Public/src/components/theme-toggle.tsx
+++ /dev/null
@@ -1,31 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import { useTheme } from 'next-themes'
-
-import { Button } from '@/components/ui/button'
-import { IconMoon, IconSun } from '@/components/ui/icons'
-
-export function ThemeToggle() {
- const { setTheme, theme } = useTheme()
- const [_, startTransition] = React.useTransition()
-
-  return (
-    <Button
-      onClick={() => {
-        startTransition(() => {
-          setTheme(theme === 'light' ? 'dark' : 'light')
-        })
-      }}
-    >
-      {!theme ? null : theme === 'dark' ? (
-        <IconMoon />
-      ) : (
-        <IconSun />
-      )}
-      <span className="sr-only">Toggle theme</span>
-    </Button>
-  )
-}
diff --git a/spaces/hugggof/vampnet/scripts/exp/train.py b/spaces/hugggof/vampnet/scripts/exp/train.py
deleted file mode 100644
index d276bc29421a796346c027b711481921b3e51d6b..0000000000000000000000000000000000000000
--- a/spaces/hugggof/vampnet/scripts/exp/train.py
+++ /dev/null
@@ -1,679 +0,0 @@
-import os
-import sys
-import warnings
-from pathlib import Path
-from typing import Optional
-from dataclasses import dataclass
-
-import argbind
-import audiotools as at
-import torch
-import torch.nn as nn
-from audiotools import AudioSignal
-from audiotools.data import transforms
-from einops import rearrange
-from rich import pretty
-from rich.traceback import install
-from torch.utils.tensorboard import SummaryWriter
-
-import vampnet
-from vampnet.modules.transformer import VampNet
-from vampnet.util import codebook_unflatten, codebook_flatten
-from vampnet import mask as pmask
-# from dac.model.dac import DAC
-from lac.model.lac import LAC as DAC
-
-from audiotools.ml.decorators import (
- timer, Tracker, when
-)
-
-import loralib as lora
-
-import torch._dynamo
-torch._dynamo.config.verbose=True
-
-
-# Enable cudnn autotuner to speed up training
-# (can be altered by the funcs.seed function)
-torch.backends.cudnn.benchmark = bool(int(os.getenv("CUDNN_BENCHMARK", 1)))
-# Uncomment to trade memory for speed.
-
-# Install to make things look nice
-warnings.filterwarnings("ignore", category=UserWarning)
-pretty.install()
-install()
-
-# optim
-Accelerator = argbind.bind(at.ml.Accelerator, without_prefix=True)
-CrossEntropyLoss = argbind.bind(nn.CrossEntropyLoss)
-AdamW = argbind.bind(torch.optim.AdamW)
-NoamScheduler = argbind.bind(vampnet.scheduler.NoamScheduler)
-
-# transforms
-filter_fn = lambda fn: hasattr(fn, "transform") and fn.__qualname__ not in [
- "BaseTransform",
- "Compose",
- "Choose",
-]
-tfm = argbind.bind_module(transforms, "train", "val", filter_fn=filter_fn)
-
-# model
-VampNet = argbind.bind(VampNet)
-
-
-# data
-AudioLoader = argbind.bind(at.datasets.AudioLoader)
-AudioDataset = argbind.bind(at.datasets.AudioDataset, "train", "val")
-
-IGNORE_INDEX = -100
-
-
-@argbind.bind("train", "val", without_prefix=True)
-def build_transform():
- transform = transforms.Compose(
- tfm.VolumeNorm(("const", -24)),
- # tfm.PitchShift(),
- tfm.RescaleAudio(),
- )
- return transform
-
-
-@torch.no_grad()
-def apply_transform(transform_fn, batch):
- sig: AudioSignal = batch["signal"]
- kwargs = batch["transform_args"]
-
- sig: AudioSignal = transform_fn(sig.clone(), **kwargs)
- return sig
-
-
-def build_datasets(args, sample_rate: int):
- with argbind.scope(args, "train"):
- train_data = AudioDataset(
- AudioLoader(), sample_rate, transform=build_transform()
- )
- with argbind.scope(args, "val"):
- val_data = AudioDataset(AudioLoader(), sample_rate, transform=build_transform())
- return train_data, val_data
-
-
-def rand_float(shape, low, high, rng):
-    # Draw `shape` quasirandom values in [0, 1) from the Sobol engine and rescale to [low, high).
-    return rng.draw(shape)[:, 0] * (high - low) + low
-
-
-def flip_coin(shape, p, rng):
-    # Quasirandom coin flip: True with probability p.
-    return rng.draw(shape)[:, 0] < p
-
-
-def num_params_hook(o, p):
- return o + f" {p/1e6:<.3f}M params."
-
-
-def add_num_params_repr_hook(model):
- import numpy as np
- from functools import partial
-
- for n, m in model.named_modules():
- o = m.extra_repr()
- p = sum([np.prod(p.size()) for p in m.parameters()])
-
- setattr(m, "extra_repr", partial(num_params_hook, o=o, p=p))
-
-
-def accuracy(
- preds: torch.Tensor,
- target: torch.Tensor,
- top_k: int = 1,
- ignore_index: Optional[int] = None,
-) -> torch.Tensor:
- # Flatten the predictions and targets to be of shape (batch_size * sequence_length, n_class)
- preds = rearrange(preds, "b p s -> (b s) p")
- target = rearrange(target, "b s -> (b s)")
-
- # return torchmetrics.functional.accuracy(preds, target, task='multiclass', top_k=topk, num_classes=preds.shape[-1], ignore_index=ignore_index)
- if ignore_index is not None:
- # Create a mask for the ignored index
- mask = target != ignore_index
- # Apply the mask to the target and predictions
- preds = preds[mask]
- target = target[mask]
-
- # Get the top-k predicted classes and their indices
- _, pred_indices = torch.topk(preds, k=top_k, dim=-1)
-
- # Determine if the true target is in the top-k predicted classes
- correct = torch.sum(torch.eq(pred_indices, target.unsqueeze(1)), dim=1)
-
- # Calculate the accuracy
- accuracy = torch.mean(correct.float())
-
- return accuracy
-
-def _metrics(z_hat, r, target, flat_mask, output):
- for r_range in [(0, 0.5), (0.5, 1.0)]:
- unmasked_target = target.masked_fill(flat_mask.bool(), IGNORE_INDEX)
- masked_target = target.masked_fill(~flat_mask.bool(), IGNORE_INDEX)
-
- assert target.shape[0] == r.shape[0]
- # grab the indices of the r values that are in the range
- r_idx = (r >= r_range[0]) & (r < r_range[1])
-
- # grab the target and z_hat values that are in the range
- r_unmasked_target = unmasked_target[r_idx]
- r_masked_target = masked_target[r_idx]
- r_z_hat = z_hat[r_idx]
-
- for topk in (1, 25):
- s, e = r_range
- tag = f"accuracy-{s}-{e}/top{topk}"
-
- output[f"{tag}/unmasked"] = accuracy(
- preds=r_z_hat,
- target=r_unmasked_target,
- ignore_index=IGNORE_INDEX,
- top_k=topk,
- )
- output[f"{tag}/masked"] = accuracy(
- preds=r_z_hat,
- target=r_masked_target,
- ignore_index=IGNORE_INDEX,
- top_k=topk,
- )
-
-
-@dataclass
-class State:
- model: VampNet
- codec: DAC
-
- optimizer: AdamW
- scheduler: NoamScheduler
- criterion: CrossEntropyLoss
- grad_clip_val: float
-
- rng: torch.quasirandom.SobolEngine
-
- train_data: AudioDataset
- val_data: AudioDataset
-
- tracker: Tracker
-
-
-@timer()
-def train_loop(state: State, batch: dict, accel: Accelerator):
- state.model.train()
- batch = at.util.prepare_batch(batch, accel.device)
- signal = apply_transform(state.train_data.transform, batch)
-
- output = {}
- vn = accel.unwrap(state.model)
- with accel.autocast():
- with torch.inference_mode():
- state.codec.to(accel.device)
- z = state.codec.encode(signal.samples, signal.sample_rate)["codes"]
- z = z[:, : vn.n_codebooks, :]
-
- n_batch = z.shape[0]
- r = state.rng.draw(n_batch)[:, 0].to(accel.device)
-
- mask = pmask.random(z, r)
- mask = pmask.codebook_unmask(mask, vn.n_conditioning_codebooks)
- z_mask, mask = pmask.apply_mask(z, mask, vn.mask_token)
-
- z_mask_latent = vn.embedding.from_codes(z_mask, state.codec)
-
- dtype = torch.bfloat16 if accel.amp else None
- with accel.autocast(dtype=dtype):
- z_hat = state.model(z_mask_latent)
-
- target = codebook_flatten(
- z[:, vn.n_conditioning_codebooks :, :],
- )
-
- flat_mask = codebook_flatten(
- mask[:, vn.n_conditioning_codebooks :, :],
- )
-
- # replace target with ignore index for masked tokens
- t_masked = target.masked_fill(~flat_mask.bool(), IGNORE_INDEX)
- output["loss"] = state.criterion(z_hat, t_masked)
-
- _metrics(
- r=r,
- z_hat=z_hat,
- target=target,
- flat_mask=flat_mask,
- output=output,
- )
-
-
- accel.backward(output["loss"])
-
- output["other/learning_rate"] = state.optimizer.param_groups[0]["lr"]
- output["other/batch_size"] = z.shape[0]
-
-
- accel.scaler.unscale_(state.optimizer)
- output["other/grad_norm"] = torch.nn.utils.clip_grad_norm_(
- state.model.parameters(), state.grad_clip_val
- )
-
- accel.step(state.optimizer)
- state.optimizer.zero_grad()
-
- state.scheduler.step()
- accel.update()
-
-
- return {k: v for k, v in sorted(output.items())}
-
-
-@timer()
-@torch.no_grad()
-def val_loop(state: State, batch: dict, accel: Accelerator):
- state.model.eval()
- state.codec.eval()
- batch = at.util.prepare_batch(batch, accel.device)
- signal = apply_transform(state.val_data.transform, batch)
-
- vn = accel.unwrap(state.model)
- z = state.codec.encode(signal.samples, signal.sample_rate)["codes"]
- z = z[:, : vn.n_codebooks, :]
-
- n_batch = z.shape[0]
- r = state.rng.draw(n_batch)[:, 0].to(accel.device)
-
- mask = pmask.random(z, r)
- mask = pmask.codebook_unmask(mask, vn.n_conditioning_codebooks)
- z_mask, mask = pmask.apply_mask(z, mask, vn.mask_token)
-
- z_mask_latent = vn.embedding.from_codes(z_mask, state.codec)
-
- z_hat = state.model(z_mask_latent)
-
- target = codebook_flatten(
- z[:, vn.n_conditioning_codebooks :, :],
- )
-
- flat_mask = codebook_flatten(
- mask[:, vn.n_conditioning_codebooks :, :]
- )
-
- output = {}
- # replace target with ignore index for masked tokens
- t_masked = target.masked_fill(~flat_mask.bool(), IGNORE_INDEX)
- output["loss"] = state.criterion(z_hat, t_masked)
-
- _metrics(
- r=r,
- z_hat=z_hat,
- target=target,
- flat_mask=flat_mask,
- output=output,
- )
-
- return output
-
-
-def validate(state, val_dataloader, accel):
- for batch in val_dataloader:
- output = val_loop(state, batch, accel)
- # Consolidate state dicts if using ZeroRedundancyOptimizer
- if hasattr(state.optimizer, "consolidate_state_dict"):
- state.optimizer.consolidate_state_dict()
- return output
-
-
-def checkpoint(state, save_iters, save_path, fine_tune):
- if accel.local_rank != 0:
- state.tracker.print(f"ERROR:Skipping checkpoint on rank {accel.local_rank}")
- return
-
- metadata = {"logs": dict(state.tracker.history)}
-
- tags = ["latest"]
- state.tracker.print(f"Saving to {str(Path('.').absolute())}")
-
- if state.tracker.step in save_iters:
- tags.append(f"{state.tracker.step // 1000}k")
-
- if state.tracker.is_best("val", "loss"):
- state.tracker.print(f"Best model so far")
- tags.append("best")
-
- if fine_tune:
- for tag in tags:
- # save the lora model
- (Path(save_path) / tag).mkdir(parents=True, exist_ok=True)
- torch.save(
- lora.lora_state_dict(accel.unwrap(state.model)),
- f"{save_path}/{tag}/lora.pth"
- )
-
- for tag in tags:
- model_extra = {
- "optimizer.pth": state.optimizer.state_dict(),
- "scheduler.pth": state.scheduler.state_dict(),
- "tracker.pth": state.tracker.state_dict(),
- "metadata.pth": metadata,
- }
-
- accel.unwrap(state.model).metadata = metadata
- accel.unwrap(state.model).save_to_folder(
- f"{save_path}/{tag}", model_extra, package=False
- )
-
-
-def save_sampled(state, z, writer):
- num_samples = z.shape[0]
-
- for i in range(num_samples):
- sampled = accel.unwrap(state.model).generate(
- codec=state.codec,
- time_steps=z.shape[-1],
- start_tokens=z[i : i + 1],
- )
- sampled.cpu().write_audio_to_tb(
- f"sampled/{i}",
- writer,
- step=state.tracker.step,
- plot_fn=None,
- )
-
-
-def save_imputation(state, z, val_idx, writer):
- n_prefix = int(z.shape[-1] * 0.25)
- n_suffix = int(z.shape[-1] * 0.25)
-
- vn = accel.unwrap(state.model)
-
- mask = pmask.inpaint(z, n_prefix, n_suffix)
- mask = pmask.codebook_unmask(mask, vn.n_conditioning_codebooks)
- z_mask, mask = pmask.apply_mask(z, mask, vn.mask_token)
-
- imputed_noisy = vn.to_signal(z_mask, state.codec)
- imputed_true = vn.to_signal(z, state.codec)
-
- imputed = []
- for i in range(len(z)):
- imputed.append(
- vn.generate(
- codec=state.codec,
- time_steps=z.shape[-1],
- start_tokens=z[i][None, ...],
- mask=mask[i][None, ...],
- )
- )
- imputed = AudioSignal.batch(imputed)
-
- for i in range(len(val_idx)):
- imputed_noisy[i].cpu().write_audio_to_tb(
- f"inpainted_prompt/{i}",
- writer,
- step=state.tracker.step,
- plot_fn=None,
- )
- imputed[i].cpu().write_audio_to_tb(
- f"inpainted_middle/{i}",
- writer,
- step=state.tracker.step,
- plot_fn=None,
- )
- imputed_true[i].cpu().write_audio_to_tb(
- f"reconstructed/{i}",
- writer,
- step=state.tracker.step,
- plot_fn=None,
- )
-
-
-@torch.no_grad()
-def save_samples(state: State, val_idx: int, writer: SummaryWriter):
- state.model.eval()
- state.codec.eval()
- vn = accel.unwrap(state.model)
-
- batch = [state.val_data[i] for i in val_idx]
- batch = at.util.prepare_batch(state.val_data.collate(batch), accel.device)
-
- signal = apply_transform(state.val_data.transform, batch)
-
- z = state.codec.encode(signal.samples, signal.sample_rate)["codes"]
- z = z[:, : vn.n_codebooks, :]
-
- r = torch.linspace(0.1, 0.95, len(val_idx)).to(accel.device)
-
-
- mask = pmask.random(z, r)
- mask = pmask.codebook_unmask(mask, vn.n_conditioning_codebooks)
- z_mask, mask = pmask.apply_mask(z, mask, vn.mask_token)
-
- z_mask_latent = vn.embedding.from_codes(z_mask, state.codec)
-
- z_hat = state.model(z_mask_latent)
-
- z_pred = torch.softmax(z_hat, dim=1).argmax(dim=1)
- z_pred = codebook_unflatten(z_pred, n_c=vn.n_predict_codebooks)
- z_pred = torch.cat([z[:, : vn.n_conditioning_codebooks, :], z_pred], dim=1)
-
- generated = vn.to_signal(z_pred, state.codec)
- reconstructed = vn.to_signal(z, state.codec)
- masked = vn.to_signal(z_mask.squeeze(1), state.codec)
-
- for i in range(generated.batch_size):
- audio_dict = {
- "original": signal[i],
- "masked": masked[i],
- "generated": generated[i],
- "reconstructed": reconstructed[i],
- }
- for k, v in audio_dict.items():
- v.cpu().write_audio_to_tb(
- f"onestep/_{i}.r={r[i]:0.2f}/{k}",
- writer,
- step=state.tracker.step,
- plot_fn=None,
- )
-
- save_sampled(state=state, z=z, writer=writer)
- save_imputation(state=state, z=z, val_idx=val_idx, writer=writer)
-
-
-
-@argbind.bind(without_prefix=True)
-def load(
- args,
- accel: at.ml.Accelerator,
- tracker: Tracker,
- save_path: str,
- resume: bool = False,
- tag: str = "latest",
- fine_tune_checkpoint: Optional[str] = None,
- grad_clip_val: float = 5.0,
-) -> State:
- codec = DAC.load(args["codec_ckpt"], map_location="cpu")
- codec.eval()
-
- model, v_extra = None, {}
-
- if resume:
- kwargs = {
- "folder": f"{save_path}/{tag}",
- "map_location": "cpu",
- "package": False,
- }
- tracker.print(f"Loading checkpoint from {kwargs['folder']}")
- if (Path(kwargs["folder"]) / "vampnet").exists():
- model, v_extra = VampNet.load_from_folder(**kwargs)
- else:
- raise ValueError(
- f"Could not find a VampNet checkpoint in {kwargs['folder']}"
- )
-
-
- if args["fine_tune"]:
- assert fine_tune_checkpoint is not None, "Must provide a fine-tune checkpoint"
- model = torch.compile(
- VampNet.load(location=Path(fine_tune_checkpoint),
- map_location="cpu",
- )
- )
-
-
- model = torch.compile(VampNet()) if model is None else model
- model = accel.prepare_model(model)
-
- # assert accel.unwrap(model).n_codebooks == codec.quantizer.n_codebooks
- assert (
- accel.unwrap(model).vocab_size == codec.quantizer.quantizers[0].codebook_size
- )
-
- optimizer = AdamW(model.parameters(), use_zero=accel.use_ddp)
- scheduler = NoamScheduler(optimizer, d_model=accel.unwrap(model).embedding_dim)
- scheduler.step()
-
- if "optimizer.pth" in v_extra:
- optimizer.load_state_dict(v_extra["optimizer.pth"])
- scheduler.load_state_dict(v_extra["scheduler.pth"])
- if "tracker.pth" in v_extra:
- tracker.load_state_dict(v_extra["tracker.pth"])
-
- criterion = CrossEntropyLoss()
-
- sample_rate = codec.sample_rate
-
- # a better rng for sampling from our schedule
- rng = torch.quasirandom.SobolEngine(1, scramble=True, seed=args["seed"])
-
- # log a model summary w/ num params
- if accel.local_rank == 0:
- add_num_params_repr_hook(accel.unwrap(model))
- with open(f"{save_path}/model.txt", "w") as f:
- f.write(repr(accel.unwrap(model)))
-
- # load the datasets
- train_data, val_data = build_datasets(args, sample_rate)
-
- return State(
- tracker=tracker,
- model=model,
- codec=codec,
- optimizer=optimizer,
- scheduler=scheduler,
- criterion=criterion,
- rng=rng,
- train_data=train_data,
- val_data=val_data,
- grad_clip_val=grad_clip_val,
- )
-
-
-@argbind.bind(without_prefix=True)
-def train(
- args,
- accel: at.ml.Accelerator,
- seed: int = 0,
- codec_ckpt: Optional[str] = None,
- save_path: str = "ckpt",
- num_iters: int = int(1000e6),
- save_iters: list = [10000, 50000, 100000, 300000, 500000,],
- sample_freq: int = 10000,
- val_freq: int = 1000,
- batch_size: int = 12,
- val_idx: list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
- num_workers: int = 10,
- fine_tune: bool = False,
-):
- assert codec_ckpt is not None, "codec_ckpt is required"
-
- seed = seed + accel.local_rank
- at.util.seed(seed)
- writer = None
-
- if accel.local_rank == 0:
- writer = SummaryWriter(log_dir=f"{save_path}/logs/")
- argbind.dump_args(args, f"{save_path}/args.yml")
-
- tracker = Tracker(
- writer=writer, log_file=f"{save_path}/log.txt", rank=accel.local_rank
- )
-
- # load the codec model
- state: State = load(
- args=args,
- accel=accel,
- tracker=tracker,
- save_path=save_path)
- print("initialized state.")
-
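- # start the train loader at the sample offset implied by the current step so a resumed run does not replay data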
- train_dataloader = accel.prepare_dataloader(
- state.train_data,
- start_idx=state.tracker.step * batch_size,
- num_workers=num_workers,
- batch_size=batch_size,
- collate_fn=state.train_data.collate,
- )
- val_dataloader = accel.prepare_dataloader(
- state.val_data,
- start_idx=0,
- num_workers=num_workers,
- batch_size=batch_size,
- collate_fn=state.val_data.collate,
- persistent_workers=num_workers > 0,
- )
- print("initialized dataloader.")
-
- if fine_tune:
- lora.mark_only_lora_as_trainable(state.model)
- print("marked only lora as trainable.")
-
- # Wrap the functions so that they neatly track in TensorBoard + progress bars
- # and only run when specific conditions are met.
- global train_loop, val_loop, validate, save_samples, checkpoint
-
- train_loop = tracker.log("train", "value", history=False)(
- tracker.track("train", num_iters, completed=state.tracker.step)(train_loop)
- )
- val_loop = tracker.track("val", len(val_dataloader))(val_loop)
- validate = tracker.log("val", "mean")(validate)
-
- save_samples = when(lambda: accel.local_rank == 0)(save_samples)
- checkpoint = when(lambda: accel.local_rank == 0)(checkpoint)
-
- print("starting training loop.")
- with tracker.live:
- for tracker.step, batch in enumerate(train_dataloader, start=tracker.step):
- train_loop(state, batch, accel)
-
- last_iter = (
- tracker.step == num_iters - 1 if num_iters is not None else False
- )
-
- if tracker.step % sample_freq == 0 or last_iter:
- save_samples(state, val_idx, writer)
-
- if tracker.step % val_freq == 0 or last_iter:
- validate(state, val_dataloader, accel)
- checkpoint(
- state=state,
- save_iters=save_iters,
- save_path=save_path,
- fine_tune=fine_tune)
-
- # Reset validation progress bar, print summary since last validation.
- tracker.done("val", f"Iteration {tracker.step}")
-
- if last_iter:
- break
-
-
-if __name__ == "__main__":
- args = argbind.parse_args()
- args["args.debug"] = int(os.getenv("LOCAL_RANK", 0)) == 0
- with argbind.scope(args):
- with Accelerator() as accel:
- if accel.local_rank != 0:
- sys.tracebacklimit = 0
- train(args, accel)
diff --git a/spaces/hylee/apdrawing/APDrawingGAN2/options/__init__.py b/spaces/hylee/apdrawing/APDrawingGAN2/options/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/hysts/mmdetection/images/README.md b/spaces/hysts/mmdetection/images/README.md
deleted file mode 100644
index 7bf8a7c50604f01a06c4de6720a350e9666e487d..0000000000000000000000000000000000000000
--- a/spaces/hysts/mmdetection/images/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-These images are freely-usable ones from https://www.pexels.com/.
-
-- https://www.pexels.com/photo/assorted-color-kittens-45170/
-- https://www.pexels.com/photo/white-wooden-kitchen-cabinet-1599791/
-- https://www.pexels.com/photo/assorted-books-on-book-shelves-1370295/
-- https://www.pexels.com/photo/pile-of-assorted-varieties-of-vegetables-2255935/
-- https://www.pexels.com/photo/sliced-fruits-on-tray-1132047/
-- https://www.pexels.com/photo/group-of-people-carrying-surfboards-1549196/
-- https://www.pexels.com/photo/aerial-photo-of-vehicles-in-the-city-1031698/
diff --git a/spaces/imperialwool/funapi/routes/osuApi/getPreview.py b/spaces/imperialwool/funapi/routes/osuApi/getPreview.py
deleted file mode 100644
index 5cd98370bb823dc8664d89407aceb073bfb32504..0000000000000000000000000000000000000000
--- a/spaces/imperialwool/funapi/routes/osuApi/getPreview.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from .. import helpers
-from .findSong import *
-from requests import get
-from random import randint as rand
-def getPreview(request):
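- # resolve a beatmap preview URL either directly from a beatmapId or by picking a random match for a search query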
- beatmapId = helpers.getFromRequest(request, "beatmapId")
- query = helpers.getFromRequest(request, "query")
-
- if beatmapId is not None:
- tryment = get(f"https://b.ppy.sh/preview/{beatmapId}.mp3")
- if int(tryment.status_code) not in [404, 403]:
- return {"status": "pass", "details": {"code": int(tryment.status_code), "result": f"https://b.ppy.sh/preview/{beatmapId}.mp3"}}
- else:
- return {"status": "error", "details": {"code": int(tryment.status_code), "answer": tryment.text}}, 400
- elif query is not None:
- found = findSong(request)
- if found['status'] == "error": return found, 400
- rBId = found['details']['result'][rand(0, len(found['details']['result']) - 1)]['beatmapId']
- return {"status": "pass", "details": {"code": found['details']['code'], "name": f"{rBId}.mp3", "result": f"https://b.ppy.sh/preview/{rBId}.mp3"}}
- return {"status": "error", "details": { "error_code": 133, "error_details": "No details for finding preview" }}, 400
\ No newline at end of file
diff --git a/spaces/inamXcontru/PoeticTTS/All Things Fair 1995 Movie Free Download The Last Film by Bo Widerberg the Master of Swedish Cinema.md b/spaces/inamXcontru/PoeticTTS/All Things Fair 1995 Movie Free Download The Last Film by Bo Widerberg the Master of Swedish Cinema.md
deleted file mode 100644
index 9ae9b2130edc61d7ef94e5bbc4aa816efad85c74..0000000000000000000000000000000000000000
--- a/spaces/inamXcontru/PoeticTTS/All Things Fair 1995 Movie Free Download The Last Film by Bo Widerberg the Master of Swedish Cinema.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
- Also known as: Lust och fägring stor, Love Lessons, Schön ist die Jugendzeit, Passioni proibite, Akogare utsukushiku moe Lang Swedish Subs English Malmö, Sweden during the Second World War. Stig is a 15 year old pupil on the verge of adulthood. Viola is 37 years old and his teacher. He is attracted by her beauty and maturity. She is drawn to him by his youth and innocence, a god-sent relief from her drunk and miserable husband. They start a passionate and forbidden relationship - but it has consequences they never could have expected. All things fair takes place in 1943. Stig meets his teacher, Viola. His life changes forever. They start an affair. She is 37, and he is 15, but that makes no difference to them. Viola's husband is always drunk and unfaithful, so she thinks her own affair is therefore justified. The girl next door, however, also shares love for Stig and is disappointed he never notices her. The affair grows dangerous, and Viola is risking everything. Then one day Stig realizes love does exist with someone his own age, and Viola lets him pay dearly for his realization...
-all things fair 1995 movie free download Download 🗹 https://gohhs.com/2uz3zO
-AZNude has a global mission to organize celebrity nudity from television and make it universally free, accessible, and usable. We have a free collection of nude celebs and movie sex scenes; which include naked celebs, lesbian, boobs, underwear and butt pics, hot scenes from movies and series, nude and real sex celeb videos.
-Watch online streaming dan Nonton Movie All Things Fair 1995 BluRay 480p & 720p mp4 mkv hindi dubbed, eng sub, sub indo, nonton online streaming film All Things Fair 1995 full hd movies free download Movie gratis via google drive, openload, uptobox, upfile, mediafire direct link download on index movies, world4ufree, bolly4u, downloadhub, tamilrockers, rarbg, torrent, yify, eztv, erosnow, mkvcage, pahe.in, ganool, filmywap, bioskopkeren, layarkaca21, indoxxi, dunia21, Lk21, 123movies, 300mbfilms, subscene, 300mb movies, Tv21, Televisi21, 9xmovie, khatrimaza, moviesbaba, hdmovie8, mkv movies king, mkvmoviesking, Mkvking, Mkvking.com . aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Diablo 2 1.13c D2me Maphack [HOT].md b/spaces/inplisQlawa/anything-midjourney-v4-1/Diablo 2 1.13c D2me Maphack [HOT].md
deleted file mode 100644
index b89b286e85b215d4969f083e08e96e20a2344d49..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Diablo 2 1.13c D2me Maphack [HOT].md
+++ /dev/null
@@ -1,17 +0,0 @@
-Diablo 2 1.13c d2me Maphack Download ⚹ https://urlin.us/2uEvMV
-
-I would like to announce a new release of the BH maphack for slashdiablo,... EDIT 2: This program will only work with client version 1.13C. If you ever connect ...... (Re).
-I couldn't find any solution.
-Maybe someone can help me.
-Thanks!
-Edit: I downloaded a demo and tested it.
-This does not work.
-I'm not sure what I'm doing wrong with this file.
-I checked on 1.13.
-It should work...
-I can't find the config file.
-I'm sure all settings are enabled and cannot be changed.
-I tried deleting the config file using C:\\Program Files\\SLASHDIAPHORB\\bhmag.exe to see what it does but nothing changed. 8a78ff9644
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Gadwin Screen Recorder 3.4 Keygen EXCLUSIVE.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Gadwin Screen Recorder 3.4 Keygen EXCLUSIVE.md
deleted file mode 100644
index 56c2e59f4b1808bfbe4cda70a3172ade305f23d8..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Gadwin Screen Recorder 3.4 Keygen EXCLUSIVE.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-Mama & Me - Whole World Wide Works 2, 243 iMGSRC.RU [url= maya 2011 xforce keygen download free[/url] Blackout - I Got Chopped 2 2, 302 iMGSRC.RU [url= january 2013 geoloqi [url= sesspaphpag [url= endless woods 2k17 hack apk no survey free downloadgolkes[/url] santoori iranproud serial download[/url] ReFWocheNuththegodat [url= R5, P8148402 iMGSRC.RU [url= tony montana full movie torrent download[/url] Triad2.5.0.0[b].1.sc[b].apk Download[/url] Eric Demers - Ready Player One Torrent File Free [url= [url= 3eba9ee9
-Gadwin Screen Recorder 3.4 keygen Download »»» https://urlin.us/2uEyjC
-Guillermo del Toro: Life & Death in Calafia 107, 156 iMGSRC.RU [url= e09f8853 iMGSRC.RU [url= Geoloqi [url= Exuberant Birthday Invitation Free Download[/url] The Octarchy In The United States In The Constitutional Controversy Of 1820 [url= NatttureCemFrawlHem [url= Vj 2.1.2 For Mac[/url]Laurent and the artist 515, 417 iMGSRC.RU [url= ReFWocheNuththegodat [url= in Russia (9)(boy)(photo), L8-im-80 (225) iMGSRC.RU [url= flissinneple [url= [url= guillermo del toro iMGSRC.RU [url= tony montana full movie torrent download[/url] Die kleine Kirche All gesucht Datum (14) iMGSRC.RU [url= fruity loops eutsy 89 crack [url= sesspaphpag [url= NatttureCemFrawlHem [url= briletypeAbumunult [url= ReFWocheNuththegodat [url= maya 2011 xforce keygen download free[/url] Teens doing sex on internet (2009)[/url-de-teens-doing-sex-on-internet-torrent_fille-de-toi-electron-internet-v2-te [url= [url= What If It's Pc, S5[/url] Natural Essays]Bsm5,[i]Bsm5R [url= iMGSRC.RU [url=
-[url] Download[/url] imngstar[/p]Gadwin Screen Recorder 3.4 keygen [url= download[/url] Its Open, Star Research, Destroy, P1043 (86).rar PC /[url= stepupler-vs-pantihs-generator-v1-51-serial-download-all-music-legendado-piratatuga, download legendado iMGSRC. 899543212b
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (Khalnayak Full Movie 720p Free Downl).md b/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (Khalnayak Full Movie 720p Free Downl).md
deleted file mode 100644
index 119bf52c89598a8c9a74cf0649bcb1cdb1803d32..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (Khalnayak Full Movie 720p Free Downl).md
+++ /dev/null
@@ -1,6 +0,0 @@
-HD Online Player (Khalnayak full movie 720p free downl) Download >>>>> https://urlin.us/2uEy1X
-
- 3cee63e6c2
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Artificial Academy 2 Save Editor Download.md b/spaces/inreVtussa/clothingai/Examples/Artificial Academy 2 Save Editor Download.md
deleted file mode 100644
index afbb5930b600366e3ead26a1e2e0540ddabc3942..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Artificial Academy 2 Save Editor Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-artificial academy 2 save editor download Download ===== https://tiurll.com/2uCiwu
-
-Dipstick is annoying prospectively due artificial academy 2 mods the technicolor ... mod save games xbox 360; minecraft throwing spears mod 1.3.2; cheap ... 4d29de3e1b
-
-
-
diff --git a/spaces/iqovocn/ChuanhuChatGPT/modules/pdf_func.py b/spaces/iqovocn/ChuanhuChatGPT/modules/pdf_func.py
deleted file mode 100644
index 1b1087f2687fd26c8676867dd45189c069dd56a5..0000000000000000000000000000000000000000
--- a/spaces/iqovocn/ChuanhuChatGPT/modules/pdf_func.py
+++ /dev/null
@@ -1,180 +0,0 @@
-from types import SimpleNamespace
-import pdfplumber
-import logging
-from langchain.docstore.document import Document
-
-def prepare_table_config(crop_page):
- """Prepare table查找边界, 要求page为原始page
-
- From https://github.com/jsvine/pdfplumber/issues/242
- """
- page = crop_page.root_page # root/parent
- cs = page.curves + page.edges
- def curves_to_edges():
- """See https://github.com/jsvine/pdfplumber/issues/127"""
- edges = []
- for c in cs:
- edges += pdfplumber.utils.rect_to_edges(c)
- return edges
- edges = curves_to_edges()
- return {
- "vertical_strategy": "explicit",
- "horizontal_strategy": "explicit",
- "explicit_vertical_lines": edges,
- "explicit_horizontal_lines": edges,
- "intersection_y_tolerance": 10,
- }
-
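- # Filter a page down to the words that fall outside detected table bounding boxes, so table cells are not mixed into the running text.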
-def get_text_outside_table(crop_page):
- ts = prepare_table_config(crop_page)
- if len(ts["explicit_vertical_lines"]) == 0 or len(ts["explicit_horizontal_lines"]) == 0:
- return crop_page
-
- ### Get the bounding boxes of the tables on the page.
- bboxes = [table.bbox for table in crop_page.root_page.find_tables(table_settings=ts)]
- def not_within_bboxes(obj):
- """Check if the object is in any of the table's bbox."""
- def obj_in_bbox(_bbox):
- """See https://github.com/jsvine/pdfplumber/blob/stable/pdfplumber/table.py#L404"""
- v_mid = (obj["top"] + obj["bottom"]) / 2
- h_mid = (obj["x0"] + obj["x1"]) / 2
- x0, top, x1, bottom = _bbox
- return (h_mid >= x0) and (h_mid < x1) and (v_mid >= top) and (v_mid < bottom)
- return not any(obj_in_bbox(__bbox) for __bbox in bboxes)
-
- return crop_page.filter(not_within_bboxes)
-# Please express formulas in LaTeX: wrap inline formulas in $ and display formulas in $$
-
-extract_words = lambda page: page.extract_words(keep_blank_chars=True, y_tolerance=0, x_tolerance=1, extra_attrs=["fontname", "size", "object_type"])
-# dict_keys(['text', 'x0', 'x1', 'top', 'doctop', 'bottom', 'upright', 'direction', 'fontname', 'size'])
-
-def get_title_with_cropped_page(first_page):
- title = [] # collect the title
- x0,top,x1,bottom = first_page.bbox # get the page bounding box
-
- for word in extract_words(first_page):
- word = SimpleNamespace(**word)
-
- if word.size >= 14:
- title.append(word.text)
- title_bottom = word.bottom
- elif word.text == "Abstract": # locate the page abstract
- top = word.top
-
- user_info = [i["text"] for i in extract_words(first_page.within_bbox((x0,title_bottom,x1,top)))]
- # crop away the upper part; within_bbox: fully included, crop: partially included
- return title, user_info, first_page.within_bbox((x0,top,x1,bottom))
-
-def get_column_cropped_pages(pages, two_column=True):
- new_pages = []
- for page in pages:
- if two_column:
- left = page.within_bbox((0, 0, page.width/2, page.height),relative=True)
- right = page.within_bbox((page.width/2, 0, page.width, page.height), relative=True)
- new_pages.append(left)
- new_pages.append(right)
- else:
- new_pages.append(page)
-
- return new_pages
-
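- # Parse a PDF into one Document: title and author info come from the first page, then chapters are split on large-font heading words.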
-def parse_pdf(filename, two_column = True):
- level = logging.getLogger().level
- if level == logging.getLevelName("DEBUG"):
- logging.getLogger().setLevel("INFO")
-
- with pdfplumber.open(filename) as pdf:
- title, user_info, first_page = get_title_with_cropped_page(pdf.pages[0])
- new_pages = get_column_cropped_pages([first_page] + pdf.pages[1:], two_column)
-
- chapters = []
- # tuple (chapter_name, [pageid] (start,stop), chapter_text)
- create_chapter = lambda page_start,name_top,name_bottom: SimpleNamespace(
- name=[],
- name_top=name_top,
- name_bottom=name_bottom,
- record_chapter_name = True,
-
- page_start=page_start,
- page_stop=None,
-
- text=[],
- )
- cur_chapter = None
-
- # iterate over the PDF document page by page
- for idx, page in enumerate(new_pages):
- page = get_text_outside_table(page)
-
- # iterate over the page text line by line
- for word in extract_words(page):
- word = SimpleNamespace(**word)
-
- # check whether the line is printed in a large (roughly 12 pt) font; if so, treat it as the start of a new chapter
- if word.size >= 11: # a chapter name appears
- if cur_chapter is None:
- cur_chapter = create_chapter(page.page_number, word.top, word.bottom)
- elif not cur_chapter.record_chapter_name or (word.bottom != cur_chapter.name_bottom and word.top != cur_chapter.name_top):
- # stop appending to the current chapter name
- cur_chapter.page_stop = page.page_number # stop id
- chapters.append(cur_chapter)
- # reset the current chapter info
- cur_chapter = create_chapter(page.page_number, word.top, word.bottom)
-
- # print(word.size, word.top, word.bottom, word.text)
- cur_chapter.name.append(word.text)
- else:
- cur_chapter.record_chapter_name = False # the chapter name has ended
- cur_chapter.text.append(word.text)
- else:
- # handle the last chapter
- cur_chapter.page_stop = page.page_number # stop id
- chapters.append(cur_chapter)
-
- for i in chapters:
- logging.info(f"section: {i.name} pages:{i.page_start, i.page_stop} word-count:{len(i.text)}")
- logging.debug(" ".join(i.text))
-
- title = " ".join(title)
- user_info = " ".join(user_info)
- text = f"Article Title: {title}, Information:{user_info}\n"
- for idx, chapter in enumerate(chapters):
- chapter.name = " ".join(chapter.name)
- text += f"The {idx}th Chapter {chapter.name}: " + " ".join(chapter.text) + "\n"
-
- logging.getLogger().setLevel(level)
- return Document(page_content=text, metadata={"title": title})
-
-BASE_POINTS = """
-1. Who are the authors?
-2. What is the process of the proposed method?
-3. What is the performance of the proposed method? Please note down its performance metrics.
-4. What are the baseline models and their performances? Please note down these baseline methods.
-5. What dataset did this paper use?
-"""
-
-READING_PROMPT = """
-You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n
-Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n
-When you are reading, You need to focus on these key points:{}
-"""
-
-READING_PROMT_V2 = """
-You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n
-Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n
-When you are reading, You need to focus on these key points:{},
-
-And You need to generate a brief but informative title for this part.
-Your return format:
-- title: '...'
-- summary: '...'
-"""
-
-SUMMARY_PROMPT = "You are a researcher helper bot. Now you need to read the summaries of a research paper."
-
-
-if __name__ == '__main__':
- # Test code
- z = parse_pdf("./build/test.pdf")
- print(z["user_info"])
- print(z["title"])
\ No newline at end of file
diff --git a/spaces/jackli888/stable-diffusion-webui/modules/shared.py b/spaces/jackli888/stable-diffusion-webui/modules/shared.py
deleted file mode 100644
index 2a3037ac85a4ce46300b769c97b63d736315f848..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/modules/shared.py
+++ /dev/null
@@ -1,720 +0,0 @@
-import argparse
-import datetime
-import json
-import os
-import sys
-import time
-
-from PIL import Image
-import gradio as gr
-import tqdm
-
-import modules.interrogate
-import modules.memmon
-import modules.styles
-import modules.devices as devices
-from modules import localization, extensions, script_loading, errors, ui_components, shared_items
-from modules.paths import models_path, script_path, data_path
-
-
-demo = None
-
-sd_configs_path = os.path.join(script_path, "configs")
-sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml")
-sd_model_file = os.path.join(script_path, 'model.ckpt')
-default_sd_model_file = sd_model_file
-
-parser = argparse.ArgumentParser()
-parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored",)
-parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
-parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
-parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
-parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files")
-parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
-parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
-parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
-parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
-parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
-parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
-parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
-parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates")
-parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
-parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
-parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
-parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
-parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
-parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
-parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
-parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
-parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
-parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
-parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
-parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
-parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
-parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
-parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
-parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
-parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
-parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
-parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
-parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
-parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
-parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
-parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
-parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything")
-parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
-parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization")
-parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
-parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
-parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
-parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
-parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
-parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
-parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
-parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
-parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
-parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
-parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
-parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(data_path, 'ui-config.json'))
-parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
-parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
-parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json'))
-parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
-parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
-parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
-parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
-parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
-parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
-parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
-parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
-parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
-parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
-parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
-parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
-parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None)
-parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
-parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)")
-parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
-parser.add_argument("--api-log", action='store_true', help="use api-log=True to enable logging of all API requests")
-parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the API instead of the webui")
-parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
-parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
-parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
-parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origin(s) in the form of a comma-separated list (no spaces)", default=None)
-parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS origin(s) in the form of a single regular expression", default=None)
-parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
-parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
-parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
-parser.add_argument("--gradio-queue", action='store_true', help="Uses gradio queue; experimental option; breaks restart UI button")
-parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers")
-parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False)
-parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False)
-
-
-script_loading.preload_extensions(extensions.extensions_dir, parser)
-script_loading.preload_extensions(extensions.extensions_builtin_dir, parser)
-
-cmd_opts = parser.parse_args()
-
-restricted_opts = {
- "samples_filename_pattern",
- "directories_filename_pattern",
- "outdir_samples",
- "outdir_txt2img_samples",
- "outdir_img2img_samples",
- "outdir_extras_samples",
- "outdir_grids",
- "outdir_txt2img_grids",
- "outdir_save",
-}
-
-ui_reorder_categories = [
- "inpaint",
- "sampler",
- "checkboxes",
- "hires_fix",
- "dimensions",
- "cfg",
- "seed",
- "batch",
- "override_settings",
- "scripts",
-]
-
-cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
-
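-# route each module to the CPU if it (or 'all') is listed in --use-cpu, otherwise to the optimal available device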
-devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
- (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
-
-device = devices.device
-weight_load_location = None if cmd_opts.lowram else "cpu"
-
-batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
-parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
-xformers_available = False
-config_filename = cmd_opts.ui_settings_file
-
-os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
-hypernetworks = {}
-loaded_hypernetworks = []
-
-
-def reload_hypernetworks():
- from modules.hypernetworks import hypernetwork
- global hypernetworks
-
- hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
-
-
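-# global generation state shared by the UI and the API: job progress, skip/interrupt flags, and live-preview images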
-class State:
- skipped = False
- interrupted = False
- job = ""
- job_no = 0
- job_count = 0
- processing_has_refined_job_count = False
- job_timestamp = '0'
- sampling_step = 0
- sampling_steps = 0
- current_latent = None
- current_image = None
- current_image_sampling_step = 0
- id_live_preview = 0
- textinfo = None
- time_start = None
- need_restart = False
- server_start = None
-
- def skip(self):
- self.skipped = True
-
- def interrupt(self):
- self.interrupted = True
-
- def nextjob(self):
- if opts.live_previews_enable and opts.show_progress_every_n_steps == -1:
- self.do_set_current_image()
-
- self.job_no += 1
- self.sampling_step = 0
- self.current_image_sampling_step = 0
-
- def dict(self):
- obj = {
- "skipped": self.skipped,
- "interrupted": self.interrupted,
- "job": self.job,
- "job_count": self.job_count,
- "job_timestamp": self.job_timestamp,
- "job_no": self.job_no,
- "sampling_step": self.sampling_step,
- "sampling_steps": self.sampling_steps,
- }
-
- return obj
-
- def begin(self):
- self.sampling_step = 0
- self.job_count = -1
- self.processing_has_refined_job_count = False
- self.job_no = 0
- self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
- self.current_latent = None
- self.current_image = None
- self.current_image_sampling_step = 0
- self.id_live_preview = 0
- self.skipped = False
- self.interrupted = False
- self.textinfo = None
- self.time_start = time.time()
-
- devices.torch_gc()
-
- def end(self):
- self.job = ""
- self.job_count = 0
-
- devices.torch_gc()
-
- def set_current_image(self):
- """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
- if not parallel_processing_allowed:
- return
-
- if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1:
- self.do_set_current_image()
-
- def do_set_current_image(self):
- if self.current_latent is None:
- return
-
- import modules.sd_samplers
- if opts.show_progress_grid:
- self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
- else:
- self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
-
- self.current_image_sampling_step = self.sampling_step
-
- def assign_current_image(self, image):
- self.current_image = image
- self.id_live_preview += 1
-
-
-state = State()
-state.server_start = time.time()
-
-styles_filename = cmd_opts.styles_file
-prompt_styles = modules.styles.StyleDatabase(styles_filename)
-
-interrogator = modules.interrogate.InterrogateModels("interrogate")
-
-face_restorers = []
-
-class OptionInfo:
- def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None):
- self.default = default
- self.label = label
- self.component = component
- self.component_args = component_args
- self.onchange = onchange
- self.section = section
- self.refresh = refresh
-
-
-def options_section(section_identifier, options_dict):
- for k, v in options_dict.items():
- v.section = section_identifier
-
- return options_dict
-
-
-def list_checkpoint_tiles():
- import modules.sd_models
- return modules.sd_models.checkpoint_tiles()
-
-
-def refresh_checkpoints():
- import modules.sd_models
- return modules.sd_models.list_models()
-
-
-def list_samplers():
- import modules.sd_samplers
- return modules.sd_samplers.all_samplers
-
-
-hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
-
-options_templates = {}
-
-options_templates.update(options_section(('saving-images', "Saving images/grids"), {
- "samples_save": OptionInfo(True, "Always save all generated images"),
- "samples_format": OptionInfo('png', 'File format for images'),
- "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs),
- "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
-
- "grid_save": OptionInfo(True, "Always save all generated image grids"),
- "grid_format": OptionInfo('png', 'File format for grids'),
- "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
- "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
- "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
- "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
-
- "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
- "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
- "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
- "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
- "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
- "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
- "export_for_4chan": OptionInfo(True, "If the saved image file size is above the limit, or its either width or height are above the limit, save a downscaled copy as JPG"),
- "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
- "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
-
- "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
- "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
- "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
- "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"),
-
- "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
- "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"),
-
-}))
-
-options_templates.update(options_section(('saving-paths', "Paths for saving"), {
- "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
- "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
- "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
- "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
- "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
- "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
- "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
- "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
-}))
-
-options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
- "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
- "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
- "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
- "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs),
- "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
-}))
-
-options_templates.update(options_section(('upscaling', "Upscaling"), {
- "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
- "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
- "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
- "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
-}))
-
-options_templates.update(options_section(('face-restoration', "Face restoration"), {
- "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
- "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
- "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
-}))
-
-options_templates.update(options_section(('system', "System"), {
- "show_warnings": OptionInfo(False, "Show warnings in console."),
- "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
- "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
- "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
- "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
-}))
-
-options_templates.update(options_section(('training', "Training"), {
- "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
- "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
- "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
- "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
- "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
- "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
- "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
- "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
- "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
- "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
- "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
- "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
-}))
-
-options_templates.update(options_section(('sd', "Stable Diffusion"), {
- "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
- "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
- "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
- "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list),
- "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
- "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
- "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
- "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
- "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", ui_components.FormColorPicker, {}),
- "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
- "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
- "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
- "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
- "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
- "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
-}))
-
-options_templates.update(options_section(('compatibility', "Compatibility"), {
- "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
- "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
- "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
- "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
-}))
-
-options_templates.update(options_section(('interrogate', "Interrogate Options"), {
- "interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
- "interrogate_return_ranks": OptionInfo(False, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."),
- "interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
- "interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
- "interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
- "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file (0 = No limit)"),
- "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types),
- "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
- "deepbooru_sort_alpha": OptionInfo(True, "Interrogate: deepbooru sort alphabetically"),
- "deepbooru_use_spaces": OptionInfo(False, "use spaces for tags in deepbooru"),
- "deepbooru_escape": OptionInfo(True, "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)"),
- "deepbooru_filter_tags": OptionInfo("", "filter out those tags from deepbooru output (separated by comma)"),
-}))
-
-options_templates.update(options_section(('extra_networks', "Extra Networks"), {
- "extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, {"choices": ["cards", "thumbs"]}),
- "extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": [""] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
-}))
-
-options_templates.update(options_section(('ui', "User interface"), {
- "return_grid": OptionInfo(True, "Show grid in results for web"),
- "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
- "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
- "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
- "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
- "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
- "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
- "font": OptionInfo("", "Font for image grids that have text"),
- "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
- "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
- "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
- "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"),
- "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"),
- "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
- "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
- "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"),
- "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
- "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"),
- "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
-}))
-
-options_templates.update(options_section(('ui', "Live previews"), {
- "show_progressbar": OptionInfo(True, "Show progressbar"),
- "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
- "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
- "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
- "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}),
- "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
- "live_preview_refresh_period": OptionInfo(1000, "Progressbar/preview update period, in milliseconds")
-}))
-
-options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
- "hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}),
- "eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
- 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
- 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma"),
-}))
-
-options_templates.update(options_section(('postprocessing', "Postprocessing"), {
- 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
- 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
- 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
-}))
-
-options_templates.update(options_section((None, "Hidden options"), {
- "disabled_extensions": OptionInfo([], "Disable those extensions"),
- "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
-}))
-
-options_templates.update()
-
-
-class Options:
- data = None
- data_labels = options_templates
- typemap = {int: float}
-
- def __init__(self):
- self.data = {k: v.default for k, v in self.data_labels.items()}
-
- def __setattr__(self, key, value):
- if self.data is not None:
- if key in self.data or key in self.data_labels:
- assert not cmd_opts.freeze_settings, "changing settings is disabled"
-
- info = opts.data_labels.get(key, None)
- comp_args = info.component_args if info else None
- if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
- raise RuntimeError(f"not possible to set {key} because it is restricted")
-
- if cmd_opts.hide_ui_dir_config and key in restricted_opts:
- raise RuntimeError(f"not possible to set {key} because it is restricted")
-
- self.data[key] = value
- return
-
- return super(Options, self).__setattr__(key, value)
-
- def __getattr__(self, item):
- if self.data is not None:
- if item in self.data:
- return self.data[item]
-
- if item in self.data_labels:
- return self.data_labels[item].default
-
- return super(Options, self).__getattribute__(item)
-
- def set(self, key, value):
- """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
-
- oldval = self.data.get(key, None)
- if oldval == value:
- return False
-
- try:
- setattr(self, key, value)
- except RuntimeError:
- return False
-
- if self.data_labels[key].onchange is not None:
- try:
- self.data_labels[key].onchange()
- except Exception as e:
- errors.display(e, f"changing setting {key} to {value}")
- setattr(self, key, oldval)
- return False
-
- return True
-
- def save(self, filename):
- assert not cmd_opts.freeze_settings, "saving settings is disabled"
-
- with open(filename, "w", encoding="utf8") as file:
- json.dump(self.data, file, indent=4)
-
- def same_type(self, x, y):
- if x is None or y is None:
- return True
-
- type_x = self.typemap.get(type(x), type(x))
- type_y = self.typemap.get(type(y), type(y))
-
- return type_x == type_y
-
- def load(self, filename):
- with open(filename, "r", encoding="utf8") as file:
- self.data = json.load(file)
-
- bad_settings = 0
- for k, v in self.data.items():
- info = self.data_labels.get(k, None)
- if info is not None and not self.same_type(info.default, v):
- print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
- bad_settings += 1
-
- if bad_settings > 0:
- print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
-
- def onchange(self, key, func, call=True):
- item = self.data_labels.get(key)
- item.onchange = func
-
- if call:
- func()
-
- def dumpjson(self):
- d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
- return json.dumps(d)
-
- def add_option(self, key, info):
- self.data_labels[key] = info
-
- def reorder(self):
- """reorder settings so that all items related to section always go together"""
-
- section_ids = {}
- settings_items = self.data_labels.items()
- for k, item in settings_items:
- if item.section not in section_ids:
- section_ids[item.section] = len(section_ids)
-
- self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
-
- def cast_value(self, key, value):
- """casts an arbitrary to the same type as this setting's value with key
- Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
- """
-
- if value is None:
- return None
-
- default_value = self.data_labels[key].default
- if default_value is None:
- default_value = getattr(self, key, None)
- if default_value is None:
- return None
-
- expected_type = type(default_value)
- if expected_type == bool and value == "False":
- value = False
- else:
- value = expected_type(value)
-
- return value
-
-
-
-opts = Options()
-if os.path.exists(config_filename):
- opts.load(config_filename)
-
-settings_components = None
-"""assinged from ui.py, a mapping on setting anmes to gradio components repsponsible for those settings"""
-
-latent_upscale_default_mode = "Latent"
-latent_upscale_modes = {
- "Latent": {"mode": "bilinear", "antialias": False},
- "Latent (antialiased)": {"mode": "bilinear", "antialias": True},
- "Latent (bicubic)": {"mode": "bicubic", "antialias": False},
- "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True},
- "Latent (nearest)": {"mode": "nearest", "antialias": False},
- "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False},
-}
-
-sd_upscalers = []
-
-sd_model = None
-
-clip_model = None
-
-progress_print_out = sys.stdout
-
-
-class TotalTQDM:
- def __init__(self):
- self._tqdm = None
-
- def reset(self):
- self._tqdm = tqdm.tqdm(
- desc="Total progress",
- total=state.job_count * state.sampling_steps,
- position=1,
- file=progress_print_out
- )
-
- def update(self):
- if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
- return
- if self._tqdm is None:
- self.reset()
- self._tqdm.update()
-
- def updateTotal(self, new_total):
- if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
- return
- if self._tqdm is None:
- self.reset()
- self._tqdm.total = new_total
-
- def clear(self):
- if self._tqdm is not None:
- self._tqdm.close()
- self._tqdm = None
-
-
-total_tqdm = TotalTQDM()
-
-mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
-mem_mon.start()
-
-
-def listfiles(dirname):
- filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname)) if not x.startswith(".")]
- return [file for file in filenames if os.path.isfile(file)]
-
-
-def html_path(filename):
- return os.path.join(script_path, "html", filename)
-
-
-def html(filename):
- path = html_path(filename)
-
- if os.path.exists(path):
- with open(path, encoding="utf8") as file:
- return file.read()
-
- return ""
diff --git a/spaces/jbilcke-hf/AnimateDiff/animatediff/models/motion_module.py b/spaces/jbilcke-hf/AnimateDiff/animatediff/models/motion_module.py
deleted file mode 100644
index 2359e712e386c016463059b4d601a1dd396999cd..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/AnimateDiff/animatediff/models/motion_module.py
+++ /dev/null
@@ -1,331 +0,0 @@
-from dataclasses import dataclass
-from typing import List, Optional, Tuple, Union
-
-import torch
-import numpy as np
-import torch.nn.functional as F
-from torch import nn
-import torchvision
-
-from diffusers.configuration_utils import ConfigMixin, register_to_config
-from diffusers.modeling_utils import ModelMixin
-from diffusers.utils import BaseOutput
-from diffusers.utils.import_utils import is_xformers_available
-from diffusers.models.attention import CrossAttention, FeedForward
-
-from einops import rearrange, repeat
-import math
-
-
-def zero_module(module):
- # Zero out the parameters of a module and return it.
- for p in module.parameters():
- p.detach().zero_()
- return module
-
-
-@dataclass
-class TemporalTransformer3DModelOutput(BaseOutput):
- sample: torch.FloatTensor
-
-
-if is_xformers_available():
- import xformers
- import xformers.ops
-else:
- xformers = None
-
-
-def get_motion_module(
- in_channels,
- motion_module_type: str,
- motion_module_kwargs: dict
-):
- if motion_module_type == "Vanilla":
- return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,)
- else:
- raise ValueError
-
-
-class VanillaTemporalModule(nn.Module):
- def __init__(
- self,
- in_channels,
- num_attention_heads = 8,
- num_transformer_block = 2,
- attention_block_types =( "Temporal_Self", "Temporal_Self" ),
- cross_frame_attention_mode = None,
- temporal_position_encoding = False,
- temporal_position_encoding_max_len = 24,
- temporal_attention_dim_div = 1,
- zero_initialize = True,
- ):
- super().__init__()
-
- self.temporal_transformer = TemporalTransformer3DModel(
- in_channels=in_channels,
- num_attention_heads=num_attention_heads,
- attention_head_dim=in_channels // num_attention_heads // temporal_attention_dim_div,
- num_layers=num_transformer_block,
- attention_block_types=attention_block_types,
- cross_frame_attention_mode=cross_frame_attention_mode,
- temporal_position_encoding=temporal_position_encoding,
- temporal_position_encoding_max_len=temporal_position_encoding_max_len,
- )
-
- if zero_initialize:
- self.temporal_transformer.proj_out = zero_module(self.temporal_transformer.proj_out)
-
- def forward(self, input_tensor, temb, encoder_hidden_states, attention_mask=None, anchor_frame_idx=None):
- hidden_states = input_tensor
- hidden_states = self.temporal_transformer(hidden_states, encoder_hidden_states, attention_mask)
-
- output = hidden_states
- return output
-
-
-class TemporalTransformer3DModel(nn.Module):
- def __init__(
- self,
- in_channels,
- num_attention_heads,
- attention_head_dim,
-
- num_layers,
- attention_block_types = ( "Temporal_Self", "Temporal_Self", ),
- dropout = 0.0,
- norm_num_groups = 32,
- cross_attention_dim = 768,
- activation_fn = "geglu",
- attention_bias = False,
- upcast_attention = False,
-
- cross_frame_attention_mode = None,
- temporal_position_encoding = False,
- temporal_position_encoding_max_len = 24,
- ):
- super().__init__()
-
- inner_dim = num_attention_heads * attention_head_dim
-
- self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
- self.proj_in = nn.Linear(in_channels, inner_dim)
-
- self.transformer_blocks = nn.ModuleList(
- [
- TemporalTransformerBlock(
- dim=inner_dim,
- num_attention_heads=num_attention_heads,
- attention_head_dim=attention_head_dim,
- attention_block_types=attention_block_types,
- dropout=dropout,
- norm_num_groups=norm_num_groups,
- cross_attention_dim=cross_attention_dim,
- activation_fn=activation_fn,
- attention_bias=attention_bias,
- upcast_attention=upcast_attention,
- cross_frame_attention_mode=cross_frame_attention_mode,
- temporal_position_encoding=temporal_position_encoding,
- temporal_position_encoding_max_len=temporal_position_encoding_max_len,
- )
- for d in range(num_layers)
- ]
- )
- self.proj_out = nn.Linear(inner_dim, in_channels)
-
- def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None):
- assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
- video_length = hidden_states.shape[2]
- hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
-
- batch, channel, height, weight = hidden_states.shape
- residual = hidden_states
-
- hidden_states = self.norm(hidden_states)
- inner_dim = hidden_states.shape[1]
- hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
- hidden_states = self.proj_in(hidden_states)
-
- # Transformer Blocks
- for block in self.transformer_blocks:
- hidden_states = block(hidden_states, encoder_hidden_states=encoder_hidden_states, video_length=video_length)
-
- # output
- hidden_states = self.proj_out(hidden_states)
- hidden_states = hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
-
- output = hidden_states + residual
- output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
-
- return output
-
-
-class TemporalTransformerBlock(nn.Module):
- def __init__(
- self,
- dim,
- num_attention_heads,
- attention_head_dim,
- attention_block_types = ( "Temporal_Self", "Temporal_Self", ),
- dropout = 0.0,
- norm_num_groups = 32,
- cross_attention_dim = 768,
- activation_fn = "geglu",
- attention_bias = False,
- upcast_attention = False,
- cross_frame_attention_mode = None,
- temporal_position_encoding = False,
- temporal_position_encoding_max_len = 24,
- ):
- super().__init__()
-
- attention_blocks = []
- norms = []
-
- for block_name in attention_block_types:
- attention_blocks.append(
- VersatileAttention(
- attention_mode=block_name.split("_")[0],
- cross_attention_dim=cross_attention_dim if block_name.endswith("_Cross") else None,
-
- query_dim=dim,
- heads=num_attention_heads,
- dim_head=attention_head_dim,
- dropout=dropout,
- bias=attention_bias,
- upcast_attention=upcast_attention,
-
- cross_frame_attention_mode=cross_frame_attention_mode,
- temporal_position_encoding=temporal_position_encoding,
- temporal_position_encoding_max_len=temporal_position_encoding_max_len,
- )
- )
- norms.append(nn.LayerNorm(dim))
-
- self.attention_blocks = nn.ModuleList(attention_blocks)
- self.norms = nn.ModuleList(norms)
-
- self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn)
- self.ff_norm = nn.LayerNorm(dim)
-
-
- def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None):
- for attention_block, norm in zip(self.attention_blocks, self.norms):
- norm_hidden_states = norm(hidden_states)
- hidden_states = attention_block(
- norm_hidden_states,
- encoder_hidden_states=encoder_hidden_states if attention_block.is_cross_attention else None,
- video_length=video_length,
- ) + hidden_states
-
- hidden_states = self.ff(self.ff_norm(hidden_states)) + hidden_states
-
- output = hidden_states
- return output
-
-
-class PositionalEncoding(nn.Module):
- def __init__(
- self,
- d_model,
- dropout = 0.,
- max_len = 24
- ):
- super().__init__()
- self.dropout = nn.Dropout(p=dropout)
- position = torch.arange(max_len).unsqueeze(1)
- div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
- pe = torch.zeros(1, max_len, d_model)
- pe[0, :, 0::2] = torch.sin(position * div_term)
- pe[0, :, 1::2] = torch.cos(position * div_term)
- self.register_buffer('pe', pe)
-
- def forward(self, x):
- x = x + self.pe[:, :x.size(1)]
- return self.dropout(x)
-
-
-class VersatileAttention(CrossAttention):
- def __init__(
- self,
- attention_mode = None,
- cross_frame_attention_mode = None,
- temporal_position_encoding = False,
- temporal_position_encoding_max_len = 24,
- *args, **kwargs
- ):
- super().__init__(*args, **kwargs)
- assert attention_mode == "Temporal"
-
- self.attention_mode = attention_mode
- self.is_cross_attention = kwargs["cross_attention_dim"] is not None
-
- self.pos_encoder = PositionalEncoding(
- kwargs["query_dim"],
- dropout=0.,
- max_len=temporal_position_encoding_max_len
- ) if (temporal_position_encoding and attention_mode == "Temporal") else None
-
- def extra_repr(self):
- return f"(Module Info) Attention_Mode: {self.attention_mode}, Is_Cross_Attention: {self.is_cross_attention}"
-
- def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None):
- batch_size, sequence_length, _ = hidden_states.shape
-
- if self.attention_mode == "Temporal":
- d = hidden_states.shape[1]
- hidden_states = rearrange(hidden_states, "(b f) d c -> (b d) f c", f=video_length)
-
- if self.pos_encoder is not None:
- hidden_states = self.pos_encoder(hidden_states)
-
- encoder_hidden_states = repeat(encoder_hidden_states, "b n c -> (b d) n c", d=d) if encoder_hidden_states is not None else encoder_hidden_states
- else:
- raise NotImplementedError
-
- encoder_hidden_states = encoder_hidden_states
-
- if self.group_norm is not None:
- hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
-
- query = self.to_q(hidden_states)
- dim = query.shape[-1]
- query = self.reshape_heads_to_batch_dim(query)
-
- if self.added_kv_proj_dim is not None:
- raise NotImplementedError
-
- encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
- key = self.to_k(encoder_hidden_states)
- value = self.to_v(encoder_hidden_states)
-
- key = self.reshape_heads_to_batch_dim(key)
- value = self.reshape_heads_to_batch_dim(value)
-
- if attention_mask is not None:
- if attention_mask.shape[-1] != query.shape[1]:
- target_length = query.shape[1]
- attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
- attention_mask = attention_mask.repeat_interleave(self.heads, dim=0)
-
- # attention, what we cannot get enough of
- if self._use_memory_efficient_attention_xformers:
- hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
- # Some versions of xformers return output in fp32, cast it back to the dtype of the input
- hidden_states = hidden_states.to(query.dtype)
- else:
- if self._slice_size is None or query.shape[0] // self._slice_size == 1:
- hidden_states = self._attention(query, key, value, attention_mask)
- else:
- hidden_states = self._sliced_attention(query, key, value, sequence_length, dim, attention_mask)
-
- # linear proj
- hidden_states = self.to_out[0](hidden_states)
-
- # dropout
- hidden_states = self.to_out[1](hidden_states)
-
- if self.attention_mode == "Temporal":
- hidden_states = rearrange(hidden_states, "(b d) f c -> (b f) d c", d=d)
-
- return hidden_states
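
A hypothetical smoke test for the motion module above. Shapes follow the (batch, channels, frames, height, width) layout that TemporalTransformer3DModel asserts on; the channel count and kwargs are illustrative, and the snippet assumes the same (older) diffusers version the file imports from.

import torch

motion_module = get_motion_module(
    in_channels=320,
    motion_module_type="Vanilla",
    motion_module_kwargs={"num_attention_heads": 8, "temporal_position_encoding": True},
)

x = torch.randn(2, 320, 16, 32, 32)                       # (b, c, f, h, w)
out = motion_module(x, temb=None, encoder_hidden_states=None)
assert out.shape == x.shape                               # the temporal transformer preserves the input shape
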
diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/app/interface/fonts/index.tsx b/spaces/jbilcke-hf/ai-clip-factory/src/app/interface/fonts/index.tsx
deleted file mode 100644
index 6e67106f512fff4c207ca9df1df44f9846f67790..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/ai-clip-factory/src/app/interface/fonts/index.tsx
+++ /dev/null
@@ -1,7 +0,0 @@
-"use client"
-
-import { Grandstander, Klee_One } from 'next/font/google'
-
-// If loading a variable font, you don't need to specify the font weight
-export const headingFont = Grandstander({ subsets: ['latin'] })
-export const paragraphFont = Klee_One({ subsets: ['latin'], weight: "600" })
diff --git a/spaces/jbilcke-hf/media-server/scripts/channel_random.sh b/spaces/jbilcke-hf/media-server/scripts/channel_random.sh
deleted file mode 100644
index 4d07c6034238677f9d23ab341577308b0546df87..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/media-server/scripts/channel_random.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-echo "Starting FFMPEG live stream for channel random"
-while true; do
- # TODO use channel_random.txt
- if [ -f channel_random.txt ] && [ -f channel_1_audio.txt ]; then
- echo "Files exist, starting stream"
- # Note: for now we also use channel 1 for audio!
- ffmpeg -y -nostdin -re -f concat -safe 0 -i channel_random.txt -stream_loop -1 -safe 0 -i channel_1_audio.txt -loglevel error -c:v libx264 -preset veryfast -tune zerolatency -c:a aac -ar 44100 -shortest -f flv rtmp://localhost/live/random
- else
- echo "Files do not exist, waiting for files"
- sleep 1 # check every second
- fi
-done
\ No newline at end of file
diff --git a/spaces/joaogabriellima/Real-Time-Voice-Cloning/synthesizer/audio.py b/spaces/joaogabriellima/Real-Time-Voice-Cloning/synthesizer/audio.py
deleted file mode 100644
index 83dc96c63c962bc8e13c446d05e27c009fb3239f..0000000000000000000000000000000000000000
--- a/spaces/joaogabriellima/Real-Time-Voice-Cloning/synthesizer/audio.py
+++ /dev/null
@@ -1,206 +0,0 @@
-import librosa
-import librosa.filters
-import numpy as np
-from scipy import signal
-from scipy.io import wavfile
-import soundfile as sf
-
-
-def load_wav(path, sr):
- return librosa.core.load(path, sr=sr)[0]
-
-def save_wav(wav, path, sr):
- wav *= 32767 / max(0.01, np.max(np.abs(wav)))
- #proposed by @dsmiller
- wavfile.write(path, sr, wav.astype(np.int16))
-
-def save_wavenet_wav(wav, path, sr):
- sf.write(path, wav.astype(np.float32), sr)
-
-def preemphasis(wav, k, preemphasize=True):
- if preemphasize:
- return signal.lfilter([1, -k], [1], wav)
- return wav
-
-def inv_preemphasis(wav, k, inv_preemphasize=True):
- if inv_preemphasize:
- return signal.lfilter([1], [1, -k], wav)
- return wav
-
-#From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py
-def start_and_end_indices(quantized, silence_threshold=2):
- for start in range(quantized.size):
- if abs(quantized[start] - 127) > silence_threshold:
- break
- for end in range(quantized.size - 1, 1, -1):
- if abs(quantized[end] - 127) > silence_threshold:
- break
-
- assert abs(quantized[start] - 127) > silence_threshold
- assert abs(quantized[end] - 127) > silence_threshold
-
- return start, end
-
-def get_hop_size(hparams):
- hop_size = hparams.hop_size
- if hop_size is None:
- assert hparams.frame_shift_ms is not None
- hop_size = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)
- return hop_size
-
-def linearspectrogram(wav, hparams):
- D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)
- S = _amp_to_db(np.abs(D), hparams) - hparams.ref_level_db
-
- if hparams.signal_normalization:
- return _normalize(S, hparams)
- return S
-
-def melspectrogram(wav, hparams):
- D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)
- S = _amp_to_db(_linear_to_mel(np.abs(D), hparams), hparams) - hparams.ref_level_db
-
- if hparams.signal_normalization:
- return _normalize(S, hparams)
- return S
-
-def inv_linear_spectrogram(linear_spectrogram, hparams):
- """Converts linear spectrogram to waveform using librosa"""
- if hparams.signal_normalization:
- D = _denormalize(linear_spectrogram, hparams)
- else:
- D = linear_spectrogram
-
- S = _db_to_amp(D + hparams.ref_level_db) #Convert back to linear
-
- if hparams.use_lws:
- processor = _lws_processor(hparams)
- D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
- y = processor.istft(D).astype(np.float32)
- return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
- else:
- return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
-
-def inv_mel_spectrogram(mel_spectrogram, hparams):
- """Converts mel spectrogram to waveform using librosa"""
- if hparams.signal_normalization:
- D = _denormalize(mel_spectrogram, hparams)
- else:
- D = mel_spectrogram
-
- S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db), hparams) # Convert back to linear
-
- if hparams.use_lws:
- processor = _lws_processor(hparams)
- D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
- y = processor.istft(D).astype(np.float32)
- return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
- else:
- return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
-
-def _lws_processor(hparams):
- import lws
- return lws.lws(hparams.n_fft, get_hop_size(hparams), fftsize=hparams.win_size, mode="speech")
-
-def _griffin_lim(S, hparams):
- """librosa implementation of Griffin-Lim
- Based on https://github.com/librosa/librosa/issues/434
- """
- angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
- S_complex = np.abs(S).astype(np.complex)
- y = _istft(S_complex * angles, hparams)
- for i in range(hparams.griffin_lim_iters):
- angles = np.exp(1j * np.angle(_stft(y, hparams)))
- y = _istft(S_complex * angles, hparams)
- return y
-
-def _stft(y, hparams):
- if hparams.use_lws:
- return _lws_processor(hparams).stft(y).T
- else:
- return librosa.stft(y=y, n_fft=hparams.n_fft, hop_length=get_hop_size(hparams), win_length=hparams.win_size)
-
-def _istft(y, hparams):
- return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams.win_size)
-
-##########################################################
-#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
-def num_frames(length, fsize, fshift):
- """Compute number of time frames of spectrogram
- """
- pad = (fsize - fshift)
- if length % fshift == 0:
- M = (length + pad * 2 - fsize) // fshift + 1
- else:
- M = (length + pad * 2 - fsize) // fshift + 2
- return M
-
-
-def pad_lr(x, fsize, fshift):
- """Compute left and right padding
- """
- M = num_frames(len(x), fsize, fshift)
- pad = (fsize - fshift)
- T = len(x) + 2 * pad
- r = (M - 1) * fshift + fsize - T
- return pad, pad + r
-##########################################################
-#Librosa correct padding
-def librosa_pad_lr(x, fsize, fshift):
- return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
-
-# Conversions
-_mel_basis = None
-_inv_mel_basis = None
-
-def _linear_to_mel(spectrogram, hparams):
- global _mel_basis
- if _mel_basis is None:
- _mel_basis = _build_mel_basis(hparams)
- return np.dot(_mel_basis, spectrogram)
-
-def _mel_to_linear(mel_spectrogram, hparams):
- global _inv_mel_basis
- if _inv_mel_basis is None:
- _inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))
- return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
-
-def _build_mel_basis(hparams):
- assert hparams.fmax <= hparams.sample_rate // 2
- return librosa.filters.mel(hparams.sample_rate, hparams.n_fft, n_mels=hparams.num_mels,
- fmin=hparams.fmin, fmax=hparams.fmax)
-
-def _amp_to_db(x, hparams):
- min_level = np.exp(hparams.min_level_db / 20 * np.log(10))
- return 20 * np.log10(np.maximum(min_level, x))
-
-def _db_to_amp(x):
- return np.power(10.0, (x) * 0.05)
-
-def _normalize(S, hparams):
- if hparams.allow_clipping_in_normalization:
- if hparams.symmetric_mels:
- return np.clip((2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value,
- -hparams.max_abs_value, hparams.max_abs_value)
- else:
- return np.clip(hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)), 0, hparams.max_abs_value)
-
- assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0
- if hparams.symmetric_mels:
- return (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value
- else:
- return hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db))
-
-def _denormalize(D, hparams):
- if hparams.allow_clipping_in_normalization:
- if hparams.symmetric_mels:
- return (((np.clip(D, -hparams.max_abs_value,
- hparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value))
- + hparams.min_level_db)
- else:
- return ((np.clip(D, 0, hparams.max_abs_value) * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
-
- if hparams.symmetric_mels:
- return (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db)
- else:
- return ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
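
A quick, hypothetical round-trip through the helpers above, with a SimpleNamespace standing in for the project's real hparams object. The attribute names mirror what the functions read; the values and the input filename are only illustrative.

from types import SimpleNamespace

hparams = SimpleNamespace(
    sample_rate=16000, n_fft=800, hop_size=200, win_size=800, frame_shift_ms=None,
    num_mels=80, fmin=55, fmax=7600,
    preemphasis=0.97, preemphasize=True,
    ref_level_db=20, min_level_db=-100,
    signal_normalization=True, allow_clipping_in_normalization=True,
    symmetric_mels=True, max_abs_value=4.0,
    use_lws=False, power=1.5, griffin_lim_iters=60,
)

wav = load_wav("example.wav", sr=hparams.sample_rate)     # hypothetical input file
mel = melspectrogram(wav, hparams)                        # (num_mels, frames) in normalized dB
recon = inv_mel_spectrogram(mel, hparams)                 # Griffin-Lim reconstruction
save_wav(recon, "reconstructed.wav", sr=hparams.sample_rate)
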
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/immutable.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/immutable.py
deleted file mode 100644
index cab8d6fb5a03164734bf5af4f97ad45b81c0a9fb..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/immutable.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
-
-import collections.abc
-from typing import Any
-
-from dns._immutable_ctx import immutable
-
-
-@immutable
-class Dict(collections.abc.Mapping): # lgtm[py/missing-equals]
- def __init__(self, dictionary: Any, no_copy: bool = False):
- """Make an immutable dictionary from the specified dictionary.
-
- If *no_copy* is `True`, then *dictionary* will be wrapped instead
- of copied. Only set this if you are sure there will be no external
- references to the dictionary.
- """
- if no_copy and isinstance(dictionary, dict):
- self._odict = dictionary
- else:
- self._odict = dict(dictionary)
- self._hash = None
-
- def __getitem__(self, key):
- return self._odict.__getitem__(key)
-
- def __hash__(self): # pylint: disable=invalid-hash-returned
- if self._hash is None:
- h = 0
- for key in sorted(self._odict.keys()):
- h ^= hash(key)
- object.__setattr__(self, "_hash", h)
- # this does return an int, but pylint doesn't figure that out
- return self._hash
-
- def __len__(self):
- return len(self._odict)
-
- def __iter__(self):
- return iter(self._odict)
-
-
-def constify(o: Any) -> Any:
- """
- Convert mutable types to immutable types.
- """
- if isinstance(o, bytearray):
- return bytes(o)
- if isinstance(o, tuple):
- try:
- hash(o)
- return o
- except Exception:
- return tuple(constify(elt) for elt in o)
- if isinstance(o, list):
- return tuple(constify(elt) for elt in o)
- if isinstance(o, dict):
- cdict = dict()
- for k, v in o.items():
- cdict[k] = constify(v)
- return Dict(cdict, True)
- return o
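
A small illustration of constify() above: nested mutable containers come back as hashable, immutable equivalents. The record contents are made up.

record = {
    "name": "example.com.",
    "addresses": ["192.0.2.1", "192.0.2.2"],
    "raw": bytearray(b"\x00\x01"),
}

frozen = constify(record)                  # an immutable Dict wrapping immutable values
assert isinstance(frozen["addresses"], tuple)
assert isinstance(frozen["raw"], bytes)
hash(frozen)                               # works: keys are hashed, values are now immutable
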
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/chat_interface.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/chat_interface.py
deleted file mode 100644
index a27d92c92d5c4c9dae7da8cc77207c52ed5a71dc..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/chat_interface.py
+++ /dev/null
@@ -1,495 +0,0 @@
-"""
-This file defines a useful high-level abstraction to build Gradio chatbots: ChatInterface.
-"""
-
-
-from __future__ import annotations
-
-import inspect
-from typing import AsyncGenerator, Callable
-
-import anyio
-from gradio_client import utils as client_utils
-from gradio_client.documentation import document, set_documentation_group
-
-from gradio.blocks import Blocks
-from gradio.components import (
- Button,
- Chatbot,
- IOComponent,
- Markdown,
- State,
- Textbox,
- get_component_instance,
-)
-from gradio.events import Dependency, EventListenerMethod, on
-from gradio.helpers import create_examples as Examples # noqa: N812
-from gradio.layouts import Accordion, Column, Group, Row
-from gradio.themes import ThemeClass as Theme
-from gradio.utils import SyncToAsyncIterator, async_iteration
-
-set_documentation_group("chatinterface")
-
-
-@document()
-class ChatInterface(Blocks):
- """
- ChatInterface is Gradio's high-level abstraction for creating chatbot UIs, and allows you to create
- a web-based demo around a chatbot model in a few lines of code. Only one parameter is required: fn, which
- takes a function that governs the response of the chatbot based on the user input and chat history. Additional
- parameters can be used to control the appearance and behavior of the demo.
-
- Example:
- import gradio as gr
-
- def echo(message, history):
- return message
-
- demo = gr.ChatInterface(fn=echo, examples=["hello", "hola", "merhaba"], title="Echo Bot")
- demo.launch()
- Demos: chatinterface_random_response, chatinterface_streaming_echo
- Guides: creating-a-chatbot-fast, sharing-your-app
- """
-
- def __init__(
- self,
- fn: Callable,
- *,
- chatbot: Chatbot | None = None,
- textbox: Textbox | None = None,
- additional_inputs: str | IOComponent | list[str | IOComponent] | None = None,
- additional_inputs_accordion_name: str = "Additional Inputs",
- examples: list[str] | None = None,
- cache_examples: bool | None = None,
- title: str | None = None,
- description: str | None = None,
- theme: Theme | str | None = None,
- css: str | None = None,
- analytics_enabled: bool | None = None,
- submit_btn: str | None | Button = "Submit",
- stop_btn: str | None | Button = "Stop",
- retry_btn: str | None | Button = "🔄 Retry",
- undo_btn: str | None | Button = "↩️ Undo",
- clear_btn: str | None | Button = "🗑️ Clear",
- autofocus: bool = True,
- ):
- """
- Parameters:
- fn: the function to wrap the chat interface around. Should accept two parameters: a string input message and list of two-element lists of the form [[user_message, bot_message], ...] representing the chat history, and return a string response. See the Chatbot documentation for more information on the chat history format.
- chatbot: an instance of the gr.Chatbot component to use for the chat interface, if you would like to customize the chatbot properties. If not provided, a default gr.Chatbot component will be created.
- textbox: an instance of the gr.Textbox component to use for the chat interface, if you would like to customize the textbox properties. If not provided, a default gr.Textbox component will be created.
- additional_inputs: an instance or list of instances of gradio components (or their string shortcuts) to use as additional inputs to the chatbot. If components are not already rendered in a surrounding Blocks, then the components will be displayed under the chatbot, in an accordion.
- additional_inputs_accordion_name: the label of the accordion to use for additional inputs, only used if additional_inputs is provided.
- examples: sample inputs for the function; if provided, appear below the chatbot and can be clicked to populate the chatbot input.
- cache_examples: If True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False.
- title: a title for the interface; if provided, appears above chatbot in large font. Also used as the tab title when opened in a browser window.
- description: a description for the interface; if provided, appears above the chatbot and beneath the title in regular font. Accepts Markdown and HTML content.
- theme: Theme to use, loaded from gradio.themes.
- css: custom css or path to custom css file to use with interface.
- analytics_enabled: Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True.
- submit_btn: Text to display on the submit button. If None, no button will be displayed. If a Button object, that button will be used.
- stop_btn: Text to display on the stop button, which replaces the submit_btn when the submit_btn or retry_btn is clicked and response is streaming. Clicking on the stop_btn will halt the chatbot response. If set to None, stop button functionality does not appear in the chatbot. If a Button object, that button will be used as the stop button.
- retry_btn: Text to display on the retry button. If None, no button will be displayed. If a Button object, that button will be used.
- undo_btn: Text to display on the delete last button. If None, no button will be displayed. If a Button object, that button will be used.
- clear_btn: Text to display on the clear button. If None, no button will be displayed. If a Button object, that button will be used.
- autofocus: If True, autofocuses to the textbox when the page loads.
- """
- super().__init__(
- analytics_enabled=analytics_enabled,
- mode="chat_interface",
- css=css,
- title=title or "Gradio",
- theme=theme,
- )
- self.fn = fn
- self.is_async = inspect.iscoroutinefunction(
- self.fn
- ) or inspect.isasyncgenfunction(self.fn)
- self.is_generator = inspect.isgeneratorfunction(
- self.fn
- ) or inspect.isasyncgenfunction(self.fn)
- self.examples = examples
- if self.space_id and cache_examples is None:
- self.cache_examples = True
- else:
- self.cache_examples = cache_examples or False
- self.buttons: list[Button] = []
-
- if additional_inputs:
- if not isinstance(additional_inputs, list):
- additional_inputs = [additional_inputs]
- self.additional_inputs = [
- get_component_instance(i) for i in additional_inputs # type: ignore
- ]
- else:
- self.additional_inputs = []
- self.additional_inputs_accordion_name = additional_inputs_accordion_name
-
- with self:
- if title:
- Markdown(
- f"{self.title} "
- )
- if description:
- Markdown(description)
-
- with Column(variant="panel"):
- if chatbot:
- self.chatbot = chatbot.render()
- else:
- self.chatbot = Chatbot(label="Chatbot")
-
- with Group():
- with Row():
- if textbox:
- textbox.container = False
- textbox.show_label = False
- self.textbox = textbox.render()
- else:
- self.textbox = Textbox(
- container=False,
- show_label=False,
- label="Message",
- placeholder="Type a message...",
- scale=7,
- autofocus=autofocus,
- )
- if submit_btn:
- if isinstance(submit_btn, Button):
- submit_btn.render()
- elif isinstance(submit_btn, str):
- submit_btn = Button(
- submit_btn,
- variant="primary",
- scale=1,
- min_width=150,
- )
- else:
- raise ValueError(
- f"The submit_btn parameter must be a gr.Button, string, or None, not {type(submit_btn)}"
- )
- if stop_btn:
- if isinstance(stop_btn, Button):
- stop_btn.visible = False
- stop_btn.render()
- elif isinstance(stop_btn, str):
- stop_btn = Button(
- stop_btn,
- variant="stop",
- visible=False,
- scale=1,
- min_width=150,
- )
- else:
- raise ValueError(
- f"The stop_btn parameter must be a gr.Button, string, or None, not {type(stop_btn)}"
- )
- self.buttons.extend([submit_btn, stop_btn])
-
- with Row():
- for btn in [retry_btn, undo_btn, clear_btn]:
- if btn:
- if isinstance(btn, Button):
- btn.render()
- elif isinstance(btn, str):
- btn = Button(btn, variant="secondary")
- else:
- raise ValueError(
- f"All the _btn parameters must be a gr.Button, string, or None, not {type(btn)}"
- )
- self.buttons.append(btn)
-
- self.fake_api_btn = Button("Fake API", visible=False)
- self.fake_response_textbox = Textbox(
- label="Response", visible=False
- )
- (
- self.submit_btn,
- self.stop_btn,
- self.retry_btn,
- self.undo_btn,
- self.clear_btn,
- ) = self.buttons
-
- if examples:
- if self.is_generator:
- examples_fn = self._examples_stream_fn
- else:
- examples_fn = self._examples_fn
-
- self.examples_handler = Examples(
- examples=examples,
- inputs=[self.textbox] + self.additional_inputs,
- outputs=self.chatbot,
- fn=examples_fn,
- )
-
- any_unrendered_inputs = any(
- not inp.is_rendered for inp in self.additional_inputs
- )
- if self.additional_inputs and any_unrendered_inputs:
- with Accordion(self.additional_inputs_accordion_name, open=False):
- for input_component in self.additional_inputs:
- if not input_component.is_rendered:
- input_component.render()
-
- # The example caching must happen after the input components have rendered
- if cache_examples:
- client_utils.synchronize_async(self.examples_handler.cache)
-
- self.saved_input = State()
- self.chatbot_state = State([])
-
- self._setup_events()
- self._setup_api()
-
- def _setup_events(self) -> None:
- submit_fn = self._stream_fn if self.is_generator else self._submit_fn
- submit_triggers = (
- [self.textbox.submit, self.submit_btn.click]
- if self.submit_btn
- else [self.textbox.submit]
- )
- submit_event = (
- on(
- submit_triggers,
- self._clear_and_save_textbox,
- [self.textbox],
- [self.textbox, self.saved_input],
- api_name=False,
- queue=False,
- )
- .then(
- self._display_input,
- [self.saved_input, self.chatbot_state],
- [self.chatbot, self.chatbot_state],
- api_name=False,
- queue=False,
- )
- .then(
- submit_fn,
- [self.saved_input, self.chatbot_state] + self.additional_inputs,
- [self.chatbot, self.chatbot_state],
- api_name=False,
- )
- )
- self._setup_stop_events(submit_triggers, submit_event)
-
- if self.retry_btn:
- retry_event = (
- self.retry_btn.click(
- self._delete_prev_fn,
- [self.chatbot_state],
- [self.chatbot, self.saved_input, self.chatbot_state],
- api_name=False,
- queue=False,
- )
- .then(
- self._display_input,
- [self.saved_input, self.chatbot_state],
- [self.chatbot, self.chatbot_state],
- api_name=False,
- queue=False,
- )
- .then(
- submit_fn,
- [self.saved_input, self.chatbot_state] + self.additional_inputs,
- [self.chatbot, self.chatbot_state],
- api_name=False,
- )
- )
- self._setup_stop_events([self.retry_btn.click], retry_event)
-
- if self.undo_btn:
- self.undo_btn.click(
- self._delete_prev_fn,
- [self.chatbot_state],
- [self.chatbot, self.saved_input, self.chatbot_state],
- api_name=False,
- queue=False,
- ).then(
- lambda x: x,
- [self.saved_input],
- [self.textbox],
- api_name=False,
- queue=False,
- )
-
- if self.clear_btn:
- self.clear_btn.click(
- lambda: ([], [], None),
- None,
- [self.chatbot, self.chatbot_state, self.saved_input],
- queue=False,
- api_name=False,
- )
-
- def _setup_stop_events(
- self, event_triggers: list[EventListenerMethod], event_to_cancel: Dependency
- ) -> None:
- if self.stop_btn and self.is_generator:
- if self.submit_btn:
- for event_trigger in event_triggers:
- event_trigger(
- lambda: (
- Button.update(visible=False),
- Button.update(visible=True),
- ),
- None,
- [self.submit_btn, self.stop_btn],
- api_name=False,
- queue=False,
- )
- event_to_cancel.then(
- lambda: (Button.update(visible=True), Button.update(visible=False)),
- None,
- [self.submit_btn, self.stop_btn],
- api_name=False,
- queue=False,
- )
- else:
- for event_trigger in event_triggers:
- event_trigger(
- lambda: Button.update(visible=True),
- None,
- [self.stop_btn],
- api_name=False,
- queue=False,
- )
- event_to_cancel.then(
- lambda: Button.update(visible=False),
- None,
- [self.stop_btn],
- api_name=False,
- queue=False,
- )
- self.stop_btn.click(
- None,
- None,
- None,
- cancels=event_to_cancel,
- api_name=False,
- )
-
- def _setup_api(self) -> None:
- api_fn = self._api_stream_fn if self.is_generator else self._api_submit_fn
-
- self.fake_api_btn.click(
- api_fn,
- [self.textbox, self.chatbot_state] + self.additional_inputs,
- [self.textbox, self.chatbot_state],
- api_name="chat",
- )
-
- def _clear_and_save_textbox(self, message: str) -> tuple[str, str]:
- return "", message
-
- def _display_input(
- self, message: str, history: list[list[str | None]]
- ) -> tuple[list[list[str | None]], list[list[str | None]]]:
- history.append([message, None])
- return history, history
-
- async def _submit_fn(
- self,
- message: str,
- history_with_input: list[list[str | None]],
- *args,
- ) -> tuple[list[list[str | None]], list[list[str | None]]]:
- history = history_with_input[:-1]
- if self.is_async:
- response = await self.fn(message, history, *args)
- else:
- response = await anyio.to_thread.run_sync(
- self.fn, message, history, *args, limiter=self.limiter
- )
- history.append([message, response])
- return history, history
-
- async def _stream_fn(
- self,
- message: str,
- history_with_input: list[list[str | None]],
- *args,
- ) -> AsyncGenerator:
- history = history_with_input[:-1]
- if self.is_async:
- generator = self.fn(message, history, *args)
- else:
- generator = await anyio.to_thread.run_sync(
- self.fn, message, history, *args, limiter=self.limiter
- )
- generator = SyncToAsyncIterator(generator, self.limiter)
- try:
- first_response = await async_iteration(generator)
- update = history + [[message, first_response]]
- yield update, update
- except StopIteration:
- update = history + [[message, None]]
- yield update, update
- async for response in generator:
- update = history + [[message, response]]
- yield update, update
-
- async def _api_submit_fn(
- self, message: str, history: list[list[str | None]], *args
- ) -> tuple[str, list[list[str | None]]]:
- if self.is_async:
- response = await self.fn(message, history, *args)
- else:
- response = await anyio.to_thread.run_sync(
- self.fn, message, history, *args, limiter=self.limiter
- )
- history.append([message, response])
- return response, history
-
- async def _api_stream_fn(
- self, message: str, history: list[list[str | None]], *args
- ) -> AsyncGenerator:
- if self.is_async:
- generator = self.fn(message, history, *args)
- else:
- generator = await anyio.to_thread.run_sync(
- self.fn, message, history, *args, limiter=self.limiter
- )
- generator = SyncToAsyncIterator(generator, self.limiter)
- try:
- first_response = await async_iteration(generator)
- yield first_response, history + [[message, first_response]]
- except StopIteration:
- yield None, history + [[message, None]]
- async for response in generator:
- yield response, history + [[message, response]]
-
- async def _examples_fn(self, message: str, *args) -> list[list[str | None]]:
- if self.is_async:
- response = await self.fn(message, [], *args)
- else:
- response = await anyio.to_thread.run_sync(
- self.fn, message, [], *args, limiter=self.limiter
- )
- return [[message, response]]
-
- async def _examples_stream_fn(
- self,
- message: str,
- *args,
- ) -> AsyncGenerator:
- if self.is_async:
- generator = self.fn(message, [], *args)
- else:
- generator = await anyio.to_thread.run_sync(
- self.fn, message, [], *args, limiter=self.limiter
- )
- generator = SyncToAsyncIterator(generator, self.limiter)
- async for response in generator:
- yield [[message, response]]
-
- def _delete_prev_fn(
- self, history: list[list[str | None]]
- ) -> tuple[list[list[str | None]], str, list[list[str | None]]]:
- try:
- message, _ = history.pop()
- except IndexError:
- message = ""
- return history, message or "", history
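
A hedged sketch of the streaming path handled by _stream_fn above: when fn is a generator, each yield replaces the pending bot message, and the stop button wired up in _setup_stop_events can cancel the run. The echo bot below is purely illustrative.

import time
import gradio as gr

def slow_echo(message, history):
    partial = ""
    for ch in message:
        partial += ch
        time.sleep(0.05)
        yield partial                       # each yield updates the last chatbot message in place

demo = gr.ChatInterface(fn=slow_echo, examples=["hello", "gradio"], title="Streaming Echo")
demo.queue().launch()
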
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/bar_plot.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/bar_plot.py
deleted file mode 100644
index f16bd4c379e142f94e077e46def907ed8da0b615..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gradio/components/bar_plot.py
+++ /dev/null
@@ -1,412 +0,0 @@
-"""gr.BarPlot() component."""
-
-from __future__ import annotations
-
-import warnings
-from typing import Callable, Literal
-
-import altair as alt
-import pandas as pd
-from gradio_client.documentation import document, set_documentation_group
-
-from gradio.components.base import _Keywords
-from gradio.components.plot import AltairPlot, Plot
-
-set_documentation_group("component")
-
-
-@document()
-class BarPlot(Plot):
- """
- Create a bar plot.
-
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a pandas dataframe with the data to plot.
-
- Demos: bar_plot, chicago-bikeshare-dashboard
- """
-
- def __init__(
- self,
- value: pd.DataFrame | Callable | None = None,
- x: str | None = None,
- y: str | None = None,
- *,
- color: str | None = None,
- vertical: bool = True,
- group: str | None = None,
- title: str | None = None,
- tooltip: list[str] | str | None = None,
- x_title: str | None = None,
- y_title: str | None = None,
- x_label_angle: float | None = None,
- y_label_angle: float | None = None,
- color_legend_title: str | None = None,
- group_title: str | None = None,
- color_legend_position: Literal[
- "left",
- "right",
- "top",
- "bottom",
- "top-left",
- "top-right",
- "bottom-left",
- "bottom-right",
- "none",
- ]
- | None = None,
- height: int | None = None,
- width: int | None = None,
- y_lim: list[int] | None = None,
- caption: str | None = None,
- interactive: bool | None = True,
- label: str | None = None,
- show_label: bool | None = None,
- container: bool = True,
- scale: int | None = None,
- min_width: int = 160,
- every: float | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- elem_classes: list[str] | str | None = None,
- sort: Literal["x", "y", "-x", "-y"] | None = None,
- show_actions_button: bool = False,
- **kwargs,
- ):
- """
- Parameters:
- value: The pandas dataframe containing the data to display in a bar plot.
- x: Column corresponding to the x axis.
- y: Column corresponding to the y axis.
- color: The column to determine the bar color. Must be categorical (discrete values).
- vertical: If True, the bars will be displayed vertically. If False, the x and y axis will be switched, displaying the bars horizontally. Default is True.
- group: The column with which to split the overall plot into smaller subplots.
- title: The title to display on top of the chart.
- tooltip: The column (or list of columns) to display on the tooltip when a user hovers over a bar.
- x_title: The title given to the x axis. By default, uses the value of the x parameter.
- y_title: The title given to the y axis. By default, uses the value of the y parameter.
- x_label_angle: The angle (in degrees) of the x axis labels. Positive values are clockwise, and negative values are counter-clockwise.
- y_label_angle: The angle (in degrees) of the y axis labels. Positive values are clockwise, and negative values are counter-clockwise.
- color_legend_title: The title given to the color legend. By default, uses the value of color parameter.
- group_title: The label displayed on top of the subplot columns (or rows if vertical=True). Use an empty string to omit.
- color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- height: The height of the plot in pixels.
- width: The width of the plot in pixels.
- y_lim: A list containing the limits for the y-axis, specified as [y_min, y_max].
- caption: The (optional) caption to display below the plot.
- interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.
- label: The (optional) label to display on the top left corner of the plot.
- show_label: Whether the label should be displayed.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- visible: Whether the plot should be visible.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
- sort: Specifies the sorting axis as either "x", "y", "-x" or "-y". If None, no sorting is applied.
- show_actions_button: Whether to show the actions button on the top right corner of the plot.
- """
- self.x = x
- self.y = y
- self.color = color
- self.vertical = vertical
- self.group = group
- self.group_title = group_title
- self.tooltip = tooltip
- self.title = title
- self.x_title = x_title
- self.y_title = y_title
- self.x_label_angle = x_label_angle
- self.y_label_angle = y_label_angle
- self.color_legend_title = color_legend_title
- self.group_title = group_title
- self.color_legend_position = color_legend_position
- self.y_lim = y_lim
- self.caption = caption
- self.interactive_chart = interactive
- self.width = width
- self.height = height
- self.sort = sort
- self.show_actions_button = show_actions_button
- super().__init__(
- value=value,
- label=label,
- show_label=show_label,
- container=container,
- scale=scale,
- min_width=min_width,
- visible=visible,
- elem_id=elem_id,
- elem_classes=elem_classes,
- every=every,
- **kwargs,
- )
-
- def get_block_name(self) -> str:
- return "plot"
-
- @staticmethod
- def update(
- value: pd.DataFrame | dict | Literal[_Keywords.NO_VALUE] = _Keywords.NO_VALUE,
- x: str | None = None,
- y: str | None = None,
- color: str | None = None,
- vertical: bool = True,
- group: str | None = None,
- title: str | None = None,
- tooltip: list[str] | str | None = None,
- x_title: str | None = None,
- y_title: str | None = None,
- x_label_angle: float | None = None,
- y_label_angle: float | None = None,
- color_legend_title: str | None = None,
- group_title: str | None = None,
- color_legend_position: Literal[
- "left",
- "right",
- "top",
- "bottom",
- "top-left",
- "top-right",
- "bottom-left",
- "bottom-right",
- "none",
- ]
- | None = None,
- height: int | None = None,
- width: int | None = None,
- y_lim: list[int] | None = None,
- caption: str | None = None,
- interactive: bool | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- container: bool | None = None,
- scale: int | None = None,
- min_width: int | None = None,
- visible: bool | None = None,
- sort: Literal["x", "y", "-x", "-y"] | None = None,
- ):
- """Update an existing BarPlot component.
-
- If updating any of the plot properties (color, size, etc) the value, x, and y parameters must be specified.
-
- Parameters:
- value: The pandas dataframe containing the data to display in a bar plot.
- x: Column corresponding to the x axis.
- y: Column corresponding to the y axis.
- color: The column to determine the bar color. Must be categorical (discrete values).
- vertical: If True, the bars will be displayed vertically. If False, the x and y axis will be switched, displaying the bars horizontally. Default is True.
- group: The column with which to split the overall plot into smaller subplots.
- title: The title to display on top of the chart.
- tooltip: The column (or list of columns) to display on the tooltip when a user hovers over a bar.
- x_title: The title given to the x axis. By default, uses the value of the x parameter.
- y_title: The title given to the y axis. By default, uses the value of the y parameter.
- x_label_angle: The angle (in degrees) of the x axis labels. Positive values are clockwise, and negative values are counter-clockwise.
- y_label_angle: The angle (in degrees) of the y axis labels. Positive values are clockwise, and negative values are counter-clockwise.
- color_legend_title: The title given to the color legend. By default, uses the value of color parameter.
- group_title: The label displayed on top of the subplot columns (or rows if vertical=True). Use an empty string to omit.
- color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- height: The height of the plot in pixels.
- width: The width of the plot in pixels.
- y_lim: A list containing the limits for the y-axis, specified as [y_min, y_max].
- caption: The (optional) caption to display below the plot.
- interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.
- label: The (optional) label to display on the top left corner of the plot.
- show_label: Whether the label should be displayed.
- visible: Whether the plot should be visible.
- sort: Specifies the sorting axis as either "x", "y", "-x" or "-y". If None, no sorting is applied.
- """
- warnings.warn(
- "Using the update method is deprecated. Simply return a new object instead, e.g. `return gr.BarPlot(...)` instead of `return gr.BarPlot.update(...)`."
- )
- properties = [
- x,
- y,
- color,
- vertical,
- group,
- title,
- tooltip,
- x_title,
- y_title,
- x_label_angle,
- y_label_angle,
- color_legend_title,
- group_title,
- color_legend_position,
- height,
- width,
- y_lim,
- interactive,
- sort,
- ]
- if any(properties):
- if not isinstance(value, pd.DataFrame):
- raise ValueError(
- "In order to update plot properties the value parameter "
- "must be provided, and it must be a Dataframe. Please pass a value "
- "parameter to gr.BarPlot.update."
- )
- if x is None or y is None:
- raise ValueError(
- "In order to update plot properties, the x and y axis data "
- "must be specified. Please pass valid values for x an y to "
- "gr.BarPlot.update."
- )
- chart = BarPlot.create_plot(value, *properties)
- value = {"type": "altair", "plot": chart.to_json(), "chart": "bar"}
-
- updated_config = {
- "label": label,
- "show_label": show_label,
- "container": container,
- "scale": scale,
- "min_width": min_width,
- "visible": visible,
- "value": value,
- "caption": caption,
- "__type__": "update",
- }
- return updated_config
-
- @staticmethod
- def create_plot(
- value: pd.DataFrame,
- x: str,
- y: str,
- color: str | None = None,
- vertical: bool = True,
- group: str | None = None,
- title: str | None = None,
- tooltip: list[str] | str | None = None,
- x_title: str | None = None,
- y_title: str | None = None,
- x_label_angle: float | None = None,
- y_label_angle: float | None = None,
- color_legend_title: str | None = None,
- group_title: str | None = None,
- color_legend_position: Literal[
- "left",
- "right",
- "top",
- "bottom",
- "top-left",
- "top-right",
- "bottom-left",
- "bottom-right",
- "none",
- ]
- | None = None,
- height: int | None = None,
- width: int | None = None,
- y_lim: list[int] | None = None,
- interactive: bool | None = True,
- sort: Literal["x", "y", "-x", "-y"] | None = None,
- ):
- """Helper for creating the bar plot."""
- interactive = True if interactive is None else interactive
- orientation = (
- {"field": group, "title": group_title if group_title is not None else group}
- if group
- else {}
- )
-
- x_title = x_title or x
- y_title = y_title or y
-
- # If horizontal, switch x and y
- if not vertical:
- y, x = x, y
- x = f"sum({x}):Q"
- y_title, x_title = x_title, y_title
- orientation = {"row": alt.Row(**orientation)} if orientation else {} # type: ignore
- x_lim = y_lim
- y_lim = None
- else:
- y = f"sum({y}):Q"
- x_lim = None
- orientation = {"column": alt.Column(**orientation)} if orientation else {} # type: ignore
-
- encodings = dict(
- x=alt.X(
- x, # type: ignore
- title=x_title, # type: ignore
- scale=AltairPlot.create_scale(x_lim), # type: ignore
- axis=alt.Axis(labelAngle=x_label_angle)
- if x_label_angle is not None
- else alt.Axis(),
- sort=sort if vertical and sort is not None else None,
- ),
- y=alt.Y(
- y, # type: ignore
- title=y_title, # type: ignore
- scale=AltairPlot.create_scale(y_lim), # type: ignore
- axis=alt.Axis(labelAngle=y_label_angle)
- if y_label_angle is not None
- else alt.Axis(),
- sort=sort if not vertical and sort is not None else None,
- ),
- **orientation,
- )
- properties = {}
- if title:
- properties["title"] = title
- if height:
- properties["height"] = height
- if width:
- properties["width"] = width
-
- if color:
- domain = value[color].unique().tolist()
- range_ = list(range(len(domain)))
- encodings["color"] = {
- "field": color,
- "type": "nominal",
- "scale": {"domain": domain, "range": range_},
- "legend": AltairPlot.create_legend(
- position=color_legend_position, title=color_legend_title or color
- ),
- }
-
- if tooltip:
- encodings["tooltip"] = tooltip
-
- chart = (
- alt.Chart(value) # type: ignore
- .mark_bar() # type: ignore
- .encode(**encodings)
- .properties(background="transparent", **properties)
- )
- if interactive:
- chart = chart.interactive()
-
- return chart
-
- def postprocess(self, y: pd.DataFrame | dict | None) -> dict[str, str] | None:
- # if None or update
- if y is None or isinstance(y, dict):
- return y
- if self.x is None or self.y is None:
- raise ValueError("No value provided for required parameters `x` and `y`.")
- chart = self.create_plot(
- value=y,
- x=self.x,
- y=self.y,
- color=self.color,
- vertical=self.vertical,
- group=self.group,
- title=self.title,
- tooltip=self.tooltip,
- x_title=self.x_title,
- y_title=self.y_title,
- x_label_angle=self.x_label_angle,
- y_label_angle=self.y_label_angle,
- color_legend_title=self.color_legend_title,
- color_legend_position=self.color_legend_position, # type: ignore
- group_title=self.group_title,
- y_lim=self.y_lim,
- interactive=self.interactive_chart,
- height=self.height,
- width=self.width,
- sort=self.sort, # type: ignore
- )
-
- return {"type": "altair", "plot": chart.to_json(), "chart": "bar"}
diff --git a/spaces/juancopi81/whisper-youtube-2-hf_dataset/transforming/batchtransformer.py b/spaces/juancopi81/whisper-youtube-2-hf_dataset/transforming/batchtransformer.py
deleted file mode 100644
index c4619a2c9ff658142f3f561430bca6d8410ff81d..0000000000000000000000000000000000000000
--- a/spaces/juancopi81/whisper-youtube-2-hf_dataset/transforming/batchtransformer.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from typing import List
-
-from video import YoutubeVideo
-from transforming.transform import Transform
-from utils import accepts_types
-
-class BatchTransformer:
-
- """Class that applies multiple transforms to YouTube video object."""
-
- def __init__(self, transforms: List[Transform]) -> None:
- self._transforms = transforms
-
- @property
- def transforms(self) -> List[Transform]:
- return self._transforms
-
- @transforms.setter
- def transforms(self, transforms: List[Transform]) -> None:
- self._transforms = transforms
-
- @accepts_types(list)
- def apply(self, videos: List[YoutubeVideo]) -> List[YoutubeVideo]:
- for transform in self._transforms:
- videos = list(map(transform.apply, videos))
- return videos
\ No newline at end of file
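-
-# A minimal wiring sketch (not from the original repo). LowercaseTitle is a
-# hypothetical Transform used only to illustrate the apply(video) -> video
-# contract that BatchTransformer expects from each transform.
-class LowercaseTitle(Transform):
-    def apply(self, video: YoutubeVideo) -> YoutubeVideo:
-        # assumes YoutubeVideo exposes a mutable `title` attribute
-        video.title = video.title.lower()
-        return video
-
-# batch = BatchTransformer([LowercaseTitle()])
-# videos = batch.apply(videos)  # each transform is mapped over the whole batch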
diff --git a/spaces/keremberke/smoke-object-detection/app.py b/spaces/keremberke/smoke-object-detection/app.py
deleted file mode 100644
index 8a02910400b5dc0c969b9afd0ceeed2c82f1bbb4..0000000000000000000000000000000000000000
--- a/spaces/keremberke/smoke-object-detection/app.py
+++ /dev/null
@@ -1,53 +0,0 @@
-
-import json
-import gradio as gr
-import yolov5
-from PIL import Image
-from huggingface_hub import hf_hub_download
-
-app_title = "Smoke Object Detection"
-models_ids = ['keremberke/yolov5n-smoke', 'keremberke/yolov5s-smoke', 'keremberke/yolov5m-smoke']
-article = f" model | dataset | awesome-yolov5-models
"
-
-current_model_id = models_ids[-1]
-model = yolov5.load(current_model_id)
-
-examples = [['test_images/H_00902_png.rf.127931e9be51d3943ee7fb8a49d6cfa1.jpg', 0.25, 'keremberke/yolov5m-smoke'], ['test_images/H_09986_png.rf.0aeb1695f5989b9adeaa82baaecc65e1.jpg', 0.25, 'keremberke/yolov5m-smoke'], ['test_images/L_00261_png.rf.497e30c8474732bde3c12c31309c774c.jpg', 0.25, 'keremberke/yolov5m-smoke'], ['test_images/L_04459_png.rf.deeec1f4ef32d2d26881c275f71ba2b9.jpg', 0.25, 'keremberke/yolov5m-smoke'], ['test_images/M_00194_png.rf.a2157843f797aab94a8e26b5733c2402.jpg', 0.25, 'keremberke/yolov5m-smoke'], ['test_images/M_00848_png.rf.ec61e10aa03fb5d4f4cd3a4b615c77ad.jpg', 0.25, 'keremberke/yolov5m-smoke']]
-
-
-def predict(image, threshold=0.25, model_id=None):
- # update model if required
- global current_model_id
- global model
- if model_id != current_model_id:
- model = yolov5.load(model_id)
- current_model_id = model_id
-
- # get model input size
- config_path = hf_hub_download(repo_id=model_id, filename="config.json")
- with open(config_path, "r") as f:
- config = json.load(f)
- input_size = config["input_size"]
-
- # perform inference
- model.conf = threshold
- results = model(image, size=input_size)
- numpy_image = results.render()[0]
- output_image = Image.fromarray(numpy_image)
- return output_image
-
-
-gr.Interface(
- title=app_title,
- description="Created by 'keremberke'",
- article=article,
- fn=predict,
- inputs=[
- gr.Image(type="pil"),
- gr.Slider(maximum=1, step=0.01, value=0.25),
- gr.Dropdown(models_ids, value=models_ids[-1]),
- ],
- outputs=gr.Image(type="pil"),
- examples=examples,
- cache_examples=True if examples else False,
-).launch(enable_queue=True)
diff --git a/spaces/kernelmachine/gpt3-quality-filter/lr/eval.py b/spaces/kernelmachine/gpt3-quality-filter/lr/eval.py
deleted file mode 100644
index c2f8ba4f1c619b19043e30ea68646a4340f19167..0000000000000000000000000000000000000000
--- a/spaces/kernelmachine/gpt3-quality-filter/lr/eval.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import argparse
-import json
-import logging
-import os
-import pathlib
-import random
-import shutil
-import time
-from typing import Any, Dict, List, Union
-
-import numpy as np
-import pandas as pd
-import ray
-from sklearn.feature_extraction.text import (CountVectorizer, TfidfTransformer, HashingVectorizer,
- TfidfVectorizer)
-from sklearn.linear_model import LogisticRegression
-from sklearn.metrics import f1_score
-from sklearn.model_selection import train_test_split
-from tqdm import tqdm
-from lr.hyperparameters import SEARCH_SPACE, RandomSearch, HyperparameterSearch
-from shutil import rmtree
-
-
-# Create a custom logger
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-
-
-def load_model(serialization_dir):
- with open(os.path.join(serialization_dir, "best_hyperparameters.json"), 'r') as f:
- hyperparameters = json.load(f)
- if hyperparameters.pop('stopwords') == 1:
- stop_words = 'english'
- else:
- stop_words = None
- weight = hyperparameters.pop('weight')
- if weight == 'binary':
- binary = True
- else:
- binary = False
- ngram_range = hyperparameters.pop('ngram_range')
- ngram_range = sorted([int(x) for x in ngram_range.split()])
- if weight == 'tf-idf':
- vect = TfidfVectorizer(stop_words=stop_words,
- lowercase=True,
- ngram_range=ngram_range)
- elif weight == 'hash':
- vect = HashingVectorizer(stop_words=stop_words,lowercase=True,ngram_range=ngram_range)
- else:
- vect = CountVectorizer(binary=binary,
- stop_words=stop_words,
- lowercase=True,
- ngram_range=ngram_range)
- if weight != "hash":
- with open(os.path.join(serialization_dir, "vocab.json"), 'r') as f:
- vocab = json.load(f)
- vect.vocabulary_ = vocab
- hyperparameters['C'] = float(hyperparameters['C'])
- hyperparameters['tol'] = float(hyperparameters['tol'])
- classifier = LogisticRegression(**hyperparameters)
- if os.path.exists(os.path.join(serialization_dir, "archive", "idf.npy")):
- vect.idf_ = np.load(os.path.join(serialization_dir, "archive", "idf.npy"))
- classifier.coef_ = np.load(os.path.join(serialization_dir, "archive", "coef.npy"))
- classifier.intercept_ = np.load(os.path.join(serialization_dir, "archive", "intercept.npy"))
- classifier.classes_ = np.load(os.path.join(serialization_dir, "archive", "classes.npy"))
- return classifier, vect
-
-
-def eval_lr(test,
- classifier,
- vect):
- start = time.time()
- X_test = vect.transform(tqdm(test.text, desc="transforming data"))
- end = time.time()
- preds = classifier.predict(X_test)
- scores = classifier.predict_proba(X_test)
- return f1_score(test.label, preds, average='macro'), classifier.score(X_test, test.label), scores
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--eval_file', type=str)
- parser.add_argument('--model', '-m', type=str)
- parser.add_argument('--output', '-o', type=str)
-
-
-
- args = parser.parse_args()
-
- if not os.path.isdir(args.model):
- print(f"model {args.model} does not exist. Aborting! ")
- else:
- clf, vect = load_model(args.model)
-
- print(f"reading evaluation data at {args.eval_file}...")
- test = pd.read_json(args.eval_file, lines=True)
-
- f1, acc, scores = eval_lr(test, clf, vect)
- if args.output:
- out = pd.DataFrame({'id': test['id'], 'score': scores.tolist()})
- out.to_json(args.output, lines=True, orient='records')
-
- print("================")
- print(f"F1: {f1}")
- print(f"accuracy: {acc}")
diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/audio2pose_models/discriminator.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/audio2pose_models/discriminator.py
deleted file mode 100644
index 339c38e4812ff38a810f0f3a1c01812f6d5d78db..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/audio2pose_models/discriminator.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import torch
-import torch.nn.functional as F
-from torch import nn
-
-class ConvNormRelu(nn.Module):
- def __init__(self, conv_type='1d', in_channels=3, out_channels=64, downsample=False,
- kernel_size=None, stride=None, padding=None, norm='BN', leaky=False):
- super().__init__()
- if kernel_size is None:
- if downsample:
- kernel_size, stride, padding = 4, 2, 1
- else:
- kernel_size, stride, padding = 3, 1, 1
-
- if conv_type == '2d':
- self.conv = nn.Conv2d(
- in_channels,
- out_channels,
- kernel_size,
- stride,
- padding,
- bias=False,
- )
- if norm == 'BN':
- self.norm = nn.BatchNorm2d(out_channels)
- elif norm == 'IN':
- self.norm = nn.InstanceNorm2d(out_channels)
- else:
- raise NotImplementedError
- elif conv_type == '1d':
- self.conv = nn.Conv1d(
- in_channels,
- out_channels,
- kernel_size,
- stride,
- padding,
- bias=False,
- )
- if norm == 'BN':
- self.norm = nn.BatchNorm1d(out_channels)
- elif norm == 'IN':
- self.norm = nn.InstanceNorm1d(out_channels)
- else:
- raise NotImplementedError
- nn.init.kaiming_normal_(self.conv.weight)
-
- self.act = nn.LeakyReLU(negative_slope=0.2, inplace=False) if leaky else nn.ReLU(inplace=True)
-
- def forward(self, x):
- x = self.conv(x)
- if isinstance(self.norm, nn.InstanceNorm1d):
- x = self.norm(x.permute((0, 2, 1))).permute((0, 2, 1)) # normalize on [C]
- else:
- x = self.norm(x)
- x = self.act(x)
- return x
-
-
-class PoseSequenceDiscriminator(nn.Module):
- def __init__(self, cfg):
- super().__init__()
- self.cfg = cfg
- leaky = self.cfg.MODEL.DISCRIMINATOR.LEAKY_RELU
-
- self.seq = nn.Sequential(
- ConvNormRelu('1d', cfg.MODEL.DISCRIMINATOR.INPUT_CHANNELS, 256, downsample=True, leaky=leaky), # B, 256, 64
- ConvNormRelu('1d', 256, 512, downsample=True, leaky=leaky), # B, 512, 32
- ConvNormRelu('1d', 512, 1024, kernel_size=3, stride=1, padding=1, leaky=leaky), # B, 1024, 16
- nn.Conv1d(1024, 1, kernel_size=3, stride=1, padding=1, bias=True) # B, 1, 16
- )
-
- def forward(self, x):
- x = x.reshape(x.size(0), x.size(1), -1).transpose(1, 2)
- x = self.seq(x)
- x = x.squeeze(1)
- return x
\ No newline at end of file
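-
-# A quick shape-check sketch (not from the original repo); the config values and
-# tensor sizes below are invented for illustration.
-if __name__ == "__main__":
-    from types import SimpleNamespace
-
-    cfg = SimpleNamespace(
-        MODEL=SimpleNamespace(DISCRIMINATOR=SimpleNamespace(LEAKY_RELU=False, INPUT_CHANNELS=6))
-    )
-    net = PoseSequenceDiscriminator(cfg)
-    dummy = torch.randn(2, 64, 6)  # (batch, frames, pose dims)
-    print(net(dummy).shape)  # downsampled twice along time: torch.Size([2, 16])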
diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/datasets/stare.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/datasets/stare.py
deleted file mode 100644
index 3f71b25488cc11a6b4d582ac52b5a24e1ad1cf8e..0000000000000000000000000000000000000000
--- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/datasets/stare.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# dataset settings
-dataset_type = 'STAREDataset'
-data_root = 'data/STARE'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-img_scale = (605, 700)
-crop_size = (128, 128)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type='RepeatDataset',
- times=40000,
- dataset=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/training',
- ann_dir='annotations/training',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline))
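-
-# Consumption sketch (comment only, since config files are executed for their
-# module-level variables); the path below is a placeholder:
-#   from mmcv import Config
-#   cfg = Config.fromfile('configs/_base_/datasets/stare.py')
-#   print(cfg.data.train.dataset.img_dir)  # -> 'images/training'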
diff --git a/spaces/kolibril13/tldraw-solara-test/Dockerfile b/spaces/kolibril13/tldraw-solara-test/Dockerfile
deleted file mode 100644
index 271b19ce6fd8f70d42243166136a200328b1fd0f..0000000000000000000000000000000000000000
--- a/spaces/kolibril13/tldraw-solara-test/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM jupyter/base-notebook:latest
-
-RUN mamba install -c conda-forge leafmap geopandas localtileserver -y && \
- fix-permissions "${CONDA_DIR}" && \
- fix-permissions "/home/${NB_USER}"
-
-COPY requirements.txt .
-RUN pip install -r requirements.txt
-
-RUN mkdir ./pages
-COPY /pages ./pages
-
-ENV PROJ_LIB='/opt/conda/share/proj'
-
-USER root
-RUN chown -R ${NB_UID} ${HOME}
-USER ${NB_USER}
-
-EXPOSE 8765
-
-CMD ["solara", "run", "./pages", "--host=0.0.0.0"]
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttx.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttx.py
deleted file mode 100644
index 65a3c7a808b41fc571d59bac80f7b1085abc6b9b..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/ttx.py
+++ /dev/null
@@ -1,469 +0,0 @@
-"""\
-usage: ttx [options] inputfile1 [... inputfileN]
-
-TTX -- From OpenType To XML And Back
-
-If an input file is a TrueType or OpenType font file, it will be
-decompiled to a TTX file (an XML-based text format).
-If an input file is a TTX file, it will be compiled to whatever
-format the data is in, a TrueType or OpenType/CFF font file.
-A special input value of - means read from the standard input.
-
-Output files are created so they are unique: an existing file is
-never overwritten.
-
-General options
-===============
-
--h Print this help message.
---version show version and exit.
--d Specify a directory where the output files are
- to be created.
--o Specify a file to write the output to. A special
- value of - would use the standard output.
--f Overwrite existing output file(s), i.e. don't append
- numbers.
--v Verbose: more messages will be written to stdout
- about what is being done.
--q Quiet: No messages will be written to stdout about
- what is being done.
--a Allow virtual glyph IDs on compile or decompile.
-
-Dump options
-============
-
--l List table info: instead of dumping to a TTX file, list
- some minimal info about each table.
--t Specify a table to dump. Multiple -t options
- are allowed. When no -t option is specified, all tables
- will be dumped.
--x Specify a table to exclude from the dump. Multiple
- -x options are allowed. -t and -x are mutually exclusive.
--s Split tables: save the TTX data into separate TTX files per
- table and write one small TTX file that contains references
- to the individual table dumps. This file can be used as
- input to ttx, as long as the table files are in the
- same directory.
--g Split glyf table: Save the glyf data into separate TTX files
- per glyph and write a small TTX for the glyf table which
- contains references to the individual TTGlyph elements.
- NOTE: specifying -g implies -s (no need for -s together
- with -g)
--i Do NOT disassemble TT instructions: when this option is
- given, all TrueType programs (glyph programs, the font
- program and the pre-program) will be written to the TTX
- file as hex data instead of assembly. This saves some time
- and makes the TTX file smaller.
--z Specify a bitmap data export option for EBDT:
- {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT:
- {'raw', 'extfile'} Each option does one of the following:
-
- -z raw
- export the bitmap data as a hex dump
- -z row
- export each row as hex data
- -z bitwise
- export each row as binary in an ASCII art style
- -z extfile
- export the data as external files with XML references
-
- If no export format is specified, the 'raw' format is used.
--e Don't ignore decompilation errors, but show a full traceback
- and abort.
--y Select font number for TrueType Collection (.ttc/.otc),
- starting from 0.
---unicodedata
- Use custom database file to write character names in the
- comments of the cmap TTX output.
---newline
- Control how line endings are written in the XML file. It
- can be 'LF', 'CR', or 'CRLF'. If not specified, the
- default platform-specific line endings are used.
-
-Compile options
-===============
-
--m Merge with TrueType-input-file: specify a TrueType or
- OpenType font file to be merged with the TTX file. This
- option is only valid when at most one TTX file is specified.
--b Don't recalc glyph bounding boxes: use the values in the
- TTX file as-is.
---recalc-timestamp
- Set font 'modified' timestamp to current time.
- By default, the modification time of the TTX file will be
- used.
---no-recalc-timestamp
- Keep the original font 'modified' timestamp.
---flavor
- Specify flavor of output font file. May be 'woff' or 'woff2'.
- Note that WOFF2 requires the Brotli Python extension,
- available at https://github.com/google/brotli
---with-zopfli
- Use Zopfli instead of Zlib to compress WOFF. The Python
- extension is available at https://pypi.python.org/pypi/zopfli
-"""
-
-
-from fontTools.ttLib import TTFont, TTLibError
-from fontTools.misc.macCreatorType import getMacCreatorAndType
-from fontTools.unicode import setUnicodeData
-from fontTools.misc.textTools import Tag, tostr
-from fontTools.misc.timeTools import timestampSinceEpoch
-from fontTools.misc.loggingTools import Timer
-from fontTools.misc.cliTools import makeOutputFileName
-import os
-import sys
-import getopt
-import re
-import logging
-
-
-log = logging.getLogger("fontTools.ttx")
-
-opentypeheaderRE = re.compile("""sfntVersion=['"]OTTO["']""")
-
-
-class Options(object):
-
- listTables = False
- outputDir = None
- outputFile = None
- overWrite = False
- verbose = False
- quiet = False
- splitTables = False
- splitGlyphs = False
- disassembleInstructions = True
- mergeFile = None
- recalcBBoxes = True
- ignoreDecompileErrors = True
- bitmapGlyphDataFormat = "raw"
- unicodedata = None
- newlinestr = "\n"
- recalcTimestamp = None
- flavor = None
- useZopfli = False
-
- def __init__(self, rawOptions, numFiles):
- self.onlyTables = []
- self.skipTables = []
- self.fontNumber = -1
- for option, value in rawOptions:
- # general options
- if option == "-h":
- print(__doc__)
- sys.exit(0)
- elif option == "--version":
- from fontTools import version
-
- print(version)
- sys.exit(0)
- elif option == "-d":
- if not os.path.isdir(value):
- raise getopt.GetoptError(
- "The -d option value must be an existing directory"
- )
- self.outputDir = value
- elif option == "-o":
- self.outputFile = value
- elif option == "-f":
- self.overWrite = True
- elif option == "-v":
- self.verbose = True
- elif option == "-q":
- self.quiet = True
- # dump options
- elif option == "-l":
- self.listTables = True
- elif option == "-t":
- # pad with space if table tag length is less than 4
- value = value.ljust(4)
- self.onlyTables.append(value)
- elif option == "-x":
- # pad with space if table tag length is less than 4
- value = value.ljust(4)
- self.skipTables.append(value)
- elif option == "-s":
- self.splitTables = True
- elif option == "-g":
- # -g implies (and forces) splitTables
- self.splitGlyphs = True
- self.splitTables = True
- elif option == "-i":
- self.disassembleInstructions = False
- elif option == "-z":
- validOptions = ("raw", "row", "bitwise", "extfile")
- if value not in validOptions:
- raise getopt.GetoptError(
- "-z does not allow %s as a format. Use %s"
- % (value, validOptions)
- )
- self.bitmapGlyphDataFormat = value
- elif option == "-y":
- self.fontNumber = int(value)
- # compile options
- elif option == "-m":
- self.mergeFile = value
- elif option == "-b":
- self.recalcBBoxes = False
- elif option == "-e":
- self.ignoreDecompileErrors = False
- elif option == "--unicodedata":
- self.unicodedata = value
- elif option == "--newline":
- validOptions = ("LF", "CR", "CRLF")
- if value == "LF":
- self.newlinestr = "\n"
- elif value == "CR":
- self.newlinestr = "\r"
- elif value == "CRLF":
- self.newlinestr = "\r\n"
- else:
- raise getopt.GetoptError(
- "Invalid choice for --newline: %r (choose from %s)"
- % (value, ", ".join(map(repr, validOptions)))
- )
- elif option == "--recalc-timestamp":
- self.recalcTimestamp = True
- elif option == "--no-recalc-timestamp":
- self.recalcTimestamp = False
- elif option == "--flavor":
- self.flavor = value
- elif option == "--with-zopfli":
- self.useZopfli = True
- if self.verbose and self.quiet:
- raise getopt.GetoptError("-q and -v options are mutually exclusive")
- if self.verbose:
- self.logLevel = logging.DEBUG
- elif self.quiet:
- self.logLevel = logging.WARNING
- else:
- self.logLevel = logging.INFO
- if self.mergeFile and self.flavor:
- raise getopt.GetoptError("-m and --flavor options are mutually exclusive")
- if self.onlyTables and self.skipTables:
- raise getopt.GetoptError("-t and -x options are mutually exclusive")
- if self.mergeFile and numFiles > 1:
- raise getopt.GetoptError(
- "Must specify exactly one TTX source file when using -m"
- )
- if self.flavor != "woff" and self.useZopfli:
- raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'")
-
-
-def ttList(input, output, options):
- ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True)
- reader = ttf.reader
- tags = sorted(reader.keys())
- print('Listing table info for "%s":' % input)
- format = " %4s %10s %8s %8s"
- print(format % ("tag ", " checksum", " length", " offset"))
- print(format % ("----", "----------", "--------", "--------"))
- for tag in tags:
- entry = reader.tables[tag]
- if ttf.flavor == "woff2":
- # WOFF2 doesn't store table checksums, so they must be calculated
- from fontTools.ttLib.sfnt import calcChecksum
-
- data = entry.loadData(reader.transformBuffer)
- checkSum = calcChecksum(data)
- else:
- checkSum = int(entry.checkSum)
- if checkSum < 0:
- checkSum = checkSum + 0x100000000
- checksum = "0x%08X" % checkSum
- print(format % (tag, checksum, entry.length, entry.offset))
- print()
- ttf.close()
-
-
-@Timer(log, "Done dumping TTX in %(time).3f seconds")
-def ttDump(input, output, options):
- input_name = input
- if input == "-":
- input, input_name = sys.stdin.buffer, sys.stdin.name
- output_name = output
- if output == "-":
- output, output_name = sys.stdout, sys.stdout.name
- log.info('Dumping "%s" to "%s"...', input_name, output_name)
- if options.unicodedata:
- setUnicodeData(options.unicodedata)
- ttf = TTFont(
- input,
- 0,
- ignoreDecompileErrors=options.ignoreDecompileErrors,
- fontNumber=options.fontNumber,
- )
- ttf.saveXML(
- output,
- tables=options.onlyTables,
- skipTables=options.skipTables,
- splitTables=options.splitTables,
- splitGlyphs=options.splitGlyphs,
- disassembleInstructions=options.disassembleInstructions,
- bitmapGlyphDataFormat=options.bitmapGlyphDataFormat,
- newlinestr=options.newlinestr,
- )
- ttf.close()
-
-
-@Timer(log, "Done compiling TTX in %(time).3f seconds")
-def ttCompile(input, output, options):
- input_name = input
- if input == "-":
- input, input_name = sys.stdin, sys.stdin.name
- output_name = output
- if output == "-":
- output, output_name = sys.stdout.buffer, sys.stdout.name
- log.info('Compiling "%s" to "%s"...' % (input_name, output_name))
- if options.useZopfli:
- from fontTools.ttLib import sfnt
-
- sfnt.USE_ZOPFLI = True
- ttf = TTFont(
- options.mergeFile,
- flavor=options.flavor,
- recalcBBoxes=options.recalcBBoxes,
- recalcTimestamp=options.recalcTimestamp,
- )
- ttf.importXML(input)
-
- if options.recalcTimestamp is None and "head" in ttf and input is not sys.stdin:
- # use TTX file modification time for head "modified" timestamp
- mtime = os.path.getmtime(input)
- ttf["head"].modified = timestampSinceEpoch(mtime)
-
- ttf.save(output)
-
-
-def guessFileType(fileName):
- if fileName == "-":
- header = sys.stdin.buffer.peek(256)
- ext = ""
- else:
- base, ext = os.path.splitext(fileName)
- try:
- with open(fileName, "rb") as f:
- header = f.read(256)
- except IOError:
- return None
-
- if header.startswith(b"\xef\xbb\xbf ",
-
-export const GreetMessages = [
- '谢谢你! 知道你什么时候准备好继续前进总是很有帮助的。我现在能为你回答什么问题?',
- '重新开始总是很棒。问我任何问题!',
- '当然,我很乐意重新开始。我现在可以为你提供哪些帮助?',
- '当然,我已准备好进行新的挑战。我现在可以为你做什么?',
- '很好,让我们来更改主题。你在想什么?',
- '不用担心,我很高兴尝试一些新内容。我现在可以为你回答什么问题?',
- '好的,我准备好了!感谢重置。我们应该了解哪些内容?',
- '感谢刷新!你有新的话题吗?',
- '明白了,让我们重新开始。接下来应该讨论什么?',
- '下一步!我可以为你做什么?',
- '好的,我已准备好新话题。我们应该一起了解哪些内容?'
-]
-
-export const bingConversationStyleAtom = atomWithStorage('bingConversationStyle', BingConversationStyle.Creative, undefined, { unstable_getOnInit: true })
-export const voiceAtom = atomWithStorage('enableTTS', false, undefined, { unstable_getOnInit: true })
-
-type Param = { botId: BotId; page: string }
-
-const createBotInstance = () => {
- return new BingWebBot({
- cookie: ' ',
- ua: ' ',
- })
-}
-
-export const chatFamily = atomFamily(
- (param: Param) => {
- return atomWithImmer({
- botId: param.botId,
- bot: createBotInstance(),
- messages: [] as ChatMessageModel[],
- generatingMessageId: '',
- abortController: undefined as AbortController | undefined,
- conversationId: nanoid(),
- })
- },
- (a, b) => a.botId === b.botId && a.page === b.page,
-)
-
-export const hashAtom = atomWithHash('dialog', '')
-
-export const locationAtom = atomWithLocation()
-
-export const voiceListenAtom = atom(false)
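-
-// Usage sketch (not from the original file): resolving the per-conversation atom
-// for a given bot/page pair; the identifiers below are placeholders.
-// const chatAtom = chatFamily({ botId: 'bing', page: 'index' })
-// const [chatState, setChatState] = useAtom(chatAtom)  // inside a React component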
diff --git a/spaces/limcheekin/zephyr-7B-beta-GGUF/README.md b/spaces/limcheekin/zephyr-7B-beta-GGUF/README.md
deleted file mode 100644
index b5281e646fdcf713e937ac755a4b4ea26dbf698c..0000000000000000000000000000000000000000
--- a/spaces/limcheekin/zephyr-7B-beta-GGUF/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: zephyr-7B-beta-GGUF (Q4_K_M)
-colorFrom: purple
-colorTo: blue
-sdk: docker
-models:
- - HuggingFaceH4/zephyr-7b-beta
- - TheBloke/zephyr-7B-beta-GGUF
-tags:
- - inference api
- - openai-api compatible
- - llama-cpp-python
- - zephyr-7B-beta-GGUF
- - gguf
-pinned: false
----
-
-# zephyr-7B-beta-GGUF (Q4_K_M)
-
-Please refer to the [index.html](index.html) for more information.
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/AUTODATA 8.55.0 Crack Serial Utorrent HOT.md b/spaces/lincquiQcaudo/Top-20-Diffusion/AUTODATA 8.55.0 Crack Serial Utorrent HOT.md
deleted file mode 100644
index 28633d56ec3bb2e9c1e3841fdbc985a491bcb7a6..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/AUTODATA 8.55.0 Crack Serial Utorrent HOT.md
+++ /dev/null
@@ -1,8 +0,0 @@
-AUTODATA 8.55.0 Crack Serial utorrent Download File ✔ https://bytlly.com/2uGymH
-
-March 4, 2015 - PES 2013 Editor V 2.0. Version 2.0 was released a few days ago.
-There are some changes in this version, including the ability to copy and paste the PES 2013 characters.
-Bugs removed in version 2.0:
-- The bug where character names were disappearing when the player was on their own
-- The bug where character names were disappearing when a player was on a team
-- The bug where the player wasn't moving when the player was playing backwards
-- The bug where the player wasn't moving when the player was on his own
-8a78ff9644
-
-
-
diff --git a/spaces/lizhen30/LangChainGo/llms_cache_option.py b/spaces/lizhen30/LangChainGo/llms_cache_option.py
deleted file mode 100644
index 52d3774b065588d839839d78bc224311e20035cd..0000000000000000000000000000000000000000
--- a/spaces/lizhen30/LangChainGo/llms_cache_option.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import time
-
-from langchain.llms import OpenAI
-
-# Caching is disabled for this particular LLM instance
-llm = OpenAI(model_name="text-davinci-002", n=2, best_of=2, cache=False)
-
-# %%time is an IPython magic and is not valid in a plain .py script; time the call explicitly instead
-start = time.time()
-llm("Tell me a joke")
-print(f"elapsed: {time.time() - start:.2f}s")
\ No newline at end of file
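-
-# For contrast, a sketch of enabling a global cache with the LangChain API of the
-# same era (names may differ in newer releases):
-#   import langchain
-#   from langchain.cache import InMemoryCache
-#   langchain.llm_cache = InMemoryCache()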
diff --git a/spaces/lj1995/vocal2guitar/extract_f0_print.py b/spaces/lj1995/vocal2guitar/extract_f0_print.py
deleted file mode 100644
index 8efe8a4345b942bd93306c3491588ed7edcb6c80..0000000000000000000000000000000000000000
--- a/spaces/lj1995/vocal2guitar/extract_f0_print.py
+++ /dev/null
@@ -1,160 +0,0 @@
-import os, traceback, sys, parselmouth
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from my_utils import load_audio
-import pyworld
-from scipy.io import wavfile
-import numpy as np, logging
-
-logging.getLogger("numba").setLevel(logging.WARNING)
-from multiprocessing import Process
-
-exp_dir = sys.argv[1]
-f = open("%s/extract_f0_feature.log" % exp_dir, "a+")
-
-
-def printt(strr):
- print(strr)
- f.write("%s\n" % strr)
- f.flush()
-
-
-n_p = int(sys.argv[2])
-f0method = sys.argv[3]
-
-
-class FeatureInput(object):
- def __init__(self, samplerate=16000, hop_size=160):
- self.fs = samplerate
- self.hop = hop_size
-
- self.f0_bin = 256
- self.f0_max = 1100.0
- self.f0_min = 50.0
- self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
- self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
-
- def compute_f0(self, path, f0_method):
- x = load_audio(path, self.fs)
- p_len = x.shape[0] // self.hop
- if f0_method == "pm":
- time_step = 160 / 16000 * 1000
- f0_min = 50
- f0_max = 1100
- f0 = (
- parselmouth.Sound(x, self.fs)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=f0_min,
- pitch_ceiling=f0_max,
- )
- .selected_array["frequency"]
- )
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
- )
- elif f0_method == "harvest":
- f0, t = pyworld.harvest(
- x.astype(np.double),
- fs=self.fs,
- f0_ceil=self.f0_max,
- f0_floor=self.f0_min,
- frame_period=1000 * self.hop / self.fs,
- )
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.fs)
- elif f0_method == "dio":
- f0, t = pyworld.dio(
- x.astype(np.double),
- fs=self.fs,
- f0_ceil=self.f0_max,
- f0_floor=self.f0_min,
- frame_period=1000 * self.hop / self.fs,
- )
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.fs)
- return f0
-
- def coarse_f0(self, f0):
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * (
- self.f0_bin - 2
- ) / (self.f0_mel_max - self.f0_mel_min) + 1
-
- # use 0 or 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1
- f0_coarse = np.rint(f0_mel).astype(int)
- assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (
- f0_coarse.max(),
- f0_coarse.min(),
- )
- return f0_coarse
-
- def go(self, paths, f0_method):
- if len(paths) == 0:
- printt("no-f0-todo")
- else:
- printt("todo-f0-%s" % len(paths))
- n = max(len(paths) // 5, 1)  # print at most 5 progress messages per process
- for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths):
- try:
- if idx % n == 0:
- printt("f0ing,now-%s,all-%s,-%s" % (idx, len(paths), inp_path))
- if (
- os.path.exists(opt_path1 + ".npy") == True
- and os.path.exists(opt_path2 + ".npy") == True
- ):
- continue
- featur_pit = self.compute_f0(inp_path, f0_method)
- np.save(
- opt_path2,
- featur_pit,
- allow_pickle=False,
- ) # nsf
- coarse_pit = self.coarse_f0(featur_pit)
- np.save(
- opt_path1,
- coarse_pit,
- allow_pickle=False,
- ) # ori
- except:
- printt("f0fail-%s-%s-%s" % (idx, inp_path, traceback.format_exc()))
-
-
-if __name__ == "__main__":
- # exp_dir=r"E:\codes\py39\dataset\mi-test"
- # n_p=16
- # f = open("%s/log_extract_f0.log"%exp_dir, "w")
- printt(sys.argv)
- featureInput = FeatureInput()
- paths = []
- inp_root = "%s/1_16k_wavs" % (exp_dir)
- opt_root1 = "%s/2a_f0" % (exp_dir)
- opt_root2 = "%s/2b-f0nsf" % (exp_dir)
-
- os.makedirs(opt_root1, exist_ok=True)
- os.makedirs(opt_root2, exist_ok=True)
- for name in sorted(list(os.listdir(inp_root))):
- inp_path = "%s/%s" % (inp_root, name)
- if "spec" in inp_path:
- continue
- opt_path1 = "%s/%s" % (opt_root1, name)
- opt_path2 = "%s/%s" % (opt_root2, name)
- paths.append([inp_path, opt_path1, opt_path2])
-
- ps = []
- for i in range(n_p):
- p = Process(
- target=featureInput.go,
- args=(
- paths[i::n_p],
- f0method,
- ),
- )
- ps.append(p)
- p.start()
- for i in range(n_p):
- ps[i].join()
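-
-# Invocation sketch (arguments follow the sys.argv order used above):
-#   python extract_f0_print.py <exp_dir> <num_processes> <pm|harvest|dio>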
diff --git a/spaces/lkeab/transfiner/demo/predictor.py b/spaces/lkeab/transfiner/demo/predictor.py
deleted file mode 100644
index 7b7ebd3f846850172c1f560f8492d51e5667f76d..0000000000000000000000000000000000000000
--- a/spaces/lkeab/transfiner/demo/predictor.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import atexit
-import bisect
-import multiprocessing as mp
-from collections import deque
-import cv2
-import torch
-
-from detectron2.data import MetadataCatalog
-from detectron2.engine.defaults import DefaultPredictor
-from detectron2.utils.video_visualizer import VideoVisualizer
-from detectron2.utils.visualizer import ColorMode, Visualizer
-
-
-class VisualizationDemo(object):
- def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
- """
- Args:
- cfg (CfgNode):
- instance_mode (ColorMode):
- parallel (bool): whether to run the model in different processes from visualization.
- Useful since the visualization logic can be slow.
- """
- self.metadata = MetadataCatalog.get(
- cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
- )
- self.cpu_device = torch.device("cpu")
- self.instance_mode = instance_mode
-
- self.parallel = parallel
- if parallel:
- num_gpu = torch.cuda.device_count()
- self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
- else:
- self.predictor = DefaultPredictor(cfg)
-
- def run_on_image(self, image):
- """
- Args:
- image (np.ndarray): an image of shape (H, W, C) (in BGR order).
- This is the format used by OpenCV.
-
- Returns:
- predictions (dict): the output of the model.
- vis_output (VisImage): the visualized image output.
- """
- vis_output = None
- predictions = self.predictor(image)
- # Convert image from OpenCV BGR format to Matplotlib RGB format.
- image = image[:, :, ::-1]
- visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
- if "panoptic_seg" in predictions:
- panoptic_seg, segments_info = predictions["panoptic_seg"]
- vis_output = visualizer.draw_panoptic_seg_predictions(
- panoptic_seg.to(self.cpu_device), segments_info
- )
- else:
- if "sem_seg" in predictions:
- vis_output = visualizer.draw_sem_seg(
- predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
- )
- if "instances" in predictions:
- instances = predictions["instances"].to(self.cpu_device)
- vis_output = visualizer.draw_instance_predictions(predictions=instances)
-
- return predictions, vis_output
-
- def _frame_from_video(self, video):
- while video.isOpened():
- success, frame = video.read()
- if success:
- yield frame
- else:
- break
-
- def run_on_video(self, video):
- """
- Visualizes predictions on frames of the input video.
-
- Args:
- video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
- either a webcam or a video file.
-
- Yields:
- ndarray: BGR visualizations of each video frame.
- """
- video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
-
- def process_predictions(frame, predictions):
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- if "panoptic_seg" in predictions:
- panoptic_seg, segments_info = predictions["panoptic_seg"]
- vis_frame = video_visualizer.draw_panoptic_seg_predictions(
- frame, panoptic_seg.to(self.cpu_device), segments_info
- )
- elif "instances" in predictions:
- predictions = predictions["instances"].to(self.cpu_device)
- vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
- elif "sem_seg" in predictions:
- vis_frame = video_visualizer.draw_sem_seg(
- frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
- )
-
- # Converts Matplotlib RGB format to OpenCV BGR format
- vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
- return vis_frame
-
- frame_gen = self._frame_from_video(video)
- if self.parallel:
- buffer_size = self.predictor.default_buffer_size
-
- frame_data = deque()
-
- for cnt, frame in enumerate(frame_gen):
- frame_data.append(frame)
- self.predictor.put(frame)
-
- if cnt >= buffer_size:
- frame = frame_data.popleft()
- predictions = self.predictor.get()
- yield process_predictions(frame, predictions)
-
- while len(frame_data):
- frame = frame_data.popleft()
- predictions = self.predictor.get()
- yield process_predictions(frame, predictions)
- else:
- for frame in frame_gen:
- yield process_predictions(frame, self.predictor(frame))
-
-
-class AsyncPredictor:
- """
- A predictor that runs the model asynchronously, possibly on >1 GPUs.
- Because rendering the visualization takes a considerable amount of time,
- this helps improve throughput a little bit when rendering videos.
- """
-
- class _StopToken:
- pass
-
- class _PredictWorker(mp.Process):
- def __init__(self, cfg, task_queue, result_queue):
- self.cfg = cfg
- self.task_queue = task_queue
- self.result_queue = result_queue
- super().__init__()
-
- def run(self):
- predictor = DefaultPredictor(self.cfg)
-
- while True:
- task = self.task_queue.get()
- if isinstance(task, AsyncPredictor._StopToken):
- break
- idx, data = task
- result = predictor(data)
- self.result_queue.put((idx, result))
-
- def __init__(self, cfg, num_gpus: int = 1):
- """
- Args:
- cfg (CfgNode):
- num_gpus (int): if 0, will run on CPU
- """
- num_workers = max(num_gpus, 1)
- self.task_queue = mp.Queue(maxsize=num_workers * 3)
- self.result_queue = mp.Queue(maxsize=num_workers * 3)
- self.procs = []
- for gpuid in range(max(num_gpus, 1)):
- cfg = cfg.clone()
- cfg.defrost()
- cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
- self.procs.append(
- AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
- )
-
- self.put_idx = 0
- self.get_idx = 0
- self.result_rank = []
- self.result_data = []
-
- for p in self.procs:
- p.start()
- atexit.register(self.shutdown)
-
- def put(self, image):
- self.put_idx += 1
- self.task_queue.put((self.put_idx, image))
-
- def get(self):
- self.get_idx += 1 # the index needed for this request
- if len(self.result_rank) and self.result_rank[0] == self.get_idx:
- res = self.result_data[0]
- del self.result_data[0], self.result_rank[0]
- return res
-
- while True:
- # make sure the results are returned in the correct order
- idx, res = self.result_queue.get()
- if idx == self.get_idx:
- return res
- insert = bisect.bisect(self.result_rank, idx)
- self.result_rank.insert(insert, idx)
- self.result_data.insert(insert, res)
-
- def __len__(self):
- return self.put_idx - self.get_idx
-
- def __call__(self, image):
- self.put(image)
- return self.get()
-
- def shutdown(self):
- for _ in self.procs:
- self.task_queue.put(AsyncPredictor._StopToken())
-
- @property
- def default_buffer_size(self):
- return len(self.procs) * 5
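-
-# A minimal driving sketch (not part of the original demo). It uses a stock
-# detectron2 model-zoo config rather than the transfiner config this Space ships;
-# the input image path is a placeholder.
-if __name__ == "__main__":
-    from detectron2 import model_zoo
-    from detectron2.config import get_cfg
-
-    cfg = get_cfg()
-    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
-    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
-    demo = VisualizationDemo(cfg)
-    img = cv2.imread("input.jpg")  # BGR, as run_on_image expects
-    _, vis = demo.run_on_image(img)
-    cv2.imwrite("output.jpg", vis.get_image()[:, :, ::-1])  # visualizer returns RGB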
diff --git a/spaces/lmz/candle-llama2/llama2cWorker.js b/spaces/lmz/candle-llama2/llama2cWorker.js
deleted file mode 100644
index a46b5bc8ea7d2724e99be19e0f0db27a2c37fc1e..0000000000000000000000000000000000000000
--- a/spaces/lmz/candle-llama2/llama2cWorker.js
+++ /dev/null
@@ -1,113 +0,0 @@
-import init, { Model } from "./build/m.js";
-
-async function fetchArrayBuffer(url) {
- const cacheName = "llama2c-candle-cache";
- const cache = await caches.open(cacheName);
- const cachedResponse = await cache.match(url);
- if (cachedResponse) {
- const data = await cachedResponse.arrayBuffer();
- return new Uint8Array(data);
- }
- const res = await fetch(url, { cache: "force-cache" });
- cache.put(url, res.clone());
- return new Uint8Array(await res.arrayBuffer());
-}
-class Llama2C {
- static instance = {};
-
- static async getInstance(weightsURL, modelID, tokenizerURL) {
- // load individual modelID only once
- if (!this.instance[modelID]) {
- await init();
-
- self.postMessage({ status: "loading", message: "Loading Model" });
-
- const [weightsArrayU8, tokenizerArrayU8] = await Promise.all([
- fetchArrayBuffer(weightsURL),
- fetchArrayBuffer(tokenizerURL),
- ]);
-
- this.instance[modelID] = new Model(weightsArrayU8, tokenizerArrayU8);
- }
- return this.instance[modelID];
- }
-}
-
-let controller = null;
-self.addEventListener("message", (event) => {
- if (event.data.command === "start") {
- controller = new AbortController();
- generate(event.data);
- } else if (event.data.command === "abort") {
- controller.abort();
- }
-});
-
-async function generate(data) {
- const {
- weightsURL,
- modelID,
- tokenizerURL,
- prompt,
- temp,
- top_p,
- repeatPenalty,
- seed,
- maxSeqLen,
- } = data;
- try {
- self.postMessage({ status: "loading", message: "Starting llama2.c" });
- const model = await Llama2C.getInstance(weightsURL, modelID, tokenizerURL);
-
- self.postMessage({ status: "loading", message: "Initializing model" });
- const firstToken = model.init_with_prompt(
- prompt,
- temp,
- top_p,
- repeatPenalty,
- seed
- );
-
- const seq_len = model.get_seq_len();
-
- let sentence = firstToken;
- let maxTokens = maxSeqLen ? maxSeqLen : seq_len - prompt.length - 1;
- let startTime = performance.now();
- let tokensCount = 0;
- while (tokensCount < maxTokens) {
- await new Promise(async (resolve) => {
- if (controller && controller.signal.aborted) {
- self.postMessage({
- status: "aborted",
- message: "Aborted",
- output: prompt + sentence,
- });
- return;
- }
- const token = await model.next_token();
- const tokensSec =
- ((tokensCount + 1) / (performance.now() - startTime)) * 1000;
-
- sentence += token;
- self.postMessage({
- status: "generating",
- message: "Generating token",
- token: token,
- sentence: sentence,
- totalTime: performance.now() - startTime,
- tokensSec,
- prompt: prompt,
- });
- setTimeout(resolve, 0);
- });
- tokensCount++;
- }
- self.postMessage({
- status: "complete",
- message: "complete",
- output: prompt + sentence,
- });
- } catch (e) {
- self.postMessage({ error: e });
- }
-}
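-
-// Main-thread sketch (not from the original Space); URLs and generation
-// parameters are placeholders. Field names match the destructuring in generate().
-// const worker = new Worker("./llama2cWorker.js", { type: "module" });
-// worker.postMessage({
-//   command: "start",
-//   weightsURL: "stories15M.bin", modelID: "stories15M", tokenizerURL: "tokenizer.json",
-//   prompt: "Once upon a time", temp: 0.8, top_p: 0.95, repeatPenalty: 1.1,
-//   seed: 299792458, maxSeqLen: 128,
-// });
-// worker.onmessage = ({ data }) => {
-//   if (data.status === "generating") console.log(data.sentence);
-// };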
diff --git a/spaces/ltgoslo/ssa-perin/data/dataset.py b/spaces/ltgoslo/ssa-perin/data/dataset.py
deleted file mode 100644
index 790f4c024a488ad6445668037c0d03a78002af3a..0000000000000000000000000000000000000000
--- a/spaces/ltgoslo/ssa-perin/data/dataset.py
+++ /dev/null
@@ -1,245 +0,0 @@
-#!/usr/bin/env python3
-# coding=utf-8
-
-import pickle
-
-import torch
-
-from data.parser.from_mrp.node_centric_parser import NodeCentricParser
-from data.parser.from_mrp.labeled_edge_parser import LabeledEdgeParser
-from data.parser.from_mrp.sequential_parser import SequentialParser
-from data.parser.from_mrp.evaluation_parser import EvaluationParser
-from data.parser.from_mrp.request_parser import RequestParser
-from data.field.edge_field import EdgeField
-from data.field.edge_label_field import EdgeLabelField
-from data.field.field import Field
-from data.field.mini_torchtext.field import Field as TorchTextField
-from data.field.label_field import LabelField
-from data.field.anchored_label_field import AnchoredLabelField
-from data.field.nested_field import NestedField
-from data.field.basic_field import BasicField
-from data.field.bert_field import BertField
-from data.field.anchor_field import AnchorField
-from data.batch import Batch
-
-
-def char_tokenize(word):
- return [c for i, c in enumerate(word)] # if i < 10 or len(word) - i <= 10]
-
-
-class Collate:
- def __call__(self, batch):
- batch.sort(key=lambda example: example["every_input"][0].size(0), reverse=True)
- return Batch.build(batch)
-
-
-class Dataset:
- def __init__(self, args, verbose=True):
- self.verbose = verbose
- self.sos, self.eos, self.pad, self.unk = "<sos>", "<eos>", "<pad>", "<unk>"
-
- self.bert_input_field = BertField()
- self.scatter_field = BasicField()
- self.every_word_input_field = Field(lower=True, init_token=self.sos, eos_token=self.eos, batch_first=True, include_lengths=True)
-
- char_form_nesting = TorchTextField(tokenize=char_tokenize, init_token=self.sos, eos_token=self.eos, batch_first=True)
- self.char_form_field = NestedField(char_form_nesting, include_lengths=True)
-
- self.label_field = LabelField(preprocessing=lambda nodes: [n["label"] for n in nodes])
- self.anchored_label_field = AnchoredLabelField()
-
- self.id_field = Field(batch_first=True, tokenize=lambda x: [x])
- self.edge_presence_field = EdgeField()
- self.edge_label_field = EdgeLabelField()
- self.anchor_field = AnchorField()
- self.source_anchor_field = AnchorField()
- self.target_anchor_field = AnchorField()
- self.token_interval_field = BasicField()
-
- self.load_dataset(args)
-
- def log(self, text):
- if not self.verbose:
- return
- print(text, flush=True)
-
- def load_state_dict(self, args, d):
- for key, value in d["vocabs"].items():
- getattr(self, key).vocab = pickle.loads(value)
-
- def state_dict(self):
- return {
- "vocabs": {key: pickle.dumps(value.vocab) for key, value in self.__dict__.items() if hasattr(value, "vocab")}
- }
-
- def load_sentences(self, sentences, args):
- dataset = RequestParser(
- sentences, args,
- fields={
- "input": [("every_input", self.every_word_input_field), ("char_form_input", self.char_form_field)],
- "bert input": ("input", self.bert_input_field),
- "to scatter": ("input_scatter", self.scatter_field),
- "token anchors": ("token_intervals", self.token_interval_field),
- "id": ("id", self.id_field),
- },
- )
-
- self.every_word_input_field.build_vocab(dataset, min_freq=1, specials=[self.pad, self.unk, self.sos, self.eos])
- self.id_field.build_vocab(dataset, min_freq=1, specials=[])
-
- return torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, collate_fn=Collate())
-
- def load_dataset(self, args):
- parser = {
- "sequential": SequentialParser,
- "node-centric": NodeCentricParser,
- "labeled-edge": LabeledEdgeParser
- }[args.graph_mode]
-
- train = parser(
- args, "training",
- fields={
- "input": [("every_input", self.every_word_input_field), ("char_form_input", self.char_form_field)],
- "bert input": ("input", self.bert_input_field),
- "to scatter": ("input_scatter", self.scatter_field),
- "nodes": ("labels", self.label_field),
- "anchored labels": ("anchored_labels", self.anchored_label_field),
- "edge presence": ("edge_presence", self.edge_presence_field),
- "edge labels": ("edge_labels", self.edge_label_field),
- "anchor edges": ("anchor", self.anchor_field),
- "source anchor edges": ("source_anchor", self.source_anchor_field),
- "target anchor edges": ("target_anchor", self.target_anchor_field),
- "token anchors": ("token_intervals", self.token_interval_field),
- "id": ("id", self.id_field),
- },
- filter_pred=lambda example: len(example.input) <= 256,
- )
-
- val = parser(
- args, "validation",
- fields={
- "input": [("every_input", self.every_word_input_field), ("char_form_input", self.char_form_field)],
- "bert input": ("input", self.bert_input_field),
- "to scatter": ("input_scatter", self.scatter_field),
- "nodes": ("labels", self.label_field),
- "anchored labels": ("anchored_labels", self.anchored_label_field),
- "edge presence": ("edge_presence", self.edge_presence_field),
- "edge labels": ("edge_labels", self.edge_label_field),
- "anchor edges": ("anchor", self.anchor_field),
- "source anchor edges": ("source_anchor", self.source_anchor_field),
- "target anchor edges": ("target_anchor", self.target_anchor_field),
- "token anchors": ("token_intervals", self.token_interval_field),
- "id": ("id", self.id_field),
- },
- )
-
- test = EvaluationParser(
- args,
- fields={
- "input": [("every_input", self.every_word_input_field), ("char_form_input", self.char_form_field)],
- "bert input": ("input", self.bert_input_field),
- "to scatter": ("input_scatter", self.scatter_field),
- "token anchors": ("token_intervals", self.token_interval_field),
- "id": ("id", self.id_field),
- },
- )
-
- del train.data, val.data, test.data # TODO: why?
- for f in list(train.fields.values()) + list(val.fields.values()) + list(test.fields.values()): # TODO: why?
- if hasattr(f, "preprocessing"):
- del f.preprocessing
-
- self.train_size = len(train)
- self.val_size = len(val)
- self.test_size = len(test)
-
- self.log(f"\n{self.train_size} sentences in the train split")
- self.log(f"{self.val_size} sentences in the validation split")
- self.log(f"{self.test_size} sentences in the test split")
-
- self.node_count = train.node_counter
- self.token_count = train.input_count
- self.edge_count = train.edge_counter
- self.no_edge_count = train.no_edge_counter
- self.anchor_freq = train.anchor_freq
-
- self.source_anchor_freq = train.source_anchor_freq if hasattr(train, "source_anchor_freq") else 0.5
- self.target_anchor_freq = train.target_anchor_freq if hasattr(train, "target_anchor_freq") else 0.5
- self.log(f"{self.node_count} nodes in the train split")
-
- self.every_word_input_field.build_vocab(val, test, min_freq=1, specials=[self.pad, self.unk, self.sos, self.eos])
- self.char_form_field.build_vocab(train, min_freq=1, specials=[self.pad, self.unk, self.sos, self.eos])
- self.char_form_field.nesting_field.vocab = self.char_form_field.vocab
- self.id_field.build_vocab(train, val, test, min_freq=1, specials=[])
- self.label_field.build_vocab(train)
- self.anchored_label_field.vocab = self.label_field.vocab
- self.edge_label_field.build_vocab(train)
- print(list(self.edge_label_field.vocab.freqs.keys()), flush=True)
-
- self.char_form_vocab_size = len(self.char_form_field.vocab)
- self.create_label_freqs(args)
- self.create_edge_freqs(args)
-
- self.log(f"Edge frequency: {self.edge_presence_freq*100:.2f} %")
- self.log(f"{len(self.label_field.vocab)} words in the label vocabulary")
- self.log(f"{len(self.anchored_label_field.vocab)} words in the anchored label vocabulary")
- self.log(f"{len(self.edge_label_field.vocab)} words in the edge label vocabulary")
- self.log(f"{len(self.char_form_field.vocab)} characters in the vocabulary")
-
- self.log(self.label_field.vocab.freqs)
- self.log(self.anchored_label_field.vocab.freqs)
-
- self.train = torch.utils.data.DataLoader(
- train,
- batch_size=args.batch_size,
- shuffle=True,
- num_workers=args.workers,
- collate_fn=Collate(),
- pin_memory=True,
- drop_last=True
- )
- self.train_size = len(self.train.dataset)
-
- self.val = torch.utils.data.DataLoader(
- val,
- batch_size=args.batch_size,
- shuffle=False,
- num_workers=args.workers,
- collate_fn=Collate(),
- pin_memory=True,
- )
- self.val_size = len(self.val.dataset)
-
- self.test = torch.utils.data.DataLoader(
- test,
- batch_size=args.batch_size,
- shuffle=False,
- num_workers=args.workers,
- collate_fn=Collate(),
- pin_memory=True,
- )
- self.test_size = len(self.test.dataset)
-
- if self.verbose:
- batch = next(iter(self.train))
- print(f"\nBatch content: {Batch.to_str(batch)}\n")
- print(flush=True)
-
- def create_label_freqs(self, args):
- n_rules = len(self.label_field.vocab)
- blank_count = (args.query_length * self.token_count - self.node_count)
- label_counts = [blank_count] + [
- self.label_field.vocab.freqs[self.label_field.vocab.itos[i]]
- for i in range(n_rules)
- ]
- label_counts = torch.FloatTensor(label_counts)
- self.label_freqs = label_counts / (self.node_count + blank_count)
- self.log(f"Label frequency: {self.label_freqs}")
-
- def create_edge_freqs(self, args):
- edge_counter = [
- self.edge_label_field.vocab.freqs[self.edge_label_field.vocab.itos[i]] for i in range(len(self.edge_label_field.vocab))
- ]
- edge_counter = torch.FloatTensor(edge_counter)
- self.edge_label_freqs = edge_counter / self.edge_count
- self.edge_presence_freq = self.edge_count / (self.edge_count + self.no_edge_count)
diff --git a/spaces/lucinnerieux23/kotkindjn/README.md b/spaces/lucinnerieux23/kotkindjn/README.md
deleted file mode 100644
index f001e7058fef619e5459afb2de94ec8ff3aae00d..0000000000000000000000000000000000000000
--- a/spaces/lucinnerieux23/kotkindjn/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Kotkindjn
-emoji: 🐨
-colorFrom: indigo
-colorTo: indigo
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ma-xu/LIVE/thrust/thrust/iterator/detail/normal_iterator.h b/spaces/ma-xu/LIVE/thrust/thrust/iterator/detail/normal_iterator.h
deleted file mode 100644
index 0f6e1660e8f4692b08bca7af2a971c3e7cf554e1..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/thrust/thrust/iterator/detail/normal_iterator.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*! \file normal_iterator.h
- * \brief Defines the interface to an iterator class
- * which adapts a pointer type.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/iterator/iterator_adaptor.h>
-#include <thrust/type_traits/is_contiguous_iterator.h>
-
-namespace thrust
-{
-namespace detail
-{
-
-
-template<typename Pointer>
- class normal_iterator
- : public iterator_adaptor<
- normal_iterator<Pointer>,
- Pointer
- >
-{
- typedef iterator_adaptor<normal_iterator<Pointer>, Pointer> super_t;
-
- public:
- __host__ __device__
- normal_iterator() {}
-
- __host__ __device__
- normal_iterator(Pointer p)
- : super_t(p) {}
-
- template<typename OtherPointer>
- __host__ __device__
- normal_iterator(const normal_iterator<OtherPointer> &other,
- typename thrust::detail::enable_if_convertible<
- OtherPointer,
- Pointer
- >::type * = 0)
- : super_t(other.base()) {}
-
-}; // end normal_iterator
-
-
-template<typename Pointer>
- inline __host__ __device__ normal_iterator<Pointer> make_normal_iterator(Pointer ptr)
-{
- return normal_iterator<Pointer>(ptr);
-}
-
-} // end detail
-
-template<typename Pointer>
-struct proclaim_contiguous_iterator<
- thrust::detail::normal_iterator<Pointer>
-> : true_type {};
-
-} // end thrust
-
diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/remove.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/remove.h
deleted file mode 100644
index 48de522dfdc5b1f6e0e274eb31f98d352943fccd..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/remove.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*! \file remove.h
- * \brief Sequential implementations of remove functions.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/sequential/execution_policy.h>
-#include <thrust/detail/function.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace sequential
-{
-
-
-__thrust_exec_check_disable__
-template<typename DerivedPolicy, typename ForwardIterator, typename Predicate>
-__host__ __device__
- ForwardIterator remove_if(sequential::execution_policy<DerivedPolicy> &,
- ForwardIterator first,
- ForwardIterator last,
- Predicate pred)
-{
- // wrap pred
- thrust::detail::wrapped_function<
- Predicate,
- bool
- > wrapped_pred(pred);
-
- // advance iterators until wrapped_pred(*first) is true or we reach the end of input
- while(first != last && !wrapped_pred(*first))
- ++first;
-
- if(first == last)
- return first;
-
- // result always trails first
- ForwardIterator result = first;
-
- ++first;
-
- while(first != last)
- {
- if(!wrapped_pred(*first))
- {
- *result = *first;
- ++result;
- }
- ++first;
- }
-
- return result;
-}
-
-
-__thrust_exec_check_disable__
-template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename Predicate>
-__host__ __device__
-  ForwardIterator remove_if(sequential::execution_policy<DerivedPolicy> &,
- ForwardIterator first,
- ForwardIterator last,
- InputIterator stencil,
- Predicate pred)
-{
- // wrap pred
- thrust::detail::wrapped_function<
- Predicate,
- bool
- > wrapped_pred(pred);
-
- // advance iterators until wrapped_pred(*stencil) is true or we reach the end of input
- while(first != last && !wrapped_pred(*stencil))
- {
- ++first;
- ++stencil;
- }
-
- if(first == last)
- return first;
-
- // result always trails first
- ForwardIterator result = first;
-
- ++first;
- ++stencil;
-
- while(first != last)
- {
- if(!wrapped_pred(*stencil))
- {
- *result = *first;
- ++result;
- }
- ++first;
- ++stencil;
- }
-
- return result;
-}
-
-
-__thrust_exec_check_disable__
-template<typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename Predicate>
-__host__ __device__
-  OutputIterator remove_copy_if(sequential::execution_policy<DerivedPolicy> &,
- InputIterator first,
- InputIterator last,
- OutputIterator result,
- Predicate pred)
-{
- // wrap pred
- thrust::detail::wrapped_function<
- Predicate,
- bool
- > wrapped_pred(pred);
-
- while (first != last)
- {
- if (!wrapped_pred(*first))
- {
- *result = *first;
- ++result;
- }
-
- ++first;
- }
-
- return result;
-}
-
-
-__thrust_exec_check_disable__
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Predicate>
-__host__ __device__
-  OutputIterator remove_copy_if(sequential::execution_policy<DerivedPolicy> &,
- InputIterator1 first,
- InputIterator1 last,
- InputIterator2 stencil,
- OutputIterator result,
- Predicate pred)
-{
- // wrap pred
- thrust::detail::wrapped_function<
- Predicate,
- bool
- > wrapped_pred(pred);
-
- while (first != last)
- {
- if (!wrapped_pred(*stencil))
- {
- *result = *first;
- ++result;
- }
-
- ++first;
- ++stencil;
- }
-
- return result;
-}
-
-
-} // end namespace sequential
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
diff --git a/spaces/manhkhanhUIT/BOPBTL/Global/detection_models/sync_batchnorm/batchnorm.py b/spaces/manhkhanhUIT/BOPBTL/Global/detection_models/sync_batchnorm/batchnorm.py
deleted file mode 100644
index bf8d7a7325b474771a11a137053971fd40426079..0000000000000000000000000000000000000000
--- a/spaces/manhkhanhUIT/BOPBTL/Global/detection_models/sync_batchnorm/batchnorm.py
+++ /dev/null
@@ -1,412 +0,0 @@
-# -*- coding: utf-8 -*-
-# File : batchnorm.py
-# Author : Jiayuan Mao
-# Email : maojiayuan@gmail.com
-# Date : 27/01/2018
-#
-# This file is part of Synchronized-BatchNorm-PyTorch.
-# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
-# Distributed under MIT License.
-
-import collections
-import contextlib
-
-import torch
-import torch.nn.functional as F
-
-from torch.nn.modules.batchnorm import _BatchNorm
-
-try:
- from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
-except ImportError:
- ReduceAddCoalesced = Broadcast = None
-
-try:
- from jactorch.parallel.comm import SyncMaster
- from jactorch.parallel.data_parallel import JacDataParallel as DataParallelWithCallback
-except ImportError:
- from .comm import SyncMaster
- from .replicate import DataParallelWithCallback
-
-__all__ = [
- 'set_sbn_eps_mode',
- 'SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d',
- 'patch_sync_batchnorm', 'convert_model'
-]
-
-
-SBN_EPS_MODE = 'clamp'
-
-
-def set_sbn_eps_mode(mode):
- global SBN_EPS_MODE
- assert mode in ('clamp', 'plus')
- SBN_EPS_MODE = mode
-
-
-def _sum_ft(tensor):
-    """sum over the first and last dimension"""
- return tensor.sum(dim=0).sum(dim=-1)
-
-
-def _unsqueeze_ft(tensor):
- """add new dimensions at the front and the tail"""
- return tensor.unsqueeze(0).unsqueeze(-1)
-
-
-_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
-_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
-
-
-class _SynchronizedBatchNorm(_BatchNorm):
- def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
- assert ReduceAddCoalesced is not None, 'Can not use Synchronized Batch Normalization without CUDA support.'
-
- super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine,
- track_running_stats=track_running_stats)
-
- if not self.track_running_stats:
- import warnings
- warnings.warn('track_running_stats=False is not supported by the SynchronizedBatchNorm.')
-
- self._sync_master = SyncMaster(self._data_parallel_master)
-
- self._is_parallel = False
- self._parallel_id = None
- self._slave_pipe = None
-
- def forward(self, input):
- # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
- if not (self._is_parallel and self.training):
- return F.batch_norm(
- input, self.running_mean, self.running_var, self.weight, self.bias,
- self.training, self.momentum, self.eps)
-
- # Resize the input to (B, C, -1).
- input_shape = input.size()
- assert input.size(1) == self.num_features, 'Channel size mismatch: got {}, expect {}.'.format(input.size(1), self.num_features)
- input = input.view(input.size(0), self.num_features, -1)
-
- # Compute the sum and square-sum.
- sum_size = input.size(0) * input.size(2)
- input_sum = _sum_ft(input)
- input_ssum = _sum_ft(input ** 2)
-
- # Reduce-and-broadcast the statistics.
- if self._parallel_id == 0:
- mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
- else:
- mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
-
- # Compute the output.
- if self.affine:
- # MJY:: Fuse the multiplication for speed.
- output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
- else:
- output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)
-
- # Reshape it.
- return output.view(input_shape)
-
- def __data_parallel_replicate__(self, ctx, copy_id):
- self._is_parallel = True
- self._parallel_id = copy_id
-
- # parallel_id == 0 means master device.
- if self._parallel_id == 0:
- ctx.sync_master = self._sync_master
- else:
- self._slave_pipe = ctx.sync_master.register_slave(copy_id)
-
- def _data_parallel_master(self, intermediates):
- """Reduce the sum and square-sum, compute the statistics, and broadcast it."""
-
- # Always using same "device order" makes the ReduceAdd operation faster.
- # Thanks to:: Tete Xiao (http://tetexiao.com/)
- intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
-
- to_reduce = [i[1][:2] for i in intermediates]
- to_reduce = [j for i in to_reduce for j in i] # flatten
- target_gpus = [i[1].sum.get_device() for i in intermediates]
-
- sum_size = sum([i[1].sum_size for i in intermediates])
- sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
- mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)
-
- broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
-
- outputs = []
- for i, rec in enumerate(intermediates):
- outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))
-
- return outputs
-
- def _compute_mean_std(self, sum_, ssum, size):
- """Compute the mean and standard-deviation with sum and square-sum. This method
- also maintains the moving average on the master device."""
- assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
- mean = sum_ / size
- sumvar = ssum - sum_ * mean
- unbias_var = sumvar / (size - 1)
- bias_var = sumvar / size
-
- if hasattr(torch, 'no_grad'):
- with torch.no_grad():
- self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
- self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
- else:
- self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
- self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
-
- if SBN_EPS_MODE == 'clamp':
- return mean, bias_var.clamp(self.eps) ** -0.5
- elif SBN_EPS_MODE == 'plus':
- return mean, (bias_var + self.eps) ** -0.5
- else:
- raise ValueError('Unknown EPS mode: {}.'.format(SBN_EPS_MODE))
-
-
-class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
- r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a
- mini-batch.
-
- .. math::
-
- y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
-
- This module differs from the built-in PyTorch BatchNorm1d as the mean and
- standard-deviation are reduced across all devices during training.
-
- For example, when one uses `nn.DataParallel` to wrap the network during
-    training, PyTorch's implementation normalizes the tensor on each device using
-    only the statistics on that device, which accelerates the computation and
-    is easy to implement, but the statistics might be inaccurate.
-    Instead, in this synchronized version, the statistics will be computed
-    over all training samples distributed on multiple devices.
-
-    Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
-    as the built-in PyTorch implementation.
-
- The mean and standard-deviation are calculated per-dimension over
- the mini-batches and gamma and beta are learnable parameter vectors
- of size C (where C is the input size).
-
- During training, this layer keeps a running estimate of its computed mean
- and variance. The running sum is kept with a default momentum of 0.1.
-
- During evaluation, this running mean/variance is used for normalization.
-
- Because the BatchNorm is done over the `C` dimension, computing statistics
- on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm
-
- Args:
- num_features: num_features from an expected input of size
- `batch_size x num_features [x width]`
- eps: a value added to the denominator for numerical stability.
- Default: 1e-5
- momentum: the value used for the running_mean and running_var
- computation. Default: 0.1
- affine: a boolean value that when set to ``True``, gives the layer learnable
- affine parameters. Default: ``True``
-
- Shape::
- - Input: :math:`(N, C)` or :math:`(N, C, L)`
- - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
-
- Examples:
- >>> # With Learnable Parameters
- >>> m = SynchronizedBatchNorm1d(100)
- >>> # Without Learnable Parameters
- >>> m = SynchronizedBatchNorm1d(100, affine=False)
- >>> input = torch.autograd.Variable(torch.randn(20, 100))
- >>> output = m(input)
- """
-
- def _check_input_dim(self, input):
- if input.dim() != 2 and input.dim() != 3:
- raise ValueError('expected 2D or 3D input (got {}D input)'
- .format(input.dim()))
-
-
-class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
- r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch
- of 3d inputs
-
- .. math::
-
- y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
-
- This module differs from the built-in PyTorch BatchNorm2d as the mean and
- standard-deviation are reduced across all devices during training.
-
- For example, when one uses `nn.DataParallel` to wrap the network during
-    training, PyTorch's implementation normalizes the tensor on each device using
-    only the statistics on that device, which accelerates the computation and
-    is easy to implement, but the statistics might be inaccurate.
-    Instead, in this synchronized version, the statistics will be computed
-    over all training samples distributed on multiple devices.
-
-    Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
-    as the built-in PyTorch implementation.
-
- The mean and standard-deviation are calculated per-dimension over
- the mini-batches and gamma and beta are learnable parameter vectors
- of size C (where C is the input size).
-
- During training, this layer keeps a running estimate of its computed mean
- and variance. The running sum is kept with a default momentum of 0.1.
-
- During evaluation, this running mean/variance is used for normalization.
-
- Because the BatchNorm is done over the `C` dimension, computing statistics
- on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm
-
- Args:
- num_features: num_features from an expected input of
- size batch_size x num_features x height x width
- eps: a value added to the denominator for numerical stability.
- Default: 1e-5
- momentum: the value used for the running_mean and running_var
- computation. Default: 0.1
- affine: a boolean value that when set to ``True``, gives the layer learnable
- affine parameters. Default: ``True``
-
- Shape::
- - Input: :math:`(N, C, H, W)`
- - Output: :math:`(N, C, H, W)` (same shape as input)
-
- Examples:
- >>> # With Learnable Parameters
- >>> m = SynchronizedBatchNorm2d(100)
- >>> # Without Learnable Parameters
- >>> m = SynchronizedBatchNorm2d(100, affine=False)
- >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))
- >>> output = m(input)
- """
-
- def _check_input_dim(self, input):
- if input.dim() != 4:
- raise ValueError('expected 4D input (got {}D input)'
- .format(input.dim()))
-
-
-class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
- r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
- of 4d inputs
-
- .. math::
-
- y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
-
- This module differs from the built-in PyTorch BatchNorm3d as the mean and
- standard-deviation are reduced across all devices during training.
-
- For example, when one uses `nn.DataParallel` to wrap the network during
-    training, PyTorch's implementation normalizes the tensor on each device using
-    only the statistics on that device, which accelerates the computation and
-    is easy to implement, but the statistics might be inaccurate.
-    Instead, in this synchronized version, the statistics will be computed
-    over all training samples distributed on multiple devices.
-
-    Note that, for the one-GPU or CPU-only case, this module behaves exactly the same
-    as the built-in PyTorch implementation.
-
- The mean and standard-deviation are calculated per-dimension over
- the mini-batches and gamma and beta are learnable parameter vectors
- of size C (where C is the input size).
-
- During training, this layer keeps a running estimate of its computed mean
- and variance. The running sum is kept with a default momentum of 0.1.
-
- During evaluation, this running mean/variance is used for normalization.
-
- Because the BatchNorm is done over the `C` dimension, computing statistics
- on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm
- or Spatio-temporal BatchNorm
-
- Args:
- num_features: num_features from an expected input of
- size batch_size x num_features x depth x height x width
- eps: a value added to the denominator for numerical stability.
- Default: 1e-5
- momentum: the value used for the running_mean and running_var
- computation. Default: 0.1
- affine: a boolean value that when set to ``True``, gives the layer learnable
- affine parameters. Default: ``True``
-
- Shape::
- - Input: :math:`(N, C, D, H, W)`
- - Output: :math:`(N, C, D, H, W)` (same shape as input)
-
- Examples:
- >>> # With Learnable Parameters
- >>> m = SynchronizedBatchNorm3d(100)
- >>> # Without Learnable Parameters
- >>> m = SynchronizedBatchNorm3d(100, affine=False)
- >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
- >>> output = m(input)
- """
-
- def _check_input_dim(self, input):
- if input.dim() != 5:
- raise ValueError('expected 5D input (got {}D input)'
- .format(input.dim()))
-
-
-@contextlib.contextmanager
-def patch_sync_batchnorm():
- import torch.nn as nn
-
- backup = nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d
-
- nn.BatchNorm1d = SynchronizedBatchNorm1d
- nn.BatchNorm2d = SynchronizedBatchNorm2d
- nn.BatchNorm3d = SynchronizedBatchNorm3d
-
- yield
-
- nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d = backup
-
-
-def convert_model(module):
-    """Traverse the input module and its children recursively
-       and replace all instances of torch.nn.modules.batchnorm.BatchNorm*N*d
-       with SynchronizedBatchNorm*N*d.
-
-    Args:
-        module: the input module to be converted to a SyncBN model
-
- Examples:
- >>> import torch.nn as nn
- >>> import torchvision
- >>> # m is a standard pytorch model
- >>> m = torchvision.models.resnet18(True)
- >>> m = nn.DataParallel(m)
- >>> # after convert, m is using SyncBN
- >>> m = convert_model(m)
- """
- if isinstance(module, torch.nn.DataParallel):
- mod = module.module
- mod = convert_model(mod)
- mod = DataParallelWithCallback(mod, device_ids=module.device_ids)
- return mod
-
- mod = module
- for pth_module, sync_module in zip([torch.nn.modules.batchnorm.BatchNorm1d,
- torch.nn.modules.batchnorm.BatchNorm2d,
- torch.nn.modules.batchnorm.BatchNorm3d],
- [SynchronizedBatchNorm1d,
- SynchronizedBatchNorm2d,
- SynchronizedBatchNorm3d]):
- if isinstance(module, pth_module):
- mod = sync_module(module.num_features, module.eps, module.momentum, module.affine)
- mod.running_mean = module.running_mean
- mod.running_var = module.running_var
- if module.affine:
- mod.weight.data = module.weight.data.clone().detach()
- mod.bias.data = module.bias.data.clone().detach()
-
- for name, child in module.named_children():
- mod.add_module(name, convert_model(child))
-
- return mod
diff --git a/spaces/matthoffner/chatbot/components/Promptbar/components/Prompts.tsx b/spaces/matthoffner/chatbot/components/Promptbar/components/Prompts.tsx
deleted file mode 100644
index b84f250c9ad5ac79a8f3ae893ff6f050e2846ebe..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/chatbot/components/Promptbar/components/Prompts.tsx
+++ /dev/null
@@ -1,22 +0,0 @@
-import { FC } from 'react';
-
-import { Prompt } from '@/types/prompt';
-
-import { PromptComponent } from './Prompt';
-
-interface Props {
- prompts: Prompt[];
-}
-
-export const Prompts: FC<Props> = ({ prompts }) => {
-  return (
-    <div>
-      {prompts
-        .slice()
-        .reverse()
-        .map((prompt, index) => (
-          <PromptComponent key={index} prompt={prompt} />
-        ))}
-    </div>
- );
-};
diff --git a/spaces/matthoffner/web-llm-embed/README.md b/spaces/matthoffner/web-llm-embed/README.md
deleted file mode 100644
index 8071fe88a61f12b2b56e22ddd1d0334160a637fd..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/web-llm-embed/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: web-gpu-doc-chat
-emoji: 💎📑
-sdk: docker
-app_port: 3000
----
-
-# 💎 web-llm-embed 📄
-
-Web-LLM based document chat demo. Uses React-LLM to manage conversation state and Transformers.js for embeddings.
-
-## Roadmap
-
-* [ ] Ship static version to GitHub Pages
-* [ ] Dexie local storage
-
-
-## Credits
-
-### [react-llm](https://github.com/r2d4/react-llm)
-### [web-llm](https://github.com/mlc-ai/web-llm)
-### [transformers.js](https://github.com/xenova/transformers.js)
-### next.js
-### langchain
\ No newline at end of file
diff --git a/spaces/maxime/chat-with-your-telegram-chat/README.md b/spaces/maxime/chat-with-your-telegram-chat/README.md
deleted file mode 100644
index fa3d93e8fedcec320a19555fd81f4000e6cfbef4..0000000000000000000000000000000000000000
--- a/spaces/maxime/chat-with-your-telegram-chat/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: Telegram Chat Chat
-emoji: 🤗
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-
-# Chat with your Telegram Chat!
-
-Understand who you are and your relationships by creating a ChatGPT like experience over your own Telegram chat with [LangChain](https://github.com/hwchase17/langchain).
-
-Here is a very scientific peer-reviewed mathematical equation:
-
- ```
- Your Telegram chats ≈ Your thoughts ≈ You
- ```
-
-
-When have you been the happiest? What triggers you instantly? How could you have been more compassionate? When do you say yes and when do you say no? At what time are you the funniest?
-
-
-Ask anything you've wanted to know about yourself and your relationship with someone.
-
-## Ingest data
-From the Telegram Desktop App, export your chat history.
-
-
-
-Uncheck all the boxes and make sure you export it in `JSON`; you will then be able to download a `result.json` file.
-
-
-
-You can then embed the data by running
-
-```python ingest_data.py [result.json path]```
-
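-If you're curious what this step typically involves, here is a rough sketch of an ingestion script in the spirit of the one in this repo. Everything in it is an assumption for illustration: the message parsing, the LangChain helpers, and the `vectorstore.pkl` output name are stand-ins, not the actual `ingest_data.py`.
-
-```python
-# Illustrative sketch only; see the repo's ingest_data.py for the real logic.
-import json, pickle, sys
-
-from langchain.embeddings import OpenAIEmbeddings       # assumed embedding backend
-from langchain.text_splitter import CharacterTextSplitter
-from langchain.vectorstores import FAISS
-
-with open(sys.argv[1]) as f:                             # path to result.json
-    export = json.load(f)
-
-# Pull the plain-text messages out of the Telegram export.
-texts = [m["text"] for m in export["messages"]
-         if isinstance(m.get("text"), str) and m["text"]]
-
-# Chunk the chat history and embed it into a local vector store.
-chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_text("\n".join(texts))
-vectorstore = FAISS.from_texts(chunks, OpenAIEmbeddings())
-
-with open("vectorstore.pkl", "wb") as f:                 # assumed to be what app.py loads
-    pickle.dump(vectorstore, f)
-```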
-
-## Running the Application
-
-By running `python app.py` from the command line you can easily interact with your Telegram Chat through a Gradio Web App.
-
-
-
-*The original boilerplate is from https://github.com/hwchase17/chat-your-data*
\ No newline at end of file
diff --git a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/runtime/gpu_affinity.py b/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/runtime/gpu_affinity.py
deleted file mode 100644
index 9d46215bbbcb04f87e4c8250e08b575f8e529693..0000000000000000000000000000000000000000
--- a/spaces/merle/PROTEIN_GENERATOR/model/se3_transformer/runtime/gpu_affinity.py
+++ /dev/null
@@ -1,325 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-#
-# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
-# SPDX-License-Identifier: MIT
-
-import collections
-import itertools
-import math
-import os
-import pathlib
-import re
-
-import pynvml
-
-
-class Device:
- # assumes nvml returns list of 64 bit ints
- _nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
-
- def __init__(self, device_idx):
- super().__init__()
- self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
-
- def get_name(self):
- return pynvml.nvmlDeviceGetName(self.handle)
-
- def get_uuid(self):
- return pynvml.nvmlDeviceGetUUID(self.handle)
-
- def get_cpu_affinity(self):
- affinity_string = ""
- for j in pynvml.nvmlDeviceGetCpuAffinity(self.handle, Device._nvml_affinity_elements):
- # assume nvml returns list of 64 bit ints
- affinity_string = "{:064b}".format(j) + affinity_string
-
- affinity_list = [int(x) for x in affinity_string]
- affinity_list.reverse() # so core 0 is in 0th element of list
-
- ret = [i for i, e in enumerate(affinity_list) if e != 0]
- return ret
-
-
-def get_thread_siblings_list():
- """
- Returns a list of 2-element integer tuples representing pairs of
- hyperthreading cores.
- """
- path = "/sys/devices/system/cpu/cpu*/topology/thread_siblings_list"
- thread_siblings_list = []
- pattern = re.compile(r"(\d+)\D(\d+)")
- for fname in pathlib.Path(path[0]).glob(path[1:]):
- with open(fname) as f:
- content = f.read().strip()
- res = pattern.findall(content)
- if res:
- pair = tuple(map(int, res[0]))
- thread_siblings_list.append(pair)
- return thread_siblings_list
-
-
-def check_socket_affinities(socket_affinities):
- # sets of cores should be either identical or disjoint
- for i, j in itertools.product(socket_affinities, socket_affinities):
- if not set(i) == set(j) and not set(i).isdisjoint(set(j)):
- raise RuntimeError(f"Sets of cores should be either identical or disjoint, " f"but got {i} and {j}.")
-
-
-def get_socket_affinities(nproc_per_node, exclude_unavailable_cores=True):
- devices = [Device(i) for i in range(nproc_per_node)]
- socket_affinities = [dev.get_cpu_affinity() for dev in devices]
-
- if exclude_unavailable_cores:
- available_cores = os.sched_getaffinity(0)
- socket_affinities = [list(set(affinity) & available_cores) for affinity in socket_affinities]
-
- check_socket_affinities(socket_affinities)
-
- return socket_affinities
-
-
-def set_socket_affinity(gpu_id):
- """
- The process is assigned with all available logical CPU cores from the CPU
- socket connected to the GPU with a given id.
-
- Args:
- gpu_id: index of a GPU
- """
- dev = Device(gpu_id)
- affinity = dev.get_cpu_affinity()
- os.sched_setaffinity(0, affinity)
-
-
-def set_single_affinity(gpu_id):
- """
- The process is assigned with the first available logical CPU core from the
- list of all CPU cores from the CPU socket connected to the GPU with a given
- id.
-
- Args:
- gpu_id: index of a GPU
- """
- dev = Device(gpu_id)
- affinity = dev.get_cpu_affinity()
-
- # exclude unavailable cores
- available_cores = os.sched_getaffinity(0)
- affinity = list(set(affinity) & available_cores)
- os.sched_setaffinity(0, affinity[:1])
-
-
-def set_single_unique_affinity(gpu_id, nproc_per_node):
- """
- The process is assigned with a single unique available physical CPU core
- from the list of all CPU cores from the CPU socket connected to the GPU with
- a given id.
-
- Args:
- gpu_id: index of a GPU
- """
- socket_affinities = get_socket_affinities(nproc_per_node)
-
- siblings_list = get_thread_siblings_list()
- siblings_dict = dict(siblings_list)
-
- # remove siblings
- for idx, socket_affinity in enumerate(socket_affinities):
- socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
-
- affinities = []
- assigned = []
-
- for socket_affinity in socket_affinities:
- for core in socket_affinity:
- if core not in assigned:
- affinities.append([core])
- assigned.append(core)
- break
- os.sched_setaffinity(0, affinities[gpu_id])
-
-
-def set_socket_unique_affinity(gpu_id, nproc_per_node, mode, balanced=True):
-    The process is assigned with a unique subset of available physical CPU
- The process is assigned with an unique subset of available physical CPU
- cores from the CPU socket connected to a GPU with a given id.
- Assignment automatically includes hyperthreading siblings (if siblings are
- available).
-
- Args:
- gpu_id: index of a GPU
- nproc_per_node: total number of processes per node
-        mode: core assignment pattern within the socket, 'interleaved' or 'continuous'
- balanced: assign an equal number of physical cores to each process
- """
- socket_affinities = get_socket_affinities(nproc_per_node)
-
- siblings_list = get_thread_siblings_list()
- siblings_dict = dict(siblings_list)
-
- # remove hyperthreading siblings
- for idx, socket_affinity in enumerate(socket_affinities):
- socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
-
- socket_affinities_to_device_ids = collections.defaultdict(list)
-
- for idx, socket_affinity in enumerate(socket_affinities):
- socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
-
- # compute minimal number of physical cores per GPU across all GPUs and
- # sockets, code assigns this number of cores per GPU if balanced == True
- min_physical_cores_per_gpu = min(
- [len(cores) // len(gpus) for cores, gpus in socket_affinities_to_device_ids.items()]
- )
-
- for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
- devices_per_group = len(device_ids)
- if balanced:
- cores_per_device = min_physical_cores_per_gpu
- socket_affinity = socket_affinity[: devices_per_group * min_physical_cores_per_gpu]
- else:
- cores_per_device = len(socket_affinity) // devices_per_group
-
- for group_id, device_id in enumerate(device_ids):
- if device_id == gpu_id:
-
- # In theory there should be no difference in performance between
- # 'interleaved' and 'continuous' pattern on Intel-based DGX-1,
- # but 'continuous' should be better for DGX A100 because on AMD
- # Rome 4 consecutive cores are sharing L3 cache.
- # TODO: code doesn't attempt to automatically detect layout of
- # L3 cache, also external environment may already exclude some
- # cores, this code makes no attempt to detect it and to align
- # mapping to multiples of 4.
-
- if mode == "interleaved":
- affinity = list(socket_affinity[group_id::devices_per_group])
- elif mode == "continuous":
- affinity = list(socket_affinity[group_id * cores_per_device: (group_id + 1) * cores_per_device])
- else:
- raise RuntimeError("Unknown set_socket_unique_affinity mode")
-
-                    # unconditionally reintroduce hyperthreading siblings; this step
-                    # may result in a different number of logical cores assigned to
-                    # each GPU even if balanced == True (if hyperthreading siblings
-                    # aren't available for a subset of cores due to some external
-                    # constraints, siblings are re-added unconditionally; in the
-                    # worst case an unavailable logical core will be ignored by
-                    # os.sched_setaffinity()).
- affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
- os.sched_setaffinity(0, affinity)
-
-
-def set_affinity(gpu_id, nproc_per_node, mode="socket_unique_continuous", balanced=True):
- """
- The process is assigned with a proper CPU affinity which matches hardware
- architecture on a given platform. Usually it improves and stabilizes
- performance of deep learning training workloads.
-
- This function assumes that the workload is running in multi-process
- single-device mode (there are multiple training processes and each process
- is running on a single GPU), which is typical for multi-GPU training
- workloads using `torch.nn.parallel.DistributedDataParallel`.
-
- Available affinity modes:
- * 'socket' - the process is assigned with all available logical CPU cores
- from the CPU socket connected to the GPU with a given id.
- * 'single' - the process is assigned with the first available logical CPU
- core from the list of all CPU cores from the CPU socket connected to the GPU
- with a given id (multiple GPUs could be assigned with the same CPU core).
- * 'single_unique' - the process is assigned with a single unique available
- physical CPU core from the list of all CPU cores from the CPU socket
- connected to the GPU with a given id.
-    * 'socket_unique_interleaved' - the process is assigned with a unique
- subset of available physical CPU cores from the CPU socket connected to a
- GPU with a given id, hyperthreading siblings are included automatically,
- cores are assigned with interleaved indexing pattern
-    * 'socket_unique_continuous' - (the default) the process is assigned with a
- unique subset of available physical CPU cores from the CPU socket connected
- to a GPU with a given id, hyperthreading siblings are included
- automatically, cores are assigned with continuous indexing pattern
-
- 'socket_unique_continuous' is the recommended mode for deep learning
- training workloads on NVIDIA DGX machines.
-
- Args:
- gpu_id: integer index of a GPU
- nproc_per_node: number of processes per node
- mode: affinity mode
- balanced: assign an equal number of physical cores to each process,
- affects only 'socket_unique_interleaved' and
- 'socket_unique_continuous' affinity modes
-
- Returns a set of logical CPU cores on which the process is eligible to run.
-
- Example:
-
- import argparse
- import os
-
- import gpu_affinity
- import torch
-
-
- def main():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--local_rank',
- type=int,
- default=os.getenv('LOCAL_RANK', 0),
- )
- args = parser.parse_args()
-
- nproc_per_node = torch.cuda.device_count()
-
- affinity = gpu_affinity.set_affinity(args.local_rank, nproc_per_node)
- print(f'{args.local_rank}: core affinity: {affinity}')
-
-
- if __name__ == "__main__":
- main()
-
- Launch the example with:
- python -m torch.distributed.launch --nproc_per_node <#GPUs> example.py
-
-
- WARNING: On DGX A100 only a half of CPU cores have direct access to GPUs.
- This function restricts execution only to the CPU cores directly connected
- to GPUs, so on DGX A100 it will limit the code to half of CPU cores and half
- of CPU memory bandwidth (which may be fine for many DL models).
- """
- pynvml.nvmlInit()
-
- if mode == "socket":
- set_socket_affinity(gpu_id)
- elif mode == "single":
- set_single_affinity(gpu_id)
- elif mode == "single_unique":
- set_single_unique_affinity(gpu_id, nproc_per_node)
- elif mode == "socket_unique_interleaved":
- set_socket_unique_affinity(gpu_id, nproc_per_node, "interleaved", balanced)
- elif mode == "socket_unique_continuous":
- set_socket_unique_affinity(gpu_id, nproc_per_node, "continuous", balanced)
- else:
- raise RuntimeError("Unknown affinity mode")
-
- affinity = os.sched_getaffinity(0)
- return affinity
diff --git a/spaces/merle/PROTEIN_GENERATOR/model/util.py b/spaces/merle/PROTEIN_GENERATOR/model/util.py
deleted file mode 100644
index 017f7028636ce8bf5597294913273fc257c2c844..0000000000000000000000000000000000000000
--- a/spaces/merle/PROTEIN_GENERATOR/model/util.py
+++ /dev/null
@@ -1,522 +0,0 @@
-import sys
-
-import numpy as np
-import torch
-
-import scipy.sparse
-
-from chemical import *
-from scoring import *
-
-def th_ang_v(ab,bc,eps:float=1e-8):
- def th_norm(x,eps:float=1e-8):
- return x.square().sum(-1,keepdim=True).add(eps).sqrt()
- def th_N(x,alpha:float=0):
- return x/th_norm(x).add(alpha)
- ab, bc = th_N(ab),th_N(bc)
- cos_angle = torch.clamp( (ab*bc).sum(-1), -1, 1)
- sin_angle = torch.sqrt(1-cos_angle.square() + eps)
- dih = torch.stack((cos_angle,sin_angle),-1)
- return dih
-
-def th_dih_v(ab,bc,cd):
- def th_cross(a,b):
- a,b = torch.broadcast_tensors(a,b)
- return torch.cross(a,b, dim=-1)
- def th_norm(x,eps:float=1e-8):
- return x.square().sum(-1,keepdim=True).add(eps).sqrt()
- def th_N(x,alpha:float=0):
- return x/th_norm(x).add(alpha)
-
- ab, bc, cd = th_N(ab),th_N(bc),th_N(cd)
- n1 = th_N( th_cross(ab,bc) )
- n2 = th_N( th_cross(bc,cd) )
- sin_angle = (th_cross(n1,bc)*n2).sum(-1)
- cos_angle = (n1*n2).sum(-1)
- dih = torch.stack((cos_angle,sin_angle),-1)
- return dih
-
-def th_dih(a,b,c,d):
- return th_dih_v(a-b,b-c,c-d)
-
-# More complicated version splits error in CA-N and CA-C (giving more accurate CB position)
-# It returns the rigid transformation from local frame to global frame
-def rigid_from_3_points(N, Ca, C, non_ideal=False, eps=1e-8):
- #N, Ca, C - [B,L, 3]
- #R - [B,L, 3, 3], det(R)=1, inv(R) = R.T, R is a rotation matrix
- B,L = N.shape[:2]
-
- v1 = C-Ca
- v2 = N-Ca
- e1 = v1/(torch.norm(v1, dim=-1, keepdim=True)+eps)
- u2 = v2-(torch.einsum('bli, bli -> bl', e1, v2)[...,None]*e1)
- e2 = u2/(torch.norm(u2, dim=-1, keepdim=True)+eps)
- e3 = torch.cross(e1, e2, dim=-1)
- R = torch.cat([e1[...,None], e2[...,None], e3[...,None]], axis=-1) #[B,L,3,3] - rotation matrix
-
- if non_ideal:
- v2 = v2/(torch.norm(v2, dim=-1, keepdim=True)+eps)
- cosref = torch.sum(e1*v2, dim=-1) # cosine of current N-CA-C bond angle
- costgt = cos_ideal_NCAC.item()
- cos2del = torch.clamp( cosref*costgt + torch.sqrt((1-cosref*cosref)*(1-costgt*costgt)+eps), min=-1.0, max=1.0 )
- cosdel = torch.sqrt(0.5*(1+cos2del)+eps)
- sindel = torch.sign(costgt-cosref) * torch.sqrt(1-0.5*(1+cos2del)+eps)
- Rp = torch.eye(3, device=N.device).repeat(B,L,1,1)
- Rp[:,:,0,0] = cosdel
- Rp[:,:,0,1] = -sindel
- Rp[:,:,1,0] = sindel
- Rp[:,:,1,1] = cosdel
-
- R = torch.einsum('blij,bljk->blik', R,Rp)
-
- return R, Ca
-
-def get_tor_mask(seq, torsion_indices, mask_in=None):
- B,L = seq.shape[:2]
- tors_mask = torch.ones((B,L,10), dtype=torch.bool, device=seq.device)
- tors_mask[...,3:7] = torsion_indices[seq,:,-1] > 0
- tors_mask[:,0,1] = False
- tors_mask[:,-1,0] = False
-
- # mask for additional angles
- tors_mask[:,:,7] = seq!=aa2num['GLY']
- tors_mask[:,:,8] = seq!=aa2num['GLY']
- tors_mask[:,:,9] = torch.logical_and( seq!=aa2num['GLY'], seq!=aa2num['ALA'] )
- tors_mask[:,:,9] = torch.logical_and( tors_mask[:,:,9], seq!=aa2num['UNK'] )
- tors_mask[:,:,9] = torch.logical_and( tors_mask[:,:,9], seq!=aa2num['MAS'] )
-
- if mask_in != None:
- # mask for missing atoms
- # chis
- ti0 = torch.gather(mask_in,2,torsion_indices[seq,:,0])
- ti1 = torch.gather(mask_in,2,torsion_indices[seq,:,1])
- ti2 = torch.gather(mask_in,2,torsion_indices[seq,:,2])
- ti3 = torch.gather(mask_in,2,torsion_indices[seq,:,3])
- is_valid = torch.stack((ti0, ti1, ti2, ti3), dim=-2).all(dim=-1)
- tors_mask[...,3:7] = torch.logical_and(tors_mask[...,3:7], is_valid)
- tors_mask[:,:,7] = torch.logical_and(tors_mask[:,:,7], mask_in[:,:,4]) # CB exist?
- tors_mask[:,:,8] = torch.logical_and(tors_mask[:,:,8], mask_in[:,:,4]) # CB exist?
- tors_mask[:,:,9] = torch.logical_and(tors_mask[:,:,9], mask_in[:,:,5]) # XG exist?
-
- return tors_mask
-
-def get_torsions(xyz_in, seq, torsion_indices, torsion_can_flip, ref_angles, mask_in=None):
- B,L = xyz_in.shape[:2]
-
- tors_mask = get_tor_mask(seq, torsion_indices, mask_in)
-
- # torsions to restrain to 0 or 180degree
- tors_planar = torch.zeros((B, L, 10), dtype=torch.bool, device=xyz_in.device)
- tors_planar[:,:,5] = seq == aa2num['TYR'] # TYR chi 3 should be planar
-
- # idealize given xyz coordinates before computing torsion angles
- xyz = xyz_in.clone()
- Rs, Ts = rigid_from_3_points(xyz[...,0,:],xyz[...,1,:],xyz[...,2,:])
- Nideal = torch.tensor([-0.5272, 1.3593, 0.000], device=xyz_in.device)
- Cideal = torch.tensor([1.5233, 0.000, 0.000], device=xyz_in.device)
- xyz[...,0,:] = torch.einsum('brij,j->bri', Rs, Nideal) + Ts
- xyz[...,2,:] = torch.einsum('brij,j->bri', Rs, Cideal) + Ts
-
- torsions = torch.zeros( (B,L,10,2), device=xyz.device )
- # avoid undefined angles for H generation
- torsions[:,0,1,0] = 1.0
- torsions[:,-1,0,0] = 1.0
-
- # omega
- torsions[:,:-1,0,:] = th_dih(xyz[:,:-1,1,:],xyz[:,:-1,2,:],xyz[:,1:,0,:],xyz[:,1:,1,:])
- # phi
- torsions[:,1:,1,:] = th_dih(xyz[:,:-1,2,:],xyz[:,1:,0,:],xyz[:,1:,1,:],xyz[:,1:,2,:])
- # psi
- torsions[:,:,2,:] = -1 * th_dih(xyz[:,:,0,:],xyz[:,:,1,:],xyz[:,:,2,:],xyz[:,:,3,:])
-
- # chis
- ti0 = torch.gather(xyz,2,torsion_indices[seq,:,0,None].repeat(1,1,1,3))
- ti1 = torch.gather(xyz,2,torsion_indices[seq,:,1,None].repeat(1,1,1,3))
- ti2 = torch.gather(xyz,2,torsion_indices[seq,:,2,None].repeat(1,1,1,3))
- ti3 = torch.gather(xyz,2,torsion_indices[seq,:,3,None].repeat(1,1,1,3))
- torsions[:,:,3:7,:] = th_dih(ti0,ti1,ti2,ti3)
-
- # CB bend
- NC = 0.5*( xyz[:,:,0,:3] + xyz[:,:,2,:3] )
- CA = xyz[:,:,1,:3]
- CB = xyz[:,:,4,:3]
- t = th_ang_v(CB-CA,NC-CA)
- t0 = ref_angles[seq][...,0,:]
- torsions[:,:,7,:] = torch.stack(
- (torch.sum(t*t0,dim=-1),t[...,0]*t0[...,1]-t[...,1]*t0[...,0]),
- dim=-1 )
-
- # CB twist
- NCCA = NC-CA
- NCp = xyz[:,:,2,:3] - xyz[:,:,0,:3]
- NCpp = NCp - torch.sum(NCp*NCCA, dim=-1, keepdim=True)/ torch.sum(NCCA*NCCA, dim=-1, keepdim=True) * NCCA
- t = th_ang_v(CB-CA,NCpp)
- t0 = ref_angles[seq][...,1,:]
- torsions[:,:,8,:] = torch.stack(
- (torch.sum(t*t0,dim=-1),t[...,0]*t0[...,1]-t[...,1]*t0[...,0]),
- dim=-1 )
-
- # CG bend
- CG = xyz[:,:,5,:3]
- t = th_ang_v(CG-CB,CA-CB)
- t0 = ref_angles[seq][...,2,:]
- torsions[:,:,9,:] = torch.stack(
- (torch.sum(t*t0,dim=-1),t[...,0]*t0[...,1]-t[...,1]*t0[...,0]),
- dim=-1 )
-
- mask0 = torch.isnan(torsions[...,0]).nonzero()
- mask1 = torch.isnan(torsions[...,1]).nonzero()
- torsions[mask0[:,0],mask0[:,1],mask0[:,2],0] = 1.0
- torsions[mask1[:,0],mask1[:,1],mask1[:,2],1] = 0.0
-
- # alt chis
- torsions_alt = torsions.clone()
- torsions_alt[torsion_can_flip[seq,:]] *= -1
-
- return torsions, torsions_alt, tors_mask, tors_planar
-
-def get_tips(xyz, seq):
- B,L = xyz.shape[:2]
-
- xyz_tips = torch.gather(xyz, 2, tip_indices.to(xyz.device)[seq][:,:,None,None].expand(-1,-1,-1,3)).reshape(B, L, 3)
- mask = ~(torch.isnan(xyz_tips[:,:,0]))
- if torch.isnan(xyz_tips).any(): # replace NaN tip atom with virtual Cb atom
- # three anchor atoms
- N = xyz[:,:,0]
- Ca = xyz[:,:,1]
- C = xyz[:,:,2]
-
- # recreate Cb given N,Ca,C
- b = Ca - N
- c = C - Ca
- a = torch.cross(b, c, dim=-1)
- Cb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca
-
- xyz_tips = torch.where(torch.isnan(xyz_tips), Cb, xyz_tips)
- return xyz_tips, mask
-
-# process ideal frames
-def make_frame(X, Y):
- Xn = X / torch.linalg.norm(X)
- Y = Y - torch.dot(Y, Xn) * Xn
- Yn = Y / torch.linalg.norm(Y)
- Z = torch.cross(Xn,Yn)
- Zn = Z / torch.linalg.norm(Z)
-
- return torch.stack((Xn,Yn,Zn), dim=-1)
-
-def cross_product_matrix(u):
- B, L = u.shape[:2]
- matrix = torch.zeros((B, L, 3, 3), device=u.device)
- matrix[:,:,0,1] = -u[...,2]
- matrix[:,:,0,2] = u[...,1]
- matrix[:,:,1,0] = u[...,2]
- matrix[:,:,1,2] = -u[...,0]
- matrix[:,:,2,0] = -u[...,1]
- matrix[:,:,2,1] = u[...,0]
- return matrix
-
-# writepdb
-def writepdb(filename, atoms, seq, idx_pdb=None, bfacts=None):
- f = open(filename,"w")
- ctr = 1
- scpu = seq.cpu().squeeze()
- atomscpu = atoms.cpu().squeeze()
- if bfacts is None:
- bfacts = torch.zeros(atomscpu.shape[0])
- if idx_pdb is None:
- idx_pdb = 1 + torch.arange(atomscpu.shape[0])
-
- Bfacts = torch.clamp( bfacts.cpu(), 0, 1)
- for i,s in enumerate(scpu):
- if (len(atomscpu.shape)==2):
- f.write ("%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"%(
- "ATOM", ctr, " CA ", num2aa[s],
- "A", idx_pdb[i], atomscpu[i,0], atomscpu[i,1], atomscpu[i,2],
- 1.0, Bfacts[i] ) )
- ctr += 1
- elif atomscpu.shape[1]==3:
- for j,atm_j in enumerate([" N "," CA "," C "]):
- f.write ("%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"%(
- "ATOM", ctr, atm_j, num2aa[s],
- "A", idx_pdb[i], atomscpu[i,j,0], atomscpu[i,j,1], atomscpu[i,j,2],
- 1.0, Bfacts[i] ) )
- ctr += 1
- else:
- natoms = atomscpu.shape[1]
- if (natoms!=14 and natoms!=27):
- print ('bad size!', atoms.shape)
- assert(False)
- atms = aa2long[s]
- # his prot hack
- if (s==8 and torch.linalg.norm( atomscpu[i,9,:]-atomscpu[i,5,:] ) < 1.7):
- atms = (
- " N "," CA "," C "," O "," CB "," CG "," NE2"," CD2"," CE1"," ND1",
- None, None, None, None," H "," HA ","1HB ","2HB "," HD2"," HE1",
- " HD1", None, None, None, None, None, None) # his_d
-
- for j,atm_j in enumerate(atms):
-                if (j<natoms and atm_j is not None):
-                    f.write ("%-6s%5s %4s %3s %s%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n"%(
-                        "ATOM", ctr, atm_j, num2aa[s],
-                        "A", idx_pdb[i], atomscpu[i,j,0], atomscpu[i,j,1], atomscpu[i,j,2],
-                        1.0, Bfacts[i] ) )
-                    ctr += 1
-    f.close()
-
-# bond-graph distances (capped at 4 bonds) between atoms of each residue type
-num_bonds = torch.zeros((22,27,27), dtype=torch.long)
-for i in range(22):
-    num_bonds_i = np.zeros((27,27))
-    for (bnamei,bnamej) in aabonds[i]:
-        bi,bj = aa2long[i].index(bnamei), aa2long[i].index(bnamej)
-        num_bonds_i[bi,bj] = 1
-    num_bonds_i = scipy.sparse.csgraph.shortest_path(num_bonds_i, directed=False)
-    num_bonds_i[num_bonds_i>=4] = 4
-    num_bonds[i,...] = torch.tensor(num_bonds_i)
-
-
-# LJ/LK scoring parameters
-ljlk_parameters = torch.zeros((22,27,5), dtype=torch.float)
-lj_correction_parameters = torch.zeros((22,27,4), dtype=bool) # donor/acceptor/hpol/disulf
-for i in range(22):
- for j,a in enumerate(aa2type[i]):
- if (a is not None):
- ljlk_parameters[i,j,:] = torch.tensor( type2ljlk[a] )
- lj_correction_parameters[i,j,0] = (type2hb[a]==HbAtom.DO)+(type2hb[a]==HbAtom.DA)
- lj_correction_parameters[i,j,1] = (type2hb[a]==HbAtom.AC)+(type2hb[a]==HbAtom.DA)
- lj_correction_parameters[i,j,2] = (type2hb[a]==HbAtom.HP)
- lj_correction_parameters[i,j,3] = (a=="SH1" or a=="HS")
-
-# hbond scoring parameters
-def donorHs(D,bonds,atoms):
- dHs = []
- for (i,j) in bonds:
- if (i==D):
- idx_j = atoms.index(j)
- if (idx_j>=14): # if atom j is a hydrogen
- dHs.append(idx_j)
- if (j==D):
- idx_i = atoms.index(i)
- if (idx_i>=14): # if atom j is a hydrogen
- dHs.append(idx_i)
- assert (len(dHs)>0)
- return dHs
-
-def acceptorBB0(A,hyb,bonds,atoms):
- if (hyb == HbHybType.SP2):
- for (i,j) in bonds:
- if (i==A):
- B = atoms.index(j)
- if (B<14):
- break
- if (j==A):
- B = atoms.index(i)
- if (B<14):
- break
- for (i,j) in bonds:
- if (i==atoms[B]):
- B0 = atoms.index(j)
- if (B0<14):
- break
- if (j==atoms[B]):
- B0 = atoms.index(i)
- if (B0<14):
- break
- elif (hyb == HbHybType.SP3 or hyb == HbHybType.RING):
- for (i,j) in bonds:
- if (i==A):
- B = atoms.index(j)
- if (B<14):
- break
- if (j==A):
- B = atoms.index(i)
- if (B<14):
- break
- for (i,j) in bonds:
- if (i==A and j!=atoms[B]):
- B0 = atoms.index(j)
- break
- if (j==A and i!=atoms[B]):
- B0 = atoms.index(i)
- break
-
- return B,B0
-
-
-hbtypes = torch.full((22,27,3),-1, dtype=torch.long) # (donortype, acceptortype, acchybtype)
-hbbaseatoms = torch.full((22,27,2),-1, dtype=torch.long) # (B,B0) for acc; (D,-1) for don
-hbpolys = torch.zeros((HbDonType.NTYPES,HbAccType.NTYPES,3,15)) # weight,xmin,xmax,ymin,ymax,c9,...,c0
-
-for i in range(22):
- for j,a in enumerate(aa2type[i]):
- if (a in type2dontype):
- j_hs = donorHs(aa2long[i][j],aabonds[i],aa2long[i])
- for j_h in j_hs:
- hbtypes[i,j_h,0] = type2dontype[a]
- hbbaseatoms[i,j_h,0] = j
- if (a in type2acctype):
- j_b, j_b0 = acceptorBB0(aa2long[i][j],type2hybtype[a],aabonds[i],aa2long[i])
- hbtypes[i,j,1] = type2acctype[a]
- hbtypes[i,j,2] = type2hybtype[a]
- hbbaseatoms[i,j,0] = j_b
- hbbaseatoms[i,j,1] = j_b0
-
-for i in range(HbDonType.NTYPES):
- for j in range(HbAccType.NTYPES):
- weight = dontype2wt[i]*acctype2wt[j]
-
- pdist,pbah,pahd = hbtypepair2poly[(i,j)]
- xrange,yrange,coeffs = hbpolytype2coeffs[pdist]
- hbpolys[i,j,0,0] = weight
- hbpolys[i,j,0,1:3] = torch.tensor(xrange)
- hbpolys[i,j,0,3:5] = torch.tensor(yrange)
- hbpolys[i,j,0,5:] = torch.tensor(coeffs)
- xrange,yrange,coeffs = hbpolytype2coeffs[pahd]
- hbpolys[i,j,1,0] = weight
- hbpolys[i,j,1,1:3] = torch.tensor(xrange)
- hbpolys[i,j,1,3:5] = torch.tensor(yrange)
- hbpolys[i,j,1,5:] = torch.tensor(coeffs)
- xrange,yrange,coeffs = hbpolytype2coeffs[pbah]
- hbpolys[i,j,2,0] = weight
- hbpolys[i,j,2,1:3] = torch.tensor(xrange)
- hbpolys[i,j,2,3:5] = torch.tensor(yrange)
- hbpolys[i,j,2,5:] = torch.tensor(coeffs)
-
-# kinematic parameters
-base_indices = torch.full((22,27),0, dtype=torch.long)
-xyzs_in_base_frame = torch.ones((22,27,4))
-RTs_by_torsion = torch.eye(4).repeat(22,7,1,1)
-reference_angles = torch.ones((22,3,2))
-
-for i in range(22):
- i_l = aa2long[i]
- for name, base, coords in ideal_coords[i]:
- idx = i_l.index(name)
- base_indices[i,idx] = base
- xyzs_in_base_frame[i,idx,:3] = torch.tensor(coords)
-
- # omega frame
- RTs_by_torsion[i,0,:3,:3] = torch.eye(3)
- RTs_by_torsion[i,0,:3,3] = torch.zeros(3)
-
- # phi frame
- RTs_by_torsion[i,1,:3,:3] = make_frame(
- xyzs_in_base_frame[i,0,:3] - xyzs_in_base_frame[i,1,:3],
- torch.tensor([1.,0.,0.])
- )
- RTs_by_torsion[i,1,:3,3] = xyzs_in_base_frame[i,0,:3]
-
- # psi frame
- RTs_by_torsion[i,2,:3,:3] = make_frame(
- xyzs_in_base_frame[i,2,:3] - xyzs_in_base_frame[i,1,:3],
- xyzs_in_base_frame[i,1,:3] - xyzs_in_base_frame[i,0,:3]
- )
- RTs_by_torsion[i,2,:3,3] = xyzs_in_base_frame[i,2,:3]
-
- # chi1 frame
- if torsions[i][0] is not None:
- a0,a1,a2 = torsion_indices[i,0,0:3]
- RTs_by_torsion[i,3,:3,:3] = make_frame(
- xyzs_in_base_frame[i,a2,:3]-xyzs_in_base_frame[i,a1,:3],
- xyzs_in_base_frame[i,a0,:3]-xyzs_in_base_frame[i,a1,:3],
- )
- RTs_by_torsion[i,3,:3,3] = xyzs_in_base_frame[i,a2,:3]
-
- # chi2~4 frame
- for j in range(1,4):
- if torsions[i][j] is not None:
- a2 = torsion_indices[i,j,2]
- if ((i==18 and j==2) or (i==8 and j==2)): # TYR CZ-OH & HIS CE1-HE1 a special case
- a0,a1 = torsion_indices[i,j,0:2]
- RTs_by_torsion[i,3+j,:3,:3] = make_frame(
- xyzs_in_base_frame[i,a2,:3]-xyzs_in_base_frame[i,a1,:3],
- xyzs_in_base_frame[i,a0,:3]-xyzs_in_base_frame[i,a1,:3] )
- else:
- RTs_by_torsion[i,3+j,:3,:3] = make_frame(
- xyzs_in_base_frame[i,a2,:3],
- torch.tensor([-1.,0.,0.]), )
- RTs_by_torsion[i,3+j,:3,3] = xyzs_in_base_frame[i,a2,:3]
-
-
- # CB/CG angles
- NCr = 0.5*(xyzs_in_base_frame[i,0,:3]+xyzs_in_base_frame[i,2,:3])
- CAr = xyzs_in_base_frame[i,1,:3]
- CBr = xyzs_in_base_frame[i,4,:3]
- CGr = xyzs_in_base_frame[i,5,:3]
- reference_angles[i,0,:]=th_ang_v(CBr-CAr,NCr-CAr)
- NCp = xyzs_in_base_frame[i,2,:3]-xyzs_in_base_frame[i,0,:3]
- NCpp = NCp - torch.dot(NCp,NCr)/ torch.dot(NCr,NCr) * NCr
- reference_angles[i,1,:]=th_ang_v(CBr-CAr,NCpp)
- reference_angles[i,2,:]=th_ang_v(CGr,torch.tensor([-1.,0.,0.]))
-
-def get_rmsd(a, b, eps=1e-6):
- '''
- align crds b to a : always use all alphas
-    expected tensor of shape (L,3)
- jake's torch adapted version
- '''
- assert a.shape == b.shape, 'make sure tensors are the same size'
- L = a.shape[0]
- assert a.shape == torch.Size([L,3]), 'make sure tensors are in format [L,3]'
-
- # center to CA centroid
- a = a - a.mean(dim=0)
- b = b - b.mean(dim=0)
-
- # Computation of the covariance matrix
- C = torch.einsum('kj,ji->ki', torch.transpose(b.type(torch.float32),0,1), a.type(torch.float32))
-
- # Compute optimal rotation matrix using SVD
- V, S, W = torch.linalg.svd(C)
-
- # get sign to ensure right-handedness
- d = torch.ones([3,3])
- d[:,-1] = torch.sign(torch.linalg.det(V)*torch.linalg.det(W))
-
- # Rotation matrix U
- U = torch.einsum('kj,ji->ki',(d*V),W)
-
- # Rotate xyz_hal
- rP = torch.einsum('kj,ji->ki',b.type(torch.float32),U.type(torch.float32))
-
- L = rP.shape[0]
- rmsd = torch.sqrt(torch.sum((rP-a)*(rP-a), axis=(0,1)) / L + eps)
-
- return rmsd, U
-
diff --git a/spaces/merle/PROTEIN_GENERATOR/utils/examples/motif_scaffolding.sh b/spaces/merle/PROTEIN_GENERATOR/utils/examples/motif_scaffolding.sh
deleted file mode 100644
index ba1f0b03227cc3de8ec140c2fcc42dead2a083e2..0000000000000000000000000000000000000000
--- a/spaces/merle/PROTEIN_GENERATOR/utils/examples/motif_scaffolding.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-#SBATCH -J seq_diff
-#SBATCH -p gpu
-#SBATCH --mem=8g
-#SBATCH --gres=gpu:a6000:1
-#SBATCH -o ./out/slurm/slurm_%j.out
-
-source activate /software/conda/envs/SE3nv
-
-srun python ../inference.py \
- --num_designs 10 \
- --out out/design \
- --pdb pdbs/rsv5_5tpn.pdb \
- --contigs 0-25,A163-181,25-30 --T 25 --save_best_plddt
diff --git a/spaces/merve/anonymization/source/_posts/2021-03-03-fill-in-the-blank.md b/spaces/merve/anonymization/source/_posts/2021-03-03-fill-in-the-blank.md
deleted file mode 100644
index c5a251a9297e84f8b3ed4e504ff25f19793a57c2..0000000000000000000000000000000000000000
--- a/spaces/merve/anonymization/source/_posts/2021-03-03-fill-in-the-blank.md
+++ /dev/null
@@ -1,136 +0,0 @@
----
-template: post.html
-title: What Have Language Models Learned?
-summary: By asking language models to fill in the blank, we can probe their understanding of the world.
-shareimg: https://pair.withgoogle.com/explorables/images/fill-in-the-blank.png
-shareimgabstract: https://pair.withgoogle.com/explorables/images/fill-in-the-blank-abstract.png
-permalink: /fill-in-the-blank/
-date: 2021-07-28
----
-
-Large language models are making it possible for computers to [write stories](https://openai.com/blog/better-language-models/), [program a website](https://twitter.com/sharifshameem/status/1282676454690451457) and [turn captions into images](https://openai.com/blog/dall-e/).
-
-One of the first of these models, [BERT](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html), is trained by taking sentences, splitting them into individual words, randomly hiding some of them, and predicting what the hidden words are. After doing this millions of times, BERT has "read" enough Shakespeare to predict how this phrase usually ends:
-
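-If you want to poke at the same kind of model outside this page, a few lines of Python against the Hugging Face `transformers` library run the same kind of query. This is a rough sketch, not the code behind this page; it assumes `transformers` and a backend like PyTorch are installed, and it uses the model named in the footnotes.
-
-```python
-from transformers import pipeline
-
-# Same flavor of model as the one wired into this page (see footnotes).
-fill = pipeline("fill-mask", model="bert-large-uncased-whole-word-masking")
-
-# BERT scores every token in its vocabulary for the hidden position.
-for prediction in fill("To be or not to be, that is the [MASK]."):
-    print(f"{prediction['token_str']:>12}  {prediction['score']:.3f}")
-```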
-
-
-This page is hooked up to a version of BERT trained on Wikipedia and books. Try clicking on different words to see how they'd be filled in, or typing in another sentence to see what else BERT has picked up on.
-
-
-
-### Cattle or Clothes?
-
-Besides Hamlet's existential dread, the text BERT was trained on also contains more patterns:
-
-
-
-Cattle and horses aren't top purchase predictions in every state, though! In New York, some of the most likely words are clothes, books and art:
-
-
-
-There are more than 30,000 words, punctuation marks and word fragments in BERT's [vocabulary](https://huggingface.co/transformers/tokenizer_summary.html). Every time BERT fills in a hidden word, it assigns each of them a probability. By looking at how slightly different sentences shift those probabilities, we can get a glimpse at how purchasing patterns in different places are understood.
-
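-Under the hood, these comparisons boil down to reading off the probabilities the model assigns to the same candidate words in two slightly different sentences. A hand-rolled version of that probing looks something like the sketch below; the prompt and the two place names are illustrative, not the exact sentences used in the charts.
-
-```python
-import torch
-from transformers import AutoModelForMaskedLM, AutoTokenizer
-
-name = "bert-large-uncased-whole-word-masking"
-tok = AutoTokenizer.from_pretrained(name)
-model = AutoModelForMaskedLM.from_pretrained(name)
-
-def mask_prob(sentence, word):
-    """Probability the model assigns to `word` at the [MASK] position."""
-    inputs = tok(sentence, return_tensors="pt")
-    mask_pos = (inputs["input_ids"][0] == tok.mask_token_id).nonzero().item()
-    with torch.no_grad():
-        probs = model(**inputs).logits[0, mask_pos].softmax(-1)
-    return probs[tok.convert_tokens_to_ids(word)].item()
-
-for place in ["Texas", "New York"]:
-    sentence = f"In {place} they like to buy [MASK]."
-    print(place, {w: round(mask_prob(sentence, w), 4) for w in ["cattle", "clothes"]})
-```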
-
-
-You can **edit these sentences**. Or try one of these comparisons to get started:
-
-To the extent that a computer program can "know" something, what does BERT know about where you live?
-### What's in a Name?
-
-This technique can also probe what associations BERT has learned about different groups of people. For example, it predicts people named Elsie are older than people named Lauren:
-
-
-
-It's also learned that people named Jim have more [typically masculine](https://flowingdata.com/2017/09/11/most-female-and-male-occupations-since-1950/) jobs than people named Jane:
-
-
-
-These aren't just spurious correlations — Elsies really are more likely to be [older](https://rhiever.github.io/name-age-calculator/) than Laurens. And occupations the model associates with feminine names are held by a [higher percentage](https://purehost.bath.ac.uk/ws/portalfiles/portal/168480066/CaliskanEtAl_authors_full.pdf ) of women.
-
-Should we be concerned about these correlations? BERT was trained to fill in blanks in Wikipedia articles and books — it does a great job at that! The problem is that the internal representations of language these models have learned are used for much more – by some [measures](https://super.gluebenchmark.com/leaderboard), they're the best way we have of getting computers to understand and manipulate text.
-
-We wouldn't hesitate to call a conversation partner or recruiter sexist if they blithely assumed that doctors are men, but that's exactly the assumption BERT might make if heedlessly incorporated into a chatbot or HR software:
-
-
-
-Adjusting for assumptions like this isn't trivial. *Why* machine learning systems produce a given output still isn't well understood – determining if a credit model built on top of BERT rejected a loan application because of [gender discrimination](https://pair.withgoogle.com/explorables/hidden-bias/) might be quite difficult.
-
-Deploying large language models at scale also risks [amplifying](https://machinesgonewrong.com/bias_i/#harms-of-representation) and [perpetuating](http://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf) today's harmful stereotypes. When [prompted](https://arxiv.org/pdf/2101.05783v1.pdf#page=3) with "Two Muslims walked into a…", for example, [GPT-3](https://en.wikipedia.org/wiki/GPT-3) typically finishes the sentence with descriptions of violence.
-### How Can We Fix This?
-
-One conceptually straightforward approach: reduce unwanted correlations from the training data to [mitigate](https://arxiv.org/abs/1906.08976) model [bias](https://arxiv.org/abs/2005.14050).
-
-Last year a version of BERT called [Zari](https://ai.googleblog.com/2020/10/measuring-gendered-correlations-in-pre.html) was [trained](https://arxiv.org/pdf/2010.06032.pdf#page=6) with an additional set of generated sentences. For every sentence with a [gendered noun](https://github.com/uclanlp/corefBias/blob/master/WinoBias/wino/generalized_swaps.txt), like boy or aunt, another sentence that replaced the noun with its gender-partner was added to the training data: in addition to "The *lady* doth protest too much," Zari was also trained on "The *gentleman* doth protest too much."
-
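-The augmentation itself is conceptually simple. Here is a toy sketch of the idea, with a tiny hand-written swap list standing in for the full word lists linked above:
-
-```python
-# Tiny illustrative swap list; the actual training used much longer lists
-# of gendered word pairs.
-SWAPS = {
-    "boy": "girl", "girl": "boy",
-    "aunt": "uncle", "uncle": "aunt",
-    "lady": "gentleman", "gentleman": "lady",
-}
-
-def swap_gendered_words(sentence):
-    words = sentence.split()
-    return " ".join(SWAPS.get(w.lower(), w) for w in words)
-
-corpus = ["The lady doth protest too much."]
-# Train on both the original sentences and their gender-swapped counterparts.
-augmented = corpus + [swap_gendered_words(s) for s in corpus]
-```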
-
-
-Unlike BERT, Zari assigns nurses and doctors an equal probability of being a "she" or a "he" after being trained on the swapped sentences. This approach hasn't removed all the gender correlations; because names weren't swapped, Zari's association between masculine names and doctors has only slightly decreased from BERT's. And the retraining doesn't change how the model understands nonbinary gender.
-
-Something similar happened with [other attempts](https://arxiv.org/abs/1607.06520) to remove gender bias from models' representations of words. It's possible to mathematically define bias and perform "brain surgery" on a model to remove it, but language is steeped in gender. Large models can have billions of parameters in which to learn stereotypes — slightly different measures of bias have found the retrained models only [shifted the stereotypes](https://www.aclweb.org/anthology/N19-1061/) around to be undetectable by the initial measure.
-
-As with [other applications](https://pair.withgoogle.com/explorables/measuring-fairness/) of machine learning, it's helpful to focus instead on the actual harms that could occur. Tools like [AllenNLP](https://allennlp.org/), [LMdiff](http://lmdiff.net/) and the [Language Interpretability Tool](https://pair-code.github.io/lit/) make it easier to interact with language models to find where they might be falling short. Once those shortcomings are spotted, [task specific](https://arxiv.org/abs/2004.07667) mitigation measures can be simpler to apply than modifying the entire model.
-
-It's also possible that as models grow more capable, they might be able to [explain](https://arxiv.org/abs/2004.14546) and perform some of this debiasing themselves. Instead of forcing the model to tell us the gender of "the doctor," we could let it respond with [uncertainty](https://arr.am/2020/07/25/gpt-3-uncertainty-prompts/) that's [shown to the user](https://ai.googleblog.com/2018/12/providing-gender-specific-translations.html) and controls to override assumptions.
-
-### Credits
-
-Adam Pearce // July 2021
-
-Thanks to Ben Wedin, Emily Reif, James Wexler, Fernanda Viégas, Ian Tenney, Kellie Webster, Kevin Robinson, Lucas Dixon, Ludovic Peran, Martin Wattenberg, Michael Terry, Tolga Bolukbasi, Vinodkumar Prabhakaran, Xuezhi Wang, Yannick Assogba, and Zan Armstrong for their help with this piece.
-
-### Footnotes
-
- The BERT model used on this page is the Hugging Face version of [bert-large-uncased-whole-word-masking](https://huggingface.co/bert-large-uncased-whole-word-masking). "BERT" also refers to a type of model architecture; hundreds of BERT models have been [trained and published](https://huggingface.co/models?filter=bert). The model and chart code used here are available on [GitHub](https://github.com/PAIR-code/ai-explorables).
-
- Notice that "1800", "1900" and "2000" are some of the top predictions, though. People aren't actually more likely to be born at the start of a century, but in BERT's training corpus of books and Wikipedia articles round numbers are [more common](https://blocks.roadtolarissa.com/1wheel/cea123a8c17d51d9dacbd1c17e6fe601).
-
-Comparing BERT and Zari in this interface requires carefully tracking tokens during a transition. The [BERT Difference Plots](https://colab.research.google.com/drive/1xfPGKqjdE635cVSi-Ggt-cRBU5pyJNWP) colab has ideas for extensions to systematically look at differences between the models' output.
-
- This analysis shouldn't stop once a model is deployed — as language and model usage shifts, it's important to continue studying and mitigating potential harms.
-
-
-### Appendix: Differences Over Time
-
-In addition to looking at how predictions for men and women are different for a given sentence, we can also chart how those differences have changed over time:
-
-
-
-The convergence in more recent years suggests another potential mitigation technique: using a prefix to steer the model away from unwanted correlations while preserving its understanding of natural language.
-
-Using "In $year" as the prefix is quite limited, though, as it doesn't handle gender-neutral pronouns and potentially [increases](https://www.pnas.org/content/pnas/115/16/E3635.full.pdf#page=8) other correlations. However, it may be possible to [find a better prefix](https://arxiv.org/abs/2104.08691) that mitigates a specific type of bias with just a [couple of dozen examples](https://www.openai.com/blog/improving-language-model-behavior/ ).
-
-
-
-Closer examination of these differences in differences also shows there's a limit to the facts we can pull out of BERT this way.
-
-Below, the top row of charts shows how predicted differences in occupations between men and women change between 1908 and 2018. The rightmost chart shows the he/she difference in 1908 against the he/she difference in 2018.
-
-The flat slope of the rightmost chart indicates that the he/she difference has decreased for each job by about the same amount. But in reality, [shifts in occupation](https://www.weforum.org/agenda/2016/03/a-visual-history-of-gender-and-employment) weren't nearly so smooth, and some occupations, like accounting, switched from being majority male to majority female.
-
-
-
-This reality-prediction mismatch could be caused by a lack of training data, model size, or the coarseness of the probing method. There's an immense amount of general knowledge inside of these models — with a little bit of focused training, they can even become expert [trivia](https://t5-trivia.glitch.me/) players.
-
-### More Explorables
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/merve/measuring-fairness/public/third_party/d3_.js b/spaces/merve/measuring-fairness/public/third_party/d3_.js
deleted file mode 100644
index 9c4b6815ec3cdc0e9f8a072b2d05be7ad48fa703..0000000000000000000000000000000000000000
--- a/spaces/merve/measuring-fairness/public/third_party/d3_.js
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * @license
- * Lodash lodash.com/license | Underscore.js 1.8.3 underscorejs.org/LICENSE
- */
-;(function(){function n(n,t){return n.set(t[0],t[1]),n}function t(n,t){return n.add(t),n}function r(n,t,r){switch(r.length){case 0:return n.call(t);case 1:return n.call(t,r[0]);case 2:return n.call(t,r[0],r[1]);case 3:return n.call(t,r[0],r[1],r[2])}return n.apply(t,r)}function e(n,t,r,e){for(var u=-1,i=null==n?0:n.length;++u"']/g,J=RegExp(G.source),Y=RegExp(H.source),Q=/<%-([\s\S]+?)%>/g,X=/<%([\s\S]+?)%>/g,nn=/<%=([\s\S]+?)%>/g,tn=/\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/,rn=/^\w*$/,en=/^\./,un=/[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g,on=/[\\^$.*+?()[\]{}|]/g,fn=RegExp(on.source),cn=/^\s+|\s+$/g,an=/^\s+/,ln=/\s+$/,sn=/\{(?:\n\/\* \[wrapped with .+\] \*\/)?\n?/,hn=/\{\n\/\* \[wrapped with (.+)\] \*/,pn=/,? & /,_n=/[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g,vn=/\\(\\)?/g,gn=/\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g,dn=/\w*$/,yn=/^[-+]0x[0-9a-f]+$/i,bn=/^0b[01]+$/i,xn=/^\[object .+?Constructor\]$/,jn=/^0o[0-7]+$/i,wn=/^(?:0|[1-9]\d*)$/,mn=/[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g,An=/($^)/,kn=/['\n\r\u2028\u2029\\]/g,En="[\\ufe0e\\ufe0f]?(?:[\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff]|\\ud83c[\\udffb-\\udfff])?(?:\\u200d(?:[^\\ud800-\\udfff]|(?:\\ud83c[\\udde6-\\uddff]){2}|[\\ud800-\\udbff][\\udc00-\\udfff])[\\ufe0e\\ufe0f]?(?:[\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff]|\\ud83c[\\udffb-\\udfff])?)*",On="(?:[\\u2700-\\u27bf]|(?:\\ud83c[\\udde6-\\uddff]){2}|[\\ud800-\\udbff][\\udc00-\\udfff])"+En,Sn="(?:[^\\ud800-\\udfff][\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff]?|[\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff]|(?:\\ud83c[\\udde6-\\uddff]){2}|[\\ud800-\\udbff][\\udc00-\\udfff]|[\\ud800-\\udfff])",In=RegExp("['\u2019]","g"),Rn=RegExp("[\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff]","g"),zn=RegExp("\\ud83c[\\udffb-\\udfff](?=\\ud83c[\\udffb-\\udfff])|"+Sn+En,"g"),Wn=RegExp(["[A-Z\\xc0-\\xd6\\xd8-\\xde]?[a-z\\xdf-\\xf6\\xf8-\\xff]+(?:['\u2019](?:d|ll|m|re|s|t|ve))?(?=[\\xac\\xb1\\xd7\\xf7\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf\\u2000-\\u206f \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000]|[A-Z\\xc0-\\xd6\\xd8-\\xde]|$)|(?:[A-Z\\xc0-\\xd6\\xd8-\\xde]|[^\\ud800-\\udfff\\xac\\xb1\\xd7\\xf7\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf\\u2000-\\u206f \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000\\d+\\u2700-\\u27bfa-z\\xdf-\\xf6\\xf8-\\xffA-Z\\xc0-\\xd6\\xd8-\\xde])+(?:['\u2019](?:D|LL|M|RE|S|T|VE))?(?=[\\xac\\xb1\\xd7\\xf7\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf\\u2000-\\u206f \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000]|[A-Z\\xc0-\\xd6\\xd8-\\xde](?:[a-z\\xdf-\\xf6\\xf8-\\xff]|[^\\ud800-\\udfff\\xac\\xb1\\xd7\\xf7\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf\\u2000-\\u206f \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000\\d+\\u2700-\\u27bfa-z\\xdf-\\xf6\\xf8-\\xffA-Z\\xc0-\\xd6\\xd8-\\xde])|$)|[A-Z\\xc0-\\xd6\\xd8-\\xde]?(?:[a-z\\xdf-\\xf6\\xf8-\\xff]|[^\\ud800-\\udfff\\xac\\xb1\\xd7\\xf7\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf\\u2000-\\u206f 
\\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000\\d+\\u2700-\\u27bfa-z\\xdf-\\xf6\\xf8-\\xffA-Z\\xc0-\\xd6\\xd8-\\xde])+(?:['\u2019](?:d|ll|m|re|s|t|ve))?|[A-Z\\xc0-\\xd6\\xd8-\\xde]+(?:['\u2019](?:D|LL|M|RE|S|T|VE))?|\\d*(?:(?:1ST|2ND|3RD|(?![123])\\dTH)\\b)|\\d*(?:(?:1st|2nd|3rd|(?![123])\\dth)\\b)|\\d+",On].join("|"),"g"),Bn=RegExp("[\\u200d\\ud800-\\udfff\\u0300-\\u036f\\ufe20-\\ufe2f\\u20d0-\\u20ff\\ufe0e\\ufe0f]"),Ln=/[a-z][A-Z]|[A-Z]{2,}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/,Un="Array Buffer DataView Date Error Float32Array Float64Array Function Int8Array Int16Array Int32Array Map Math Object Promise RegExp Set String Symbol TypeError Uint8Array Uint8ClampedArray Uint16Array Uint32Array WeakMap _ clearTimeout isFinite parseInt setTimeout".split(" "),Cn={};
-Cn["[object Float32Array]"]=Cn["[object Float64Array]"]=Cn["[object Int8Array]"]=Cn["[object Int16Array]"]=Cn["[object Int32Array]"]=Cn["[object Uint8Array]"]=Cn["[object Uint8ClampedArray]"]=Cn["[object Uint16Array]"]=Cn["[object Uint32Array]"]=true,Cn["[object Arguments]"]=Cn["[object Array]"]=Cn["[object ArrayBuffer]"]=Cn["[object Boolean]"]=Cn["[object DataView]"]=Cn["[object Date]"]=Cn["[object Error]"]=Cn["[object Function]"]=Cn["[object Map]"]=Cn["[object Number]"]=Cn["[object Object]"]=Cn["[object RegExp]"]=Cn["[object Set]"]=Cn["[object String]"]=Cn["[object WeakMap]"]=false;
-var Dn={};Dn["[object Arguments]"]=Dn["[object Array]"]=Dn["[object ArrayBuffer]"]=Dn["[object DataView]"]=Dn["[object Boolean]"]=Dn["[object Date]"]=Dn["[object Float32Array]"]=Dn["[object Float64Array]"]=Dn["[object Int8Array]"]=Dn["[object Int16Array]"]=Dn["[object Int32Array]"]=Dn["[object Map]"]=Dn["[object Number]"]=Dn["[object Object]"]=Dn["[object RegExp]"]=Dn["[object Set]"]=Dn["[object String]"]=Dn["[object Symbol]"]=Dn["[object Uint8Array]"]=Dn["[object Uint8ClampedArray]"]=Dn["[object Uint16Array]"]=Dn["[object Uint32Array]"]=true,
-Dn["[object Error]"]=Dn["[object Function]"]=Dn["[object WeakMap]"]=false;var Mn,Tn={"\\":"\\","'":"'","\n":"n","\r":"r","\u2028":"u2028","\u2029":"u2029"},$n=parseFloat,Fn=parseInt,Nn=typeof global=="object"&&global&&global.Object===Object&&global,Pn=typeof self=="object"&&self&&self.Object===Object&&self,Zn=Nn||Pn||Function("return this")(),qn=typeof exports=="object"&&exports&&!exports.nodeType&&exports,Vn=qn&&typeof module=="object"&&module&&!module.nodeType&&module,Kn=Vn&&Vn.exports===qn,Gn=Kn&&Nn.process;
-n:{try{Mn=Gn&&Gn.binding&&Gn.binding("util");break n}catch(n){}Mn=void 0}var Hn=Mn&&Mn.isArrayBuffer,Jn=Mn&&Mn.isDate,Yn=Mn&&Mn.isMap,Qn=Mn&&Mn.isRegExp,Xn=Mn&&Mn.isSet,nt=Mn&&Mn.isTypedArray,tt=j("length"),rt=w({"\xc0":"A","\xc1":"A","\xc2":"A","\xc3":"A","\xc4":"A","\xc5":"A","\xe0":"a","\xe1":"a","\xe2":"a","\xe3":"a","\xe4":"a","\xe5":"a","\xc7":"C","\xe7":"c","\xd0":"D","\xf0":"d","\xc8":"E","\xc9":"E","\xca":"E","\xcb":"E","\xe8":"e","\xe9":"e","\xea":"e","\xeb":"e","\xcc":"I","\xcd":"I","\xce":"I",
-"\xcf":"I","\xec":"i","\xed":"i","\xee":"i","\xef":"i","\xd1":"N","\xf1":"n","\xd2":"O","\xd3":"O","\xd4":"O","\xd5":"O","\xd6":"O","\xd8":"O","\xf2":"o","\xf3":"o","\xf4":"o","\xf5":"o","\xf6":"o","\xf8":"o","\xd9":"U","\xda":"U","\xdb":"U","\xdc":"U","\xf9":"u","\xfa":"u","\xfb":"u","\xfc":"u","\xdd":"Y","\xfd":"y","\xff":"y","\xc6":"Ae","\xe6":"ae","\xde":"Th","\xfe":"th","\xdf":"ss","\u0100":"A","\u0102":"A","\u0104":"A","\u0101":"a","\u0103":"a","\u0105":"a","\u0106":"C","\u0108":"C","\u010a":"C",
-"\u010c":"C","\u0107":"c","\u0109":"c","\u010b":"c","\u010d":"c","\u010e":"D","\u0110":"D","\u010f":"d","\u0111":"d","\u0112":"E","\u0114":"E","\u0116":"E","\u0118":"E","\u011a":"E","\u0113":"e","\u0115":"e","\u0117":"e","\u0119":"e","\u011b":"e","\u011c":"G","\u011e":"G","\u0120":"G","\u0122":"G","\u011d":"g","\u011f":"g","\u0121":"g","\u0123":"g","\u0124":"H","\u0126":"H","\u0125":"h","\u0127":"h","\u0128":"I","\u012a":"I","\u012c":"I","\u012e":"I","\u0130":"I","\u0129":"i","\u012b":"i","\u012d":"i",
-"\u012f":"i","\u0131":"i","\u0134":"J","\u0135":"j","\u0136":"K","\u0137":"k","\u0138":"k","\u0139":"L","\u013b":"L","\u013d":"L","\u013f":"L","\u0141":"L","\u013a":"l","\u013c":"l","\u013e":"l","\u0140":"l","\u0142":"l","\u0143":"N","\u0145":"N","\u0147":"N","\u014a":"N","\u0144":"n","\u0146":"n","\u0148":"n","\u014b":"n","\u014c":"O","\u014e":"O","\u0150":"O","\u014d":"o","\u014f":"o","\u0151":"o","\u0154":"R","\u0156":"R","\u0158":"R","\u0155":"r","\u0157":"r","\u0159":"r","\u015a":"S","\u015c":"S",
-"\u015e":"S","\u0160":"S","\u015b":"s","\u015d":"s","\u015f":"s","\u0161":"s","\u0162":"T","\u0164":"T","\u0166":"T","\u0163":"t","\u0165":"t","\u0167":"t","\u0168":"U","\u016a":"U","\u016c":"U","\u016e":"U","\u0170":"U","\u0172":"U","\u0169":"u","\u016b":"u","\u016d":"u","\u016f":"u","\u0171":"u","\u0173":"u","\u0174":"W","\u0175":"w","\u0176":"Y","\u0177":"y","\u0178":"Y","\u0179":"Z","\u017b":"Z","\u017d":"Z","\u017a":"z","\u017c":"z","\u017e":"z","\u0132":"IJ","\u0133":"ij","\u0152":"Oe","\u0153":"oe",
-"\u0149":"'n","\u017f":"s"}),et=w({"&":"&","<":"<",">":">",'"':""","'":"'"}),ut=w({"&":"&","<":"<",">":">",""":'"',"'":"'"}),it=function w(En){function On(n){if(xu(n)&&!af(n)&&!(n instanceof Mn)){if(n instanceof zn)return n;if(ci.call(n,"__wrapped__"))return Pe(n)}return new zn(n)}function Sn(){}function zn(n,t){this.__wrapped__=n,this.__actions__=[],this.__chain__=!!t,this.__index__=0,this.__values__=F}function Mn(n){this.__wrapped__=n,this.__actions__=[],this.__dir__=1,
-this.__filtered__=false,this.__iteratees__=[],this.__takeCount__=4294967295,this.__views__=[]}function Tn(n){var t=-1,r=null==n?0:n.length;for(this.clear();++t=t?n:t)),n}function dt(n,t,r,e,i,o){var f,c=1&t,a=2&t,l=4&t;if(r&&(f=i?r(n,e,i,o):r(n)),f!==F)return f;if(!bu(n))return n;if(e=af(n)){if(f=Ee(n),!c)return Mr(n,f)}else{var s=yo(n),h="[object Function]"==s||"[object GeneratorFunction]"==s;if(sf(n))return Wr(n,c);if("[object Object]"==s||"[object Arguments]"==s||h&&!i){if(f=a||h?{}:Oe(n),!c)return a?Fr(n,pt(f,n)):$r(n,ht(f,n))}else{if(!Dn[s])return i?n:{};f=Se(n,s,dt,c)}}if(o||(o=new Vn),
-i=o.get(n))return i;o.set(n,f);var a=l?a?ye:de:a?Uu:Lu,p=e?F:a(n);return u(p||n,function(e,u){p&&(u=e,e=n[u]),at(f,u,dt(e,t,r,u,n,o))}),f}function yt(n){var t=Lu(n);return function(r){return bt(r,n,t)}}function bt(n,t,r){var e=r.length;if(null==n)return!e;for(n=ni(n);e--;){var u=r[e],i=t[u],o=n[u];if(o===F&&!(u in n)||!i(o))return false}return true}function xt(n,t,r){if(typeof n!="function")throw new ei("Expected a function");return jo(function(){n.apply(F,r)},t)}function jt(n,t,r,e){var u=-1,i=c,o=true,f=n.length,s=[],h=t.length;
-if(!f)return s;r&&(t=l(t,S(r))),e?(i=a,o=false):200<=t.length&&(i=R,o=false,t=new qn(t));n:for(;++ut}function Bt(n,t){return null!=n&&ci.call(n,t)}function Lt(n,t){return null!=n&&t in ni(n)}function Ut(n,t,r){for(var e=r?a:c,u=n[0].length,i=n.length,o=i,f=Hu(i),s=1/0,h=[];o--;){var p=n[o];o&&t&&(p=l(p,S(t))),s=Mi(p.length,s),f[o]=!r&&(t||120<=u&&120<=p.length)?new qn(o&&p):F}var p=n[0],_=-1,v=f[0];n:for(;++_t.length?n:It(n,vr(t,0,-1)),t=null==n?n:n[$e(Ge(t))],null==t?F:r(t,n,e)}function Mt(n){return xu(n)&&"[object Arguments]"==zt(n)}function Tt(n){return xu(n)&&"[object ArrayBuffer]"==zt(n)}function $t(n){return xu(n)&&"[object Date]"==zt(n)}function Ft(n,t,r,e,u){if(n===t)t=true;else if(null==n||null==t||!xu(n)&&!xu(t))t=n!==n&&t!==t;else n:{
-var i=af(n),o=af(t),f=i?"[object Array]":yo(n),c=o?"[object Array]":yo(t),f="[object Arguments]"==f?"[object Object]":f,c="[object Arguments]"==c?"[object Object]":c,a="[object Object]"==f,o="[object Object]"==c;if((c=f==c)&&sf(n)){if(!sf(t)){t=false;break n}i=true,a=false}if(c&&!a)u||(u=new Vn),t=i||gf(n)?_e(n,t,r,e,Ft,u):ve(n,t,f,r,e,Ft,u);else{if(!(1&r)&&(i=a&&ci.call(n,"__wrapped__"),f=o&&ci.call(t,"__wrapped__"),i||f)){n=i?n.value():n,t=f?t.value():t,u||(u=new Vn),t=Ft(n,t,r,e,u);break n}if(c)t:if(u||(u=new Vn),
-i=1&r,f=de(n),o=f.length,c=de(t).length,o==c||i){for(a=o;a--;){var l=f[a];if(!(i?l in t:ci.call(t,l))){t=false;break t}}if((c=u.get(n))&&u.get(t))t=c==t;else{c=true,u.set(n,t),u.set(t,n);for(var s=i;++at?r:0,Re(t,r)?n[t]:F}function rr(n,t,r){var e=-1;return t=l(t.length?t:[Nu],S(je())),n=Yt(n,function(n){return{a:l(t,function(t){return t(n)}),b:++e,c:n}}),A(n,function(n,t){var e;n:{e=-1;for(var u=n.a,i=t.a,o=u.length,f=r.length;++e=f?c:c*("desc"==r[e]?-1:1);
-break n}}e=n.b-t.b}return e})}function er(n,t){return ur(n,t,function(t,r){return Bu(n,r)})}function ur(n,t,r){for(var e=-1,u=t.length,i={};++et||9007199254740991t&&(t=-t>u?0:u+t),r=r>u?u:r,0>r&&(r+=u),u=t>r?0:r-t>>>0,t>>>=0,r=Hu(u);++e=u){for(;e>>1,o=n[i];null!==o&&!Au(o)&&(r?o<=t:ot.length?n:It(n,vr(t,0,-1)),
-null==n||delete n[$e(Ge(t))]}function Ar(n,t,r,e){for(var u=n.length,i=e?u:-1;(e?i--:++ie)return e?wr(n[0]):[];for(var u=-1,i=Hu(e);++u=e?n:vr(n,t,r)}function Wr(n,t){if(t)return n.slice();var r=n.length,r=yi?yi(r):new n.constructor(r);return n.copy(r),r}function Br(n){var t=new n.constructor(n.byteLength);return new di(t).set(new di(n)),t}function Lr(n,t){return new n.constructor(t?Br(n.buffer):n.buffer,n.byteOffset,n.length)}function Ur(n,t){
-if(n!==t){var r=n!==F,e=null===n,u=n===n,i=Au(n),o=t!==F,f=null===t,c=t===t,a=Au(t);if(!f&&!a&&!i&&n>t||i&&o&&c&&!f&&!a||e&&o&&c||!r&&c||!u)return 1;if(!e&&!i&&!a&&nu?F:i,u=1),t=ni(t);++eo&&f[0]!==a&&f[o-1]!==a?[]:C(f,a),o-=c.length,or?r?ar(t,n):t:(r=ar(t,Ri(n/T(t))),Bn.test(t)?zr($(r),0,n).join(""):r.slice(0,n))}function ue(n,t,e,u){function i(){for(var t=-1,c=arguments.length,a=-1,l=u.length,s=Hu(l+c),h=this&&this!==Zn&&this instanceof i?f:n;++at||e)&&(1&n&&(i[2]=h[2],t|=1&r?0:4),(r=h[3])&&(e=i[3],i[3]=e?Cr(e,r,h[4]):r,i[4]=e?C(i[3],"__lodash_placeholder__"):h[4]),(r=h[5])&&(e=i[5],i[5]=e?Dr(e,r,h[6]):r,i[6]=e?C(i[5],"__lodash_placeholder__"):h[6]),(r=h[7])&&(i[7]=r),128&n&&(i[8]=null==i[8]?h[8]:Mi(i[8],h[8])),null==i[9]&&(i[9]=h[9]),i[0]=h[0],i[1]=t),n=i[0],t=i[1],
-r=i[2],e=i[3],u=i[4],f=i[9]=i[9]===F?c?0:n.length:Di(i[9]-a,0),!f&&24&t&&(t&=-25),De((h?lo:xo)(t&&1!=t?8==t||16==t?Jr(n,t,f):32!=t&&33!=t||u.length?Xr.apply(F,i):ue(n,t,r,e):Vr(n,t,r),i),n,t)}function se(n,t,r,e){return n===F||hu(n,ii[r])&&!ci.call(e,r)?t:n}function he(n,t,r,e,u,i){return bu(n)&&bu(t)&&(i.set(t,n),nr(n,t,F,he,i),i.delete(t)),n}function pe(n){return wu(n)?F:n}function _e(n,t,r,e,u,i){var o=1&r,f=n.length,c=t.length;if(f!=c&&!(o&&c>f))return false;if((c=i.get(n))&&i.get(t))return c==t;var c=-1,a=true,l=2&r?new qn:F;
-for(i.set(n,t),i.set(t,n);++cr&&(r=Di(e+r,0)),g(n,je(t,3),r)):-1}function qe(n,t,r){var e=null==n?0:n.length;if(!e)return-1;var u=e-1;return r!==F&&(u=Ou(r),u=0>r?Di(e+u,0):Mi(u,e-1)),
-g(n,je(t,3),u,true)}function Ve(n){return(null==n?0:n.length)?kt(n,1):[]}function Ke(n){return n&&n.length?n[0]:F}function Ge(n){var t=null==n?0:n.length;return t?n[t-1]:F}function He(n,t){return n&&n.length&&t&&t.length?or(n,t):n}function Je(n){return null==n?n:Ni.call(n)}function Ye(n){if(!n||!n.length)return[];var t=0;return n=f(n,function(n){if(_u(n))return t=Di(n.length,t),true}),E(t,function(t){return l(n,j(t))})}function Qe(n,t){if(!n||!n.length)return[];var e=Ye(n);return null==t?e:l(e,function(n){
-return r(t,F,n)})}function Xe(n){return n=On(n),n.__chain__=true,n}function nu(n,t){return t(n)}function tu(){return this}function ru(n,t){return(af(n)?u:oo)(n,je(t,3))}function eu(n,t){return(af(n)?i:fo)(n,je(t,3))}function uu(n,t){return(af(n)?l:Yt)(n,je(t,3))}function iu(n,t,r){return t=r?F:t,t=n&&null==t?n.length:t,le(n,128,F,F,F,F,t)}function ou(n,t){var r;if(typeof t!="function")throw new ei("Expected a function");return n=Ou(n),function(){return 0<--n&&(r=t.apply(this,arguments)),1>=n&&(t=F),
-r}}function fu(n,t,r){return t=r?F:t,n=le(n,8,F,F,F,F,F,t),n.placeholder=fu.placeholder,n}function cu(n,t,r){return t=r?F:t,n=le(n,16,F,F,F,F,F,t),n.placeholder=cu.placeholder,n}function au(n,t,r){function e(t){var r=c,e=a;return c=a=F,_=t,s=n.apply(e,r)}function u(n){var r=n-p;return n-=_,p===F||r>=t||0>r||g&&n>=l}function i(){var n=Jo();if(u(n))return o(n);var r,e=jo;r=n-_,n=t-(n-p),r=g?Mi(n,l-r):n,h=e(i,r)}function o(n){return h=F,d&&c?e(n):(c=a=F,s)}function f(){var n=Jo(),r=u(n);if(c=arguments,
-a=this,p=n,r){if(h===F)return _=n=p,h=jo(i,t),v?e(n):s;if(g)return h=jo(i,t),e(p)}return h===F&&(h=jo(i,t)),s}var c,a,l,s,h,p,_=0,v=false,g=false,d=true;if(typeof n!="function")throw new ei("Expected a function");return t=Iu(t)||0,bu(r)&&(v=!!r.leading,l=(g="maxWait"in r)?Di(Iu(r.maxWait)||0,t):l,d="trailing"in r?!!r.trailing:d),f.cancel=function(){h!==F&&ho(h),_=0,c=p=a=h=F},f.flush=function(){return h===F?s:o(Jo())},f}function lu(n,t){function r(){var e=arguments,u=t?t.apply(this,e):e[0],i=r.cache;return i.has(u)?i.get(u):(e=n.apply(this,e),
-r.cache=i.set(u,e)||i,e)}if(typeof n!="function"||null!=t&&typeof t!="function")throw new ei("Expected a function");return r.cache=new(lu.Cache||Pn),r}function su(n){if(typeof n!="function")throw new ei("Expected a function");return function(){var t=arguments;switch(t.length){case 0:return!n.call(this);case 1:return!n.call(this,t[0]);case 2:return!n.call(this,t[0],t[1]);case 3:return!n.call(this,t[0],t[1],t[2])}return!n.apply(this,t)}}function hu(n,t){return n===t||n!==n&&t!==t}function pu(n){return null!=n&&yu(n.length)&&!gu(n);
-}function _u(n){return xu(n)&&pu(n)}function vu(n){if(!xu(n))return false;var t=zt(n);return"[object Error]"==t||"[object DOMException]"==t||typeof n.message=="string"&&typeof n.name=="string"&&!wu(n)}function gu(n){return!!bu(n)&&(n=zt(n),"[object Function]"==n||"[object GeneratorFunction]"==n||"[object AsyncFunction]"==n||"[object Proxy]"==n)}function du(n){return typeof n=="number"&&n==Ou(n)}function yu(n){return typeof n=="number"&&-1=n}function bu(n){var t=typeof n;return null!=n&&("object"==t||"function"==t);
-}function xu(n){return null!=n&&typeof n=="object"}function ju(n){return typeof n=="number"||xu(n)&&"[object Number]"==zt(n)}function wu(n){return!(!xu(n)||"[object Object]"!=zt(n))&&(n=bi(n),null===n||(n=ci.call(n,"constructor")&&n.constructor,typeof n=="function"&&n instanceof n&&fi.call(n)==hi))}function mu(n){return typeof n=="string"||!af(n)&&xu(n)&&"[object String]"==zt(n)}function Au(n){return typeof n=="symbol"||xu(n)&&"[object Symbol]"==zt(n)}function ku(n){if(!n)return[];if(pu(n))return mu(n)?$(n):Mr(n);
-if(Ai&&n[Ai]){n=n[Ai]();for(var t,r=[];!(t=n.next()).done;)r.push(t.value);return r}return t=yo(n),("[object Map]"==t?L:"[object Set]"==t?D:Du)(n)}function Eu(n){return n?(n=Iu(n),n===N||n===-N?1.7976931348623157e308*(0>n?-1:1):n===n?n:0):0===n?n:0}function Ou(n){n=Eu(n);var t=n%1;return n===n?t?n-t:n:0}function Su(n){return n?gt(Ou(n),0,4294967295):0}function Iu(n){if(typeof n=="number")return n;if(Au(n))return P;if(bu(n)&&(n=typeof n.valueOf=="function"?n.valueOf():n,n=bu(n)?n+"":n),typeof n!="string")return 0===n?n:+n;
-n=n.replace(cn,"");var t=bn.test(n);return t||jn.test(n)?Fn(n.slice(2),t?2:8):yn.test(n)?P:+n}function Ru(n){return Tr(n,Uu(n))}function zu(n){return null==n?"":jr(n)}function Wu(n,t,r){return n=null==n?F:It(n,t),n===F?r:n}function Bu(n,t){return null!=n&&ke(n,t,Lt)}function Lu(n){return pu(n)?Gn(n):Ht(n)}function Uu(n){if(pu(n))n=Gn(n,true);else if(bu(n)){var t,r=Le(n),e=[];for(t in n)("constructor"!=t||!r&&ci.call(n,t))&&e.push(t);n=e}else{if(t=[],null!=n)for(r in ni(n))t.push(r);n=t}return n}function Cu(n,t){
-if(null==n)return{};var r=l(ye(n),function(n){return[n]});return t=je(t),ur(n,r,function(n,r){return t(n,r[0])})}function Du(n){return null==n?[]:I(n,Lu(n))}function Mu(n){return Nf(zu(n).toLowerCase())}function Tu(n){return(n=zu(n))&&n.replace(mn,rt).replace(Rn,"")}function $u(n,t,r){return n=zu(n),t=r?F:t,t===F?Ln.test(n)?n.match(Wn)||[]:n.match(_n)||[]:n.match(t)||[]}function Fu(n){return function(){return n}}function Nu(n){return n}function Pu(n){return Gt(typeof n=="function"?n:dt(n,1))}function Zu(n,t,r){
-var e=Lu(t),i=St(t,e);null!=r||bu(t)&&(i.length||!e.length)||(r=t,t=n,n=this,i=St(t,Lu(t)));var o=!(bu(r)&&"chain"in r&&!r.chain),f=gu(n);return u(i,function(r){var e=t[r];n[r]=e,f&&(n.prototype[r]=function(){var t=this.__chain__;if(o||t){var r=n(this.__wrapped__);return(r.__actions__=Mr(this.__actions__)).push({func:e,args:arguments,thisArg:n}),r.__chain__=t,r}return e.apply(n,s([this.value()],arguments))})}),n}function qu(){}function Vu(n){return We(n)?j($e(n)):ir(n)}function Ku(){return[]}function Gu(){
-return false}En=null==En?Zn:it.defaults(Zn.Object(),En,it.pick(Zn,Un));var Hu=En.Array,Ju=En.Date,Yu=En.Error,Qu=En.Function,Xu=En.Math,ni=En.Object,ti=En.RegExp,ri=En.String,ei=En.TypeError,ui=Hu.prototype,ii=ni.prototype,oi=En["__core-js_shared__"],fi=Qu.prototype.toString,ci=ii.hasOwnProperty,ai=0,li=function(){var n=/[^.]+$/.exec(oi&&oi.keys&&oi.keys.IE_PROTO||"");return n?"Symbol(src)_1."+n:""}(),si=ii.toString,hi=fi.call(ni),pi=Zn._,_i=ti("^"+fi.call(ci).replace(on,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$"),vi=Kn?En.Buffer:F,gi=En.Symbol,di=En.Uint8Array,yi=vi?vi.f:F,bi=U(ni.getPrototypeOf,ni),xi=ni.create,ji=ii.propertyIsEnumerable,wi=ui.splice,mi=gi?gi.isConcatSpreadable:F,Ai=gi?gi.iterator:F,ki=gi?gi.toStringTag:F,Ei=function(){
-try{var n=Ae(ni,"defineProperty");return n({},"",{}),n}catch(n){}}(),Oi=En.clearTimeout!==Zn.clearTimeout&&En.clearTimeout,Si=Ju&&Ju.now!==Zn.Date.now&&Ju.now,Ii=En.setTimeout!==Zn.setTimeout&&En.setTimeout,Ri=Xu.ceil,zi=Xu.floor,Wi=ni.getOwnPropertySymbols,Bi=vi?vi.isBuffer:F,Li=En.isFinite,Ui=ui.join,Ci=U(ni.keys,ni),Di=Xu.max,Mi=Xu.min,Ti=Ju.now,$i=En.parseInt,Fi=Xu.random,Ni=ui.reverse,Pi=Ae(En,"DataView"),Zi=Ae(En,"Map"),qi=Ae(En,"Promise"),Vi=Ae(En,"Set"),Ki=Ae(En,"WeakMap"),Gi=Ae(ni,"create"),Hi=Ki&&new Ki,Ji={},Yi=Fe(Pi),Qi=Fe(Zi),Xi=Fe(qi),no=Fe(Vi),to=Fe(Ki),ro=gi?gi.prototype:F,eo=ro?ro.valueOf:F,uo=ro?ro.toString:F,io=function(){
-function n(){}return function(t){return bu(t)?xi?xi(t):(n.prototype=t,t=new n,n.prototype=F,t):{}}}();On.templateSettings={escape:Q,evaluate:X,interpolate:nn,variable:"",imports:{_:On}},On.prototype=Sn.prototype,On.prototype.constructor=On,zn.prototype=io(Sn.prototype),zn.prototype.constructor=zn,Mn.prototype=io(Sn.prototype),Mn.prototype.constructor=Mn,Tn.prototype.clear=function(){this.__data__=Gi?Gi(null):{},this.size=0},Tn.prototype.delete=function(n){return n=this.has(n)&&delete this.__data__[n],
-this.size-=n?1:0,n},Tn.prototype.get=function(n){var t=this.__data__;return Gi?(n=t[n],"__lodash_hash_undefined__"===n?F:n):ci.call(t,n)?t[n]:F},Tn.prototype.has=function(n){var t=this.__data__;return Gi?t[n]!==F:ci.call(t,n)},Tn.prototype.set=function(n,t){var r=this.__data__;return this.size+=this.has(n)?0:1,r[n]=Gi&&t===F?"__lodash_hash_undefined__":t,this},Nn.prototype.clear=function(){this.__data__=[],this.size=0},Nn.prototype.delete=function(n){var t=this.__data__;return n=lt(t,n),!(0>n)&&(n==t.length-1?t.pop():wi.call(t,n,1),
---this.size,true)},Nn.prototype.get=function(n){var t=this.__data__;return n=lt(t,n),0>n?F:t[n][1]},Nn.prototype.has=function(n){return-1e?(++this.size,r.push([n,t])):r[e][1]=t,this},Pn.prototype.clear=function(){this.size=0,this.__data__={hash:new Tn,map:new(Zi||Nn),string:new Tn}},Pn.prototype.delete=function(n){return n=we(this,n).delete(n),this.size-=n?1:0,n},Pn.prototype.get=function(n){return we(this,n).get(n);
-},Pn.prototype.has=function(n){return we(this,n).has(n)},Pn.prototype.set=function(n,t){var r=we(this,n),e=r.size;return r.set(n,t),this.size+=r.size==e?0:1,this},qn.prototype.add=qn.prototype.push=function(n){return this.__data__.set(n,"__lodash_hash_undefined__"),this},qn.prototype.has=function(n){return this.__data__.has(n)},Vn.prototype.clear=function(){this.__data__=new Nn,this.size=0},Vn.prototype.delete=function(n){var t=this.__data__;return n=t.delete(n),this.size=t.size,n},Vn.prototype.get=function(n){
-return this.__data__.get(n)},Vn.prototype.has=function(n){return this.__data__.has(n)},Vn.prototype.set=function(n,t){var r=this.__data__;if(r instanceof Nn){var e=r.__data__;if(!Zi||199>e.length)return e.push([n,t]),this.size=++r.size,this;r=this.__data__=new Pn(e)}return r.set(n,t),this.size=r.size,this};var oo=Zr(Et),fo=Zr(Ot,true),co=qr(),ao=qr(true),lo=Hi?function(n,t){return Hi.set(n,t),n}:Nu,so=Ei?function(n,t){return Ei(n,"toString",{configurable:true,enumerable:false,value:Fu(t),writable:true})}:Nu,ho=Oi||function(n){
-return Zn.clearTimeout(n)},po=Vi&&1/D(new Vi([,-0]))[1]==N?function(n){return new Vi(n)}:qu,_o=Hi?function(n){return Hi.get(n)}:qu,vo=Wi?function(n){return null==n?[]:(n=ni(n),f(Wi(n),function(t){return ji.call(n,t)}))}:Ku,go=Wi?function(n){for(var t=[];n;)s(t,vo(n)),n=bi(n);return t}:Ku,yo=zt;(Pi&&"[object DataView]"!=yo(new Pi(new ArrayBuffer(1)))||Zi&&"[object Map]"!=yo(new Zi)||qi&&"[object Promise]"!=yo(qi.resolve())||Vi&&"[object Set]"!=yo(new Vi)||Ki&&"[object WeakMap]"!=yo(new Ki))&&(yo=function(n){
-var t=zt(n);if(n=(n="[object Object]"==t?n.constructor:F)?Fe(n):"")switch(n){case Yi:return"[object DataView]";case Qi:return"[object Map]";case Xi:return"[object Promise]";case no:return"[object Set]";case to:return"[object WeakMap]"}return t});var bo=oi?gu:Gu,xo=Me(lo),jo=Ii||function(n,t){return Zn.setTimeout(n,t)},wo=Me(so),mo=function(n){n=lu(n,function(n){return 500===t.size&&t.clear(),n});var t=n.cache;return n}(function(n){var t=[];return en.test(n)&&t.push(""),n.replace(un,function(n,r,e,u){
-t.push(e?u.replace(vn,"$1"):r||n)}),t}),Ao=lr(function(n,t){return _u(n)?jt(n,kt(t,1,_u,true)):[]}),ko=lr(function(n,t){var r=Ge(t);return _u(r)&&(r=F),_u(n)?jt(n,kt(t,1,_u,true),je(r,2)):[]}),Eo=lr(function(n,t){var r=Ge(t);return _u(r)&&(r=F),_u(n)?jt(n,kt(t,1,_u,true),F,r):[]}),Oo=lr(function(n){var t=l(n,Sr);return t.length&&t[0]===n[0]?Ut(t):[]}),So=lr(function(n){var t=Ge(n),r=l(n,Sr);return t===Ge(r)?t=F:r.pop(),r.length&&r[0]===n[0]?Ut(r,je(t,2)):[]}),Io=lr(function(n){var t=Ge(n),r=l(n,Sr);return(t=typeof t=="function"?t:F)&&r.pop(),
-r.length&&r[0]===n[0]?Ut(r,F,t):[]}),Ro=lr(He),zo=ge(function(n,t){var r=null==n?0:n.length,e=vt(n,t);return fr(n,l(t,function(n){return Re(n,r)?+n:n}).sort(Ur)),e}),Wo=lr(function(n){return wr(kt(n,1,_u,true))}),Bo=lr(function(n){var t=Ge(n);return _u(t)&&(t=F),wr(kt(n,1,_u,true),je(t,2))}),Lo=lr(function(n){var t=Ge(n),t=typeof t=="function"?t:F;return wr(kt(n,1,_u,true),F,t)}),Uo=lr(function(n,t){return _u(n)?jt(n,t):[]}),Co=lr(function(n){return Er(f(n,_u))}),Do=lr(function(n){var t=Ge(n);return _u(t)&&(t=F),
-Er(f(n,_u),je(t,2))}),Mo=lr(function(n){var t=Ge(n),t=typeof t=="function"?t:F;return Er(f(n,_u),F,t)}),To=lr(Ye),$o=lr(function(n){var t=n.length,t=1=t}),cf=Mt(function(){return arguments}())?Mt:function(n){return xu(n)&&ci.call(n,"callee")&&!ji.call(n,"callee")},af=Hu.isArray,lf=Hn?S(Hn):Tt,sf=Bi||Gu,hf=Jn?S(Jn):$t,pf=Yn?S(Yn):Nt,_f=Qn?S(Qn):qt,vf=Xn?S(Xn):Vt,gf=nt?S(nt):Kt,df=oe(Jt),yf=oe(function(n,t){return n<=t}),bf=Pr(function(n,t){
-if(Le(t)||pu(t))Tr(t,Lu(t),n);else for(var r in t)ci.call(t,r)&&at(n,r,t[r])}),xf=Pr(function(n,t){Tr(t,Uu(t),n)}),jf=Pr(function(n,t,r,e){Tr(t,Uu(t),n,e)}),wf=Pr(function(n,t,r,e){Tr(t,Lu(t),n,e)}),mf=ge(vt),Af=lr(function(n){return n.push(F,se),r(jf,F,n)}),kf=lr(function(n){return n.push(F,he),r(Rf,F,n)}),Ef=ne(function(n,t,r){n[t]=r},Fu(Nu)),Of=ne(function(n,t,r){ci.call(n,t)?n[t].push(r):n[t]=[r]},je),Sf=lr(Dt),If=Pr(function(n,t,r){nr(n,t,r)}),Rf=Pr(function(n,t,r,e){nr(n,t,r,e)}),zf=ge(function(n,t){
-var r={};if(null==n)return r;var e=false;t=l(t,function(t){return t=Rr(t,n),e||(e=1--n)return t.apply(this,arguments)}},On.ary=iu,On.assign=bf,On.assignIn=xf,On.assignInWith=jf,On.assignWith=wf,On.at=mf,On.before=ou,On.bind=Yo,On.bindAll=Zf,On.bindKey=Qo,On.castArray=function(){if(!arguments.length)return[];var n=arguments[0];return af(n)?n:[n]},
-On.chain=Xe,On.chunk=function(n,t,r){if(t=(r?ze(n,t,r):t===F)?1:Di(Ou(t),0),r=null==n?0:n.length,!r||1>t)return[];for(var e=0,u=0,i=Hu(Ri(r/t));et?0:t,e)):[]},On.dropRight=function(n,t,r){var e=null==n?0:n.length;return e?(t=r||t===F?1:Ou(t),t=e-t,vr(n,0,0>t?0:t)):[]},On.dropRightWhile=function(n,t){return n&&n.length?Ar(n,je(t,3),true,true):[]},On.dropWhile=function(n,t){return n&&n.length?Ar(n,je(t,3),true):[]},On.fill=function(n,t,r,e){var u=null==n?0:n.length;if(!u)return[];for(r&&typeof r!="number"&&ze(n,t,r)&&(r=0,e=u),u=n.length,r=Ou(r),0>r&&(r=-r>u?0:u+r),e=e===F||e>u?u:Ou(e),0>e&&(e+=u),e=r>e?0:Su(e);r>>0,r?(n=zu(n))&&(typeof t=="string"||null!=t&&!_f(t))&&(t=jr(t),
-!t&&Bn.test(n))?zr($(n),0,r):n.split(t,r):[]},On.spread=function(n,t){if(typeof n!="function")throw new ei("Expected a function");return t=null==t?0:Di(Ou(t),0),lr(function(e){var u=e[t];return e=zr(e,0,t),u&&s(e,u),r(n,this,e)})},On.tail=function(n){var t=null==n?0:n.length;return t?vr(n,1,t):[]},On.take=function(n,t,r){return n&&n.length?(t=r||t===F?1:Ou(t),vr(n,0,0>t?0:t)):[]},On.takeRight=function(n,t,r){var e=null==n?0:n.length;return e?(t=r||t===F?1:Ou(t),t=e-t,vr(n,0>t?0:t,e)):[]},On.takeRightWhile=function(n,t){
-return n&&n.length?Ar(n,je(t,3),false,true):[]},On.takeWhile=function(n,t){return n&&n.length?Ar(n,je(t,3)):[]},On.tap=function(n,t){return t(n),n},On.throttle=function(n,t,r){var e=true,u=true;if(typeof n!="function")throw new ei("Expected a function");return bu(r)&&(e="leading"in r?!!r.leading:e,u="trailing"in r?!!r.trailing:u),au(n,t,{leading:e,maxWait:t,trailing:u})},On.thru=nu,On.toArray=ku,On.toPairs=Bf,On.toPairsIn=Lf,On.toPath=function(n){return af(n)?l(n,$e):Au(n)?[n]:Mr(mo(zu(n)))},On.toPlainObject=Ru,
-On.transform=function(n,t,r){var e=af(n),i=e||sf(n)||gf(n);if(t=je(t,4),null==r){var o=n&&n.constructor;r=i?e?new o:[]:bu(n)&&gu(o)?io(bi(n)):{}}return(i?u:Et)(n,function(n,e,u){return t(r,n,e,u)}),r},On.unary=function(n){return iu(n,1)},On.union=Wo,On.unionBy=Bo,On.unionWith=Lo,On.uniq=function(n){return n&&n.length?wr(n):[]},On.uniqBy=function(n,t){return n&&n.length?wr(n,je(t,2)):[]},On.uniqWith=function(n,t){return t=typeof t=="function"?t:F,n&&n.length?wr(n,F,t):[]},On.unset=function(n,t){return null==n||mr(n,t);
-},On.unzip=Ye,On.unzipWith=Qe,On.update=function(n,t,r){return null==n?n:pr(n,t,Ir(r)(It(n,t)),void 0)},On.updateWith=function(n,t,r,e){return e=typeof e=="function"?e:F,null!=n&&(n=pr(n,t,Ir(r)(It(n,t)),e)),n},On.values=Du,On.valuesIn=function(n){return null==n?[]:I(n,Uu(n))},On.without=Uo,On.words=$u,On.wrap=function(n,t){return rf(Ir(t),n)},On.xor=Co,On.xorBy=Do,On.xorWith=Mo,On.zip=To,On.zipObject=function(n,t){return Or(n||[],t||[],at)},On.zipObjectDeep=function(n,t){return Or(n||[],t||[],pr);
-},On.zipWith=$o,On.entries=Bf,On.entriesIn=Lf,On.extend=xf,On.extendWith=jf,Zu(On,On),On.add=nc,On.attempt=Pf,On.camelCase=Uf,On.capitalize=Mu,On.ceil=tc,On.clamp=function(n,t,r){return r===F&&(r=t,t=F),r!==F&&(r=Iu(r),r=r===r?r:0),t!==F&&(t=Iu(t),t=t===t?t:0),gt(Iu(n),t,r)},On.clone=function(n){return dt(n,4)},On.cloneDeep=function(n){return dt(n,5)},On.cloneDeepWith=function(n,t){return t=typeof t=="function"?t:F,dt(n,5,t)},On.cloneWith=function(n,t){return t=typeof t=="function"?t:F,dt(n,4,t)},
-On.conformsTo=function(n,t){return null==t||bt(n,t,Lu(t))},On.deburr=Tu,On.defaultTo=function(n,t){return null==n||n!==n?t:n},On.divide=rc,On.endsWith=function(n,t,r){n=zu(n),t=jr(t);var e=n.length,e=r=r===F?e:gt(Ou(r),0,e);return r-=t.length,0<=r&&n.slice(r,e)==t},On.eq=hu,On.escape=function(n){return(n=zu(n))&&Y.test(n)?n.replace(H,et):n},On.escapeRegExp=function(n){return(n=zu(n))&&fn.test(n)?n.replace(on,"\\$&"):n},On.every=function(n,t,r){var e=af(n)?o:wt;return r&&ze(n,t,r)&&(t=F),e(n,je(t,3));
-},On.find=Po,On.findIndex=Ze,On.findKey=function(n,t){return v(n,je(t,3),Et)},On.findLast=Zo,On.findLastIndex=qe,On.findLastKey=function(n,t){return v(n,je(t,3),Ot)},On.floor=ec,On.forEach=ru,On.forEachRight=eu,On.forIn=function(n,t){return null==n?n:co(n,je(t,3),Uu)},On.forInRight=function(n,t){return null==n?n:ao(n,je(t,3),Uu)},On.forOwn=function(n,t){return n&&Et(n,je(t,3))},On.forOwnRight=function(n,t){return n&&Ot(n,je(t,3))},On.get=Wu,On.gt=of,On.gte=ff,On.has=function(n,t){return null!=n&&ke(n,t,Bt);
-},On.hasIn=Bu,On.head=Ke,On.identity=Nu,On.includes=function(n,t,r,e){return n=pu(n)?n:Du(n),r=r&&!e?Ou(r):0,e=n.length,0>r&&(r=Di(e+r,0)),mu(n)?r<=e&&-1r&&(r=Di(e+r,0)),d(n,t,r)):-1},On.inRange=function(n,t,r){return t=Eu(t),r===F?(r=t,t=0):r=Eu(r),n=Iu(n),n>=Mi(t,r)&&n=n},On.isSet=vf,On.isString=mu,On.isSymbol=Au,On.isTypedArray=gf,On.isUndefined=function(n){return n===F},On.isWeakMap=function(n){return xu(n)&&"[object WeakMap]"==yo(n)},On.isWeakSet=function(n){return xu(n)&&"[object WeakSet]"==zt(n)},On.join=function(n,t){
-return null==n?"":Ui.call(n,t)},On.kebabCase=Cf,On.last=Ge,On.lastIndexOf=function(n,t,r){var e=null==n?0:n.length;if(!e)return-1;var u=e;if(r!==F&&(u=Ou(r),u=0>u?Di(e+u,0):Mi(u,e-1)),t===t){for(r=u+1;r--&&n[r]!==t;);n=r}else n=g(n,b,u,true);return n},On.lowerCase=Df,On.lowerFirst=Mf,On.lt=df,On.lte=yf,On.max=function(n){return n&&n.length?mt(n,Nu,Wt):F},On.maxBy=function(n,t){return n&&n.length?mt(n,je(t,2),Wt):F},On.mean=function(n){return x(n,Nu)},On.meanBy=function(n,t){return x(n,je(t,2))},On.min=function(n){
-return n&&n.length?mt(n,Nu,Jt):F},On.minBy=function(n,t){return n&&n.length?mt(n,je(t,2),Jt):F},On.stubArray=Ku,On.stubFalse=Gu,On.stubObject=function(){return{}},On.stubString=function(){return""},On.stubTrue=function(){return true},On.multiply=uc,On.nth=function(n,t){return n&&n.length?tr(n,Ou(t)):F},On.noConflict=function(){return Zn._===this&&(Zn._=pi),this},On.noop=qu,On.now=Jo,On.pad=function(n,t,r){n=zu(n);var e=(t=Ou(t))?T(n):0;return!t||e>=t?n:(t=(t-e)/2,ee(zi(t),r)+n+ee(Ri(t),r))},On.padEnd=function(n,t,r){
-n=zu(n);var e=(t=Ou(t))?T(n):0;return t&&et){var e=n;n=t,t=e}return r||n%1||t%1?(r=Fi(),Mi(n+r*(t-n+$n("1e-"+((r+"").length-1))),t)):cr(n,t);
-},On.reduce=function(n,t,r){var e=af(n)?h:m,u=3>arguments.length;return e(n,je(t,4),r,u,oo)},On.reduceRight=function(n,t,r){var e=af(n)?p:m,u=3>arguments.length;return e(n,je(t,4),r,u,fo)},On.repeat=function(n,t,r){return t=(r?ze(n,t,r):t===F)?1:Ou(t),ar(zu(n),t)},On.replace=function(){var n=arguments,t=zu(n[0]);return 3>n.length?t:t.replace(n[1],n[2])},On.result=function(n,t,r){t=Rr(t,n);var e=-1,u=t.length;for(u||(u=1,n=F);++en||9007199254740991=i)return n;if(i=r-T(e),1>i)return e;
-if(r=o?zr(o,0,i).join(""):n.slice(0,i),u===F)return r+e;if(o&&(i+=r.length-i),_f(u)){if(n.slice(i).search(u)){var f=r;for(u.global||(u=ti(u.source,zu(dn.exec(u))+"g")),u.lastIndex=0;o=u.exec(f);)var c=o.index;r=r.slice(0,c===F?i:c)}}else n.indexOf(jr(u),i)!=i&&(u=r.lastIndexOf(u),-1e.__dir__?"Right":"")}),e},Mn.prototype[n+"Right"]=function(t){
-return this.reverse()[n](t).reverse()}}),u(["filter","map","takeWhile"],function(n,t){var r=t+1,e=1==r||3==r;Mn.prototype[n]=function(n){var t=this.clone();return t.__iteratees__.push({iteratee:je(n,3),type:r}),t.__filtered__=t.__filtered__||e,t}}),u(["head","last"],function(n,t){var r="take"+(t?"Right":"");Mn.prototype[n]=function(){return this[r](1).value()[0]}}),u(["initial","tail"],function(n,t){var r="drop"+(t?"":"Right");Mn.prototype[n]=function(){return this.__filtered__?new Mn(this):this[r](1);
-}}),Mn.prototype.compact=function(){return this.filter(Nu)},Mn.prototype.find=function(n){return this.filter(n).head()},Mn.prototype.findLast=function(n){return this.reverse().find(n)},Mn.prototype.invokeMap=lr(function(n,t){return typeof n=="function"?new Mn(this):this.map(function(r){return Dt(r,n,t)})}),Mn.prototype.reject=function(n){return this.filter(su(je(n)))},Mn.prototype.slice=function(n,t){n=Ou(n);var r=this;return r.__filtered__&&(0t)?new Mn(r):(0>n?r=r.takeRight(-n):n&&(r=r.drop(n)),
-t!==F&&(t=Ou(t),r=0>t?r.dropRight(-t):r.take(t-n)),r)},Mn.prototype.takeRightWhile=function(n){return this.reverse().takeWhile(n).reverse()},Mn.prototype.toArray=function(){return this.take(4294967295)},Et(Mn.prototype,function(n,t){var r=/^(?:filter|find|map|reject)|While$/.test(t),e=/^(?:head|last)$/.test(t),u=On[e?"take"+("last"==t?"Right":""):t],i=e||/^find/.test(t);u&&(On.prototype[t]=function(){function t(n){return n=u.apply(On,s([n],f)),e&&h?n[0]:n}var o=this.__wrapped__,f=e?[1]:arguments,c=o instanceof Mn,a=f[0],l=c||af(o);
-l&&r&&typeof a=="function"&&1!=a.length&&(c=l=false);var h=this.__chain__,p=!!this.__actions__.length,a=i&&!h,c=c&&!p;return!i&&l?(o=c?o:new Mn(this),o=n.apply(o,f),o.__actions__.push({func:nu,args:[t],thisArg:F}),new zn(o,h)):a&&c?n.apply(this,f):(o=this.thru(t),a?e?o.value()[0]:o.value():o)})}),u("pop push shift sort splice unshift".split(" "),function(n){var t=ui[n],r=/^(?:push|sort|unshift)$/.test(n)?"tap":"thru",e=/^(?:pop|shift)$/.test(n);On.prototype[n]=function(){var n=arguments;if(e&&!this.__chain__){
-var u=this.value();return t.apply(af(u)?u:[],n)}return this[r](function(r){return t.apply(af(r)?r:[],n)})}}),Et(Mn.prototype,function(n,t){var r=On[t];if(r){var e=r.name+"";(Ji[e]||(Ji[e]=[])).push({name:t,func:r})}}),Ji[Xr(F,2).name]=[{name:"wrapper",func:F}],Mn.prototype.clone=function(){var n=new Mn(this.__wrapped__);return n.__actions__=Mr(this.__actions__),n.__dir__=this.__dir__,n.__filtered__=this.__filtered__,n.__iteratees__=Mr(this.__iteratees__),n.__takeCount__=this.__takeCount__,n.__views__=Mr(this.__views__),
-n},Mn.prototype.reverse=function(){if(this.__filtered__){var n=new Mn(this);n.__dir__=-1,n.__filtered__=true}else n=this.clone(),n.__dir__*=-1;return n},Mn.prototype.value=function(){var n,t=this.__wrapped__.value(),r=this.__dir__,e=af(t),u=0>r,i=e?t.length:0;n=i;for(var o=this.__views__,f=0,c=-1,a=o.length;++c=this.__values__.length;return{done:n,value:n?F:this.__values__[this.__index__++]}},On.prototype.plant=function(n){for(var t,r=this;r instanceof Sn;){var e=Pe(r);e.__index__=0,e.__values__=F,t?u.__wrapped__=e:t=e;var u=e,r=r.__wrapped__}return u.__wrapped__=n,t},On.prototype.reverse=function(){var n=this.__wrapped__;return n instanceof Mn?(this.__actions__.length&&(n=new Mn(this)),n=n.reverse(),n.__actions__.push({func:nu,args:[Je],thisArg:F}),new zn(n,this.__chain__)):this.thru(Je);
-},On.prototype.toJSON=On.prototype.valueOf=On.prototype.value=function(){return kr(this.__wrapped__,this.__actions__)},On.prototype.first=On.prototype.head,Ai&&(On.prototype[Ai]=tu),On}();typeof define=="function"&&typeof define.amd=="object"&&define.amd?(Zn._=it, define(function(){return it})):Vn?((Vn.exports=it)._=it,qn._=it):Zn._=it}).call(this);!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n(t.d3=t.d3||{})}(this,function(t){"use strict";function n(t){return function(n,e){return Mf(t(n),e)}}function e(t,n){return[t,n]}function r(t,n,e){var r=(n-t)/Math.max(0,e),i=Math.floor(Math.log(r)/Math.LN10),o=r/Math.pow(10,i);return i>=0?(o>=If?10:o>=Hf?5:o>=Bf?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(o>=If?10:o>=Hf?5:o>=Bf?2:1)}function i(t,n,e){var r=Math.abs(n-t)/Math.max(0,e),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),o=r/i;return o>=If?i*=10:o>=Hf?i*=5:o>=Bf&&(i*=2),n=0&&(e=t.slice(r+1),t=t.slice(0,r)),t&&!n.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:e}})}function m(t,n){for(var e,r=0,i=t.length;r=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}})}function A(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;rn?1:t>=n?0:NaN}function U(t){return function(){this.removeAttribute(t)}}function O(t){return function(){this.removeAttributeNS(t.space,t.local)}}function F(t,n){return function(){this.setAttribute(t,n)}}function Y(t,n){return function(){this.setAttributeNS(t.space,t.local,n)}}function I(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}}function H(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}}function B(t){return function(){this.style.removeProperty(t)}}function j(t,n,e){return function(){this.style.setProperty(t,n,e)}}function X(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}function W(t,n){return t.style.getPropertyValue(n)||Gl(t).getComputedStyle(t,null).getPropertyValue(n)}function V(t){return function(){delete this[t]}}function $(t,n){return function(){this[t]=n}}function Z(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}function G(t){return t.trim().split(/^|\s+/)}function Q(t){return t.classList||new J(t)}function J(t){this._node=t,this._names=G(t.getAttribute("class")||"")}function K(t,n){for(var e=Q(t),r=-1,i=n.length;++r>8&15|n>>4&240,n>>4&15|240&n,(15&n)<<4|15&n,1)):(n=Mh.exec(t))?Et(parseInt(n[1],16)):(n=Th.exec(t))?new Rt(n[1],n[2],n[3],1):(n=Nh.exec(t))?new Rt(255*n[1]/100,255*n[2]/100,255*n[3]/100,1):(n=kh.exec(t))?Ct(n[1],n[2],n[3],n[4]):(n=Sh.exec(t))?Ct(255*n[1]/100,255*n[2]/100,255*n[3]/100,n[4]):(n=Ah.exec(t))?Lt(n[1],n[2]/100,n[3]/100,1):(n=Eh.exec(t))?Lt(n[1],n[2]/100,n[3]/100,n[4]):Ch.hasOwnProperty(t)?Et(Ch[t]):"transparent"===t?new Rt(NaN,NaN,NaN,0):null}function Et(t){return new Rt(t>>16&255,t>>8&255,255&t,1)}function Ct(t,n,e,r){return r<=0&&(t=n=e=NaN),new Rt(t,n,e,r)}function zt(t){return t instanceof St||(t=At(t)),t?(t=t.rgb(),new Rt(t.r,t.g,t.b,t.opacity)):new Rt}function Pt(t,n,e,r){return 1===arguments.length?zt(t):new Rt(t,n,e,null==r?1:r)}function Rt(t,n,e,r){this.r=+t,this.g=+n,this.b=+e,this.opacity=+r}function Lt(t,n,e,r){return r<=0?t=n=e=NaN:e<=0||e>=1?t=n=NaN:n<=0&&(t=NaN),new Ut(t,n,e,r)}function Dt(t){if(t instanceof Ut)return new 
Ut(t.h,t.s,t.l,t.opacity);if(t instanceof St||(t=At(t)),!t)return new Ut;if(t instanceof Ut)return t;t=t.rgb();var n=t.r/255,e=t.g/255,r=t.b/255,i=Math.min(n,e,r),o=Math.max(n,e,r),u=NaN,a=o-i,c=(o+i)/2;return a?(u=n===o?(e-r)/a+6*(e0&&c<1?0:u,new Ut(u,a,c,t.opacity)}function qt(t,n,e,r){return 1===arguments.length?Dt(t):new Ut(t,n,e,null==r?1:r)}function Ut(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function Ot(t,n,e){return 255*(t<60?n+(e-n)*t/60:t<180?e:t<240?n+(e-n)*(240-t)/60:n)}function Ft(t){if(t instanceof It)return new It(t.l,t.a,t.b,t.opacity);if(t instanceof $t){var n=t.h*zh;return new It(t.l,Math.cos(n)*t.c,Math.sin(n)*t.c,t.opacity)}t instanceof Rt||(t=zt(t));var e=Xt(t.r),r=Xt(t.g),i=Xt(t.b),o=Ht((.4124564*e+.3575761*r+.1804375*i)/Rh),u=Ht((.2126729*e+.7151522*r+.072175*i)/Lh);return new It(116*u-16,500*(o-u),200*(u-Ht((.0193339*e+.119192*r+.9503041*i)/Dh)),t.opacity)}function Yt(t,n,e,r){return 1===arguments.length?Ft(t):new It(t,n,e,null==r?1:r)}function It(t,n,e,r){this.l=+t,this.a=+n,this.b=+e,this.opacity=+r}function Ht(t){return t>Fh?Math.pow(t,1/3):t/Oh+qh}function Bt(t){return t>Uh?t*t*t:Oh*(t-qh)}function jt(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function Xt(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function Wt(t){if(t instanceof $t)return new $t(t.h,t.c,t.l,t.opacity);t instanceof It||(t=Ft(t));var n=Math.atan2(t.b,t.a)*Ph;return new $t(n<0?n+360:n,Math.sqrt(t.a*t.a+t.b*t.b),t.l,t.opacity)}function Vt(t,n,e,r){return 1===arguments.length?Wt(t):new $t(t,n,e,null==r?1:r)}function $t(t,n,e,r){this.h=+t,this.c=+n,this.l=+e,this.opacity=+r}function Zt(t){if(t instanceof Qt)return new Qt(t.h,t.s,t.l,t.opacity);t instanceof Rt||(t=zt(t));var n=t.r/255,e=t.g/255,r=t.b/255,i=(Vh*r+Xh*n-Wh*e)/(Vh+Xh-Wh),o=r-i,u=(jh*(e-i)-Hh*o)/Bh,a=Math.sqrt(u*u+o*o)/(jh*i*(1-i)),c=a?Math.atan2(u,o)*Ph-120:NaN;return new Qt(c<0?c+360:c,a,i,t.opacity)}function Gt(t,n,e,r){return 1===arguments.length?Zt(t):new Qt(t,n,e,null==r?1:r)}function Qt(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function Jt(t,n,e,r,i){var o=t*t,u=o*t;return((1-3*t+3*o-u)*n+(4-6*o+3*u)*e+(1+3*t+3*o-3*u)*r+u*i)/6}function Kt(t,n){return function(e){return t+e*n}}function tn(t,n,e){return t=Math.pow(t,e),n=Math.pow(n,e)-t,e=1/e,function(r){return Math.pow(t+r*n,e)}}function nn(t,n){var e=n-t;return e?Kt(t,e>180||e<-180?e-360*Math.round(e/360):e):ep(isNaN(t)?n:t)}function en(t){return 1==(t=+t)?rn:function(n,e){return e-n?tn(n,e,t):ep(isNaN(n)?e:n)}}function rn(t,n){var e=n-t;return e?Kt(t,e):ep(isNaN(t)?n:t)}function on(t){return function(n){var e,r,i=n.length,o=new Array(i),u=new Array(i),a=new Array(i);for(e=0;e180?n+=360:n-t>180&&(t+=360),o.push({i:e.push(i(e)+"rotate(",null,r)-2,x:cp(t,n)})):n&&e.push(i(e)+"rotate("+n+r)}function a(t,n,e,o){t!==n?o.push({i:e.push(i(e)+"skewX(",null,r)-2,x:cp(t,n)}):n&&e.push(i(e)+"skewX("+n+r)}function c(t,n,e,r,o,u){if(t!==e||n!==r){var a=o.push(i(o)+"scale(",null,",",null,")");u.push({i:a-4,x:cp(t,e)},{i:a-2,x:cp(n,r)})}else 1===e&&1===r||o.push(i(o)+"scale("+e+","+r+")")}return function(n,e){var r=[],i=[];return n=t(n),e=t(e),o(n.translateX,n.translateY,e.translateX,e.translateY,r,i),u(n.rotate,e.rotate,r,i),a(n.skewX,e.skewX,r,i),c(n.scaleX,n.scaleY,e.scaleX,e.scaleY,r,i),n=e=null,function(t){for(var n,e=-1,o=i.length;++e=0&&n._call.call(null,t),n=n._next;--Ep}function Mn(){Lp=(Rp=qp.now())+Dp,Ep=Cp=0;try{wn()}finally{Ep=0,Nn(),Lp=0}}function Tn(){var t=qp.now(),n=t-Rp;n>Pp&&(Dp-=n,Rp=t)}function Nn(){for(var 
t,n,e=Jh,r=1/0;e;)e._call?(r>e._time&&(r=e._time),t=e,e=e._next):(n=e._next,e._next=null,e=t?t._next=n:Jh=n);Kh=t,kn(r)}function kn(t){if(!Ep){Cp&&(Cp=clearTimeout(Cp));t-Lp>24?(t<1/0&&(Cp=setTimeout(Mn,t-qp.now()-Dp)),zp&&(zp=clearInterval(zp))):(zp||(Rp=qp.now(),zp=setInterval(Tn,Pp)),Ep=1,Up(Mn))}}function Sn(t,n){var e=En(t,n);if(e.state>Hp)throw new Error("too late; already scheduled");return e}function An(t,n){var e=En(t,n);if(e.state>jp)throw new Error("too late; already started");return e}function En(t,n){var e=t.__transition;if(!e||!(e=e[n]))throw new Error("transition not found");return e}function Cn(t,n,e){function r(t){e.state=Bp,e.timer.restart(i,e.delay,e.time),e.delay<=t&&i(t-e.delay)}function i(r){var s,f,l,h;if(e.state!==Bp)return u();for(s in c)if(h=c[s],h.name===e.name){if(h.state===Xp)return Op(i);h.state===Wp?(h.state=$p,h.timer.stop(),h.on.call("interrupt",t,t.__data__,h.index,h.group),delete c[s]):+s=0&&(t=t.slice(0,n)),!t||"start"===t})}function $n(t,n,e){var r,i,o=Vn(n)?Sn:An;return function(){var u=o(this,t),a=u.on;a!==r&&(i=(r=a).copy()).on(n,e),u.on=i}}function Zn(t){return function(){var n=this.parentNode;for(var e in this.__transition)if(+e!==t)return;n&&n.removeChild(this)}}function Gn(t,n){var e,r,i;return function(){var o=W(this,t),u=(this.style.removeProperty(t),W(this,t));return o===u?null:o===e&&u===r?i:i=n(e=o,r=u)}}function Qn(t){return function(){this.style.removeProperty(t)}}function Jn(t,n,e){var r,i;return function(){var o=W(this,t);return o===e?null:o===r?i:i=n(r=o,e)}}function Kn(t,n,e){var r,i,o;return function(){var u=W(this,t),a=e(this);return null==a&&(this.style.removeProperty(t),a=W(this,t)),u===a?null:u===r&&a===i?o:o=n(r=u,i=a)}}function te(t,n,e){function r(){var r=this,i=n.apply(r,arguments);return i&&function(n){r.style.setProperty(t,i(n),e)}}return r._value=n,r}function ne(t){return function(){this.textContent=t}}function ee(t){return function(){var n=t(this);this.textContent=null==n?"":n}}function re(t,n,e,r){this._groups=t,this._parents=n,this._name=e,this._id=r}function ie(t){return _t().transition(t)}function oe(){return++yd}function ue(t){return+t}function ae(t){return t*t}function ce(t){return t*(2-t)}function se(t){return((t*=2)<=1?t*t:--t*(2-t)+1)/2}function fe(t){return t*t*t}function le(t){return--t*t*t+1}function he(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}function pe(t){return 1-Math.cos(t*Md)}function de(t){return Math.sin(t*Md)}function ve(t){return(1-Math.cos(wd*t))/2}function ge(t){return Math.pow(2,10*t-10)}function ye(t){return 1-Math.pow(2,-10*t)}function _e(t){return((t*=2)<=1?Math.pow(2,10*t-10):2-Math.pow(2,10-10*t))/2}function me(t){return 1-Math.sqrt(1-t*t)}function xe(t){return Math.sqrt(1- --t*t)}function be(t){return((t*=2)<=1?1-Math.sqrt(1-t*t):Math.sqrt(1-(t-=2)*t)+1)/2}function we(t){return 1-Me(1-t)}function Me(t){return(t=+t)Math.abs(t[1]-O[1])?M=!0:w=!0),O=t,b=!0,Vd(),o()}function o(){var t;switch(m=O[0]-U[0],x=O[1]-U[1],k){case Zd:case $d:S&&(m=Math.max(P-l,Math.min(L-v,m)),h=l+m,g=v+m),A&&(x=Math.max(R-p,Math.min(D-y,x)),d=p+x,_=y+x);break;case Gd:S<0?(m=Math.max(P-l,Math.min(L-l,m)),h=l+m,g=v):S>0&&(m=Math.max(P-v,Math.min(L-v,m)),h=l,g=v+m),A<0?(x=Math.max(R-p,Math.min(D-p,x)),d=p+x,_=y):A>0&&(x=Math.max(R-y,Math.min(D-y,x)),d=p,_=y+x);break;case Qd:S&&(h=Math.max(P,Math.min(L,l-m*S)),g=Math.max(P,Math.min(L,v+m*S))),A&&(d=Math.max(R,Math.min(D,p-x*A)),_=Math.max(R,Math.min(D,y+x*A)))}g0&&(l=h-m),A<0?y=_-x:A>0&&(p=d-x),k=Zd,I.attr("cursor",nv.selection),o());break;default:return}Vd()}function 
s(){switch(t.event.keyCode){case 16:q&&(w=M=q=!1,o());break;case 18:k===Qd&&(S<0?v=g:S>0&&(l=h),A<0?y=_:A>0&&(p=d),k=Gd,o());break;case 32:k===Zd&&(t.event.altKey?(S&&(v=g-m*S,l=h+m*S),A&&(y=_-x*A,p=d+x*A),k=Qd):(S<0?v=g:S>0&&(l=h),A<0?y=_:A>0&&(p=d),k=Gd),I.attr("cursor",nv[N]),o());break;default:return}Vd()}if(t.event.touches){if(t.event.changedTouches.length=(o=(v+y)/2))?v=o:y=o,(f=e>=(u=(g+_)/2))?g=u:_=u,i=p,!(p=p[l=f<<1|s]))return i[l]=d,t;if(a=+t._x.call(null,p.data),c=+t._y.call(null,p.data),n===a&&e===c)return d.next=p,i?i[l]=d:t._root=d,t;do{i=i?i[l]=new Array(4):t._root=new Array(4),(s=n>=(o=(v+y)/2))?v=o:y=o,(f=e>=(u=(g+_)/2))?g=u:_=u}while((l=f<<1|s)==(h=(c>=u)<<1|a>=o));return i[h]=p,i[l]=d,t}function er(t){var n,e,r,i,o=t.length,u=new Array(o),a=new Array(o),c=1/0,s=1/0,f=-1/0,l=-1/0;for(e=0;ef&&(f=r),il&&(l=i));for(f",i=n[3]||"-",o=n[4]||"",u=!!n[5],a=n[6]&&+n[6],c=!!n[7],s=n[8]&&+n[8].slice(1),f=n[9]||""
-;"n"===f?(c=!0,f="g"):bg[f]||(f=""),(u||"0"===e&&"="===r)&&(u=!0,e="0",r="="),this.fill=e,this.align=r,this.sign=i,this.symbol=o,this.zero=u,this.width=a,this.comma=c,this.precision=s,this.type=f}function yr(n){return Mg=kg(n),t.format=Mg.format,t.formatPrefix=Mg.formatPrefix,Mg}function _r(){this.reset()}function mr(t,n,e){var r=t.s=n+e,i=r-n,o=r-i;t.t=n-o+(e-i)}function xr(t){return t>1?0:t<-1?fy:Math.acos(t)}function br(t){return t>1?ly:t<-1?-ly:Math.asin(t)}function wr(t){return(t=Ty(t/2))*t}function Mr(){}function Tr(t,n){t&&Ey.hasOwnProperty(t.type)&&Ey[t.type](t,n)}function Nr(t,n,e){var r,i=-1,o=t.length-e;for(n.lineStart();++i=0?1:-1,i=r*e,o=my(n),u=Ty(n),a=Dg*u,c=Lg*o+a*my(i),s=a*r*Ty(i);zy.add(_y(s,c)),Rg=t,Lg=o,Dg=u}function zr(t){return[_y(t[1],t[0]),br(t[2])]}function Pr(t){var n=t[0],e=t[1],r=my(e);return[r*my(n),r*Ty(n),Ty(e)]}function Rr(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]}function Lr(t,n){return[t[1]*n[2]-t[2]*n[1],t[2]*n[0]-t[0]*n[2],t[0]*n[1]-t[1]*n[0]]}function Dr(t,n){t[0]+=n[0],t[1]+=n[1],t[2]+=n[2]}function qr(t,n){return[t[0]*n,t[1]*n,t[2]*n]}function Ur(t){var n=ky(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=n,t[1]/=n,t[2]/=n}function Or(t,n){jg.push(Xg=[qg=t,Og=t]),nFg&&(Fg=n)}function Fr(t,n){var e=Pr([t*vy,n*vy]);if(Bg){var r=Lr(Bg,e),i=[r[1],-r[0],0],o=Lr(i,r);Ur(o),o=zr(o);var u,a=t-Yg,c=a>0?1:-1,s=o[0]*dy*c,f=gy(a)>180;f^(c*YgFg&&(Fg=u):(s=(s+360)%360-180,f^(c*YgFg&&(Fg=n))),f?tXr(qg,Og)&&(Og=t):Xr(t,Og)>Xr(qg,Og)&&(qg=t):Og>=qg?(tOg&&(Og=t)):t>Yg?Xr(qg,t)>Xr(qg,Og)&&(Og=t):Xr(t,Og)>Xr(qg,Og)&&(qg=t)}else jg.push(Xg=[qg=t,Og=t]);nFg&&(Fg=n),Bg=e,Yg=t}function Yr(){qy.point=Fr}function Ir(){Xg[0]=qg,Xg[1]=Og,qy.point=Or,Bg=null}function Hr(t,n){if(Bg){var e=t-Yg;Dy.add(gy(e)>180?e+(e>0?360:-360):e)}else Ig=t,Hg=n;Ry.point(t,n),Fr(t,n)}function Br(){Ry.lineStart()}function jr(){Hr(Ig,Hg),Ry.lineEnd(),gy(Dy)>sy&&(qg=-(Og=180)),Xg[0]=qg,Xg[1]=Og,Bg=null}function Xr(t,n){return(n-=t)<0?n+360:n}function Wr(t,n){return t[0]-n[0]}function Vr(t,n){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:nfy?t-py:t<-fy?t+py:t,n]}function oi(t,n,e){return(t%=py)?n||e?Iy(ai(t),ci(n,e)):ai(t):n||e?ci(n,e):ii}function ui(t){return function(n,e){return n+=t,[n>fy?n-py:n<-fy?n+py:n,e]}}function ai(t){var n=ui(t);return n.invert=ui(-t),n}function ci(t,n){function e(t,n){var e=my(n),a=my(t)*e,c=Ty(t)*e,s=Ty(n),f=s*r+a*i;return[_y(c*o-f*u,a*r-s*i),br(f*o+c*u)]}var r=my(t),i=Ty(t),o=my(n),u=Ty(n);return e.invert=function(t,n){var e=my(n),a=my(t)*e,c=Ty(t)*e,s=Ty(n),f=s*o-c*u;return[_y(c*o+s*u,a*r+f*i),br(f*r-a*i)]},e}function si(t,n,e,r,i,o){if(e){var u=my(n),a=Ty(n),c=r*e;null==i?(i=n+r*py,o=n-c/2):(i=fi(u,i),o=fi(u,o),(r>0?io)&&(i+=r*py));for(var s,f=i;r>0?f>o:f1}function di(t,n){return((t=t.x)[0]<0?t[1]-ly-sy:ly-t[1])-((n=n.x)[0]<0?n[1]-ly-sy:ly-n[1])}function vi(t){var n,e=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),n=1},point:function(o,u){var a=o>0?fy:-fy,c=gy(o-e);gy(c-fy)0?ly:-ly),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(a,r),t.point(o,r),n=0):i!==a&&c>=fy&&(gy(e-i)sy?yy((Ty(n)*(o=my(r))*Ty(e)-Ty(r)*(i=my(n))*Ty(t))/(i*o*u)):(n+r)/2}function yi(t,n,e,r){var i;if(null==t)i=e*ly,r.point(-fy,i),r.point(0,i),r.point(fy,i),r.point(fy,0),r.point(fy,-i),r.point(0,-i),r.point(-fy,-i),r.point(-fy,0),r.point(-fy,i);else if(gy(t[0]-n[0])>sy){var o=t[0]0)do{s.point(0===f||3===f?t:e,f>1?r:n)}while((f=(f+a+4)%4)!==l);else s.point(o[0],o[1])}function u(r,i){return gy(r[0]-t)0?0:3:gy(r[0]-e)0?2:1:gy(r[1]-n)0?1:0:i>0?3:2}function a(t,n){return c(t.x,n.x)}function c(t,n){var 
e=u(t,1),r=u(n,1);return e!==r?e-r:0===e?n[1]-t[1]:1===e?t[0]-n[0]:2===e?t[1]-n[1]:n[0]-t[0]}return function(u){function c(t,n){i(t,n)&&k.point(t,n)}function s(){for(var n=0,e=0,i=g.length;er&&(l-o)*(r-u)>(h-u)*(t-o)&&++n:h<=r&&(l-o)*(r-u)<(h-u)*(t-o)&&--n;return n}function f(){k=S,v=[],g=[],N=!0}function l(){var t=s(),n=N&&t,e=(v=Kf(v)).length;(n||e)&&(u.polygonStart(),n&&(u.lineStart(),o(null,null,1,u),u.lineEnd()),e&&r_(v,a,t,o,u),u.polygonEnd()),k=u,v=g=y=null}function h(){A.point=d,g&&g.push(y=[]),T=!0,M=!1,b=w=NaN}function p(){v&&(d(_,m),x&&M&&S.rejoin(),v.push(S.result())),A.point=c,M&&k.lineEnd()}function d(o,u){var a=i(o,u);if(g&&y.push([o,u]),T)_=o,m=u,x=a,T=!1,a&&(k.lineStart(),k.point(o,u));else if(a&&M)k.point(o,u);else{var c=[b=Math.max(l_,Math.min(f_,b)),w=Math.max(l_,Math.min(f_,w))],s=[o=Math.max(l_,Math.min(f_,o)),u=Math.max(l_,Math.min(f_,u))];s_(c,s,t,n,e,r)?(M||(k.lineStart(),k.point(c[0],c[1])),k.point(s[0],s[1]),a||k.lineEnd(),N=!1):a&&(k.lineStart(),k.point(o,u),N=!1)}b=o,w=u,M=a}var v,g,y,_,m,x,b,w,M,T,N,k=u,S=n_(),A={point:c,lineStart:h,lineEnd:p,polygonStart:f,polygonEnd:l};return A}}function mi(){d_.point=bi,d_.lineEnd=xi}function xi(){d_.point=d_.lineEnd=Mr}function bi(t,n){t*=vy,n*=vy,Hy=t,By=Ty(n),jy=my(n),d_.point=wi}function wi(t,n){t*=vy,n*=vy;var e=Ty(n),r=my(n),i=gy(t-Hy),o=my(i),u=Ty(i),a=r*u,c=jy*e-By*r*o,s=By*e+jy*r*o;p_.add(_y(ky(a*a+c*c),s)),Hy=t,By=e,jy=r}function Mi(t,n){return!(!t||!x_.hasOwnProperty(t.type))&&x_[t.type](t,n)}function Ti(t,n){return 0===__(t,n)}function Ni(t,n){var e=__(t[0],t[1]);return __(t[0],n)+__(n,t[1])<=e+sy}function ki(t,n){return!!o_(t.map(Si),Ai(n))}function Si(t){return t=t.map(Ai),t.pop(),t}function Ai(t){return[t[0]*vy,t[1]*vy]}function Ei(t,n,e){var r=Yf(t,n-sy,e).concat(n);return function(t){return r.map(function(n){return[t,n]})}}function Ci(t,n,e){var r=Yf(t,n-sy,e).concat(n);return function(t){return r.map(function(n){return[n,t]})}}function zi(){function t(){return{type:"MultiLineString",coordinates:n()}}function n(){return Yf(xy(o/g)*g,i,g).map(h).concat(Yf(xy(s/y)*y,c,y).map(p)).concat(Yf(xy(r/d)*d,e,d).filter(function(t){return gy(t%g)>sy}).map(f)).concat(Yf(xy(a/v)*v,u,v).filter(function(t){return gy(t%y)>sy}).map(l))}var e,r,i,o,u,a,c,s,f,l,h,p,d=10,v=d,g=90,y=360,_=2.5;return t.lines=function(){return n().map(function(t){return{type:"LineString",coordinates:t}})},t.outline=function(){return{type:"Polygon",coordinates:[h(o).concat(p(c).slice(1),h(i).reverse().slice(1),p(s).reverse().slice(1))]}},t.extent=function(n){return arguments.length?t.extentMajor(n).extentMinor(n):t.extentMinor()},t.extentMajor=function(n){return arguments.length?(o=+n[0][0],i=+n[1][0],s=+n[0][1],c=+n[1][1],o>i&&(n=o,o=i,i=n),s>c&&(n=s,s=c,c=n),t.precision(_)):[[o,s],[i,c]]},t.extentMinor=function(n){return arguments.length?(r=+n[0][0],e=+n[1][0],a=+n[0][1],u=+n[1][1],r>e&&(n=r,r=e,e=n),a>u&&(n=a,a=u,u=n),t.precision(_)):[[r,a],[e,u]]},t.step=function(n){return arguments.length?t.stepMajor(n).stepMinor(n):t.stepMinor()},t.stepMajor=function(n){return arguments.length?(g=+n[0],y=+n[1],t):[g,y]},t.stepMinor=function(n){return arguments.length?(d=+n[0],v=+n[1],t):[d,v]},t.precision=function(n){return arguments.length?(_=+n,f=Ei(a,u,90),l=Ci(r,e,_),h=Ei(s,c,90),p=Ci(o,i,_),t):_},t.extentMajor([[-180,-90+sy],[180,90-sy]]).extentMinor([[-180,-80-sy],[180,80+sy]])}function Pi(){return zi()()}function Ri(){k_.point=Li}function Li(t,n){k_.point=Di,Xy=Vy=t,Wy=$y=n}function Di(t,n){N_.add($y*t-Vy*n),Vy=t,$y=n}function qi(){Di(Xy,Wy)}function 
Ui(t,n){tE_&&(E_=t),nC_&&(C_=n)}function Oi(t,n){P_+=t,R_+=n,++L_}function Fi(){I_.point=Yi}function Yi(t,n){I_.point=Ii,Oi(Qy=t,Jy=n)}function Ii(t,n){var e=t-Qy,r=n-Jy,i=ky(e*e+r*r);D_+=i*(Qy+t)/2,q_+=i*(Jy+n)/2,U_+=i,Oi(Qy=t,Jy=n)}function Hi(){I_.point=Oi}function Bi(){I_.point=Xi}function ji(){Wi(Zy,Gy)}function Xi(t,n){I_.point=Wi,Oi(Zy=Qy=t,Gy=Jy=n)}function Wi(t,n){var e=t-Qy,r=n-Jy,i=ky(e*e+r*r);D_+=i*(Qy+t)/2,q_+=i*(Jy+n)/2,U_+=i,i=Jy*t-Qy*n,O_+=i*(Qy+t),F_+=i*(Jy+n),Y_+=3*i,Oi(Qy=t,Jy=n)}function Vi(t){this._context=t}function $i(t,n){$_.point=Zi,B_=X_=t,j_=W_=n}function Zi(t,n){X_-=t,W_-=n,V_.add(ky(X_*X_+W_*W_)),X_=t,W_=n}function Gi(){this._string=[]}function Qi(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}function Ji(t){return function(n){var e=new Ki;for(var r in t)e[r]=t[r];return e.stream=n,e}}function Ki(){}function to(t,n,e){var r=t.clipExtent&&t.clipExtent();return t.scale(150).translate([0,0]),null!=r&&t.clipExtent(null),Cy(e,t.stream(z_)),n(z_.result()),null!=r&&t.clipExtent(r),t}function no(t,n,e){return to(t,function(e){var r=n[1][0]-n[0][0],i=n[1][1]-n[0][1],o=Math.min(r/(e[1][0]-e[0][0]),i/(e[1][1]-e[0][1])),u=+n[0][0]+(r-o*(e[1][0]+e[0][0]))/2,a=+n[0][1]+(i-o*(e[1][1]+e[0][1]))/2;t.scale(150*o).translate([u,a])},e)}function eo(t,n,e){return no(t,[[0,0],n],e)}function ro(t,n,e){return to(t,function(e){var r=+n,i=r/(e[1][0]-e[0][0]),o=(r-i*(e[1][0]+e[0][0]))/2,u=-i*e[0][1];t.scale(150*i).translate([o,u])},e)}function io(t,n,e){return to(t,function(e){var r=+n,i=r/(e[1][1]-e[0][1]),o=-i*e[0][0],u=(r-i*(e[1][1]+e[0][1]))/2;t.scale(150*i).translate([o,u])},e)}function oo(t){return Ji({point:function(n,e){n=t(n,e),this.stream.point(n[0],n[1])}})}function uo(t,n){function e(r,i,o,u,a,c,s,f,l,h,p,d,v,g){var y=s-r,_=f-i,m=y*y+_*_;if(m>4*n&&v--){var x=u+h,b=a+p,w=c+d,M=ky(x*x+b*b+w*w),T=br(w/=M),N=gy(gy(w)-1)n||gy((y*E+_*C)/m-.5)>.3||u*h+a*p+c*d2?t[2]%360*vy:0,i()):[b*dy,w*dy,M*dy]},n.precision=function(t){return arguments.length?(E=K_(r,A=t*t),o()):ky(A)},n.fitExtent=function(t,e){return no(n,t,e)},n.fitSize=function(t,e){return eo(n,t,e)},n.fitWidth=function(t,e){return ro(n,t,e)},n.fitHeight=function(t,e){return io(n,t,e)},function(){return u=t.apply(this,arguments),n.invert=u.invert&&e,i()}}function fo(t){var n=0,e=fy/3,r=so(t),i=r(n,e);return i.parallels=function(t){return arguments.length?r(n=t[0]*vy,e=t[1]*vy):[n*dy,e*dy]},i}function lo(t){function n(t,n){return[t*e,Ty(n)/e]}var e=my(t);return n.invert=function(t,n){return[t/e,br(n*e)]},n}function ho(t,n){function e(t,n){var e=ky(o-2*i*Ty(n))/i;return[e*Ty(t*=i),u-e*my(t)]}var r=Ty(t),i=(r+Ty(n))/2;if(gy(i)0?n<-ly+sy&&(n=-ly+sy):n>ly-sy&&(n=ly-sy);var e=o/My(mo(n),i);return[e*Ty(i*t),o-e*my(i*t)]}var r=my(t),i=t===n?Ty(t):wy(r/my(n))/wy(mo(n)/mo(t)),o=r*My(mo(t),i)/i;return i?(e.invert=function(t,n){var e=o-n,r=Ny(i)*ky(t*t+e*e);return[_y(t,gy(e))/i*Ny(e),2*yy(My(o/r,1/i))-ly]},e):yo}function bo(t,n){return[t,n]}function wo(t,n){function e(t,n){var e=o-n,r=i*t;return[e*Ty(r),o-e*my(r)]}var r=my(t),i=t===n?Ty(t):(r-my(n))/(n-t),o=r/i+t;return gy(i)=0;)n+=e[r].value;else n=1;t.value=n}function Uo(t,n){if(t===n)return t;var e=t.ancestors(),r=n.ancestors(),i=null;for(t=e.pop(),n=r.pop();t===n;)i=t,t=e.pop(),n=r.pop();return i}function Oo(t,n){var e,r,i,o,u,a=new Bo(t),c=+t.value&&(a.value=t.value),s=[a];for(null==n&&(n=Yo);e=s.pop();)if(c&&(e.value=+e.data.value),(i=n(e.data))&&(u=i.length))for(e.children=new Array(u),o=u-1;o>=0;--o)s.push(r=e.children[o]=new 
Bo(i[o])),r.parent=e,r.depth=e.depth+1;return a.eachBefore(Ho)}function Fo(){return Oo(this).eachBefore(Io)}function Yo(t){return t.children}function Io(t){t.data=t.data.data}function Ho(t){var n=0;do{t.height=n}while((t=t.parent)&&t.height<++n)}function Bo(t){this.data=t,this.depth=this.height=0,this.parent=null}function jo(t){for(var n,e,r=t.length;r;)e=Math.random()*r--|0,n=t[r],t[r]=t[e],t[e]=n;return t}function Xo(t,n){var e,r;if($o(n,t))return[n];for(e=0;e0&&e*e>r*r+i*i}function $o(t,n){for(var e=0;ee*e+r*r}function nu(t){var n=t._,e=t.next._,r=n.r+e.r,i=(n.x*e.r+e.x*n.r)/r,o=(n.y*e.r+e.y*n.r)/r;return i*i+o*o}function eu(t){this._=t,this.next=null,this.previous=null}function ru(t){if(!(i=t.length))return 0;var n,e,r,i,o,u,a,c,s,f,l;if(n=t[0],n.x=0,n.y=0,!(i>1))return n.r;if(e=t[1],n.x=-e.r,e.x=n.r,e.y=0,!(i>2))return n.r+e.r;Ko(e,n,r=t[2]),n=new eu(n),e=new eu(e),r=new eu(r),n.next=r.previous=e,e.next=n.previous=r,r.next=e.previous=n;t:for(a=3;a=0;)n=i[o],n.z+=e,n.m+=e,e+=n.s+(r+=n.c)}function _u(t,n,e){return t.a.parent===n.parent?t.a:e}function mu(t,n){this._=t,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=n}function xu(t){for(var n,e,r,i,o,u=new mu(t,0),a=[u];n=a.pop();)if(r=n._.children)for(n.children=new Array(o=r.length),i=o-1;i>=0;--i)a.push(e=n.children[i]=new mu(r[i],i)),e.parent=n;return(u.parent=new mu(null,0)).children=[u],u}function bu(t,n,e,r,i,o){for(var u,a,c,s,f,l,h,p,d,v,g,y=[],_=n.children,m=0,x=0,b=_.length,w=n.value;mh&&(h=a),g=f*f*v,(p=Math.max(h/g,g/l))>d){f-=a;break}d=p}y.push(u={value:f,dice:c1&&Jm(t[e[r-2]],t[e[r-1]],t[i])<=0;)--r;e[r++]=i}return e.slice(0,r)}function Tu(t){this._size=t,this._call=this._error=null,this._tasks=[],this._data=[],this._waiting=this._active=this._ended=this._start=0}function Nu(t){if(!t._start)try{ku(t)}catch(n){if(t._tasks[t._ended+t._active-1])Au(t,n);else if(!t._data)throw n}}function ku(t){for(;t._start=t._waiting&&t._active=0;)if((e=t._tasks[r])&&(t._tasks[r]=null,e.abort))try{e.abort()}catch(n){}t._active=NaN,Eu(t)}function Eu(t){if(!t._active&&t._call){var n=t._data;t._data=void 0,t._call(t._error,n)}}function Cu(t){if(null==t)t=1/0;else if(!((t=+t)>=1))throw new Error("invalid concurrency");return new Tu(t)}function zu(t){return function(n,e){t(null==n?e:null)}}function Pu(t){var n=t.responseType;return n&&"text"!==n?t.response:t.responseText}function Ru(t,n){return function(e){return t(e.responseText,n)}}function Lu(t){function n(n){var o=n+"",u=e.get(o);if(!u){if(i!==Mx)return i;e.set(o,u=r.push(n))}return t[(u-1)%t.length]}var e=Xe(),r=[],i=Mx;return t=null==t?[]:wx.call(t),n.domain=function(t){if(!arguments.length)return r.slice();r=[],e=Xe();for(var i,o,u=-1,a=t.length;++u=e?1:r(t)}}}function Yu(t){return function(n,e){var r=t(n=+n,e=+e);return function(t){return t<=0?n:t>=1?e:r(t)}}}function Iu(t,n,e,r){var i=t[0],o=t[1],u=n[0],a=n[1];return o2?Hu:Iu,o=u=null,r}function r(n){return(o||(o=i(a,c,f?Fu(t):t,s)))(+n)}var i,o,u,a=kx,c=kx,s=pp,f=!1;return r.invert=function(t){return(u||(u=i(c,a,Ou,f?Yu(n):n)))(+t)},r.domain=function(t){return arguments.length?(a=bx.call(t,Nx),e()):a.slice()},r.range=function(t){return arguments.length?(c=wx.call(t),e()):c.slice()},r.rangeRound=function(t){return c=wx.call(t),s=dp,e()},r.clamp=function(t){return arguments.length?(f=!!t,e()):f},r.interpolate=function(t){return arguments.length?(s=t,e()):s},e()}function Xu(t){var n=t.domain;return t.ticks=function(t){var e=n();return 
jf(e[0],e[e.length-1],null==t?10:t)},t.tickFormat=function(t,e){return Sx(n(),t,e)},t.nice=function(e){null==e&&(e=10);var i,o=n(),u=0,a=o.length-1,c=o[u],s=o[a];return s0?(c=Math.floor(c/i)*i,s=Math.ceil(s/i)*i,i=r(c,s,e)):i<0&&(c=Math.ceil(c*i)/i,s=Math.floor(s*i)/i,i=r(c,s,e)),i>0?(o[u]=Math.floor(c/i)*i,o[a]=Math.ceil(s/i)*i,n(o)):i<0&&(o[u]=Math.ceil(c*i)/i,o[a]=Math.floor(s*i)/i,n(o)),t},t}function Wu(){var t=ju(Ou,cp);return t.copy=function(){return Bu(t,Wu())},Xu(t)}function Vu(){function t(t){return+t}var n=[0,1];return t.invert=t,t.domain=t.range=function(e){return arguments.length?(n=bx.call(e,Nx),t):n.slice()},t.copy=function(){return Vu().domain(n)},Xu(t)}function $u(t,n){return(n=Math.log(n/t))?function(e){return Math.log(e/t)/n}:Tx(n)}function Zu(t,n){return t<0?function(e){return-Math.pow(-n,e)*Math.pow(-t,1-e)}:function(e){return Math.pow(n,e)*Math.pow(t,1-e)}}function Gu(t){return isFinite(t)?+("1e"+t):t<0?0:t}function Qu(t){return 10===t?Gu:t===Math.E?Math.exp:function(n){return Math.pow(t,n)}}function Ju(t){return t===Math.E?Math.log:10===t&&Math.log10||2===t&&Math.log2||(t=Math.log(t),function(n){return Math.log(n)/t})}function Ku(t){return function(n){return-t(-n)}}function ta(){function n(){return o=Ju(i),u=Qu(i),r()[0]<0&&(o=Ku(o),u=Ku(u)),e}var e=ju($u,Zu).domain([1,10]),r=e.domain,i=10,o=Ju(10),u=Qu(10);return e.base=function(t){return arguments.length?(i=+t,n()):i},e.domain=function(t){return arguments.length?(r(t),n()):r()},e.ticks=function(t){var n,e=r(),a=e[0],c=e[e.length-1];(n=c0){for(;hc)break;v.push(l)}}else for(;h
=1;--f)if(!((l=s*f)c)break;v.push(l)}}else v=jf(h,p,Math.min(p-h,d)).map(u);return n?v.reverse():v},e.tickFormat=function(n,r){if(null==r&&(r=10===i?".0e":","),"function"!=typeof r&&(r=t.format(r)),n===1/0)return r;null==n&&(n=10);var a=Math.max(1,i*n/e.ticks().length);return function(t){var n=t/u(Math.round(o(t)));return n*i0?i[n-1]:e[0],n=i?[o[i-1],r]:[o[n-1],o[n]]},t.copy=function(){return oa().domain([e,r]).range(u)},Xu(t)}function ua(){function t(t){if(t<=t)return e[kf(n,t,0,r)]}var n=[.5],e=[0,1],r=1;return t.domain=function(i){return arguments.length?(n=wx.call(i),r=Math.min(n.length,e.length-1),t):n.slice()},t.range=function(i){return arguments.length?(e=wx.call(i),r=Math.min(n.length,e.length-1),t):e.slice()},t.invertExtent=function(t){var r=e.indexOf(t);return[n[r-1],n[r]]},t.copy=function(){return ua().domain(n).range(e)},t}function aa(t,n,e,r){function i(n){return t(n=new Date(+n)),n}return i.floor=i,i.ceil=function(e){return t(e=new Date(e-1)),n(e,1),t(e),e},i.round=function(t){var n=i(t),e=i.ceil(t);return t-n0))return a;do{a.push(u=new Date(+e)),n(e,o),t(e)}while(u=n)for(;t(n),!e(n);)n.setTime(n-1)},function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;n(t,-1),!e(t););else for(;--r>=0;)for(;n(t,1),!e(t););})},e&&(i.count=function(n,r){return Ex.setTime(+n),Cx.setTime(+r),t(Ex),t(Cx),Math.floor(e(Ex,Cx))},i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?i.filter(r?function(n){return r(n)%t==0
-}:function(n){return i.count(0,n)%t==0}):i:null}),i}function ca(t){return aa(function(n){n.setDate(n.getDate()-(n.getDay()+7-t)%7),n.setHours(0,0,0,0)},function(t,n){t.setDate(t.getDate()+7*n)},function(t,n){return(n-t-(n.getTimezoneOffset()-t.getTimezoneOffset())*Rx)/Lx})}function sa(t){return aa(function(n){n.setUTCDate(n.getUTCDate()-(n.getUTCDay()+7-t)%7),n.setUTCHours(0,0,0,0)},function(t,n){t.setUTCDate(t.getUTCDate()+7*n)},function(t,n){return(n-t)/Lx})}function fa(t){if(0<=t.y&&t.y<100){var n=new Date(-1,t.m,t.d,t.H,t.M,t.S,t.L);return n.setFullYear(t.y),n}return new Date(t.y,t.m,t.d,t.H,t.M,t.S,t.L)}function la(t){if(0<=t.y&&t.y<100){var n=new Date(Date.UTC(-1,t.m,t.d,t.H,t.M,t.S,t.L));return n.setUTCFullYear(t.y),n}return new Date(Date.UTC(t.y,t.m,t.d,t.H,t.M,t.S,t.L))}function ha(t){return{y:t,m:0,d:1,H:0,M:0,S:0,L:0}}function pa(t){function n(t,n){return function(e){var r,i,o,u=[],a=-1,c=0,s=t.length;for(e instanceof Date||(e=new Date(+e));++a53)return null;"w"in u||(u.w=1),"Z"in u?(i=la(ha(u.y)),o=i.getUTCDay(),i=o>4||0===o?db.ceil(i):db(i),i=lb.offset(i,7*(u.V-1)),u.y=i.getUTCFullYear(),u.m=i.getUTCMonth(),u.d=i.getUTCDate()+(u.w+6)%7):(i=n(ha(u.y)),o=i.getDay(),i=o>4||0===o?jx.ceil(i):jx(i),i=Ix.offset(i,7*(u.V-1)),u.y=i.getFullYear(),u.m=i.getMonth(),u.d=i.getDate()+(u.w+6)%7)}else("W"in u||"U"in u)&&("w"in u||(u.w="u"in u?u.u%7:"W"in u?1:0),o="Z"in u?la(ha(u.y)).getUTCDay():n(ha(u.y)).getDay(),u.m=0,u.d="W"in u?(u.w+6)%7+7*u.W-(o+5)%7:u.w+7*u.U-(o+6)%7);return"Z"in u?(u.H+=u.Z/100|0,u.M+=u.Z%100,la(u)):n(u)}}function r(t,n,e,r){for(var i,o,u=0,a=n.length,c=e.length;u=c)return-1;if(37===(i=n.charCodeAt(u++))){if(i=n.charAt(u++),!(o=H[i in Pb?n.charAt(u++):i])||(r=o(t,e,r))<0)return-1}else if(i!=e.charCodeAt(r++))return-1}return r}function i(t,n,e){var r=C.exec(n.slice(e));return r?(t.p=z[r[0].toLowerCase()],e+r[0].length):-1}function o(t,n,e){var r=L.exec(n.slice(e));return r?(t.w=D[r[0].toLowerCase()],e+r[0].length):-1}function u(t,n,e){var r=P.exec(n.slice(e));return r?(t.w=R[r[0].toLowerCase()],e+r[0].length):-1}function a(t,n,e){var r=O.exec(n.slice(e));return r?(t.m=F[r[0].toLowerCase()],e+r[0].length):-1}function c(t,n,e){var r=q.exec(n.slice(e));return r?(t.m=U[r[0].toLowerCase()],e+r[0].length):-1}function s(t,n,e){return r(t,w,n,e)}function f(t,n,e){return r(t,M,n,e)}function l(t,n,e){return r(t,T,n,e)}function h(t){return S[t.getDay()]}function p(t){return k[t.getDay()]}function d(t){return E[t.getMonth()]}function v(t){return A[t.getMonth()]}function g(t){return N[+(t.getHours()>=12)]}function y(t){return S[t.getUTCDay()]}function _(t){return k[t.getUTCDay()]}function m(t){return E[t.getUTCMonth()]}function x(t){return A[t.getUTCMonth()]}function b(t){return N[+(t.getUTCHours()>=12)]}var w=t.dateTime,M=t.date,T=t.time,N=t.periods,k=t.days,S=t.shortDays,A=t.months,E=t.shortMonths,C=ga(N),z=ya(N),P=ga(k),R=ya(k),L=ga(S),D=ya(S),q=ga(A),U=ya(A),O=ga(E),F=ya(E),Y={a:h,A:p,b:d,B:v,c:null,d:Ua,e:Ua,f:Ha,H:Oa,I:Fa,j:Ya,L:Ia,m:Ba,M:ja,p:g,Q:_c,s:mc,S:Xa,u:Wa,U:Va,V:$a,w:Za,W:Ga,x:null,X:null,y:Qa,Y:Ja,Z:Ka,"%":yc},I={a:y,A:_,b:m,B:x,c:null,d:tc,e:tc,f:oc,H:nc,I:ec,j:rc,L:ic,m:uc,M:ac,p:b,Q:_c,s:mc,S:cc,u:sc,U:fc,V:lc,w:hc,W:pc,x:null,X:null,y:dc,Y:vc,Z:gc,"%":yc},H={a:o,A:u,b:a,B:c,c:s,d:Sa,e:Sa,f:Ra,H:Ea,I:Ea,j:Aa,L:Pa,m:ka,M:Ca,p:i,Q:Da,s:qa,S:za,u:ma,U:xa,V:ba,w:_a,W:wa,x:f,X:l,y:Ta,Y:Ma,Z:Na,"%":La};return Y.x=n(M,Y),Y.X=n(T,Y),Y.c=n(w,Y),I.x=n(M,I),I.X=n(T,I),I.c=n(w,I),{format:function(t){var e=n(t+="",Y);return e.toString=function(){return 
t},e},parse:function(t){var n=e(t+="",fa);return n.toString=function(){return t},n},utcFormat:function(t){var e=n(t+="",I);return e.toString=function(){return t},e},utcParse:function(t){var n=e(t,la);return n.toString=function(){return t},n}}}function da(t,n,e){var r=t<0?"-":"",i=(r?-t:t)+"",o=i.length;return r+(o68?1900:2e3),e+r[0].length):-1}function Na(t,n,e){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(n.slice(e,e+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),e+r[0].length):-1}function ka(t,n,e){var r=Rb.exec(n.slice(e,e+2));return r?(t.m=r[0]-1,e+r[0].length):-1}function Sa(t,n,e){var r=Rb.exec(n.slice(e,e+2));return r?(t.d=+r[0],e+r[0].length):-1}function Aa(t,n,e){var r=Rb.exec(n.slice(e,e+3));return r?(t.m=0,t.d=+r[0],e+r[0].length):-1}function Ea(t,n,e){var r=Rb.exec(n.slice(e,e+2));return r?(t.H=+r[0],e+r[0].length):-1}function Ca(t,n,e){var r=Rb.exec(n.slice(e,e+2));return r?(t.M=+r[0],e+r[0].length):-1}function za(t,n,e){var r=Rb.exec(n.slice(e,e+2));return r?(t.S=+r[0],e+r[0].length):-1}function Pa(t,n,e){var r=Rb.exec(n.slice(e,e+3));return r?(t.L=+r[0],e+r[0].length):-1}function Ra(t,n,e){var r=Rb.exec(n.slice(e,e+6));return r?(t.L=Math.floor(r[0]/1e3),e+r[0].length):-1}function La(t,n,e){var r=Lb.exec(n.slice(e,e+1));return r?e+r[0].length:-1}function Da(t,n,e){var r=Rb.exec(n.slice(e));return r?(t.Q=+r[0],e+r[0].length):-1}function qa(t,n,e){var r=Rb.exec(n.slice(e));return r?(t.Q=1e3*+r[0],e+r[0].length):-1}function Ua(t,n){return da(t.getDate(),n,2)}function Oa(t,n){return da(t.getHours(),n,2)}function Fa(t,n){return da(t.getHours()%12||12,n,2)}function Ya(t,n){return da(1+Ix.count(ob(t),t),n,3)}function Ia(t,n){return da(t.getMilliseconds(),n,3)}function Ha(t,n){return Ia(t,n)+"000"}function Ba(t,n){return da(t.getMonth()+1,n,2)}function ja(t,n){return da(t.getMinutes(),n,2)}function Xa(t,n){return da(t.getSeconds(),n,2)}function Wa(t){var n=t.getDay();return 0===n?7:n}function Va(t,n){return da(Bx.count(ob(t),t),n,2)}function $a(t,n){var e=t.getDay();return t=e>=4||0===e?Vx(t):Vx.ceil(t),da(Vx.count(ob(t),t)+(4===ob(t).getDay()),n,2)}function Za(t){return t.getDay()}function Ga(t,n){return da(jx.count(ob(t),t),n,2)}function Qa(t,n){return da(t.getFullYear()%100,n,2)}function Ja(t,n){return da(t.getFullYear()%1e4,n,4)}function Ka(t){var n=t.getTimezoneOffset();return(n>0?"-":(n*=-1,"+"))+da(n/60|0,"0",2)+da(n%60,"0",2)}function tc(t,n){return da(t.getUTCDate(),n,2)}function nc(t,n){return da(t.getUTCHours(),n,2)}function ec(t,n){return da(t.getUTCHours()%12||12,n,2)}function rc(t,n){return da(1+lb.count(Eb(t),t),n,3)}function ic(t,n){return da(t.getUTCMilliseconds(),n,3)}function oc(t,n){return ic(t,n)+"000"}function uc(t,n){return da(t.getUTCMonth()+1,n,2)}function ac(t,n){return da(t.getUTCMinutes(),n,2)}function cc(t,n){return da(t.getUTCSeconds(),n,2)}function sc(t){var n=t.getUTCDay();return 0===n?7:n}function fc(t,n){return da(pb.count(Eb(t),t),n,2)}function lc(t,n){var e=t.getUTCDay();return t=e>=4||0===e?yb(t):yb.ceil(t),da(yb.count(Eb(t),t)+(4===Eb(t).getUTCDay()),n,2)}function hc(t){return t.getUTCDay()}function pc(t,n){return da(db.count(Eb(t),t),n,2)}function dc(t,n){return da(t.getUTCFullYear()%100,n,2)}function vc(t,n){return da(t.getUTCFullYear()%1e4,n,4)}function gc(){return"+0000"}function yc(){return"%"}function _c(t){return+t}function mc(t){return Math.floor(+t/1e3)}function xc(n){return Cb=pa(n),t.timeFormat=Cb.format,t.timeParse=Cb.parse,t.utcFormat=Cb.utcFormat,t.utcParse=Cb.utcParse,Cb}function bc(t){return t.toISOString()}function wc(t){var n=new 
Date(t);return isNaN(n)?null:n}function Mc(t){return new Date(t)}function Tc(t){return t instanceof Date?+t:+new Date(+t)}function Nc(t,n,e,r,o,u,a,c,s){function f(i){return(a(i)1?0:t<-1?gw:Math.acos(t)}function Ec(t){return t>=1?yw:t<=-1?-yw:Math.asin(t)}function Cc(t){return t.innerRadius}function zc(t){return t.outerRadius}function Pc(t){return t.startAngle}function Rc(t){return t.endAngle}function Lc(t){return t&&t.padAngle}function Dc(t,n,e,r,i,o,u,a){var c=e-t,s=r-n,f=u-i,l=a-o,h=(f*(n-o)-l*(t-i))/(l*c-f*s);return[t+h*c,n+h*s]}function qc(t,n,e,r,i,o,u){var a=t-e,c=n-r,s=(u?o:-o)/dw(a*a+c*c),f=s*c,l=-s*a,h=t+f,p=n+l,d=e+f,v=r+l,g=(h+d)/2,y=(p+v)/2,_=d-h,m=v-p,x=_*_+m*m,b=i-o,w=h*v-d*p,M=(m<0?-1:1)*dw(lw(0,b*b*x-w*w)),T=(w*m-_*M)/x,N=(-w*_-m*M)/x,k=(w*m+_*M)/x,S=(-w*_+m*M)/x,A=T-g,E=N-y,C=k-g,z=S-y;return A*A+E*E>C*C+z*z&&(T=k,N=S),{cx:T,cy:N,x01:-f,y01:-l,x11:T*(i/b-1),y11:N*(i/b-1)}}function Uc(t){this._context=t}function Oc(t){return t[0]}function Fc(t){return t[1]}function Yc(t){this._curve=t}function Ic(t){function n(n){return new Yc(t(n))}return n._curve=t,n}function Hc(t){var n=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?n(Ic(t)):n()._curve},t}function Bc(t){return t.source}function jc(t){return t.target}function Xc(t){function n(){var n,a=Cw.call(arguments),c=e.apply(this,a),s=r.apply(this,a);if(u||(u=n=Oe()),t(u,+i.apply(this,(a[0]=c,a)),+o.apply(this,a),+i.apply(this,(a[0]=s,a)),+o.apply(this,a)),n)return u=null,n+""||null}var e=Bc,r=jc,i=Oc,o=Fc,u=null;return n.source=function(t){return arguments.length?(e=t,n):e},n.target=function(t){return arguments.length?(r=t,n):r},n.x=function(t){return arguments.length?(i="function"==typeof t?t:aw(+t),n):i},n.y=function(t){return arguments.length?(o="function"==typeof t?t:aw(+t),n):o},n.context=function(t){return arguments.length?(u=null==t?null:t,n):u},n}function Wc(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n=(n+r)/2,e,n,i,r,i)}function Vc(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n,e=(e+i)/2,r,e,r,i)}function $c(t,n,e,r,i){var o=Ew(n,e),u=Ew(n,e=(e+i)/2),a=Ew(r,e),c=Ew(r,i);t.moveTo(o[0],o[1]),t.bezierCurveTo(u[0],u[1],a[0],a[1],c[0],c[1])}function Zc(){return Xc(Wc)}function Gc(){return Xc(Vc)}function Qc(){var t=Xc($c);return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t}function Jc(t,n,e){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+n)/6,(t._y0+4*t._y1+e)/6)}function Kc(t){this._context=t}function ts(t){this._context=t}function ns(t){this._context=t}function es(t,n){this._basis=new Kc(t),this._beta=n}function rs(t,n,e){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-n),t._y2+t._k*(t._y1-e),t._x2,t._y2)}function is(t,n){this._context=t,this._k=(1-n)/6}function os(t,n){this._context=t,this._k=(1-n)/6}function us(t,n){this._context=t,this._k=(1-n)/6}function as(t,n,e){var r=t._x1,i=t._y1,o=t._x2,u=t._y2;if(t._l01_a>vw){var a=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*a-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*a-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>vw){var s=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,f=3*t._l23_a*(t._l23_a+t._l12_a);o=(o*s+t._x1*t._l23_2a-n*t._l12_2a)/f,u=(u*s+t._y1*t._l23_2a-e*t._l12_2a)/f}t._context.bezierCurveTo(r,i,o,u,t._x2,t._y2)}function cs(t,n){this._context=t,this._alpha=n}function ss(t,n){this._context=t,this._alpha=n}function fs(t,n){this._context=t,this._alpha=n}function 
ls(t){this._context=t}function hs(t){return t<0?-1:1}function ps(t,n,e){var r=t._x1-t._x0,i=n-t._x1,o=(t._y1-t._y0)/(r||i<0&&-0),u=(e-t._y1)/(i||r<0&&-0),a=(o*i+u*r)/(r+i);return(hs(o)+hs(u))*Math.min(Math.abs(o),Math.abs(u),.5*Math.abs(a))||0}function ds(t,n){var e=t._x1-t._x0;return e?(3*(t._y1-t._y0)/e-n)/2:n}function vs(t,n,e){var r=t._x0,i=t._y0,o=t._x1,u=t._y1,a=(o-r)/3;t._context.bezierCurveTo(r+a,i+a*n,o-a,u-a*e,o,u)}function gs(t){this._context=t}function ys(t){this._context=new _s(t)}function _s(t){this._context=t}function ms(t){return new gs(t)}function xs(t){return new ys(t)}function bs(t){this._context=t}function ws(t){var n,e,r=t.length-1,i=new Array(r),o=new Array(r),u=new Array(r);for(i[0]=0,o[0]=2,u[0]=t[0]+2*t[1],n=1;n=0;--n)i[n]=(u[n]-i[n+1])/o[n];for(o[r-1]=(t[r]+i[r-1])/2,n=0;n0)){if(o/=d,d<0){if(o0){if(o>p)return;o>h&&(h=o)}if(o=r-c,d||!(o<0)){if(o/=d,d<0){if(o>p)return;o>h&&(h=o)}else if(d>0){if(o0)){if(o/=v,v<0){if(o0){if(o>p)return;o>h&&(h=o)}if(o=i-s,v||!(o<0)){if(o/=v,v<0){if(o>p)return;o>h&&(h=o)}else if(v>0){if(o0||p<1)||(h>0&&(t[0]=[c+h*d,s+h*v]),p<1&&(t[1]=[c+p*d,s+p*v]),!0)}}}}}function Fs(t,n,e,r,i){var o=t[1];if(o)return!0;var u,a,c=t[0],s=t.left,f=t.right,l=s[0],h=s[1],p=f[0],d=f[1],v=(l+p)/2,g=(h+d)/2;if(d===h){if(v=r)return;if(l>p){if(c){if(c[1]>=i)return}else c=[v,e];o=[v,i]}else{if(c){if(c[1]1)if(l>p){if(c){if(c[1]>=i)return}else c=[(e-a)/u,e];o=[(i-a)/u,i]}else{if(c){if(c[1]=r)return}else c=[n,u*n+a];o=[r,u*r+a]}else{if(c){if(c[0]EM||Math.abs(i[0][1]-i[1][1])>EM)||delete kM[o]}function Is(t){return TM[t.index]={site:t,halfedges:[]}}function Hs(t,n){var e=t.site,r=n.left,i=n.right;return e===i&&(i=r,r=e),i?Math.atan2(i[1]-r[1],i[0]-r[0]):(e===r?(r=n[1],i=n[0]):(r=n[0],i=n[1]),Math.atan2(r[0]-i[0],i[1]-r[1]))}function Bs(t,n){return n[+(n.left!==t.site)]}function js(t,n){return n[+(n.left===t.site)]}function Xs(){for(var t,n,e,r,i=0,o=TM.length;iEM||Math.abs(v-h)>EM)&&(c.splice(a,0,kM.push(qs(u,p,Math.abs(d-t)EM?[t,Math.abs(l-t)EM?[Math.abs(h-r)EM?[e,Math.abs(l-e)EM?[Math.abs(h-n)=-CM)){var p=c*c+s*s,d=f*f+l*l,v=(l*p-s*d)/h,g=(c*d-f*p)/h,y=SM.pop()||new Vs;y.arc=t,y.site=i,y.x=v+u,y.y=(y.cy=g+a)+Math.sqrt(v*v+g*g),t.circle=y;for(var _=null,m=NM._;m;)if(y.yEM)a=a.L;else{if(!((i=o-ef(a,u))>EM)){r>-EM?(n=a.P,e=a):i>-EM?(n=a,e=a.N):n=e=a;break}if(!a.R){n=a;break}a=a.R}Is(t);var c=Qs(t);if(MM.insert(n,c),n||e){if(n===e)return Zs(n),e=Qs(n.site),MM.insert(c,e),c.edge=e.edge=Ds(n.site,c.site),$s(n),void $s(e);if(!e)return void(c.edge=Ds(n.site,c.site));Zs(n),Zs(e);var s=n.site,f=s[0],l=s[1],h=t[0]-f,p=t[1]-l,d=e.site,v=d[0]-f,g=d[1]-l,y=2*(h*g-p*v),_=h*h+p*p,m=v*v+g*g,x=[(g*_-p*m)/y+f,(h*m-v*_)/y+l];Us(e.edge,s,d,x),c.edge=Ds(s,t,null,x),e.edge=Ds(t,d,null,x),$s(n),$s(e)}}function nf(t,n){var e=t.site,r=e[0],i=e[1],o=i-n;if(!o)return r;var u=t.P;if(!u)return-1/0;e=u.site;var a=e[0],c=e[1],s=c-n;if(!s)return a;var f=a-r,l=1/o-1/s,h=f/s;return l?(-h+Math.sqrt(h*h-2*l*(f*f/(-2*s)-c+s/2+i-o/2)))/l+r:(r+a)/2}function ef(t,n){var e=t.N;if(e)return nf(e,n);var r=t.site;return r[1]===n?r[0]:1/0}function rf(t,n,e){return(t[0]-e[0])*(n[1]-t[1])-(t[0]-n[0])*(e[1]-t[1])}function of(t,n){return n[1]-t[1]||n[0]-t[0]}function uf(t,n){var e,r,i,o=t.sort(of).pop();for(kM=[],TM=new Array(t.length),MM=new Cs,NM=new Cs;;)if(i=wM,o&&(!i||o[1]r?(r+i)/2:Math.min(0,r)||Math.max(0,i),u>o?(o+u)/2:Math.min(0,o)||Math.max(0,u))}function yf(){return null}function _f(){for(var t=arguments,n=0,e=t.length;no;h!=n&&(h=n,p.classed("graph-scroll-below",h));var 
e=!h&&pageYOffset>d;l!=e&&(l=e,p.classed("graph-scroll-fixed",l)),h&&(t=i-1),c!=t&&(a.classed("graph-scroll-active",function(n,e){return e===t}),u.call("active",null,t),c=t)}function e(){s=[];var t;a.each(function(n,e){e||(t=this.getBoundingClientRect().top),s.push(this.getBoundingClientRect().top-t)});var n=p.node().getBoundingClientRect(),e=f.node()?f.node().getBoundingClientRect().height:0;d=n.top+pageYOffset,o=n.bottom-e+pageYOffset}function r(){if(l){var n;switch(t.event.keyCode){case 39:if(t.event.metaKey)return;case 40:case 34:n=t.event.metaKey?1/0:1;break;case 37:if(t.event.metaKey)return;case 38:case 33:n=t.event.metaKey?-1/0:-1;break;case 32:n=t.event.shiftKey?-1:1;break;default:return}var e=Math.max(0,Math.min(c+n,i-1));e!=c&&(fh(document.documentElement).interrupt().transition().duration(500).tween("scroll",function(){var t=cp(pageYOffset,s[e]+d);return function(n){scrollTo(0,t(n))}}),t.event.preventDefault())}}var i,o,u=g("scroll","active"),a=fh("null"),c=NaN,s=[],f=fh("null"),l=null,h=null,p=fh("body"),d=0,v=Math.random(),y=200,_={};return _.container=function(t){return t?(p=t,_):p},_.graph=function(t){return t?(f=t,_):f},_.eventId=function(t){return t?(v=t,_):v},_.sections=function(t){return t?(a=t,i=a.size(),fh(window).on("scroll.gscroll"+v,n).on("resize.gscroll"+v,e).on("keydown.gscroll"+v,r),e(),window["gscrollTimer"+v]&&window["gscrollTimer"+v].stop(),window["gscrollTimer"+v]=bn(n),_):a},_.on=function(){var t=u.on.apply(u,arguments);return t===u?_:t},_.offset=function(t){return t?(y=t,_):y},_}var Mf=function(t,n){return tn?1:t>=n?0:NaN},Tf=function(t){return 1===t.length&&(t=n(t)),{left:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)<0?r=o+1:i=o}return r},right:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)>0?i=o:r=o+1}return r}}},Nf=Tf(Mf),kf=Nf.right,Sf=Nf.left,Af=function(t,n){null==n&&(n=e);for(var r=0,i=t.length-1,o=t[0],u=new Array(i<0?0:i);rt?1:n>=t?0:NaN},zf=function(t){return null===t?NaN:+t},Pf=function(t,n){var e,r,i=t.length,o=0,u=-1,a=0,c=0;if(null==n)for(;++u1)return c/(o-1)},Rf=function(t,n){var e=Pf(t,n);return e?Math.sqrt(e):e},Lf=function(t,n){var e,r,i,o=t.length,u=-1;if(null==n){for(;++u=e)for(r=i=e;++ue&&(r=e),i=e)for(r=i=e;++ue&&(r=e),i0)return[t];if((i=n0)for(t=Math.ceil(t/a),n=Math.floor(n/a),u=new Array(o=Math.ceil(n-t+1));++cl;)h.pop(),--p;var d,v=new Array(p+1);for(o=0;o<=p;++o)d=v[o]=[],d.x0=o>0?h[o-1]:f,d.x1=o=1)return+e(t[r-1],r-1,t);var r,i=(r-1)*n,o=Math.floor(i),u=+e(t[o],o,t);return u+(+e(t[o+1],o+1,t)-u)*(i-o)}},$f=function(t,n,e){return t=Uf.call(t,zf).sort(Mf),Math.ceil((e-n)/(2*(Vf(t,.75)-Vf(t,.25))*Math.pow(t.length,-1/3)))},Zf=function(t,n,e){return Math.ceil((e-n)/(3.5*Rf(t)*Math.pow(t.length,-1/3)))},Gf=function(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o=e)for(r=e;++or&&(r=e)}else for(;++o=e)for(r=e;++or&&(r=e);return r},Qf=function(t,n){var e,r=t.length,i=r,o=-1,u=0;if(null==n)for(;++o=0;)for(r=t[i],n=r.length;--n>=0;)e[--u]=r[n];return e},tl=function(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o=e)for(r=e;++oe&&(r=e)}else for(;++o=e)for(r=e;++oe&&(r=e);return r},nl=function(t,n){for(var e=n.length,r=new Array(e);e--;)r[e]=t[n[e]];return r},el=function(t,n){if(e=t.length){var e,r,i=0,o=0,u=t[o];for(null==n&&(n=Mf);++i0)for(var e,r,i=new Array(e),o=0;o=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),gl.hasOwnProperty(n)?{space:gl[n],local:t}:t},_l=function(t){var n=yl(t);return(n.local?w:b)(n)},ml=0;T.prototype=M.prototype={constructor:T,get:function(t){for(var 
n=this._;!(n in t);)if(!(t=t.parentNode))return;return t[n]},set:function(t,n){return t[this._]=n},remove:function(t){return this._ in t&&delete t[this._]},toString:function(){return this._}};var xl=function(t){return function(){return this.matches(t)}};if("undefined"!=typeof document){var bl=document.documentElement;if(!bl.matches){var wl=bl.webkitMatchesSelector||bl.msMatchesSelector||bl.mozMatchesSelector||bl.oMatchesSelector;xl=function(t){return function(){return wl.call(this,t)}}}}var Ml=xl,Tl={};if(t.event=null,"undefined"!=typeof document){"onmouseenter"in document.documentElement||(Tl={mouseenter:"mouseover",mouseleave:"mouseout"})}var Nl=function(t,n,e){var r,i,o=S(t+""),u=o.length;{if(!(arguments.length<2)){for(a=n?E:A,null==e&&(e=!1),r=0;r=x&&(x=m+1);!(_=g[x])&&++x=0;)(r=i[o])&&(u&&u!==r.nextSibling&&u.parentNode.insertBefore(r,u),u=r);return this},Hl=function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=q);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?B:"function"==typeof n?X:j)(t,n,null==e?"":e)):W(this.node(),t)},Jl=function(t,n){return arguments.length>1?this.each((null==n?V:"function"==typeof n?Z:$)(t,n)):this.node()[t]};J.prototype={add:function(t){this._names.indexOf(t)<0&&(this._names.push(t),this._node.setAttribute("class",this._names.join(" ")))},remove:function(t){var n=this._names.indexOf(t);n>=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var Kl=function(t,n){var e=G(t+"");if(arguments.length<2){for(var r=Q(this.node()),i=-1,o=e.length;++i